hbase-commits mailing list archives

From: apurt...@apache.org
Subject: svn commit: r884310 [6/7] - in /hadoop/hbase/branches/0.20_on_hadoop-0.18.3: ./ bin/ conf/ lib/ src/contrib/ src/contrib/ec2/ src/contrib/ec2/bin/ src/contrib/ec2/bin/image/ src/contrib/transactional/src/java/org/apache/hadoop/hbase/client/tableindexed...
Date: Wed, 25 Nov 2009 22:30:39 GMT
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseTestCase.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/HBaseTestCase.java Wed Nov 25 22:30:29 2009
@@ -224,8 +224,7 @@
     if (startKeyBytes == null || startKeyBytes.length == 0) {
       startKeyBytes = START_KEY_BYTES;
     }
-    return addContent(new HRegionIncommon(r), Bytes.toString(columnFamily),
-        null,
+    return addContent(new HRegionIncommon(r), Bytes.toString(columnFamily), null,
       startKeyBytes, endKey, -1);
   }
 

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestHMsg.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestHMsg.java?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestHMsg.java (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestHMsg.java Wed Nov 25 22:30:29 2009
@@ -0,0 +1,55 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.util.Bytes;
+
+import junit.framework.TestCase;
+
+public class TestHMsg extends TestCase {
+  public void testList() {
+    List<HMsg> msgs = new ArrayList<HMsg>();
+    HMsg hmsg = null;
+    final int size = 10;
+    for (int i = 0; i < size; i++) {
+      byte [] b = Bytes.toBytes(i);
+      hmsg = new HMsg(HMsg.Type.MSG_REGION_OPEN,
+        new HRegionInfo(new HTableDescriptor(Bytes.toBytes("test")), b, b));
+      msgs.add(hmsg);
+    }
+    assertEquals(size, msgs.size());
+    int index = msgs.indexOf(hmsg);
+    assertNotSame(-1, index);
+    msgs.remove(index);
+    assertEquals(size - 1, msgs.size());
+    byte [] other = Bytes.toBytes("other");
+    hmsg = new HMsg(HMsg.Type.MSG_REGION_OPEN,
+      new HRegionInfo(new HTableDescriptor(Bytes.toBytes("test")), other, other));
+    assertEquals(-1, msgs.indexOf(hmsg));
+    // Assert that two HMsgs are equal if they have the same content.
+    byte [] b = Bytes.toBytes(1);
+    hmsg = new HMsg(HMsg.Type.MSG_REGION_OPEN,
+     new HRegionInfo(new HTableDescriptor(Bytes.toBytes("test")), b, b));
+    assertNotSame(-1, msgs.indexOf(hmsg));
+  }
+}
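
The new test relies on HMsg implementing value equality: two independently constructed messages with the same type and region content compare equal, which is what lets List.indexOf locate them. A minimal sketch of that property, reusing only the constructors shown in the test above:

    List<HMsg> msgs = new ArrayList<HMsg>();
    byte [] b = Bytes.toBytes(1);
    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(Bytes.toBytes("test")), b, b);
    msgs.add(new HMsg(HMsg.Type.MSG_REGION_OPEN, hri));
    // A second HMsg built from the same content is found by indexOf,
    // so equals() must be content-based rather than identity-based.
    int index = msgs.indexOf(new HMsg(HMsg.Type.MSG_REGION_OPEN, hri));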

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestSerialization.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestSerialization.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestSerialization.java Wed Nov 25 22:30:29 2009
@@ -34,8 +34,11 @@
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowLock;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.BatchOperation;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
@@ -61,6 +64,14 @@
     super.tearDown();
   }
 
+  public void testCompareFilter() throws Exception {
+    Filter f = new RowFilter(CompareOp.EQUAL,
+      new BinaryComparator(Bytes.toBytes("testRowOne-2")));
+    byte [] bytes = Writables.getBytes(f);
+    Filter ff = (Filter)Writables.getWritable(bytes, new RowFilter());
+    assertNotNull(ff);
+  }
+
   public void testKeyValue() throws Exception {
     byte [] row = Bytes.toBytes(getName());
     byte [] column = Bytes.toBytes(getName() + ":" + getName());
@@ -359,6 +370,7 @@
 
     assertTrue(Bytes.equals(scan.getStartRow(), desScan.getStartRow()));
     assertTrue(Bytes.equals(scan.getStopRow(), desScan.getStopRow()));
+    assertEquals(scan.getCacheBlocks(), desScan.getCacheBlocks());
     Set<byte[]> set = null;
     Set<byte[]> desSet = null;
     
@@ -429,6 +441,47 @@
     Result deserialized = (Result)Writables.getWritable(b, new Result());
     assertEquals(r.size(), deserialized.size());
   }
+
+  public void testResultDynamicBuild() throws Exception {
+    byte [] rowA = Bytes.toBytes("rowA");
+    byte [] famA = Bytes.toBytes("famA");
+    byte [] qfA = Bytes.toBytes("qfA");
+    byte [] valueA = Bytes.toBytes("valueA");
+    
+    byte [] rowB = Bytes.toBytes("rowB");
+    byte [] famB = Bytes.toBytes("famB");
+    byte [] qfB = Bytes.toBytes("qfB");
+    byte [] valueB = Bytes.toBytes("valueB");
+    
+    KeyValue kvA = new KeyValue(rowA, famA, qfA, valueA);
+    KeyValue kvB = new KeyValue(rowB, famB, qfB, valueB);
+    
+    Result result = new Result(new KeyValue[]{kvA, kvB});
+    
+    byte [] rb = Writables.getBytes(result);
+    
+    
+    // Call getRow() first
+    Result deResult = (Result)Writables.getWritable(rb, new Result());
+    byte [] row = deResult.getRow();
+    assertTrue(Bytes.equals(row, rowA));
+    
+    // Call sorted() first
+    deResult = (Result)Writables.getWritable(rb, new Result());
+    assertTrue("results are not equivalent, first key mismatch",
+        result.sorted()[0].equals(deResult.sorted()[0]));
+    assertTrue("results are not equivalent, second key mismatch",
+        result.sorted()[1].equals(deResult.sorted()[1]));
+
+    // Call raw() first
+    deResult = (Result)Writables.getWritable(rb, new Result());
+    assertTrue("results are not equivalent, first key mismatch",
+        result.raw()[0].equals(deResult.raw()[0]));
+    assertTrue("results are not equivalent, second key mismatch",
+        result.raw()[1].equals(deResult.raw()[1]));
+    
+    
+  }
   
   public void testResultArray() throws Exception {
     byte [] rowA = Bytes.toBytes("rowA");

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/TestZooKeeper.java Wed Nov 25 22:30:29 2009
@@ -109,47 +109,53 @@
     ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout, EmptyWatcher.instance, sessionID, password);
     zk.close();
 
-    Thread.sleep(sessionTimeout * 3);
+    Thread.sleep(sessionTimeout * 2);
 
     System.err.println("ZooKeeper should have timed out");
     connection.relocateRegion(HConstants.ROOT_TABLE_NAME, HConstants.EMPTY_BYTE_ARRAY);
   }
 
-  public void testRegionServerSessionExpired() {
-    try {
-      this.conf.setBoolean("hbase.regionserver.restart.on.zk.expire", true);
-      new HTable(conf, HConstants.META_TABLE_NAME);
-  
-      ZooKeeperWrapper zkw = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
-      String quorumServers = zkw.getQuorumServers();
-      int sessionTimeout = conf.getInt("zookeeper.session.timeout", 2 * 1000);
-
-      HRegionServer rs = cluster.getRegionServer(0);
-      ZooKeeperWrapper rsZK = rs.getZooKeeperWrapper();
-      long sessionID = rsZK.getSessionID();
-      byte[] password = rsZK.getSessionPassword();
-  
-      ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout, EmptyWatcher.instance, sessionID, password);
-      zk.close();
+  public void testRegionServerSessionExpired() throws Exception{
+    this.conf.setBoolean("hbase.regionserver.restart.on.zk.expire", true);
+    new HTable(conf, HConstants.META_TABLE_NAME);
+    HRegionServer rs = cluster.getRegionServer(0);
+    sessionExpirationHelper(rs.getZooKeeperWrapper());
+  }
 
-      Thread.sleep(sessionTimeout * 3);
+  public void testMasterSessionExpired() throws Exception {
+    new HTable(conf, HConstants.META_TABLE_NAME);
+    HMaster master = cluster.getMaster();
+    sessionExpirationHelper(master.getZooKeeperWrapper());
+  }
+
+  public void sessionExpirationHelper(ZooKeeperWrapper nodeZK) throws Exception{
+    ZooKeeperWrapper zkw = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
+    String quorumServers = zkw.getQuorumServers();
+    int sessionTimeout = 5 * 1000; // 5 seconds
+
+    byte[] password = nodeZK.getSessionPassword();
+    long sessionID = nodeZK.getSessionID();
+
+    ZooKeeper zk = new ZooKeeper(quorumServers,
+        sessionTimeout, EmptyWatcher.instance, sessionID, password);
+
+    zk.close();
+
+    Thread.sleep(sessionTimeout * 3L);
+
+    new HTable(conf, HConstants.META_TABLE_NAME);
+
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    HTableDescriptor desc = new HTableDescriptor("test");
+    HColumnDescriptor family = new HColumnDescriptor("fam");
+    desc.addFamily(family);
+    admin.createTable(desc);
+
+    HTable table = new HTable("test");
+    Put put = new Put(Bytes.toBytes("testrow"));
+    put.add(Bytes.toBytes("fam"), Bytes.toBytes("col"), Bytes.toBytes("testdata"));
+    table.put(put);
 
-      new HTable(conf, HConstants.META_TABLE_NAME);
-  
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      HTableDescriptor desc = new HTableDescriptor("test");
-      HColumnDescriptor family = new HColumnDescriptor("fam:");
-      desc.addFamily(family);
-      admin.createTable(desc);
-  
-      HTable table = new HTable("test");
-      Put put = new Put(Bytes.toBytes("testrow"));
-      put.add(Bytes.toBytes("fam"), Bytes.toBytes("col"), Bytes.toBytes("testdata"));
-      table.put(put);
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
   }
   
   public void testMultipleZK() {
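
The extracted sessionExpirationHelper forces ZooKeeper session expiry the same way for both the region server and the master: it opens a second client using the victim node's session ID and password, then closes it, which makes the server expire the original session. A condensed sketch of the trick, under the same assumptions as the test (nodeZK is the ZooKeeperWrapper whose session should expire; quorumServers and sessionTimeout come from the wrapper and configuration as above):

    long sessionID = nodeZK.getSessionID();
    byte [] password = nodeZK.getSessionPassword();
    // Connect with the existing session's credentials, then close; the server
    // treats this as the session ending and expires it for the original holder.
    ZooKeeper zk = new ZooKeeper(quorumServers, sessionTimeout,
        EmptyWatcher.instance, sessionID, password);
    zk.close();
    // Give the quorum time to notice the expiration before probing the cluster.
    Thread.sleep(sessionTimeout * 3L);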

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestClient.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestClient.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestClient.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/client/TestClient.java Wed Nov 25 22:30:29 2009
@@ -20,17 +20,39 @@
 
 package org.apache.hadoop.hbase.client;
 
-import java.io.IOException;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseClusterTestCase;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.filter.QualifierFilter;
+import org.apache.hadoop.hbase.filter.RegexStringComparator;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.filter.WhileMatchFilter;
 import org.apache.hadoop.hbase.util.Bytes;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.UUID;
+
+/**
+ * Tests from client-side of a cluster.
+ */
 public class TestClient extends HBaseClusterTestCase {
-
+  final Log LOG = LogFactory.getLog(getClass());
   private static byte [] ROW = Bytes.toBytes("testRow");
   private static byte [] FAMILY = Bytes.toBytes("testFamily");
   private static byte [] QUALIFIER = Bytes.toBytes("testQualifier");
@@ -44,9 +66,311 @@
   public TestClient() {
     super();
   }
+
+  /**
+   * Test, from the client side, of an involved filter against a multi-family
+   * table that involves deletes.
+   * 
+   * @throws Exception
+   */
+  public void testWeirdCacheBehaviour() throws Exception {
+    byte[] TABLE = Bytes.toBytes("testWeirdCacheBehaviour");
+    byte[][] FAMILIES = new byte[][] { Bytes.toBytes("trans-blob"),
+        Bytes.toBytes("trans-type"), Bytes.toBytes("trans-date"),
+        Bytes.toBytes("trans-tags"), Bytes.toBytes("trans-group") };
+    HTable ht = createTable(TABLE, FAMILIES);
+    String value = "this is the value";
+    String value2 = "this is some other value";
+    String keyPrefix1 = UUID.randomUUID().toString();
+    String keyPrefix2 = UUID.randomUUID().toString();
+    String keyPrefix3 = UUID.randomUUID().toString();
+    putRows(ht, 3, value, keyPrefix1);
+    putRows(ht, 3, value, keyPrefix2);
+    putRows(ht, 3, value, keyPrefix3);
+    ht.flushCommits();
+    putRows(ht, 3, value2, keyPrefix1);
+    putRows(ht, 3, value2, keyPrefix2);
+    putRows(ht, 3, value2, keyPrefix3);
+    HTable table = new HTable(conf, Bytes.toBytes("testWeirdCacheBehaviour"));
+    System.out.println("Checking values for key: " + keyPrefix1);
+    assertEquals("Got back incorrect number of rows from scan", 3,
+        getNumberOfRows(keyPrefix1, value2, table));
+    System.out.println("Checking values for key: " + keyPrefix2);
+    assertEquals("Got back incorrect number of rows from scan", 3,
+        getNumberOfRows(keyPrefix2, value2, table));
+    System.out.println("Checking values for key: " + keyPrefix3);
+    assertEquals("Got back incorrect number of rows from scan", 3,
+        getNumberOfRows(keyPrefix3, value2, table));
+    deleteColumns(ht, value2, keyPrefix1);
+    deleteColumns(ht, value2, keyPrefix2);
+    deleteColumns(ht, value2, keyPrefix3);
+    System.out.println("Starting important checks.....");
+    assertEquals("Got back incorrect number of rows from scan: " + keyPrefix1,
+      0, getNumberOfRows(keyPrefix1, value2, table));
+    assertEquals("Got back incorrect number of rows from scan: " + keyPrefix2,
+      0, getNumberOfRows(keyPrefix2, value2, table));
+    assertEquals("Got back incorrect number of rows from scan: " + keyPrefix3,
+      0, getNumberOfRows(keyPrefix3, value2, table));
+    ht.setScannerCaching(0);
+    assertEquals("Got back incorrect number of rows from scan", 0,
+      getNumberOfRows(keyPrefix1, value2, table)); ht.setScannerCaching(100);
+    assertEquals("Got back incorrect number of rows from scan", 0,
+      getNumberOfRows(keyPrefix2, value2, table));
+  }
+
+  private void deleteColumns(HTable ht, String value, String keyPrefix)
+  throws IOException {
+    ResultScanner scanner = buildScanner(keyPrefix, value, ht);
+    Iterator<Result> it = scanner.iterator();
+    int count = 0;
+    while (it.hasNext()) {
+      Result result = it.next();
+      Delete delete = new Delete(result.getRow());
+      delete.deleteColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"));
+      ht.delete(delete);
+      count++;
+    }
+    assertEquals("Did not perform correct number of deletes", 3, count);
+  }
+
+  private int getNumberOfRows(String keyPrefix, String value, HTable ht)
+      throws Exception {
+    ResultScanner resultScanner = buildScanner(keyPrefix, value, ht);
+    Iterator<Result> scanner = resultScanner.iterator();
+    int numberOfResults = 0;
+    while (scanner.hasNext()) {
+      Result result = scanner.next();
+      System.out.println("Got back key: " + Bytes.toString(result.getRow()));
+      for (KeyValue kv : result.raw()) {
+        System.out.println("kv=" + kv.toString() + ", "
+            + Bytes.toString(kv.getValue()));
+      }
+      numberOfResults++;
+    }
+    return numberOfResults;
+  }
+
+  private ResultScanner buildScanner(String keyPrefix, String value, HTable ht)
+      throws IOException {
+    // OurFilterList allFilters = new OurFilterList();
+    FilterList allFilters = new FilterList(/* FilterList.Operator.MUST_PASS_ALL */);
+    allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
+    SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes
+        .toBytes("trans-tags"), Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes
+        .toBytes(value));
+    filter.setFilterIfMissing(true);
+    allFilters.addFilter(filter);
+
+    // allFilters.addFilter(new
+    // RowExcludingSingleColumnValueFilter(Bytes.toBytes("trans-tags"),
+    // Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value)));
+
+    Scan scan = new Scan();
+    scan.addFamily(Bytes.toBytes("trans-blob"));
+    scan.addFamily(Bytes.toBytes("trans-type"));
+    scan.addFamily(Bytes.toBytes("trans-date"));
+    scan.addFamily(Bytes.toBytes("trans-tags"));
+    scan.addFamily(Bytes.toBytes("trans-group"));
+    scan.setFilter(allFilters);
+
+    return ht.getScanner(scan);
+  }
+
+  private void putRows(HTable ht, int numRows, String value, String key)
+      throws IOException {
+    for (int i = 0; i < numRows; i++) {
+      String row = key + "_" + UUID.randomUUID().toString();
+      System.out.println(String.format("Saving row: %s, with value %s", row,
+          value));
+      Put put = new Put(Bytes.toBytes(row));
+      put.add(Bytes.toBytes("trans-blob"), null, Bytes
+          .toBytes("value for blob"));
+      put.add(Bytes.toBytes("trans-type"), null, Bytes.toBytes("statement"));
+      put.add(Bytes.toBytes("trans-date"), null, Bytes
+          .toBytes("20090921010101999"));
+      put.add(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"), Bytes
+          .toBytes(value));
+      put.add(Bytes.toBytes("trans-group"), null, Bytes
+          .toBytes("adhocTransactionGroupId"));
+      ht.put(put);
+    }
+  }
+
+  /**
+   * Test filters across multiple regions.  It does counts.  Needs eye-balling of
+   * logs to ensure that we're not scanning more regions than we're supposed to.
+   * Related to the TestFilterAcrossRegions over in the o.a.h.h.filter package.
+   * @throws IOException
+   */
+  public void testFilterAcrossMutlipleRegions() throws IOException {
+    byte [] name = Bytes.toBytes(getName());
+    HTable t = createTable(name, FAMILY);
+    int rowCount = loadTable(t);
+    assertRowCount(t, rowCount);
+    // Split the table.  Should split on a reasonable key; 'lqj'
+    Map<HRegionInfo, HServerAddress> regions  = splitTable(t);
+    assertRowCount(t, rowCount);
+    // Get end key of first region.
+    byte [] endKey = regions.keySet().iterator().next().getEndKey();
+    // Count rows with a filter that stops us before passed 'endKey'.
+    // Should be count of rows in first region.
+    int endKeyCount = countRows(t, createScanWithRowFilter(endKey));
+    assertTrue(endKeyCount < rowCount);
+
+    // How do I know I did not go to the second region?  That's tough.  Can't really
+    // do that in a client-side region test.  I verified by tracing in the debugger.
+    // I changed the messages that come out when set to DEBUG so should see
+    // when scanner is done. Says "Finished with scanning..." with region name.
+    // Check that its finished in right region.
+
+    // New test.  Make it so scan goes into next region by one and then two.
+    // Make sure count comes out right.
+    byte [] key = new byte [] {endKey[0], endKey[1], (byte)(endKey[2] + 1)};
+    int plusOneCount = countRows(t, createScanWithRowFilter(key));
+    assertEquals(endKeyCount + 1, plusOneCount);
+    key = new byte [] {endKey[0], endKey[1], (byte)(endKey[2] + 2)};
+    int plusTwoCount = countRows(t, createScanWithRowFilter(key));
+    assertEquals(endKeyCount + 2, plusTwoCount);
+
+    // New test.  Make it so I scan one less than endkey.
+    key = new byte [] {endKey[0], endKey[1], (byte)(endKey[2] - 1)};
+    int minusOneCount = countRows(t, createScanWithRowFilter(key));
+    assertEquals(endKeyCount - 1, minusOneCount);
+    // For above test... study logs.  Make sure we do "Finished with scanning.."
+    // in first region and that we do not fall into the next region.
+    
+    key = new byte [] {'a', 'a', 'a'};
+    int countBBB = countRows(t,
+      createScanWithRowFilter(key, null, CompareFilter.CompareOp.EQUAL));
+    assertEquals(1, countBBB);
+
+    int countGreater = countRows(t, createScanWithRowFilter(endKey, null,
+      CompareFilter.CompareOp.GREATER_OR_EQUAL));
+    // Because started at start of table.
+    assertEquals(0, countGreater);
+    countGreater = countRows(t, createScanWithRowFilter(endKey, endKey,
+      CompareFilter.CompareOp.GREATER_OR_EQUAL));
+    assertEquals(rowCount - endKeyCount, countGreater);
+  }
   
-  public void XtestSuperSimple() throws Exception {
-    byte [] TABLE = Bytes.toBytes("testSuperSimple");
+  /**
+   * Load table with rows from 'aaa' to 'zzz'.
+   * @param t
+   * @return Count of rows loaded.
+   * @throws IOException
+   */
+  private int loadTable(final HTable t) throws IOException {
+    // Add data to table.
+    byte[] k = new byte[3];
+    int rowCount = 0;
+    for (byte b1 = 'a'; b1 < 'z'; b1++) {
+      for (byte b2 = 'a'; b2 < 'z'; b2++) {
+        for (byte b3 = 'a'; b3 < 'z'; b3++) {
+          k[0] = b1;
+          k[1] = b2;
+          k[2] = b3;
+          Put put = new Put(k);
+          put.add(FAMILY, new byte[0], k);
+          t.put(put);
+          rowCount++;
+        }
+      }
+    }
+    return rowCount;
+  }
+
+  /*
+   * @param key
+   * @return Scan with RowFilter that does LESS than passed key.
+   */
+  private Scan createScanWithRowFilter(final byte [] key) {
+    return createScanWithRowFilter(key, null, CompareFilter.CompareOp.LESS);
+  }
+
+  /*
+   * @param key
+   * @param op
+   * @param startRow
+   * @return Scan with RowFilter that does CompareOp op on passed key.
+   */
+  private Scan createScanWithRowFilter(final byte [] key,
+      final byte [] startRow, CompareFilter.CompareOp op) {
+    // Make sure key is of some substance... non-null and greater than the first key.
+    assertTrue(key != null && key.length > 0 &&
+      Bytes.BYTES_COMPARATOR.compare(key, new byte [] {'a', 'a', 'a'}) >= 0);
+    LOG.info("Key=" + Bytes.toString(key));
+    Scan s = startRow == null? new Scan(): new Scan(startRow);
+    Filter f = new RowFilter(op, new BinaryComparator(key));
+    f = new WhileMatchFilter(f);
+    s.setFilter(f);
+    return s;
+  }
+
+  /*
+   * @param t
+   * @param s
+   * @return Count of rows in table.
+   * @throws IOException
+   */
+  private int countRows(final HTable t, final Scan s)
+  throws IOException {
+    // Assert all rows in table.
+    ResultScanner scanner = t.getScanner(s);
+    int count = 0;
+    for (Result result: scanner) {
+      count++;
+      assertTrue(result.size() > 0);
+      // LOG.info("Count=" + count + ", row=" + Bytes.toString(result.getRow()));
+    }
+    return count;
+  }
+
+  private void assertRowCount(final HTable t, final int expected)
+  throws IOException {
+    assertEquals(expected, countRows(t, new Scan()));
+  }
+
+  /*
+   * Split table into multiple regions.
+   * @param t Table to split.
+   * @return Map of regions to servers.
+   * @throws IOException
+   */
+  private Map<HRegionInfo, HServerAddress> splitTable(final HTable t)
+  throws IOException {
+    // Split this table in two.
+    HBaseAdmin admin = new HBaseAdmin(this.conf);
+    admin.split(t.getTableName());
+    Map<HRegionInfo, HServerAddress> regions = waitOnSplit(t);
+    assertTrue(regions.size() > 1);
+    return regions;
+  }
+
+  /*
+   * Wait on table split.  May return because we waited long enough on the split
+   * and it didn't happen.  Caller should check.
+   * @param t
+   * @return Map of table regions; caller needs to check table actually split.
+   */
+  private Map<HRegionInfo, HServerAddress> waitOnSplit(final HTable t)
+  throws IOException {
+    Map<HRegionInfo, HServerAddress> regions = t.getRegionsInfo();
+    int originalCount = regions.size();
+    for (int i = 0; i < this.conf.getInt("hbase.test.retries", 30); i++) {
+      Thread.currentThread();
+      try {
+        Thread.sleep(this.conf.getInt("hbase.server.thread.wakefrequency", 1000));
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+      regions = t.getRegionsInfo();
+      if (regions.size() > originalCount) break;
+    }
+    return regions;
+  }
+
+  public void testSuperSimple() throws Exception {
+    byte [] TABLE = Bytes.toBytes(getName());
     HTable ht = createTable(TABLE, FAMILY);
     Put put = new Put(ROW);
     put.add(FAMILY, QUALIFIER, VALUE);
@@ -59,7 +383,41 @@
     scanner.close();
     System.out.println("Done.");
   }
-  
+
+  public void testFilters() throws Exception {
+    byte [] TABLE = Bytes.toBytes("testFilters");
+    HTable ht = createTable(TABLE, FAMILY);
+    byte [][] ROWS = makeN(ROW, 10);
+    byte [][] QUALIFIERS = {
+        Bytes.toBytes("col0-<d2v1>-<d3v2>"), Bytes.toBytes("col1-<d2v1>-<d3v2>"), 
+        Bytes.toBytes("col2-<d2v1>-<d3v2>"), Bytes.toBytes("col3-<d2v1>-<d3v2>"), 
+        Bytes.toBytes("col4-<d2v1>-<d3v2>"), Bytes.toBytes("col5-<d2v1>-<d3v2>"), 
+        Bytes.toBytes("col6-<d2v1>-<d3v2>"), Bytes.toBytes("col7-<d2v1>-<d3v2>"), 
+        Bytes.toBytes("col8-<d2v1>-<d3v2>"), Bytes.toBytes("col9-<d2v1>-<d3v2>")
+    };
+    for(int i=0;i<10;i++) {
+      Put put = new Put(ROWS[i]);
+      put.add(FAMILY, QUALIFIERS[i], VALUE);
+      ht.put(put);
+    }
+    Scan scan = new Scan();
+    scan.addFamily(FAMILY);
+    Filter filter = new QualifierFilter(CompareOp.EQUAL,
+        new RegexStringComparator("col[1-5]"));
+    scan.setFilter(filter);
+    ResultScanner scanner = ht.getScanner(scan);
+    int expectedIndex = 1;
+    for(Result result : ht.getScanner(scan)) {
+      assertEquals(result.size(), 1);
+      assertTrue(Bytes.equals(result.raw()[0].getRow(), ROWS[expectedIndex]));
+      assertTrue(Bytes.equals(result.raw()[0].getQualifier(), 
+          QUALIFIERS[expectedIndex]));
+      expectedIndex++;
+    }
+    assertEquals(expectedIndex, 6);
+    scanner.close();
+  }
+
   /**
    * Test simple table and non-existent row cases.
    */
@@ -1031,9 +1389,8 @@
         result.size() == 9);
     
   }
-  
+
   public void testDeletes() throws Exception {
-    
     byte [] TABLE = Bytes.toBytes("testDeletes");
     
     byte [][] ROWS = makeNAscii(ROW, 6);
@@ -1075,6 +1432,9 @@
     put.add(FAMILIES[0], QUALIFIER, ts[4], VALUES[4]);
     put.add(FAMILIES[0], QUALIFIER, ts[2], VALUES[2]);
     put.add(FAMILIES[0], QUALIFIER, ts[3], VALUES[3]);
+    put.add(FAMILIES[0], null, ts[4], VALUES[4]);
+    put.add(FAMILIES[0], null, ts[2], VALUES[2]);
+    put.add(FAMILIES[0], null, ts[3], VALUES[3]);
     ht.put(put);
     
     delete = new Delete(ROW);
@@ -1082,7 +1442,7 @@
     ht.delete(delete);
     
     get = new Get(ROW);
-    get.addFamily(FAMILIES[0]);
+    get.addColumn(FAMILIES[0], QUALIFIER);
     get.setMaxVersions(Integer.MAX_VALUE);
     result = ht.get(get);
     assertNResult(result, ROW, FAMILIES[0], QUALIFIER, 
@@ -1091,7 +1451,7 @@
         0, 2);
     
     scan = new Scan(ROW);
-    scan.addFamily(FAMILIES[0]);
+    scan.addColumn(FAMILIES[0], QUALIFIER);
     scan.setMaxVersions(Integer.MAX_VALUE);
     result = getSingleScanResult(ht, scan);
     assertNResult(result, ROW, FAMILIES[0], QUALIFIER, 
@@ -1099,6 +1459,16 @@
         new byte[][] {VALUES[1], VALUES[2], VALUES[3]},
         0, 2);
     
+    // Test for HBASE-1847
+    delete = new Delete(ROW);
+    delete.deleteColumn(FAMILIES[0], null);
+    ht.delete(delete);
+    
+    // Cleanup null qualifier
+    delete = new Delete(ROW);
+    delete.deleteColumns(FAMILIES[0], null);
+    ht.delete(delete);
+    
     // Expected client behavior might be that you can re-put deleted values
     // But alas, this is not to be.  We can't put them back in either case.
     
@@ -1280,8 +1650,38 @@
     assertTrue(Bytes.equals(result.sorted()[0].getValue(), VALUES[1]));
     assertTrue(Bytes.equals(result.sorted()[1].getValue(), VALUES[2]));
     scanner.close();
+    
+    // Add test of bulk deleting.
+    for (int i = 0; i < 10; i++) {
+      byte [] bytes = Bytes.toBytes(i);
+      put = new Put(bytes);
+      put.add(FAMILIES[0], QUALIFIER, bytes);
+      ht.put(put);
+    }
+    for (int i = 0; i < 10; i++) {
+      byte [] bytes = Bytes.toBytes(i);
+      get = new Get(bytes);
+      get.addFamily(FAMILIES[0]);
+      result = ht.get(get);
+      assertTrue(result.size() == 1);
+    }
+    ArrayList<Delete> deletes = new ArrayList<Delete>();
+    for (int i = 0; i < 10; i++) {
+      byte [] bytes = Bytes.toBytes(i);
+      delete = new Delete(bytes);
+      delete.deleteFamily(FAMILIES[0]);
+      deletes.add(delete);
+    }
+    ht.delete(deletes);
+    for (int i = 0; i < 10; i++) {
+      byte [] bytes = Bytes.toBytes(i);
+      get = new Get(bytes);
+      get.addFamily(FAMILIES[0]);
+      result = ht.get(get);
+      assertTrue(result.size() == 0);
+    }
   }
-  
+
   /**
    * Baseline "scalability" test.
    * 
@@ -2406,7 +2806,7 @@
   }
   
   private byte [][] makeN(byte [] base, int n) {
-    if(n > 256) {
+    if (n > 256) {
       return makeNBig(base, n);
     }
     byte [][] ret = new byte[n][];
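
The region-boundary counting in testFilterAcrossMutlipleRegions depends on one idiom worth calling out: a RowFilter alone only skips non-matching rows, while wrapping it in a WhileMatchFilter makes the first non-matching row end the scan entirely (filterAllRemaining turns true). A minimal sketch of that pattern, where stopKey is a placeholder byte[] row key and the filter classes are the ones imported above:

    Scan s = new Scan();
    // Rows that compare LESS than stopKey are returned; the first row failing the
    // comparison terminates the whole scan instead of merely being filtered out.
    Filter f = new WhileMatchFilter(
        new RowFilter(CompareFilter.CompareOp.LESS, new BinaryComparator(stopKey)));
    s.setFilter(f);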

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestFilter.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestFilter.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestFilter.java Wed Nov 25 22:30:29 2009
@@ -145,7 +145,6 @@
   }
 
   public void testNoFilter() throws Exception {
-    
     // No filter
     long expectedRows = this.numRows;
     long expectedKeys = this.colsPerRow;
@@ -161,17 +160,12 @@
   }
   
   public void testPrefixFilter() throws Exception {
-    
     // Grab rows from group one (half of total)
-    
     long expectedRows = this.numRows / 2;
     long expectedKeys = this.colsPerRow;
-    
     Scan s = new Scan();
     s.setFilter(new PrefixFilter(Bytes.toBytes("testRowOne")));
-
     verifyScan(s, expectedRows, expectedKeys);
-    
   }
   
   public void testPageFilter() throws Exception {
@@ -789,6 +783,138 @@
     
   }
   
+  public void testFirstKeyOnlyFilter() throws IOException {
+    Scan s = new Scan();
+    s.setFilter(new FirstKeyOnlyFilter());
+    // Expected KVs, the first KV from each of the remaining 6 rows
+    KeyValue [] kvs = {
+        new KeyValue(ROWS_ONE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+        new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+        new KeyValue(ROWS_ONE[3], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+        new KeyValue(ROWS_TWO[0], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+        new KeyValue(ROWS_TWO[2], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1]),
+        new KeyValue(ROWS_TWO[3], FAMILIES[0], QUALIFIERS_TWO[0], VALUES[1])
+    };
+    verifyScanFull(s, kvs);
+  }
+  
+  public void testSingleColumnValueFilter() throws IOException {
+    
+    // From HBASE-1821
+    // Desired action is to combine two SCVF in a FilterList
+    // Want to return only rows that match both conditions
+    
+    // Need to change one of the group one columns to use group two value
+    Put p = new Put(ROWS_ONE[2]);
+    p.add(FAMILIES[0], QUALIFIERS_ONE[2], VALUES[1]);
+    this.region.put(p);
+    
+    // Now let's grab rows that have Q_ONE[0](VALUES[0]) and Q_ONE[2](VALUES[1])
+    // Since group two rows don't have these qualifiers, they will pass
+    // so limiting scan to group one
+    List<Filter> filters = new ArrayList<Filter>();
+    filters.add(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0],
+        CompareOp.EQUAL, VALUES[0]));
+    filters.add(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[2],
+        CompareOp.EQUAL, VALUES[1]));
+    Filter f = new FilterList(Operator.MUST_PASS_ALL, filters);
+    Scan s = new Scan(ROWS_ONE[0], ROWS_TWO[0]);
+    s.addFamily(FAMILIES[0]);
+    s.setFilter(f);
+    // Expect only one row, all qualifiers
+    KeyValue [] kvs = {
+        new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+        new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[2], VALUES[1]),
+        new KeyValue(ROWS_ONE[2], FAMILIES[0], QUALIFIERS_ONE[3], VALUES[0])
+    };
+    verifyScanNoEarlyOut(s, 1, 3);
+    verifyScanFull(s, kvs);
+    
+    // In order to get expected behavior without limiting to group one
+    // need to wrap SCVFs in SkipFilters
+    filters = new ArrayList<Filter>();
+    filters.add(new SkipFilter(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0],
+        CompareOp.EQUAL, VALUES[0])));
+    filters.add(new SkipFilter(new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[2],
+        CompareOp.EQUAL, VALUES[1])));
+    f = new FilterList(Operator.MUST_PASS_ALL, filters);
+    s = new Scan(ROWS_ONE[0], ROWS_TWO[0]);
+    s.addFamily(FAMILIES[0]);
+    s.setFilter(f);
+    // Expect same KVs
+    verifyScanNoEarlyOut(s, 1, 3);
+    verifyScanFull(s, kvs);
+
+    // More tests from HBASE-1821 for Clint and filterIfMissing flag
+    
+    byte [][] ROWS_THREE = {
+        Bytes.toBytes("rowThree-0"), Bytes.toBytes("rowThree-1"),
+        Bytes.toBytes("rowThree-2"), Bytes.toBytes("rowThree-3")
+    };
+
+    // Give row 0 and 2 QUALIFIERS_ONE[0] (VALUE[0] VALUE[1])
+    // Give row 1 and 3 QUALIFIERS_ONE[1] (VALUE[0] VALUE[1])
+    
+    KeyValue [] srcKVs = new KeyValue [] {
+        new KeyValue(ROWS_THREE[0], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]),
+        new KeyValue(ROWS_THREE[1], FAMILIES[0], QUALIFIERS_ONE[0], VALUES[1]),
+        new KeyValue(ROWS_THREE[2], FAMILIES[0], QUALIFIERS_ONE[1], VALUES[0]),
+        new KeyValue(ROWS_THREE[3], FAMILIES[0], QUALIFIERS_ONE[1], VALUES[1])
+    };
+    
+    for(KeyValue kv : srcKVs) {
+      this.region.put(new Put(kv.getRow()).add(kv));
+    }
+    
+    // Match VALUES[0] against QUALIFIERS_ONE[0] with filterIfMissing = false
+    // Expect 3 rows (0, 2, 3)
+    SingleColumnValueFilter scvf = new SingleColumnValueFilter(FAMILIES[0], 
+        QUALIFIERS_ONE[0], CompareOp.EQUAL, VALUES[0]);
+    s = new Scan(ROWS_THREE[0], Bytes.toBytes("rowThree-4"));
+    s.addFamily(FAMILIES[0]);
+    s.setFilter(scvf);
+    kvs = new KeyValue [] { srcKVs[0], srcKVs[2], srcKVs[3] };
+    verifyScanFull(s, kvs);
+    
+    // Match VALUES[0] against QUALIFIERS_ONE[0] with filterIfMissing = true
+    // Expect 1 row (0)
+    scvf = new SingleColumnValueFilter(FAMILIES[0], QUALIFIERS_ONE[0],
+        CompareOp.EQUAL, VALUES[0]);
+    scvf.setFilterIfMissing(true);
+    s = new Scan(ROWS_THREE[0], Bytes.toBytes("rowThree-4"));
+    s.addFamily(FAMILIES[0]);
+    s.setFilter(scvf);
+    kvs = new KeyValue [] { srcKVs[0] };
+    verifyScanFull(s, kvs);
+    
+    // Match VALUES[1] against QUALIFIERS_ONE[1] with filterIfMissing = true
+    // Expect 1 row (3)
+    scvf = new SingleColumnValueFilter(FAMILIES[0], 
+        QUALIFIERS_ONE[1], CompareOp.EQUAL, VALUES[1]);
+    scvf.setFilterIfMissing(true);
+    s = new Scan(ROWS_THREE[0], Bytes.toBytes("rowThree-4"));
+    s.addFamily(FAMILIES[0]);
+    s.setFilter(scvf);
+    kvs = new KeyValue [] { srcKVs[3] };
+    verifyScanFull(s, kvs);
+    
+    // Add QUALIFIERS_ONE[1] to ROWS_THREE[0] with VALUES[0]
+    KeyValue kvA = new KeyValue(ROWS_THREE[0], FAMILIES[0], QUALIFIERS_ONE[1], VALUES[0]);
+    this.region.put(new Put(kvA.getRow()).add(kvA));
+    
+    // Match VALUES[1] against QUALIFIERS_ONE[1] with filterIfMissing = true
+    // Expect 1 row (3)
+    scvf = new SingleColumnValueFilter(FAMILIES[0], 
+        QUALIFIERS_ONE[1], CompareOp.EQUAL, VALUES[1]);
+    scvf.setFilterIfMissing(true);
+    s = new Scan(ROWS_THREE[0], Bytes.toBytes("rowThree-4"));
+    s.addFamily(FAMILIES[0]);
+    s.setFilter(scvf);
+    kvs = new KeyValue [] { srcKVs[3] };
+    verifyScanFull(s, kvs);
+    
+  }
+  
   private void verifyScan(Scan s, long expectedRows, long expectedKeys) 
   throws IOException {
     InternalScanner scanner = this.region.getScanner(s);
@@ -799,6 +925,7 @@
       Arrays.sort(results.toArray(new KeyValue[results.size()]),
           KeyValue.COMPARATOR);
       LOG.info("counter=" + i + ", " + results);
+      if (results.isEmpty()) break;
       assertTrue("Scanned too many rows! Only expected " + expectedRows + 
           " total but already scanned " + (i+1), expectedRows > i);
       assertEquals("Expected " + expectedKeys + " keys per row but " +
@@ -843,8 +970,10 @@
       done = scanner.next(results);
       Arrays.sort(results.toArray(new KeyValue[results.size()]),
           KeyValue.COMPARATOR);
+      if(results.isEmpty()) break;
       assertTrue("Scanned too many keys! Only expected " + kvs.length + 
-          " total but already scanned " + (results.size() + idx), 
+          " total but already scanned " + (results.size() + idx) + 
+          (results.isEmpty() ? "" : "(" + results.get(0).toString() + ")"), 
           kvs.length >= idx + results.size());
       for(KeyValue kv : results) {
         LOG.info("row=" + row + ", result=" + kv.toString() + 

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestFilterAcrossRegions.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestFilterAcrossRegions.java?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestFilterAcrossRegions.java (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestFilterAcrossRegions.java Wed Nov 25 22:30:29 2009
@@ -0,0 +1,166 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.filter;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.InternalScanner;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Basic test of filters crossing region boundaries.
+ * Tests that filters come back with the right answer.  For a client-side test of
+ * filters crossing boundaries, see the filter tests in the TestClient class.
+ */
+public class TestFilterAcrossRegions extends HBaseTestCase {
+  public void testStop() throws IOException {
+    byte [] name = Bytes.toBytes(getName());
+    HTableDescriptor htd = new HTableDescriptor(name);
+    htd.addFamily(new HColumnDescriptor(name));
+    // Make three regions: ""-10, 10-20, 20-"".
+    byte [] tenBoundary = Bytes.toBytes(10);
+    byte [] twentyBoundary = Bytes.toBytes(20);
+    HRegion r0 = createRegion(htd, HConstants.EMPTY_BYTE_ARRAY, tenBoundary);
+    HRegion r1 = createRegion(htd, tenBoundary, twentyBoundary);
+    HRegion r2 = createRegion(htd, twentyBoundary, HConstants.EMPTY_BYTE_ARRAY);
+    HRegion [] regions = new HRegion [] {r0, r1, r2};
+    final int max = 30;
+    try {
+      for (HRegion r: regions) {
+        populate(Bytes.toInt(r.getStartKey()), Bytes.toInt(r.getEndKey()), r,
+          max);
+      }
+      // Now I have 3 regions with rows of 0-9, 10-19, and 20-29.  Play with
+      // scanners and filters.
+      assertAllRows(regions, max);
+      assertFilterStops(regions, max);
+    } finally {
+      for (HRegion r: regions) r.close();
+    }
+  }
+
+  /*
+   * Test using a RowFilter.  Test that after we go beyond the wanted row, we
+   * do not return any more rows.
+   * @param regions
+   * @param max
+   * @throws IOException
+   */
+  private void assertFilterStops(final HRegion [] regions, final int max)
+  throws IOException {
+    // Count of rows seen.
+    int count = 0;
+    // Row at which we want to stop.
+    int maximumRow = max/regions.length;
+    // Count of regions seen.
+    int regionCount = 0;
+    for (HRegion r: regions) {
+      // Make a filter that will stop inside first region.
+      Scan s = createFilterStopsScanner(maximumRow);
+      InternalScanner scanner = r.getScanner(s);
+      List<KeyValue> results = new ArrayList<KeyValue>();
+      boolean hasMore = false;
+      do {
+        hasMore = scanner.next(results);
+        if (hasMore) count++;
+        if (regionCount ==0) assertFalse(s.getFilter().filterAllRemaining());
+        results.clear();
+      } while (hasMore);
+      if (regionCount > 0) assertTrue(s.getFilter().filterAllRemaining());
+      regionCount++;
+    }
+    assertEquals(maximumRow - 1, count);
+  }
+
+  /*
+   * @param max
+   * @return A Scan with a RowFilter that has a binary comparator that does not
+   * go beyond <code>max</code> (Filter is wrapped in a WhileMatchFilter so that
+   * filterAllRemaining is true once we go beyond <code>max</code>).
+   */
+  private Scan createFilterStopsScanner(final int max) {
+    Scan s = new Scan();
+    Filter f = new RowFilter(CompareFilter.CompareOp.LESS,
+      new BinaryComparator(Bytes.toBytes(max)));
+    f = new WhileMatchFilter(f);
+    s.setFilter(f);
+    return s;
+  }
+
+  private void assertAllRows(final HRegion [] regions, final int max)
+  throws IOException {
+    int count = 0;
+    for (HRegion r: regions) {
+      count += scan(r, new Scan());
+    }
+    assertEquals(max, count);
+  }
+
+  private int scan(final HRegion r, final Scan scan) throws IOException {
+    InternalScanner scanner = r.getScanner(new Scan(scan));
+    int count = 0;
+    List<KeyValue> results = new ArrayList<KeyValue>();
+    do {
+      count++;
+    } while (scanner.next(results));
+    return count;
+  }
+
+  private HRegion createRegion(final HTableDescriptor htd, final byte [] start,
+    final byte [] end)
+  throws IOException {
+    HRegionInfo info = new HRegionInfo(htd, start, end, false);
+    Path path = new Path(this.testDir, getName()); 
+    return HRegion.createHRegion(info, path, conf);
+  }
+
+  /*
+   * Add rows between start and end to region <code>r</code>
+   * @param start
+   * @param end
+   * @param r
+   * @param max
+   * @throws IOException
+   */
+  private void populate(final int start, final int end, final HRegion r,
+      final int max)
+  throws IOException {
+    byte [] name = r.getTableDesc().getFamiliesKeys().iterator().next();
+    int s = start < 0? 0: start;
+    int e = end < 0? max: end;
+    for (int i = s; i < e; i++) {
+      Put p = new Put(Bytes.toBytes(i));
+      p.add(name, name, name);
+      r.put(p);
+    }
+  }
+}
\ No newline at end of file

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/filter/TestSingleColumnValueFilter.java Wed Nov 25 22:30:29 2009
@@ -80,16 +80,20 @@
 
   private void basicFilterTests(Filter filter)
       throws Exception {
-    KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1);
-    assertFalse("basicFilter1", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
-    kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_2);
-    assertTrue("basicFilter2", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
+    KeyValue kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_2);
+    assertTrue("basicFilter1", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
     kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_3);
-    assertTrue("basicFilter3", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
+    assertTrue("basicFilter2", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
     kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_4);
-    assertTrue("basicFilter4", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
-    assertFalse("basicFilterAllRemaining", filter.filterAllRemaining());
+    assertTrue("basicFilter3", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
     assertFalse("basicFilterNotNull", filter.filterRow());
+    filter.reset();
+    kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_1);
+    assertTrue("basicFilter4", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW);
+    kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER, VAL_2);
+    assertTrue("basicFilter4", filter.filterKeyValue(kv) == Filter.ReturnCode.NEXT_ROW);
+    assertFalse("basicFilterAllRemaining", filter.filterAllRemaining());
+    assertTrue("basicFilterNotNull", filter.filterRow());
   }
 
   private void substrFilterTests(Filter filter) 
@@ -100,7 +104,7 @@
       filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
     kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER,
       FULLSTRING_2);
-    assertFalse("substrFalse", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
+    assertTrue("substrFalse", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
     assertFalse("substrFilterAllRemaining", filter.filterAllRemaining());
     assertFalse("substrFilterNotNull", filter.filterRow());
   }
@@ -113,7 +117,7 @@
       filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
     kv = new KeyValue(ROW, COLUMN_FAMILY, COLUMN_QUALIFIER,
       FULLSTRING_2);
-    assertFalse("regexFalse", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
+    assertTrue("regexFalse", filter.filterKeyValue(kv) == Filter.ReturnCode.INCLUDE);
     assertFalse("regexFilterAllRemaining", filter.filterAllRemaining());
     assertFalse("regexFilterNotNull", filter.filterRow());
   }    

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/TestHeapSize.java Wed Nov 25 22:30:29 2009
@@ -16,7 +16,6 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.RegionHistorian;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
@@ -291,15 +290,6 @@
       assertEquals(expected, actual);
     }
     
-    // RegionHistorian Overhead
-    cl = RegionHistorian.class;
-    actual = RegionHistorian.FIXED_OVERHEAD;
-    expected = ClassSize.estimateBase(cl, false);
-    if(expected != actual) {
-      ClassSize.estimateBase(cl, true);
-      assertEquals(expected, actual);
-    }
-    
     // Currently NOT testing Deep Overheads of many of these classes.
     // Deep overheads cover a vast majority of stuff, but will not be 100%
     // accurate because it's unclear when we're referencing stuff that's already

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/io/hfile/RandomSeek.java Wed Nov 25 22:30:29 2009
@@ -46,8 +46,10 @@
       String [] parts = str.split(",");
       l.add(parts[0] + ":" + parts[1] + ":" + parts[2]);
     }
+    istream.close();
     return l;
   }
+
   private static String randKey(List<String> keys) {
     Random r = new Random();
     //return keys.get(r.nextInt(keys.size()));

Added: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/master/TestRegionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/master/TestRegionManager.java?rev=884310&view=auto
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/master/TestRegionManager.java (added)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/master/TestRegionManager.java Wed Nov 25 22:30:29 2009
@@ -0,0 +1,69 @@
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.util.StringUtils;
+
+public class TestRegionManager extends HBaseClusterTestCase {
+   public void testGetFirstMetaRegionForRegionAfterMetaSplit()
+   throws Exception {
+     HTable meta = new HTable(HConstants.META_TABLE_NAME);
+     HMaster master = this.cluster.getMaster();
+     HServerAddress address = master.getMasterAddress();
+     HTableDescriptor tableDesc = new HTableDescriptor(Bytes.toBytes("_MY_TABLE_"));
+     HTableDescriptor metaTableDesc = meta.getTableDescriptor();
+     // master.regionManager.onlineMetaRegions already contains first .META. region at key Bytes.toBytes("")
+     byte[] startKey0 = Bytes.toBytes("f");
+     byte[] endKey0 = Bytes.toBytes("h");
+     HRegionInfo regionInfo0 = new HRegionInfo(tableDesc, startKey0, endKey0);
+
+     // 1st .META. region will be something like .META.,,1253625700761
+     HRegionInfo metaRegionInfo0 = new HRegionInfo(metaTableDesc, Bytes.toBytes(""), regionInfo0.getRegionName());
+     MetaRegion meta0 = new MetaRegion(address, metaRegionInfo0);
+   
+     byte[] startKey1 = Bytes.toBytes("j");
+     byte[] endKey1 = Bytes.toBytes("m");
+     HRegionInfo regionInfo1 = new HRegionInfo(tableDesc, startKey1, endKey1);
+     // 2nd .META. region will be something like .META.,_MY_TABLE_,f,1253625700761,1253625700761 
+     HRegionInfo metaRegionInfo1 = new HRegionInfo(metaTableDesc, regionInfo0.getRegionName(), regionInfo1.getRegionName());
+     MetaRegion meta1 = new MetaRegion(address, metaRegionInfo1);
+
+
+     // 3rd .META. region will be something like .META.,_MY_TABLE_,j,1253625700761,1253625700761
+     HRegionInfo metaRegionInfo2 = new HRegionInfo(metaTableDesc, regionInfo1.getRegionName(), Bytes.toBytes(""));
+     MetaRegion meta2 = new MetaRegion(address, metaRegionInfo2);
+
+     byte[] startKeyX = Bytes.toBytes("h");
+     byte[] endKeyX = Bytes.toBytes("j");
+     HRegionInfo regionInfoX = new HRegionInfo(tableDesc, startKeyX, endKeyX);
+   
+   
+     master.regionManager.offlineMetaRegion(startKey0);
+     master.regionManager.putMetaRegionOnline(meta0);
+     master.regionManager.putMetaRegionOnline(meta1);
+     master.regionManager.putMetaRegionOnline(meta2);
+   
+//    for (byte[] b : master.regionManager.getOnlineMetaRegions().keySet()) {
+//      System.out.println("FROM TEST KEY " + b +"  " +new String(b));
+//    }
+
+     assertEquals(metaRegionInfo1.getStartKey(), master.regionManager.getFirstMetaRegionForRegion(regionInfoX).getStartKey());
+     assertEquals(metaRegionInfo1.getRegionName(), master.regionManager.getFirstMetaRegionForRegion(regionInfoX).getRegionName());
+   }
+}

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java Wed Nov 25 22:30:29 2009
@@ -78,23 +78,23 @@
 
     //ts1
     ts1 = System.nanoTime();
-    del10 = dt.new Delete(col1, 0, col1Len, del, ts1);
-    del11 = dt.new Delete(col2, 0, col2Len, del, ts1);
-    delQf10 = dt.new Delete(col1, 0, col1Len, delCol, ts1);
-    delQf11 = dt.new Delete(col2, 0, col2Len, delCol, ts1);
-    delFam10 = dt.new Delete(empty, 0, 0, delFam, ts1);
+    del10 = new Delete(col1, 0, col1Len, del, ts1);
+    del11 = new Delete(col2, 0, col2Len, del, ts1);
+    delQf10 = new Delete(col1, 0, col1Len, delCol, ts1);
+    delQf11 = new Delete(col2, 0, col2Len, delCol, ts1);
+    delFam10 = new Delete(empty, 0, 0, delFam, ts1);
     
     //ts2
     ts2 = System.nanoTime();
-    del20 = dt.new Delete(col1, 0, col1Len, del, ts2);
-    del21 = dt.new Delete(col2, 0, col2Len, del, ts2);
-    delQf20 = dt.new Delete(col1, 0, col1Len, delCol, ts2);
-    delQf21 = dt.new Delete(col2, 0, col2Len, delCol, ts2);
-    delFam20 = dt.new Delete(empty, 0, 0, delFam, ts1);
+    del20 = new Delete(col1, 0, col1Len, del, ts2);
+    del21 = new Delete(col2, 0, col2Len, del, ts2);
+    delQf20 = new Delete(col1, 0, col1Len, delCol, ts2);
+    delQf21 = new Delete(col2, 0, col2Len, delCol, ts2);
+    delFam20 = new Delete(empty, 0, 0, delFam, ts1);
     
     //ts3
     ts3 = System.nanoTime();
-    del30 = dt.new Delete(col1, 0, col1Len, del, ts3);
+    del30 = new Delete(col1, 0, col1Len, del, ts3);
   }
   
   public void testUpdate_CompareDeletes() {
@@ -309,5 +309,20 @@
     
     assertEquals(true, dt.isDeleted(col2, 0, col2Len, ts1));
   }
+
+  // HBASE-1951
+  public void testStackOverflow() {
+    List<Delete> dels = new ArrayList<Delete>();
+    Delete adel = new Delete(col1, 0, col1Len, del, 0L);
+    for(long i = 0; i < 9000; i++) {
+      dt.add(adel.buffer, adel.qualifierOffset, adel.qualifierLength,
+          i, adel.type);
+    }
+
+
+    //update()
+    dt.update();
+    assertEquals(false, dt.isDeleted(col2, 0, col2Len, 7000000));
+  }
   
 }

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java Wed Nov 25 22:30:29 2009
@@ -40,6 +40,10 @@
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.PrefixFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -60,6 +64,8 @@
   // Test names
   private final byte[] tableName = Bytes.toBytes("testtable");;
   private final byte[] qual1 = Bytes.toBytes("qual1");
+  private final byte[] qual2 = Bytes.toBytes("qual2");
+  private final byte[] qual3 = Bytes.toBytes("qual3");
   private final byte[] value1 = Bytes.toBytes("value1");
   private final byte[] value2 = Bytes.toBytes("value2");
   private final byte [] row = Bytes.toBytes("rowA");
@@ -77,6 +83,127 @@
   // individual code pieces in the HRegion. Putting files locally in
   // /tmp/testtable
   //////////////////////////////////////////////////////////////////////////////
+  
+
+  /**
+   * An involved filter test.  Has multiple column families and deletes mixed in.
+   */
+  public void testWeirdCacheBehaviour() throws Exception {
+    byte[] TABLE = Bytes.toBytes("testWeirdCacheBehaviour");
+    byte[][] FAMILIES = new byte[][] { Bytes.toBytes("trans-blob"),
+        Bytes.toBytes("trans-type"), Bytes.toBytes("trans-date"),
+        Bytes.toBytes("trans-tags"), Bytes.toBytes("trans-group") };
+    initHRegion(TABLE, getName(), FAMILIES);
+    String value = "this is the value";
+    String value2 = "this is some other value";
+    String keyPrefix1 = "prefix1"; // UUID.randomUUID().toString();
+    String keyPrefix2 = "prefix2"; // UUID.randomUUID().toString();
+    String keyPrefix3 = "prefix3"; // UUID.randomUUID().toString();
+    putRows(this.region, 3, value, keyPrefix1);
+    putRows(this.region, 3, value, keyPrefix2);
+    putRows(this.region, 3, value, keyPrefix3);
+    // this.region.flushCommits();
+    putRows(this.region, 3, value2, keyPrefix1);
+    putRows(this.region, 3, value2, keyPrefix2);
+    putRows(this.region, 3, value2, keyPrefix3);
+    System.out.println("Checking values for key: " + keyPrefix1);
+    assertEquals("Got back incorrect number of rows from scan", 3,
+      getNumberOfRows(keyPrefix1, value2, this.region));
+    System.out.println("Checking values for key: " + keyPrefix2);
+    assertEquals("Got back incorrect number of rows from scan", 3,
+      getNumberOfRows(keyPrefix2, value2, this.region));
+    System.out.println("Checking values for key: " + keyPrefix3);
+    assertEquals("Got back incorrect number of rows from scan", 3,
+      getNumberOfRows(keyPrefix3, value2, this.region));
+    deleteColumns(this.region, value2, keyPrefix1);
+    deleteColumns(this.region, value2, keyPrefix2);
+    deleteColumns(this.region, value2, keyPrefix3);
+    System.out.println("Starting important checks.....");
+    assertEquals("Got back incorrect number of rows from scan: " + keyPrefix1,
+      0, getNumberOfRows(keyPrefix1, value2, this.region));
+    assertEquals("Got back incorrect number of rows from scan: " + keyPrefix2,
+      0, getNumberOfRows(keyPrefix2, value2, this.region));
+    assertEquals("Got back incorrect number of rows from scan: " + keyPrefix3,
+      0, getNumberOfRows(keyPrefix3, value2, this.region));
+  }
+
+  private void deleteColumns(HRegion r, String value, String keyPrefix)
+  throws IOException {
+    InternalScanner scanner = buildScanner(keyPrefix, value, r);
+    int count = 0;
+    boolean more = false;
+    List<KeyValue> results = new ArrayList<KeyValue>();
+    do {
+      more = scanner.next(results);
+      if (results != null && !results.isEmpty())
+        count++;
+      else
+        break;
+      Delete delete = new Delete(results.get(0).getRow());
+      delete.deleteColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"));
+      r.delete(delete, null, false);
+      results.clear();
+    } while (more);
+    assertEquals("Did not perform correct number of deletes", 3, count);
+  }
+
+  private int getNumberOfRows(String keyPrefix, String value, HRegion r) throws Exception {
+    InternalScanner resultScanner = buildScanner(keyPrefix, value, r);
+    int numberOfResults = 0;
+    List<KeyValue> results = new ArrayList<KeyValue>();
+    boolean more = false;
+    do {
+      more = resultScanner.next(results);
+      if (results != null && !results.isEmpty()) numberOfResults++;
+      else break;
+      for (KeyValue kv: results) {
+        System.out.println("kv=" + kv.toString() + ", " + Bytes.toString(kv.getValue()));
+      }
+      results.clear();
+    } while(more);
+    return numberOfResults;
+  }
+
+  private InternalScanner buildScanner(String keyPrefix, String value, HRegion r)
+  throws IOException {
+    // Defaults to FilterList.Operator.MUST_PASS_ALL.
+    FilterList allFilters = new FilterList();
+    allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
+    // Only return rows where this column value exists in the row.
+    SingleColumnValueFilter filter =
+      new SingleColumnValueFilter(Bytes.toBytes("trans-tags"),
+        Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
+    filter.setFilterIfMissing(true);
+    allFilters.addFilter(filter);
+    Scan scan = new Scan();
+    scan.addFamily(Bytes.toBytes("trans-blob"));
+    scan.addFamily(Bytes.toBytes("trans-type"));
+    scan.addFamily(Bytes.toBytes("trans-date"));
+    scan.addFamily(Bytes.toBytes("trans-tags"));
+    scan.addFamily(Bytes.toBytes("trans-group"));
+    scan.setFilter(allFilters);
+    return r.getScanner(scan);
+  }
+
+  private void putRows(HRegion r, int numRows, String value, String key)
+  throws IOException {
+    for (int i = 0; i < numRows; i++) {
+      String row = key + "_" + i/* UUID.randomUUID().toString() */;
+      System.out.println(String.format("Saving row: %s, with value %s", row,
+        value));
+      Put put = new Put(Bytes.toBytes(row));
+      put.add(Bytes.toBytes("trans-blob"), null,
+        Bytes.toBytes("value for blob"));
+      put.add(Bytes.toBytes("trans-type"), null, Bytes.toBytes("statement"));
+      put.add(Bytes.toBytes("trans-date"), null,
+        Bytes.toBytes("20090921010101999"));
+      put.add(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"),
+        Bytes.toBytes(value));
+      put.add(Bytes.toBytes("trans-group"), null,
+        Bytes.toBytes("adhocTransactionGroupId"));
+      r.put(put);
+    }
+  }
 
   public void testFamilyWithAndWithoutColon() throws Exception {
     byte [] b = Bytes.toBytes(getName());
@@ -208,11 +335,11 @@
     
     //checkAndPut with wrong value
     Store store = region.getStore(fam1);
-    int size = store.memstore.kvset.size();
+    store.memstore.kvset.size();
     
     boolean res = region.checkAndPut(row1, fam1, qf1, val1, put, lockId, true);
     assertEquals(true, res);
-    size = store.memstore.kvset.size();
+    store.memstore.kvset.size();
     
     Get get = new Get(row1);
     get.addColumn(fam2, qf1);
@@ -790,7 +917,7 @@
     scan.addFamily(fam1);
     scan.addFamily(fam2);
     try {
-      InternalScanner is = region.getScanner(scan);
+      region.getScanner(scan);
     } catch (Exception e) {
       assertTrue("Families could not be found in Region", false);
     }
@@ -811,7 +938,7 @@
     scan.addFamily(fam2);
     boolean ok = false;
     try {
-      InternalScanner is = region.getScanner(scan);
+      region.getScanner(scan);
     } catch (Exception e) {
       ok = true;
     }
@@ -1246,7 +1373,6 @@
     byte [] col2 = Bytes.toBytes("Pub222");
 
 
-
     Put put = new Put(row1);
     put.add(family, col1, Bytes.toBytes(10L));
     region.put(put);
@@ -1275,11 +1401,166 @@
     List<KeyValue> results = new ArrayList<KeyValue>();
     assertEquals(false, s.next(results));
     assertEquals(0, results.size());
+  }
 
+  public void testIncrementColumnValue_UpdatingInPlace() throws IOException {
+    initHRegion(tableName, getName(), fam1);
+
+    long value = 1L;
+    long amount = 3L;
 
+    Put put = new Put(row);
+    put.add(fam1, qual1, Bytes.toBytes(value));
+    region.put(put);
 
+    long result = region.incrementColumnValue(row, fam1, qual1, amount, true);
     
+    assertEquals(value+amount, result);
+
+    Store store = region.getStore(fam1);
+    assertEquals(1, store.memstore.kvset.size());
+    assertTrue(store.memstore.snapshot.isEmpty());
+
+    assertICV(row, fam1, qual1, value+amount);
   }
+
+  public void testIncrementColumnValue_ConcurrentFlush() throws IOException {
+    initHRegion(tableName, getName(), fam1);
+
+    long value = 1L;
+    long amount = 3L;
+
+    Put put = new Put(row);
+    put.add(fam1, qual1, Bytes.toBytes(value));
+    region.put(put);
+
+    // now increment during a flush
+    Thread t = new Thread() {
+      public void run() {
+        try {
+          region.flushcache();
+        } catch (IOException e) {
+          LOG.info("test ICV, got IOE during flushcache()");
+        }
+      }
+    };
+    t.start();
+    long r = region.incrementColumnValue(row, fam1, qual1, amount, true);
+    assertEquals(value+amount, r);
+
+    // this also asserts there is only 1 KeyValue in the set.
+    assertICV(row, fam1, qual1, value+amount);
+  }
+
+  public void testIncrementColumnValue_UpdatingInPlace_Negative()
+    throws IOException {
+    initHRegion(tableName, getName(), fam1);
+
+    long value = 3L;
+    long amount = -1L;
+
+    Put put = new Put(row);
+    put.add(fam1, qual1, Bytes.toBytes(value));
+    region.put(put);
+
+    long result = region.incrementColumnValue(row, fam1, qual1, amount, true);
+    assertEquals(value+amount, result);
+
+    assertICV(row, fam1, qual1, value+amount);
+  }
+
+  public void testIncrementColumnValue_AddingNew()
+    throws IOException {
+    initHRegion(tableName, getName(), fam1);
+
+    long value = 1L;
+    long amount = 3L;
+
+    Put put = new Put(row);
+    put.add(fam1, qual1, Bytes.toBytes(value));
+    put.add(fam1, qual2, Bytes.toBytes(value));
+    region.put(put);
+
+    long result = region.incrementColumnValue(row, fam1, qual3, amount, true);
+    assertEquals(amount, result);
+
+    Get get = new Get(row);
+    get.addColumn(fam1, qual3);
+    Result rr = region.get(get, null);
+    assertEquals(1, rr.size());
+
+    // ensure none of the other cols were incremented.
+    assertICV(row, fam1, qual1, value);
+    assertICV(row, fam1, qual2, value);
+    assertICV(row, fam1, qual3, amount);
+  }
+
+  public void testIncrementColumnValue_UpdatingFromSF() throws IOException {
+    initHRegion(tableName, getName(), fam1);
+
+    long value = 1L;
+    long amount = 3L;
+
+    Put put = new Put(row);
+    put.add(fam1, qual1, Bytes.toBytes(value));
+    put.add(fam1, qual2, Bytes.toBytes(value));
+    region.put(put);
+
+    // flush to disk.
+    region.flushcache();
+
+    Store store = region.getStore(fam1);
+    assertEquals(0, store.memstore.kvset.size());
+
+    long r = region.incrementColumnValue(row, fam1, qual1, amount, true);
+    assertEquals(value+amount, r);
+
+    assertICV(row, fam1, qual1, value+amount);
+  }
+
+  public void testIncrementColumnValue_AddingNewAfterSFCheck()
+    throws IOException {
+    initHRegion(tableName, getName(), fam1);
+
+    long value = 1L;
+    long amount = 3L;
+
+    Put put = new Put(row);
+    put.add(fam1, qual1, Bytes.toBytes(value));
+    put.add(fam1, qual2, Bytes.toBytes(value));
+    region.put(put);
+    region.flushcache();
+
+    Store store = region.getStore(fam1);
+    assertEquals(0, store.memstore.kvset.size());
+
+    long r = region.incrementColumnValue(row, fam1, qual3, amount, true);
+    assertEquals(amount, r);
+
+    assertICV(row, fam1, qual3, amount);
+
+    region.flushcache();
+
+    // ensure that this gets to disk.
+    assertICV(row, fam1, qual3, amount);
+  }
+
+  private void assertICV(byte [] row,
+                         byte [] family,
+                         byte[] qualifier,
+                         long amount) throws IOException {
+    // run a get and see?
+    Get get = new Get(row);
+    get.addColumn(family, qualifier);
+    Result result = region.get(get, null);
+    assertEquals(1, result.size());
+
+    KeyValue kv = result.raw()[0];
+    long r = Bytes.toLong(kv.getValue());
+    assertEquals(amount, r);
+  }
+
+
   
   public void testScanner_Wildcard_FromMemStoreAndFiles_EnforceVersions()
   throws IOException {
@@ -1600,32 +1881,20 @@
     conf.setLong("hbase.hregion.max.filesize", 1024 * 128);
     return conf;
   }  
-  
-  //////////////////////////////////////////////////////////////////////////////
-  // Helpers
-  //////////////////////////////////////////////////////////////////////////////
-  private HBaseConfiguration initHRegion() {
-    HBaseConfiguration conf = new HBaseConfiguration();
-    
-    conf.set("hbase.hstore.compactionThreshold", "2");
-    conf.setLong("hbase.hregion.max.filesize", 65536);
-    
-    return conf;
-  }
-  
+
   private void initHRegion (byte [] tableName, String callingMethod,
-      byte[] ... families) throws IOException{
+    byte[] ... families)
+  throws IOException {
     initHRegion(tableName, callingMethod, new HBaseConfiguration(), families);
   }
   
   private void initHRegion (byte [] tableName, String callingMethod,
-      HBaseConfiguration conf, byte [] ... families) throws IOException{
+    HBaseConfiguration conf, byte [] ... families)
+  throws IOException{
     HTableDescriptor htd = new HTableDescriptor(tableName);
     for(byte [] family : families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family);
       htd.addFamily(new HColumnDescriptor(family));
     }
-    
     HRegionInfo info = new HRegionInfo(htd, null, null, false);
     Path path = new Path(DIR + callingMethod); 
     region = HRegion.createHRegion(info, path, conf);

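The new testWeirdCacheBehaviour test above builds its scanner from a FilterList that combines a PrefixFilter with a SingleColumnValueFilter. For reference, a minimal client-side sketch of the same filter stack might look like the following; the table name, prefix, and value are taken from the test, but the standalone class and its main() are illustrative only and not part of this commit.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilteredScanSketch {
  public static void main(String[] args) throws IOException {
    // Keep only rows whose key starts with the prefix AND whose
    // trans-tags:qual2 cell equals the given value, as in buildScanner().
    HTable table = new HTable(new HBaseConfiguration(), "testWeirdCacheBehaviour");

    FilterList allFilters = new FilterList(); // defaults to MUST_PASS_ALL
    allFilters.addFilter(new PrefixFilter(Bytes.toBytes("prefix1")));
    SingleColumnValueFilter valueFilter = new SingleColumnValueFilter(
        Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"),
        CompareOp.EQUAL, Bytes.toBytes("this is the value"));
    // Without this, rows missing trans-tags:qual2 would pass the filter.
    valueFilter.setFilterIfMissing(true);
    allFilters.addFilter(valueFilter);

    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("trans-tags"));
    scan.setFilter(allFilters);

    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        System.out.println("matched row: " + Bytes.toString(r.getRow()));
      }
    } finally {
      scanner.close();
    }
  }
}
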
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java Wed Nov 25 22:30:29 2009
@@ -160,12 +160,49 @@
     }
     
   }
+  
+  public void testScannerLeak() {
+    // Test for unclosed scanners (HBASE-1927)
+    
+    List<KeyValue> l1 = new ArrayList<KeyValue>();
+    l1.add(new KeyValue(row1, fam1, col5, data));
+    l1.add(new KeyValue(row2, fam1, col1, data));
+    l1.add(new KeyValue(row2, fam1, col2, data));
+    scanners.add(new Scanner(l1));
+
+    List<KeyValue> l2 = new ArrayList<KeyValue>();
+    l2.add(new KeyValue(row1, fam1, col1, data));
+    l2.add(new KeyValue(row1, fam1, col2, data));
+    scanners.add(new Scanner(l2));
+
+    List<KeyValue> l3 = new ArrayList<KeyValue>();
+    l3.add(new KeyValue(row1, fam1, col3, data));
+    l3.add(new KeyValue(row1, fam1, col4, data));
+    l3.add(new KeyValue(row1, fam2, col1, data));
+    l3.add(new KeyValue(row1, fam2, col2, data));
+    l3.add(new KeyValue(row2, fam1, col3, data));
+    scanners.add(new Scanner(l3));
+    
+    List<KeyValue> l4 = new ArrayList<KeyValue>();
+    scanners.add(new Scanner(l4));
+
+    //Creating KeyValueHeap
+    KeyValueHeap kvh =
+      new KeyValueHeap(scanners.toArray(new Scanner[0]), KeyValue.COMPARATOR);
+    
+    while(kvh.next() != null);
+    
+    for(Scanner scanner : scanners) {
+      assertTrue(scanner.isClosed());
+    }
+  }
 
   private class Scanner implements KeyValueScanner {
     private Set<KeyValue> scan =
       new TreeSet<KeyValue>((Comparator)KeyValue.COMPARATOR);
     private Iterator<KeyValue> iter;
     private KeyValue current;
+    private boolean closed = false;
 
     public Scanner(List<KeyValue> list) {
       Collections.sort(list, (Comparator)KeyValue.COMPARATOR);
@@ -189,7 +226,13 @@
       return oldCurrent;
     }
 
-    public void close(){}
+    public void close(){
+      closed = true;
+    }
+    
+    public boolean isClosed() {
+      return closed;
+    }
     
     public boolean seek(KeyValue seekKv) {
       while(iter.hasNext()){

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java Wed Nov 25 22:30:29 2009
@@ -27,6 +27,7 @@
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValueTestUtil;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
@@ -163,4 +164,103 @@
     }
   }
 
+  
+  /**
+   * Verify that {@link QueryMatcher} only skips expired KeyValue 
+   * instances and does not exit early from the row (skipping 
+   * later non-expired KeyValues).  This version mimics a Get with
+   * explicitly specified column qualifiers.
+   * 
+   * @throws IOException
+   */
+  public void testMatch_ExpiredExplicit()
+  throws IOException {
+    
+    long testTTL = 1000;
+    MatchCode [] expected = new MatchCode[] {
+        MatchCode.SKIP,
+        MatchCode.INCLUDE,
+        MatchCode.SKIP,
+        MatchCode.INCLUDE,
+        MatchCode.SKIP,
+        MatchCode.NEXT
+    };
+        
+    QueryMatcher qm = new QueryMatcher(get, fam2,
+        get.getFamilyMap().get(fam2), testTTL, rowComparator, 1);
+    
+    long now = System.currentTimeMillis();
+    KeyValue [] kvs = new KeyValue[] {
+        new KeyValue(row1, fam2, col1, now-100, data),
+        new KeyValue(row1, fam2, col2, now-50, data),
+        new KeyValue(row1, fam2, col3, now-5000, data),
+        new KeyValue(row1, fam2, col4, now-500, data),
+        new KeyValue(row1, fam2, col5, now-10000, data),
+        new KeyValue(row2, fam1, col1, now-10, data)        
+    };
+
+    List<MatchCode> actual = new ArrayList<MatchCode>(kvs.length);
+    for (KeyValue kv : kvs) {
+      actual.add( qm.match(kv) );
+    }
+    
+    assertEquals(expected.length, actual.size());
+    for (int i=0; i<expected.length; i++) {
+      if(PRINT){
+        System.out.println("expected "+expected[i]+ 
+            ", actual " +actual.get(i));
+      }
+      assertEquals(expected[i], actual.get(i));
+    }
+  }
+  
+  
+  /**
+   * Verify that {@link QueryMatcher} only skips expired KeyValue 
+   * instances and does not exit early from the row (skipping 
+   * later non-expired KeyValues).  This version mimics a Get with
+   * wildcard-inferred column qualifiers.
+   * 
+   * @throws IOException
+   */ 
+  public void testMatch_ExpiredWildcard()
+  throws IOException {
+    
+    long testTTL = 1000;
+    MatchCode [] expected = new MatchCode[] {
+        MatchCode.INCLUDE,
+        MatchCode.INCLUDE,
+        MatchCode.SKIP,
+        MatchCode.INCLUDE,
+        MatchCode.SKIP,
+        MatchCode.NEXT
+    };
+        
+    QueryMatcher qm = new QueryMatcher(get, fam2,
+        null, testTTL, rowComparator, 1);
+    
+    long now = System.currentTimeMillis();
+    KeyValue [] kvs = new KeyValue[] {
+        new KeyValue(row1, fam2, col1, now-100, data),
+        new KeyValue(row1, fam2, col2, now-50, data),
+        new KeyValue(row1, fam2, col3, now-5000, data),
+        new KeyValue(row1, fam2, col4, now-500, data),
+        new KeyValue(row1, fam2, col5, now-10000, data),
+        new KeyValue(row2, fam1, col1, now-10, data)        
+    };
+
+    List<MatchCode> actual = new ArrayList<MatchCode>(kvs.length);
+    for (KeyValue kv : kvs) {
+      actual.add( qm.match(kv) );
+    }
+    
+    assertEquals(expected.length, actual.size());
+    for (int i=0; i<expected.length; i++) {
+      if(PRINT){
+        System.out.println("expected "+expected[i]+ 
+            ", actual " +actual.get(i));
+      }
+      assertEquals(expected[i], actual.get(i));
+    }
+  }
 }

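Both new QueryMatcher tests above assert that, with a TTL of 1000 ms, only cells older than now minus the TTL are skipped, while fresher cells in the same row are still included. A tiny standalone sketch of that expiry rule, using the same timestamp offsets as the tests; the names here are illustrative and not QueryMatcher internals.

public class TtlExpirySketch {
  // A cell is expired once its timestamp falls before (now - ttl).
  static boolean isExpired(long cellTimestamp, long ttlMillis, long now) {
    return cellTimestamp < now - ttlMillis;
  }

  public static void main(String[] args) {
    long ttl = 1000;
    long now = System.currentTimeMillis();
    long[] ages = { 100, 50, 5000, 500, 10000 }; // same offsets as the tests
    for (long age : ages) {
      System.out.println("age " + age + " ms -> expired: "
          + isExpired(now - age, ttl, now));
    }
    // Prints false, false, true, false, true: only the 5000 ms and 10000 ms
    // old cells are expired, matching the SKIP match codes expected above.
  }
}
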
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java Wed Nov 25 22:30:29 2009
@@ -180,7 +180,8 @@
     try {
       this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
       addContent(this.r, HConstants.CATALOG_FAMILY);
-      Filter newFilter = new PrefixFilter(Bytes.toBytes("ab"));
+      byte [] prefix = Bytes.toBytes("ab");
+      Filter newFilter = new PrefixFilter(prefix);
       Scan scan = new Scan();
       scan.setFilter(newFilter);
       rowPrefixFilter(scan);

Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=884310&r1=884309&r2=884310&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestStore.java Wed Nov 25 22:30:29 2009
@@ -232,163 +232,40 @@
   //////////////////////////////////////////////////////////////////////////////
   // IncrementColumnValue tests
   //////////////////////////////////////////////////////////////////////////////
-  /**
-   * Testing if the update in place works. When you want to update a value that
-   * is already in memstore, you don't delete it and put a new one, but just 
-   * update the value in the original KeyValue
-   * @throws IOException
+  /*
+   * test the internal details of how ICV works, especially during a flush scenario.
    */
-  public void testIncrementColumnValue_UpdatingInPlace() throws IOException {
-    init(this.getName());
-
-    //Put data in memstore
-    long value = 1L;
-    long amount = 3L;
-    this.store.add(new KeyValue(row, family, qf1, Bytes.toBytes(value)));
-    
-    Store.ICVResult vas = this.store.incrementColumnValue(row, family, qf1, amount);
-    assertEquals(value+amount, vas.value);
-    store.add(vas.kv);
-    Get get = new Get(row);
-    get.addColumn(family, qf1);
-    NavigableSet<byte[]> qualifiers = 
-      new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
-    qualifiers.add(qf1);
-    List<KeyValue> result = new ArrayList<KeyValue>();
-    this.store.get(get, qualifiers, result);
-    assertEquals(value + amount, Bytes.toLong(result.get(0).getValue()));
-  }
-
-  /**
-   * Same as above but for a negative number
-   * @throws IOException
-   */
-  public void testIncrementColumnValue_UpdatingInPlace_Negative() 
-  throws IOException {
-    init(this.getName());
-
-    //Put data in memstore
-    long value = 3L;
-    long amount = -1L;
-    this.store.add(new KeyValue(row, family, qf1, Bytes.toBytes(value)));
-    
-    Store.ICVResult vas = this.store.incrementColumnValue(row, family, qf1, amount);
-    assertEquals(vas.value, value+amount);
-    store.add(vas.kv);
-    Get get = new Get(row);
-    get.addColumn(family, qf1);
-    NavigableSet<byte[]> qualifiers = 
-      new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
-    qualifiers.add(qf1);
-    List<KeyValue> result = new ArrayList<KeyValue>();
-    this.store.get(get, qualifiers, result);
-    assertEquals(value + amount, Bytes.toLong(result.get(0).getValue()));
-  }
-  
-  /**
-   * When there is no mathing key already, adding a new.
-   * @throws IOException
-   */
-  public void testIncrementColumnValue_AddingNew() throws IOException {
-    init(this.getName());
-    
-    //Put data in memstore
-    long value = 1L;
-    long amount = 3L;
-    this.store.add(new KeyValue(row, family, qf1, Bytes.toBytes(value)));
-    this.store.add(new KeyValue(row, family, qf2, Bytes.toBytes(value)));
-    
-    Store.ICVResult vas = this.store.incrementColumnValue(row, family, qf3, amount);
-    store.add(vas.kv);
-    Get get = new Get(row);
-    get.addColumn(family, qf3);
-    NavigableSet<byte[]> qualifiers = 
-      new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
-    qualifiers.add(qf3);
-    List<KeyValue> result = new ArrayList<KeyValue>();
-    this.store.get(get, qualifiers, result);
-    assertEquals(amount, Bytes.toLong(result.get(0).getValue()));
-  }
-
-  /**
-   * When we have the key in a file add a new key + value to memstore with the 
-   * updates value. 
-   * @throws IOException
-   */
-  public void testIncrementColumnValue_UpdatingFromSF() throws IOException {
-    init(this.getName());
-    
-    //Put data in memstore
-    long value = 1L;
-    long amount = 3L;
-    this.store.add(new KeyValue(row, family, qf1, Bytes.toBytes(value)));
-    this.store.add(new KeyValue(row, family, qf2, Bytes.toBytes(value)));
-    
-    flush(1);
-    
-    Store.ICVResult vas = this.store.incrementColumnValue(row, family, qf1, amount);
-    store.add(vas.kv);
-    Get get = new Get(row);
-    get.addColumn(family, qf1);
-    NavigableSet<byte[]> qualifiers = 
-      new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
-    qualifiers.add(qf1);
-    List<KeyValue> result = new ArrayList<KeyValue>();
-    this.store.get(get, qualifiers, result);
-    assertEquals(value + amount, Bytes.toLong(result.get(0).getValue()));
-  }
-
-  /**
-   * Same as testIncrementColumnValue_AddingNew() except that the keys are
-   * checked in file not in memstore
-   * @throws IOException
-   */
-  public void testIncrementColumnValue_AddingNewAfterSFCheck() 
-  throws IOException {
-    init(this.getName());
-    
-    //Put data in memstore
-    long value = 1L;
-    long amount = 3L;
-    this.store.add(new KeyValue(row, family, qf1, Bytes.toBytes(value)));
-    this.store.add(new KeyValue(row, family, qf2, Bytes.toBytes(value)));
-    
-    flush(1);
-    
-    Store.ICVResult vas = this.store.incrementColumnValue(row, family, qf3, amount);
-    store.add(vas.kv);
-    Get get = new Get(row);
-    get.addColumn(family, qf3);
-    NavigableSet<byte[]> qualifiers = 
-      new ConcurrentSkipListSet<byte[]>(Bytes.BYTES_COMPARATOR);
-    qualifiers.add(qf3);
-    List<KeyValue> result = new ArrayList<KeyValue>();
-    this.store.get(get, qualifiers, result);
-    assertEquals(amount, Bytes.toLong(result.get(0).getValue()));
-  }
-
   public void testIncrementColumnValue_ICVDuringFlush()
     throws IOException {
     init(this.getName());
 
-    long value = 1L;
-    long amount = 3L;
+    long oldValue = 1L;
+    long newValue = 3L;
     this.store.add(new KeyValue(row, family, qf1,
         System.currentTimeMillis(),
-        Bytes.toBytes(value)));
+        Bytes.toBytes(oldValue)));
 
     // snapshot the store.
     this.store.snapshot();
 
-    // incrment during the snapshot...
+    // add other things:
+    this.store.add(new KeyValue(row, family, qf2,
+        System.currentTimeMillis(),
+        Bytes.toBytes(oldValue)));
+
+    // update during the snapshot.
+    long ret = this.store.updateColumnValue(row, family, qf1, newValue);
 
-    Store.ICVResult vas = this.store.incrementColumnValue(row, family, qf1, amount);
+    // memstore should have grown by some amount.
+    assertTrue(ret > 0);
 
     // then flush.
     this.store.flushCache(id++);
     assertEquals(1, this.store.getStorefiles().size());
-    assertEquals(0, this.store.memstore.kvset.size());
+    // from the one we inserted up there, and a new one
+    assertEquals(2, this.store.memstore.kvset.size());
 
+    // how many key/values for this row are there?
     Get get = new Get(row);
     get.addColumn(family, qf1);
     get.setMaxVersions(); // all versions.
@@ -398,12 +275,15 @@
     cols.add(qf1);
 
     this.store.get(get, cols, results);
-    // only one, because Store.ICV doesnt add to memcache.
-    assertEquals(1, results.size());
+    assertEquals(2, results.size());
+
+    long ts1 = results.get(0).getTimestamp();
+    long ts2 = results.get(1).getTimestamp();
+
+    assertTrue(ts1 > ts2);
+
+    assertEquals(newValue, Bytes.toLong(results.get(0).getValue()));
+    assertEquals(oldValue, Bytes.toLong(results.get(1).getValue()));
 
-    // but the timestamps should be different...
-    long icvTs = vas.kv.getTimestamp();
-    long storeTs = results.get(0).getTimestamp();
-    assertTrue(icvTs != storeTs);
   }
 }
\ No newline at end of file


