hbase-commits mailing list archives

From st...@apache.org
Subject svn commit: r990018 [8/10] - in /hbase/branches/0.90_master_rewrite: ./ bin/ bin/replication/ src/assembly/ src/docbkx/ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/client/ src/main/java/org/apache/hadoop/hbase/filter/ s...
Date Fri, 27 Aug 2010 05:01:07 GMT
Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java Fri Aug 27 05:01:02 2010
@@ -106,10 +106,15 @@ public class TestHFileOutputFormat  {
       byte keyBytes[] = new byte[keyLength];
       byte valBytes[] = new byte[valLength];
       
-      Random random = new Random(System.currentTimeMillis());
+      int taskId = context.getTaskAttemptID().getTaskID().getId();
+      assert taskId < Byte.MAX_VALUE : "Unit tests don't support > 127 tasks!";
+
+      Random random = new Random();
       for (int i = 0; i < ROWSPERSPLIT; i++) {
 
         random.nextBytes(keyBytes);
+        // Ensure that different tasks generate unique keys
+        keyBytes[keyLength - 1] = (byte)(taskId & 0xFF);
         random.nextBytes(valBytes);
         ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes);
 

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java Fri Aug 27 05:01:02 2010
@@ -175,10 +175,11 @@ public class TestLoadIncrementalHFiles {
   {
     HFile.Writer writer = new HFile.Writer(fs, path, BLOCKSIZE, COMPRESSION,
         KeyValue.KEY_COMPARATOR);
+    long now = System.currentTimeMillis();
     try {
       // subtract 2 since iterateOnSplits doesn't include boundary keys
       for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, numRows-2)) {
-        KeyValue kv = new KeyValue(key, family, qualifier, key);
+        KeyValue kv = new KeyValue(key, family, qualifier, now, key);
         writer.append(kv);
       }
     } finally {
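
For context: the shorter KeyValue constructor used before defaults the timestamp to HConstants.LATEST_TIMESTAMP, a sentinel rather than a real time, so capturing one wall-clock value outside the loop gives every appended cell the same concrete timestamp. A hedged before/after fragment (0.90-era KeyValue constructors):

    // Before: timestamp defaults to the LATEST_TIMESTAMP sentinel.
    KeyValue implicitTs = new KeyValue(key, family, qualifier, key);
    // After: one explicit wall-clock timestamp shared by every cell in the file.
    long now = System.currentTimeMillis();
    KeyValue explicitTs = new KeyValue(key, family, qualifier, now, key);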

Added: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/master/TestLogsCleaner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/master/TestLogsCleaner.java?rev=990018&view=auto
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/master/TestLogsCleaner.java (added)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/master/TestLogsCleaner.java Fri Aug 27 05:01:02 2010
@@ -0,0 +1,160 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertEquals;
+
+import java.net.URLEncoder;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.replication.ReplicationZookeeperWrapper;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestLogsCleaner {
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private ReplicationZookeeperWrapper zkHelper;
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniZKCluster();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniZKCluster();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    /* TODO REENABLE
+    zkHelper = new ReplicationZookeeperWrapper(
+        ZooKeeperWrapper.createInstance(conf, HRegionServer.class.getName()),
+        conf, new AtomicBoolean(true), "test-cluster");
+        */
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void testLogCleaning() throws Exception{
+    Configuration c = TEST_UTIL.getConfiguration();
+    Path oldLogDir = new Path(HBaseTestingUtility.getTestDir(),
+        HConstants.HREGION_OLDLOGDIR_NAME);
+    String fakeMachineName = URLEncoder.encode("regionserver:60020", "UTF8");
+
+    FileSystem fs = FileSystem.get(c);
+    Stoppable stoppable = new Stoppable() {
+      private volatile boolean stopped = false;
+
+      @Override
+      public void stop(String why) {
+        this.stopped = true;
+      }
+
+      @Override
+      public boolean isStopped() {
+        return this.stopped;
+      }
+    };
+    LogsCleaner cleaner = new LogsCleaner(1000, stoppable, c, fs, oldLogDir);
+
+    // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
+    long now = System.currentTimeMillis();
+    fs.delete(oldLogDir, true);
+    fs.mkdirs(oldLogDir);
+    // Case 1: 2 invalid files, which would be deleted directly
+    fs.createNewFile(new Path(oldLogDir, "a"));
+    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
+    // Case 2: 1 "recent" file, not even deletable for the first log cleaner
+    // (TimeToLiveLogCleaner), so we are not going down the chain
+    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));
+    System.out.println("Now is: " + now);
+    for (int i = 0; i < 30; i++) {
+      // Case 3: old files which would be deletable for the first log cleaner
+      // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
+      Path fileName = new Path(oldLogDir, fakeMachineName + "." +
+          (now - 6000000 - i) );
+      fs.createNewFile(fileName);
+      // Case 4: put 3 old log files in ZK indicating that they are scheduled
+      // for replication so these files would pass the first log cleaner
+      // (TimeToLiveLogCleaner) but would be rejected by the second
+      // (ReplicationLogCleaner)
+      if (i % (30/3) == 0) {
+// REENABLE        zkHelper.addLogToList(fileName.getName(), fakeMachineName);
+        System.out.println("Replication log file: " + fileName);
+      }
+    }
+    for (FileStatus stat : fs.listStatus(oldLogDir)) {
+      System.out.println(stat.getPath().toString());
+    }
+
+    // Case 2: 1 newer file, not even deletable for the first log cleaner
+    // (TimeToLiveLogCleaner), so we are not going down the chain
+    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));
+
+    assertEquals(34, fs.listStatus(oldLogDir).length);
+
+    // This will take care of 20 old log files (default max we can delete)
+    cleaner.chore();
+
+    assertEquals(14, fs.listStatus(oldLogDir).length);
+
+    // We will delete all remaining log files which are not scheduled for
+    // replication and those that are invalid
+    cleaner.chore();
+
+    // We end up with the current log file, a newer one and the 3 old log
+    // files which are scheduled for replication
+    assertEquals(5, fs.listStatus(oldLogDir).length);
+
+    for (FileStatus file : fs.listStatus(oldLogDir)) {
+      System.out.println("Keeped log files: " + file.getPath().getName());
+    }
+  }
+}
\ No newline at end of file
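
The four cases above are arranged around a chain of cleaner delegates. A rough sketch of the time-to-live rule the first delegate applies (names and logic assumed for illustration, not the actual TimeToLiveLogCleaner source):

    // Assumed sketch: a log file name ends in ".<timestamp>". A malformed
    // suffix (Case 1) makes the file deletable outright; a file younger than
    // the TTL (Case 2) stops the chain; an older file (Case 3) falls through
    // to the next delegate, e.g. the replication check (Case 4).
    static boolean ttlExpired(String fileName, long ttl, long now) {
      int idx = fileName.lastIndexOf('.');
      if (idx < 0) return true;                 // no timestamp suffix at all
      try {
        long ts = Long.parseLong(fileName.substring(idx + 1));
        return now - ts > ttl;
      } catch (NumberFormatException e) {
        return true;                            // unparseable suffix
      }
    }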

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/KeyValueScanFixture.java Fri Aug 27 05:01:02 2010
@@ -95,6 +95,11 @@ public class KeyValueScanFixture impleme
   }
 
   @Override
+  public boolean reseek(KeyValue key) {
+    return seek(key);
+  }
+
+  @Override
   public void close() {
     // noop.
   }
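
Delegating reseek to seek is always correct for this in-memory fixture; real scanners may instead exploit the contract that the reseek key is at or past the current position. A hypothetical forward-only variant for contrast (assumes peek/next from the KeyValueScanner interface and KeyValue.COMPARATOR):

    // Hypothetical sketch: walk forward to the first KeyValue at or after
    // 'key' instead of re-seeking from the top of the data.
    public boolean reseek(KeyValue key) throws IOException {
      KeyValue top;
      while ((top = peek()) != null && KeyValue.COMPARATOR.compare(top, key) < 0) {
        next();
      }
      return top != null;
    }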

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java Fri Aug 27 05:01:02 2010
@@ -26,8 +26,7 @@ import java.util.TreeSet;
 import java.util.Arrays;
 
 import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
+import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode;
 import org.apache.hadoop.hbase.util.Bytes;
 
 
@@ -49,7 +48,7 @@ public class TestExplicitColumnTracker e
 
 
     //Initialize result
-    List<MatchCode> result = new ArrayList<MatchCode>();
+    List<ScanQueryMatcher.MatchCode> result = new ArrayList<ScanQueryMatcher.MatchCode>();
 
     //"Match"
     for(byte [] col : scannerColumns){
@@ -76,12 +75,12 @@ public class TestExplicitColumnTracker e
     //Looking for every other
     columns.add(col2);
     columns.add(col4);
-    List<MatchCode> expected = new ArrayList<MatchCode>();
-    expected.add(MatchCode.SKIP);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.SKIP);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.DONE);
+    List<MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>();
+    expected.add(ScanQueryMatcher.MatchCode.SKIP);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.SKIP);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.DONE);
     int maxVersions = 1;
 
     //Create "Scanner"
@@ -106,26 +105,26 @@ public class TestExplicitColumnTracker e
     columns.add(col2);
     columns.add(col4);
 
-    List<MatchCode> expected = new ArrayList<MatchCode>();
-    expected.add(MatchCode.SKIP);
-    expected.add(MatchCode.SKIP);
-    expected.add(MatchCode.SKIP);
-
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.SKIP);
-
-    expected.add(MatchCode.SKIP);
-    expected.add(MatchCode.SKIP);
-    expected.add(MatchCode.SKIP);
-
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.DONE);
-
-    expected.add(MatchCode.DONE);
-    expected.add(MatchCode.DONE);
-    expected.add(MatchCode.DONE);
+    List<ScanQueryMatcher.MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>();
+    expected.add(ScanQueryMatcher.MatchCode.SKIP);
+    expected.add(ScanQueryMatcher.MatchCode.SKIP);
+    expected.add(ScanQueryMatcher.MatchCode.SKIP);
+
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.SKIP);
+
+    expected.add(ScanQueryMatcher.MatchCode.SKIP);
+    expected.add(ScanQueryMatcher.MatchCode.SKIP);
+    expected.add(ScanQueryMatcher.MatchCode.SKIP);
+
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.DONE);
+
+    expected.add(ScanQueryMatcher.MatchCode.DONE);
+    expected.add(ScanQueryMatcher.MatchCode.DONE);
+    expected.add(ScanQueryMatcher.MatchCode.DONE);
     int maxVersions = 2;
 
     //Create "Scanner"
@@ -183,10 +182,10 @@ public class TestExplicitColumnTracker e
       col2, col3, col5 }));
     List<byte[]> scanner = Arrays.<byte[]>asList(
       new byte[][] { col1, col4 });
-    List<MatchCode> expected = Arrays.<MatchCode>asList(
-      new MatchCode[] {
-        MatchCode.SKIP,
-        MatchCode.SKIP });
+    List<ScanQueryMatcher.MatchCode> expected = Arrays.<ScanQueryMatcher.MatchCode>asList(
+      new ScanQueryMatcher.MatchCode[] {
+        ScanQueryMatcher.MatchCode.SKIP,
+        ScanQueryMatcher.MatchCode.SKIP });
     runTest(1, columns, scanner, expected);
   }
 }
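
The expected lists in these tests encode the tracker's contract. A hedged sketch of that contract with version counting omitted (assumed logic, not the actual ExplicitColumnTracker source):

    // Assumed sketch: wanted columns are kept sorted; SKIP while the scanner
    // is still before the next wanted column, INCLUDE on a match, DONE once
    // every wanted column has been consumed.
    private final List<byte[]> wanted; // sorted requested qualifiers
    private int index = 0;

    MatchCode checkColumn(byte[] qualifier) {
      while (index < wanted.size()) {
        int cmp = Bytes.compareTo(qualifier, wanted.get(index));
        if (cmp < 0) return MatchCode.SKIP;     // not yet at a wanted column
        index++;
        if (cmp == 0) return MatchCode.INCLUDE; // match consumed
        // cmp > 0: that wanted column was absent from this row; try the next
      }
      return MatchCode.DONE;                    // nothing left to look for
    }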

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java Fri Aug 27 05:01:02 2010
@@ -1,4 +1,4 @@
-/**
+/*
  * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Fri Aug 27 05:01:02 2010
@@ -50,8 +50,11 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
+import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
 
 import com.google.common.base.Joiner;
@@ -361,7 +364,7 @@ public class TestHRegion extends HBaseTe
       assertEquals(OperationStatusCode.SUCCESS, codes[i]);
     }
     assertEquals(1, HLog.getSyncOps());
-    
+
     LOG.info("Next a batch put with one invalid family");
     puts[5].add(Bytes.toBytes("BAD_CF"), qual, val);
     codes = this.region.put(puts);
@@ -371,7 +374,7 @@ public class TestHRegion extends HBaseTe
         OperationStatusCode.SUCCESS, codes[i]);
     }
     assertEquals(1, HLog.getSyncOps());
-    
+
     LOG.info("Next a batch put that has to break into two batches to avoid a lock");
     Integer lockedRow = region.obtainRowLock(Bytes.toBytes("row_2"));
 
@@ -396,7 +399,7 @@ public class TestHRegion extends HBaseTe
       if (System.currentTimeMillis() - startWait > 10000) {
         fail("Timed out waiting for thread to sync first minibatch");
       }
-    }    
+    }
     LOG.info("...releasing row lock, which should let put thread continue");
     region.releaseRowLock(lockedRow);
     LOG.info("...joining on thread");
@@ -408,7 +411,7 @@ public class TestHRegion extends HBaseTe
       assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY :
         OperationStatusCode.SUCCESS, codes[i]);
     }
-    
+
     LOG.info("Nexta, a batch put which uses an already-held lock");
     lockedRow = region.obtainRowLock(Bytes.toBytes("row_2"));
     LOG.info("...obtained row lock");
@@ -427,13 +430,13 @@ public class TestHRegion extends HBaseTe
     }
     // Make sure we didn't do an extra batch
     assertEquals(1, HLog.getSyncOps());
-    
+
     // Make sure we still hold lock
     assertTrue(region.isRowLocked(lockedRow));
     LOG.info("...releasing lock");
     region.releaseRowLock(lockedRow);
   }
-  
+
   //////////////////////////////////////////////////////////////////////////////
   // checkAndMutate tests
   //////////////////////////////////////////////////////////////////////////////
@@ -486,6 +489,14 @@ public class TestHRegion extends HBaseTe
     res = region.checkAndMutate(row1, fam1, qf1, emptyVal, delete, lockId,
         true);
     assertTrue(res);
+
+    //checkAndPut looking for a null value
+    put = new Put(row1);
+    put.add(fam1, qf1, val1);
+
+    res = region.checkAndMutate(row1, fam1, qf1, null, put, lockId, true);
+    assertTrue(res);
+    
   }
 
   public void testCheckAndMutate_WithWrongValue() throws IOException{
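
The checkAndMutate hunk above pins down the convention that a null (or empty) expected value means "proceed only if no cell exists"; after the preceding delete the cell is gone, so the put is applied. A hedged sketch of that comparison (not HRegion's actual code):

    // Hedged sketch: a null/empty expected value matches only an absent cell;
    // otherwise require byte-for-byte equality with the stored value.
    static boolean expectedValueMatches(byte[] expected, byte[] actual) {
      if (expected == null || expected.length == 0) {
        return actual == null;
      }
      return actual != null && Bytes.equals(expected, actual);
    }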
@@ -834,7 +845,7 @@ public class TestHRegion extends HBaseTe
     result = region.get(get, null);
     assertEquals(0, result.size());
   }
-  
+
   /**
    * Tests that the special LATEST_TIMESTAMP option for puts gets
    * replaced by the actual timestamp
@@ -863,7 +874,7 @@ public class TestHRegion extends HBaseTe
     LOG.info("Got: " + kv);
     assertTrue("LATEST_TIMESTAMP was not replaced with real timestamp",
         kv.getTimestamp() != HConstants.LATEST_TIMESTAMP);
-    
+
     // Check same with WAL enabled (historically these took different
     // code paths, so check both)
     row = Bytes.toBytes("row2");
@@ -1246,54 +1257,38 @@ public class TestHRegion extends HBaseTe
   public void testMerge() throws IOException {
     byte [] tableName = Bytes.toBytes("testtable");
     byte [][] families = {fam1, fam2, fam3};
-
     HBaseConfiguration hc = initSplit();
     //Setting up region
     String method = this.getName();
     initHRegion(tableName, method, hc, families);
-
     try {
       LOG.info("" + addContent(region, fam3));
       region.flushcache();
       byte [] splitRow = region.compactStores();
       assertNotNull(splitRow);
       LOG.info("SplitRow: " + Bytes.toString(splitRow));
-      HRegion [] regions = split(region, splitRow);
+      HRegion [] subregions = splitRegion(region, splitRow);
       try {
         // Need to open the regions.
-        // TODO: Add an 'open' to HRegion... don't do open by constructing
-        // instance.
-        for (int i = 0; i < regions.length; i++) {
-          regions[i] = openClosedRegion(regions[i]);
+        for (int i = 0; i < subregions.length; i++) {
+          openClosedRegion(subregions[i]);
+          subregions[i].compactStores();
         }
         Path oldRegionPath = region.getRegionDir();
+        Path oldRegion1 = subregions[0].getRegionDir();
+        Path oldRegion2 = subregions[1].getRegionDir();
         long startTime = System.currentTimeMillis();
-        HRegion subregions [] = region.splitRegion(splitRow);
-        if (subregions != null) {
-          LOG.info("Split region elapsed time: "
-              + ((System.currentTimeMillis() - startTime) / 1000.0));
-          assertEquals("Number of subregions", subregions.length, 2);
-          for (int i = 0; i < subregions.length; i++) {
-            subregions[i] = openClosedRegion(subregions[i]);
-            subregions[i].compactStores();
-          }
-
-          // Now merge it back together
-          Path oldRegion1 = subregions[0].getRegionDir();
-          Path oldRegion2 = subregions[1].getRegionDir();
-          startTime = System.currentTimeMillis();
-          region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
-          LOG.info("Merge regions elapsed time: " +
-              ((System.currentTimeMillis() - startTime) / 1000.0));
-          fs.delete(oldRegion1, true);
-          fs.delete(oldRegion2, true);
-          fs.delete(oldRegionPath, true);
-        }
+        region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
+        LOG.info("Merge regions elapsed time: " +
+            ((System.currentTimeMillis() - startTime) / 1000.0));
+        fs.delete(oldRegion1, true);
+        fs.delete(oldRegion2, true);
+        fs.delete(oldRegionPath, true);
         LOG.info("splitAndMerge completed.");
       } finally {
-        for (int i = 0; i < regions.length; i++) {
+        for (int i = 0; i < subregions.length; i++) {
           try {
-            regions[i].close();
+            subregions[i].close();
           } catch (IOException e) {
             // Ignore.
           }
@@ -1307,6 +1302,38 @@ public class TestHRegion extends HBaseTe
     }
   }
 
+  /**
+   * @param parent Region to split.
+   * @param midkey Key to split around.
+   * @return The Regions we created.
+   * @throws IOException
+   */
+  HRegion [] splitRegion(final HRegion parent, final byte [] midkey)
+  throws IOException {
+    PairOfSameType<HRegion> result = null;
+    SplitTransaction st = new SplitTransaction(parent, midkey);
+    // If prepare does not return true, for some reason -- logged inside in
+    // the prepare call -- we are not ready to split just now.  Just return.
+    if (!st.prepare()) return null;
+    try {
+      result = st.execute(null, null);
+    } catch (IOException ioe) {
+      try {
+        LOG.info("Running rollback of failed split of " +
+          parent.getRegionNameAsString() + "; " + ioe.getMessage());
+        st.rollback(null);
+        LOG.info("Successful rollback of failed split of " +
+          parent.getRegionNameAsString());
+        return null;
+      } catch (RuntimeException e) {
+        // If the rollback fails, kill this server to avoid leaving a hole in the table.
+        LOG.info("Failed rollback of failed split of " +
+          parent.getRegionNameAsString() + " -- aborting server", e);
+      }
+    }
+    return new HRegion [] {result.getFirst(), result.getSecond()};
+  }
+
   //////////////////////////////////////////////////////////////////////////////
   // Scanner tests
   //////////////////////////////////////////////////////////////////////////////
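
The splitRegion helper above wraps the new SplitTransaction prepare/execute/rollback protocol, and callers treat a null return as "split did not happen". A short hypothetical caller fragment:

    // Hypothetical caller: null means prepare() declined or the split was
    // rolled back, so the parent region is still intact and usable.
    HRegion[] daughters = splitRegion(region, splitRow);
    if (daughters != null) {
      for (int i = 0; i < daughters.length; i++) {
        daughters[i] = openClosedRegion(daughters[i]);
      }
    }
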
@@ -1367,6 +1394,7 @@ public class TestHRegion extends HBaseTe
     String method = this.getName();
     initHRegion(tableName, method, families);
 
+
     //Putting data in Region
     Put put = new Put(row1);
     put.add(fam1, null, null);
@@ -1384,10 +1412,12 @@ public class TestHRegion extends HBaseTe
     scan.addFamily(fam2);
     scan.addFamily(fam4);
     is = (RegionScanner) region.getScanner(scan);
+    ReadWriteConsistencyControl.resetThreadReadPoint(region.getRWCC());
     assertEquals(1, ((RegionScanner)is).storeHeap.getHeap().size());
 
     scan = new Scan();
     is = (RegionScanner) region.getScanner(scan);
+    ReadWriteConsistencyControl.resetThreadReadPoint(region.getRWCC());
     assertEquals(families.length -1,
         ((RegionScanner)is).storeHeap.getHeap().size());
   }
@@ -1856,6 +1886,7 @@ public class TestHRegion extends HBaseTe
     assertEquals(value+amount, result);
 
     Store store = region.getStore(fam1);
+    // ICV removes any extra values floating around in there.
     assertEquals(1, store.memstore.kvset.size());
     assertTrue(store.memstore.snapshot.isEmpty());
 
@@ -1863,6 +1894,8 @@ public class TestHRegion extends HBaseTe
   }
 
   public void testIncrementColumnValue_BumpSnapshot() throws IOException {
+    ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
+    EnvironmentEdgeManagerTestHelper.injectEdge(mee);
     initHRegion(tableName, getName(), fam1);
 
     long value = 42L;
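
The ManualEnvironmentEdge injected above puts the test in control of the clock. A hedged usage fragment (assumes the edge exposes a setValue(long) setter, as in HBase's test utilities):

    // Hedged fragment: advance time by hand so memstore/snapshot timestamps
    // are deterministic instead of racing the wall clock.
    ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
    EnvironmentEdgeManagerTestHelper.injectEdge(mee);
    mee.setValue(1L);  // "now" is 1 ms
    // ... perform the first increment ...
    mee.setValue(2L);  // tick the clock before the next increment
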
@@ -2133,7 +2166,7 @@ public class TestHRegion extends HBaseTe
       byte [] splitRow = region.compactStores();
       assertNotNull(splitRow);
       LOG.info("SplitRow: " + Bytes.toString(splitRow));
-      HRegion [] regions = split(region, splitRow);
+      HRegion [] regions = splitRegion(region, splitRow);
       try {
         // Need to open the regions.
         // TODO: Add an 'open' to HRegion... don't do open by constructing
@@ -2173,7 +2206,7 @@ public class TestHRegion extends HBaseTe
         for (int i = 0; i < regions.length; i++) {
           HRegion[] rs = null;
           if (midkeys[i] != null) {
-            rs = split(regions[i], midkeys[i]);
+            rs = splitRegion(regions[i], midkeys[i]);
             for (int j = 0; j < rs.length; j++) {
               sortedMap.put(Bytes.toString(rs[j].getRegionName()),
                 openClosedRegion(rs[j]));
@@ -2226,7 +2259,7 @@ public class TestHRegion extends HBaseTe
 
     HRegion [] regions = null;
     try {
-      regions = region.splitRegion(Bytes.toBytes("" + splitRow));
+      regions = splitRegion(region, Bytes.toBytes("" + splitRow));
       //Opening the regions returned.
       for (int i = 0; i < regions.length; i++) {
         regions[i] = openClosedRegion(regions[i]);
@@ -2651,7 +2684,93 @@ public class TestHRegion extends HBaseTe
 
   }
 
+  //////////////////////////////////////////////////////////////////////////////
+  // Bloom filter test
+  //////////////////////////////////////////////////////////////////////////////
+
+  public void testAllColumnsWithBloomFilter() throws IOException {
+    byte [] TABLE = Bytes.toBytes("testAllColumnsWithBloomFilter");
+    byte [] FAMILY = Bytes.toBytes("family");
+
+    //Create table
+    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY, Integer.MAX_VALUE,
+        HColumnDescriptor.DEFAULT_COMPRESSION,
+        HColumnDescriptor.DEFAULT_IN_MEMORY,
+        HColumnDescriptor.DEFAULT_BLOCKCACHE,
+        Integer.MAX_VALUE, HColumnDescriptor.DEFAULT_TTL,
+        "rowcol",
+        HColumnDescriptor.DEFAULT_REPLICATION_SCOPE);
+    HTableDescriptor htd = new HTableDescriptor(TABLE);
+    htd.addFamily(hcd);
+    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    Path path = new Path(DIR + "testAllColumnsWithBloomFilter");
+    region = HRegion.createHRegion(info, path, conf);
+
+    // For row:0, col:0: insert versions 1 through 5.
+    byte row[] = Bytes.toBytes("row:" + 0);
+    byte column[] = Bytes.toBytes("column:" + 0);
+    Put put = new Put(row);
+    for (long idx = 1; idx <= 4; idx++) {
+      put.add(FAMILY, column, idx, Bytes.toBytes("value-version-" + idx));
+    }
+    region.put(put);
+
+    //Flush
+    region.flushcache();
+
+    //Get rows
+    Get get = new Get(row);
+    get.setMaxVersions();
+    KeyValue[] kvs = region.get(get, null).raw();
+
+    //Check if rows are correct
+    assertEquals(4, kvs.length);
+    checkOneCell(kvs[0], FAMILY, 0, 0, 4);
+    checkOneCell(kvs[1], FAMILY, 0, 0, 3);
+    checkOneCell(kvs[2], FAMILY, 0, 0, 2);
+    checkOneCell(kvs[3], FAMILY, 0, 0, 1);
+  }
+
+  /**
+   * Test case covering the bug fix for HBASE-2823: ensures a whole-row
+   * delete behaves correctly on a column family whose bloom filter is
+   * set to row+col (BloomType.ROWCOL).
+   */
+  public void testDeleteRowWithBloomFilter() throws IOException {
+    byte [] tableName = Bytes.toBytes("testDeleteRowWithBloomFilter");
+    byte [] familyName = Bytes.toBytes("familyName");
+
+    // Create Table
+    HColumnDescriptor hcd = new HColumnDescriptor(familyName, Integer.MAX_VALUE,
+        HColumnDescriptor.DEFAULT_COMPRESSION, false, true,
+        HColumnDescriptor.DEFAULT_TTL, "rowcol");
+
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(hcd);
+    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    Path path = new Path(DIR + "TestDeleteRowWithBloomFilter");
+    region = HRegion.createHRegion(info, path, conf);
+
+    // Insert some data
+    byte row[] = Bytes.toBytes("row1");
+    byte col[] = Bytes.toBytes("col1");
+
+    Put put = new Put(row);
+    put.add(familyName, col, 1, Bytes.toBytes("SomeRandomValue"));
+    region.put(put);
+    region.flushcache();
 
+    Delete del = new Delete(row);
+    region.delete(del, null, true);
+    region.flushcache();
+
+    // Get remaining rows (should have none)
+    Get get = new Get(row);
+    get.addColumn(familyName, col);
+
+    KeyValue[] keyValues = region.get(get, null).raw();
+    assertTrue(keyValues.length == 0);
+  }
 
   private void putData(int startRow, int numRows, byte [] qf,
       byte [] ...families)
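
Both bloom filter tests above target BloomType.ROWCOL. The HBASE-2823 case matters because a whole-row Delete carries no qualifier, so a row+col bloom filter must not be allowed to filter the delete marker away on the read path. A hedged sketch of the composite key such a filter is built over (assumed layout, not StoreFile's actual code):

    // Assumed sketch: a ROWCOL bloom entry is keyed on row bytes followed by
    // qualifier bytes, which is why a qualifier-less whole-row delete needs
    // special handling when the filter is consulted.
    static byte[] rowColBloomKey(byte[] row, byte[] qualifier) {
      byte[] key = new byte[row.length + qualifier.length];
      System.arraycopy(row, 0, key, 0, row.length);
      System.arraycopy(qualifier, 0, key, row.length, qualifier.length);
      return key;
    }
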
@@ -2732,15 +2851,6 @@ public class TestHRegion extends HBaseTe
     }
   }
 
-  protected HRegion [] split(final HRegion r, final byte [] splitRow)
-  throws IOException {
-    // Assert can get mid key from passed region.
-    assertGet(r, fam3, splitRow);
-    HRegion [] regions = r.splitRegion(splitRow);
-    assertEquals(regions.length, 2);
-    return regions;
-  }
-
   private HBaseConfiguration initSplit() {
     HBaseConfiguration conf = new HBaseConfiguration();
     // Always compact if there is more than one store file.
@@ -2775,6 +2885,31 @@ public class TestHRegion extends HBaseTe
     }
     HRegionInfo info = new HRegionInfo(htd, null, null, false);
     Path path = new Path(DIR + callingMethod);
+    if (fs.exists(path)) {
+      if (!fs.delete(path, true)) {
+        throw new IOException("Failed delete of " + path);
+      }
+    }
     region = HRegion.createHRegion(info, path, conf);
   }
+
+  /**
+   * Assert that the passed in KeyValue has expected contents for the
+   * specified row, column & timestamp.
+   */
+  private void checkOneCell(KeyValue kv, byte[] cf,
+                             int rowIdx, int colIdx, long ts) {
+    String ctx = "rowIdx=" + rowIdx + "; colIdx=" + colIdx + "; ts=" + ts;
+    assertEquals("Row mismatch which checking: " + ctx,
+                 "row:"+ rowIdx, Bytes.toString(kv.getRow()));
+    assertEquals("ColumnFamily mismatch while checking: " + ctx,
+                 Bytes.toString(cf), Bytes.toString(kv.getFamily()));
+    assertEquals("Column qualifier mismatch while checking: " + ctx,
+                 "column:" + colIdx, Bytes.toString(kv.getQualifier()));
+    assertEquals("Timestamp mismatch while checking: " + ctx,
+                 ts, kv.getTimestamp());
+    assertEquals("Value mismatch while checking: " + ctx,
+                 "value-version-" + ts, Bytes.toString(kv.getValue()));
+  }
+
 }

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java Fri Aug 27 05:01:02 2010
@@ -27,7 +27,6 @@ import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -255,6 +254,11 @@ public class TestKeyValueHeap extends HB
       }
       return false;
     }
+
+    @Override
+    public boolean reseek(KeyValue key) throws IOException {
+      return seek(key);
+    }
   }
 
 }

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java Fri Aug 27 05:01:02 2010
@@ -163,7 +163,7 @@ public class TestMemStore extends TestCa
 
   /**
    * A simple test which verifies the 3 possible states when scanning across snapshot.
-   * @throws IOException 
+   * @throws IOException
    */
   public void testScanAcrossSnapshot2() throws IOException {
     // we are going to the scanning across snapshot with two kvs
@@ -210,7 +210,7 @@ public class TestMemStore extends TestCa
       throws IOException {
     scanner.seek(KeyValue.createFirstOnRow(new byte[]{}));
     List<KeyValue> returned = Lists.newArrayList();
-    
+
     while (true) {
       KeyValue next = scanner.next();
       if (next == null) break;
@@ -313,15 +313,15 @@ public class TestMemStore extends TestCa
 
     // COMPLETE INSERT 2
     rwcc.completeMemstoreInsert(w);
-    
+
     // NOW SHOULD SEE NEW KVS IN ADDITION TO OLD KVS.
     // See HBASE-1485 for discussion about what we should do with
     // the duplicate-TS inserts
     ReadWriteConsistencyControl.resetThreadReadPoint(rwcc);
     s = this.memstore.getScanners().get(0);
-    assertScannerResults(s, new KeyValue[]{kv21, kv11, kv22, kv12});    
+    assertScannerResults(s, new KeyValue[]{kv21, kv11, kv22, kv12});
   }
-  
+
   /**
    * When we insert a higher-memstoreTS deletion of a cell but with
    * the same timestamp, we still need to provide consistent reads
@@ -369,9 +369,9 @@ public class TestMemStore extends TestCa
     // NOW WE SHOULD SEE DELETE
     ReadWriteConsistencyControl.resetThreadReadPoint(rwcc);
     s = this.memstore.getScanners().get(0);
-    assertScannerResults(s, new KeyValue[]{kv11, kvDel, kv12});    
+    assertScannerResults(s, new KeyValue[]{kv11, kvDel, kv12});
   }
-  
+
 
   private static class ReadOwnWritesTester extends Thread {
     static final int NUM_TRIES = 1000;
@@ -454,7 +454,7 @@ public class TestMemStore extends TestCa
     }
   }
 
-  /** 
+  /**
    * Test memstore snapshots
    * @throws IOException
    */
@@ -570,64 +570,6 @@ public class TestMemStore extends TestCa
     }
   }
 
-  public void testGet_Basic_Found() throws IOException {
-    byte [] row = Bytes.toBytes("testrow");
-    byte [] fam = Bytes.toBytes("testfamily");
-    byte [] qf1 = Bytes.toBytes("testqualifier1");
-    byte [] qf2 = Bytes.toBytes("testqualifier2");
-    byte [] qf3 = Bytes.toBytes("testqualifier3");
-    byte [] val = Bytes.toBytes("testval");
-
-    //Setting up memstore
-    KeyValue add1 = new KeyValue(row, fam ,qf1, val);
-    KeyValue add2 = new KeyValue(row, fam ,qf2, val);
-    KeyValue add3 = new KeyValue(row, fam ,qf3, val);
-    memstore.add(add1);
-    memstore.add(add2);
-    memstore.add(add3);
-
-    //test
-    Get get = new Get(row);
-    NavigableSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
-    columns.add(qf2);
-    long ttl = Long.MAX_VALUE;
-
-    QueryMatcher matcher =
-      new QueryMatcher(get, fam, columns, ttl, KeyValue.KEY_COMPARATOR, 1);
-
-    List<KeyValue> result = new ArrayList<KeyValue>();
-    boolean res = memstore.get(matcher, result);
-    assertEquals(true, res);
-  }
-
-  public void testGet_Basic_NotFound() throws IOException {
-    byte [] row = Bytes.toBytes("testrow");
-    byte [] fam = Bytes.toBytes("testfamily");
-    byte [] qf1 = Bytes.toBytes("testqualifier1");
-    byte [] qf2 = Bytes.toBytes("testqualifier2");
-    byte [] qf3 = Bytes.toBytes("testqualifier3");
-    byte [] val = Bytes.toBytes("testval");
-
-    //Setting up memstore
-    KeyValue add1 = new KeyValue(row, fam ,qf1, val);
-    KeyValue add3 = new KeyValue(row, fam ,qf3, val);
-    memstore.add(add1);
-    memstore.add(add3);
-
-    //test
-    Get get = new Get(row);
-    NavigableSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
-    columns.add(qf2);
-    long ttl = Long.MAX_VALUE;
-
-    QueryMatcher matcher =
-      new QueryMatcher(get, fam, columns, ttl, KeyValue.KEY_COMPARATOR, 1);
-
-    List<KeyValue> result = new ArrayList<KeyValue>();
-    boolean res = memstore.get(matcher, result);
-    assertEquals(false, res);
-  }
-
   public void testGet_memstoreAndSnapShot() throws IOException {
     byte [] row = Bytes.toBytes("testrow");
     byte [] fam = Bytes.toBytes("testfamily");
@@ -638,16 +580,6 @@ public class TestMemStore extends TestCa
     byte [] qf5 = Bytes.toBytes("testqualifier5");
     byte [] val = Bytes.toBytes("testval");
 
-    //Creating get
-    Get get = new Get(row);
-    NavigableSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
-    columns.add(qf2);
-    columns.add(qf4);
-    long ttl = Long.MAX_VALUE;
-
-    QueryMatcher matcher =
-      new QueryMatcher(get, fam, columns, ttl, KeyValue.KEY_COMPARATOR, 1);
-
     //Setting up memstore
     memstore.add(new KeyValue(row, fam ,qf1, val));
     memstore.add(new KeyValue(row, fam ,qf2, val));
@@ -660,64 +592,6 @@ public class TestMemStore extends TestCa
     memstore.add(new KeyValue(row, fam ,qf4, val));
     memstore.add(new KeyValue(row, fam ,qf5, val));
     assertEquals(2, memstore.kvset.size());
-
-    List<KeyValue> result = new ArrayList<KeyValue>();
-    boolean res = memstore.get(matcher, result);
-    assertEquals(true, res);
-  }
-
-  public void testGet_SpecificTimeStamp() throws IOException {
-    byte [] row = Bytes.toBytes("testrow");
-    byte [] fam = Bytes.toBytes("testfamily");
-    byte [] qf1 = Bytes.toBytes("testqualifier1");
-    byte [] qf2 = Bytes.toBytes("testqualifier2");
-    byte [] qf3 = Bytes.toBytes("testqualifier3");
-    byte [] val = Bytes.toBytes("testval");
-
-    long ts1 = System.currentTimeMillis();
-    long ts2 = ts1++;
-    long ts3 = ts2++;
-
-    //Creating get
-    Get get = new Get(row);
-    get.setTimeStamp(ts2);
-    NavigableSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
-    columns.add(qf1);
-    columns.add(qf2);
-    columns.add(qf3);
-    long ttl = Long.MAX_VALUE;
-
-    QueryMatcher matcher = new QueryMatcher(get, fam, columns, ttl,
-      KeyValue.KEY_COMPARATOR, 1);
-
-    //Setting up expected
-    List<KeyValue> expected = new ArrayList<KeyValue>();
-    KeyValue kv1 = new KeyValue(row, fam ,qf1, ts2, val);
-    KeyValue kv2 = new KeyValue(row, fam ,qf2, ts2, val);
-    KeyValue kv3 = new KeyValue(row, fam ,qf3, ts2, val);
-    expected.add(kv1);
-    expected.add(kv2);
-    expected.add(kv3);
-
-    //Setting up memstore
-    memstore.add(new KeyValue(row, fam ,qf1, ts1, val));
-    memstore.add(new KeyValue(row, fam ,qf2, ts1, val));
-    memstore.add(new KeyValue(row, fam ,qf3, ts1, val));
-    memstore.add(kv1);
-    memstore.add(kv2);
-    memstore.add(kv3);
-    memstore.add(new KeyValue(row, fam ,qf1, ts3, val));
-    memstore.add(new KeyValue(row, fam ,qf2, ts3, val));
-    memstore.add(new KeyValue(row, fam ,qf3, ts3, val));
-
-    //Get
-    List<KeyValue> result = new ArrayList<KeyValue>();
-    memstore.get(matcher, result);
-
-    assertEquals(expected.size(), result.size());
-    for(int i=0; i<expected.size(); i++){
-      assertEquals(expected.get(i), result.get(i));
-    }
   }
 
   //////////////////////////////////////////////////////////////////////////////
@@ -785,7 +659,7 @@ public class TestMemStore extends TestCa
     expected.add(put2);
     expected.add(put1);
 
-    
+
     assertEquals(4, memstore.kvset.size());
     int i = 0;
     for (KeyValue kv: memstore.kvset) {
@@ -825,7 +699,7 @@ public class TestMemStore extends TestCa
     expected.add(put3);
 
 
-    
+
     assertEquals(5, memstore.kvset.size());
     int i = 0;
     for (KeyValue kv: memstore.kvset) {
@@ -884,9 +758,42 @@ public class TestMemStore extends TestCa
   }
 
 
+  ////////////////////////////////////
+  //Test for timestamps
+  ////////////////////////////////////
+
+  /**
+   * Test to ensure correctness when using Memstore with multiple timestamps
+   */
+  public void testMultipleTimestamps() throws IOException {
+    long[] timestamps = new long[] {20,10,5,1};
+    Scan scan = new Scan();
+
+    for (long timestamp: timestamps)
+      addRows(memstore,timestamp);
+
+    scan.setTimeRange(0, 2);
+    assertTrue(memstore.shouldSeek(scan));
+
+    scan.setTimeRange(20, 82);
+    assertTrue(memstore.shouldSeek(scan));
+
+    scan.setTimeRange(10, 20);
+    assertTrue(memstore.shouldSeek(scan));
+
+    scan.setTimeRange(8, 12);
+    assertTrue(memstore.shouldSeek(scan));
+
+    /* This test is not required for correctness, but it should pass when
+     * the timestamp range optimization is on. */
+    //scan.setTimeRange(28, 42);
+    //assertTrue(!memstore.shouldSeek(scan));
+  }
+
+
   //////////////////////////////////////////////////////////////////////////////
   // Helpers
-  //////////////////////////////////////////////////////////////////////////////  
+  //////////////////////////////////////////////////////////////////////////////
   private static byte [] makeQualifier(final int i1, final int i2){
     return Bytes.toBytes(Integer.toString(i1) + ";" +
         Integer.toString(i2));
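
testMultipleTimestamps above asserts that shouldSeek returns true whenever the scan's time range can intersect timestamps present in the memstore. A hedged sketch of that overlap check (assumed logic):

    // Assumed sketch: seek the memstore only if [scanMin, scanMax] can
    // overlap the span of timestamps the memstore has actually observed.
    static boolean shouldSeek(long memMinTs, long memMaxTs,
                              long scanMin, long scanMax) {
      return scanMin <= memMaxTs && scanMax >= memMinTs;
    }
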
@@ -1008,5 +915,5 @@ public class TestMemStore extends TestCa
 
   }
 
-  
+
 }

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java Fri Aug 27 05:01:02 2010
@@ -28,7 +28,8 @@ import org.apache.hadoop.hbase.HBaseTest
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KeyComparator;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode;
 import org.apache.hadoop.hbase.util.Bytes;
 
 
@@ -51,6 +52,7 @@ public class TestQueryMatcher extends HB
 
   long ttl = Long.MAX_VALUE;
   KeyComparator rowComparator;
+  private Scan scan;
 
   public void setUp() throws Exception {
     super.setUp();
@@ -72,6 +74,7 @@ public class TestQueryMatcher extends HB
     get.addColumn(fam2, col2);
     get.addColumn(fam2, col4);
     get.addColumn(fam2, col5);
+    this.scan = new Scan(get);
 
     rowComparator = KeyValue.KEY_COMPARATOR;
 
@@ -83,29 +86,31 @@ public class TestQueryMatcher extends HB
     //of just byte []
 
     //Expected result
-    List<MatchCode> expected = new ArrayList<MatchCode>();
-    expected.add(MatchCode.SKIP);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.SKIP);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.DONE);
+    List<MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>();
+    expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.DONE);
 
-    QueryMatcher qm = new QueryMatcher(get, fam2,
+    // The Get above requests columns col2, col4 and col5 of fam2.
+    ScanQueryMatcher qm = new ScanQueryMatcher(scan, fam2,
         get.getFamilyMap().get(fam2), ttl, rowComparator, 1);
 
     List<KeyValue> memstore = new ArrayList<KeyValue>();
-    memstore.add(new KeyValue(row1, fam2, col1, data));
-    memstore.add(new KeyValue(row1, fam2, col2, data));
-    memstore.add(new KeyValue(row1, fam2, col3, data));
-    memstore.add(new KeyValue(row1, fam2, col4, data));
-    memstore.add(new KeyValue(row1, fam2, col5, data));
+    memstore.add(new KeyValue(row1, fam2, col1, 1, data));
+    memstore.add(new KeyValue(row1, fam2, col2, 1, data));
+    memstore.add(new KeyValue(row1, fam2, col3, 1, data));
+    memstore.add(new KeyValue(row1, fam2, col4, 1, data));
+    memstore.add(new KeyValue(row1, fam2, col5, 1, data));
 
     memstore.add(new KeyValue(row2, fam1, col1, data));
 
-    List<MatchCode> actual = new ArrayList<MatchCode>();
+    List<ScanQueryMatcher.MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>();
+    qm.setRow(memstore.get(0).getRow());
 
-    for(KeyValue kv : memstore){
+    for (KeyValue kv : memstore){
       actual.add(qm.match(kv));
     }
 
@@ -126,27 +131,29 @@ public class TestQueryMatcher extends HB
     //of just byte []
 
     //Expected result
-    List<MatchCode> expected = new ArrayList<MatchCode>();
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.NEXT);
+    List<MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>();
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.DONE);
 
-    QueryMatcher qm = new QueryMatcher(get, fam2, null, ttl, rowComparator, 1);
+    ScanQueryMatcher qm = new ScanQueryMatcher(scan, fam2, null, ttl, rowComparator, 1);
 
     List<KeyValue> memstore = new ArrayList<KeyValue>();
-    memstore.add(new KeyValue(row1, fam2, col1, data));
-    memstore.add(new KeyValue(row1, fam2, col2, data));
-    memstore.add(new KeyValue(row1, fam2, col3, data));
-    memstore.add(new KeyValue(row1, fam2, col4, data));
-    memstore.add(new KeyValue(row1, fam2, col5, data));
-    memstore.add(new KeyValue(row2, fam1, col1, data));
+    memstore.add(new KeyValue(row1, fam2, col1, 1, data));
+    memstore.add(new KeyValue(row1, fam2, col2, 1, data));
+    memstore.add(new KeyValue(row1, fam2, col3, 1, data));
+    memstore.add(new KeyValue(row1, fam2, col4, 1, data));
+    memstore.add(new KeyValue(row1, fam2, col5, 1, data));
+    memstore.add(new KeyValue(row2, fam1, col1, 1, data));
+
+    List<ScanQueryMatcher.MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>();
 
-    List<MatchCode> actual = new ArrayList<MatchCode>();
+    qm.setRow(memstore.get(0).getRow());
 
-    for(KeyValue kv : memstore){
+    for(KeyValue kv : memstore) {
       actual.add(qm.match(kv));
     }
 
@@ -162,7 +169,7 @@ public class TestQueryMatcher extends HB
 
 
   /**
-   * Verify that {@link QueryMatcher} only skips expired KeyValue
+   * Verify that {@link ScanQueryMatcher} only skips expired KeyValue
    * instances and does not exit early from the row (skipping
    * later non-expired KeyValues).  This version mimics a Get with
    * explicitly specified column qualifiers.
@@ -174,15 +181,15 @@ public class TestQueryMatcher extends HB
 
     long testTTL = 1000;
     MatchCode [] expected = new MatchCode[] {
-        MatchCode.SKIP,
-        MatchCode.INCLUDE,
-        MatchCode.SKIP,
-        MatchCode.INCLUDE,
-        MatchCode.SKIP,
-        MatchCode.NEXT
+        ScanQueryMatcher.MatchCode.SEEK_NEXT_COL,
+        ScanQueryMatcher.MatchCode.INCLUDE,
+        ScanQueryMatcher.MatchCode.SEEK_NEXT_COL,
+        ScanQueryMatcher.MatchCode.INCLUDE,
+        ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW,
+        ScanQueryMatcher.MatchCode.DONE
     };
 
-    QueryMatcher qm = new QueryMatcher(get, fam2,
+    ScanQueryMatcher qm = new ScanQueryMatcher(scan, fam2,
         get.getFamilyMap().get(fam2), testTTL, rowComparator, 1);
 
     long now = System.currentTimeMillis();
@@ -195,6 +202,8 @@ public class TestQueryMatcher extends HB
         new KeyValue(row2, fam1, col1, now-10, data)
     };
 
+    qm.setRow(kvs[0].getRow());
+
     List<MatchCode> actual = new ArrayList<MatchCode>(kvs.length);
     for (KeyValue kv : kvs) {
       actual.add( qm.match(kv) );
@@ -212,7 +221,7 @@ public class TestQueryMatcher extends HB
 
 
   /**
-   * Verify that {@link QueryMatcher} only skips expired KeyValue
+   * Verify that {@link ScanQueryMatcher} only skips expired KeyValue
    * instances and does not exit early from the row (skipping
    * later non-expired KeyValues).  This version mimics a Get with
    * wildcard-inferred column qualifiers.
@@ -224,15 +233,15 @@ public class TestQueryMatcher extends HB
 
     long testTTL = 1000;
     MatchCode [] expected = new MatchCode[] {
-        MatchCode.INCLUDE,
-        MatchCode.INCLUDE,
-        MatchCode.SKIP,
-        MatchCode.INCLUDE,
-        MatchCode.SKIP,
-        MatchCode.NEXT
+        ScanQueryMatcher.MatchCode.INCLUDE,
+        ScanQueryMatcher.MatchCode.INCLUDE,
+        ScanQueryMatcher.MatchCode.SEEK_NEXT_COL,
+        ScanQueryMatcher.MatchCode.INCLUDE,
+        ScanQueryMatcher.MatchCode.SEEK_NEXT_COL,
+        ScanQueryMatcher.MatchCode.DONE
     };
 
-    QueryMatcher qm = new QueryMatcher(get, fam2,
+    ScanQueryMatcher qm = new ScanQueryMatcher(scan, fam2,
         null, testTTL, rowComparator, 1);
 
     long now = System.currentTimeMillis();
@@ -244,8 +253,9 @@ public class TestQueryMatcher extends HB
         new KeyValue(row1, fam2, col5, now-10000, data),
         new KeyValue(row2, fam1, col1, now-10, data)
     };
+    qm.setRow(kvs[0].getRow());
 
-    List<MatchCode> actual = new ArrayList<MatchCode>(kvs.length);
+    List<ScanQueryMatcher.MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>(kvs.length);
     for (KeyValue kv : kvs) {
       actual.add( qm.match(kv) );
     }
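
The move from SKIP/NEXT to SEEK_NEXT_COL, SEEK_NEXT_ROW and DONE in these expectations reflects that ScanQueryMatcher can now tell the scanner how far it may jump. A hedged summary of the dispatch a scanner performs on the verdict (assumed semantics; the two reseek target helpers are hypothetical):

    // Assumed semantics of the codes the tests now expect:
    //   SKIP          - advance exactly one KeyValue, then re-match
    //   SEEK_NEXT_COL - jump past remaining versions of the current column
    //   SEEK_NEXT_ROW - jump past everything left in the current row
    //   DONE          - nothing further is wanted from this row
    switch (matcher.match(kv)) {
      case INCLUDE:       results.add(kv); heap.next(); break;
      case SKIP:          heap.next(); break;
      case SEEK_NEXT_COL: heap.reseek(nextColumnKey(kv)); break; // hypothetical helper
      case SEEK_NEXT_ROW: heap.reseek(nextRowKey(kv)); break;    // hypothetical helper
      case DONE:          return results;
    }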

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanWildcardColumnTracker.java Fri Aug 27 05:01:02 2010
@@ -24,7 +24,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
+import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode;
 import org.apache.hadoop.hbase.util.Bytes;
 
 public class TestScanWildcardColumnTracker extends HBaseTestCase {
@@ -32,7 +32,6 @@ public class TestScanWildcardColumnTrack
   final static int VERSIONS = 2;
 
   public void testCheckColumn_Ok() {
-    //Create a WildcardColumnTracker
     ScanWildcardColumnTracker tracker =
       new ScanWildcardColumnTracker(VERSIONS);
 
@@ -45,15 +44,15 @@ public class TestScanWildcardColumnTrack
 
     //Setting up expected result
     List<MatchCode> expected = new ArrayList<MatchCode>();
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
 
-    List<MatchCode> actual = new ArrayList<MatchCode>();
+    List<ScanQueryMatcher.MatchCode> actual = new ArrayList<MatchCode>();
 
     for(byte [] qualifier : qualifiers) {
-      MatchCode mc = tracker.checkColumn(qualifier, 0, qualifier.length);
+      ScanQueryMatcher.MatchCode mc = tracker.checkColumn(qualifier, 0, qualifier.length);
       actual.add(mc);
     }
 
@@ -64,7 +63,6 @@ public class TestScanWildcardColumnTrack
   }
 
   public void testCheckColumn_EnforceVersions() {
-    //Create a WildcardColumnTracker
     ScanWildcardColumnTracker tracker =
       new ScanWildcardColumnTracker(VERSIONS);
 
@@ -76,13 +74,13 @@ public class TestScanWildcardColumnTrack
     qualifiers.add(Bytes.toBytes("qualifer2"));
 
     //Setting up expected result
-    List<MatchCode> expected = new ArrayList<MatchCode>();
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.INCLUDE);
-    expected.add(MatchCode.SKIP);
-    expected.add(MatchCode.INCLUDE);
+    List<ScanQueryMatcher.MatchCode> expected = new ArrayList<MatchCode>();
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
+    expected.add(ScanQueryMatcher.MatchCode.SKIP);
+    expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
 
-    List<MatchCode> actual = new ArrayList<MatchCode>();
+    List<MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>();
 
     for(byte [] qualifier : qualifiers) {
       MatchCode mc = tracker.checkColumn(qualifier, 0, qualifier.length);
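
The EnforceVersions expectations encode per-qualifier version budgeting with VERSIONS = 2. A hedged sketch of that counting (assumed logic, not the actual ScanWildcardColumnTracker source):

    // Assumed sketch: reset the count on each new qualifier; INCLUDE until
    // maxVersions copies have been taken, then SKIP the surplus versions.
    private byte[] currentColumn = null;
    private int versionsSeen = 0;

    MatchCode checkColumn(byte[] qualifier, int maxVersions) {
      if (currentColumn == null || !Bytes.equals(currentColumn, qualifier)) {
        currentColumn = qualifier;
        versionsSeen = 1;
        return MatchCode.INCLUDE;
      }
      return ++versionsSeen <= maxVersions ? MatchCode.INCLUDE : MatchCode.SKIP;
    }
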
@@ -96,7 +94,6 @@ public class TestScanWildcardColumnTrack
   }
 
   public void DisabledTestCheckColumn_WrongOrder() {
-    //Create a WildcardColumnTracker
     ScanWildcardColumnTracker tracker =
       new ScanWildcardColumnTracker(VERSIONS);
 

Added: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java?rev=990018&view=auto
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java (added)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java Fri Aug 27 05:01:02 2010
@@ -0,0 +1,253 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Test the {@link SplitTransaction} class against an HRegion (as opposed to
+ * running cluster).
+ */
+public class TestSplitTransaction {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final Path testdir =
+    HBaseTestingUtility.getTestDir(this.getClass().getName());
+  private HRegion parent;
+  private HLog wal;
+  private FileSystem fs;
+  private static final byte [] STARTROW = new byte [] {'a', 'a', 'a'};
+  // '{' is the next ASCII character after 'z'.
+  private static final byte [] ENDROW = new byte [] {'{', '{', '{'};
+  private static final byte [] GOOD_SPLIT_ROW = new byte [] {'d', 'd', 'd'};
+  private static final byte [] CF = HConstants.CATALOG_FAMILY;
+
+  @Before public void setup() throws IOException {
+    this.fs = FileSystem.get(TEST_UTIL.getConfiguration());
+    this.fs.delete(this.testdir, true);
+    this.wal = new HLog(fs, new Path(this.testdir, "logs"),
+      new Path(this.testdir, "archive"),
+      TEST_UTIL.getConfiguration(), null);
+    this.parent = createRegion(this.testdir, this.wal);
+  }
+
+  @After public void teardown() throws IOException {
+    // Null check guards against an NPE when setup failed before creating the region.
+    if (this.parent != null) {
+      if (!this.parent.isClosed()) this.parent.close();
+      if (this.fs.exists(this.parent.getRegionDir()) &&
+          !this.fs.delete(this.parent.getRegionDir(), true)) {
+        throw new IOException("Failed delete of " + this.parent.getRegionDir());
+      }
+    }
+    if (this.wal != null) this.wal.closeAndDelete();
+    this.fs.delete(this.testdir, true);
+  }
+
+  /**
+   * Test that a straightforward prepare works.  Tries to split on
+   * {@link #GOOD_SPLIT_ROW}.
+   * @throws IOException
+   */
+  @Test public void testPrepare() throws IOException {
+    prepareGOOD_SPLIT_ROW();
+  }
+
+  private SplitTransaction prepareGOOD_SPLIT_ROW() {
+    SplitTransaction st = new SplitTransaction(this.parent, GOOD_SPLIT_ROW);
+    assertTrue(st.prepare());
+    // Assert the write lock is held on successful prepare, as the javadoc states.
+    assertTrue(this.parent.lock.writeLock().isHeldByCurrentThread());
+    return st;
+  }
+
+  /**
+   * Pass an unreasonable split row.
+   */
+  @Test public void testPrepareWithBadSplitRow() throws IOException {
+    // Pass start row as split key.
+    SplitTransaction st = new SplitTransaction(this.parent, STARTROW);
+    assertFalse(st.prepare());
+    st = new SplitTransaction(this.parent, HConstants.EMPTY_BYTE_ARRAY);
+    assertFalse(st.prepare());
+    st = new SplitTransaction(this.parent, new byte [] {'A', 'A', 'A'});
+    assertFalse(st.prepare());
+    st = new SplitTransaction(this.parent, ENDROW);
+    assertFalse(st.prepare());
+  }
+
+  @Test public void testPrepareWithClosedRegion() throws IOException {
+    this.parent.close();
+    SplitTransaction st = new SplitTransaction(this.parent, GOOD_SPLIT_ROW);
+    assertFalse(st.prepare());
+  }
+
+  @Test public void testWholesomeSplit() throws IOException {
+    final int rowcount = TEST_UTIL.loadRegion(this.parent, CF);
+    assertTrue(rowcount > 0);
+    int parentRowCount = countRows(this.parent);
+    assertEquals(rowcount, parentRowCount);
+
+    // Start transaction.
+    SplitTransaction st = prepareGOOD_SPLIT_ROW();
+
+    // Run the execute.  Look at what it returns.
+    PairOfSameType<HRegion> daughters = st.execute(null, null);
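+    // getFirst() is the daughter spanning [startKey, splitrow); getSecond()
+    // spans [splitrow, endKey), as the key-span assertions below verify.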
+    // Do some assertions about execution.
+    assertTrue(this.fs.exists(st.getSplitDir()));
+    // Assert the parent region is closed.
+    assertTrue(this.parent.isClosed());
+
+    // Assert splitdir is empty -- because its content will have been moved out
+    // to be under the daughter region dirs.
+    assertEquals(0, this.fs.listStatus(st.getSplitDir()).length);
+    // Check daughters have correct key span.
+    assertTrue(Bytes.equals(this.parent.getStartKey(),
+      daughters.getFirst().getStartKey()));
+    assertTrue(Bytes.equals(GOOD_SPLIT_ROW,
+      daughters.getFirst().getEndKey()));
+    assertTrue(Bytes.equals(daughters.getSecond().getStartKey(),
+      GOOD_SPLIT_ROW));
+    assertTrue(Bytes.equals(this.parent.getEndKey(),
+      daughters.getSecond().getEndKey()));
+    // Count rows.
+    int daughtersRowCount = 0;
+    for (HRegion r: daughters) {
+      // Open so we can count its content.
+      HRegion openRegion = HRegion.openHRegion(r.getRegionInfo(),
+        r.getLog(), r.getConf());
+      try {
+        int count = countRows(openRegion);
+        assertTrue(count > 0 && count != rowcount);
+        daughtersRowCount += count;
+      } finally {
+        openRegion.close();
+      }
+    }
+    assertEquals(rowcount, daughtersRowCount);
+    // Assert the write lock is no longer held on the parent.
+    assertFalse(this.parent.lock.writeLock().isHeldByCurrentThread());
+  }
+
+  @Test public void testRollback() throws IOException {
+    final int rowcount = TEST_UTIL.loadRegion(this.parent, CF);
+    assertTrue(rowcount > 0);
+    int parentRowCount = countRows(this.parent);
+    assertEquals(rowcount, parentRowCount);
+
+    // Start transaction.
+    SplitTransaction st = prepareGOOD_SPLIT_ROW();
+    SplitTransaction spiedUponSt = spy(st);
+    when(spiedUponSt.createDaughterRegion(spiedUponSt.getSecondDaughter())).
+      thenThrow(new MockedFailedDaughterCreation());
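+    // The spy delegates to the real transaction but fails creation of the
+    // second daughter region, forcing the split to abort partway through.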
+    // Run the execute.  Look at what it returns.
+    boolean expectedException = false;
+    try {
+      spiedUponSt.execute(null, null);
+    } catch (MockedFailedDaughterCreation e) {
+      expectedException = true;
+    }
+    assertTrue(expectedException);
+    // Run rollback
+    spiedUponSt.rollback(null);
+
+    // Assert I can scan parent.
+    int parentRowCount2 = countRows(this.parent);
+    assertEquals(parentRowCount, parentRowCount2);
+
+    // Assert rollback cleaned up the daughter region dirs in the fs and
+    // released the write lock on the parent.
+    assertFalse(this.fs.exists(HRegion.getRegionDir(this.testdir, st.getFirstDaughter())));
+    assertFalse(this.fs.exists(HRegion.getRegionDir(this.testdir, st.getSecondDaughter())));
+    assertFalse(this.parent.lock.writeLock().isHeldByCurrentThread());
+
+    // Now retry the split but do not throw an exception this time.
+    assertTrue(st.prepare());
+    PairOfSameType<HRegion> daughters = st.execute(null, null);
+    // Count rows.
+    int daughtersRowCount = 0;
+    for (HRegion r: daughters) {
+      // Open so we can count its content.
+      HRegion openRegion = HRegion.openHRegion(r.getRegionInfo(),
+        r.getLog(), r.getConf());
+      try {
+        int count = countRows(openRegion);
+        assertTrue(count > 0 && count != rowcount);
+        daughtersRowCount += count;
+      } finally {
+        openRegion.close();
+      }
+    }
+    assertEquals(rowcount, daughtersRowCount);
+    // Assert the write lock is no longer held on the parent.
+    assertFalse(this.parent.lock.writeLock().isHeldByCurrentThread());
+  }
+
+  /**
+   * Exception used in this class only.
+   */
+  @SuppressWarnings("serial")
+  private class MockedFailedDaughterCreation extends IOException {}
+
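+  /**
+   * Count the rows in the passed region by scanning it end to end with an
+   * unfiltered Scan; each non-empty batch returned by the scanner is one row.
+   */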
+  private int countRows(final HRegion r) throws IOException {
+    int rowcount = 0;
+    InternalScanner scanner = r.getScanner(new Scan());
+    try {
+      List<KeyValue> kvs = new ArrayList<KeyValue>();
+      boolean hasNext = true;
+      while (hasNext) {
+        kvs.clear(); // Clear between calls so a trailing empty batch is not counted as a row.
+        hasNext = scanner.next(kvs);
+        if (!kvs.isEmpty()) rowcount++;
+      }
+    } finally {
+      scanner.close();
+    }
+    return rowcount;
+  }
+
+  static HRegion createRegion(final Path testdir, final HLog wal)
+  throws IOException {
+    // Make a region with start and end keys 'aaa' and '{{{'.  The load
+    // region utility will add rows between 'aaa' and 'zzz'.
+    HTableDescriptor htd = new HTableDescriptor("table");
+    HColumnDescriptor hcd = new HColumnDescriptor(CF);
+    htd.addFamily(hcd);
+    HRegionInfo hri = new HRegionInfo(htd, STARTROW, ENDROW);
+    return HRegion.openHRegion(hri, wal, TEST_UTIL.getConfiguration());
+  }
+}
\ No newline at end of file

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Fri Aug 27 05:01:02 2010
@@ -27,7 +27,6 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.NavigableSet;
-import java.util.TreeSet;
 import java.util.concurrent.ConcurrentSkipListSet;
 
 import junit.framework.TestCase;
@@ -52,7 +51,9 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
+import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 
 import com.google.common.base.Joiner;
@@ -62,7 +63,7 @@ import com.google.common.base.Joiner;
  */
 public class TestStore extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestStore.class);
-  
+
   Store store;
   byte [] table = Bytes.toBytes("table");
   byte [] family = Bytes.toBytes("family");
@@ -99,7 +100,7 @@ public class TestStore extends TestCase 
     Iterator<byte[]> iter = qualifiers.iterator();
     while(iter.hasNext()){
       byte [] next = iter.next();
-      expected.add(new KeyValue(row, family, next, null));
+      expected.add(new KeyValue(row, family, next, 1, (byte[])null));
       get.addColumn(family, next);
     }
   }
@@ -107,7 +108,7 @@ public class TestStore extends TestCase 
   private void init(String methodName) throws IOException {
     init(methodName, HBaseConfiguration.create());
   }
-  
+
   private void init(String methodName, Configuration conf)
   throws IOException {
     //Setting up a Store
@@ -140,8 +141,8 @@ public class TestStore extends TestCase 
   public void testEmptyStoreFile() throws IOException {
     init(this.getName());
     // Write a store file.
-    this.store.add(new KeyValue(row, family, qf1, null));
-    this.store.add(new KeyValue(row, family, qf2, null));
+    this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
     flush(1);
     // Now put in place an empty store file.  Its a little tricky.  Have to
     // do manually with hacked in sequence id.
@@ -150,7 +151,7 @@ public class TestStore extends TestCase 
     long seqid = f.getMaxSequenceId();
     Configuration c = HBaseConfiguration.create();
     FileSystem fs = FileSystem.get(c);
-    StoreFile.Writer w = StoreFile.createWriter(fs, storedir, 
+    StoreFile.Writer w = StoreFile.createWriter(fs, storedir,
         StoreFile.DEFAULT_BLOCKSIZE_SMALL);
     w.appendMetadata(seqid + 1, false);
     w.close();
@@ -161,7 +162,10 @@ public class TestStore extends TestCase 
       this.store.getFamily(), fs, c);
     System.out.println(this.store.getHRegionInfo().getEncodedName());
     assertEquals(2, this.store.getStorefilesCount());
-    this.store.get(get, qualifiers, result);
+
+    result = HBaseTestingUtility.getFromStoreFile(store,
+        get.getRow(),
+        qualifiers);
     assertEquals(1, result.size());
   }
 
@@ -173,15 +177,16 @@ public class TestStore extends TestCase 
     init(this.getName());
 
     //Put data in memstore
-    this.store.add(new KeyValue(row, family, qf1, null));
-    this.store.add(new KeyValue(row, family, qf2, null));
-    this.store.add(new KeyValue(row, family, qf3, null));
-    this.store.add(new KeyValue(row, family, qf4, null));
-    this.store.add(new KeyValue(row, family, qf5, null));
-    this.store.add(new KeyValue(row, family, qf6, null));
+    this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf3, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf4, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf5, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf6, 1, (byte[])null));
 
     //Get
-    this.store.get(get, qualifiers, result);
+    result = HBaseTestingUtility.getFromStoreFile(store,
+        get.getRow(), qualifiers);
 
     //Compare
     assertCheck();
@@ -195,25 +200,28 @@ public class TestStore extends TestCase 
     init(this.getName());
 
     //Put data in memstore
-    this.store.add(new KeyValue(row, family, qf1, null));
-    this.store.add(new KeyValue(row, family, qf2, null));
+    this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
     //flush
     flush(1);
 
     //Add more data
-    this.store.add(new KeyValue(row, family, qf3, null));
-    this.store.add(new KeyValue(row, family, qf4, null));
+    this.store.add(new KeyValue(row, family, qf3, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf4, 1, (byte[])null));
     //flush
     flush(2);
 
     //Add more data
-    this.store.add(new KeyValue(row, family, qf5, null));
-    this.store.add(new KeyValue(row, family, qf6, null));
+    this.store.add(new KeyValue(row, family, qf5, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf6, 1, (byte[])null));
     //flush
     flush(3);
 
     //Get
-    this.store.get(get, qualifiers, result);
+    result = HBaseTestingUtility.getFromStoreFile(store,
+        get.getRow(),
+        qualifiers);
 
     //Need to sort the result since multiple files
     Collections.sort(result, KeyValue.COMPARATOR);
@@ -230,23 +238,24 @@ public class TestStore extends TestCase 
     init(this.getName());
 
     //Put data in memstore
-    this.store.add(new KeyValue(row, family, qf1, null));
-    this.store.add(new KeyValue(row, family, qf2, null));
+    this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
     //flush
     flush(1);
 
     //Add more data
-    this.store.add(new KeyValue(row, family, qf3, null));
-    this.store.add(new KeyValue(row, family, qf4, null));
+    this.store.add(new KeyValue(row, family, qf3, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf4, 1, (byte[])null));
     //flush
     flush(2);
 
     //Add more data
-    this.store.add(new KeyValue(row, family, qf5, null));
-    this.store.add(new KeyValue(row, family, qf6, null));
+    this.store.add(new KeyValue(row, family, qf5, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf6, 1, (byte[])null));
 
     //Get
-    this.store.get(get, qualifiers, result);
+    result = HBaseTestingUtility.getFromStoreFile(store,
+        get.getRow(), qualifiers);
 
     //Need to sort the result since multiple files
     Collections.sort(result, KeyValue.COMPARATOR);
@@ -276,7 +285,7 @@ public class TestStore extends TestCase 
    * test the internal details of how ICV works, especially during a flush scenario.
    */
   public void testIncrementColumnValue_ICVDuringFlush()
-    throws IOException {
+      throws IOException, InterruptedException {
     init(this.getName());
 
     long oldValue = 1L;
@@ -311,10 +320,7 @@ public class TestStore extends TestCase 
     get.setMaxVersions(); // all versions.
     List<KeyValue> results = new ArrayList<KeyValue>();
 
-    NavigableSet<byte[]> cols = new TreeSet<byte[]>();
-    cols.add(qf1);
-
-    this.store.get(get, cols, results);
+    results = HBaseTestingUtility.getFromStoreFile(store, get);
     assertEquals(2, results.size());
 
     long ts1 = results.get(0).getTimestamp();
@@ -324,7 +330,73 @@ public class TestStore extends TestCase 
 
     assertEquals(newValue, Bytes.toLong(results.get(0).getValue()));
     assertEquals(oldValue, Bytes.toLong(results.get(1).getValue()));
+  }
+
+  public void testIncrementColumnValue_SnapshotFlushCombo() throws Exception {
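+    // The injected ManualEnvironmentEdge replaces the system clock behind
+    // EnvironmentEdgeManager.currentTimeMillis(), letting the test control
+    // the timestamps the store assigns.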
+    ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
+    EnvironmentEdgeManagerTestHelper.injectEdge(mee);
+    init(this.getName());
+
+    long oldValue = 1L;
+    long newValue = 3L;
+    this.store.add(new KeyValue(row, family, qf1,
+        EnvironmentEdgeManager.currentTimeMillis(),
+        Bytes.toBytes(oldValue)));
+
+    // snapshot the store.
+    this.store.snapshot();
+
+    // Update during the snapshot; it gets the exact same TS as the original Put.
+    long ret = this.store.updateColumnValue(row, family, qf1, newValue);
+
+    // memstore should have grown by some amount.
+    assertTrue(ret > 0);
+
+    // then flush.
+    flushStore(store, id++);
+    assertEquals(1, this.store.getStorefiles().size());
+    assertEquals(1, this.store.memstore.kvset.size());
+
+    // now increment again:
+    newValue += 1;
+    this.store.updateColumnValue(row, family, qf1, newValue);
+
+    // at this point we have a TS=1 in snapshot, and a TS=2 in kvset, so increment again:
+    newValue += 1;
+    this.store.updateColumnValue(row, family, qf1, newValue);
+
+    // The second TS should be TS=2 or higher, even though 'time' is still 1 right now.
+
+    // how many key/values for this row are there?
+    Get get = new Get(row);
+    get.addColumn(family, qf1);
+    get.setMaxVersions(); // all versions.
+    List<KeyValue> results = HBaseTestingUtility.getFromStoreFile(store, get);
+    assertEquals(2, results.size());
+
+    long ts1 = results.get(0).getTimestamp();
+    long ts2 = results.get(1).getTimestamp();
+
+    assertTrue(ts1 > ts2);
+    assertEquals(newValue, Bytes.toLong(results.get(0).getValue()));
+    assertEquals(oldValue, Bytes.toLong(results.get(1).getValue()));
+
+    mee.setValue(2); // time goes up slightly
+    newValue += 1;
+    this.store.updateColumnValue(row, family, qf1, newValue);
+
+    results = HBaseTestingUtility.getFromStoreFile(store, get);
+    assertEquals(2, results.size());
+
+    ts1 = results.get(0).getTimestamp();
+    ts2 = results.get(1).getTimestamp();
 
+    assertTrue(ts1 > ts2);
+    assertEquals(newValue, Bytes.toLong(results.get(0).getValue()));
+    assertEquals(oldValue, Bytes.toLong(results.get(1).getValue()));
   }
 
   public void testHandleErrorsInFlush() throws Exception {
@@ -340,21 +412,21 @@ public class TestStore extends TestCase 
     // Make sure it worked (above is sensitive to caching details in hadoop core)
     FileSystem fs = FileSystem.get(conf);
     assertEquals(FaultyFileSystem.class, fs.getClass());
-    
+
     // Initialize region
     init(getName(), conf);
 
     LOG.info("Adding some data");
-    this.store.add(new KeyValue(row, family, qf1, null));
-    this.store.add(new KeyValue(row, family, qf2, null));
-    this.store.add(new KeyValue(row, family, qf3, null));
+    this.store.add(new KeyValue(row, family, qf1, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf2, 1, (byte[])null));
+    this.store.add(new KeyValue(row, family, qf3, 1, (byte[])null));
 
     LOG.info("Before flush, we should have no files");
     FileStatus[] files = fs.listStatus(store.getHomedir());
     Path[] paths = FileUtil.stat2Paths(files);
     System.err.println("Got paths: " + Joiner.on(",").join(paths));
     assertEquals(0, paths.length);
-        
+
     //flush
     try {
       LOG.info("Flushing");
@@ -363,7 +435,7 @@ public class TestStore extends TestCase 
     } catch (IOException ioe) {
       assertTrue(ioe.getMessage().contains("Fault injected"));
     }
- 
+
     LOG.info("After failed flush, we should still have no files!");
     files = fs.listStatus(store.getHomedir());
     paths = FileUtil.stat2Paths(files);
@@ -371,27 +443,27 @@ public class TestStore extends TestCase 
     assertEquals(0, paths.length);
   }
 
-  
+
   static class FaultyFileSystem extends FilterFileSystem {
     List<SoftReference<FaultyOutputStream>> outStreams =
       new ArrayList<SoftReference<FaultyOutputStream>>();
     private long faultPos = 200;
-    
+
     public FaultyFileSystem() {
       super(new LocalFileSystem());
       System.err.println("Creating faulty!");
     }
-    
+
     @Override
     public FSDataOutputStream create(Path p) throws IOException {
       return new FaultyOutputStream(super.create(p), faultPos);
     }
 
   }
-  
+
   static class FaultyOutputStream extends FSDataOutputStream {
     volatile long faultPos = Long.MAX_VALUE;
-    
+
     public FaultyOutputStream(FSDataOutputStream out,
         long faultPos) throws IOException {
       super(out, null);
@@ -404,7 +476,7 @@ public class TestStore extends TestCase 
       injectFault();
       super.write(buf, offset, length);
     }
-    
+
     private void injectFault() throws IOException {
       if (getPos() >= faultPos) {
         throw new IOException("Fault injected");
@@ -412,12 +484,87 @@ public class TestStore extends TestCase 
     }
   }
 
-  
-  
+
+
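+  /**
+   * Flush the passed store: prepare the memstore snapshot, flush it out to a
+   * new store file, and commit that file into the store.
+   */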
   private static void flushStore(Store store, long id) throws IOException {
     StoreFlusher storeFlusher = store.getStoreFlusher(id);
     storeFlusher.prepare();
     storeFlusher.flushCache();
     storeFlusher.commit();
   }
-}
\ No newline at end of file
+
+
+
+  /**
+   * Generate a list of KeyValues for testing based on the given parameters.
+   * @param timestamps timestamps to use for each generated row
+   * @param numRows number of rows to generate
+   * @param qualifier column qualifier to use for every KeyValue
+   * @param family column family to use for every KeyValue
+   * @return the generated list of KeyValues
+   */
+  List<KeyValue> getKeyValueSet(long[] timestamps, int numRows,
+      byte[] qualifier, byte[] family) {
+    List<KeyValue> kvList = new ArrayList<KeyValue>();
+    for (int i = 1; i <= numRows; i++) {
+      byte[] b = Bytes.toBytes(i);
+      for (long timestamp : timestamps) {
+        kvList.add(new KeyValue(b, family, qualifier, timestamp, b));
+      }
+    }
+    return kvList;
+  }
+
+  /**
+   * Test to ensure correctness when using Stores with multiple timestamps
+   * @throws IOException
+   */
+  public void testMultipleTimestamps() throws IOException {
+    int numRows = 1;
+    long[] timestamps1 = new long[] {1,5,10,20};
+    long[] timestamps2 = new long[] {30,80};
+
+    init(this.getName());
+
+    List<KeyValue> kvList1 = getKeyValueSet(timestamps1, numRows, qf1, family);
+    for (KeyValue kv : kvList1) {
+      this.store.add(kv);
+    }
+
+    this.store.snapshot();
+    flushStore(store, id++);
+
+    List<KeyValue> kvList2 = getKeyValueSet(timestamps2, numRows, qf1, family);
+    for(KeyValue kv : kvList2) {
+      this.store.add(kv);
+    }
+
+    List<KeyValue> result;
+    Get get = new Get(Bytes.toBytes(1));
+    get.addColumn(family, qf1);
+
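+    // A Get/Scan TimeRange is inclusive of its minimum timestamp and
+    // exclusive of its maximum.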
+    get.setTimeRange(0, 15);
+    result = HBaseTestingUtility.getFromStoreFile(store, get);
+    assertTrue(result.size() > 0);
+
+    get.setTimeRange(40, 90);
+    result = HBaseTestingUtility.getFromStoreFile(store, get);
+    assertTrue(result.size() > 0);
+
+    get.setTimeRange(10, 45);
+    result = HBaseTestingUtility.getFromStoreFile(store, get);
+    assertTrue(result.size() > 0);
+
+    get.setTimeRange(80, 145);
+    result = HBaseTestingUtility.getFromStoreFile(store, get);
+    assertTrue(result.size() > 0);
+
+    get.setTimeRange(1, 2);
+    result = HBaseTestingUtility.getFromStoreFile(store, get);
+    assertTrue(result.size() > 0);
+
+    get.setTimeRange(90, 200);
+    result = HBaseTestingUtility.getFromStoreFile(store, get);
+    assertEquals(0, result.size());
+  }
+}

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Fri Aug 27 05:01:02 2010
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.List;
 import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseTest
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.Reference.Range;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -354,7 +356,9 @@ public class TestStoreFile extends HBase
       TreeSet<byte[]> columns = new TreeSet<byte[]>();
       columns.add("family:col".getBytes());
 
-      boolean exists = scanner.shouldSeek(row.getBytes(), columns);
+      Scan scan = new Scan(row.getBytes(), row.getBytes());
+      scan.addColumn("family".getBytes(), "family:col".getBytes());
+      boolean exists = scanner.shouldSeek(scan, columns);
       if (i % 2 == 0) {
         if (!exists) falseNeg++;
       } else {
@@ -428,7 +432,9 @@ public class TestStoreFile extends HBase
           TreeSet<byte[]> columns = new TreeSet<byte[]>();
           columns.add(("col" + col).getBytes());
 
-          boolean exists = scanner.shouldSeek(row.getBytes(), columns);
+          Scan scan = new Scan(row.getBytes(), row.getBytes());
+          scan.addColumn("family".getBytes(), ("col" + col).getBytes());
+          boolean exists = scanner.shouldSeek(scan, columns);
           boolean shouldRowExist = i % 2 == 0;
           boolean shouldColExist = j % 2 == 0;
           shouldColExist = shouldColExist || bt[x] == StoreFile.BloomType.ROW;
@@ -496,5 +502,77 @@ public class TestStoreFile extends HBase
     Mockito.doReturn(name).when(mock).toString();
     return mock;
   }
-  
+
+  /**
+   * Generate a list of KeyValues for testing based on the given parameters.
+   * @param timestamps timestamps to use for each generated row
+   * @param numRows number of rows to generate
+   * @param qualifier column qualifier to use for every KeyValue
+   * @param family column family to use for every KeyValue
+   * @return the generated list of KeyValues
+   */
+  List<KeyValue> getKeyValueSet(long[] timestamps, int numRows,
+      byte[] qualifier, byte[] family) {
+    List<KeyValue> kvList = new ArrayList<KeyValue>();
+    for (int i = 1; i <= numRows; i++) {
+      byte[] b = Bytes.toBytes(i);
+      LOG.info(Bytes.toString(b));
+      for (long timestamp : timestamps) {
+        kvList.add(new KeyValue(b, family, qualifier, timestamp, b));
+      }
+    }
+    return kvList;
+  }
+
+  /**
+   * Test to ensure correctness when using StoreFile with multiple timestamps
+   * @throws IOException
+   */
+  public void testMultipleTimestamps() throws IOException {
+    byte[] family = Bytes.toBytes("familyname");
+    byte[] qualifier = Bytes.toBytes("qualifier");
+    int numRows = 10;
+    long[] timestamps = new long[] {20,10,5,1};
+    Scan scan = new Scan();
+
+    Path storedir = new Path(new Path(this.testDir, "regionname"),
+        "familyname");
+    Path dir = new Path(storedir, "1234567890");
+    StoreFile.Writer writer = StoreFile.createWriter(this.fs, dir, 8 * 1024);
+
+    List<KeyValue> kvList = getKeyValueSet(timestamps, numRows,
+        qualifier, family);
+
+    for (KeyValue kv : kvList) {
+      writer.append(kv);
+    }
+    writer.appendMetadata(0, false);
+    writer.close();
+
+    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf,
+        StoreFile.BloomType.NONE, false);
+    StoreFile.Reader reader = hsf.createReader();
+    StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
+    TreeSet<byte[]> columns = new TreeSet<byte[]>();
+    columns.add(qualifier);
+
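+    // Without the timestamp range optimization shouldSeek() is trivially
+    // true; with it, these checks must still pass because each range
+    // overlaps the file's overall timestamp span (1 through 20).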
+    scan.setTimeRange(20, 100);
+    assertTrue(scanner.shouldSeek(scan, columns));
+
+    scan.setTimeRange(1, 2);
+    assertTrue(scanner.shouldSeek(scan, columns));
+
+    scan.setTimeRange(8, 10);
+    assertTrue(scanner.shouldSeek(scan, columns));
+
+    scan.setTimeRange(7, 50);
+    assertTrue(scanner.shouldSeek(scan, columns));
+
+    /* This check is not required for correctness, but it should pass once
+     * the timestamp range optimization is enabled: */
+    //scan.setTimeRange(27, 50);
+    //assertTrue(!scanner.shouldSeek(scan, columns));
+  }
 }

Modified: hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java?rev=990018&r1=990017&r2=990018&view=diff
==============================================================================
--- hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java (original)
+++ hbase/branches/0.90_master_rewrite/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java Fri Aug 27 05:01:02 2010
@@ -452,6 +452,8 @@ public class TestStoreScanner extends Te
     scan.updateReaders();
 
     scan.updateReaders();
+
+    scan.peek();
   }
 
 


