hbase-commits mailing list archives

From nspiegelb...@apache.org
Subject svn commit: r1176177 [12/13] - in /hbase/branches/0.89: ./ bin/ bin/replication/ docs/ src/ src/assembly/ src/docs/ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/avro/ src/main/java/org/apache/hadoop/hbase/avro/generated/...
Date Tue, 27 Sep 2011 02:42:01 GMT
Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,90 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.executor.RegionTransitionEventData;
+import org.apache.hadoop.hbase.executor.HBaseEventHandler.HBaseEventType;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestRestartCluster {
+  private static final Log LOG = LogFactory.getLog(TestRestartCluster.class);
+  private static Configuration conf;
+  private static HBaseTestingUtility utility;
+  private static ZooKeeperWrapper zkWrapper;
+  private static final byte[] TABLENAME = Bytes.toBytes("master_transitions");
+  private static final byte [][] FAMILIES = new byte [][] {Bytes.toBytes("a")};
+
+  @BeforeClass public static void beforeAllTests() throws Exception {
+    conf = HBaseConfiguration.create();
+    utility = new HBaseTestingUtility(conf);
+  }
+
+  @AfterClass public static void afterAllTests() throws IOException {
+    utility.shutdownMiniCluster();
+  }
+
+  @Before public void setup() throws IOException {
+  }
+
+  @Test (timeout=300000) public void testRestartClusterAfterKill() throws Exception {
+    utility.startMiniZKCluster();
+    zkWrapper = ZooKeeperWrapper.createInstance(conf, "cluster1");
+
+    // create the unassigned znode and post a region-opened state for ROOT and META
+    String unassignedZNode = zkWrapper.getRegionInTransitionZNode();
+    zkWrapper.createZNodeIfNotExists(unassignedZNode);
+    byte[] data = null;
+    HBaseEventType hbEventType = HBaseEventType.RS2ZK_REGION_OPENED;
+    try {
+      data = Writables.getBytes(new RegionTransitionEventData(hbEventType, HMaster.MASTER));
+    } catch (IOException e) {
+      LOG.error("Error creating event data for " + hbEventType, e);
+    }
+    zkWrapper.createOrUpdateUnassignedRegion(
+        HRegionInfo.ROOT_REGIONINFO.getEncodedName(), data);
+    zkWrapper.createOrUpdateUnassignedRegion(
+        HRegionInfo.FIRST_META_REGIONINFO.getEncodedName(), data);
+    LOG.debug("Created UNASSIGNED zNode for ROOT and META regions in state " + HBaseEventType.M2ZK_REGION_OFFLINE);
+
+    // start the HB cluster
+    LOG.info("Starting HBase cluster...");
+    utility.startMiniCluster(2);
+
+    utility.createTable(TABLENAME, FAMILIES);
+    LOG.info("Created a table, waiting for table to be available...");
+    utility.waitTableAvailable(TABLENAME, 60*1000);
+
+    LOG.info("Master deleted unassgined region and started up successfully.");
+  }
+}
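
A note on the Writables round-trip used above: the event data the test posts to the unassigned znode can be exercised in isolation. A minimal sketch, assuming RegionTransitionEventData keeps the no-argument constructor its Writable contract requires (the constructor is not shown in this commit):

    // Serialize the same payload the test writes to ZK, then rehydrate it.
    // The no-arg RegionTransitionEventData() constructor is an assumption.
    RegionTransitionEventData sent =
        new RegionTransitionEventData(HBaseEventType.RS2ZK_REGION_OPENED, HMaster.MASTER);
    byte[] bytes = Writables.getBytes(sent);
    RegionTransitionEventData received = (RegionTransitionEventData)
        Writables.getWritable(bytes, new RegionTransitionEventData());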

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedCloseRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedCloseRegion.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedCloseRegion.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedCloseRegion.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,241 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.HMsg;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.ProcessRegionClose;
+import org.apache.hadoop.hbase.master.RegionServerOperation;
+import org.apache.hadoop.hbase.master.RegionServerOperationListener;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.util.Writables;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestZKBasedCloseRegion {
+  private static final Log LOG = LogFactory.getLog(TestZKBasedCloseRegion.class);
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final String TABLENAME = "master_transitions";
+  private static final byte [][] FAMILIES = new byte [][] {Bytes.toBytes("a"),
+    Bytes.toBytes("b"), Bytes.toBytes("c")};
+
+  @BeforeClass public static void beforeAllTests() throws Exception {
+    Configuration c = TEST_UTIL.getConfiguration();
+    c.setBoolean("dfs.support.append", true);
+    c.setInt("hbase.regionserver.info.port", 0);
+    c.setInt("hbase.master.meta.thread.rescanfrequency", 5*1000);
+    TEST_UTIL.startMiniCluster(2);
+    TEST_UTIL.createTable(Bytes.toBytes(TABLENAME), FAMILIES);
+    HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
+    int countOfRegions = TEST_UTIL.createMultiRegions(t, getTestFamily());
+    waitUntilAllRegionsAssigned(countOfRegions);
+    addToEachStartKey(countOfRegions);
+  }
+
+  @AfterClass public static void afterAllTests() throws IOException {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before public void setup() throws IOException {
+    if (TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().size() < 2) {
+      // Need at least two servers.
+      LOG.info("Started new server=" +
+        TEST_UTIL.getHBaseCluster().startRegionServer());
+
+    }
+  }
+
+  @Test (timeout=300000) public void testCloseRegion()
+  throws Exception {
+    LOG.info("Running testCloseRegion");
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    LOG.info("Number of region servers = " + cluster.getLiveRegionServerThreads().size());
+
+    int rsIdx = 0;
+    HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(rsIdx);
+    Collection<HRegion> regions = regionServer.getOnlineRegions();
+    HRegion region = regions.iterator().next();
+    LOG.debug("Asking RS to close region " + region.getRegionNameAsString());
+
+    AtomicBoolean closeEventProcessed = new AtomicBoolean(false);
+    RegionServerOperationListener listener =
+      new CloseRegionEventListener(region.getRegionNameAsString(), closeEventProcessed);
+    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+    master.getRegionServerOperationQueue().registerRegionServerOperationListener(listener);
+    HMsg closeRegionMsg = new HMsg(HMsg.Type.MSG_REGION_CLOSE,
+                                   region.getRegionInfo(),
+                                   Bytes.toBytes("Forcing close in test")
+                                  );
+    TEST_UTIL.getHBaseCluster().addMessageToSendRegionServer(rsIdx, closeRegionMsg);
+
+    synchronized(closeEventProcessed) {
+      // wait for 3 minutes
+      closeEventProcessed.wait(3*60*1000);
+    }
+    if(!closeEventProcessed.get()) {
+      throw new Exception("Timed out, close event not called on master.");
+    }
+    else {
+      LOG.info("Done with test, RS informed master successfully.");
+    }
+  }
+
+  public static class CloseRegionEventListener implements RegionServerOperationListener {
+
+    private static final Log LOG = LogFactory.getLog(CloseRegionEventListener.class);
+    String regionToClose;
+    AtomicBoolean closeEventProcessed;
+
+    public CloseRegionEventListener(String regionToClose, AtomicBoolean closeEventProcessed) {
+      this.regionToClose = regionToClose;
+      this.closeEventProcessed = closeEventProcessed;
+    }
+
+    @Override
+    public boolean process(HServerInfo serverInfo, HMsg incomingMsg) {
+      return true;
+    }
+
+    @Override
+    public boolean process(RegionServerOperation op) throws IOException {
+      return true;
+    }
+
+    @Override
+    public void processed(RegionServerOperation op) {
+      LOG.debug("Master processing object: " + op.getClass().getCanonicalName());
+      if(op instanceof ProcessRegionClose) {
+        ProcessRegionClose regionCloseOp = (ProcessRegionClose)op;
+        String region = regionCloseOp.getRegionInfo().getRegionNameAsString();
+        LOG.debug("Finished closing region " + region + ", expected to close region " + regionToClose);
+        if(regionToClose.equals(region)) {
+          closeEventProcessed.set(true);
+        }
+        synchronized(closeEventProcessed) {
+          closeEventProcessed.notifyAll();
+        }
+      }
+    }
+
+  }
+
+
+  private static void waitUntilAllRegionsAssigned(final int countOfRegions)
+  throws IOException {
+    HTable meta = new HTable(TEST_UTIL.getConfiguration(),
+      HConstants.META_TABLE_NAME);
+    while (true) {
+      int rows = 0;
+      Scan scan = new Scan();
+      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
+      ResultScanner s = meta.getScanner(scan);
+      for (Result r = null; (r = s.next()) != null;) {
+        byte [] b =
+          r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
+        if (b == null || b.length <= 0) break;
+        rows++;
+      }
+      s.close();
+      // If I get to here and all rows have a Server, then all have been assigned.
+      if (rows == countOfRegions) break;
+      LOG.info("Found=" + rows);
+      Threads.sleep(1000);
+    }
+  }
+
+  /*
+   * Add a value to each of the regions in .META.  The key is the start row of
+   * the region (except it's 'aaa' for the first region).  The value is the row name.
+   * @param expected the number of regions expected in .META.
+   * @return the number of rows added
+   * @throws IOException
+   */
+  private static int addToEachStartKey(final int expected) throws IOException {
+    HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
+    HTable meta = new HTable(TEST_UTIL.getConfiguration(),
+        HConstants.META_TABLE_NAME);
+    int rows = 0;
+    Scan scan = new Scan();
+    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+    ResultScanner s = meta.getScanner(scan);
+    for (Result r = null; (r = s.next()) != null;) {
+      byte [] b =
+        r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+      if (b == null || b.length <= 0) break;
+      HRegionInfo hri = Writables.getHRegionInfo(b);
+      // Use the region's start key as the row ('aaa' when the start key is empty).
+      byte [] row = getStartKey(hri);
+      Put p = new Put(row);
+      p.add(getTestFamily(), getTestQualifier(), row);
+      t.put(p);
+      rows++;
+    }
+    s.close();
+    Assert.assertEquals(expected, rows);
+    return rows;
+  }
+
+  private static byte [] getStartKey(final HRegionInfo hri) {
+    return Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey())?
+        Bytes.toBytes("aaa"): hri.getStartKey();
+  }
+
+  private static byte [] getTestFamily() {
+    return FAMILIES[0];
+  }
+
+  private static byte [] getTestQualifier() {
+    return getTestFamily();
+  }
+
+  public static void main(String args[]) throws Exception {
+    TestZKBasedCloseRegion.beforeAllTests();
+
+    TestZKBasedCloseRegion test = new TestZKBasedCloseRegion();
+    test.setup();
+    test.testCloseRegion();
+
+    TestZKBasedCloseRegion.afterAllTests();
+  }
+}
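
A note on the handshake in testCloseRegion: the AtomicBoolean doubles as a monitor, so a notifyAll() that fires before the test reaches wait() is lost, and the test then only passes after sitting out the full three-minute wait before the get() check rescues it. A latch-based sketch of the same handshake, using only java.util.concurrent and none of the HBase types above, has no lost-notification window:

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    // Sketch: replace the AtomicBoolean monitor with a one-shot latch.
    // A countDown() that happens before await() is never lost.
    public class LatchHandshakeSketch {
      private final CountDownLatch closeLatch = new CountDownLatch(1);

      // Listener side: called when the master finishes processing the close.
      void onCloseProcessed() {
        closeLatch.countDown();
      }

      // Test side: waits up to three minutes, then fails loudly.
      void awaitClose() throws Exception {
        if (!closeLatch.await(3, TimeUnit.MINUTES)) {
          throw new Exception("Timed out, close event not called on master.");
        }
      }
    }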

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedReopenRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedReopenRegion.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedReopenRegion.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedReopenRegion.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,268 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.HMsg;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.ProcessRegionClose;
+import org.apache.hadoop.hbase.master.ProcessRegionOpen;
+import org.apache.hadoop.hbase.master.RegionServerOperation;
+import org.apache.hadoop.hbase.master.RegionServerOperationListener;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.util.Writables;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestZKBasedReopenRegion {
+  private static final Log LOG = LogFactory.getLog(TestZKBasedReopenRegion.class);
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final String TABLENAME = "master_transitions";
+  private static final byte [][] FAMILIES = new byte [][] {Bytes.toBytes("a"),
+    Bytes.toBytes("b"), Bytes.toBytes("c")};
+
+  @BeforeClass public static void beforeAllTests() throws Exception {
+    Configuration c = TEST_UTIL.getConfiguration();
+    c.setBoolean("dfs.support.append", true);
+    c.setInt("hbase.regionserver.info.port", 0);
+    c.setInt("hbase.master.meta.thread.rescanfrequency", 5*1000);
+    TEST_UTIL.startMiniCluster(2);
+    TEST_UTIL.createTable(Bytes.toBytes(TABLENAME), FAMILIES);
+    HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
+    int countOfRegions = TEST_UTIL.createMultiRegions(t, getTestFamily());
+    waitUntilAllRegionsAssigned(countOfRegions);
+    addToEachStartKey(countOfRegions);
+  }
+
+  @AfterClass public static void afterAllTests() throws IOException {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before public void setup() throws IOException {
+    if (TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().size() < 2) {
+      // Need at least two servers.
+      LOG.info("Started new server=" +
+        TEST_UTIL.getHBaseCluster().startRegionServer());
+
+    }
+  }
+
+  @Test (timeout=300000) public void testOpenRegion()
+  throws Exception {
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    LOG.info("Number of region servers = " + cluster.getLiveRegionServerThreads().size());
+
+    int rsIdx = 0;
+    HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(rsIdx);
+    Collection<HRegion> regions = regionServer.getOnlineRegions();
+    HRegion region = regions.iterator().next();
+    LOG.debug("Asking RS to close region " + region.getRegionNameAsString());
+
+    AtomicBoolean closeEventProcessed = new AtomicBoolean(false);
+    AtomicBoolean reopenEventProcessed = new AtomicBoolean(false);
+    RegionServerOperationListener listener =
+      new ReopenRegionEventListener(region.getRegionNameAsString(),
+                                    closeEventProcessed,
+                                    reopenEventProcessed);
+    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+    master.getRegionServerOperationQueue().registerRegionServerOperationListener(listener);
+    HMsg closeRegionMsg = new HMsg(HMsg.Type.MSG_REGION_CLOSE,
+                                   region.getRegionInfo(),
+                                   Bytes.toBytes("Forcing close in test")
+                                  );
+    TEST_UTIL.getHBaseCluster().addMessageToSendRegionServer(rsIdx, closeRegionMsg);
+
+    synchronized(closeEventProcessed) {
+      closeEventProcessed.wait(3*60*1000);
+    }
+    if(!closeEventProcessed.get()) {
+      throw new Exception("Timed out, close event not called on master.");
+    }
+
+    synchronized(reopenEventProcessed) {
+      reopenEventProcessed.wait(3*60*1000);
+    }
+    if(!reopenEventProcessed.get()) {
+      throw new Exception("Timed out, open event not called on master after region close.");
+    }
+
+    LOG.info("Done with test, RS informed master successfully.");
+  }
+
+  public static class ReopenRegionEventListener implements RegionServerOperationListener {
+
+    private static final Log LOG = LogFactory.getLog(ReopenRegionEventListener.class);
+    String regionToClose;
+    AtomicBoolean closeEventProcessed;
+    AtomicBoolean reopenEventProcessed;
+
+    public ReopenRegionEventListener(String regionToClose,
+                                     AtomicBoolean closeEventProcessed,
+                                     AtomicBoolean reopenEventProcessed) {
+      this.regionToClose = regionToClose;
+      this.closeEventProcessed = closeEventProcessed;
+      this.reopenEventProcessed = reopenEventProcessed;
+    }
+
+    @Override
+    public boolean process(HServerInfo serverInfo, HMsg incomingMsg) {
+      return true;
+    }
+
+    @Override
+    public boolean process(RegionServerOperation op) throws IOException {
+      return true;
+    }
+
+    @Override
+    public void processed(RegionServerOperation op) {
+      LOG.debug("Master processing object: " + op.getClass().getCanonicalName());
+      if(op instanceof ProcessRegionClose) {
+        ProcessRegionClose regionCloseOp = (ProcessRegionClose)op;
+        String region = regionCloseOp.getRegionInfo().getRegionNameAsString();
+        LOG.debug("Finished closing region " + region + ", expected to close region " + regionToClose);
+        if(regionToClose.equals(region)) {
+          closeEventProcessed.set(true);
+        }
+        synchronized(closeEventProcessed) {
+          closeEventProcessed.notifyAll();
+        }
+      }
+      // Wait for open event AFTER we have closed the region
+      if(closeEventProcessed.get()) {
+        if(op instanceof ProcessRegionOpen) {
+          ProcessRegionOpen regionOpenOp = (ProcessRegionOpen)op;
+          String region = regionOpenOp.getRegionInfo().getRegionNameAsString();
+          LOG.debug("Finished closing region " + region + ", expected to close region " + regionToClose);
+          if(regionToClose.equals(region)) {
+            reopenEventProcessed.set(true);
+          }
+          synchronized(reopenEventProcessed) {
+            reopenEventProcessed.notifyAll();
+          }
+        }
+      }
+
+    }
+
+  }
+
+
+  private static void waitUntilAllRegionsAssigned(final int countOfRegions)
+  throws IOException {
+    HTable meta = new HTable(TEST_UTIL.getConfiguration(),
+      HConstants.META_TABLE_NAME);
+    while (true) {
+      int rows = 0;
+      Scan scan = new Scan();
+      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
+      ResultScanner s = meta.getScanner(scan);
+      for (Result r = null; (r = s.next()) != null;) {
+        byte [] b =
+          r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
+        if (b == null || b.length <= 0) break;
+        rows++;
+      }
+      s.close();
+      // If I get to here and all rows have a Server, then all have been assigned.
+      if (rows == countOfRegions) break;
+      LOG.info("Found=" + rows);
+      Threads.sleep(1000);
+    }
+  }
+
+  /*
+   * Add a value to each of the regions in .META.  The key is the start row of
+   * the region (except it's 'aaa' for the first region).  The value is the row name.
+   * @param expected the number of regions expected in .META.
+   * @return the number of rows added
+   * @throws IOException
+   */
+  private static int addToEachStartKey(final int expected) throws IOException {
+    HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
+    HTable meta = new HTable(TEST_UTIL.getConfiguration(),
+        HConstants.META_TABLE_NAME);
+    int rows = 0;
+    Scan scan = new Scan();
+    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+    ResultScanner s = meta.getScanner(scan);
+    for (Result r = null; (r = s.next()) != null;) {
+      byte [] b =
+        r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+      if (b == null || b.length <= 0) break;
+      HRegionInfo hri = Writables.getHRegionInfo(b);
+      // Use the region's start key as the row ('aaa' when the start key is empty).
+      byte [] row = getStartKey(hri);
+      Put p = new Put(row);
+      p.add(getTestFamily(), getTestQualifier(), row);
+      t.put(p);
+      rows++;
+    }
+    s.close();
+    Assert.assertEquals(expected, rows);
+    return rows;
+  }
+
+  private static byte [] getStartKey(final HRegionInfo hri) {
+    return Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey())?
+        Bytes.toBytes("aaa"): hri.getStartKey();
+  }
+
+  private static byte [] getTestFamily() {
+    return FAMILIES[0];
+  }
+
+  private static byte [] getTestQualifier() {
+    return getTestFamily();
+  }
+
+  public static void main(String args[]) throws Exception {
+    TestZKBasedReopenRegion.beforeAllTests();
+
+    TestZKBasedReopenRegion test = new TestZKBasedReopenRegion();
+    test.setup();
+    test.testOpenRegion();
+
+    TestZKBasedReopenRegion.afterAllTests();
+  }
+}

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Tue Sep 27 02:41:56 2011
@@ -74,9 +74,6 @@ public class TestCompaction extends HBas
     super.setUp();
     HTableDescriptor htd = createTableDescriptor(getName());
     this.r = createNewHRegion(htd, null, null);
-    this.compactionDir = HRegion.getCompactionDir(this.r.getBaseDir());
-    this.regionCompactionDir = new Path(this.compactionDir,
-                        this.r.getRegionInfo().getEncodedName());
   }
 
   @Override
@@ -150,10 +147,6 @@ public class TestCompaction extends HBas
 //    assertEquals(cellValues.length, 3);
     r.flushcache();
     r.compactStores();
-    // check compaction dir is exists
-    assertTrue(this.cluster.getFileSystem().exists(this.compactionDir));
-    // check Compaction Dir for this Regions is cleaned up
-    assertTrue(!this.cluster.getFileSystem().exists(this.regionCompactionDir));
     // Always 3 versions if that is what max versions is.
     byte [] secondRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
     // Increment the least significant character so we get to next row.

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java Tue Sep 27 02:41:56 2011
@@ -31,15 +31,14 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.util.Bytes;
 
 
-public class TestExplicitColumnTracker extends HBaseTestCase
-implements HConstants {
+public class TestExplicitColumnTracker extends HBaseTestCase {
   private boolean PRINT = false;
 
-  private final byte [] col1 = Bytes.toBytes("col1");
-  private final byte [] col2 = Bytes.toBytes("col2");
-  private final byte [] col3 = Bytes.toBytes("col3");
-  private final byte [] col4 = Bytes.toBytes("col4");
-  private final byte [] col5 = Bytes.toBytes("col5");
+  private final byte[] col1 = Bytes.toBytes("col1");
+  private final byte[] col2 = Bytes.toBytes("col2");
+  private final byte[] col3 = Bytes.toBytes("col3");
+  private final byte[] col4 = Bytes.toBytes("col4");
+  private final byte[] col5 = Bytes.toBytes("col5");
 
   private void runTest(int maxVersions,
                        TreeSet<byte[]> trackColumns,
@@ -50,13 +49,13 @@ implements HConstants {
 
 
     //Initialize result
-    List<MatchCode> result = new ArrayList<MatchCode>(); 
-    
+    List<MatchCode> result = new ArrayList<MatchCode>();
+
     //"Match"
     for(byte [] col : scannerColumns){
       result.add(exp.checkColumn(col, 0, col.length));
     }
-    
+
     assertEquals(expected.size(), result.size());
     for(int i=0; i< expected.size(); i++){
       assertEquals(expected.get(i), result.get(i));

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java Tue Sep 27 02:41:56 2011
@@ -76,7 +76,7 @@ public class TestFSErrorsExposed {
 
     StoreFile sf = new StoreFile(fs, writer.getPath(), false,
         util.getConfiguration(), StoreFile.BloomType.NONE, false);
-    HFile.Reader reader = sf.createReader();
+    StoreFile.Reader reader = sf.createReader();
     HFileScanner scanner = reader.getScanner(false, true);
 
     FaultyInputStream inStream = fs.inStreams.get(0).get();
@@ -111,7 +111,7 @@ public class TestFSErrorsExposed {
         HBaseTestingUtility.getTestDir("internalScannerExposesErrors"),
         "regionname"), "familyname");
     FaultyFileSystem fs = new FaultyFileSystem(util.getTestFileSystem());
-    HFile.Writer writer = StoreFile.createWriter(fs, hfilePath, 2 * 1024);
+    StoreFile.Writer writer = StoreFile.createWriter(fs, hfilePath, 2 * 1024);
     TestStoreFile.writeStoreFile(
         writer, Bytes.toBytes("cf"), Bytes.toBytes("qual"));
 
@@ -176,11 +176,9 @@ public class TestFSErrorsExposed {
       try {
         util.countRows(table);
         fail("Did not fail to count after removing data");
-      } catch (RuntimeException rte) {
-        // We get RTE instead of IOE since java Iterable<?> doesn't throw
-        // IOE
-        LOG.info("Got expected error", rte);
-        assertTrue(rte.getMessage().contains("Could not seek"));
+      } catch (Exception e) {
+        LOG.info("Got expected error", e);
+        assertTrue(e.getMessage().contains("Could not seek"));
       }
 
     } finally {

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java Tue Sep 27 02:41:56 2011
@@ -44,19 +44,19 @@ import org.apache.hadoop.hdfs.MiniDFSClu
  * {@link TestGet} is a medley of tests of get all done up as a single test.
  * This class
  */
-public class TestGetClosestAtOrBefore extends HBaseTestCase implements HConstants {
-  static final Log LOG = LogFactory.getLog(TestGetClosestAtOrBefore.class);
+public class TestGetClosestAtOrBefore extends HBaseTestCase {
+  private static final Log LOG = LogFactory.getLog(TestGetClosestAtOrBefore.class);
   private MiniDFSCluster miniHdfs;
 
-  private static final byte [] T00 = Bytes.toBytes("000");
-  private static final byte [] T10 = Bytes.toBytes("010");
-  private static final byte [] T11 = Bytes.toBytes("011");
-  private static final byte [] T12 = Bytes.toBytes("012");
-  private static final byte [] T20 = Bytes.toBytes("020");
-  private static final byte [] T30 = Bytes.toBytes("030");
-  private static final byte [] T31 = Bytes.toBytes("031");
-  private static final byte [] T35 = Bytes.toBytes("035");
-  private static final byte [] T40 = Bytes.toBytes("040");
+  private static final byte[] T00 = Bytes.toBytes("000");
+  private static final byte[] T10 = Bytes.toBytes("010");
+  private static final byte[] T11 = Bytes.toBytes("011");
+  private static final byte[] T12 = Bytes.toBytes("012");
+  private static final byte[] T20 = Bytes.toBytes("020");
+  private static final byte[] T30 = Bytes.toBytes("030");
+  private static final byte[] T31 = Bytes.toBytes("031");
+  private static final byte[] T35 = Bytes.toBytes("035");
+  private static final byte[] T40 = Bytes.toBytes("040");
 
   @Override
   protected void setUp() throws Exception {
@@ -86,7 +86,8 @@ public class TestGetClosestAtOrBefore ex
           i == 0? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i),
           i == last? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes((byte)i + interval));
         Put put = new Put(hri.getRegionName());
-        put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(hri));
+        put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+                Writables.getBytes(hri));
         mr.put(put, false);
       }
     }
@@ -346,4 +347,4 @@ public class TestGetClosestAtOrBefore ex
     }
     super.tearDown();
   }
-}
\ No newline at end of file
+}

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetDeleteTracker.java Tue Sep 27 02:41:56 2011
@@ -30,17 +30,17 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.util.Bytes;
 
 
-public class TestGetDeleteTracker extends HBaseTestCase implements HConstants {
+public class TestGetDeleteTracker extends HBaseTestCase {
 
   private static final boolean PRINT = true;
 
-  private byte [] col1 = null;
-  private byte [] col2 = null;
+  private byte[] col1 = null;
+  private byte[] col2 = null;
 
   private int col1Len = 0;
   private int col2Len = 0;
 
-  private byte [] empty = null;
+  private byte[] empty = null;
 
   private long ts1 = 0L;
   private long ts2 = 0L;

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Tue Sep 27 02:41:56 2011
@@ -27,9 +27,12 @@ import org.apache.hadoop.hbase.HBaseTest
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MultithreadedTestUtil;
+import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -44,11 +47,16 @@ import org.apache.hadoop.hbase.filter.Fi
 import org.apache.hadoop.hbase.filter.PrefixFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScanner;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
 import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -58,6 +66,7 @@ import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 
 
 /**
@@ -328,6 +337,103 @@ public class TestHRegion extends HBaseTe
     assertTrue(exception);
   }
 
+  @SuppressWarnings("unchecked")
+  public void testBatchPut() throws Exception {
+    byte[] b = Bytes.toBytes(getName());
+    byte[] cf = Bytes.toBytes("cf");
+    byte[] qual = Bytes.toBytes("qual");
+    byte[] val = Bytes.toBytes("val");
+    initHRegion(b, getName(), cf);
+
+    HLog.getSyncOps(); // clear counter from prior tests
+    assertEquals(0, HLog.getSyncOps());
+
+    LOG.info("First a batch put with all valid puts");
+    final Put[] puts = new Put[10];
+    for (int i = 0; i < 10; i++) {
+      puts[i] = new Put(Bytes.toBytes("row_" + i));
+      puts[i].add(cf, qual, val);
+    }
+
+    OperationStatusCode[] codes = this.region.put(puts);
+    assertEquals(10, codes.length);
+    for (int i = 0; i < 10; i++) {
+      assertEquals(OperationStatusCode.SUCCESS, codes[i]);
+    }
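+    // All ten puts above should have shared a single WAL sync.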
+    assertEquals(1, HLog.getSyncOps());
+
+    LOG.info("Next a batch put with one invalid family");
+    puts[5].add(Bytes.toBytes("BAD_CF"), qual, val);
+    codes = this.region.put(puts);
+    assertEquals(10, codes.length);
+    for (int i = 0; i < 10; i++) {
+      assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY :
+        OperationStatusCode.SUCCESS, codes[i]);
+    }
+    assertEquals(1, HLog.getSyncOps());
+
+    LOG.info("Next a batch put that has to break into two batches to avoid a lock");
+    Integer lockedRow = region.obtainRowLock(Bytes.toBytes("row_2"));
+
+    MultithreadedTestUtil.TestContext ctx =
+      new MultithreadedTestUtil.TestContext(HBaseConfiguration.create());
+    final AtomicReference<OperationStatusCode[]> retFromThread =
+      new AtomicReference<OperationStatusCode[]>();
+    TestThread putter = new TestThread(ctx) {
+      @Override
+      public void doWork() throws IOException {
+        retFromThread.set(region.put(puts));
+      }
+    };
+    LOG.info("...starting put thread while holding lock");
+    ctx.addThread(putter);
+    ctx.startThreads();
+
+    LOG.info("...waiting for put thread to sync first time");
+    long startWait = System.currentTimeMillis();
+    while (HLog.getSyncOps() == 0) {
+      Thread.sleep(100);
+      if (System.currentTimeMillis() - startWait > 10000) {
+        fail("Timed out waiting for thread to sync first minibatch");
+      }
+    }
+    LOG.info("...releasing row lock, which should let put thread continue");
+    region.releaseRowLock(lockedRow);
+    LOG.info("...joining on thread");
+    ctx.stop();
+    LOG.info("...checking that next batch was synced");
+    assertEquals(1, HLog.getSyncOps());
+    codes = retFromThread.get();
+    for (int i = 0; i < 10; i++) {
+      assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY :
+        OperationStatusCode.SUCCESS, codes[i]);
+    }
+
+    LOG.info("Nexta, a batch put which uses an already-held lock");
+    lockedRow = region.obtainRowLock(Bytes.toBytes("row_2"));
+    LOG.info("...obtained row lock");
+    List<Pair<Put, Integer>> putsAndLocks = Lists.newArrayList();
+    for (int i = 0; i < 10; i++) {
+      Pair<Put, Integer> pair = new Pair<Put, Integer>(puts[i], null);
+      if (i == 2) pair.setSecond(lockedRow);
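+      // puts[2] targets row_2, whose lock is already held; passing the lock id lets put() reuse it.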
+      putsAndLocks.add(pair);
+    }
+
+    codes = region.put(putsAndLocks.toArray(new Pair[0]));
+    LOG.info("...performed put");
+    for (int i = 0; i < 10; i++) {
+      assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY :
+        OperationStatusCode.SUCCESS, codes[i]);
+    }
+    // Make sure we didn't do an extra batch
+    assertEquals(1, HLog.getSyncOps());
+
+    // Make sure we still hold lock
+    assertTrue(region.isRowLocked(lockedRow));
+    LOG.info("...releasing lock");
+    region.releaseRowLock(lockedRow);
+  }
+
   //////////////////////////////////////////////////////////////////////////////
   // checkAndMutate tests
   //////////////////////////////////////////////////////////////////////////////
@@ -729,6 +835,53 @@ public class TestHRegion extends HBaseTe
     assertEquals(0, result.size());
   }
 
+  /**
+   * Tests that the special LATEST_TIMESTAMP option for puts gets
+   * replaced by the actual timestamp
+   */
+  public void testPutWithLatestTS() throws IOException {
+    byte [] tableName = Bytes.toBytes("testtable");
+    byte [] fam = Bytes.toBytes("info");
+    byte [][] families = {fam};
+    String method = this.getName();
+    initHRegion(tableName, method, families);
+
+    byte [] row = Bytes.toBytes("row1");
+    // column names
+    byte [] qual = Bytes.toBytes("qual");
+
+    // add data with LATEST_TIMESTAMP, put without WAL
+    Put put = new Put(row);
+    put.add(fam, qual, HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"));
+    region.put(put, false);
+
+    // Make sure it shows up with an actual timestamp
+    Get get = new Get(row).addColumn(fam, qual);
+    Result result = region.get(get, null);
+    assertEquals(1, result.size());
+    KeyValue kv = result.raw()[0];
+    LOG.info("Got: " + kv);
+    assertTrue("LATEST_TIMESTAMP was not replaced with real timestamp",
+        kv.getTimestamp() != HConstants.LATEST_TIMESTAMP);
+
+    // Check same with WAL enabled (historically these took different
+    // code paths, so check both)
+    row = Bytes.toBytes("row2");
+    put = new Put(row);
+    put.add(fam, qual, HConstants.LATEST_TIMESTAMP, Bytes.toBytes("value"));
+    region.put(put, true);
+
+    // Make sure it shows up with an actual timestamp
+    get = new Get(row).addColumn(fam, qual);
+    result = region.get(get, null);
+    assertEquals(1, result.size());
+    kv = result.raw()[0];
+    LOG.info("Got: " + kv);
+    assertTrue("LATEST_TIMESTAMP was not replaced with real timestamp",
+        kv.getTimestamp() != HConstants.LATEST_TIMESTAMP);
+
+  }
+
   public void testScanner_DeleteOneFamilyNotAnother() throws IOException {
     byte [] tableName = Bytes.toBytes("test_table");
     byte [] fam1 = Bytes.toBytes("columnA");

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeyValueHeap.java Tue Sep 27 02:41:56 2011
@@ -32,24 +32,23 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.Bytes;
 
 
-public class TestKeyValueHeap extends HBaseTestCase
-implements HConstants {
+public class TestKeyValueHeap extends HBaseTestCase {
   private static final boolean PRINT = false;
 
   List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>();
 
-  private byte [] row1;
-  private byte [] fam1;
-  private byte [] col1;
-  private byte [] data;
-
-  private byte [] row2;
-  private byte [] fam2;
-  private byte [] col2;
-
-  private byte [] col3;
-  private byte [] col4;
-  private byte [] col5;
+  private byte[] row1;
+  private byte[] fam1;
+  private byte[] col1;
+  private byte[] data;
+
+  private byte[] row2;
+  private byte[] fam2;
+  private byte[] col2;
+
+  private byte[] col3;
+  private byte[] col4;
+  private byte[] col5;
 
   public void setUp() throws Exception {
     super.setUp();

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java Tue Sep 27 02:41:56 2011
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionse
 import java.io.IOException;
 import java.rmi.UnexpectedException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.NavigableSet;
 import java.util.TreeSet;
@@ -38,6 +39,10 @@ import org.apache.hadoop.hbase.client.Ge
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
 
+import com.google.common.base.Joiner;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
 /** memstore test case */
 public class TestMemStore extends TestCase {
   private final Log LOG = LogFactory.getLog(this.getClass());
@@ -204,11 +209,18 @@ public class TestMemStore extends TestCa
   private void assertScannerResults(KeyValueScanner scanner, KeyValue[] expected)
       throws IOException {
     scanner.seek(KeyValue.createFirstOnRow(new byte[]{}));
-    for (KeyValue kv : expected) {
-      assertTrue(0 ==
-          KeyValue.COMPARATOR.compare(kv,
-              scanner.next()));
+    List<KeyValue> returned = Lists.newArrayList();
+
+    while (true) {
+      KeyValue next = scanner.next();
+      if (next == null) break;
+      returned.add(next);
     }
+
+    assertTrue(
+        "Got:\n" + Joiner.on("\n").join(returned) +
+        "\nExpected:\n" + Joiner.on("\n").join(expected),
+        Iterables.elementsEqual(Arrays.asList(expected), returned));
     assertNull(scanner.peek());
   }
 
@@ -252,6 +264,115 @@ public class TestMemStore extends TestCa
     assertScannerResults(s, new KeyValue[]{kv1, kv2});
   }
 
+  /**
+   * Regression test for HBASE-2616, HBASE-2670.
+   * When we insert a higher-memstoreTS version of a cell but with
+   * the same timestamp, we still need to provide consistent reads
+   * for the same scanner.
+   */
+  public void testMemstoreEditsVisibilityWithSameKey() throws IOException {
+    final byte[] row = Bytes.toBytes(1);
+    final byte[] f = Bytes.toBytes("family");
+    final byte[] q1 = Bytes.toBytes("q1");
+    final byte[] q2 = Bytes.toBytes("q2");
+    final byte[] v1 = Bytes.toBytes("value1");
+    final byte[] v2 = Bytes.toBytes("value2");
+
+    // INSERT 1: Write both columns val1
+    ReadWriteConsistencyControl.WriteEntry w =
+        rwcc.beginMemstoreInsert();
+
+    KeyValue kv11 = new KeyValue(row, f, q1, v1);
+    kv11.setMemstoreTS(w.getWriteNumber());
+    memstore.add(kv11);
+
+    KeyValue kv12 = new KeyValue(row, f, q2, v1);
+    kv12.setMemstoreTS(w.getWriteNumber());
+    memstore.add(kv12);
+    rwcc.completeMemstoreInsert(w);
+
+    // BEFORE STARTING INSERT 2, SEE FIRST KVS
+    ReadWriteConsistencyControl.resetThreadReadPoint(rwcc);
+    KeyValueScanner s = this.memstore.getScanners().get(0);
+    assertScannerResults(s, new KeyValue[]{kv11, kv12});
+
+    // START INSERT 2: Write both columns val2
+    w = rwcc.beginMemstoreInsert();
+    KeyValue kv21 = new KeyValue(row, f, q1, v2);
+    kv21.setMemstoreTS(w.getWriteNumber());
+    memstore.add(kv21);
+
+    KeyValue kv22 = new KeyValue(row, f, q2, v2);
+    kv22.setMemstoreTS(w.getWriteNumber());
+    memstore.add(kv22);
+
+    // BEFORE COMPLETING INSERT 2, SEE FIRST KVS
+    ReadWriteConsistencyControl.resetThreadReadPoint(rwcc);
+    s = this.memstore.getScanners().get(0);
+    assertScannerResults(s, new KeyValue[]{kv11, kv12});
+
+    // COMPLETE INSERT 2
+    rwcc.completeMemstoreInsert(w);
+
+    // NOW SHOULD SEE NEW KVS IN ADDITION TO OLD KVS.
+    // See HBASE-1485 for discussion about what we should do with
+    // the duplicate-TS inserts
+    ReadWriteConsistencyControl.resetThreadReadPoint(rwcc);
+    s = this.memstore.getScanners().get(0);
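+    // For the same row/column/timestamp, the newer (higher memstoreTS) edits sort first.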
+    assertScannerResults(s, new KeyValue[]{kv21, kv11, kv22, kv12});
+  }
+
+  /**
+   * When we insert a higher-memstoreTS deletion of a cell but with
+   * the same timestamp, we still need to provide consistent reads
+   * for the same scanner.
+   */
+  public void testMemstoreDeletesVisibilityWithSameKey() throws IOException {
+    final byte[] row = Bytes.toBytes(1);
+    final byte[] f = Bytes.toBytes("family");
+    final byte[] q1 = Bytes.toBytes("q1");
+    final byte[] q2 = Bytes.toBytes("q2");
+    final byte[] v1 = Bytes.toBytes("value1");
+    // INSERT 1: Write both columns val1
+    ReadWriteConsistencyControl.WriteEntry w =
+        rwcc.beginMemstoreInsert();
+
+    KeyValue kv11 = new KeyValue(row, f, q1, v1);
+    kv11.setMemstoreTS(w.getWriteNumber());
+    memstore.add(kv11);
+
+    KeyValue kv12 = new KeyValue(row, f, q2, v1);
+    kv12.setMemstoreTS(w.getWriteNumber());
+    memstore.add(kv12);
+    rwcc.completeMemstoreInsert(w);
+
+    // BEFORE STARTING INSERT 2, SEE FIRST KVS
+    ReadWriteConsistencyControl.resetThreadReadPoint(rwcc);
+    KeyValueScanner s = this.memstore.getScanners().get(0);
+    assertScannerResults(s, new KeyValue[]{kv11, kv12});
+
+    // START DELETE: Insert delete for one of the columns
+    w = rwcc.beginMemstoreInsert();
+    KeyValue kvDel = new KeyValue(row, f, q2, kv11.getTimestamp(),
+        KeyValue.Type.DeleteColumn);
+    kvDel.setMemstoreTS(w.getWriteNumber());
+    memstore.add(kvDel);
+
+    // BEFORE COMPLETING DELETE, SEE FIRST KVS
+    ReadWriteConsistencyControl.resetThreadReadPoint(rwcc);
+    s = this.memstore.getScanners().get(0);
+    assertScannerResults(s, new KeyValue[]{kv11, kv12});
+
+    // COMPLETE DELETE
+    rwcc.completeMemstoreInsert(w);
+
+    // NOW WE SHOULD SEE DELETE
+    ReadWriteConsistencyControl.resetThreadReadPoint(rwcc);
+    s = this.memstore.getScanners().get(0);
+    assertScannerResults(s, new KeyValue[]{kv11, kvDel, kv12});
+  }
+
+
   private static class ReadOwnWritesTester extends Thread {
     static final int NUM_TRIES = 1000;
 
@@ -333,7 +454,7 @@ public class TestMemStore extends TestCa
     }
   }
 
-  /** 
+  /**
    * Test memstore snapshots
    * @throws IOException
    */
@@ -664,7 +785,7 @@ public class TestMemStore extends TestCa
     expected.add(put2);
     expected.add(put1);
 
-    
+
     assertEquals(4, memstore.kvset.size());
     int i = 0;
     for (KeyValue kv: memstore.kvset) {
@@ -704,7 +825,7 @@ public class TestMemStore extends TestCa
     expected.add(put3);
 
 
-    
+
     assertEquals(5, memstore.kvset.size());
     int i = 0;
     for (KeyValue kv: memstore.kvset) {
@@ -763,9 +884,42 @@ public class TestMemStore extends TestCa
   }
 
 
+  ////////////////////////////////////
+  //Test for timestamps
+  ////////////////////////////////////
+
+  /**
+   * Test to ensure correctness when using Memstore with multiple timestamps
+   */
+  public void testMultipleTimestamps() throws IOException {
+    long[] timestamps = new long[] {20,10,5,1};
+    Scan scan = new Scan();
+
+    for (long timestamp: timestamps)
+      addRows(memstore,timestamp);
+
+    scan.setTimeRange(0, 2);
+    assertTrue(memstore.shouldSeek(scan));
+
+    scan.setTimeRange(20, 82);
+    assertTrue(memstore.shouldSeek(scan));
+
+    scan.setTimeRange(10, 20);
+    assertTrue(memstore.shouldSeek(scan));
+
+    scan.setTimeRange(8, 12);
+    assertTrue(memstore.shouldSeek(scan));
+
+    /* This test is not required for correctness, but it should pass when
+     * timestamp range optimization is on. */
+    //scan.setTimeRange(28, 42);
+    //assertTrue(!memstore.shouldSeek(scan));
+  }
+
+
   //////////////////////////////////////////////////////////////////////////////
   // Helpers
-  //////////////////////////////////////////////////////////////////////////////  
+  //////////////////////////////////////////////////////////////////////////////
   private static byte [] makeQualifier(final int i1, final int i2){
     return Bytes.toBytes(Integer.toString(i1) + ";" +
         Integer.toString(i2));
@@ -887,5 +1041,5 @@ public class TestMemStore extends TestCa
 
   }
 
-  
+
 }
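
The write-visibility discipline the two new RWCC tests exercise is compact enough to state on its own. A minimal sketch, reusing the test's rwcc and memstore fields and only the calls that appear in this commit (the try/finally is added for hygiene and is not something the tests rely on):

    // An edit is tagged with the writer's sequence number and stays invisible
    // to scanners until the insert is marked complete.
    ReadWriteConsistencyControl.WriteEntry w = rwcc.beginMemstoreInsert();
    try {
      KeyValue kv = new KeyValue(row, f, q1, v1);
      kv.setMemstoreTS(w.getWriteNumber());
      memstore.add(kv);
    } finally {
      rwcc.completeMemstoreInsert(w);
    }
    // A scanner sees the edit only after refreshing its read point.
    ReadWriteConsistencyControl.resetThreadReadPoint(rwcc);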

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java Tue Sep 27 02:41:56 2011
@@ -34,21 +34,20 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.util.Bytes;
 
 
-public class TestQueryMatcher extends HBaseTestCase
-implements HConstants {
+public class TestQueryMatcher extends HBaseTestCase {
   private static final boolean PRINT = false;
 
-  private byte [] row1;
-  private byte [] row2;
-  private byte [] fam1;
-  private byte [] fam2;
-  private byte [] col1;
-  private byte [] col2;
-  private byte [] col3;
-  private byte [] col4;
-  private byte [] col5;
+  private byte[] row1;
+  private byte[] row2;
+  private byte[] fam1;
+  private byte[] fam2;
+  private byte[] col1;
+  private byte[] col2;
+  private byte[] col3;
+  private byte[] col4;
+  private byte[] col5;
 
-  private byte [] data;
+  private byte[] data;
 
   private Get get;
 

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadWriteConsistencyControl.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadWriteConsistencyControl.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadWriteConsistencyControl.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestReadWriteConsistencyControl.java Tue Sep 27 02:41:56 2011
@@ -1,3 +1,22 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hbase.regionserver;
 
 import junit.framework.TestCase;

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanDeleteTracker.java Tue Sep 27 02:41:56 2011
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.Bytes;
 
 
-public class TestScanDeleteTracker extends HBaseTestCase implements HConstants {
+public class TestScanDeleteTracker extends HBaseTestCase {
 
   private ScanDeleteTracker sdt;
   private long timestamp = 10L;

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Tue Sep 27 02:41:56 2011
@@ -20,35 +20,51 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
+import java.io.IOException;
+import java.lang.ref.SoftReference;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentSkipListSet;
+
 import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.FilterFileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.security.UnixUserGroupInformation;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NavigableSet;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentSkipListSet;
+import com.google.common.base.Joiner;
 
 /**
  * Test class for the Store
  */
 public class TestStore extends TestCase {
+  public static final Log LOG = LogFactory.getLog(TestStore.class);
+
   Store store;
   byte [] table = Bytes.toBytes("table");
   byte [] family = Bytes.toBytes("family");
@@ -91,15 +107,17 @@ public class TestStore extends TestCase 
   }
 
   private void init(String methodName) throws IOException {
+    init(methodName, HBaseConfiguration.create());
+  }
+
+  private void init(String methodName, Configuration conf)
+  throws IOException {
     //Setting up a Store
     Path basedir = new Path(DIR+methodName);
     Path logdir = new Path(DIR+methodName+"/logs");
     Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
     HColumnDescriptor hcd = new HColumnDescriptor(family);
-    HBaseConfiguration conf = new HBaseConfiguration();
     FileSystem fs = FileSystem.get(conf);
-    Path reconstructionLog = null;
-    Progressable reporter = null;
 
     fs.delete(logdir, true);
 
@@ -109,8 +127,7 @@ public class TestStore extends TestCase 
     HLog hlog = new HLog(fs, logdir, oldLogDir, conf, null);
     HRegion region = new HRegion(basedir, hlog, fs, conf, info, null);
 
-    store = new Store(basedir, region, hcd, fs, reconstructionLog, conf,
-        reporter);
+    store = new Store(basedir, region, hcd, fs, conf);
   }
 
 
@@ -133,7 +150,7 @@ public class TestStore extends TestCase 
     StoreFile f = this.store.getStorefiles().get(0);
     Path storedir = f.getPath().getParent();
     long seqid = f.getMaxSequenceId();
-    HBaseConfiguration c = new HBaseConfiguration();
+    Configuration c = HBaseConfiguration.create();
     FileSystem fs = FileSystem.get(c);
     StoreFile.Writer w = StoreFile.createWriter(fs, storedir, 
         StoreFile.DEFAULT_BLOCKSIZE_SMALL);
@@ -143,7 +160,7 @@ public class TestStore extends TestCase 
     // Reopen it... should pick up two files
     this.store = new Store(storedir.getParent().getParent(),
       this.store.getHRegion(),
-      this.store.getFamily(), fs, null, c, null);
+      this.store.getFamily(), fs, c);
     System.out.println(this.store.getHRegionInfo().getEncodedName());
     assertEquals(2, this.store.getStorefilesCount());
     this.store.get(get, qualifiers, result);
@@ -312,10 +329,181 @@ public class TestStore extends TestCase 
 
   }
 
+  public void testHandleErrorsInFlush() throws Exception {
+    LOG.info("Setting up a faulty file system that cannot write");
+
+    Configuration conf = HBaseConfiguration.create();
+    // Set a different UGI so we don't get the same cached LocalFS instance
+    conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME,
+        "testhandleerrorsinflush,foo");
+    // Inject our faulty LocalFileSystem
+    conf.setClass("fs.file.impl", FaultyFileSystem.class,
+        FileSystem.class);
+    // Make sure it worked (above is sensitive to caching details in hadoop core)
+    FileSystem fs = FileSystem.get(conf);
+    assertEquals(FaultyFileSystem.class, fs.getClass());
+
+    // Initialize region
+    init(getName(), conf);
+
+    LOG.info("Adding some data");
+    this.store.add(new KeyValue(row, family, qf1, null));
+    this.store.add(new KeyValue(row, family, qf2, null));
+    this.store.add(new KeyValue(row, family, qf3, null));
+
+    LOG.info("Before flush, we should have no files");
+    FileStatus[] files = fs.listStatus(store.getHomedir());
+    Path[] paths = FileUtil.stat2Paths(files);
+    System.err.println("Got paths: " + Joiner.on(",").join(paths));
+    assertEquals(0, paths.length);
+
+    //flush
+    try {
+      LOG.info("Flushing");
+      flush(1);
+      fail("Didn't bubble up IOE!");
+    } catch (IOException ioe) {
+      assertTrue(ioe.getMessage().contains("Fault injected"));
+    }
+
+    LOG.info("After failed flush, we should still have no files!");
+    files = fs.listStatus(store.getHomedir());
+    paths = FileUtil.stat2Paths(files);
+    System.err.println("Got paths: " + Joiner.on(",").join(paths));
+    assertEquals(0, paths.length);
+  }
+
+
+  static class FaultyFileSystem extends FilterFileSystem {
+    List<SoftReference<FaultyOutputStream>> outStreams =
+      new ArrayList<SoftReference<FaultyOutputStream>>();
+    private long faultPos = 200;
+
+    public FaultyFileSystem() {
+      super(new LocalFileSystem());
+      System.err.println("Creating faulty!");
+    }
+
+    @Override
+    public FSDataOutputStream create(Path p) throws IOException {
+      return new FaultyOutputStream(super.create(p), faultPos);
+    }
+
+  }
+
+  static class FaultyOutputStream extends FSDataOutputStream {
+    volatile long faultPos = Long.MAX_VALUE;
+
+    public FaultyOutputStream(FSDataOutputStream out,
+        long faultPos) throws IOException {
+      super(out, null);
+      this.faultPos = faultPos;
+    }
+
+    @Override
+    public void write(byte[] buf, int offset, int length) throws IOException {
+      System.err.println("faulty stream write at pos " + getPos());
+      injectFault();
+      super.write(buf, offset, length);
+    }
+
+    private void injectFault() throws IOException {
+      if (getPos() >= faultPos) {
+        throw new IOException("Fault injected");
+      }
+    }
+  }
+
+
+
   private static void flushStore(Store store, long id) throws IOException {
     StoreFlusher storeFlusher = store.getStoreFlusher(id);
     storeFlusher.prepare();
     storeFlusher.flushCache();
     storeFlusher.commit();
   }
-}
+
+
+
+  /**
+   * Generate a list of KeyValues for testing based on given parameters
+   * @param timestamps one KeyValue per timestamp is generated for each row
+   * @param numRows number of rows to generate
+   * @param qualifier column qualifier for every KeyValue
+   * @param family column family for every KeyValue
+   * @return the generated list of KeyValues
+   */
+  List<KeyValue> getKeyValueSet(long[] timestamps, int numRows,
+      byte[] qualifier, byte[] family) {
+    List<KeyValue> kvList = new ArrayList<KeyValue>();
+    for (int i=1;i<=numRows;i++) {
+      byte[] b = Bytes.toBytes(i);
+      for (long timestamp: timestamps) {
+        kvList.add(new KeyValue(b, family, qualifier, timestamp, b));
+      }
+    }
+    return kvList;
+  }
+
+  /**
+   * Test to ensure correctness when using Stores with multiple timestamps
+   * @throws IOException
+   */
+  public void testMultipleTimestamps() throws IOException {
+    int numRows = 1;
+    long[] timestamps1 = new long[] {1,5,10,20};
+    long[] timestamps2 = new long[] {30,80};
+
+    init(this.getName());
+
+    List<KeyValue> kvList1 = getKeyValueSet(timestamps1, numRows, qf1, family);
+    for (KeyValue kv : kvList1) {
+      this.store.add(kv);
+    }
+
+    this.store.snapshot();
+    flushStore(store, id++);
+
+    List<KeyValue> kvList2 = getKeyValueSet(timestamps2, numRows, qf1, family);
+    for (KeyValue kv : kvList2) {
+      this.store.add(kv);
+    }
+
+    NavigableSet<byte[]> columns = new ConcurrentSkipListSet<byte[]>(
+        Bytes.BYTES_COMPARATOR);
+    columns.add(qf1);
+    List<KeyValue> result;
+    Get get = new Get(Bytes.toBytes(1));
+    get.addColumn(family,qf1);
+
+    get.setTimeRange(0,15);
+    result = new ArrayList<KeyValue>();
+    this.store.get(get, columns, result);
+    assertTrue(result.size()>0);
+
+    get.setTimeRange(40,90);
+    result = new ArrayList<KeyValue>();
+    this.store.get(get, columns, result);
+    assertTrue(result.size()>0);
+
+    get.setTimeRange(10,45);
+    result = new ArrayList<KeyValue>();
+    this.store.get(get, columns, result);
+    assertTrue(result.size()>0);
+
+    get.setTimeRange(80,145);
+    result = new ArrayList<KeyValue>();
+    this.store.get(get, columns, result);
+    assertTrue(result.size()>0);
+
+    get.setTimeRange(1,2);
+    result = new ArrayList<KeyValue>();
+    this.store.get(get, columns, result);
+    assertTrue(result.size()>0);
+
+    get.setTimeRange(90,200);
+    result = new ArrayList<KeyValue>();
+    this.store.get(get, columns, result);
+    assertTrue(result.size()==0);
+  }
+}
\ No newline at end of file

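The new testHandleErrorsInFlush above turns entirely on swapping a faulty LocalFileSystem in through configuration. A minimal sketch of that wiring, assuming the Hadoop 0.20-era FileSystem cache this branch builds against; the wrapper class and user names are illustrative, and real fault logic would override create() the way FaultyFileSystem does above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FilterFileSystem;
    import org.apache.hadoop.fs.LocalFileSystem;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.security.UnixUserGroupInformation;

    public class FaultyFsWiringSketch {
      /** Pass-through wrapper around the local filesystem. */
      public static class WrapperFs extends FilterFileSystem {
        public WrapperFs() {
          super(new LocalFileSystem());
        }
      }

      public static FileSystem wiredLocalFs() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // A distinct UGI keys a fresh entry in Hadoop's FileSystem cache, so
        // the class set below is instantiated rather than a cached LocalFileSystem.
        conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, "someuser,somegroup");
        // fs.file.impl maps the file:// scheme to the wrapper class.
        conf.setClass("fs.file.impl", WrapperFs.class, FileSystem.class);
        FileSystem fs = FileSystem.get(conf);
        if (!(fs instanceof WrapperFs)) {  // same sanity check the test makes
          throw new IllegalStateException("FileSystem cache served a stale instance");
        }
        return fs;
      }
    }

The explicit check matters because FileSystem.get() caches instances; without the UGI override the test could silently run against the ordinary local filesystem and never exercise the fault path.
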
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Tue Sep 27 02:41:56 2011
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.List;
 import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
@@ -35,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseTest
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.Reference.Range;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
@@ -82,14 +84,14 @@ public class TestStoreFile extends HBase
    */
   public void testBasicHalfMapFile() throws Exception {
     // Make up a directory hierarchy that has a regiondir and familyname.
-    HFile.Writer writer = StoreFile.createWriter(this.fs,
+    StoreFile.Writer writer = StoreFile.createWriter(this.fs,
       new Path(new Path(this.testDir, "regionname"), "familyname"), 2 * 1024);
     writeStoreFile(writer);
-    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf, 
+    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), true, conf,
         StoreFile.BloomType.NONE, false));
   }
 
-  private void writeStoreFile(final HFile.Writer writer) throws IOException {
+  private void writeStoreFile(final StoreFile.Writer writer) throws IOException {
     writeStoreFile(writer, Bytes.toBytes(getName()), Bytes.toBytes(getName()));
   }
   /*
@@ -98,7 +100,7 @@ public class TestStoreFile extends HBase
    * @param writer
    * @throws IOException
    */
-  public static void writeStoreFile(final HFile.Writer writer, byte[] fam, byte[] qualifier)
+  public static void writeStoreFile(final StoreFile.Writer writer, byte[] fam, byte[] qualifier)
   throws IOException {
     long now = System.currentTimeMillis();
     try {
@@ -123,11 +125,11 @@ public class TestStoreFile extends HBase
     Path storedir = new Path(new Path(this.testDir, "regionname"), "familyname");
     Path dir = new Path(storedir, "1234567890");
     // Make a store file and write data to it.
-    HFile.Writer writer = StoreFile.createWriter(this.fs, dir, 8 * 1024);
+    StoreFile.Writer writer = StoreFile.createWriter(this.fs, dir, 8 * 1024);
     writeStoreFile(writer);
-    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf, 
+    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf,
         StoreFile.BloomType.NONE, false);
-    HFile.Reader reader = hsf.createReader();
+    StoreFile.Reader reader = hsf.createReader();
     // Split on a row, not in middle of row.  Midkey returned by reader
     // may be in middle of row.  Create new one with empty column and
     // timestamp.
@@ -137,7 +139,7 @@ public class TestStoreFile extends HBase
     byte [] finalRow = kv.getRow();
     // Make a reference
     Path refPath = StoreFile.split(fs, dir, hsf, midRow, Range.top);
-    StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf, 
+    StoreFile refHsf = new StoreFile(this.fs, refPath, true, conf,
         StoreFile.BloomType.NONE, false);
     // Now confirm that I can read from the reference and that it only gets
     // keys from top half of the file.
@@ -174,9 +176,9 @@ public class TestStoreFile extends HBase
     Path bottomPath = StoreFile.split(this.fs, bottomDir,
       f, midRow, Range.bottom);
     // Make readers on top and bottom.
-    HFile.Reader top = new StoreFile(this.fs, topPath, true, conf, 
+    StoreFile.Reader top = new StoreFile(this.fs, topPath, true, conf,
         StoreFile.BloomType.NONE, false).createReader();
-    HFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, conf, 
+    StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath, true, conf,
         StoreFile.BloomType.NONE, false).createReader();
     ByteBuffer previous = null;
     LOG.info("Midkey: " + midKV.toString());
@@ -229,9 +231,9 @@ public class TestStoreFile extends HBase
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
         Range.bottom);
-      top = new StoreFile(this.fs, topPath, true, conf, 
+      top = new StoreFile(this.fs, topPath, true, conf,
           StoreFile.BloomType.NONE, false).createReader();
-      bottom = new StoreFile(this.fs, bottomPath, true, conf, 
+      bottom = new StoreFile(this.fs, bottomPath, true, conf,
           StoreFile.BloomType.NONE, false).createReader();
       bottomScanner = bottom.getScanner(false, false);
       int count = 0;
@@ -251,7 +253,6 @@ public class TestStoreFile extends HBase
           key.arrayOffset(), key.limit(), badmidkey, 0, badmidkey.length) >= 0);
         if (first) {
           first = false;
-          first = false;
           KeyValue keyKV = KeyValue.createKeyValueFromKey(key);
           LOG.info("First top when key < bottom: " + keyKV);
           String tmp = Bytes.toString(keyKV.getRow());
@@ -275,9 +276,9 @@ public class TestStoreFile extends HBase
       topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top);
       bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey,
         Range.bottom);
-      top = new StoreFile(this.fs, topPath, true, conf, 
+      top = new StoreFile(this.fs, topPath, true, conf,
           StoreFile.BloomType.NONE, false).createReader();
-      bottom = new StoreFile(this.fs, bottomPath, true, conf, 
+      bottom = new StoreFile(this.fs, bottomPath, true, conf,
           StoreFile.BloomType.NONE, false).createReader();
       first = true;
       bottomScanner = bottom.getScanner(false, false);
@@ -317,45 +318,47 @@ public class TestStoreFile extends HBase
       fs.delete(f.getPath(), true);
     }
   }
-  
+
   private static String ROOT_DIR =
     HBaseTestingUtility.getTestDir("TestStoreFile").toString();
   private static String localFormatter = "%010d";
-  
+
   public void testBloomFilter() throws Exception {
     FileSystem fs = FileSystem.getLocal(conf);
     conf.setFloat("io.hfile.bloom.error.rate", (float)0.01);
     conf.setBoolean("io.hfile.bloom.enabled", true);
-    
+
     // write the file
     Path f = new Path(ROOT_DIR, getName());
-    StoreFile.Writer writer = new StoreFile.Writer(fs, f, 
-        StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM, 
+    StoreFile.Writer writer = new StoreFile.Writer(fs, f,
+        StoreFile.DEFAULT_BLOCKSIZE_SMALL, HFile.DEFAULT_COMPRESSION_ALGORITHM,
         conf, KeyValue.COMPARATOR, StoreFile.BloomType.ROW, 2000);
 
     long now = System.currentTimeMillis();
     for (int i = 0; i < 2000; i += 2) {
-      String row = String.format(localFormatter, Integer.valueOf(i));
+      String row = String.format(localFormatter, i);
       KeyValue kv = new KeyValue(row.getBytes(), "family".getBytes(),
         "col".getBytes(), now, "value".getBytes());
       writer.append(kv);
     }
     writer.close();
-    
+
     StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, false);
     reader.loadFileInfo();
     reader.loadBloomfilter();
-    HFileScanner scanner = reader.getScanner(false, false);
+    StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
 
     // check false positives rate
     int falsePos = 0;
     int falseNeg = 0;
     for (int i = 0; i < 2000; i++) {
-      String row = String.format(localFormatter, Integer.valueOf(i));
+      String row = String.format(localFormatter, i);
       TreeSet<byte[]> columns = new TreeSet<byte[]>();
       columns.add("family:col".getBytes());
-      
-      boolean exists = scanner.shouldSeek(row.getBytes(), columns);
+
+      Scan scan = new Scan(row.getBytes(),row.getBytes());
+      scan.addColumn("family".getBytes(), "family:col".getBytes());
+      boolean exists = scanner.shouldSeek(scan, columns);
       if (i % 2 == 0) {
         if (!exists) falseNeg++;
       } else {
@@ -369,19 +372,19 @@ public class TestStoreFile extends HBase
     System.out.println("False positives: " + falsePos);
     assertTrue(falsePos < 2);
   }
-  
+
   public void testBloomTypes() throws Exception {
     float err = (float) 0.01;
     FileSystem fs = FileSystem.getLocal(conf);
     conf.setFloat("io.hfile.bloom.error.rate", err);
     conf.setBoolean("io.hfile.bloom.enabled", true);
-    
+
     int rowCount = 50;
     int colCount = 10;
     int versions = 2;
-    
+
     // run once using columns and once using rows
-    StoreFile.BloomType[] bt = 
+    StoreFile.BloomType[] bt =
       {StoreFile.BloomType.ROWCOL, StoreFile.BloomType.ROW};
     int[] expKeys    = {rowCount*colCount, rowCount};
     // below line deserves commentary.  it is expected bloom false positives
@@ -393,19 +396,19 @@ public class TestStoreFile extends HBase
     for (int x : new int[]{0,1}) {
       // write the file
       Path f = new Path(ROOT_DIR, getName());
-      StoreFile.Writer writer = new StoreFile.Writer(fs, f, 
-          StoreFile.DEFAULT_BLOCKSIZE_SMALL, 
+      StoreFile.Writer writer = new StoreFile.Writer(fs, f,
+          StoreFile.DEFAULT_BLOCKSIZE_SMALL,
           HFile.DEFAULT_COMPRESSION_ALGORITHM,
           conf, KeyValue.COMPARATOR, bt[x], expKeys[x]);
-  
+
       long now = System.currentTimeMillis();
       for (int i = 0; i < rowCount*2; i += 2) { // rows
         for (int j = 0; j < colCount*2; j += 2) {   // column qualifiers
-          String row = String.format(localFormatter, Integer.valueOf(i));
-          String col = String.format(localFormatter, Integer.valueOf(j)); 
-          for (int k= 0; k < versions; ++k) { // versions 
-            KeyValue kv = new KeyValue(row.getBytes(), 
-              "family".getBytes(), ("col" + col).getBytes(), 
+          String row = String.format(localFormatter, i);
+          String col = String.format(localFormatter, j);
+          for (int k= 0; k < versions; ++k) { // versions
+            KeyValue kv = new KeyValue(row.getBytes(),
+              "family".getBytes(), ("col" + col).getBytes(),
                 now-k, Bytes.toBytes((long)-1));
             writer.append(kv);
           }
@@ -416,20 +419,22 @@ public class TestStoreFile extends HBase
       StoreFile.Reader reader = new StoreFile.Reader(fs, f, null, false);
       reader.loadFileInfo();
       reader.loadBloomfilter();
-      HFileScanner scanner = reader.getScanner(false, false);
-      assertEquals(expKeys[x], reader.getBloomFilter().getKeyCount());
-      
+      StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
+      assertEquals(expKeys[x], reader.bloomFilter.getKeyCount());
+
       // check false positives rate
       int falsePos = 0;
       int falseNeg = 0;
       for (int i = 0; i < rowCount*2; ++i) { // rows
         for (int j = 0; j < colCount*2; ++j) {   // column qualifiers
-          String row = String.format(localFormatter, Integer.valueOf(i));
-          String col = String.format(localFormatter, Integer.valueOf(j)); 
+          String row = String.format(localFormatter, i);
+          String col = String.format(localFormatter, j);
           TreeSet<byte[]> columns = new TreeSet<byte[]>();
           columns.add(("col" + col).getBytes());
 
-          boolean exists = scanner.shouldSeek(row.getBytes(), columns);
+          Scan scan = new Scan(row.getBytes(),row.getBytes());
+          scan.addColumn("family".getBytes(), ("col"+col).getBytes());
+          boolean exists = scanner.shouldSeek(scan, columns);
           boolean shouldRowExist = i % 2 == 0;
           boolean shouldColExist = j % 2 == 0;
           shouldColExist = shouldColExist || bt[x] == StoreFile.BloomType.ROW;
@@ -448,7 +453,6 @@ public class TestStoreFile extends HBase
       assertEquals(0, falseNeg);
       assertTrue(falsePos < 2*expErr[x]);
     }
-    
   }
   
   public void testFlushTimeComparator() {
@@ -499,4 +503,76 @@ public class TestStoreFile extends HBase
     return mock;
   }
 
+  /**
+   * Generate a list of KeyValues for testing based on given parameters
+   * @param timestamps one KeyValue per timestamp is generated for each row
+   * @param numRows number of rows to generate
+   * @param qualifier column qualifier for every KeyValue
+   * @param family column family for every KeyValue
+   * @return the generated list of KeyValues
+   */
+  List<KeyValue> getKeyValueSet(long[] timestamps, int numRows,
+      byte[] qualifier, byte[] family) {
+    List<KeyValue> kvList = new ArrayList<KeyValue>();
+    for (int i=1;i<=numRows;i++) {
+      byte[] b = Bytes.toBytes(i);
+      LOG.info(Bytes.toString(b));
+      for (long timestamp: timestamps) {
+        kvList.add(new KeyValue(b, family, qualifier, timestamp, b));
+      }
+    }
+    return kvList;
+  }
+
+  /**
+   * Test to ensure correctness when using StoreFile with multiple timestamps
+   * @throws IOException
+   */
+  public void testMultipleTimestamps() throws IOException {
+    byte[] family = Bytes.toBytes("familyname");
+    byte[] qualifier = Bytes.toBytes("qualifier");
+    int numRows = 10;
+    long[] timestamps = new long[] {20,10,5,1};
+    Scan scan = new Scan();
+
+    Path storedir = new Path(new Path(this.testDir, "regionname"),
+    "familyname");
+    Path dir = new Path(storedir, "1234567890");
+    StoreFile.Writer writer = StoreFile.createWriter(this.fs, dir, 8 * 1024);
+
+    List<KeyValue> kvList = getKeyValueSet(timestamps, numRows,
+        qualifier, family);
+
+    for (KeyValue kv : kvList) {
+      writer.append(kv);
+    }
+    writer.appendMetadata(0, false);
+    writer.close();
+
+    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), true, conf,
+        StoreFile.BloomType.NONE, false);
+    StoreFile.Reader reader = hsf.createReader();
+    StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
+    TreeSet<byte[]> columns = new TreeSet<byte[]>();
+    columns.add(qualifier);
+
+    scan.setTimeRange(20, 100);
+    assertTrue(scanner.shouldSeek(scan, columns));
+
+    scan.setTimeRange(1, 2);
+    assertTrue(scanner.shouldSeek(scan, columns));
+
+    scan.setTimeRange(8, 10);
+    assertTrue(scanner.shouldSeek(scan, columns));
+
+    scan.setTimeRange(7, 50);
+    assertTrue(scanner.shouldSeek(scan, columns));
+
+    /* This test is not required for correctness, but it should pass when
+     * timestamp range optimization is on. */
+    //scan.setTimeRange(27, 50);
+    //assertTrue(!scanner.shouldSeek(scan, columns));
+  }
 }

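The shouldSeek changes above replace the bare row argument with a Scan, so a store file can be ruled out by its timestamp range as well as by its bloom filter. A minimal sketch of the new call pattern; it assumes placement in the org.apache.hadoop.hbase.regionserver package, alongside the tests, and the method name and literal time range are illustrative:

    package org.apache.hadoop.hbase.regionserver;

    import java.io.IOException;
    import java.util.TreeSet;

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ShouldSeekSketch {
      /** True if the file behind 'scanner' might hold data for this row and column. */
      static boolean mightHaveData(StoreFileScanner scanner, byte[] row,
          byte[] family, byte[] qualifier) throws IOException {
        TreeSet<byte[]> columns = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
        columns.add(qualifier);
        Scan scan = new Scan(row, row);  // row bounds for a single-row lookup
        scan.addColumn(family, qualifier);
        scan.setTimeRange(20, 100);      // half-open: a file wholly outside [20, 100) can be skipped
        return scanner.shouldSeek(scan, columns);
      }
    }

With BloomType.NONE only the time-range check can prune, which is presumably why testMultipleTimestamps above builds its store file without a bloom filter.
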
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java Tue Sep 27 02:41:56 2011
@@ -25,6 +25,10 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueTestUtil;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.mockito.Mockito;
+import org.mockito.stubbing.OngoingStubbing;
+
+import com.google.common.collect.Lists;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -105,7 +109,6 @@ public class TestStoreScanner extends Te
     results = new ArrayList<KeyValue>();
     assertEquals(true, scan.next(results));
     assertEquals(3, results.size());
-
   }
 
   public void testScanSameTimestamp() throws IOException {
@@ -136,6 +139,7 @@ public class TestStoreScanner extends Te
    * This test shows exactly how the matcher's return codes confuse the StoreScanner
    * and prevent it from doing the right thing.  Seeking once, then nexting twice
    * should return R1, then R2, but in this case it doesn't.
+   * TODO the comment above seems outdated; the scanner appears to do the right thing.
    * @throws IOException
    */
   public void testWontNextToNext() throws IOException {
@@ -252,7 +256,7 @@ public class TestStoreScanner extends Te
         KeyValueTestUtil.create("R2", "cf", "z", now, KeyValue.Type.Put, "dont-care")
     };
     List<KeyValueScanner> scanners = scanFixture(kvs1, kvs2);
-    
+
     Scan scanSpec = new Scan(Bytes.toBytes("R1")).setMaxVersions(2);
     StoreScanner scan =
       new StoreScanner(scanSpec, CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
@@ -280,6 +284,7 @@ public class TestStoreScanner extends Te
     assertEquals(kvs[0], results.get(0));
     assertEquals(kvs[1], results.get(1));
   }
+
   public void testWildCardScannerUnderDeletes() throws IOException {
     KeyValue [] kvs = new KeyValue [] {
         KeyValueTestUtil.create("R1", "cf", "a", 2, KeyValue.Type.Put, "dont-care"), // inc
@@ -312,6 +317,7 @@ public class TestStoreScanner extends Te
     assertEquals(kvs[6], results.get(3));
     assertEquals(kvs[7], results.get(4));
   }
+
   public void testDeleteFamily() throws IOException {
     KeyValue [] kvs = new KeyValue[] {
         KeyValueTestUtil.create("R1", "cf", "a", 100, KeyValue.Type.DeleteFamily, "dont-care"),
@@ -358,8 +364,7 @@ public class TestStoreScanner extends Te
     assertEquals(kvs[3], results.get(0));
   }
 
-  public void testSkipColumn() throws IOException {
-    KeyValue [] kvs = new KeyValue[] {
+  private static final KeyValue[] kvs = new KeyValue[] {
         KeyValueTestUtil.create("R1", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
         KeyValueTestUtil.create("R1", "cf", "b", 11, KeyValue.Type.Put, "dont-care"),
         KeyValueTestUtil.create("R1", "cf", "c", 11, KeyValue.Type.Put, "dont-care"),
@@ -371,6 +376,8 @@ public class TestStoreScanner extends Te
         KeyValueTestUtil.create("R1", "cf", "i", 11, KeyValue.Type.Put, "dont-care"),
         KeyValueTestUtil.create("R2", "cf", "a", 11, KeyValue.Type.Put, "dont-care"),
     };
+
+  public void testSkipColumn() throws IOException {
     List<KeyValueScanner> scanners = scanFixture(kvs);
     StoreScanner scan =
       new StoreScanner(new Scan(), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
@@ -390,9 +397,9 @@ public class TestStoreScanner extends Te
     results.clear();
     assertEquals(false, scan.next(results));
   }
-  
+
   /*
-   * Test expiration of KeyValues in combination with a configured TTL for 
+   * Test expiration of KeyValues in combination with a configured TTL for
    * a column family (as should be triggered in a major compaction).
    */
   public void testWildCardTtlScan() throws IOException {
@@ -430,4 +437,39 @@ public class TestStoreScanner extends Te
 
     assertEquals(false, scanner.next(results));
   }
+
+  public void testScannerReseekDoesntNPE() throws Exception {
+    List<KeyValueScanner> scanners = scanFixture(kvs);
+    StoreScanner scan =
+        new StoreScanner(new Scan(), CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
+            getCols("a", "d"), scanners);
+
+
+    // Previously, calling updateReaders twice in a row would cause an NPE.  In this test it
+    // would also normally cause an NPE because scan.store is null.  So as long as we get
+    // through these two calls we are good and the bug is quashed.
+
+    scan.updateReaders();
+
+    scan.updateReaders();
+
+    scan.peek();
+  }
+
+
+  /**
+   * TODO this fails, since we don't handle deletions, etc., in peek
+   */
+  public void SKIP_testPeek() throws Exception {
+    KeyValue [] kvs = new KeyValue [] {
+        KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Put, "dont-care"),
+        KeyValueTestUtil.create("R1", "cf", "a", 1, KeyValue.Type.Delete, "dont-care"),
+    };
+    List<KeyValueScanner> scanners = scanFixture(kvs);
+    Scan scanSpec = new Scan(Bytes.toBytes("R1"));
+    StoreScanner scan =
+      new StoreScanner(scanSpec, CF, Long.MAX_VALUE, KeyValue.COMPARATOR,
+          getCols("a"), scanners);
+    assertNull(scan.peek());
+  }
 }

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/regionserver/TestWildcardColumnTracker.java Tue Sep 27 02:41:56 2011
@@ -28,19 +28,18 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.regionserver.QueryMatcher.MatchCode;
 import org.apache.hadoop.hbase.util.Bytes;
 
-public class TestWildcardColumnTracker extends HBaseTestCase
-implements HConstants {
+public class TestWildcardColumnTracker extends HBaseTestCase {
   private boolean PRINT = false;
 
   public void testGet_SingleVersion() {
-    if(PRINT) {
+    if (PRINT) {
       System.out.println("SingleVersion");
     }
-    byte [] col1 = Bytes.toBytes("col1");
-    byte [] col2 = Bytes.toBytes("col2");
-    byte [] col3 = Bytes.toBytes("col3");
-    byte [] col4 = Bytes.toBytes("col4");
-    byte [] col5 = Bytes.toBytes("col5");
+    byte[] col1 = Bytes.toBytes("col1");
+    byte[] col2 = Bytes.toBytes("col2");
+    byte[] col3 = Bytes.toBytes("col3");
+    byte[] col4 = Bytes.toBytes("col4");
+    byte[] col5 = Bytes.toBytes("col5");
 
     //Create tracker
     List<MatchCode> expected = new ArrayList<MatchCode>();


