hbase-commits mailing list archives

From nspiegelb...@apache.org
Subject svn commit: r1176177 [11/13] - in /hbase/branches/0.89: ./ bin/ bin/replication/ docs/ src/ src/assembly/ src/docs/ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/avro/ src/main/java/org/apache/hadoop/hbase/avro/generated/...
Date Tue, 27 Sep 2011 02:42:01 GMT
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/TestMultiParallelPut.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/TestMultiParallelPut.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/TestMultiParallelPut.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/TestMultiParallelPut.java Tue Sep 27 02:41:56 2011
@@ -20,6 +20,8 @@
 
 package org.apache.hadoop.hbase;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
@@ -31,6 +33,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 public class TestMultiParallelPut extends MultiRegionTable {
+  final Log LOG = LogFactory.getLog(getClass());
   private static final byte[] VALUE = Bytes.toBytes("value");
   private static final byte[] QUALIFIER = Bytes.toBytes("qual");
   private static final String FAMILY = "family";
@@ -42,7 +45,6 @@ public class TestMultiParallelPut extend
     super(2, FAMILY);
     desc = new HTableDescriptor(TEST_TABLE);
     desc.addFamily(new HColumnDescriptor(FAMILY));
-
     makeKeys();
   }
 
@@ -51,7 +53,6 @@ public class TestMultiParallelPut extend
       byte [] cp = new byte[k.length+1];
       System.arraycopy(k, 0, cp, 0, k.length);
       cp[k.length] = 1;
-
       keys.add(cp);
     }
   }
@@ -59,35 +60,33 @@ public class TestMultiParallelPut extend
   List<byte[]> keys = new ArrayList<byte[]>();
 
   public void testParallelPut() throws Exception {
+    LOG.info("Starting testParallelPut");
     doATest(false);
   }
+
   public void testParallelPutWithRSAbort() throws Exception {
+    LOG.info("Starting testParallelPutWithRSAbort");
     doATest(true);
   }
 
   public void doATest(boolean doAbort) throws Exception {
-
     HTable table = new HTable(TEST_TABLE);
     table.setAutoFlush(false);
     table.setWriteBufferSize(10 * 1024 * 1024);
-
     for ( byte [] k : keys ) {
       Put put = new Put(k);
       put.add(BYTES_FAMILY, QUALIFIER, VALUE);
-
       table.put(put);
     }
-
     table.flushCommits();
 
     if (doAbort) {
+      LOG.info("Aborting...");
       cluster.abortRegionServer(0);
-
       // try putting more keys after the abort.
       for ( byte [] k : keys ) {
         Put put = new Put(k);
         put.add(BYTES_FAMILY, QUALIFIER, VALUE);
-
         table.put(put);
       }
       table.flushCommits();
@@ -96,9 +95,7 @@ public class TestMultiParallelPut extend
     for (byte [] k : keys ) {
       Get get = new Get(k);
       get.addColumn(BYTES_FAMILY, QUALIFIER);
-
       Result r = table.get(get);
-
       assertTrue(r.containsColumn(BYTES_FAMILY, QUALIFIER));
       assertEquals(0,
           Bytes.compareTo(VALUE,
@@ -107,16 +104,13 @@ public class TestMultiParallelPut extend
 
     HBaseAdmin admin = new HBaseAdmin(conf);
     ClusterStatus cs = admin.getClusterStatus();
-
     int expectedServerCount = 2;
-    if (doAbort)
-      expectedServerCount = 1;
-
+    if (doAbort) expectedServerCount = 1;
+    LOG.info("ClusterStatus server count " + cs.getServers());
     assertEquals(expectedServerCount, cs.getServers());
     for ( HServerInfo info : cs.getServerInfo()) {
-      System.out.println(info);
-      assertTrue( info.getLoad().getNumberOfRegions() > 10);
+      LOG.info("Info from clusterstatus=" + info);
+      assertTrue(info.getLoad().getNumberOfRegions() > 8);
     }
   }
-
-}
+}
\ No newline at end of file
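
The substance of this change is swapping System.out.println for commons-logging and adding start-of-test log lines. For reference, the logging pattern the diff adopts, shown in isolation (class name hypothetical; assumes commons-logging on the classpath):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class ExampleLoggingTest {
      // Logger keyed to the runtime class, as in TestMultiParallelPut above.
      final Log LOG = LogFactory.getLog(getClass());

      public void doSomething() {
        LOG.info("Starting doSomething");  // replaces System.out.println(...)
      }
    }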

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java Tue Sep 27 02:41:56 2011
@@ -51,7 +51,7 @@ public class TestZooKeeper {
   private final static HBaseTestingUtility
       TEST_UTIL = new HBaseTestingUtility();
 
-  private Configuration    conf;
+  private static Configuration conf;
 
   /**
    * @throws java.lang.Exception
@@ -61,7 +61,8 @@ public class TestZooKeeper {
     // Test we can first start the ZK cluster by itself
     TEST_UTIL.startMiniZKCluster();
     TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
-    TEST_UTIL.startMiniCluster(1);
+    TEST_UTIL.startMiniCluster(2);
+    conf = TEST_UTIL.getConfiguration();
   }
 
   /**
@@ -77,16 +78,10 @@ public class TestZooKeeper {
    */
   @Before
   public void setUp() throws Exception {
-    conf = TEST_UTIL.getConfiguration();
+    TEST_UTIL.ensureSomeRegionServersAvailable(2);
   }
 
   /**
-   * @throws java.lang.Exception
-   */
-  @After
-  public void tearDown() throws Exception {}
-
-  /**
    * See HBASE-1232 and http://wiki.apache.org/hadoop/ZooKeeper/FAQ#4.
    * @throws IOException
    * @throws InterruptedException
@@ -96,7 +91,9 @@ public class TestZooKeeper {
       throws IOException, InterruptedException {
     new HTable(conf, HConstants.META_TABLE_NAME);
 
-    ZooKeeperWrapper zkw = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
+    ZooKeeperWrapper zkw =
+        ZooKeeperWrapper.createInstance(conf, TestZooKeeper.class.getName());
+    zkw.registerListener(EmptyWatcher.instance);
     String quorumServers = zkw.getQuorumServers();
     int sessionTimeout = 5 * 1000; // 5 seconds
     HConnection connection = HConnectionManager.getConnection(conf);
@@ -117,14 +114,12 @@ public class TestZooKeeper {
   public void testRegionServerSessionExpired() throws Exception{
     LOG.info("Starting testRegionServerSessionExpired");
     new HTable(conf, HConstants.META_TABLE_NAME);
-    TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getConfiguration().
-      setBoolean("hbase.regionserver.restart.on.zk.expire", true);
     TEST_UTIL.expireRegionServerSession(0);
     testSanity();
   }
   @Test
   public void testMasterSessionExpired() throws Exception {
-    LOG.info("Starting testRegionServerSessionExpired");
+    LOG.info("Starting testMasterSessionExpired");
     new HTable(conf, HConstants.META_TABLE_NAME);
     TEST_UTIL.expireMasterSession();
     testSanity();
@@ -158,7 +153,7 @@ public class TestZooKeeper {
       HTable localMeta = new HTable(conf, HConstants.META_TABLE_NAME);
       Configuration otherConf = HBaseConfiguration.create(conf);
       otherConf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1");
-      HTable ipMeta = new HTable(conf, HConstants.META_TABLE_NAME);
+      HTable ipMeta = new HTable(otherConf, HConstants.META_TABLE_NAME);
 
       // dummy, just to open the connection
       localMeta.exists(new Get(HConstants.LAST_ROW));
@@ -184,7 +179,9 @@ public class TestZooKeeper {
    */
   @Test
   public void testZNodeDeletes() throws Exception {
-    ZooKeeperWrapper zkw = new ZooKeeperWrapper(conf, EmptyWatcher.instance);
+    ZooKeeperWrapper zkw =
+        ZooKeeperWrapper.createInstance(conf, TestZooKeeper.class.getName());
+    zkw.registerListener(EmptyWatcher.instance);
     zkw.ensureExists("/l1/l2/l3/l4");
     try {
       zkw.deleteZNode("/l1/l2");
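
The ZooKeeperWrapper change above is the recurring pattern in this commit: the removed two-argument constructor is replaced by a named factory instance plus an explicitly registered listener. A minimal sketch built only from the calls visible in the diff (package path assumed from the 0.89 tree; EmptyWatcher is a test-scope Watcher):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;

    public class ZkWrapperSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Named instance instead of the removed new ZooKeeperWrapper(conf, watcher).
        ZooKeeperWrapper zkw =
            ZooKeeperWrapper.createInstance(conf, ZkWrapperSketch.class.getName());
        // Watchers are now attached explicitly after creation, e.g.:
        // zkw.registerListener(EmptyWatcher.instance);
        System.out.println("Quorum: " + zkw.getQuorumServers());
      }
    }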

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/avro/TestAvroServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/avro/TestAvroServer.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/avro/TestAvroServer.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/avro/TestAvroServer.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,225 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.avro;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericArray;
+import org.apache.avro.generic.GenericData;
+
+import org.apache.hadoop.hbase.avro.generated.AColumn;
+import org.apache.hadoop.hbase.avro.generated.AColumnValue;
+import org.apache.hadoop.hbase.avro.generated.AFamilyDescriptor;
+import org.apache.hadoop.hbase.avro.generated.AGet;
+import org.apache.hadoop.hbase.avro.generated.APut;
+import org.apache.hadoop.hbase.avro.generated.AResult;
+import org.apache.hadoop.hbase.avro.generated.ATableDescriptor;
+
+/**
+ * Unit testing for AvroServer.HBaseImpl, a part of the
+ * org.apache.hadoop.hbase.avro package.
+ */
+public class TestAvroServer {
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  // Static names for tables, columns, rows, and values
+  // TODO(hammer): Better style to define these in test method?
+  private static ByteBuffer tableAname = ByteBuffer.wrap(Bytes.toBytes("tableA"));
+  private static ByteBuffer tableBname = ByteBuffer.wrap(Bytes.toBytes("tableB"));
+  private static ByteBuffer familyAname = ByteBuffer.wrap(Bytes.toBytes("FamilyA"));
+  private static ByteBuffer qualifierAname = ByteBuffer.wrap(Bytes.toBytes("QualifierA"));
+  private static ByteBuffer rowAname = ByteBuffer.wrap(Bytes.toBytes("RowA"));
+  private static ByteBuffer valueA = ByteBuffer.wrap(Bytes.toBytes("ValueA"));
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(3);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+    // Nothing to do.
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @After
+  public void tearDown() throws Exception {
+    // Nothing to do.
+  }
+
+  /**
+   * Tests for creating, enabling, disabling, modifying, and deleting tables.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testTableAdminAndMetadata() throws Exception {
+    AvroServer.HBaseImpl impl = new AvroServer.HBaseImpl();
+
+    assertEquals(0, impl.listTables().size());
+
+    ATableDescriptor tableA = new ATableDescriptor();
+    tableA.name = tableAname;
+    impl.createTable(tableA);
+    assertEquals(impl.listTables().size(), 1);
+    assertTrue(impl.isTableEnabled(tableAname));
+    assertTrue(impl.tableExists(tableAname));
+
+    ATableDescriptor tableB = new ATableDescriptor();
+    tableB.name = tableBname;
+    impl.createTable(tableB);
+    assertEquals(impl.listTables().size(), 2);
+
+    impl.disableTable(tableBname);
+    assertFalse(impl.isTableEnabled(tableBname));
+
+    impl.deleteTable(tableBname);
+    assertEquals(impl.listTables().size(), 1);
+
+    impl.disableTable(tableAname);
+    assertFalse(impl.isTableEnabled(tableAname));
+
+    tableA.maxFileSize = 123456L;
+    impl.modifyTable(tableAname, tableA);
+    assertEquals((long) impl.describeTable(tableAname).maxFileSize, 123456L);
+
+    impl.enableTable(tableAname);
+    assertTrue(impl.isTableEnabled(tableAname));
+    impl.disableTable(tableAname);
+    impl.deleteTable(tableAname);
+  }
+
+  /**
+   * Tests for creating, modifying, and deleting column families.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testFamilyAdminAndMetadata() throws Exception {
+    AvroServer.HBaseImpl impl = new AvroServer.HBaseImpl();
+
+    ATableDescriptor tableA = new ATableDescriptor();
+    tableA.name = tableAname;
+    AFamilyDescriptor familyA = new AFamilyDescriptor();
+    familyA.name = familyAname;
+    Schema familyArraySchema = Schema.createArray(AFamilyDescriptor.SCHEMA$);
+    GenericArray<AFamilyDescriptor> families = new GenericData.Array<AFamilyDescriptor>(1, familyArraySchema);
+    families.add(familyA);
+    tableA.families = families;
+    impl.createTable(tableA);
+    assertEquals(impl.describeTable(tableAname).families.size(), 1);
+
+    impl.disableTable(tableAname);
+    assertFalse(impl.isTableEnabled(tableAname));
+
+    familyA.maxVersions = 123456;
+    impl.modifyFamily(tableAname, familyAname, familyA);
+    assertEquals((int) impl.describeFamily(tableAname, familyAname).maxVersions, 123456);
+
+    impl.deleteFamily(tableAname, familyAname);
+    assertEquals(impl.describeTable(tableAname).families.size(), 0);
+
+    impl.disableTable(tableAname);
+    impl.deleteTable(tableAname);
+  }
+
+  /**
+   * Tests for adding, reading, and deleting data.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testDML() throws Exception {
+    AvroServer.HBaseImpl impl = new AvroServer.HBaseImpl();
+
+    ATableDescriptor tableA = new ATableDescriptor();
+    tableA.name = tableAname;
+    AFamilyDescriptor familyA = new AFamilyDescriptor();
+    familyA.name = familyAname;
+    Schema familyArraySchema = Schema.createArray(AFamilyDescriptor.SCHEMA$);
+    GenericArray<AFamilyDescriptor> families = new GenericData.Array<AFamilyDescriptor>(1, familyArraySchema);
+    families.add(familyA);
+    tableA.families = families;
+    impl.createTable(tableA);
+    assertEquals(impl.describeTable(tableAname).families.size(), 1);
+
+    AGet getA = new AGet();
+    getA.row = rowAname;
+    Schema columnsSchema = Schema.createArray(AColumn.SCHEMA$);
+    GenericArray<AColumn> columns = new GenericData.Array<AColumn>(1, columnsSchema);
+    AColumn column = new AColumn();
+    column.family = familyAname;
+    column.qualifier = qualifierAname;
+    columns.add(column);
+    getA.columns = columns;
+
+    assertFalse(impl.exists(tableAname, getA));
+
+    APut putA = new APut();
+    putA.row = rowAname;
+    Schema columnValuesSchema = Schema.createArray(AColumnValue.SCHEMA$);
+    GenericArray<AColumnValue> columnValues = new GenericData.Array<AColumnValue>(1, columnValuesSchema);
+    AColumnValue acv = new AColumnValue();
+    acv.family = familyAname;
+    acv.qualifier = qualifierAname;
+    acv.value = valueA;
+    columnValues.add(acv);
+    putA.columnValues = columnValues;
+
+    impl.put(tableAname, putA);
+    assertTrue(impl.exists(tableAname, getA));
+
+    assertEquals(impl.get(tableAname, getA).entries.size(), 1);
+
+    impl.disableTable(tableAname);
+    impl.deleteTable(tableAname);
+  }
+}
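
The Avro-generated records used above follow one construction idiom throughout: collections are Avro GenericArrays created against an element schema. A hedged sketch of building the APut that testDML exercises, using nothing beyond the APIs visible above:

    import java.nio.ByteBuffer;
    import org.apache.avro.Schema;
    import org.apache.avro.generic.GenericArray;
    import org.apache.avro.generic.GenericData;
    import org.apache.hadoop.hbase.avro.generated.AColumnValue;
    import org.apache.hadoop.hbase.avro.generated.APut;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AvroPutSketch {
      public static APut makePut() {
        APut put = new APut();
        put.row = ByteBuffer.wrap(Bytes.toBytes("RowA"));
        // Avro generic arrays need the element schema up front.
        Schema schema = Schema.createArray(AColumnValue.SCHEMA$);
        GenericArray<AColumnValue> values =
            new GenericData.Array<AColumnValue>(1, schema);
        AColumnValue cv = new AColumnValue();
        cv.family = ByteBuffer.wrap(Bytes.toBytes("FamilyA"));
        cv.qualifier = ByteBuffer.wrap(Bytes.toBytes("QualifierA"));
        cv.value = ByteBuffer.wrap(Bytes.toBytes("ValueA"));
        values.add(cv);
        put.columnValues = values;
        return put;
      }
    }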

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java Tue Sep 27 02:41:56 2011
@@ -25,6 +25,11 @@ import static org.junit.Assert.assertSam
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashSet;
@@ -35,6 +40,7 @@ import java.util.UUID;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -3541,4 +3547,148 @@ public class TestFromClientSide {
 
     assertTrue(scan.getFamilyMap().get(FAMILY).size() == 0);
   }
-}
\ No newline at end of file
+
+  /**
+   * HBASE-2468 use case 1 and 2: region info de/serialization
+   */
+   @Test
+   public void testRegionCacheDeSerialization() throws Exception {
+     // 1. test serialization.
+     LOG.info("Starting testRegionCacheDeSerialization");
+     final byte[] TABLENAME = Bytes.toBytes("testCachePrewarm2");
+     final byte[] FAMILY = Bytes.toBytes("family");
+     Configuration conf = TEST_UTIL.getConfiguration();
+     TEST_UTIL.createTable(TABLENAME, FAMILY);
+
+     // Set up test table:
+     // Create table:
+     HTable table = new HTable(conf, TABLENAME);
+
+     // Create multiple regions for this table
+     TEST_UTIL.createMultiRegions(table, FAMILY);
+
+     Path tempPath = new Path(HBaseTestingUtility.getTestDir(), "regions.dat");
+
+     final String tempFileName = tempPath.toString();
+
+     FileOutputStream fos = new FileOutputStream(tempFileName);
+     DataOutputStream dos = new DataOutputStream(fos);
+
+     // serialize the region info and output to a local file.
+     table.serializeRegionInfo(dos);
+     dos.flush();
+     dos.close();
+
+     // read a local file and deserialize the region info from it.
+     FileInputStream fis = new FileInputStream(tempFileName);
+     DataInputStream dis = new DataInputStream(fis);
+
+     Map<HRegionInfo, HServerAddress> deserRegions =
+       table.deserializeRegionInfo(dis);
+     dis.close();
+
+     // regions obtained from meta scanner.
+     Map<HRegionInfo, HServerAddress> loadedRegions =
+       table.getRegionsInfo();
+
+     // set the deserialized regions to the global cache.
+     table.getConnection().clearRegionCache();
+
+     table.getConnection().prewarmRegionCache(table.getTableName(),
+         deserRegions);
+
+     // verify whether the 2 maps are identical or not.
+     assertEquals("Number of cached region is incorrect",
+         HConnectionManager.getCachedRegionCount(conf, TABLENAME),
+         loadedRegions.size());
+
+     // verify each region is prefetched or not.
+     for (Map.Entry<HRegionInfo, HServerAddress> e: loadedRegions.entrySet()) {
+       HRegionInfo hri = e.getKey();
+       assertTrue(HConnectionManager.isRegionCached(conf,
+           hri.getTableDesc().getName(), hri.getStartKey()));
+     }
+
+     // delete the temp file
+     File f = new java.io.File(tempFileName);
+     f.delete();
+     LOG.info("Finishing testRegionCacheDeSerialization");
+   }
+
+  /**
+   * HBASE-2468 use case 3:
+   */
+  @Test
+  public void testRegionCachePreWarm() throws Exception {
+    LOG.info("Starting testRegionCachePreWarm");
+    final byte [] TABLENAME = Bytes.toBytes("testCachePrewarm");
+    Configuration conf = TEST_UTIL.getConfiguration();
+
+    // Set up test table:
+    // Create table:
+    TEST_UTIL.createTable(TABLENAME, FAMILY);
+
+    // disable region cache for the table.
+    HTable.setRegionCachePrefetch(conf, TABLENAME, false);
+    assertFalse("The table is disabled for region cache prefetch",
+        HTable.getRegionCachePrefetch(conf, TABLENAME));
+
+    HTable table = new HTable(conf, TABLENAME);
+
+    // create many regions for the table.
+    TEST_UTIL.createMultiRegions(table, FAMILY);
+    // This count effectively waits until the regions have been
+    // fully assigned
+    TEST_UTIL.countRows(table);
+    table.getConnection().clearRegionCache();
+    assertEquals("Clearing cache should have 0 cached ", 0,
+        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
+
+    // A Get is supposed to do a region lookup request
+    Get g = new Get(Bytes.toBytes("aaa"));
+    table.get(g);
+
+    // only one region should be cached if the cache prefetch is disabled.
+    assertEquals("Number of cached region is incorrect ", 1,
+        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
+
+    // now we enable cached prefetch.
+    HTable.setRegionCachePrefetch(conf, TABLENAME, true);
+    assertTrue("The table is enabled for region cache prefetch",
+        HTable.getRegionCachePrefetch(conf, TABLENAME));
+
+    HTable.setRegionCachePrefetch(conf, TABLENAME, false);
+    assertFalse("The table is disabled for region cache prefetch",
+        HTable.getRegionCachePrefetch(conf, TABLENAME));
+
+    HTable.setRegionCachePrefetch(conf, TABLENAME, true);
+    assertTrue("The table is enabled for region cache prefetch",
+        HTable.getRegionCachePrefetch(conf, TABLENAME));
+
+    table.getConnection().clearRegionCache();
+
+    assertEquals("Number of cached region is incorrect ", 0,
+        HConnectionManager.getCachedRegionCount(conf, TABLENAME));
+
+    // if there is a cache miss, some additional regions should be prefetched.
+    Get g2 = new Get(Bytes.toBytes("bbb"));
+    table.get(g2);
+
+    // Get the configured number of cache read-ahead regions.  For various
+    // reasons, the meta may not yet have all regions in place (e.g. hbase-2757).
+    // That the prefetch gets at least half shows prefetch is basically working.
+    int prefetchRegionNumber = conf.getInt("hbase.client.prefetch.limit", 10) / 2;
+
+    // the total number of cached regions == region('aaa") + prefeched regions.
+    LOG.info("Testing how many regions cached");
+    assertTrue(prefetchRegionNumber < HConnectionManager.getCachedRegionCount(conf, TABLENAME));
+
+    table.getConnection().clearRegionCache();
+
+    Get g3 = new Get(Bytes.toBytes("abc"));
+    table.get(g3);
+    assertTrue(prefetchRegionNumber < HConnectionManager.getCachedRegionCount(conf, TABLENAME));
+
+    LOG.info("Finishing testRegionCachePreWarm");
+  }
+}
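
Condensed, the HBASE-2468 round trip that testRegionCacheDeSerialization walks through is: serialize a table's region cache to a stream, read it back, and pre-warm a cleared connection cache with it. A minimal sketch using only the HTable/HConnection calls shown above (file name hypothetical):

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HServerAddress;
    import org.apache.hadoop.hbase.client.HTable;

    public class RegionCacheSketch {
      static void roundTrip(Configuration conf, byte[] tableName) throws Exception {
        HTable table = new HTable(conf, tableName);
        // Write the client's region cache out...
        DataOutputStream dos =
            new DataOutputStream(new FileOutputStream("regions.dat"));
        table.serializeRegionInfo(dos);
        dos.close();
        // ...read it back and seed a freshly cleared cache with it.
        DataInputStream dis =
            new DataInputStream(new FileInputStream("regions.dat"));
        Map<HRegionInfo, HServerAddress> regions = table.deserializeRegionInfo(dis);
        dis.close();
        table.getConnection().clearRegionCache();
        table.getConnection().prewarmRegionCache(table.getTableName(), regions);
      }
    }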

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * This class is for testing HCM features
+ */
+public class TestHCM {
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final byte[] TABLE_NAME = Bytes.toBytes("test");
+  private static final byte[] FAM_NAM = Bytes.toBytes("f");
+  private static final byte[] ROW = Bytes.toBytes("bbb");
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(1);
+  }
+
+  /**
+   * Test that when we delete a location using the first row of a region
+   * that we really delete it.
+   * @throws Exception
+   */
+  @Test
+  public void testRegionCaching() throws Exception{
+    HTable table = TEST_UTIL.createTable(TABLE_NAME, FAM_NAM);
+    TEST_UTIL.createMultiRegions(table, FAM_NAM);
+    Put put = new Put(ROW);
+    put.add(FAM_NAM, ROW, ROW);
+    table.put(put);
+    HConnectionManager.TableServers conn =
+        (HConnectionManager.TableServers) table.getConnection();
+    assertNotNull(conn.getCachedLocation(TABLE_NAME, ROW));
+    conn.deleteCachedLocation(TABLE_NAME, ROW);
+    HRegionLocation rl = conn.getCachedLocation(TABLE_NAME, ROW);
+    assertNull("What is this location?? " + rl, rl);
+  }
+}

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,96 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.mockito.Mockito.*;
+
+public class TestMetaScanner {
+  final Log LOG = LogFactory.getLog(getClass());
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(1);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testMetaScanner() throws Exception {
+    LOG.info("Starting testMetaScanner");
+    final byte[] TABLENAME = Bytes.toBytes("testMetaScanner");
+    final byte[] FAMILY = Bytes.toBytes("family");
+    TEST_UTIL.createTable(TABLENAME, FAMILY);
+    Configuration conf = TEST_UTIL.getConfiguration();
+    HTable table = new HTable(conf, TABLENAME);
+    TEST_UTIL.createMultiRegions(conf, table, FAMILY,
+        new byte[][]{
+          HConstants.EMPTY_START_ROW,
+          Bytes.toBytes("region_a"),
+          Bytes.toBytes("region_b")});
+    // Make sure all the regions are deployed
+    TEST_UTIL.countRows(table);
+
+    MetaScanner.MetaScannerVisitor visitor =
+      mock(MetaScanner.MetaScannerVisitor.class);
+    doReturn(true).when(visitor).processRow((Result)anyObject());
+
+    // Scanning the entire table should give us three rows
+    MetaScanner.metaScan(conf, visitor, TABLENAME);
+    verify(visitor, times(3)).processRow((Result)anyObject());
+
+    // Scanning the table with a specified empty start row should also
+    // give us three META rows
+    reset(visitor);
+    doReturn(true).when(visitor).processRow((Result)anyObject());
+    MetaScanner.metaScan(conf, visitor, TABLENAME, HConstants.EMPTY_BYTE_ARRAY, 1000);
+    verify(visitor, times(3)).processRow((Result)anyObject());
+
+    // Scanning the table starting in the middle should give us two rows:
+    // region_a and region_b
+    reset(visitor);
+    doReturn(true).when(visitor).processRow((Result)anyObject());
+    MetaScanner.metaScan(conf, visitor, TABLENAME, Bytes.toBytes("region_ac"), 1000);
+    verify(visitor, times(2)).processRow((Result)anyObject());
+
+    // Scanning with a limit of 1 should only give us one row
+    reset(visitor);
+    doReturn(true).when(visitor).processRow((Result)anyObject());
+    MetaScanner.metaScan(conf, visitor, TABLENAME, Bytes.toBytes("region_ac"), 1);
+    verify(visitor, times(1)).processRow((Result)anyObject());
+
+  }
+}
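
The test above leans on a small Mockito idiom worth calling out: stub the visitor to keep returning true so the scan continues, run metaScan, then verify the visit count. A hedged sketch of just that skeleton (table name hypothetical; placed in the client package like the test so MetaScanner resolves):

    package org.apache.hadoop.hbase.client;

    import static org.mockito.Mockito.*;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaScanSketch {
      static void scanAndVerify(Configuration conf) throws Exception {
        MetaScanner.MetaScannerVisitor visitor =
            mock(MetaScanner.MetaScannerVisitor.class);
        // Returning true tells the scanner to keep visiting rows.
        doReturn(true).when(visitor).processRow((Result) anyObject());
        MetaScanner.metaScan(conf, visitor, Bytes.toBytes("someTable"));
        verify(visitor, atLeastOnce()).processRow((Result) anyObject());
      }
    }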

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,303 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Run tests related to reading cells at multiple timestamps/versions using
+ * HBase client APIs.
+ * Sets up the HBase mini cluster once at start. Each test creates a table
+ * named for the method and runs against it.
+ */
+public class TestMultipleTimestamps {
+  final Log LOG = LogFactory.getLog(getClass());
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(3);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+    // Nothing to do.
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @After
+  public void tearDown() throws Exception {
+    // Nothing to do.
+  }
+
+  @Test
+  public void testWithVersionDeletes() throws Exception {
+
+    // first test from memstore (without flushing).
+    testWithVersionDeletes(false);
+
+    // run same test against HFiles (by forcing a flush).
+    testWithVersionDeletes(true);
+  }
+
+  public void testWithVersionDeletes(boolean flushTables) throws IOException {
+    byte [] TABLE = Bytes.toBytes("testWithVersionDeletes_" +
+        (flushTables ? "flush" : "noflush"));
+    byte [] FAMILY = Bytes.toBytes("event_log");
+    byte [][] FAMILIES = new byte[][] { FAMILY };
+
+    // create table; set versions to max...
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES, Integer.MAX_VALUE);
+
+    // For row:0, col:0: insert versions 1 through 5.
+    putNVersions(ht, FAMILY, 0, 0, 1, 5);
+
+    if (flushTables) {
+      flush();
+    }
+
+    // delete version 4.
+    deleteOneVersion(ht, FAMILY, 0, 0, 4);
+
+    // request a bunch of versions including the deleted version. We should
+    // only get back entries for the versions that exist.
+    KeyValue kvs[] = getNVersions(ht, FAMILY, 0, 0, Arrays.asList(2L, 3L, 4L, 5L));
+    assertEquals(3, kvs.length);
+    checkOneCell(kvs[0], FAMILY, 0, 0, 5);
+    checkOneCell(kvs[1], FAMILY, 0, 0, 3);
+    checkOneCell(kvs[2], FAMILY, 0, 0, 2);
+  }
+
+  @Test
+  public void testWithMultipleVersionDeletes() throws IOException {
+    byte [] TABLE = Bytes.toBytes("testWithMultipleVersionDeletes");
+    byte [] FAMILY = Bytes.toBytes("event_log");
+    byte [][] FAMILIES = new byte[][] { FAMILY };
+
+    // create table; set versions to max...
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES, Integer.MAX_VALUE);
+
+    // For row:0, col:0: insert versions 1 through 5.
+    putNVersions(ht, FAMILY, 0, 0, 1, 5);
+
+    flush();
+
+    // delete all versions before 4.
+    deleteAllVersionsBefore(ht, FAMILY, 0, 0, 4);
+
+    // request a bunch of versions including the deleted version. We should
+    // only get back entries for the versions that exist.
+    KeyValue kvs[] = getNVersions(ht, FAMILY, 0, 0, Arrays.asList(2L, 3L));
+    assertEquals(0, kvs.length);
+  }
+
+  @Test
+  public void testWithColumnDeletes() throws IOException {
+    byte [] TABLE = Bytes.toBytes("testWithColumnDeletes");
+    byte [] FAMILY = Bytes.toBytes("event_log");
+    byte [][] FAMILIES = new byte[][] { FAMILY };
+
+    // create table; set versions to max...
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES, Integer.MAX_VALUE);
+
+    // For row:0, col:0: insert versions 1 through 5.
+    putNVersions(ht, FAMILY, 0, 0, 1, 5);
+
+    flush();
+
+    // delete all versions before 4.
+    deleteColumn(ht, FAMILY, 0, 0);
+
+    // request a bunch of versions including the deleted version. We should
+    // only get back entries for the versions that exist.
+    KeyValue kvs[] = getNVersions(ht, FAMILY, 0, 0, Arrays.asList(2L, 3L));
+    assertEquals(0, kvs.length);
+  }
+
+  @Test
+  public void testWithFamilyDeletes() throws IOException {
+    byte [] TABLE = Bytes.toBytes("testWithFamilyDeletes");
+    byte [] FAMILY = Bytes.toBytes("event_log");
+    byte [][] FAMILIES = new byte[][] { FAMILY };
+
+    // create table; set versions to max...
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES, Integer.MAX_VALUE);
+
+    // For row:0, col:0: insert versions 1 through 5.
+    putNVersions(ht, FAMILY, 0, 0, 1, 5);
+
+    flush();
+
+    // delete all versions before 4.
+    deleteFamily(ht, FAMILY, 0);
+
+    // request a bunch of versions including the deleted version. We should
+    // only get back entries for the versions that exist.
+    KeyValue kvs[] = getNVersions(ht, FAMILY, 0, 0, Arrays.asList(2L, 3L));
+    assertEquals(0, kvs.length);
+  }
+
+  // Flush tables. Since flushing is asynchronous, sleep for a bit.
+  private void flush() throws IOException {
+    TEST_UTIL.flush();
+    try {
+      Thread.sleep(3000);
+    } catch (InterruptedException i) {
+      // ignore
+    }
+  }
+
+  /**
+   * Assert that the passed in KeyValue has expected contents for the
+   * specified row, column & timestamp.
+   */
+  private void checkOneCell(KeyValue kv, byte[] cf,
+      int rowIdx, int colIdx, long ts) {
+
+    String ctx = "rowIdx=" + rowIdx + "; colIdx=" + colIdx + "; ts=" + ts;
+
+    assertEquals("Row mismatch which checking: " + ctx,
+        "row:"+ rowIdx, Bytes.toString(kv.getRow()));
+
+    assertEquals("ColumnFamily mismatch while checking: " + ctx,
+        Bytes.toString(cf), Bytes.toString(kv.getFamily()));
+
+    assertEquals("Column qualifier mismatch while checking: " + ctx,
+        "column:" + colIdx,
+        Bytes.toString(kv.getQualifier()));
+
+    assertEquals("Timestamp mismatch while checking: " + ctx,
+        ts, kv.getTimestamp());
+
+    assertEquals("Value mismatch while checking: " + ctx,
+        "value-version-" + ts, Bytes.toString(kv.getValue()));
+  }
+
+  /**
+   * Uses the TimestampFilter on a Get to request a specified list of
+   * versions for the row/column specified by rowIdx & colIdx.
+   *
+   */
+  private KeyValue[] getNVersions(HTable ht, byte[] cf, int rowIdx,
+      int colIdx, List<Long> versions)
+  throws IOException {
+    byte row[] = Bytes.toBytes("row:" + rowIdx);
+    byte column[] = Bytes.toBytes("column:" + colIdx);
+    Get get = new Get(row);
+    get.addColumn(cf, column);
+    get.setMaxVersions();
+    get.setTimeRange(Collections.min(versions), Collections.max(versions)+1);
+    Result result = ht.get(get);
+
+    return result.raw();
+  }
+
+  /**
+   * Insert in specific row/column versions with timestamps
+   * versionStart..versionEnd.
+   */
+  private void putNVersions(HTable ht, byte[] cf, int rowIdx, int colIdx,
+      long versionStart, long versionEnd)
+  throws IOException {
+    byte row[] = Bytes.toBytes("row:" + rowIdx);
+    byte column[] = Bytes.toBytes("column:" + colIdx);
+    Put put = new Put(row);
+
+    for (long idx = versionStart; idx <= versionEnd; idx++) {
+      put.add(cf, column, idx, Bytes.toBytes("value-version-" + idx));
+    }
+
+    ht.put(put);
+  }
+
+  /**
+   * For row/column specified by rowIdx/colIdx, delete the cell
+   * corresponding to the specified version.
+   */
+  private void deleteOneVersion(HTable ht, byte[] cf, int rowIdx,
+      int colIdx, long version)
+  throws IOException {
+    byte row[] = Bytes.toBytes("row:" + rowIdx);
+    byte column[] = Bytes.toBytes("column:" + colIdx);
+    Delete del = new Delete(row);
+    del.deleteColumn(cf, column, version);
+    ht.delete(del);
+  }
+
+  /**
+   * For row/column specified by rowIdx/colIdx, delete all cells
+   * preceding the specified version.
+   */
+  private void deleteAllVersionsBefore(HTable ht, byte[] cf, int rowIdx,
+      int colIdx, long version)
+  throws IOException {
+    byte row[] = Bytes.toBytes("row:" + rowIdx);
+    byte column[] = Bytes.toBytes("column:" + colIdx);
+    Delete del = new Delete(row);
+    del.deleteColumns(cf, column, version);
+    ht.delete(del);
+  }
+
+  private void deleteColumn(HTable ht, byte[] cf, int rowIdx, int colIdx) throws IOException {
+    byte row[] = Bytes.toBytes("row:" + rowIdx);
+    byte column[] = Bytes.toBytes("column:" + colIdx);
+    Delete del = new Delete(row);
+    del.deleteColumns(cf, column);
+    ht.delete(del);
+  }
+
+  private void deleteFamily(HTable ht, byte[] cf, int rowIdx) throws IOException {
+    byte row[] = Bytes.toBytes("row:" + rowIdx);
+    Delete del = new Delete(row);
+    del.deleteFamily(cf);
+    ht.delete(del);
+  }
+}
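
Unlike TestTimestampsFilter later in this commit, this class selects versions with Get.setTimeRange plus setMaxVersions rather than a filter. The essential read path, reduced to a sketch with the test's row/column naming:

    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TimeRangeReadSketch {
      static KeyValue[] readVersions(HTable ht, List<Long> versions)
          throws IOException {
        Get get = new Get(Bytes.toBytes("row:0"));
        get.addColumn(Bytes.toBytes("event_log"), Bytes.toBytes("column:0"));
        get.setMaxVersions();
        // Time ranges are [min, max), hence the +1 on the upper bound.
        get.setTimeRange(Collections.min(versions), Collections.max(versions) + 1);
        return ht.get(get).raw();
      }
    }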

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java Tue Sep 27 02:41:56 2011
@@ -1,9 +1,29 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hbase.client;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -23,7 +43,11 @@ public class TestScannerTimeout {
       TEST_UTIL = new HBaseTestingUtility();
 
   final Log LOG = LogFactory.getLog(getClass());
-  private final byte[] someBytes = Bytes.toBytes("f");
+  private final static byte[] SOME_BYTES = Bytes.toBytes("f");
+  private final static byte[] TABLE_NAME = Bytes.toBytes("t");
+  private final static int NB_ROWS = 10;
+  private final static int SCANNER_TIMEOUT = 1000;
+  private static HTable table;
 
    /**
    * @throws java.lang.Exception
@@ -31,8 +55,14 @@ public class TestScannerTimeout {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     Configuration c = TEST_UTIL.getConfiguration();
-    c.setInt("hbase.regionserver.lease.period", 1000);
-    TEST_UTIL.startMiniCluster(1);
+    c.setInt("hbase.regionserver.lease.period", SCANNER_TIMEOUT);
+    TEST_UTIL.startMiniCluster(2);
+    table = TEST_UTIL.createTable(Bytes.toBytes("t"), SOME_BYTES);
+    for (int i = 0; i < NB_ROWS; i++) {
+      Put put = new Put(Bytes.toBytes(i));
+      put.add(SOME_BYTES, SOME_BYTES, SOME_BYTES);
+      table.put(put);
+    }
   }
 
   /**
@@ -48,13 +78,7 @@ public class TestScannerTimeout {
    */
   @Before
   public void setUp() throws Exception {
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @After
-  public void tearDown() throws Exception {
+    TEST_UTIL.ensureSomeRegionServersAvailable(2);
   }
 
   /**
@@ -63,22 +87,16 @@ public class TestScannerTimeout {
    */
   @Test
   public void test2481() throws Exception {
-    int initialCount = 10;
-    HTable t = TEST_UTIL.createTable(Bytes.toBytes("t"), someBytes);
-    for (int i = 0; i < initialCount; i++) {
-      Put put = new Put(Bytes.toBytes(i));
-      put.add(someBytes, someBytes, someBytes);
-      t.put(put);
-    }
     Scan scan = new Scan();
-    ResultScanner r = t.getScanner(scan);
+    ResultScanner r = table.getScanner(scan);
     int count = 0;
     try {
       Result res = r.next();
       while (res != null) {
         count++;
         if (count == 5) {
-          Thread.sleep(1500);
+          // Sleep just a bit more to be sure
+          Thread.sleep(SCANNER_TIMEOUT+100);
         }
         res = r.next();
       }
@@ -88,4 +106,32 @@ public class TestScannerTimeout {
     }
     fail("We should be timing out");
   }
+
+  /**
+   * Test that scanner can continue even if the region server it was reading
+   * from failed. Before 2772, it reused the same scanner id.
+   * @throws Exception
+   */
+  @Test
+  public void test2772() throws Exception {
+    int rs = TEST_UTIL.getHBaseCluster().getServerWith(
+        TEST_UTIL.getHBaseCluster().getRegions(
+            TABLE_NAME).get(0).getRegionName());
+    Scan scan = new Scan();
+    // Set a very high timeout; we want to test what happens when a RS
+    // fails but the region is recovered before the lease times out.
+    // Since the RS is already created, this conf is client-side only for
+    // this new table
+    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
+    conf.setInt(
+        HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY, SCANNER_TIMEOUT*100);
+    HTable higherScanTimeoutTable = new HTable(conf, TABLE_NAME);
+    ResultScanner r = higherScanTimeoutTable.getScanner(scan);
+    // This takes way less than SCANNER_TIMEOUT*100
+    TEST_UTIL.getHBaseCluster().getRegionServer(rs).abort("die!");
+    Result[] results = r.next(NB_ROWS);
+    assertEquals(NB_ROWS, results.length);
+    r.close();
+
+  }
 }
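
test2772 relies on the lease period being a client-side setting: cloning the configuration before opening the HTable gives that one connection a much longer scanner lease without touching the running region servers. The core of the trick, isolated (timeout value illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.HTable;

    public class LongLeaseSketch {
      static HTable openWithLongLease(Configuration base, byte[] tableName)
          throws Exception {
        // Clone so the override never leaks into other connections.
        Configuration conf = new Configuration(base);
        conf.setInt(HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY, 100000);
        return new HTable(conf, tableName);
      }
    }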

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/client/TestTimestampsFilter.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,341 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.TimestampsFilter;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Run tests related to {@link TimestampsFilter} using HBase client APIs.
+ * Sets up the HBase mini cluster once at start. Each test creates a table
+ * named for the method and runs against it.
+ */
+public class TestTimestampsFilter {
+  final Log LOG = LogFactory.getLog(getClass());
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(3);
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @Before
+  public void setUp() throws Exception {
+    // Nothing to do.
+  }
+
+  /**
+   * @throws java.lang.Exception
+   */
+  @After
+  public void tearDown() throws Exception {
+    // Nothing to do.
+  }
+
+  /**
+   * Test from client side for TimestampsFilter.
+   *
+   * The TimestampsFilter provides the ability to request cells (KeyValues)
+   * whose timestamp/version is in the specified list of timestamps/version.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testTimestampsFilter() throws Exception {
+    byte [] TABLE = Bytes.toBytes("testTimestampsFilter");
+    byte [] FAMILY = Bytes.toBytes("event_log");
+    byte [][] FAMILIES = new byte[][] { FAMILY };
+    KeyValue kvs[];
+
+    // create table; set versions to max...
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES, Integer.MAX_VALUE);
+
+    for (int rowIdx = 0; rowIdx < 5; rowIdx++) {
+      for (int colIdx = 0; colIdx < 5; colIdx++) {
+        // insert versions 201..300
+        putNVersions(ht, FAMILY, rowIdx, colIdx, 201, 300);
+        // insert versions 1..100
+        putNVersions(ht, FAMILY, rowIdx, colIdx, 1, 100);
+      }
+    }
+
+    // do some verification before flush
+    verifyInsertedValues(ht, FAMILY);
+
+    flush();
+
+    // do some verification after flush
+    verifyInsertedValues(ht, FAMILY);
+
+    // Insert some more versions after flush. These should be in memstore.
+    // After this we should have data in both memstore & HFiles.
+    for (int rowIdx = 0; rowIdx < 5; rowIdx++) {
+      for (int colIdx = 0; colIdx < 5; colIdx++) {
+        putNVersions(ht, FAMILY, rowIdx, colIdx, 301, 400);
+        putNVersions(ht, FAMILY, rowIdx, colIdx, 101, 200);
+      }
+    }
+
+    for (int rowIdx = 0; rowIdx < 5; rowIdx++) {
+      for (int colIdx = 0; colIdx < 5; colIdx++) {
+        kvs = getNVersions(ht, FAMILY, rowIdx, colIdx,
+                           Arrays.asList(505L, 5L, 105L, 305L, 205L));
+        assertEquals(4, kvs.length);
+        checkOneCell(kvs[0], FAMILY, rowIdx, colIdx, 305);
+        checkOneCell(kvs[1], FAMILY, rowIdx, colIdx, 205);
+        checkOneCell(kvs[2], FAMILY, rowIdx, colIdx, 105);
+        checkOneCell(kvs[3], FAMILY, rowIdx, colIdx, 5);
+      }
+    }
+
+    // Request an empty list of versions using the Timestamps filter;
+    // Should return none.
+    kvs = getNVersions(ht, FAMILY, 2, 2, new ArrayList<Long>());
+    assertEquals(0, kvs.length);
+
+    //
+    // Test the filter using a Scan operation
+    // Scan rows 0..4. For each row, get all its columns, but only
+    // those versions of the columns with the specified timestamps.
+    Result[] results = scanNVersions(ht, FAMILY, 0, 4,
+                                     Arrays.asList(6L, 106L, 306L));
+    assertEquals("# of rows returned from scan", 5, results.length);
+    for (int rowIdx = 0; rowIdx < 5; rowIdx++) {
+      kvs = results[rowIdx].raw();
+      // each row should have 5 columns.
+      // And we have requested 3 versions for each.
+      assertEquals("Number of KeyValues in result for row:" + rowIdx,
+                   3*5, kvs.length);
+      for (int colIdx = 0; colIdx < 5; colIdx++) {
+        int offset = colIdx * 3;
+        checkOneCell(kvs[offset + 0], FAMILY, rowIdx, colIdx, 306);
+        checkOneCell(kvs[offset + 1], FAMILY, rowIdx, colIdx, 106);
+        checkOneCell(kvs[offset + 2], FAMILY, rowIdx, colIdx, 6);
+      }
+    }
+  }
+
+  /**
+   * Test TimestampsFilter in the presence of version deletes.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testWithVersionDeletes() throws Exception {
+
+    // first test from memstore (without flushing).
+    testWithVersionDeletes(false);
+
+    // run same test against HFiles (by forcing a flush).
+    testWithVersionDeletes(true);
+  }
+
+  private void testWithVersionDeletes(boolean flushTables) throws IOException {
+    byte [] TABLE = Bytes.toBytes("testWithVersionDeletes_" +
+                                   (flushTables ? "flush" : "noflush"));
+    byte [] FAMILY = Bytes.toBytes("event_log");
+    byte [][] FAMILIES = new byte[][] { FAMILY };
+
+    // create table; set versions to max...
+    HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES, Integer.MAX_VALUE);
+
+    // For row:0, col:0: insert versions 1 through 5.
+    putNVersions(ht, FAMILY, 0, 0, 1, 5);
+
+    // delete version 4.
+    deleteOneVersion(ht, FAMILY, 0, 0, 4);
+
+    if (flushTables) {
+      flush();
+    }
+
+    // request a bunch of versions including the deleted version. We should
+    // only get back entries for the versions that exist.
+    KeyValue kvs[] = getNVersions(ht, FAMILY, 0, 0, Arrays.asList(2L, 3L, 4L, 5L));
+    assertEquals(3, kvs.length);
+    checkOneCell(kvs[0], FAMILY, 0, 0, 5);
+    checkOneCell(kvs[1], FAMILY, 0, 0, 3);
+    checkOneCell(kvs[2], FAMILY, 0, 0, 2);
+  }
+
+  private void verifyInsertedValues(HTable ht, byte[] cf) throws IOException {
+    for (int rowIdx = 0; rowIdx < 5; rowIdx++) {
+      for (int colIdx = 0; colIdx < 5; colIdx++) {
+        // ask for versions that exist.
+        KeyValue[] kvs = getNVersions(ht, cf, rowIdx, colIdx,
+                                      Arrays.asList(5L, 300L, 6L, 80L));
+        assertEquals(4, kvs.length);
+        checkOneCell(kvs[0], cf, rowIdx, colIdx, 300);
+        checkOneCell(kvs[1], cf, rowIdx, colIdx, 80);
+        checkOneCell(kvs[2], cf, rowIdx, colIdx, 6);
+        checkOneCell(kvs[3], cf, rowIdx, colIdx, 5);
+
+        // ask for versions that do not exist.
+        kvs = getNVersions(ht, cf, rowIdx, colIdx,
+                           Arrays.asList(101L, 102L));
+        assertEquals(0, kvs.length);
+
+        // ask for some versions that exist and some that do not.
+        kvs = getNVersions(ht, cf, rowIdx, colIdx,
+                           Arrays.asList(1L, 300L, 105L, 70L, 115L));
+        assertEquals(3, kvs.length);
+        checkOneCell(kvs[0], cf, rowIdx, colIdx, 300);
+        checkOneCell(kvs[1], cf, rowIdx, colIdx, 70);
+        checkOneCell(kvs[2], cf, rowIdx, colIdx, 1);
+      }
+    }
+  }
+
+  // Flush tables. Since flushing is asynchronous, sleep for a bit.
+  private void flush() throws IOException {
+    TEST_UTIL.flush();
+    try {
+      Thread.sleep(3000);
+    } catch (InterruptedException i) {
+      // ignore
+    }
+  }
+
+  /**
+   * Assert that the passed in KeyValue has expected contents for the
+   * specified row, column & timestamp.
+   */
+  private void checkOneCell(KeyValue kv, byte[] cf,
+                             int rowIdx, int colIdx, long ts) {
+
+    String ctx = "rowIdx=" + rowIdx + "; colIdx=" + colIdx + "; ts=" + ts;
+
+    assertEquals("Row mismatch which checking: " + ctx,
+                 "row:"+ rowIdx, Bytes.toString(kv.getRow()));
+
+    assertEquals("ColumnFamily mismatch while checking: " + ctx,
+                 Bytes.toString(cf), Bytes.toString(kv.getFamily()));
+
+    assertEquals("Column qualifier mismatch while checking: " + ctx,
+                 "column:" + colIdx,
+                  Bytes.toString(kv.getQualifier()));
+
+    assertEquals("Timestamp mismatch while checking: " + ctx,
+                 ts, kv.getTimestamp());
+
+    assertEquals("Value mismatch while checking: " + ctx,
+                 "value-version-" + ts, Bytes.toString(kv.getValue()));
+  }
+
+  /**
+   * Uses the TimestampFilter on a Get to request a specified list of
+   * versions for the row/column specified by rowIdx & colIdx.
+   *
+   */
+  private KeyValue[] getNVersions(HTable ht, byte[] cf, int rowIdx,
+                                   int colIdx, List<Long> versions)
+    throws IOException {
+    byte row[] = Bytes.toBytes("row:" + rowIdx);
+    byte column[] = Bytes.toBytes("column:" + colIdx);
+    Filter filter = new TimestampsFilter(versions);
+    Get get = new Get(row);
+    get.addColumn(cf, column);
+    get.setFilter(filter);
+    get.setMaxVersions();
+    Result result = ht.get(get);
+
+    return result.raw();
+  }
+
+  /**
+   * Uses the TimestampsFilter on a Scan to request a specified list of
+   * versions for the rows from startRowIdx to endRowIdx (both inclusive).
+   */
+  private Result[] scanNVersions(HTable ht, byte[] cf, int startRowIdx,
+                                 int endRowIdx, List<Long> versions)
+    throws IOException {
+    byte startRow[] = Bytes.toBytes("row:" + startRowIdx);
+    byte endRow[] = Bytes.toBytes("row:" + (endRowIdx + 1)); // exclusive
+    Filter filter = new TimestampsFilter(versions);
+    Scan scan = new Scan(startRow, endRow);
+    scan.setFilter(filter);
+    scan.setMaxVersions();
+    ResultScanner scanner = ht.getScanner(scan);
+    return scanner.next(endRowIdx - startRowIdx + 1);
+  }
+
+  /**
+   * Insert versions with timestamps versionStart..versionEnd
+   * into the specified row/column.
+   */
+  private void putNVersions(HTable ht, byte[] cf, int rowIdx, int colIdx,
+                            long versionStart, long versionEnd)
+      throws IOException {
+    byte row[] = Bytes.toBytes("row:" + rowIdx);
+    byte column[] = Bytes.toBytes("column:" + colIdx);
+    Put put = new Put(row);
+
+    for (long idx = versionStart; idx <= versionEnd; idx++) {
+      put.add(cf, column, idx, Bytes.toBytes("value-version-" + idx));
+    }
+
+    ht.put(put);
+  }
+
+  /**
+   * For row/column specified by rowIdx/colIdx, delete the cell
+   * corresponding to the specified version.
+   */
+  private void deleteOneVersion(HTable ht, byte[] cf, int rowIdx,
+                                int colIdx, long version)
+    throws IOException {
+    byte row[] = Bytes.toBytes("row:" + rowIdx);
+    byte column[] = Bytes.toBytes("column:" + colIdx);
+    Delete del = new Delete(row);
+    del.deleteColumn(cf, column, version);
+    ht.delete(del);
+  }
+}
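
For reference, the client-side pattern the helpers above exercise can be
boiled down to the following minimal sketch. The table, row, family and
qualifier names here are hypothetical, invented only for illustration; the
filter and Get calls are the same ones used by getNVersions() above.

    import java.io.IOException;
    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.filter.TimestampsFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TimestampsFilterSketch {
      // Fetch only the cell versions stamped 5 and 10 for a single cell.
      static KeyValue[] getTwoVersions(Configuration conf) throws IOException {
        HTable table = new HTable(conf, "mytable");   // hypothetical table
        Get get = new Get(Bytes.toBytes("myrow"));    // hypothetical row key
        get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("col"));
        // Versions whose timestamps are not in the list are filtered out
        // server-side.
        get.setFilter(new TimestampsFilter(Arrays.asList(5L, 10L)));
        // Without this, at most one (the newest surviving) version returns.
        get.setMaxVersions();
        Result result = table.get(get);
        return result.raw();
      }
    }

The same filter plugs into a Scan unchanged, as scanNVersions() above shows;
only the container object differs.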

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java Tue Sep 27 02:41:56 2011
@@ -162,8 +162,8 @@ public class TestFilter extends HBaseTes
   }
 
   protected void tearDown() throws Exception {
-    super.tearDown();
     this.region.close();
+    super.tearDown();
   }
 
   public void testNoFilter() throws Exception {

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java Tue Sep 27 02:41:56 2011
@@ -183,17 +183,17 @@ public class TestTimeRangeMapRed extends
   private void verify() throws IOException {
     Scan scan = new Scan();
     scan.addColumn(FAMILY_NAME, COLUMN_NAME);
-    scan.setMaxVersions();
+    scan.setMaxVersions(1);
     ResultScanner scanner = table.getScanner(scan);
     for (Result r: scanner) {
       for (KeyValue kv : r.sorted()) {
-        assertEquals(TIMESTAMP.get(kv.getTimestamp()), (Boolean)Bytes.toBoolean(kv.getValue()));
         log.debug(Bytes.toString(r.getRow()) + "\t" + Bytes.toString(kv.getFamily())
             + "\t" + Bytes.toString(kv.getQualifier())
             + "\t" + kv.getTimestamp() + "\t" + Bytes.toBoolean(kv.getValue()));
+        assertEquals(TIMESTAMP.get(kv.getTimestamp()), (Boolean)Bytes.toBoolean(kv.getValue()));
       }
     }
     scanner.close();
   }
 
-}
\ No newline at end of file
+}

Copied: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestKillingServersFromMaster.java (from r1176175, hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterWrongRS.java)
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestKillingServersFromMaster.java?p2=hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestKillingServersFromMaster.java&p1=hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterWrongRS.java&r1=1176175&r2=1176177&rev=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterWrongRS.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestKillingServersFromMaster.java Tue Sep 27 02:41:56 2011
@@ -29,17 +29,21 @@ import org.apache.hadoop.hbase.HServerAd
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer;
+import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-public class TestMasterWrongRS {
+public class TestKillingServersFromMaster {
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static MiniHBaseCluster cluster;
 
   @BeforeClass
   public static void beforeAllTests() throws Exception {
-    TEST_UTIL.startMiniCluster(3);
+    TEST_UTIL.startMiniCluster(2);
+    cluster = TEST_UTIL.getHBaseCluster();
   }
 
   @AfterClass
@@ -47,29 +51,54 @@ public class TestMasterWrongRS {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  @Before
+  public void setup() throws IOException {
+    TEST_UTIL.ensureSomeRegionServersAvailable(2);
+  }
+
   /**
-   * Test when region servers start reporting with the wrong address
-   * or start code. Currently the decision is to shut them down.
+   * Test that a region server that reports with the wrong start code
+   * gets shut down.
    * See HBASE-2613
    * @throws Exception
    */
-  @Test
-  public void testRsReportsWrongServerName() throws Exception {
-    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+  @Test (timeout=180000)
+  public void testRsReportsWrongStartCode() throws Exception {
     MiniHBaseClusterRegionServer firstServer =
       (MiniHBaseClusterRegionServer)cluster.getRegionServer(0);
-    HRegionServer secondServer = cluster.getRegionServer(1);
     HServerInfo hsi = firstServer.getServerInfo();
+    // This constructor creates a new startcode
     firstServer.setHServerInfo(new HServerInfo(hsi.getServerAddress(),
       hsi.getInfoPort(), hsi.getHostname()));
-    // Sleep while the region server pings back
-    Thread.sleep(2000);
-    assertTrue(firstServer.isOnline());
-    assertEquals(2, cluster.getLiveRegionServerThreads().size());
-
-    secondServer.getHServerInfo().setServerAddress(new HServerAddress("0.0.0.0", 60010));
-    Thread.sleep(2000);
-    assertTrue(secondServer.isOnline());
+    cluster.waitOnRegionServer(0);
+    assertEquals(1, cluster.getLiveRegionServerThreads().size());
+  }
+
+  /**
+   * Test that a region server that reports with the wrong address
+   * gets shut down.
+   * See HBASE-2613
+   * @throws Exception
+   */
+  @Test (timeout=180000)
+  public void testRsReportsWrongAddress() throws Exception {
+    MiniHBaseClusterRegionServer firstServer =
+      (MiniHBaseClusterRegionServer)cluster.getRegionServer(0);
+    firstServer.getHServerInfo().setServerAddress(
+      new HServerAddress("0.0.0.0", 60010));
+    cluster.waitOnRegionServer(0);
+    assertEquals(1, cluster.getLiveRegionServerThreads().size());
+  }
+
+  /**
+   * Send a YouAreDeadException to the region server and expect it to shut down.
+   * See HBASE-2691
+   * @throws Exception
+   */
+  @Test (timeout=180000)
+  public void testSendYouAreDead() throws Exception {
+    cluster.addExceptionToSendRegionServer(0, new YouAreDeadException("bam!"));
+    cluster.waitOnRegionServer(0);
     assertEquals(1, cluster.getLiveRegionServerThreads().size());
   }
 }

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java Tue Sep 27 02:41:56 2011
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.master;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.HMsg;
 import org.apache.hadoop.hbase.HServerInfo;
@@ -29,16 +30,23 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.executor.HBaseEventHandler;
+import org.apache.hadoop.hbase.executor.HBaseEventHandler.HBaseEventHandlerListener;
+import org.apache.hadoop.hbase.executor.HBaseEventHandler.HBaseEventType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+
+import com.google.common.base.Joiner;
+
 import static org.junit.Assert.*;
 
 public class TestMaster {
@@ -67,44 +75,56 @@ public class TestMaster {
     TEST_UTIL.createTable(TABLENAME, FAMILYNAME);
     TEST_UTIL.loadTable(new HTable(TABLENAME), FAMILYNAME);
 
+    List<Pair<HRegionInfo, HServerAddress>> tableRegions =
+      m.getTableRegions(TABLENAME);
+    LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
+    assertEquals(1, tableRegions.size());
+    assertArrayEquals(HConstants.EMPTY_START_ROW,
+        tableRegions.get(0).getFirst().getStartKey());
+    assertArrayEquals(HConstants.EMPTY_END_ROW,
+        tableRegions.get(0).getFirst().getEndKey());
+
+    // Now trigger a split and stop when the split is in progress
+
     CountDownLatch aboutToOpen = new CountDownLatch(1);
     CountDownLatch proceed = new CountDownLatch(1);
     RegionOpenListener list = new RegionOpenListener(aboutToOpen, proceed);
-    m.getRegionServerOperationQueue().registerRegionServerOperationListener(list);
+    HBaseEventHandler.registerListener(list);
 
+    LOG.info("Splitting table");
     admin.split(TABLENAME);
+    LOG.info("Waiting for split result to be about to open");
     aboutToOpen.await(60, TimeUnit.SECONDS);
-
     try {
-      m.getTableRegions(TABLENAME);
+      LOG.info("Making sure we can call getTableRegions while opening");
+      tableRegions = m.getTableRegions(TABLENAME);
+      LOG.info("Regions: " + Joiner.on(',').join(tableRegions));
+      // We have three regions because one is split-in-progress
+      assertEquals(3, tableRegions.size());
+      LOG.info("Making sure we can call getTableRegionClosest while opening");
       Pair<HRegionInfo,HServerAddress> pair =
-        m.getTableRegionClosest(TABLENAME, Bytes.toBytes("cde"));
-      assertNull(pair);
-      /**
-       * TODO: these methods return null when the regions are not deployed.
-       * These tests should be uncommented after HBASE-2656.
-      assertNotNull(pair);
-      m.getTableRegionFromName(pair.getFirst().getRegionName());
-      */
+        m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde"));
+      LOG.info("Result is: " + pair);
+      Pair<HRegionInfo, HServerAddress> tableRegionFromName = m.getTableRegionFromName(pair.getFirst().getRegionName());
+      assertEquals(tableRegionFromName.getFirst(), pair.getFirst());
     } finally {
       proceed.countDown();
     }
   }
 
-  static class RegionOpenListener implements RegionServerOperationListener {
+  static class RegionOpenListener implements HBaseEventHandlerListener {
     CountDownLatch aboutToOpen, proceed;
 
-    public RegionOpenListener(
-      CountDownLatch aboutToOpen, CountDownLatch proceed)
+    public RegionOpenListener(CountDownLatch aboutToOpen, CountDownLatch proceed)
     {
       this.aboutToOpen = aboutToOpen;
       this.proceed = proceed;
     }
 
     @Override
-    public boolean process(HServerInfo serverInfo, HMsg incomingMsg) {
-      if (!incomingMsg.isType(HMsg.Type.MSG_REPORT_OPEN)) {
-        return true;
+    public void afterProcess(HBaseEventHandler event) {
+      if (event.getHBEvent() != HBaseEventType.RS2ZK_REGION_OPENED) {
+        return;
       }
       try {
         aboutToOpen.countDown();
@@ -112,16 +132,11 @@ public class TestMaster {
       } catch (InterruptedException ie) {
         throw new RuntimeException(ie);
       }
-      return true;
-    }
-
-    @Override
-    public boolean process(RegionServerOperation op) throws IOException {
-      return true;
+      return;
     }
 
     @Override
-    public void processed(RegionServerOperation op) {
+    public void beforeProcess(HBaseEventHandler event) {
     }
   }
 

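The listener hook used by RegionOpenListener above is general: anything
registered through the static HBaseEventHandler.registerListener() is called
before and after every event is processed. As a minimal sketch built only
from the APIs visible in this patch, here is a hypothetical listener that
counts region-open events instead of latching on them:

    import java.util.concurrent.atomic.AtomicInteger;

    import org.apache.hadoop.hbase.executor.HBaseEventHandler;
    import org.apache.hadoop.hbase.executor.HBaseEventHandler.HBaseEventHandlerListener;
    import org.apache.hadoop.hbase.executor.HBaseEventHandler.HBaseEventType;

    public class RegionOpenCounter implements HBaseEventHandlerListener {
      private final AtomicInteger opened = new AtomicInteger(0);

      @Override
      public void beforeProcess(HBaseEventHandler event) {
        // Nothing to do before the event is handled.
      }

      @Override
      public void afterProcess(HBaseEventHandler event) {
        // Count each region-open notification as it completes.
        if (event.getHBEvent() == HBaseEventType.RS2ZK_REGION_OPENED) {
          opened.incrementAndGet();
        }
      }

      public int openedCount() {
        return opened.get();
      }
    }
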
Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterTransitions.java Tue Sep 27 02:41:56 2011
@@ -84,7 +84,7 @@ public class TestMasterTransitions {
     TEST_UTIL.createTable(Bytes.toBytes(TABLENAME), FAMILIES);
     HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
     int countOfRegions = TEST_UTIL.createMultiRegions(t, getTestFamily());
-    waitUntilAllRegionsAssigned(countOfRegions);
+    TEST_UTIL.waitUntilAllRegionsAssigned(countOfRegions);
     addToEachStartKey(countOfRegions);
   }
 
@@ -93,12 +93,7 @@ public class TestMasterTransitions {
   }
 
   @Before public void setup() throws IOException {
-    if (TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().size() < 2) {
-      // Need at least two servers.
-      LOG.info("Started new server=" +
-        TEST_UTIL.getHBaseCluster().startRegionServer());
-      
-    }
+    TEST_UTIL.ensureSomeRegionServersAvailable(2);
   }
 
   /**
@@ -342,10 +337,10 @@ public class TestMasterTransitions {
       if (!incomingMsg.isType(HMsg.Type.MSG_REPORT_PROCESS_OPEN)) return true;
       // Save the region that is in transition so can test later it came back.
       this.regionToFind = incomingMsg.getRegionInfo();
-      LOG.info("ABORTING " + this.victim + " because got a " +
+      String msg = "ABORTING " + this.victim + " because got a " +
         HMsg.Type.MSG_REPORT_PROCESS_OPEN + " on this server for " +
-        incomingMsg.getRegionInfo().getRegionNameAsString());
-      this.victim.abort();
+        incomingMsg.getRegionInfo().getRegionNameAsString();
+      this.victim.abort(msg);
       this.abortSent = true;
       return true;
     }
@@ -462,36 +457,6 @@ public class TestMasterTransitions {
   }
 
   /*
-   * Wait until all rows in .META. have a non-empty info:server.  This means
-   * all regions have been deployed, master has been informed and updated
-   * .META. with the regions deployed server.
-   * @param countOfRegions How many regions in .META.
-   * @throws IOException
-   */
-  private static void waitUntilAllRegionsAssigned(final int countOfRegions)
-  throws IOException {
-    HTable meta = new HTable(TEST_UTIL.getConfiguration(),
-      HConstants.META_TABLE_NAME);
-    while (true) {
-      int rows = 0;
-      Scan scan = new Scan();
-      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
-      ResultScanner s = meta.getScanner(scan);
-      for (Result r = null; (r = s.next()) != null;) {
-        byte [] b =
-          r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
-        if (b == null || b.length <= 0) break;
-        rows++;
-      }
-      s.close();
-      // If I get to here and all rows have a Server, then all have been assigned.
-      if (rows == countOfRegions) break;
-      LOG.info("Found=" + rows);
-      Threads.sleep(1000); 
-    }
-  }
-
-  /*
    * @return Count of regions in meta table.
    * @throws IOException
    */

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterWrongRS.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterWrongRS.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterWrongRS.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestMasterWrongRS.java Tue Sep 27 02:41:56 2011
@@ -1,75 +0,0 @@
-/**
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.HServerInfo;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestMasterWrongRS {
-  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-  @BeforeClass
-  public static void beforeAllTests() throws Exception {
-    TEST_UTIL.startMiniCluster(3);
-  }
-
-  @AfterClass
-  public static void afterAllTests() throws IOException {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  /**
-   * Test when region servers start reporting with the wrong address
-   * or start code. Currently the decision is to shut them down.
-   * See HBASE-2613
-   * @throws Exception
-   */
-  @Test
-  public void testRsReportsWrongServerName() throws Exception {
-    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-    MiniHBaseClusterRegionServer firstServer =
-      (MiniHBaseClusterRegionServer)cluster.getRegionServer(0);
-    HRegionServer secondServer = cluster.getRegionServer(1);
-    HServerInfo hsi = firstServer.getServerInfo();
-    firstServer.setHServerInfo(new HServerInfo(hsi.getServerAddress(),
-      hsi.getInfoPort(), hsi.getHostname()));
-    // Sleep while the region server pings back
-    Thread.sleep(2000);
-    assertTrue(firstServer.isOnline());
-    assertEquals(2, cluster.getLiveRegionServerThreads().size());
-
-    secondServer.getHServerInfo().setServerAddress(new HServerAddress("0.0.0.0", 60010));
-    Thread.sleep(2000);
-    assertTrue(secondServer.isOnline());
-    assertEquals(1, cluster.getLiveRegionServerThreads().size());
-  }
-}

Added: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestROOTAssignment.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestROOTAssignment.java?rev=1176177&view=auto
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestROOTAssignment.java (added)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestROOTAssignment.java Tue Sep 27 02:41:56 2011
@@ -0,0 +1,169 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.CopyOnWriteArraySet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HMsg;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test issues assigning ROOT.
+ */
+public class TestROOTAssignment {
+  private static final Log LOG = LogFactory.getLog(TestROOTAssignment.class);
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final byte [] TABLENAME = Bytes.toBytes("root_assignments");
+  private static final byte [][] FAMILIES =
+    new byte [][] {Bytes.toBytes("family")};
+
+  /**
+   * Start up a mini cluster and put a small table of many empty regions into it.
+   * @throws Exception
+   */
+  @BeforeClass public static void beforeAllTests() throws Exception {
+    TEST_UTIL.getConfiguration().setInt("hbase.regions.percheckin", 2);
+    // Start a cluster of two regionservers.
+    TEST_UTIL.startMiniCluster(2);
+    // Create a table of one family.  This will assign a region.
+    TEST_UTIL.createTable(TABLENAME, FAMILIES);
+    HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
+    int countOfRegions = TEST_UTIL.createMultiRegions(t, FAMILIES[0]);
+    TEST_UTIL.waitUntilAllRegionsAssigned(countOfRegions);
+    HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
+    TEST_UTIL.loadTable(table, FAMILIES[0]);
+    table.close();
+  }
+
+  @AfterClass public static void afterAllTests() throws IOException {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before public void setup() throws IOException {
+    TEST_UTIL.ensureSomeRegionServersAvailable(2);
+  }
+
+  /**
+   * Interrupt processing of the server shutdown so it gets put on the delay queue.
+   */
+  static class PostponeShutdownProcessing implements RegionServerOperationListener {
+    // Map of what we've delayed so we don't do repeated delays.
+    private final Set<RegionServerOperation> postponed =
+      new CopyOnWriteArraySet<RegionServerOperation>();
+    private boolean done = false;
+    private final HServerAddress rootServerAddress;
+    private final HMaster master;
+
+    PostponeShutdownProcessing(final HMaster master,
+        final HServerAddress rootServerAddress) {
+      this.master = master;
+      this.rootServerAddress = rootServerAddress;
+    }
+
+    @Override
+    public boolean process(final RegionServerOperation op) throws IOException {
+      // If a regionserver shut down and it was the one hosting ROOT,
+      // then we want to delay the processing of the shutdown.
+      boolean result = true;
+      if (op instanceof ProcessServerShutdown) {
+        ProcessServerShutdown pss = (ProcessServerShutdown)op;
+        if (pss.getDeadServerAddress().equals(this.rootServerAddress)) {
+          // Don't postpone more than once.
+          if (!this.postponed.contains(pss)) {
+            this.postponed.add(pss);
+            Assert.assertNull(this.master.getRegionManager().getRootRegionLocation());
+            pss.setDelay(1 * 1000);
+            // Return false.  This will add this op to the delayed queue.
+            result = false;
+          }
+        }
+      }
+      return result;
+    }
+
+    @Override
+    public boolean process(HServerInfo serverInfo, HMsg incomingMsg) {
+      return true;
+    }
+
+    @Override
+    public void processed(RegionServerOperation op) {
+      if (op instanceof ProcessServerShutdown) {
+        ProcessServerShutdown pss = (ProcessServerShutdown)op;
+        if (pss.getDeadServerAddress().equals(this.rootServerAddress)) {
+          this.done = true;
+        }
+      }
+    }
+
+    public boolean isDone() {
+      return this.done;
+    }
+  }
+
+  /**
+   * If the split of the log for the regionserver hosting ROOT did not go
+   * off smoothly and the server shutdown processing got added to the
+   * delayed queue of events, ROOT was never allocated.
+   * @see <a href="https://issues.apache.org/jira/browse/HBASE-2707">HBASE-2707</a>
+   */
+  @Test (timeout=300000) public void testROOTDeployedThoughProblemSplittingLog()
+  throws Exception {
+    LOG.info("Running testROOTDeployedThoughProblemSplittingLog");
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    final HMaster master = cluster.getMaster();
+    byte [] rootRegion = Bytes.toBytes("-ROOT-,,0");
+    int rootIndex = cluster.getServerWith(rootRegion);
+    final HRegionServer rootHRS = cluster.getRegionServer(rootIndex);
+
+    // Add our RegionServerOperationsListener
+    PostponeShutdownProcessing listener = new PostponeShutdownProcessing(master,
+      rootHRS.getHServerInfo().getServerAddress());
+    master.getRegionServerOperationQueue().
+      registerRegionServerOperationListener(listener);
+    try {
+      // Now abort the server carrying ROOT.
+      cluster.abortRegionServer(rootIndex);
+
+      // Wait for processing of the shutdown server.
+      while(!listener.isDone()) Threads.sleep(100);
+      master.getRegionManager().waitForRootRegionLocation();
+    } finally {
+      master.getRegionServerOperationQueue().
+        unregisterRegionServerOperationListener(listener);
+    }
+  }
+}
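
The mechanism PostponeShutdownProcessing leans on is the return value of
process(RegionServerOperation): calling setDelay() on the operation and then
returning false re-queues it on the master's delay queue instead of running
it. A condensed, hypothetical sketch of just that contract, using only the
types that appear in the patch above:

    package org.apache.hadoop.hbase.master;

    import java.io.IOException;

    import org.apache.hadoop.hbase.HMsg;
    import org.apache.hadoop.hbase.HServerInfo;

    // Postpones the first server-shutdown operation seen by one second.
    class DelayShutdownOnce implements RegionServerOperationListener {
      private boolean delayed = false;

      @Override
      public boolean process(RegionServerOperation op) throws IOException {
        if (op instanceof ProcessServerShutdown && !delayed) {
          delayed = true;
          ((ProcessServerShutdown) op).setDelay(1000);
          return false;  // false puts the op on the delay queue
        }
        return true;     // true lets the op run immediately
      }

      @Override
      public boolean process(HServerInfo serverInfo, HMsg incomingMsg) {
        return true;     // let regionserver messages through untouched
      }

      @Override
      public void processed(RegionServerOperation op) {
        // No-op: this sketch only cares about the pre-processing hook.
      }
    }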

Modified: hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java?rev=1176177&r1=1176176&r2=1176177&view=diff
==============================================================================
--- hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java (original)
+++ hbase/branches/0.89/src/test/java/org/apache/hadoop/hbase/master/TestRegionServerOperationQueue.java Tue Sep 27 02:41:56 2011
@@ -18,9 +18,13 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hbase.master;
+import static org.junit.Assert.*;
+
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.master.RegionServerOperationQueue.ProcessingResultCode;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -47,6 +51,8 @@ public class TestRegionServerOperationQu
   }
 
   @Test
-  public void testNothing() throws Exception {
+  public void testWeDoNotGetStuckInDelayQueue() throws Exception {
+    ProcessingResultCode code = this.queue.process();
+    assertTrue(ProcessingResultCode.NOOP == code);
   }
 }


