hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From st...@apache.org
Subject svn commit: r1329400 [3/3] - in /hbase/trunk/src: main/java/org/apache/hadoop/hbase/io/hfile/ main/java/org/apache/hadoop/hbase/metrics/histogram/ main/java/org/apache/hadoop/hbase/regionserver/ main/java/org/apache/hadoop/hbase/util/ test/java/org/apa...
Date Mon, 23 Apr 2012 19:40:14 GMT
Added: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java?rev=1329400&view=auto
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
(added)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionOnCluster.java
Mon Apr 23 19:40:13 2012
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package org.apache.hadoop.hbase.regionserver;

import static org.junit.Assert.*;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.experimental.categories.Category;
+
+/**
+ * Tests that need to spin up a cluster testing an {@link HRegion}.  Use
+ * {@link TestHRegion} if you don't need a cluster, if you can test w/ a
+ * standalone {@link HRegion}.
+ */
+@Category(MediumTests.class)
+public class TestHRegionOnCluster {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @Test (timeout=180000)
+  public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
+    final int NUM_MASTERS = 1;
+    final int NUM_RS = 3;
+    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
+
+    try {
+      final byte[] TABLENAME = Bytes
+          .toBytes("testDataCorrectnessReplayingRecoveredEdits");
+      final byte[] FAMILY = Bytes.toBytes("family");
+      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+      HMaster master = cluster.getMaster();
+
+      // Create table
+      HTableDescriptor desc = new HTableDescriptor(TABLENAME);
+      desc.addFamily(new HColumnDescriptor(FAMILY));
+      HBaseAdmin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
+      hbaseAdmin.createTable(desc);
+
+      assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));
+
+      // Put data: r1->v1
+      HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
+      putDataAndVerify(table, "r1", FAMILY, "v1", 1);
+
+      // Move region to target server
+      HRegionInfo regionInfo = table.getRegionLocation("r1").getRegionInfo();
+      int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
+      HRegionServer originServer = cluster.getRegionServer(originServerNum);
+      int targetServerNum = (originServerNum + 1) % NUM_RS;
+      HRegionServer targetServer = cluster.getRegionServer(targetServerNum);
+      assertFalse(originServer.equals(targetServer));
+      hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
+          Bytes.toBytes(targetServer.getServerName().getServerName()));
+      do {
+        Thread.sleep(1);
+      } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);
+
+      // Put data: r2->v2
+      putDataAndVerify(table, "r2", FAMILY, "v2", 2);
+
+      // Move region to origin server
+      hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
+          Bytes.toBytes(originServer.getServerName().getServerName()));
+      do {
+        Thread.sleep(1);
+      } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);
+
+      // Put data: r3->v3
+      putDataAndVerify(table, "r3", FAMILY, "v3", 3);
+
+      // Kill target server
+      targetServer.kill();
+      cluster.getRegionServerThreads().get(targetServerNum).join();
+      // Wait until finish processing of shutdown
+      while (master.getServerManager().areDeadServersInProgress()) {
+        Thread.sleep(5);
+      }
+      // Kill origin server
+      originServer.kill();
+      cluster.getRegionServerThreads().get(originServerNum).join();
+
+      // Put data: r4->v4
+      putDataAndVerify(table, "r4", FAMILY, "v4", 4);
+
+    } finally {
+      TEST_UTIL.shutdownMiniCluster();
+    }
+  }
+
+  private void putDataAndVerify(HTable table, String row, byte[] family,
+      String value, int verifyNum) throws IOException {
+    System.out.println("=========Putting data :" + row);
+    Put put = new Put(Bytes.toBytes(row));
+    put.add(family, Bytes.toBytes("q1"), Bytes.toBytes(value));
+    table.put(put);
+    ResultScanner resultScanner = table.getScanner(new Scan());
+    List<Result> results = new ArrayList<Result>();
+    while (true) {
+      Result r = resultScanner.next();
+      if (r == null)
+        break;
+      results.add(r);
+    }
+    resultScanner.close();
+    if (results.size() != verifyNum) {
+      System.out.println(results);
+    }
+    assertEquals(verifyNum, results.size());
+  }
+
+  @org.junit.Rule
+  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
+    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
+}

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java?rev=1329400&r1=1329399&r2=1329400&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java Mon
Apr 23 19:40:13 2012
@@ -52,40 +52,45 @@ public class TestMinVersions extends HBa
   public void testGetClosestBefore() throws Exception {
     HTableDescriptor htd = createTableDescriptor(getName(), 1, 1000, 1, false);
     HRegion region = createNewHRegion(htd, null, null);
+    try {
 
-    // 2s in the past
-    long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
-
-    Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
-    region.put(p);
-
-    p = new Put(T1, ts+1);
-    p.add(c0, c0, T4);
-    region.put(p);
-
-    p = new Put(T3, ts);
-    p.add(c0, c0, T3);
-    region.put(p);
-
-    // now make sure that getClosestBefore(...) get can
-    // rows that would be expired without minVersion.
-    // also make sure it gets the latest version
-    Result r = region.getClosestRowBefore(T1, c0);
-    checkResult(r, c0, T4);
-
-    r = region.getClosestRowBefore(T2, c0);
-    checkResult(r, c0, T4);
-
-    // now flush/compact
-    region.flushcache();
-    region.compactStores(true);
-
-    r = region.getClosestRowBefore(T1, c0);
-    checkResult(r, c0, T4);
+      // 2s in the past
+      long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
 
-    r = region.getClosestRowBefore(T2, c0);
-    checkResult(r, c0, T4);
+      Put p = new Put(T1, ts);
+      p.add(c0, c0, T1);
+      region.put(p);
+
+      p = new Put(T1, ts+1);
+      p.add(c0, c0, T4);
+      region.put(p);
+
+      p = new Put(T3, ts);
+      p.add(c0, c0, T3);
+      region.put(p);
+
+      // now make sure that getClosestBefore(...) get can
+      // rows that would be expired without minVersion.
+      // also make sure it gets the latest version
+      Result r = region.getClosestRowBefore(T1, c0);
+      checkResult(r, c0, T4);
+
+      r = region.getClosestRowBefore(T2, c0);
+      checkResult(r, c0, T4);
+
+      // now flush/compact
+      region.flushcache();
+      region.compactStores(true);
+
+      r = region.getClosestRowBefore(T1, c0);
+      checkResult(r, c0, T4);
+
+      r = region.getClosestRowBefore(T2, c0);
+      checkResult(r, c0, T4);
+    } finally {
+      region.close();
+      region.getLog().closeAndDelete();
+    }
   }
 
   /**
@@ -96,48 +101,52 @@ public class TestMinVersions extends HBa
     // keep 3 versions minimum
     HTableDescriptor htd = createTableDescriptor(getName(), 3, 1000, 1, false);
     HRegion region = createNewHRegion(htd, null, null);
-
     // 2s in the past
     long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
 
-    Put p = new Put(T1, ts-1);
-    p.add(c0, c0, T2);
-    region.put(p);
-
-    p = new Put(T1, ts-3);
-    p.add(c0, c0, T0);
-    region.put(p);
-
-    // now flush/compact
-    region.flushcache();
-    region.compactStores(true);
-
-    p = new Put(T1, ts);
-    p.add(c0, c0, T3);
-    region.put(p);
-
-    p = new Put(T1, ts-2);
-    p.add(c0, c0, T1);
-    region.put(p);
-
-    p = new Put(T1, ts-3);
-    p.add(c0, c0, T0);
-    region.put(p);
-
-    // newest version in the memstore
-    // the 2nd oldest in the store file
-    // and the 3rd, 4th oldest also in the memstore
-
-    Get g = new Get(T1);
-    g.setMaxVersions();
-    Result r = region.get(g, null); // this'll use ScanWildcardColumnTracker
-    checkResult(r, c0, T3,T2,T1);
-
-    g = new Get(T1);
-    g.setMaxVersions();
-    g.addColumn(c0, c0);
-    r = region.get(g, null);  // this'll use ExplicitColumnTracker
-    checkResult(r, c0, T3,T2,T1);
+    try {
+      Put p = new Put(T1, ts-1);
+      p.add(c0, c0, T2);
+      region.put(p);
+
+      p = new Put(T1, ts-3);
+      p.add(c0, c0, T0);
+      region.put(p);
+
+      // now flush/compact
+      region.flushcache();
+      region.compactStores(true);
+
+      p = new Put(T1, ts);
+      p.add(c0, c0, T3);
+      region.put(p);
+
+      p = new Put(T1, ts-2);
+      p.add(c0, c0, T1);
+      region.put(p);
+
+      p = new Put(T1, ts-3);
+      p.add(c0, c0, T0);
+      region.put(p);
+
+      // newest version in the memstore
+      // the 2nd oldest in the store file
+      // and the 3rd, 4th oldest also in the memstore
+
+      Get g = new Get(T1);
+      g.setMaxVersions();
+      Result r = region.get(g, null); // this'll use ScanWildcardColumnTracker
+      checkResult(r, c0, T3,T2,T1);
+
+      g = new Get(T1);
+      g.setMaxVersions();
+      g.addColumn(c0, c0);
+      r = region.get(g, null);  // this'll use ExplicitColumnTracker
+      checkResult(r, c0, T3,T2,T1);
+    } finally {
+      region.close();
+      region.getLog().closeAndDelete();
+    }
   }
 
   /**
@@ -150,47 +159,52 @@ public class TestMinVersions extends HBa
     // 2s in the past
     long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
 
-    Put p = new Put(T1, ts-2);
-    p.add(c0, c0, T1);
-    region.put(p);
-
-    p = new Put(T1, ts-1);
-    p.add(c0, c0, T2);
-    region.put(p);
-
-    p = new Put(T1, ts);
-    p.add(c0, c0, T3);
-    region.put(p);
-
-    Delete d = new Delete(T1, ts-1, null);
-    region.delete(d, null, true);
-
-    Get g = new Get(T1);
-    g.setMaxVersions();
-    Result r = region.get(g, null);  // this'll use ScanWildcardColumnTracker
-    checkResult(r, c0, T3);
-
-    g = new Get(T1);
-    g.setMaxVersions();
-    g.addColumn(c0, c0);
-    r = region.get(g, null);  // this'll use ExplicitColumnTracker
-    checkResult(r, c0, T3);
-
-    // now flush/compact
-    region.flushcache();
-    region.compactStores(true);
-
-    // try again
-    g = new Get(T1);
-    g.setMaxVersions();
-    r = region.get(g, null);  // this'll use ScanWildcardColumnTracker
-    checkResult(r, c0, T3);
-
-    g = new Get(T1);
-    g.setMaxVersions();
-    g.addColumn(c0, c0);
-    r = region.get(g, null);  // this'll use ExplicitColumnTracker
-    checkResult(r, c0, T3);
+    try {
+      Put p = new Put(T1, ts-2);
+      p.add(c0, c0, T1);
+      region.put(p);
+
+      p = new Put(T1, ts-1);
+      p.add(c0, c0, T2);
+      region.put(p);
+
+      p = new Put(T1, ts);
+      p.add(c0, c0, T3);
+      region.put(p);
+
+      Delete d = new Delete(T1, ts-1, null);
+      region.delete(d, null, true);
+
+      Get g = new Get(T1);
+      g.setMaxVersions();
+      Result r = region.get(g, null);  // this'll use ScanWildcardColumnTracker
+      checkResult(r, c0, T3);
+
+      g = new Get(T1);
+      g.setMaxVersions();
+      g.addColumn(c0, c0);
+      r = region.get(g, null);  // this'll use ExplicitColumnTracker
+      checkResult(r, c0, T3);
+
+      // now flush/compact
+      region.flushcache();
+      region.compactStores(true);
+
+      // try again
+      g = new Get(T1);
+      g.setMaxVersions();
+      r = region.get(g, null);  // this'll use ScanWildcardColumnTracker
+      checkResult(r, c0, T3);
+
+      g = new Get(T1);
+      g.setMaxVersions();
+      g.addColumn(c0, c0);
+      r = region.get(g, null);  // this'll use ExplicitColumnTracker
+      checkResult(r, c0, T3);
+    } finally {
+      region.close();
+      region.getLog().closeAndDelete();
+    }
   }
 
   /**
@@ -203,63 +217,68 @@ public class TestMinVersions extends HBa
     // 2s in the past
     long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
 
-    // 2nd version
-    Put p = new Put(T1, ts-2);
-    p.add(c0, c0, T2);
-    region.put(p);
-
-    // 3rd version
-    p = new Put(T1, ts-1);
-    p.add(c0, c0, T3);
-    region.put(p);
-
-    // 4th version
-    p = new Put(T1, ts);
-    p.add(c0, c0, T4);
-    region.put(p);
-
-    // now flush/compact
-    region.flushcache();
-    region.compactStores(true);
-
-    // now put the first version (backdated)
-    p = new Put(T1, ts-3);
-    p.add(c0, c0, T1);
-    region.put(p);
-
-    // now the latest change is in the memstore,
-    // but it is not the latest version
-
-    Result r = region.get(new Get(T1), null);
-    checkResult(r, c0, T4);
-
-    Get g = new Get(T1);
-    g.setMaxVersions();
-    r = region.get(g, null); // this'll use ScanWildcardColumnTracker
-    checkResult(r, c0, T4,T3);
-
-    g = new Get(T1);
-    g.setMaxVersions();
-    g.addColumn(c0, c0);
-    r = region.get(g, null);  // this'll use ExplicitColumnTracker
-    checkResult(r, c0, T4,T3);
-
-    p = new Put(T1, ts+1);
-    p.add(c0, c0, T5);
-    region.put(p);
-
-    // now the latest version is in the memstore
-
-    g = new Get(T1);
-    g.setMaxVersions();
-    r = region.get(g, null);  // this'll use ScanWildcardColumnTracker
-    checkResult(r, c0, T5,T4);
-
-    g = new Get(T1);
-    g.setMaxVersions();
-    g.addColumn(c0, c0);
-    r = region.get(g, null);  // this'll use ExplicitColumnTracker
-    checkResult(r, c0, T5,T4);
+    try {
+      // 2nd version
+      Put p = new Put(T1, ts-2);
+      p.add(c0, c0, T2);
+      region.put(p);
+
+      // 3rd version
+      p = new Put(T1, ts-1);
+      p.add(c0, c0, T3);
+      region.put(p);
+
+      // 4th version
+      p = new Put(T1, ts);
+      p.add(c0, c0, T4);
+      region.put(p);
+
+      // now flush/compact
+      region.flushcache();
+      region.compactStores(true);
+
+      // now put the first version (backdated)
+      p = new Put(T1, ts-3);
+      p.add(c0, c0, T1);
+      region.put(p);
+
+      // now the latest change is in the memstore,
+      // but it is not the latest version
+
+      Result r = region.get(new Get(T1), null);
+      checkResult(r, c0, T4);
+
+      Get g = new Get(T1);
+      g.setMaxVersions();
+      r = region.get(g, null); // this'll use ScanWildcardColumnTracker
+      checkResult(r, c0, T4,T3);
+
+      g = new Get(T1);
+      g.setMaxVersions();
+      g.addColumn(c0, c0);
+      r = region.get(g, null);  // this'll use ExplicitColumnTracker
+      checkResult(r, c0, T4,T3);
+
+      p = new Put(T1, ts+1);
+      p.add(c0, c0, T5);
+      region.put(p);
+
+      // now the latest version is in the memstore
+
+      g = new Get(T1);
+      g.setMaxVersions();
+      r = region.get(g, null);  // this'll use ScanWildcardColumnTracker
+      checkResult(r, c0, T5,T4);
+
+      g = new Get(T1);
+      g.setMaxVersions();
+      g.addColumn(c0, c0);
+      r = region.get(g, null);  // this'll use ExplicitColumnTracker
+      checkResult(r, c0, T5,T4);
+    } finally {
+      region.close();
+      region.getLog().closeAndDelete();
+    }
   }
 
   /**
@@ -269,83 +288,88 @@ public class TestMinVersions extends HBa
     // 1 version minimum, 1000 versions maximum, ttl = 1s
     HTableDescriptor htd = createTableDescriptor(getName(), 2, 1000, 1, false);
     HRegion region = createNewHRegion(htd, null, null);
+    try {
 
-    // 2s in the past
-    long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
+      // 2s in the past
+      long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
 
-     // 1st version
-    Put p = new Put(T1, ts-3);
-    p.add(c0, c0, T1);
-    region.put(p);
-
-    // 2nd version
-    p = new Put(T1, ts-2);
-    p.add(c0, c0, T2);
-    region.put(p);
-
-    // 3rd version
-    p = new Put(T1, ts-1);
-    p.add(c0, c0, T3);
-    region.put(p);
-
-    // 4th version
-    p = new Put(T1, ts);
-    p.add(c0, c0, T4);
-    region.put(p);
-
-    Result r = region.get(new Get(T1), null);
-    checkResult(r, c0, T4);
-
-    Get g = new Get(T1);
-    g.setTimeRange(0L, ts+1);
-    r = region.get(g, null);
-    checkResult(r, c0, T4);
-
-    // oldest version still exists
-    g.setTimeRange(0L, ts-2);
-    r = region.get(g, null);
-    checkResult(r, c0, T1);
-
-    // gets see only available versions
-    // even before compactions
-    g = new Get(T1);
-    g.setMaxVersions();
-    r = region.get(g, null); // this'll use ScanWildcardColumnTracker
-    checkResult(r, c0, T4,T3);
-
-    g = new Get(T1);
-    g.setMaxVersions();
-    g.addColumn(c0, c0);
-    r = region.get(g, null);  // this'll use ExplicitColumnTracker
-    checkResult(r, c0, T4,T3);
-
-    // now flush
-    region.flushcache();
-
-    // with HBASE-4241 a flush will eliminate the expired rows
-    g = new Get(T1);
-    g.setTimeRange(0L, ts-2);
-    r = region.get(g, null);
-    assertTrue(r.isEmpty());
-
-    // major compaction
-    region.compactStores(true);
-
-    // after compaction the 4th version is still available
-    g = new Get(T1);
-    g.setTimeRange(0L, ts+1);
-    r = region.get(g, null);
-    checkResult(r, c0, T4);
-
-    // so is the 3rd
-    g.setTimeRange(0L, ts);
-    r = region.get(g, null);
-    checkResult(r, c0, T3);
-
-    // but the 2nd and earlier versions are gone
-    g.setTimeRange(0L, ts-1);
-    r = region.get(g, null);
-    assertTrue(r.isEmpty());
+       // 1st version
+      Put p = new Put(T1, ts-3);
+      p.add(c0, c0, T1);
+      region.put(p);
+
+      // 2nd version
+      p = new Put(T1, ts-2);
+      p.add(c0, c0, T2);
+      region.put(p);
+
+      // 3rd version
+      p = new Put(T1, ts-1);
+      p.add(c0, c0, T3);
+      region.put(p);
+
+      // 4th version
+      p = new Put(T1, ts);
+      p.add(c0, c0, T4);
+      region.put(p);
+
+      Result r = region.get(new Get(T1), null);
+      checkResult(r, c0, T4);
+
+      Get g = new Get(T1);
+      g.setTimeRange(0L, ts+1);
+      r = region.get(g, null);
+      checkResult(r, c0, T4);
+
+  // oldest version still exists
+      g.setTimeRange(0L, ts-2);
+      r = region.get(g, null);
+      checkResult(r, c0, T1);
+
+      // gets see only available versions
+      // even before compactions
+      g = new Get(T1);
+      g.setMaxVersions();
+      r = region.get(g, null); // this'll use ScanWildcardColumnTracker
+      checkResult(r, c0, T4,T3);
+
+      g = new Get(T1);
+      g.setMaxVersions();
+      g.addColumn(c0, c0);
+      r = region.get(g, null);  // this'll use ExplicitColumnTracker
+      checkResult(r, c0, T4,T3);
+
+      // now flush
+      region.flushcache();
+
+      // with HBASE-4241 a flush will eliminate the expired rows
+      g = new Get(T1);
+      g.setTimeRange(0L, ts-2);
+      r = region.get(g, null);
+      assertTrue(r.isEmpty());
+
+      // major compaction
+      region.compactStores(true);
+
+      // after compaction the 4th version is still available
+      g = new Get(T1);
+      g.setTimeRange(0L, ts+1);
+      r = region.get(g, null);
+      checkResult(r, c0, T4);
+
+      // so is the 3rd
+      g.setTimeRange(0L, ts);
+      r = region.get(g, null);
+      checkResult(r, c0, T3);
+
+      // but the 2nd and earlier versions are gone
+      g.setTimeRange(0L, ts-1);
+      r = region.get(g, null);
+      assertTrue(r.isEmpty());
+    } finally {
+      region.close();
+      region.getLog().closeAndDelete();
+    }
   }
 
   /**
@@ -359,62 +383,67 @@ public class TestMinVersions extends HBa
 
     // 2s in the past
     long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
+    try {
 
-    Put p = new Put(T1, ts-3);
-    p.add(c0, c0, T0);
-    p.add(c1, c1, T0);
-    region.put(p);
-
-    p = new Put(T1, ts-2);
-    p.add(c0, c0, T1);
-    p.add(c1, c1, T1);
-    region.put(p);
-
-    p = new Put(T1, ts-1);
-    p.add(c0, c0, T2);
-    p.add(c1, c1, T2);
-    region.put(p);
-
-    p = new Put(T1, ts);
-    p.add(c0, c0, T3);
-    p.add(c1, c1, T3);
-    region.put(p);
-
-    List<Long> tss = new ArrayList<Long>();
-    tss.add(ts-1);
-    tss.add(ts-2);
-
-    Get g = new Get(T1);
-    g.addColumn(c1,c1);
-    g.setFilter(new TimestampsFilter(tss));
-    g.setMaxVersions();
-    Result r = region.get(g, null);
-    checkResult(r, c1, T2,T1);
-
-    g = new Get(T1);
-    g.addColumn(c0,c0);
-    g.setFilter(new TimestampsFilter(tss));
-    g.setMaxVersions();
-    r = region.get(g, null);
-    checkResult(r, c0, T2,T1);
-
-    // now flush/compact
-    region.flushcache();
-    region.compactStores(true);
-
-    g = new Get(T1);
-    g.addColumn(c1,c1);
-    g.setFilter(new TimestampsFilter(tss));
-    g.setMaxVersions();
-    r = region.get(g, null);
-    checkResult(r, c1, T2);
-
-    g = new Get(T1);
-    g.addColumn(c0,c0);
-    g.setFilter(new TimestampsFilter(tss));
-    g.setMaxVersions();
-    r = region.get(g, null);
-    checkResult(r, c0, T2);
+      Put p = new Put(T1, ts-3);
+      p.add(c0, c0, T0);
+      p.add(c1, c1, T0);
+      region.put(p);
+
+      p = new Put(T1, ts-2);
+      p.add(c0, c0, T1);
+      p.add(c1, c1, T1);
+      region.put(p);
+
+      p = new Put(T1, ts-1);
+      p.add(c0, c0, T2);
+      p.add(c1, c1, T2);
+      region.put(p);
+
+      p = new Put(T1, ts);
+      p.add(c0, c0, T3);
+      p.add(c1, c1, T3);
+      region.put(p);
+
+      List<Long> tss = new ArrayList<Long>();
+      tss.add(ts-1);
+      tss.add(ts-2);
+
+      Get g = new Get(T1);
+      g.addColumn(c1,c1);
+      g.setFilter(new TimestampsFilter(tss));
+      g.setMaxVersions();
+      Result r = region.get(g, null);
+      checkResult(r, c1, T2,T1);
+
+      g = new Get(T1);
+      g.addColumn(c0,c0);
+      g.setFilter(new TimestampsFilter(tss));
+      g.setMaxVersions();
+      r = region.get(g, null);
+      checkResult(r, c0, T2,T1);
+
+      // now flush/compact
+      region.flushcache();
+      region.compactStores(true);
+
+      g = new Get(T1);
+      g.addColumn(c1,c1);
+      g.setFilter(new TimestampsFilter(tss));
+      g.setMaxVersions();
+      r = region.get(g, null);
+      checkResult(r, c1, T2);
+
+      g = new Get(T1);
+      g.addColumn(c0,c0);
+      g.setFilter(new TimestampsFilter(tss));
+      g.setMaxVersions();
+      r = region.get(g, null);
+      checkResult(r, c0, T2);
+    } finally {
+      region.close();
+      region.getLog().closeAndDelete();
+    }
   }
 
   private void checkResult(Result r, byte[] col, byte[] ... vals) {

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java?rev=1329400&r1=1329399&r2=1329400&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
(original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java
Mon Apr 23 19:40:13 2012
@@ -68,31 +68,34 @@ public class TestResettingCounters {
       }
     }
     HRegion region = HRegion.createHRegion(hri, path, conf, htd);
+    try {
+      Increment odd = new Increment(rows[0]);
+      Increment even = new Increment(rows[0]);
+      Increment all = new Increment(rows[0]);
+      for (int i=0;i<numQualifiers;i++) {
+        if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
+        else odd.addColumn(families[0], qualifiers[i], 1);
+        all.addColumn(families[0], qualifiers[i], 1);
+      }
 
-    Increment odd = new Increment(rows[0]);
-    Increment even = new Increment(rows[0]);
-    Increment all = new Increment(rows[0]);
-    for (int i=0;i<numQualifiers;i++) {
-      if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
-      else odd.addColumn(families[0], qualifiers[i], 1);
-      all.addColumn(families[0], qualifiers[i], 1);
-    }
-
-    // increment odd qualifiers 5 times and flush
-    for (int i=0;i<5;i++) region.increment(odd, null, false);
-    region.flushcache();
-
-    // increment even qualifiers 5 times
-    for (int i=0;i<5;i++) region.increment(even, null, false);
-
-    // increment all qualifiers, should have value=6 for all
-    Result result = region.increment(all, null, false);
-    assertEquals(numQualifiers, result.size());
-    KeyValue [] kvs = result.raw();
-    for (int i=0;i<kvs.length;i++) {
-      System.out.println(kvs[i].toString());
-      assertTrue(Bytes.equals(kvs[i].getQualifier(), qualifiers[i]));
-      assertEquals(6, Bytes.toLong(kvs[i].getValue()));
+      // increment odd qualifiers 5 times and flush
+      for (int i=0;i<5;i++) region.increment(odd, null, false);
+      region.flushcache();
+
+      // increment even qualifiers 5 times
+      for (int i=0;i<5;i++) region.increment(even, null, false);
+
+      // increment all qualifiers, should have value=6 for all
+      Result result = region.increment(all, null, false);
+      assertEquals(numQualifiers, result.size());
+      KeyValue [] kvs = result.raw();
+      for (int i=0;i<kvs.length;i++) {
+        System.out.println(kvs[i].toString());
+        assertTrue(Bytes.equals(kvs[i].getQualifier(), qualifiers[i]));
+        assertEquals(6, Bytes.toLong(kvs[i].getValue()));
+      }
+    } finally {
+      HRegion.closeHRegion(region);
     }
     region.close();
     region.getLog().closeAndDelete();

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java?rev=1329400&r1=1329399&r2=1329400&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
(original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
Mon Apr 23 19:40:13 2012
@@ -319,7 +319,9 @@ public class TestSplitTransaction {
     HColumnDescriptor hcd = new HColumnDescriptor(CF);
     htd.addFamily(hcd);
     HRegionInfo hri = new HRegionInfo(htd.getName(), STARTROW, ENDROW);
-    HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
+    HRegion r = HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
+    r.close();
+    r.getLog().closeAndDelete();
     return HRegion.openHRegion(testdir, hri, htd, wal,
       TEST_UTIL.getConfiguration());
   }

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java?rev=1329400&r1=1329399&r2=1329400&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java
(original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestCloseRegionHandler.java
Mon Apr 23 19:40:13 2012
@@ -99,28 +99,32 @@ public class TestCloseRegionHandler {
     HRegion region =
       HRegion.createHRegion(hri, HTU.getDataTestDir(),
         HTU.getConfiguration(), htd);
-    assertNotNull(region);
-    // Spy on the region so can throw exception when close is called.
-    HRegion spy = Mockito.spy(region);
-    final boolean abort = false;
-    Mockito.when(spy.close(abort)).
-      thenThrow(new RuntimeException("Mocked failed close!"));
-    // The CloseRegionHandler will try to get an HRegion that corresponds
-    // to the passed hri -- so insert the region into the online region Set.
-    rss.addToOnlineRegions(spy);
-    // Assert the Server is NOT stopped before we call close region.
-    assertFalse(server.isStopped());
-    CloseRegionHandler handler =
-      new CloseRegionHandler(server, rss, hri, false, false, -1);
-    boolean throwable = false;
     try {
-      handler.process();
-    } catch (Throwable t) {
-      throwable = true;
+      assertNotNull(region);
+      // Spy on the region so can throw exception when close is called.
+      HRegion spy = Mockito.spy(region);
+      final boolean abort = false;
+      Mockito.when(spy.close(abort)).
+      thenThrow(new RuntimeException("Mocked failed close!"));
+      // The CloseRegionHandler will try to get an HRegion that corresponds
+      // to the passed hri -- so insert the region into the online region Set.
+      rss.addToOnlineRegions(spy);
+      // Assert the Server is NOT stopped before we call close region.
+      assertFalse(server.isStopped());
+      CloseRegionHandler handler =
+          new CloseRegionHandler(server, rss, hri, false, false, -1);
+      boolean throwable = false;
+      try {
+        handler.process();
+      } catch (Throwable t) {
+        throwable = true;
+      } finally {
+        assertTrue(throwable);
+        // Abort calls stop so stopped flag should be set.
+        assertTrue(server.isStopped());
+      }
     } finally {
-      assertTrue(throwable);
-      // Abort calls stop so stopped flag should be set.
-      assertTrue(server.isStopped());
+      HRegion.closeHRegion(region);
     }
   }
   

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java?rev=1329400&r1=1329399&r2=1329400&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
(original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java
Mon Apr 23 19:40:13 2012
@@ -98,30 +98,34 @@ public class TestOpenRegionHandler {
          HRegion.createHRegion(hri, HTU.getDataTestDir(), HTU
             .getConfiguration(), htd);
     assertNotNull(region);
-    OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd) {
-      HRegion openRegion() {
-        // Open region first, then remove znode as though it'd been hijacked.
-        HRegion region = super.openRegion();
-        
-        // Don't actually open region BUT remove the znode as though it'd
-        // been hijacked on us.
-        ZooKeeperWatcher zkw = this.server.getZooKeeper();
-        String node = ZKAssign.getNodeName(zkw, hri.getEncodedName());
-        try {
-          ZKUtil.deleteNodeFailSilent(zkw, node);
-        } catch (KeeperException e) {
-          throw new RuntimeException("Ugh failed delete of " + node, e);
+    try {
+      OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd) {
+        HRegion openRegion() {
+          // Open region first, then remove znode as though it'd been hijacked.
+          HRegion region = super.openRegion();
+
+          // Don't actually open region BUT remove the znode as though it'd
+          // been hijacked on us.
+          ZooKeeperWatcher zkw = this.server.getZooKeeper();
+          String node = ZKAssign.getNodeName(zkw, hri.getEncodedName());
+          try {
+            ZKUtil.deleteNodeFailSilent(zkw, node);
+          } catch (KeeperException e) {
+            throw new RuntimeException("Ugh failed delete of " + node, e);
+          }
+          return region;
         }
-        return region;
-      }
-    };
-    // Call process without first creating OFFLINE region in zk, see if
-    // exception or just quiet return (expected).
-    handler.process();
-    ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName());
-    // Call process again but this time yank the zk znode out from under it
-    // post OPENING; again will expect it to come back w/o NPE or exception.
-    handler.process();
+      };
+      // Call process without first creating OFFLINE region in zk, see if
+      // exception or just quiet return (expected).
+      handler.process();
+      ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName());
+      // Call process again but this time yank the zk znode out from under it
+      // post OPENING; again will expect it to come back w/o NPE or exception.
+      handler.process();
+    } finally {
+      HRegion.closeHRegion(region);
+    }
   }
   
   @Test

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java?rev=1329400&r1=1329399&r2=1329400&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
(original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
Mon Apr 23 19:40:13 2012
@@ -133,7 +133,8 @@ public class TestWALReplay {
     HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
     HRegion region2 = HRegion.createHRegion(hri,
         hbaseRootDir, this.conf, htd);
-
+    region2.close();
+    region2.getLog().closeAndDelete();
     final byte [] tableName = Bytes.toBytes(tableNameStr);
     final byte [] rowName = tableName;
 
@@ -193,6 +194,8 @@ public class TestWALReplay {
     final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
     HRegion region2 = HRegion.createHRegion(hri,
         hbaseRootDir, this.conf, htd);
+    region2.close();
+    region2.getLog().closeAndDelete();
     HLog wal = createWAL(this.conf);
     HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
     Path f =  new Path(basedir, "hfile");
@@ -252,7 +255,8 @@ public class TestWALReplay {
     final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
     HRegion region3 = HRegion.createHRegion(hri,
             hbaseRootDir, this.conf, htd);
-
+    region3.close();
+    region3.getLog().closeAndDelete();
     // Write countPerFamily edits into the three families.  Do a flush on one
     // of the families during the load of edits so its seqid is not same as
     // others to test we do right thing when different seqids.
@@ -369,7 +373,8 @@ public class TestWALReplay {
     final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
     HRegion region3 = HRegion.createHRegion(hri,
             hbaseRootDir, this.conf, htd);
-
+    region3.close();
+    region3.getLog().closeAndDelete();
     // Write countPerFamily edits into the three families.  Do a flush on one
     // of the families during the load of edits so its seqid is not same as
     // others to test we do right thing when different seqids.
@@ -435,7 +440,8 @@ public class TestWALReplay {
     final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
     HRegion region2 = HRegion.createHRegion(hri,
             hbaseRootDir, this.conf, htd);
-
+    region2.close();
+    region2.getLog().closeAndDelete();
     final HLog wal = createWAL(this.conf);
     final byte[] tableName = Bytes.toBytes(tableNameStr);
     final byte[] rowName = tableName;

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java?rev=1329400&r1=1329399&r2=1329400&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java Mon Apr 23 19:40:13
2012
@@ -169,6 +169,13 @@ public class TestMergeTool extends HBase
   @Override
   public void tearDown() throws Exception {
     super.tearDown();
+    for (int i = 0; i < sourceRegions.length; i++) {
+      HRegion r = regions[i];
+      if (r != null) {
+        r.close();
+        r.getLog().closeAndDelete();
+      }
+    }
     TEST_UTIL.shutdownMiniCluster();
   }
 



Mime
View raw message