hadoop-common-commits mailing list archives

From: j...@apache.org
Subject: svn commit: r573492 - in /lucene/hadoop/trunk/src/contrib/hbase: ./ src/test/org/apache/hadoop/hbase/
Date: Fri, 07 Sep 2007 07:28:44 GMT
Author: jimk
Date: Fri Sep  7 00:28:42 2007
New Revision: 573492

URL: http://svn.apache.org/viewvc?rev=573492&view=rev
Log:
HADOOP-1847 Many HBase tests do not fail well.
HADOOP-1793 (Phase 1) Remove TestHClient

Removed:
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHClient.java
Modified:
    lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCompaction.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeMeta.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeTable.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java
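
The diffs below repeat one mechanical refactoring: tests that wrapped their work in try/catch, printed the stack trace, and called fail() now declare the checked exceptions on the method signature instead. The old pattern is why these tests "do not fail well": fail() throws a bare AssertionFailedError, so the original exception never reaches the JUnit report. A minimal sketch of the before/after shape (testSomething and doWork are illustrative names, not code from this commit):

    // Before: the shape this commit removes. The real cause is printed
    // to stdout and then discarded; JUnit sees only a bare failure.
    public void testSomething() {
      try {
        doWork();
      } catch (Exception e) {
        e.printStackTrace();
        fail();
      }
    }

    // After: let the exception propagate. JUnit 3 records it as an
    // error, with the full stack trace of the real cause intact.
    public void testSomething() throws Exception {
      doWork();
    }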

Modified: lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/CHANGES.txt Fri Sep  7 00:28:42 2007
@@ -31,6 +31,7 @@
     HADOOP-1821 Replace all String.getBytes() with String.getBytes("UTF-8")
     HADOOP-1832 listTables() returns duplicate tables
     HADOOP-1834 Scanners ignore timestamp passed on creation
+    HADOOP-1847 Many HBase tests do not fail well.
 
   IMPROVEMENTS
     HADOOP-1737 Make HColumnDescriptor data publically members settable
@@ -39,6 +40,7 @@
                 filter types
     HADOOP-1760 Use new MapWritable and SortedMapWritable classes from
                 org.apache.hadoop.io
+    HADOOP-1793 (Phase 1) Remove TestHClient
     HADOOP-1794 Remove deprecated APIs
     HADOOP-1802 Startup scripts should wait until hdfs as cleared 'safe mode'
     HADOOP-1835 Updated Documentation for HBase setup/installation

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/AbstractMergeTestBase.java Fri Sep  7 00:28:42 2007
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import java.util.Random;
 
 import org.apache.hadoop.dfs.MiniDFSCluster;
@@ -32,7 +31,7 @@
 /** Abstract base class for merge tests */
 public abstract class AbstractMergeTestBase extends HBaseTestCase {
   protected static final Text COLUMN_NAME = new Text("contents:");
-  protected Random rand;
+  protected final Random rand = new Random();
   protected HTableDescriptor desc;
   protected ImmutableBytesWritable value;
 
@@ -46,7 +45,6 @@
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    rand = new Random();
     desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));
     
@@ -57,24 +55,12 @@
     while(val.length() < 1024) {
       val.append(partialValue);
     }
-    try {
-      value = new ImmutableBytesWritable(val.toString().getBytes(HConstants.UTF8_ENCODING));
-      
-    } catch(UnsupportedEncodingException e) {
-      fail();
-    }
+    
+    value = new ImmutableBytesWritable(
+        val.toString().getBytes(HConstants.UTF8_ENCODING));
 
-    try {
-      dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
-      fs = dfsCluster.getFileSystem();
-      dir = new Path("/hbase");
-      fs.mkdirs(dir);
+    dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
       
-    } catch(Throwable t) {
-      t.printStackTrace();
-      fail();
-    }
-
     // We create three data regions: The first is too large to merge since it 
     // will be > 64 MB in size. The second two will be smaller and will be 
     // selected for merging.
@@ -83,6 +69,10 @@
     // least 65536 rows. We will make certain by writing 70000
 
     try {
+      fs = dfsCluster.getFileSystem();
+      dir = new Path("/hbase");
+      fs.mkdirs(dir);
+
       Text row_70001 = new Text("row_70001");
       Text row_80001 = new Text("row_80001");
       
@@ -95,8 +85,11 @@
       // Now create the root and meta regions and insert the data regions
       // created above into the meta
       
-      HRegion root = createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
-      HRegion meta = createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);
+      HRegion root =
+        createNewHRegion(dir, conf, HGlobals.rootTableDesc, 0L, null, null);
+      
+      HRegion meta =
+        createNewHRegion(dir, conf, HGlobals.metaTableDesc, 1L, null, null);
     
       HRegion.addRegionToMETA(root, meta);
       
@@ -109,12 +102,11 @@
       meta.close();
       meta.getLog().closeAndDelete();
       
-    } catch(Throwable t) {
-      t.printStackTrace();
+    } catch (Exception e) {
       if(dfsCluster != null) {
         dfsCluster.shutdown();
       }
-      fail();
+      throw e;
     }
   }
 
@@ -124,13 +116,16 @@
   @Override
   public void tearDown() throws Exception {
     super.tearDown();
-    dfsCluster.shutdown();
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+    }
   }
 
-  private HRegion createAregion(Text startKey, Text endKey, int firstRow, int nrows)
-      throws IOException {
+  private HRegion createAregion(Text startKey, Text endKey, int firstRow,
+      int nrows) throws IOException {
     
-    HRegion region = createNewHRegion(dir, conf, desc, rand.nextLong(), startKey, endKey);
+    HRegion region =
+      createNewHRegion(dir, conf, desc, rand.nextLong(), startKey, endKey);
     
     System.out.println("created region " + region.getRegionName());
 

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/HBaseTestCase.java Fri Sep  7 00:28:42 2007
@@ -70,13 +70,9 @@
   
   @Override
   protected void tearDown() throws Exception {
-    try {
-      if (this.localFs != null && this.testDir != null &&
-          this.localFs.exists(testDir)) {
-        this.localFs.delete(testDir);
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
+    if (this.localFs != null && this.testDir != null &&
+        this.localFs.exists(testDir)) {
+      this.localFs.delete(testDir);
     }
     super.tearDown();
   }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Fri Sep  7 00:28:42 2007
@@ -61,7 +61,8 @@
    * @throws IOException 
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes)
-  throws IOException {
+    throws IOException {
+    
     this(conf, nRegionNodes, true, true, true);
   }
 
@@ -76,6 +77,7 @@
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
       final boolean miniHdfsFilesystem) throws IOException {
+    
     this(conf, nRegionNodes, miniHdfsFilesystem, true, true);
   }
 
@@ -88,8 +90,7 @@
    * @throws IOException 
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
-      MiniDFSCluster dfsCluster)
-  throws IOException {
+      MiniDFSCluster dfsCluster) throws IOException {
 
     this.conf = conf;
     this.cluster = dfsCluster;
@@ -109,34 +110,24 @@
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
       final boolean miniHdfsFilesystem, boolean format, boolean deleteOnExit) 
-  throws IOException {
+    throws IOException {
+    
     this.conf = conf;
     this.deleteOnExit = deleteOnExit;
     if (miniHdfsFilesystem) {
-      try {
-        this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null);
-      } catch(Throwable t) {
-        LOG.error("Failed setup of mini dfs cluster", t);
-        t.printStackTrace();
-        return;
-      }
+      this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null);
     }
     init(nRegionNodes);
   }
 
-  private void init(final int nRegionNodes)
-  throws IOException {
+  private void init(final int nRegionNodes) throws IOException {
     try {
-      try {
-        this.fs = FileSystem.get(conf);
-        this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
-        fs.mkdirs(parentdir);
-      } catch(IOException e) {
-        LOG.error("Failed setup of FileSystem", e);
-        throw e;
-      }
+      this.fs = FileSystem.get(conf);
+      this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
+      fs.mkdirs(parentdir);
       this.masterThread = startMaster(this.conf);
       this.regionThreads = startRegionServers(this.conf, nRegionNodes);
+
     } catch(IOException e) {
       shutdown();
       throw e;
@@ -199,7 +190,8 @@
    * @see #shutdown(org.apache.hadoop.hbase.MiniHBaseCluster.MasterThread, List)
    */
   public static MasterThread startMaster(final Configuration c)
-  throws IOException {
+    throws IOException {
+    
     if(c.get(MASTER_ADDRESS) == null) {
       c.set(MASTER_ADDRESS, "localhost:0");
     }
@@ -222,8 +214,8 @@
    * @see #startMaster(Configuration)
    */
   public static ArrayList<RegionServerThread> startRegionServers(
-    final Configuration c, final int count)
-  throws IOException {
+    final Configuration c, final int count) throws IOException {
+    
     // Start the HRegionServers.  Always have regionservers come up on
     // port '0' so there won't be clashes over default port as unit tests
     // start/stop ports at different times during the life of the test.
@@ -249,8 +241,8 @@
   }
   
   private static RegionServerThread startRegionServer(final Configuration c,
-    final int index)
-  throws IOException {
+    final int index) throws IOException {
+    
     final HRegionServer hsr = new HRegionServer(c);
     RegionServerThread t = new RegionServerThread(hsr, index);
     t.start();
@@ -362,25 +354,32 @@
   }
   
   void shutdown() {
-    shutdown(this.masterThread, this.regionThreads);
-    // Close the file system.  Will complain if files open so helps w/ leaks.
+    MiniHBaseCluster.shutdown(this.masterThread, this.regionThreads);
+    
     try {
-      if (this.cluster != null && this.cluster.getFileSystem() != null) {
-        this.cluster.getFileSystem().close();
+      if (cluster != null) {
+        FileSystem fs = cluster.getFileSystem();
+        
+        LOG.info("Shutting down Mini DFS cluster");
+        cluster.shutdown();
+
+        if (fs != null) {
+          LOG.info("Shutting down FileSystem");
+          fs.close();
+        }
       }
+      
     } catch (IOException e) {
-      LOG.error("Closing down dfs", e);
-    }
-    if(cluster != null) {
-      LOG.info("Shutting down Mini DFS cluster");
-      cluster.shutdown();
+      LOG.error("shutdown", e);
+      
+    } finally {
+      // Delete all DFS files
+      if(deleteOnExit) {
+        deleteFile(new File(System.getProperty(
+            StaticTestEnvironment.TEST_DIRECTORY_KEY), "dfs"));
+      }
     }
 
-    // Delete all DFS files
-    if(deleteOnExit) {
-      deleteFile(new File(System.getProperty(
-          StaticTestEnvironment.TEST_DIRECTORY_KEY), "dfs"));
-    }
   }
 
   private void deleteFile(File f) {
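
One behavioral note on the shutdown() rewrite above: the FileSystem handle is captured before the mini DFS cluster goes down, the IOException is logged rather than swallowed, and the deleteOnExit cleanup moves into a finally block so test directories are removed even when shutdown throws. A stripped-down sketch of that ordering (testDfsDir stands in for the File the real code builds from StaticTestEnvironment.TEST_DIRECTORY_KEY):

    void shutdown() {
      try {
        if (cluster != null) {
          FileSystem fs = cluster.getFileSystem(); // capture before shutdown
          LOG.info("Shutting down Mini DFS cluster");
          cluster.shutdown();
          if (fs != null) {
            fs.close(); // closing last complains about leaked open files
          }
        }
      } catch (IOException e) {
        LOG.error("shutdown", e);
      } finally {
        if (deleteOnExit) {
          deleteFile(testDfsDir); // runs even if the shutdown above fails
        }
      }
    }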

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MultiRegionTable.java Fri Sep  7 00:28:42 2007
@@ -34,11 +34,12 @@
    */
   public static void makeMultiRegionTable(Configuration conf,
       MiniHBaseCluster cluster, FileSystem localFs, String tableName,
-      String columnName)
-  throws IOException {
+      String columnName) throws IOException {
+    
     // This size should make it so we always split using the addContent
     // below.  After adding all data, the first region is 1.3M. Should
     // set max filesize to be <= 1M.
+    
     assertTrue(conf.getLong("hbase.hregion.max.filesize",
       HConstants.DEFAULT_MAX_FILE_SIZE) <= 1024 * 1024);
 
@@ -46,24 +47,33 @@
     Path d = cluster.regionThreads.get(0).getRegionServer().rootDir;
     FileSystem fs = (cluster.getDFSCluster() == null) ?
       localFs : cluster.getDFSCluster().getFileSystem();
-    assertTrue(fs != null);
+    assertNotNull(fs);
 
     // Get connection on the meta table and get count of rows.
+    
     HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
     int count = count(meta, HConstants.COLUMN_FAMILY_STR);
     HTable t = new HTable(conf, new Text(tableName));
     addContent(new HTableLoader(t), columnName);
+    
     // All is running in the one JVM so I should be able to get the single
     // region instance and bring on a split.
+    
     HRegionInfo hri =
       t.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
     HRegion r = cluster.regionThreads.get(0).getRegionServer().
     onlineRegions.get(hri.getRegionName());
+    
     // Flush will provoke a split next time the split-checker thread runs.
+    
     r.flushcache(false);
+    
     // Now, wait until split makes it into the meta table.
-    for (int i = 0; i < retries &&
-    (count(meta, HConstants.COLUMN_FAMILY_STR) <= count); i++) {
+    
+    for (int i = 0;
+      i < retries && (count(meta, HConstants.COLUMN_FAMILY_STR) <= count);
+      i++) {
+      
       try {
         Thread.sleep(5000);
       } catch (InterruptedException e) {
@@ -75,9 +85,11 @@
     if (count <= oldCount) {
       throw new IOException("Failed waiting on splits to show up");
     }
+    
     // Get info on the parent from the meta table.  Pass in 'hri'. Its the
     // region we have been dealing with up to this. Its the parent of the
     // region split.
+    
     Map<Text, byte []> data = getSplitParentInfo(meta, hri);
     HRegionInfo parent =
       Writables.getHRegionInfoOrNull(data.get(HConstants.COL_REGIONINFO));
@@ -92,13 +104,19 @@
     LOG.info("Split happened. Parent is " + parent.getRegionName() +
         " and daughters are " + splitA.getRegionName() + ", " +
         splitB.getRegionName());
+    
     // Recalibrate will cause us to wait on new regions' deployment
+    
     recalibrate(t, new Text(columnName), retries);
+    
     // Compact a region at a time so we can test case where one region has
     // no references but the other still has some
+    
     compact(cluster, splitA);
+    
     // Wait till the parent only has reference to remaining split, one that
     // still has references.
+    
     while (getSplitParentInfo(meta, parent).size() == 3) {
       try {
         Thread.sleep(5000);
@@ -108,21 +126,28 @@
     }
     LOG.info("Parent split returned " +
         getSplitParentInfo(meta, parent).keySet().toString());
+    
     // Call second split.
+    
     compact(cluster, splitB);
+    
     // Now wait until parent disappears.
-    LOG.info("Waiting on parent " + parent.getRegionName() +
-    " to disappear");
-    for (int i = 0; i < retries &&
-    getSplitParentInfo(meta, parent) != null; i++) {
+    
+    LOG.info("Waiting on parent " + parent.getRegionName() + " to disappear");
+    for (int i = 0;
+      i < retries && getSplitParentInfo(meta, parent) != null;
+      i++) {
+      
       try {
         Thread.sleep(5000);
       } catch (InterruptedException e) {
         // continue
       }
     }
-    assertTrue(getSplitParentInfo(meta, parent) == null);
+    assertNull(getSplitParentInfo(meta, parent));
+    
     // Assert cleaned up.
+    
     for (int i = 0; i < retries && fs.exists(parentDir); i++) {
       try {
         Thread.sleep(5000);
@@ -141,7 +166,8 @@
    * @throws IOException
    */
   private static int count(final HTable t, final String column)
-  throws IOException {
+    throws IOException {
+    
     int size = 0;
     Text [] cols = new Text[] {new Text(column)};
     HScannerInterface s = t.obtainScanner(cols, HConstants.EMPTY_START_ROW,
@@ -162,29 +188,29 @@
    * @return Return row info for passed in region or null if not found in scan.
    */
   private static Map<Text, byte []> getSplitParentInfo(final HTable t,
-      final HRegionInfo parent)
-    throws IOException {
-      HScannerInterface s = t.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY,
+      final HRegionInfo parent) throws IOException {
+    
+    HScannerInterface s = t.obtainScanner(HConstants.COLUMN_FAMILY_ARRAY,
         HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null);
-      try {
-        HStoreKey curKey = new HStoreKey();
-        TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
-        while(s.next(curKey, curVals)) {
-          HRegionInfo hri = Writables.
-            getHRegionInfoOrNull(curVals.get(HConstants.COL_REGIONINFO));
-          if (hri == null) {
-            continue;
-          }
-          if (hri.getRegionName().toString().
-              equals(parent.getRegionName().toString())) {
-            return curVals;
-          }
+    try {
+      HStoreKey curKey = new HStoreKey();
+      TreeMap<Text, byte []> curVals = new TreeMap<Text, byte []>();
+      while(s.next(curKey, curVals)) {
+        HRegionInfo hri = Writables.
+        getHRegionInfoOrNull(curVals.get(HConstants.COL_REGIONINFO));
+        if (hri == null) {
+          continue;
         }
-        return null;
-      } finally {
-        s.close();
-      }   
-    }
+        if (hri.getRegionName().toString().
+            equals(parent.getRegionName().toString())) {
+          return curVals;
+        }
+      }
+      return null;
+    } finally {
+      s.close();
+    }   
+  }
 
   /*
    * Recalibrate passed in HTable.  Run after change in region geography.
@@ -199,6 +225,7 @@
    */
   private static void recalibrate(final HTable t, final Text column,
       final int retries) throws IOException {
+    
     for (int i = 0; i < retries; i++) {
       try {
         HScannerInterface s =
@@ -229,14 +256,15 @@
    * @throws IOException
    */
   private static void compact(final MiniHBaseCluster cluster,
-      final HRegionInfo r)
-  throws IOException {
+      final HRegionInfo r) throws IOException {
+    
     LOG.info("Starting compaction");
     for (MiniHBaseCluster.RegionServerThread thread: cluster.regionThreads) {
-      SortedMap<Text, HRegion> regions =
-        thread.getRegionServer().onlineRegions;
+      SortedMap<Text, HRegion> regions = thread.getRegionServer().onlineRegions;
+      
       // Retry if ConcurrentModification... alternative of sync'ing is not
       // worth it for sake of unit test.
+      
       for (int i = 0; i < 10; i++) {
         try {
           for (HRegion online: regions.values()) {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java Fri Sep  7 00:28:42 2007
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.Map;
 import java.util.TreeMap;
@@ -35,8 +36,9 @@
   private HTableDescriptor desc = null;
   private HTable table = null;
 
-  /** constructor 
-   * @throws UnsupportedEncodingException */
+  /**
+   * @throws UnsupportedEncodingException
+   */
   public TestBatchUpdate() throws UnsupportedEncodingException {
     value = "abcd".getBytes(HConstants.UTF8_ENCODING);
   }
@@ -49,19 +51,15 @@
     super.setUp();
     this.desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
-    try {
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      admin.createTable(desc);
-      table = new HTable(conf, desc.getName());
-      
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    table = new HTable(conf, desc.getName());
   }
 
-  /** the test case */
-  public void testBatchUpdate() {
+  /**
+   * @throws IOException
+   */
+  public void testBatchUpdate() throws IOException {
     try {
       table.commit(-1L);
       
@@ -75,36 +73,31 @@
     long lockid = table.startUpdate(new Text("row1"));
     
     try {
-      try {
-        @SuppressWarnings("unused")
-        long dummy = table.startUpdate(new Text("row2"));
-      } catch (IllegalStateException e) {
-        // expected
-      } catch (Exception e) {
-        e.printStackTrace();
-        fail();
-      }
-      table.put(lockid, CONTENTS, value);
-      table.delete(lockid, CONTENTS);
-      table.commit(lockid);
-      
-      lockid = table.startUpdate(new Text("row2"));
-      table.put(lockid, CONTENTS, value);
-      table.commit(lockid);
- 
-      Text[] columns = { CONTENTS };
-      HScannerInterface scanner = table.obtainScanner(columns, new Text());
-      HStoreKey key = new HStoreKey();
-      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-      while(scanner.next(key, results)) {
-        for(Map.Entry<Text, byte[]> e: results.entrySet()) {
-          System.out.println(key + ": row: " + e.getKey() + " value: " + 
-              new String(e.getValue(), HConstants.UTF8_ENCODING));
-        }
-      }
+      @SuppressWarnings("unused")
+      long dummy = table.startUpdate(new Text("row2"));
+    } catch (IllegalStateException e) {
+      // expected
     } catch (Exception e) {
       e.printStackTrace();
       fail();
+    }
+    table.put(lockid, CONTENTS, value);
+    table.delete(lockid, CONTENTS);
+    table.commit(lockid);
+
+    lockid = table.startUpdate(new Text("row2"));
+    table.put(lockid, CONTENTS, value);
+    table.commit(lockid);
+
+    Text[] columns = { CONTENTS };
+    HScannerInterface scanner = table.obtainScanner(columns, new Text());
+    HStoreKey key = new HStoreKey();
+    TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+    while(scanner.next(key, results)) {
+      for(Map.Entry<Text, byte[]> e: results.entrySet()) {
+        System.out.println(key + ": row: " + e.getKey() + " value: " + 
+            new String(e.getValue(), HConstants.UTF8_ENCODING));
+      }
     }
   }
 }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestBloomFilters.java Fri Sep  7 00:28:42 2007
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
@@ -146,50 +147,50 @@
     conf.set("hbase.regionserver.maxlogentries", "90"); // and roll log too
   }
   
-  /** Test that specifies explicit parameters for the bloom filter */
-  public void testExplicitParameters() {
+  /**
+   * Test that specifies explicit parameters for the bloom filter
+   * @throws IOException
+   */
+  public void testExplicitParameters() throws IOException {
     HTable table = null;
-    try {
-      // Setup
-      HTableDescriptor desc = new HTableDescriptor(getName());
-      BloomFilterDescriptor bloomFilter =
-        new BloomFilterDescriptor(              // if we insert 1000 values
-            BloomFilterDescriptor.BloomFilterType.BLOOMFILTER,  // plain old bloom filter
-            12499,                              // number of bits
-            4                                   // number of hash functions
-        );
-            
-      desc.addFamily(
-          new HColumnDescriptor(CONTENTS,               // Column name
-              1,                                        // Max versions
-              HColumnDescriptor.CompressionType.NONE,   // no compression
-              HColumnDescriptor.DEFAULT_IN_MEMORY,      // not in memory
-              HColumnDescriptor.DEFAULT_MAX_VALUE_LENGTH,
-              bloomFilter
-          )
-      );
-      
-      // Create the table
-      
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      admin.createTable(desc);
-      
-      // Open table
-      
-      table = new HTable(conf, desc.getName());
 
-      // Store some values
+    // Setup
+    
+    HTableDescriptor desc = new HTableDescriptor(getName());
+    BloomFilterDescriptor bloomFilter =
+      new BloomFilterDescriptor(              // if we insert 1000 values
+          BloomFilterDescriptor.BloomFilterType.BLOOMFILTER,  // plain old bloom filter
+          12499,                              // number of bits
+          4                                   // number of hash functions
+      );
 
-      for(int i = 0; i < 100; i++) {
-        Text row = rows[i];
-        String value = row.toString();
-        long lockid = table.startUpdate(rows[i]);
-        table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
-        table.commit(lockid);
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
+    desc.addFamily(
+        new HColumnDescriptor(CONTENTS,               // Column name
+            1,                                        // Max versions
+            HColumnDescriptor.CompressionType.NONE,   // no compression
+            HColumnDescriptor.DEFAULT_IN_MEMORY,      // not in memory
+            HColumnDescriptor.DEFAULT_MAX_VALUE_LENGTH,
+            bloomFilter
+        )
+    );
+
+    // Create the table
+
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+
+    // Open table
+
+    table = new HTable(conf, desc.getName());
+
+    // Store some values
+
+    for(int i = 0; i < 100; i++) {
+      Text row = rows[i];
+      String value = row.toString();
+      long lockid = table.startUpdate(rows[i]);
+      table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
+      table.commit(lockid);
     }
     try {
       // Give cache flusher and log roller a chance to run
@@ -201,67 +202,60 @@
     }
 
     
-    try {
-      if (table != null) {
-        for(int i = 0; i < testKeys.length; i++) {
-          byte[] value = table.get(testKeys[i], CONTENTS);
-          if(value != null && value.length != 0) {
-            LOG.info("non existant key: " + testKeys[i] + " returned value: " +
-                new String(value, HConstants.UTF8_ENCODING));
-          }
-        }
+    for(int i = 0; i < testKeys.length; i++) {
+      byte[] value = table.get(testKeys[i], CONTENTS);
+      if(value != null && value.length != 0) {
+        LOG.info("non existant key: " + testKeys[i] + " returned value: " +
+            new String(value, HConstants.UTF8_ENCODING));
       }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
     }
   }
   
-  /** Test that uses computed for the bloom filter */
-  public void testComputedParameters() {
+  /**
+   * Test that uses computed for the bloom filter
+   * @throws IOException
+   */
+  public void testComputedParameters() throws IOException {
     HTable table = null;
-    try {
-      // Setup
-      HTableDescriptor desc = new HTableDescriptor(getName());
-      
-      BloomFilterDescriptor bloomFilter =
-        new BloomFilterDescriptor(
-            BloomFilterDescriptor.BloomFilterType.BLOOMFILTER,  // plain old bloom filter
-            1000                                  // estimated number of entries
-        );
-      LOG.info("vector size: " + bloomFilter.vectorSize);
+
+    // Setup
+    
+    HTableDescriptor desc = new HTableDescriptor(getName());
       
-      desc.addFamily(
-          new HColumnDescriptor(CONTENTS,               // Column name
-              1,                                        // Max versions
-              HColumnDescriptor.CompressionType.NONE,   // no compression
-              HColumnDescriptor.DEFAULT_IN_MEMORY,      // not in memory
-              HColumnDescriptor.DEFAULT_MAX_VALUE_LENGTH,
-              bloomFilter
-          )
+    BloomFilterDescriptor bloomFilter =
+      new BloomFilterDescriptor(
+          BloomFilterDescriptor.BloomFilterType.BLOOMFILTER,  // plain old bloom filter
+          1000                                  // estimated number of entries
       );
-      
-      // Create the table
-      
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      admin.createTable(desc);
-      
-      // Open table
-      
-      table = new HTable(conf, desc.getName());
-
-      // Store some values
+    LOG.info("vector size: " + bloomFilter.vectorSize);
 
-      for(int i = 0; i < 100; i++) {
-        Text row = rows[i];
-        String value = row.toString();
-        long lockid = table.startUpdate(rows[i]);
-        table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
-        table.commit(lockid);
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
+    desc.addFamily(
+        new HColumnDescriptor(CONTENTS,               // Column name
+            1,                                        // Max versions
+            HColumnDescriptor.CompressionType.NONE,   // no compression
+            HColumnDescriptor.DEFAULT_IN_MEMORY,      // not in memory
+            HColumnDescriptor.DEFAULT_MAX_VALUE_LENGTH,
+            bloomFilter
+        )
+    );
+
+    // Create the table
+
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+
+    // Open table
+
+    table = new HTable(conf, desc.getName());
+
+    // Store some values
+
+    for(int i = 0; i < 100; i++) {
+      Text row = rows[i];
+      String value = row.toString();
+      long lockid = table.startUpdate(rows[i]);
+      table.put(lockid, CONTENTS, value.getBytes(HConstants.UTF8_ENCODING));
+      table.commit(lockid);
     }
     try {
       // Give cache flusher and log roller a chance to run
@@ -272,19 +266,12 @@
       // ignore
     }
     
-    try {
-      if (table != null) {
-        for(int i = 0; i < testKeys.length; i++) {
-          byte[] value = table.get(testKeys[i], CONTENTS);
-          if(value != null && value.length != 0) {
-            LOG.info("non existant key: " + testKeys[i] + " returned value: " +
-                new String(value, HConstants.UTF8_ENCODING));
-          }
-        }
+    for(int i = 0; i < testKeys.length; i++) {
+      byte[] value = table.get(testKeys[i], CONTENTS);
+      if(value != null && value.length != 0) {
+        LOG.info("non existant key: " + testKeys[i] + " returned value: " +
+            new String(value, HConstants.UTF8_ENCODING));
       }
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
     }
   }
 }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCompaction.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCompaction.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCompaction.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCompaction.java Fri Sep  7 00:28:42 2007
@@ -30,13 +30,15 @@
 public class TestCompaction extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
 
+  /** {@inheritDoc} */
   @Override
-  protected void setUp() throws Exception {
+  public void setUp() throws Exception {
     super.setUp();
   }
   
+  /** {@inheritDoc} */
   @Override
-  protected void tearDown() throws Exception {
+  public void tearDown() throws Exception {
     super.tearDown();
   }
   

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java Fri Sep  7 00:28:42 2007
@@ -172,10 +172,6 @@
       r.close();
       log.closeAndDelete();
       
-    } catch(IOException e) {
-      e.printStackTrace();
-      throw e;
-      
     } finally {
       if(cluster != null) {
         cluster.shutdown();

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHLog.java Fri Sep  7 00:28:42 2007
@@ -37,70 +37,67 @@
     super.setUp();
   }
   
-  /** The test */
-  public void testAppend() {
+  /**
+   * @throws IOException
+   */
+  public void testAppend() throws IOException {
+    Path dir = getUnitTestdir(getName());
+    FileSystem fs = FileSystem.get(this.conf);
+    if (fs.exists(dir)) {
+      fs.delete(dir);
+    }
+    final int COL_COUNT = 10;
+    final Text regionName = new Text("regionname");
+    final Text tableName = new Text("tablename");
+    final Text row = new Text("row");
+    Reader reader = null;
+    HLog log = new HLog(fs, dir, this.conf);
     try {
-      Path dir = getUnitTestdir(getName());
-      FileSystem fs = FileSystem.get(this.conf);
+      // Write columns named 1, 2, 3, etc. and then values of single byte
+      // 1, 2, 3...
+      TreeMap<Text, byte []> cols = new TreeMap<Text, byte []>();
+      for (int i = 0; i < COL_COUNT; i++) {
+        cols.put(new Text(Integer.toString(i)),
+            new byte[] { (byte)(i + '0') });
+      }
+      long timestamp = System.currentTimeMillis();
+      log.append(regionName, tableName, row, cols, timestamp);
+      long logSeqId = log.startCacheFlush();
+      log.completeCacheFlush(regionName, tableName, logSeqId);
+      log.close();
+      Path filename = log.computeFilename(log.filenum - 1);
+      log = null;
+      // Now open a reader on the log and assert append worked.
+      reader = new SequenceFile.Reader(fs, filename, conf);
+      HLogKey key = new HLogKey();
+      HLogEdit val = new HLogEdit();
+      for (int i = 0; i < COL_COUNT; i++) {
+        reader.next(key, val);
+        assertEquals(regionName, key.getRegionName());
+        assertEquals(tableName, key.getTablename());
+        assertEquals(row, key.getRow());
+        assertEquals((byte)(i + '0'), val.getVal()[0]);
+        System.out.println(key + " " + val);
+      }
+      while (reader.next(key, val)) {
+        // Assert only one more row... the meta flushed row.
+        assertEquals(regionName, key.getRegionName());
+        assertEquals(tableName, key.getTablename());
+        assertEquals(HLog.METAROW, key.getRow());
+        assertEquals(HLog.METACOLUMN, val.getColumn());
+        assertEquals(0, HGlobals.completeCacheFlush.compareTo(val.getVal()));
+        System.out.println(key + " " + val);
+      }
+    } finally {
+      if (log != null) {
+        log.closeAndDelete();
+      }
+      if (reader != null) {
+        reader.close();
+      }
       if (fs.exists(dir)) {
         fs.delete(dir);
       }
-      final int COL_COUNT = 10;
-      final Text regionName = new Text("regionname");
-      final Text tableName = new Text("tablename");
-      final Text row = new Text("row");
-      Reader reader = null;
-      HLog log = new HLog(fs, dir, this.conf);
-      try {
-        // Write columns named 1, 2, 3, etc. and then values of single byte
-        // 1, 2, 3...
-        TreeMap<Text, byte []> cols = new TreeMap<Text, byte []>();
-        for (int i = 0; i < COL_COUNT; i++) {
-          cols.put(new Text(Integer.toString(i)),
-            new byte[] { (byte)(i + '0') });
-        }
-        long timestamp = System.currentTimeMillis();
-        log.append(regionName, tableName, row, cols, timestamp);
-        long logSeqId = log.startCacheFlush();
-        log.completeCacheFlush(regionName, tableName, logSeqId);
-        log.close();
-        Path filename = log.computeFilename(log.filenum - 1);
-        log = null;
-        // Now open a reader on the log and assert append worked.
-        reader = new SequenceFile.Reader(fs, filename, conf);
-        HLogKey key = new HLogKey();
-        HLogEdit val = new HLogEdit();
-        for (int i = 0; i < COL_COUNT; i++) {
-          reader.next(key, val);
-          assertEquals(regionName, key.getRegionName());
-          assertEquals(tableName, key.getTablename());
-          assertEquals(row, key.getRow());
-          assertEquals((byte)(i + '0'), val.getVal()[0]);
-          System.out.println(key + " " + val);
-        }
-        while (reader.next(key, val)) {
-          // Assert only one more row... the meta flushed row.
-          assertEquals(regionName, key.getRegionName());
-          assertEquals(tableName, key.getTablename());
-          assertEquals(HLog.METAROW, key.getRow());
-          assertEquals(HLog.METACOLUMN, val.getColumn());
-          assertEquals(0, HGlobals.completeCacheFlush.compareTo(val.getVal()));
-          System.out.println(key + " " + val);
-        }
-      } finally {
-        if (log != null) {
-          log.closeAndDelete();
-        }
-        if (reader != null) {
-          reader.close();
-        }
-        if (fs.exists(dir)) {
-          fs.delete(dir);
-        }
-      }
-    } catch(IOException e) {
-      e.printStackTrace();
-      fail();
     }
   }
 

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHMemcache.java Fri Sep  7 00:28:42 2007
@@ -21,7 +21,6 @@
 
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.TreeMap;
 
@@ -46,11 +45,9 @@
   
   private static final String COLUMN_FAMILY = "column";
 
-  /* (non-Javadoc)
-   * @see junit.framework.TestCase#setUp()
-   */
+  /** {@inheritDoc} */
   @Override
-  protected void setUp() throws Exception {
+  public void setUp() throws Exception {
     super.setUp();
     this.hmemcache = new HMemcache();
     // Set up a configuration that has configuration for a file
@@ -58,11 +55,9 @@
     this.conf = new HBaseConfiguration();
   }
 
-  /* (non-Javadoc)
-   * @see junit.framework.TestCase#tearDown()
-   */
+  /** {@inheritDoc} */
   @Override
-  protected void tearDown() throws Exception {
+  public void tearDown() throws Exception {
     super.tearDown();
   }
 
@@ -70,10 +65,8 @@
     return new Text("row" + Integer.toString(index));
   }
 
-  private Text getColumnName(final int rowIndex,
-      final int colIndex) {
-    return new Text(COLUMN_FAMILY + ":" +
-        Integer.toString(rowIndex) + ";" +
+  private Text getColumnName(final int rowIndex, final int colIndex) {
+    return new Text(COLUMN_FAMILY + ":" + Integer.toString(rowIndex) + ";" +
         Integer.toString(colIndex));
   }
 
@@ -81,16 +74,12 @@
    * Adds {@link #ROW_COUNT} rows and {@link #COLUMNS_COUNT}
    * @param hmc Instance to add rows to.
    */
-  private void addRows(final HMemcache hmc) {
+  private void addRows(final HMemcache hmc) throws UnsupportedEncodingException {
     for (int i = 0; i < ROW_COUNT; i++) {
       TreeMap<Text, byte []> columns = new TreeMap<Text, byte []>();
       for (int ii = 0; ii < COLUMNS_COUNT; ii++) {
         Text k = getColumnName(i, ii);
-        try {
-          columns.put(k, k.toString().getBytes(HConstants.UTF8_ENCODING));
-        } catch (UnsupportedEncodingException e) {
-          fail();
-        }
+        columns.put(k, k.toString().getBytes(HConstants.UTF8_ENCODING));
       }
       hmc.add(getRowName(i), columns, System.currentTimeMillis());
     }
@@ -98,8 +87,8 @@
 
   private HLog getLogfile() throws IOException {
     // Create a log file.
-    Path testDir = new Path(conf.get("hadoop.tmp.dir", System
-        .getProperty("java.tmp.dir")), "hbase");
+    Path testDir = new Path(conf.get("hadoop.tmp.dir", 
+        System.getProperty("java.tmp.dir")), "hbase");
     Path logFile = new Path(testDir, this.getName());
     FileSystem fs = testDir.getFileSystem(conf);
     // Cleanup any old log file.
@@ -110,7 +99,8 @@
   }
 
   private Snapshot runSnapshot(final HMemcache hmc, final HLog log)
-      throws IOException {
+    throws IOException {
+    
     // Save off old state.
     int oldHistorySize = hmc.history.size();
     TreeMap<HStoreKey, byte []> oldMemcache = hmc.memcache;
@@ -151,12 +141,12 @@
     log.closeAndDelete();
   }
   
-  private void isExpectedRow(final int rowIndex,
-      TreeMap<Text, byte []> row) throws UnsupportedEncodingException {
+  private void isExpectedRow(final int rowIndex, TreeMap<Text, byte []> row)
+    throws UnsupportedEncodingException {
+    
     int i = 0;
     for (Text colname: row.keySet()) {
-      String expectedColname =
-        getColumnName(rowIndex, i++).toString();
+      String expectedColname = getColumnName(rowIndex, i++).toString();
       String colnameStr = colname.toString();
       assertEquals("Column name", colnameStr, expectedColname);
       // Value is column name as bytes.  Usually result is
@@ -204,9 +194,7 @@
       assertEquals("Count of columns", COLUMNS_COUNT,
           results.size());
       TreeMap<Text, byte []> row = new TreeMap<Text, byte []>();
-      for(Iterator<Map.Entry<Text, byte []>> it = results.entrySet().iterator();
-          it.hasNext(); ) {
-        Map.Entry<Text, byte []> e = it.next();
+      for(Map.Entry<Text, byte []> e: results.entrySet() ) {
         row.put(e.getKey(), e.getValue());
       }
       isExpectedRow(i, row);

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java Fri Sep  7 00:28:42 2007
@@ -587,7 +587,7 @@
   }
 
   // NOTE: This test depends on testBatchWrite succeeding
-  void splitAndMerge() throws IOException {
+  private void splitAndMerge() throws IOException {
     Text midKey = new Text();
 
     if(region.needsSplit(midKey)) {
@@ -829,8 +829,10 @@
     } catch (IOException e) {
       e.printStackTrace();
     }
-    cluster.shutdown();
-    cluster = null;
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
 
     // Delete all the DFS files
 

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java Fri Sep  7 00:28:42 2007
@@ -38,17 +38,11 @@
     admin = null;
   }
   
-  /** the test */
-  public void testMasterAdmin() {
-    try {
-      admin = new HBaseAdmin(conf);
-      admin.createTable(testDesc);
-      admin.disableTable(testDesc.getName());
-      
-    } catch(Exception e) {
-      e.printStackTrace();
-      fail();
-    }
+  /** @throws Exception */
+  public void testMasterAdmin() throws Exception {
+    admin = new HBaseAdmin(conf);
+    admin.createTable(testDesc);
+    admin.disableTable(testDesc.getName());
 
     try {
       try {
@@ -76,13 +70,7 @@
       fail();
       
     } finally {
-      try {
-        admin.deleteTable(testDesc.getName());
-        
-      } catch(Exception e) {
-        e.printStackTrace();
-        fail();
-      }
+      admin.deleteTable(testDesc.getName());
     }
   }
 }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeMeta.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeMeta.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeMeta.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeMeta.java Fri Sep  7 00:28:42 2007
@@ -19,19 +19,17 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.io.IOException;
+
 /** Tests region merging */
 public class TestMergeMeta extends AbstractMergeTestBase {
   
   /**
    * test case
+   * @throws IOException
    */
-  public void testMergeMeta() {
-    try {
-      HMerge.merge(conf, fs, HConstants.META_TABLE_NAME);
-      
-    } catch(Throwable t) {
-      t.printStackTrace();
-      fail();
-    }
+  public void testMergeMeta() throws IOException {
+    assertNotNull(dfsCluster);
+    HMerge.merge(conf, fs, HConstants.META_TABLE_NAME);
   }
 }  

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeTable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeTable.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeTable.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMergeTable.java Fri Sep  7 00:28:42 2007
@@ -31,6 +31,7 @@
    * @throws IOException
    */
   public void testMergeTable() throws IOException {
+    assertNotNull(dfsCluster);
     MiniHBaseCluster hCluster = new MiniHBaseCluster(conf, 1, dfsCluster);
     try {
       HMerge.merge(conf, fs, desc.getName());

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java Fri Sep  7 00:28:42 2007
@@ -40,15 +40,9 @@
     super.setUp();
     this.desc = new HTableDescriptor("test");
     desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
-    try {
-      HBaseAdmin admin = new HBaseAdmin(conf);
-      admin.createTable(desc);
-      table = new HTable(conf, desc.getName());
-      
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(desc);
+    table = new HTable(conf, desc.getName());
   }
 
   /** the test */

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java?rev=573492&r1=573491&r2=573492&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestScanner.java Fri Sep  7 00:28:42 2007
@@ -108,10 +108,6 @@
           results.clear();
         }
 
-      } catch(IOException e) {
-        e.printStackTrace();
-        throw e;
-      
       } finally {
         if(scanner != null) {
           scanner.close();
@@ -258,9 +254,6 @@
       
       region.close();
       log.closeAndDelete();
-    } catch(IOException e) {
-      e.printStackTrace();
-      throw e;
       
     } finally {
       if(cluster != null) {


