hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jmhs...@apache.org
Subject svn commit: r1304668 [3/3] - in /hbase/branches/0.90: ./ src/main/java/org/apache/hadoop/hbase/ipc/ src/main/java/org/apache/hadoop/hbase/master/ src/main/java/org/apache/hadoop/hbase/util/ src/main/java/org/apache/hadoop/hbase/util/hbck/ src/test/java...
Date Fri, 23 Mar 2012 23:54:49 GMT
Modified: hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java?rev=1304668&r1=1304667&r2=1304668&view=diff
==============================================================================
--- hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java (original)
+++ hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java Fri
Mar 23 23:54:48 2012
@@ -21,26 +21,41 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Map;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.master.AssignmentManager.RegionState;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
 import org.apache.zookeeper.KeeperException;
 
+/**
+ * This class contains helper methods that repair parts of hbase's filesystem
+ * contents.
+ */
 public class HBaseFsckRepair {
+  public static final Log LOG = LogFactory.getLog(HBaseFsckRepair.class);
 
   /**
    * Fix dupe assignment by doing silent closes on each RS hosting the region
    * and then force ZK unassigned node to OFFLINE to trigger assignment by
    * master.
+   *
    * @param conf
    * @param region
    * @param servers
@@ -48,46 +63,55 @@ public class HBaseFsckRepair {
    * @throws KeeperException
    * @throws InterruptedException
    */
-  public static void fixDupeAssignment(Configuration conf, HRegionInfo region,
-      List<HServerAddress> servers)
-  throws IOException, KeeperException, InterruptedException {
+  public static void fixMultiAssignment(HBaseAdmin admin, HRegionInfo region,
+      List<HServerAddress> servers) throws IOException, KeeperException,
+      InterruptedException {
 
     HRegionInfo actualRegion = new HRegionInfo(region);
 
     // Close region on the servers silently
-    for(HServerAddress server : servers) {
-      closeRegionSilentlyAndWait(conf, server, actualRegion);
+    for (HServerAddress server : servers) {
+      closeRegionSilentlyAndWait(admin, server, actualRegion);
     }
 
     // Force ZK node to OFFLINE so master assigns
-    forceOfflineInZK(conf, actualRegion);
+    forceOfflineInZK(admin, actualRegion);
   }
 
   /**
    * Fix unassigned by creating/transition the unassigned ZK node for this
-   * region to OFFLINE state with a special flag to tell the master that this
-   * is a forced operation by HBCK.
+   * region to OFFLINE state with a special flag to tell the master that this is
+   * a forced operation by HBCK.
+   *
+   * This assumes that info is in META.
+   *
    * @param conf
    * @param region
    * @throws IOException
    * @throws KeeperException
    */
-  public static void fixUnassigned(Configuration conf, HRegionInfo region)
-  throws IOException, KeeperException {
+  public static void fixUnassigned(HBaseAdmin admin, HRegionInfo region)
+      throws IOException, KeeperException {
     HRegionInfo actualRegion = new HRegionInfo(region);
 
     // Force ZK node to OFFLINE so master assigns
-    forceOfflineInZK(conf, actualRegion);
+    forceOfflineInZK(admin, actualRegion);
   }
 
-  private static void forceOfflineInZK(Configuration conf, final HRegionInfo region)
+  /**
+   * This forces an HRI offline by setting the RegionTransitionData in ZK to
+   * have HBCK_CODE_NAME as the server.  This is a special case in the
+   * AssignmentManager that attempts an assign call by the master.
+   *
+   * @see org.apache.hadoop.hbase.master.AssignmentManager#handleHBCK
+   */
+  private static void forceOfflineInZK(HBaseAdmin admin, final HRegionInfo region)
   throws ZooKeeperConnectionException, KeeperException, IOException {
-    HConnectionManager.execute(new HConnectable<Void>(conf) {
+    HConnectionManager.execute(new HConnectable<Void>(admin.getConfiguration()) {
       @Override
       public Void connect(HConnection connection) throws IOException {
         try {
-          ZKAssign.createOrForceNodeOffline(
-              connection.getZooKeeperWatcher(),
+          ZKAssign.createOrForceNodeOffline(connection.getZooKeeperWatcher(),
               region, HConstants.HBCK_CODE_NAME);
         } catch (KeeperException ke) {
           throw new IOException(ke);
@@ -97,40 +121,97 @@ public class HBaseFsckRepair {
     });
   }
 
-  protected static void closeRegionSilentlyAndWait(Configuration conf,
-      HServerAddress server, HRegionInfo region)
-  throws IOException, InterruptedException {
-
-    HConnection connection = HConnectionManager.getConnection(conf);
-    boolean success = false;
+  /*
+   * Should we check all assignments or just not in RIT?
+   */
+  public static void waitUntilAssigned(HBaseAdmin admin,
+      HRegionInfo region) throws IOException, InterruptedException {
+    HConnection connection = admin.getConnection();
 
     try {
-      HRegionInterface rs = connection.getHRegionConnection(server);
-      rs.closeRegion(region, false);
-      long timeout = conf.getLong("hbase.hbck.close.timeout", 120000);
+      long timeout = admin.getConfiguration().getLong("hbase.hbck.assign.timeout", 120000);
       long expiration = timeout + System.currentTimeMillis();
       while (System.currentTimeMillis() < expiration) {
         try {
-          HRegionInfo rsRegion = rs.getRegionInfo(region.getRegionName());
-          if (rsRegion == null)
-            throw new NotServingRegionException();
-        } catch (Exception e) {
-          success = true;
-          return;
+          Map<String, RegionState> rits=
+            admin.getClusterStatus().getRegionsInTransition();
+
+          if (rits.keySet() != null && !rits.keySet().contains(region.getEncodedName())) {
+            // yay! no longer RIT
+            return;
+          }
+          // still in rit
+          LOG.info("Region still in transition, waiting for "
+              + "it to become assigned: " + region);
+        } catch (IOException e) {
+          LOG.warn("Exception when waiting for region to become assigned,"
+              + " retrying", e);
         }
         Thread.sleep(1000);
       }
-      throw new IOException("Region " + region + " failed to close within"
-          + " timeout " + timeout);
-
+      throw new IOException("Region " + region + " failed to move out of " +
+          "transition within timeout " + timeout + "ms");
     } finally {
       try {
         connection.close();
       } catch (IOException ioe) {
-        if (success) {
-          throw ioe;
-        }
+        throw ioe;
       }
     }
   }
+
+  /**
+   * Contacts a region server and waits up to hbase.hbck.close.timeout ms
+   * (default 120s) to close the region.  This bypasses the active hmaster.
+   */
+  public static void closeRegionSilentlyAndWait(HBaseAdmin admin,
+      HServerAddress server, HRegionInfo region) throws IOException, InterruptedException
{
+    HConnection connection = admin.getConnection();
+    HRegionInterface rs = connection.getHRegionConnection(server);
+    rs.closeRegion(region, false);
+    long timeout = admin.getConfiguration()
+      .getLong("hbase.hbck.close.timeout", 120000);
+    long expiration = timeout + System.currentTimeMillis();
+    while (System.currentTimeMillis() < expiration) {
+      try {
+        HRegionInfo rsRegion = rs.getRegionInfo(region.getRegionName());
+        if (rsRegion == null)
+          return;
+      } catch (IOException ioe) {
+        return;
+      }
+      Thread.sleep(1000);
+    }
+    throw new IOException("Region " + region + " failed to close within"
+        + " timeout " + timeout);
+  }
+
+  /**
+   * Puts the specified HRegionInfo into META.
+   */
+  public static void fixMetaHoleOnline(Configuration conf,
+      HRegionInfo hri) throws IOException {
+    Put p = new Put(hri.getRegionName());
+    p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+        Writables.getBytes(hri));
+    HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
+    meta.put(p);
+    meta.close();
+  }
+
+  /**
+   * Creates, flushes, and closes a new hdfs region dir
+   */
+  public static HRegion createHDFSRegionDir(Configuration conf,
+      HRegionInfo hri) throws IOException {
+    // Create HRegion
+    Path root = FSUtils.getRootDir(conf);
+    HRegion region = HRegion.createHRegion(hri, root, conf);
+    HLog hlog = region.getLog();
+
+    // Close the new region to flush to disk. Close log file too.
+    region.close();
+    hlog.closeAndDelete();
+    return region;
+  }
 }

Modified: hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java?rev=1304668&r1=1304667&r2=1304668&view=diff
==============================================================================
--- hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java
(original)
+++ hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java
Fri Mar 23 23:54:48 2012
@@ -40,7 +40,6 @@ import org.apache.hadoop.io.MultipleIOEx
  */
 public class OfflineMetaRepair {
   private static final Log LOG = LogFactory.getLog(HBaseFsck.class.getName());
-  HBaseFsck fsck;
 
   protected static void printUsageAndExit() {
     System.err.println("Usage: OfflineMetaRepair [opts] ");
@@ -68,7 +67,7 @@ public class OfflineMetaRepair {
     for (int i = 0; i < args.length; i++) {
       String cmd = args[i];
       if (cmd.equals("-details")) {
-        fsck.displayFullReport();
+        fsck.setDisplayFullReport();
       } else if (cmd.equals("-base")) {
         // update hbase root dir to user-specified base
         i++;

Modified: hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1304668&r1=1304667&r2=1304668&view=diff
==============================================================================
--- hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Fri
Mar 23 23:54:48 2012
@@ -881,7 +881,7 @@ public class HBaseTestingUtility {
       HTableDescriptor desc = info.getTableDesc();
       if (Bytes.compareTo(desc.getName(), tableName) == 0) {
         LOG.info("getMetaTableRows: row -> " +
-            Bytes.toStringBinary(result.getRow()));
+            Bytes.toStringBinary(result.getRow()) + info);
         rows.add(result.getRow());
       }
     }

Modified: hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java?rev=1304668&r1=1304667&r2=1304668&view=diff
==============================================================================
--- hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java (original)
+++ hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java Fri
Mar 23 23:54:48 2012
@@ -23,8 +23,12 @@ import static org.apache.hadoop.hbase.ut
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertNoErrors;
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.doFsck;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -32,18 +36,26 @@ import java.util.Map.Entry;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
 import org.apache.zookeeper.KeeperException;
 import org.junit.AfterClass;
@@ -54,16 +66,20 @@ import org.junit.Test;
  * This tests HBaseFsck's ability to detect reasons for inconsistent tables.
  */
 public class TestHBaseFsck {
-  final Log LOG = LogFactory.getLog(getClass());
+  final static Log LOG = LogFactory.getLog(TestHBaseFsck.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final static Configuration conf = TEST_UTIL.getConfiguration();
   private final static byte[] FAM = Bytes.toBytes("fam");
 
   // for the instance, reset every test run
   private HTable tbl;
-  private final static byte[][] splits= new byte[][] { Bytes.toBytes("A"), 
+  private final static byte[][] SPLITS = new byte[][] { Bytes.toBytes("A"),
     Bytes.toBytes("B"), Bytes.toBytes("C") };
-  
+  // one row per region.
+  private final static byte[][] ROWKEYS= new byte[][] {
+    Bytes.toBytes("00"), Bytes.toBytes("50"), Bytes.toBytes("A0"), Bytes.toBytes("A5"),
+    Bytes.toBytes("B0"), Bytes.toBytes("B5"), Bytes.toBytes("C0"), Bytes.toBytes("C5") };
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.startMiniCluster(3);
@@ -74,6 +90,10 @@ public class TestHBaseFsck {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  
+  /**
+   * Create a new region in META.
+   */
   private HRegionInfo createRegion(Configuration conf, final HTableDescriptor
       htd, byte[] startKey, byte[] endKey)
       throws IOException {
@@ -86,47 +106,102 @@ public class TestHBaseFsck {
     return hri;
   }
 
-  public void dumpMeta(HTableDescriptor htd) throws IOException {
-    List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(htd.getName());
+  /**
+   * Debugging method to dump the contents of meta.
+   */
+  private void dumpMeta(byte[] tableName) throws IOException {
+    List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(tableName);
     for (byte[] row : metaRows) {
       LOG.info(Bytes.toString(row));
     }
   }
 
-  private void deleteRegion(Configuration conf, final HTableDescriptor htd, 
-      byte[] startKey, byte[] endKey) throws IOException {
+  /**
+   * Delete a region from assignments, meta, or completely from hdfs.
+   * @param unassign if true unassign region if assigned
+   * @param metaRow  if true remove region's row from META
+   * @param hdfs if true remove region's dir in HDFS
+   */
+  private void deleteRegion(Configuration conf, final HTableDescriptor htd,
+      byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow,
+      boolean hdfs) throws IOException, InterruptedException {
+    deleteRegion(conf, htd, startKey, endKey, unassign, metaRow, hdfs, false);
+  }
 
-    LOG.info("Before delete:");
-    dumpMeta(htd);
+  /**
+   * This method is used to undeploy a region -- close it and attempt to
+   * remove its state from the Master. 
+   */
+  private void undeployRegion(HBaseAdmin admin, HServerAddress hsa,
+      HRegionInfo hri) throws IOException, InterruptedException {
+    try {
+      HBaseFsckRepair.closeRegionSilentlyAndWait(admin, hsa, hri);
+      admin.getMaster().offline(hri.getRegionName());
+    } catch (IOException ioe) {
+      LOG.warn("Got exception when attempting to offline region " 
+          + Bytes.toString(hri.getRegionName()), ioe);
+    }
+  }
+  /**
+   * Delete a region from assignments, meta, or completely from hdfs.
+   * @param unassign if true unassign region if assigned
+   * @param metaRow  if true remove region's row from META
+   * @param hdfs if true remove region's dir in HDFS
+   * @param regionInfoOnly if true remove a region dir's .regioninfo file
+   */
+  private void deleteRegion(Configuration conf, final HTableDescriptor htd,
+      byte[] startKey, byte[] endKey, boolean unassign, boolean metaRow,
+      boolean hdfs, boolean regionInfoOnly) throws IOException, InterruptedException {
+    LOG.info("** Before delete:");
+    dumpMeta(htd.getName());
 
     Map<HRegionInfo, HServerAddress> hris = tbl.getRegionsInfo();
     for (Entry<HRegionInfo, HServerAddress> e: hris.entrySet()) {
       HRegionInfo hri = e.getKey();
       HServerAddress hsa = e.getValue();
-      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0 
+      if (Bytes.compareTo(hri.getStartKey(), startKey) == 0
           && Bytes.compareTo(hri.getEndKey(), endKey) == 0) {
 
         LOG.info("RegionName: " +hri.getRegionNameAsString());
         byte[] deleteRow = hri.getRegionName();
-        TEST_UTIL.getHBaseAdmin().unassign(deleteRow, true);
 
-        LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
-        Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
-        FileSystem fs = rootDir.getFileSystem(conf);
-        Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName());
-        fs.delete(p, true);
-
-        HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
-        Delete delete = new Delete(deleteRow);
-        meta.delete(delete);
+        if (unassign) {
+          LOG.info("Undeploying region " + hri + " from server " + hsa);
+          undeployRegion(new HBaseAdmin(conf), hsa, hri);
+        }
+
+        if (regionInfoOnly) {
+          LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
+          Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
+          FileSystem fs = rootDir.getFileSystem(conf);
+          Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName());
+          Path hriPath = new Path(p, HRegion.REGIONINFO_FILE);
+          fs.delete(hriPath, true);
+        }
+
+        if (hdfs) {
+          LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
+          Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
+          FileSystem fs = rootDir.getFileSystem(conf);
+          Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName());
+          HBaseFsck.debugLsr(conf, p);
+          boolean success = fs.delete(p, true);
+          LOG.info("Deleted " + p + " sucessfully? " + success);
+          HBaseFsck.debugLsr(conf, p);
+        }
+
+        if (metaRow) {
+          HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
+          Delete delete = new Delete(deleteRow);
+          meta.delete(delete);
+        }
       }
       LOG.info(hri.toString() + hsa.toString());
     }
 
     TEST_UTIL.getMetaTableRows(htd.getName());
-    LOG.info("After delete:");
-    dumpMeta(htd);
-
+    LOG.info("*** After delete:");
+    dumpMeta(htd.getName());
   }
 
   /**
@@ -140,11 +215,32 @@ public class TestHBaseFsck {
     HTableDescriptor desc = new HTableDescriptor(tablename);
     HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
     desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
-    TEST_UTIL.getHBaseAdmin().createTable(desc, splits);
+    TEST_UTIL.getHBaseAdmin().createTable(desc, SPLITS);
     tbl = new HTable(TEST_UTIL.getConfiguration(), tablename);
+
+    List<Put> puts = new ArrayList<Put>();
+    for (byte[] row : ROWKEYS) {
+      Put p = new Put(row);
+      p.add(FAM, Bytes.toBytes("val"), row);
+      puts.add(p);
+    }
+    tbl.put(puts);
+    tbl.flushCommits();
     return tbl;
   }
 
+  /**
+   * Counts the number of rows to verify data loss or non-dataloss.
+   */
+  int countRows() throws IOException {
+     Scan s = new Scan();
+     ResultScanner rs = tbl.getScanner(s);
+     int i = 0;
+     while(rs.next() !=null) {
+       i++;
+     }
+     return i;
+  }
 
   /**
    * delete table in preparation for next test
@@ -153,14 +249,21 @@ public class TestHBaseFsck {
    * @throws IOException
    */
   void deleteTable(String tablename) throws IOException {
-    HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.getConnection().clearRegionCache();
     byte[] tbytes = Bytes.toBytes(tablename);
-    admin.disableTable(tbytes);
+    admin.disableTableAsync(tbytes);
+    while (!admin.isTableDisabled(tbytes)) {
+      try {
+        Thread.sleep(250);
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+        fail("Interrupted when trying to disable table " + tablename);
+      }
+    }
     admin.deleteTable(tbytes);
   }
 
-
-  
   /**
    * This creates a clean table and confirms that the table is clean.
    */
@@ -173,18 +276,21 @@ public class TestHBaseFsck {
       assertNoErrors(hbck);
 
       setupTable(table);
-      
+      assertEquals(ROWKEYS.length, countRows());
+
       // We created 1 table, should be fine
       hbck = doFsck(conf, false);
       assertNoErrors(hbck);
       assertEquals(0, hbck.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows());
     } finally {
       deleteTable(table);
     }
   }
 
   /**
-   * This creates a bad table with regions that have a duplicate start key
+   * This creates and fixes a bad table with regions that have a duplicate
+   * start key
    */
   @Test
   public void testDupeStartKey() throws Exception {
@@ -193,6 +299,7 @@ public class TestHBaseFsck {
     try {
       setupTable(table);
       assertNoErrors(doFsck(conf, false));
+      assertEquals(ROWKEYS.length, countRows());
 
       // Now let's mess it up, by adding a region with a duplicate startkey
       HRegionInfo hriDupe = createRegion(conf, tbl.getTableDescriptor(),
@@ -205,13 +312,111 @@ public class TestHBaseFsck {
       assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DUPE_STARTKEYS,
             ERROR_CODE.DUPE_STARTKEYS});
       assertEquals(2, hbck.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows()); // seems like the "bigger" region won.
+
+      // fix the degenerate region.
+      doFsck(conf,true);
+
+      // check that the degenerate region is gone and no data loss
+      HBaseFsck hbck2 = doFsck(conf,false);
+      assertNoErrors(hbck2);
+      assertEquals(0, hbck2.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows());
     } finally {
       deleteTable(table);
     }
   }
-  
+
+  /**
+   * Get region info from local cluster.
+   */
+  Map<HServerInfo, List<String>> getDeployedHRIs(HBaseAdmin admin)
+    throws IOException {
+    ClusterStatus status = admin.getMaster().getClusterStatus();
+    Collection<HServerInfo> regionServers = status.getServerInfo();
+    Map<HServerInfo, List<String>> mm =
+        new HashMap<HServerInfo, List<String>>();
+    HConnection connection = admin.getConnection();
+    for (HServerInfo hsi : regionServers) {
+      HRegionInterface server = 
+        connection.getHRegionConnection(hsi.getServerAddress());
+
+      // list all online regions from this region server
+      List<HRegionInfo> regions = server.getOnlineRegions();
+      List<String> regionNames = new ArrayList<String>();
+      for (HRegionInfo hri : regions) {
+        regionNames.add(hri.getRegionNameAsString());
+      }
+      mm.put(hsi, regionNames);
+    }
+    return mm;
+  }
+
+  /**
+   * Returns the HSI a region info is on.
+   */
+  HServerInfo findDeployedHSI(Map<HServerInfo, List<String>> mm, HRegionInfo hri) {
+    for (Map.Entry<HServerInfo,List <String>> e : mm.entrySet()) {
+      if (e.getValue().contains(hri.getRegionNameAsString())) {
+        return e.getKey();
+      }
+    }
+    return null;
+  }
+
   /**
-   * This creates a bad table with regions that has startkey == endkey
+   * This creates and fixes a bad table with regions that have a duplicate
+   * start key
+   */
+  @Test
+  public void testDupeRegion() throws Exception {
+    String table = "tableDupeRegion";
+    try {
+      setupTable(table);
+      assertNoErrors(doFsck(conf, false));
+      assertEquals(ROWKEYS.length, countRows());
+
+      // Now let's mess it up, by adding a region with a duplicate startkey
+      HRegionInfo hriDupe = createRegion(conf, tbl.getTableDescriptor(),
+          Bytes.toBytes("A"), Bytes.toBytes("B"));
+
+      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe);
+      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
+          .waitForAssignment(hriDupe);
+
+      // Yikes! The assignment manager can't tell between diff between two
+      // different regions with the same start/endkeys since it doesn't
+      // differentiate on ts/regionId!  We actually need to recheck
+      // deployments!
+      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+      while (findDeployedHSI(getDeployedHRIs(admin), hriDupe) == null) {
+        Thread.sleep(250);
+      }
+
+      LOG.debug("Finished assignment of dupe region");
+
+      // TODO why is dupe region different from dupe start keys?
+      HBaseFsck hbck = doFsck(conf, false);
+      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DUPE_STARTKEYS,
+            ERROR_CODE.DUPE_STARTKEYS});
+      assertEquals(2, hbck.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows()); // seems like the "bigger" region won.
+
+      // fix the degenerate region.
+      doFsck(conf,true);
+
+      // check that the degenerate region is gone and no data loss
+      HBaseFsck hbck2 = doFsck(conf,false);
+      assertNoErrors(hbck2);
+      assertEquals(0, hbck2.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows());
+    } finally {
+      deleteTable(table);
+    }
+  }
+
+  /**
+   * This creates and fixes a bad table with regions that has startkey == endkey
    */
   @Test
   public void testDegenerateRegions() throws Exception {
@@ -219,6 +424,7 @@ public class TestHBaseFsck {
     try {
       setupTable(table);
       assertNoErrors(doFsck(conf,false));
+      assertEquals(ROWKEYS.length, countRows());
 
       // Now let's mess it up, by adding a region with a duplicate startkey
       HRegionInfo hriDupe = createRegion(conf, tbl.getTableDescriptor(),
@@ -231,19 +437,111 @@ public class TestHBaseFsck {
       assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.DEGENERATE_REGION,
           ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.DUPE_STARTKEYS});
       assertEquals(2, hbck.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows());
+
+      // fix the degenerate region.
+      doFsck(conf,true);
+
+      // check that the degenerate region is gone and no data loss
+      HBaseFsck hbck2 = doFsck(conf,false);
+      assertNoErrors(hbck2);
+      assertEquals(0, hbck2.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows());
     } finally {
       deleteTable(table);
     }
   }
 
+
   /**
-   * This creates a bad table where a start key contained in another region.
+   * This creates and fixes a bad table where a region is completely contained
+   * by another region.
+   */
+  @Test
+  public void testContainedRegionOverlap() throws Exception {
+    String table = "tableContainedRegionOverlap";
+    try {
+      setupTable(table);
+      assertEquals(ROWKEYS.length, countRows());
+
+      // Mess it up by creating an overlap in the metadata
+      HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(),
+          Bytes.toBytes("A2"), Bytes.toBytes("B"));
+      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap);
+      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
+          .waitForAssignment(hriOverlap);
+
+      HBaseFsck hbck = doFsck(conf, false);
+      assertErrors(hbck, new ERROR_CODE[] {
+          ERROR_CODE.OVERLAP_IN_REGION_CHAIN });
+      assertEquals(2, hbck.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows());
+
+      // fix the problem.
+      doFsck(conf, true);
+
+      // verify that overlaps are fixed
+      HBaseFsck hbck2 = doFsck(conf,false);
+      assertNoErrors(hbck2);
+      assertEquals(0, hbck2.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows());
+    } finally {
+       deleteTable(table);
+    }
+  }
+
+  /**
+   * This creates and fixes a bad table where a region is completely contained
+   * by another region, and there is a hole (sort of like a bad split)
+   */
+  @Test
+  public void testOverlapAndOrphan() throws Exception {
+    String table = "tableOverlapAndOrphan";
+    try {
+      setupTable(table);
+      assertEquals(ROWKEYS.length, countRows());
+
+      // Mess it up by creating an overlap in the metadata
+      TEST_UTIL.getHBaseAdmin().disableTable(table);
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"),
+          Bytes.toBytes("B"), true, true, false, true);
+      TEST_UTIL.getHBaseAdmin().enableTable(table);
+
+      HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(),
+          Bytes.toBytes("A2"), Bytes.toBytes("B"));
+      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap);
+      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
+          .waitForAssignment(hriOverlap);
+
+      HBaseFsck hbck = doFsck(conf, false);
+      assertErrors(hbck, new ERROR_CODE[] {
+          ERROR_CODE.ORPHAN_HDFS_REGION, ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+          ERROR_CODE.HOLE_IN_REGION_CHAIN});
+
+      // fix the problem.
+      doFsck(conf, true);
+
+      // verify that overlaps are fixed
+      HBaseFsck hbck2 = doFsck(conf,false);
+      assertNoErrors(hbck2);
+      assertEquals(0, hbck2.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows());
+    } finally {
+       deleteTable(table);
+    }
+  }
+
+  /**
+   * This creates and fixes a bad table where a region overlaps two regions --
+   * a start key contained in another region and its end key is contained in
+   * yet another region.
    */
   @Test
   public void testCoveredStartKey() throws Exception {
     String table = "tableCoveredStartKey";
     try {
       setupTable(table);
+      assertEquals(ROWKEYS.length, countRows());
 
       // Mess it up by creating an overlap in the metadata
       HRegionInfo hriOverlap = createRegion(conf, tbl.getTableDescriptor(),
@@ -257,37 +555,238 @@ public class TestHBaseFsck {
           ERROR_CODE.OVERLAP_IN_REGION_CHAIN,
           ERROR_CODE.OVERLAP_IN_REGION_CHAIN });
       assertEquals(3, hbck.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows());
+
+      // fix the problem.
+      doFsck(conf, true);
+
+      // verify that overlaps are fixed
+      HBaseFsck hbck2 = doFsck(conf, false);
+      assertErrors(hbck2, new ERROR_CODE[0]);
+      assertEquals(0, hbck2.getOverlapGroups(table).size());
+      assertEquals(ROWKEYS.length, countRows());
     } finally {
       deleteTable(table);
     }
   }
 
   /**
-   * This creates a bad table with a hole in meta.
+   * This creates and fixes a bad table with a missing region -- hole in meta
+   * and data missing in the fs.
    */
   @Test
-  public void testMetaHole() throws Exception {
-    String table = "tableMetaHole";
+  public void testRegionHole() throws Exception {
+    String table = "tableRegionHole";
     try {
       setupTable(table);
+      assertEquals(ROWKEYS.length, countRows());
+
+      // Mess it up by leaving a hole in the assignment, meta, and hdfs data
+      TEST_UTIL.getHBaseAdmin().disableTable(table);
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
+          Bytes.toBytes("C"), true, true, true);
+      TEST_UTIL.getHBaseAdmin().enableTable(table);
+
+      HBaseFsck hbck = doFsck(conf, false);
+      assertErrors(hbck, new ERROR_CODE[] {
+          ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      // holes are separate from overlap groups
+      assertEquals(0, hbck.getOverlapGroups(table).size());
+
+      // fix hole
+      doFsck(conf, true);
+
+      // check that hole fixed
+      assertNoErrors(doFsck(conf,false));
+      // NOTE(review): expects 2 rows gone, so region [B,C) presumably held
+      // two of the ROWKEYS rows -- verify against setupTable's split points.
+      assertEquals(ROWKEYS.length - 2 , countRows()); // lost a region so lost a row
     } finally {
       deleteTable(table);
     }
   }
+
+  /**
+   * This creates and fixes a bad table with a missing region -- hole in meta
+   * and data present but .regioninfo missing (an orphan hdfs region) in the fs.
+   */
+  @Test
+  public void testHDFSRegioninfoMissing() throws Exception {
+    String table = "tableHDFSRegioininfoMissing";
+    try {
+      setupTable(table);
+      assertEquals(ROWKEYS.length, countRows());
 
       // Mess it up by leaving a hole in the meta data
-      HRegionInfo hriHole = createRegion(conf, tbl.getTableDescriptor(),
-          Bytes.toBytes("D"), Bytes.toBytes(""));
-      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriHole);
-      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
-          .waitForAssignment(hriHole);
+      TEST_UTIL.getHBaseAdmin().disableTable(table);
+      // leave hdfs data in place but drop .regioninfo (last flag true)
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
+          Bytes.toBytes("C"), true, true, false, true);
+      TEST_UTIL.getHBaseAdmin().enableTable(table);
+
+      HBaseFsck hbck = doFsck(conf, false);
+      assertErrors(hbck, new ERROR_CODE[] {
+          ERROR_CODE.ORPHAN_HDFS_REGION,
+          ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+          ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      // holes are separate from overlap groups
+      assertEquals(0, hbck.getOverlapGroups(table).size());
+
+      // fix hole
+      doFsck(conf, true);
+
+      // check that hole fixed; hdfs data survived, so no rows were lost
+      assertNoErrors(doFsck(conf, false));
+      assertEquals(ROWKEYS.length, countRows());
+    } finally {
+      deleteTable(table);
+    }
+  }
+
+  /**
+   * This creates and fixes a bad table with a region that is missing meta and
+   * not assigned to a region server.
+   */
+  @Test
+  public void testNotInMetaOrDeployedHole() throws Exception {
+    String table = "tableNotInMetaOrDeployedHole";
+    try {
+      setupTable(table);
+      assertEquals(ROWKEYS.length, countRows());
+
+      // Mess it up by leaving a hole in the meta data
+      TEST_UTIL.getHBaseAdmin().disableTable(table);
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
+          Bytes.toBytes("C"), true, true, false); // don't rm from fs
+      TEST_UTIL.getHBaseAdmin().enableTable(table);
+
+      HBaseFsck hbck = doFsck(conf, false);
+      assertErrors(hbck, new ERROR_CODE[] {
+          ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN});
+      // holes are separate from overlap groups
+      assertEquals(0, hbck.getOverlapGroups(table).size());
+
+      // fix hole; the fixing pass still reports the errors it found
+      // before repairing them
+      assertErrors(doFsck(conf, true) , new ERROR_CODE[] {
+          ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN});
+
+      // check that hole fixed; hdfs data was kept, so no rows were lost
+      assertNoErrors(doFsck(conf,false));
+      assertEquals(ROWKEYS.length, countRows());
+    } finally {
+      deleteTable(table);
+    }
+  }
 
+  /**
+   * This creates and fixes a bad table with a hole in meta.
+   */
+  @Test
+  public void testNotInMetaHole() throws Exception {
+    String table = "tableNotInMetaHole";
+    try {
+      setupTable(table);
+      assertEquals(ROWKEYS.length, countRows());
+
+      // Mess it up by leaving a hole in the meta data
       TEST_UTIL.getHBaseAdmin().disableTable(table);
-      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("C"), Bytes.toBytes(""));
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
+          Bytes.toBytes("C"), false, true, false); // don't rm from fs
       TEST_UTIL.getHBaseAdmin().enableTable(table);
 
       HBaseFsck hbck = doFsck(conf, false);
-      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.HOLE_IN_REGION_CHAIN });
+      assertErrors(hbck, new ERROR_CODE[] {
+          ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN});
       // holes are separate from overlap groups
       assertEquals(0, hbck.getOverlapGroups(table).size());
+
+      // fix hole; the fixing pass still reports the errors it found
+      // before repairing them
+      assertErrors(doFsck(conf, true) , new ERROR_CODE[] {
+          ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN});
+
+      // check that hole fixed; hdfs data was kept, so no rows were lost
+      assertNoErrors(doFsck(conf,false));
+      assertEquals(ROWKEYS.length, countRows());
     } finally {
       deleteTable(table);
     }
   }
+
+  /**
+   * This creates and fixes a bad table with a region that is in meta but has
+   * no deployment or data in hdfs.
+   */
+  @Test
+  public void testNotInHdfs() throws Exception {
+    String table = "tableNotInHdfs";
+    try {
+      setupTable(table);
+      assertEquals(ROWKEYS.length, countRows());
+
+      // flush so data is in region files; if it were only in the hlog,
+      // deleting the region would cause no data loss to observe below
+      TEST_UTIL.getHBaseAdmin().flush(table);
+
+      // Mess it up by leaving a hole in the hdfs data
+      deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
+          Bytes.toBytes("C"), false, false, true); // don't rm meta
+
+      HBaseFsck hbck = doFsck(conf, false);
+      assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS});
+      // holes are separate from overlap groups
+      assertEquals(0, hbck.getOverlapGroups(table).size());
+
+      // fix hole
+      doFsck(conf, true);
+
+      // check that hole fixed; region [B,C)'s rows are gone with its data
+      assertNoErrors(doFsck(conf,false));
+      assertEquals(ROWKEYS.length - 2, countRows());
+    } finally {
+      deleteTable(table);
+      assertNoErrors(doFsck(conf,false)); // make sure disable worked properly
+    }
+  }
+
+  
+  /**
+   * This creates entries in META with no hdfs data.  This should cleanly
+   * remove the table.
+   */
+  @Test
+  public void testNoHdfsTable() throws Exception {
+    String table = "NoHdfsTable";
+    setupTable(table);
+    assertEquals(ROWKEYS.length, countRows());
+
+    // flush so data is in region files; if it were only in the hlog,
+    // deleting the regions would cause no data loss to observe below
+    TEST_UTIL.getHBaseAdmin().flush(table);
+
+    // Mess it up by leaving a giant hole in meta: remove the hdfs data of
+    // every region while keeping all the META rows
+    deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes(""),
+        Bytes.toBytes("A"), false, false, true); // don't rm meta
+    deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("A"),
+        Bytes.toBytes("B"), false, false, true); // don't rm meta
+    deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
+        Bytes.toBytes("C"), false, false, true); // don't rm meta
+    deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("C"),
+        Bytes.toBytes(""), false, false, true); // don't rm meta
+
+    // one NOT_IN_HDFS error per deleted region
+    HBaseFsck hbck = doFsck(conf, false);
+    assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS,
+        ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS,
+        ERROR_CODE.NOT_IN_HDFS,});
+    // holes are separate from overlap groups
+    assertEquals(0, hbck.getOverlapGroups(table).size());
+
+    // fix hole
+    doFsck(conf, true);
+
+    // check that hole fixed
+    assertNoErrors(doFsck(conf,false));
+
+    // with every region gone the table itself should be gone, so counting
+    // rows must fail rather than return 0
+    try {
+      assertEquals(0, countRows());
+    } catch (IOException ioe) {
+      // we've actually deleted the table already. :)
+      return;
+    }
+    fail("Should have failed with IOException");
+  }
 }

Modified: hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java?rev=1304668&r1=1304667&r2=1304668&view=diff
==============================================================================
--- hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java
(original)
+++ hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckComparator.java
Fri Mar 23 23:54:48 2012
@@ -26,6 +26,7 @@ import static org.mockito.Mockito.*;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.util.HBaseFsck.HbckInfo;
+import org.apache.hadoop.hbase.util.HBaseFsck.HdfsEntry;
 import org.apache.hadoop.hbase.util.HBaseFsck.MetaEntry;
 import org.junit.Test;
 
@@ -42,12 +43,11 @@ public class TestHBaseFsckComparator {
   byte[] keyC = Bytes.toBytes("C");
   byte[] keyEnd = Bytes.toBytes("");
 
-  static HbckInfo genHbckInfo(byte[] table, byte[] start, byte[] end, int time) {
+  /** Builds an HbckInfo whose meta and hdfs entries share one HRegionInfo. */
+  static HbckInfo genHbckInfo(byte[] table, byte[] start, byte[] end, long time) {
     HTableDescriptor htd = mock(HTableDescriptor.class);
     doReturn(table).when(htd).getName();
-
-    return new HbckInfo(new MetaEntry(new HRegionInfo(htd, start, end), null, null,
-        time));
+    HRegionInfo hri = new HRegionInfo(htd, start, end, false, time);
+    return new HbckInfo(new HdfsEntry(hri), new MetaEntry(hri,null, null, time));
   }
 
   @Test

Modified: hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java?rev=1304668&r1=1304667&r2=1304668&view=diff
==============================================================================
--- hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
(original)
+++ hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
Fri Mar 23 23:54:48 2012
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.util.hbc
 
 import static org.junit.Assert.assertEquals;
 
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
@@ -28,18 +29,29 @@ import org.apache.hadoop.hbase.util.HBas
 
 public class HbckTestingUtil {
   public static HBaseFsck doFsck(Configuration conf, boolean fix) throws Exception {
+    // Convenience overload: apply the same flag to all five fix options.
+    return doFsck(conf, fix, fix, fix, fix,fix);
+  }
+
+  /**
+   * Runs an online hbck with each fix option controlled individually,
+   * full-detail reporting enabled, and returns the HBaseFsck instance so
+   * callers can inspect the collected errors.
+   */
+  public static HBaseFsck doFsck(Configuration conf, boolean fixAssignments,
+      boolean fixMeta, boolean fixHdfsHoles, boolean fixHdfsOverlaps,
+      boolean fixHdfsOrphans) throws Exception {
     HBaseFsck fsck = new HBaseFsck(conf);
     fsck.connect();
-    fsck.displayFullReport(); // i.e. -details
+    fsck.setDisplayFullReport(); // i.e. -details
+    // zero lag -- presumably so freshly modified regions are not skipped;
+    // TODO(review) confirm setTimeLag semantics
     fsck.setTimeLag(0);
-    fsck.setFixErrors(fix);
-    fsck.doWork();
+    fsck.setFixAssignments(fixAssignments);
+    fsck.setFixMeta(fixMeta);
+    fsck.setFixHdfsHoles(fixHdfsHoles);
+    fsck.setFixHdfsOverlaps(fixHdfsOverlaps);
+    fsck.setFixHdfsOrphans(fixHdfsOrphans);
+    fsck.onlineHbck();
     return fsck;
   }
 
+
   public static void assertNoErrors(HBaseFsck fsck) throws Exception {
     List<ERROR_CODE> errs = fsck.getErrors().getErrorList();
-    assertEquals(0, errs.size());
+    // Compare to an empty list (not size 0) so a failure prints the codes.
+    assertEquals(new ArrayList<ERROR_CODE>(), errs);
   }
 
   public static void assertErrors(HBaseFsck fsck, ERROR_CODE[] expectedErrors) {

Modified: hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java?rev=1304668&r1=1304667&r2=1304668&view=diff
==============================================================================
--- hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java
(original)
+++ hbase/branches/0.90/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java
Fri Mar 23 23:54:48 2012
@@ -21,6 +21,7 @@ import static org.apache.hadoop.hbase.ut
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.doFsck;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.util.Arrays;
 



Mime
View raw message