hbase-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From rajeshb...@apache.org
Subject svn commit: r1577659 - in /hbase/branches/0.94/src: main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
Date Fri, 14 Mar 2014 18:51:37 GMT
Author: rajeshbabu
Date: Fri Mar 14 18:51:37 2014
New Revision: 1577659

URL: http://svn.apache.org/r1577659
Log:
HBASE-10549 When there is a hole, LoadIncrementalHFiles will hang in an infinite loop.(yuanxinen)

Modified:
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
    hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java?rev=1577659&r1=1577658&r2=1577659&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java Fri Mar 14 18:51:37 2014
@@ -487,6 +487,30 @@ public class LoadIncrementalHFiles exten
       idx = -(idx + 1) - 1;
     }
     final int indexForCallable = idx;
+    
+    /**
+     * we can consider there is a region hole in following conditions. 1) if idx < 0,then first
+     * region info is lost. 2) if the endkey of a region is not equal to the startkey of the next
+     * region. 3) if the endkey of the last region is not empty.
+     */
+    if (indexForCallable < 0) {
+      throw new IOException("The first region info for table "
+          + Bytes.toString(table.getTableName())
+          + " cann't be found in .META..Please use hbck tool to fix it first.");
+    } else if ((indexForCallable == startEndKeys.getFirst().length - 1)
+        && !Bytes.equals(startEndKeys.getSecond()[indexForCallable], HConstants.EMPTY_BYTE_ARRAY)) {
+      throw new IOException("The last region info for table "
+          + Bytes.toString(table.getTableName())
+          + " cann't be found in .META..Please use hbck tool to fix it first.");
+    } else if (indexForCallable + 1 < startEndKeys.getFirst().length
+        && !(Bytes.compareTo(startEndKeys.getSecond()[indexForCallable],
+          startEndKeys.getFirst()[indexForCallable + 1]) == 0)) {
+      throw new IOException("The endkey of one region for table "
+          + Bytes.toString(table.getTableName())
+          + " is not equal to the startkey of the next region in .META.."
+          + "Please use hbck tool to fix it first.");
+    }
+    
     boolean lastKeyInRange =
       Bytes.compareTo(last, startEndKeys.getSecond()[idx]) < 0 ||
       Bytes.equals(startEndKeys.getSecond()[idx], HConstants.EMPTY_BYTE_ARRAY);

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java?rev=1577659&r1=1577658&r2=1577659&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java Fri Mar 14 18:51:37 2014
@@ -37,11 +37,15 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
@@ -123,6 +127,29 @@ public class TestLoadIncrementalHFilesSp
     }
   }
 
+  /**
+   * Creates a table with given table name,specified number of column families<br>
+   * and splitkeys if the table does not already exist.
+   * @param table
+   * @param cfs
+   * @param SPLIT_KEYS
+   */
+  private void setupTableWithSplitkeys(String table, int cfs, byte[][] SPLIT_KEYS)
+      throws IOException {
+    try {
+      LOG.info("Creating table " + table);
+      HTableDescriptor htd = new HTableDescriptor(table);
+      for (int i = 0; i < cfs; i++) {
+        htd.addFamily(new HColumnDescriptor(family(i)));
+      }
+
+      util.getHBaseAdmin().createTable(htd, SPLIT_KEYS);
+    } catch (TableExistsException tee) {
+      LOG.info("Table " + table + " already exists");
+    }
+  }
+
+  
   private Path buildBulkFiles(String table, int value) throws Exception {
     Path dir = util.getDataTestDir(table);
     Path bulk1 = new Path(dir, table+value);
@@ -393,6 +420,64 @@ public class TestLoadIncrementalHFilesSp
 
     fail("doBulkLoad should have thrown an exception");
   }
+  
+  @Test
+  public void testGroupOrSplitWhenRegionHoleExistsInMeta() throws Exception {
+    String tableName = "testGroupOrSplitWhenRegionHoleExistsInMeta";
+    byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000100") };
+
+    setupTableWithSplitkeys(tableName, 10, SPLIT_KEYS);
+    HTable table = new HTable(util.getConfiguration(), Bytes.toBytes(tableName));
+    Path dir = buildBulkFiles(tableName, 2);
+
+    final AtomicInteger countedLqis = new AtomicInteger();
+    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(
+      util.getConfiguration()) {
+      
+    protected List<LoadQueueItem> groupOrSplit(
+        Multimap<ByteBuffer, LoadQueueItem> regionGroups,
+        final LoadQueueItem item, final HTable htable,
+        final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
+      List<LoadQueueItem> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
+      if (lqis != null) {
+        countedLqis.addAndGet(lqis.size());
+      }
+      return lqis;
+    }
+  };
+
+    // do bulkload when there is no region hole in hbase:meta.
+    try {
+      loader.doBulkLoad(dir, table);
+    } catch (Exception e) {
+      LOG.error("exeception=", e);
+    }
+    // check if all the data are loaded into the table.
+    this.assertExpectedTable(tableName, ROWCOUNT, 2);
+
+    dir = buildBulkFiles(tableName, 3);
+
+    // Mess it up by leaving a hole in the hbase:meta
+    CatalogTracker ct = new CatalogTracker(util.getConfiguration());
+    List<HRegionInfo> regionInfos = MetaReader.getTableRegions(ct, Bytes.toBytes(tableName));
+    for (HRegionInfo regionInfo : regionInfos) {
+      if (Bytes.equals(regionInfo.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
+        MetaEditor.deleteRegion(ct, regionInfo);
+        break;
+      }
+    }
+
+    try {
+      loader.doBulkLoad(dir, table);
+    } catch (Exception e) {
+      LOG.error("exeception=", e);
+      assertTrue("IOException expected", e instanceof IOException);
+    }
+
+    table.close();
+
+    this.assertExpectedTable(tableName, ROWCOUNT, 2);
+  }
 
   @org.junit.Rule
   public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =



Mime
View raw message