hbase-commits mailing list archives

From: e...@apache.org
Subject: hbase git commit: HBASE-12891 Parallel execution for Hbck checkRegionConsistency
Date: Thu, 02 Apr 2015 18:23:16 GMT
Repository: hbase
Updated Branches:
  refs/heads/master 1e6ea5aab -> 61f4ce688


HBASE-12891 Parallel execution for Hbck checkRegionConsistency

Signed-off-by: Enis Soztutar <enis@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/61f4ce68
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/61f4ce68
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/61f4ce68

Branch: refs/heads/master
Commit: 61f4ce68801cb310cfa48c0a85dfafe000bcdd1c
Parents: 1e6ea5a
Author: Dave Latham <davelatham@yahoo-inc.com>
Authored: Mon Mar 30 11:20:28 2015 -0700
Committer: Enis Soztutar <enis@apache.org>
Committed: Thu Apr 2 11:21:43 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/util/HBaseFsck.java | 90 +++++++++++++++++---
 1 file changed, 77 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/61f4ce68/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 1fb64a2..f8fdd96 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -56,8 +56,10 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Multimap;
+import com.google.common.collect.Ordering;
 import com.google.common.collect.TreeMultimap;
 import com.google.protobuf.ServiceException;
 
@@ -1767,20 +1769,28 @@ public class HBaseFsck extends Configured implements Closeable {
   throws IOException, KeeperException, InterruptedException {
     // Divide the checks in two phases. One for default/primary replicas and another
     // for the non-primary ones. Keeps code cleaner this way.
+
+    List<CheckRegionConsistencyWorkItem> workItems =
+        new ArrayList<CheckRegionConsistencyWorkItem>(regionInfoMap.size());
     for (java.util.Map.Entry<String, HbckInfo> e: regionInfoMap.entrySet()) {
       if (e.getValue().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) {
-        checkRegionConsistency(e.getKey(), e.getValue());
+        workItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue()));
       }
     }
+    checkRegionConsistencyConcurrently(workItems);
+
     boolean prevHdfsCheck = shouldCheckHdfs();
     setCheckHdfs(false); //replicas don't have any hdfs data
     // Run a pass over the replicas and fix any assignment issues that exist on the currently
     // deployed/undeployed replicas.
+    List<CheckRegionConsistencyWorkItem> replicaWorkItems =
+        new ArrayList<CheckRegionConsistencyWorkItem>(regionInfoMap.size());
     for (java.util.Map.Entry<String, HbckInfo> e: regionInfoMap.entrySet()) {
       if (e.getValue().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
-        checkRegionConsistency(e.getKey(), e.getValue());
+        replicaWorkItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue()));
       }
     }
+    checkRegionConsistencyConcurrently(replicaWorkItems);
     setCheckHdfs(prevHdfsCheck);
 
     if (shouldCheckHdfs()) {
@@ -1789,6 +1799,51 @@ public class HBaseFsck extends Configured implements Closeable {
   }
 
   /**
+   * Check consistency of all regions using multiple threads concurrently.
+   */
+  private void checkRegionConsistencyConcurrently(
+    final List<CheckRegionConsistencyWorkItem> workItems)
+    throws IOException, KeeperException, InterruptedException {
+    if (workItems.isEmpty()) {
+      return;  // nothing to check
+    }
+
+    List<Future<Void>> workFutures = executor.invokeAll(workItems);
+    for(Future<Void> f: workFutures) {
+      try {
+        f.get();
+      } catch(ExecutionException e1) {
+        LOG.warn("Could not check region consistency " , e1.getCause());
+        if (e1.getCause() instanceof IOException) {
+          throw (IOException)e1.getCause();
+        } else if (e1.getCause() instanceof KeeperException) {
+          throw (KeeperException)e1.getCause();
+        } else if (e1.getCause() instanceof InterruptedException) {
+          throw (InterruptedException)e1.getCause();
+        } else {
+          throw new IOException(e1.getCause());
+        }
+      }
+    }
+  }
+
+  class CheckRegionConsistencyWorkItem implements Callable<Void> {
+    private final String key;
+    private final HbckInfo hbi;
+
+    CheckRegionConsistencyWorkItem(String key, HbckInfo hbi) {
+      this.key = key;
+      this.hbi = hbi;
+    }
+
+    @Override
+    public synchronized Void call() throws Exception {
+      checkRegionConsistency(key, hbi);
+      return null;
+    }
+  }
+
+  /**
    * Check and fix table states, assumes full info available:
    * - tableInfos
    * - empty tables loaded
@@ -2128,16 +2183,8 @@ public class HBaseFsck extends Configured implements Closeable {
 
         HRegionInfo hri = hbi.getHdfsHRI();
         TableInfo tableInfo = tablesInfo.get(hri.getTable());
-        if (tableInfo.regionsFromMeta.isEmpty()) {
-          for (HbckInfo h : regionInfoMap.values()) {
-            if (hri.getTable().equals(h.getTableName())) {
-              if (h.metaEntry != null) tableInfo.regionsFromMeta
-                  .add((HRegionInfo) h.metaEntry);
-            }
-          }
-          Collections.sort(tableInfo.regionsFromMeta);
-        }
-        for (HRegionInfo region : tableInfo.regionsFromMeta) {
+
+        for (HRegionInfo region : tableInfo.getRegionsFromMeta()) {
           if (Bytes.compareTo(region.getStartKey(), hri.getStartKey()) <= 0
               && (region.getEndKey().length == 0 || Bytes.compareTo(region.getEndKey(),
                 hri.getEndKey()) >= 0)
@@ -2493,7 +2540,7 @@ public class HBaseFsck extends Configured implements Closeable {
       TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, cmp);
 
     // list of regions derived from meta entries.
-    final List<HRegionInfo> regionsFromMeta = new ArrayList<HRegionInfo>();
+    private ImmutableList<HRegionInfo> regionsFromMeta = null;
 
     TableInfo(TableName name) {
       this.tableName = name;
@@ -2550,6 +2597,23 @@ public class HBaseFsck extends Configured implements Closeable {
       return sc.getStarts().size() + backwards.size();
     }
 
+    public synchronized ImmutableList<HRegionInfo> getRegionsFromMeta() {
+      // lazy loaded, synchronized to ensure a single load
+      if (regionsFromMeta == null) {
+        List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
+        for (HbckInfo h : HBaseFsck.this.regionInfoMap.values()) {
+          if (tableName.equals(h.getTableName())) {
+            if (h.metaEntry != null) {
+              regions.add((HRegionInfo) h.metaEntry);
+            }
+          }
+        }
+        regionsFromMeta = Ordering.natural().immutableSortedCopy(regions);
+      }
+      
+      return regionsFromMeta;
+    }
+    
     private class IntegrityFixSuggester extends TableIntegrityErrorHandlerImpl {
       ErrorReporter errors;
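
The core of the change is a plain fork/join pattern: wrap each independent
region check in a Callable work item, submit the whole list through
ExecutorService.invokeAll(), then drain the futures and unwrap the cause of
any ExecutionException so callers still see the original IOException,
KeeperException or InterruptedException. Below is a minimal standalone sketch
of that pattern; the CheckWorkItem class, its println body and the fixed-size
pool are hypothetical stand-ins, not HBaseFsck code.

  import java.io.IOException;
  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.Callable;
  import java.util.concurrent.ExecutionException;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.Future;

  public class ParallelCheckSketch {

    // Hypothetical work item, analogous to CheckRegionConsistencyWorkItem:
    // each Callable wraps one independent consistency check.
    static class CheckWorkItem implements Callable<Void> {
      private final String key;

      CheckWorkItem(String key) {
        this.key = key;
      }

      @Override
      public Void call() throws Exception {
        System.out.println("checking " + key);  // placeholder for the real check
        return null;
      }
    }

    public static void main(String[] args) throws Exception {
      ExecutorService executor = Executors.newFixedThreadPool(4);
      try {
        List<CheckWorkItem> workItems = new ArrayList<CheckWorkItem>();
        for (String key : new String[] {"region-a", "region-b", "region-c"}) {
          workItems.add(new CheckWorkItem(key));
        }

        // invokeAll blocks until every submitted task has completed or failed.
        List<Future<Void>> futures = executor.invokeAll(workItems);

        // Future.get() wraps any task failure in an ExecutionException; unwrap
        // the cause so the caller sees the original exception type, as the
        // patch does for IOException, KeeperException and InterruptedException.
        for (Future<Void> f : futures) {
          try {
            f.get();
          } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof IOException) {
              throw (IOException) cause;
            }
            throw new IOException(cause);
          }
        }
      } finally {
        executor.shutdown();
      }
    }
  }

Because invokeAll() only returns once every task has finished, the two phases
in the patch (primary replicas first, then the non-primary replicas with HDFS
checking disabled) still run strictly one after the other, even though the
work inside each phase is now parallel.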
 

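The getRegionsFromMeta() change is the other half of making the checks safe to
run concurrently: the eagerly filled regionsFromMeta list becomes a lazily
built, synchronized, immutable sorted snapshot, so many work items can read it
without further locking once it exists. Below is a minimal sketch of that
idiom using Guava's Ordering.natural().immutableSortedCopy(); the
LazySortedView class and its String elements are stand-ins for the HBase
types, and a Guava dependency is assumed.

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  import com.google.common.collect.ImmutableList;
  import com.google.common.collect.Ordering;

  public class LazySortedView {
    private final List<String> source;         // stands in for regionInfoMap
    private ImmutableList<String> snapshot;    // stands in for regionsFromMeta

    public LazySortedView(List<String> source) {
      this.source = source;
    }

    // synchronized so concurrent callers trigger at most one load; after the
    // first call the already-built immutable list is simply returned.
    public synchronized ImmutableList<String> getSnapshot() {
      if (snapshot == null) {
        List<String> copy = new ArrayList<String>(source);
        snapshot = Ordering.natural().immutableSortedCopy(copy);
      }
      return snapshot;
    }

    public static void main(String[] args) {
      LazySortedView view = new LazySortedView(Arrays.asList("b", "c", "a"));
      System.out.println(view.getSnapshot());  // prints [a, b, c]
    }
  }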
