hbase-commits mailing list archives

From: te...@apache.org
Subject: svn commit: r1167453 - in /hbase/branches/0.90: CHANGES.txt src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
Date: Sat, 10 Sep 2011 04:37:33 GMT
Author: tedyu
Date: Sat Sep 10 04:37:32 2011
New Revision: 1167453

URL: http://svn.apache.org/viewvc?rev=1167453&view=rev
Log:
HBASE-4340  Hbase can't balance if ServerShutdownHandler encountered
               exception (Jinchao)
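
The patch below wraps the shutdown processing in a try/finally so that
this.deadServers.finish(serverName) still runs when splitLog() or any later
step throws; without that, the server remains marked as a dead server still
being processed and the master cannot balance (the symptom HBASE-4340
describes). A minimal, self-contained Java sketch of this cleanup pattern is
shown here for illustration only -- the class and set names are hypothetical
stand-ins, not the actual HBase types:

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;

    public class ShutdownCleanupSketch {

      // Hypothetical stand-in for the dead-server bookkeeping; not the HBase class.
      static final Set<String> deadServersInProcessing = new HashSet<String>();

      // Simulates the first step that can fail, as in HBASE-4340.
      static void splitLog(String serverName) throws IOException {
        throw new IOException("simulated splitLog failure for " + serverName);
      }

      static void processShutdown(String serverName) throws IOException {
        deadServersInProcessing.add(serverName);
        try {
          splitLog(serverName);
          // ... region-in-transition cleanup and reassignment would follow here ...
        } finally {
          // Mirrors this.deadServers.finish(serverName) in the patch: runs even
          // when splitLog() throws, so the server is not left "in processing".
          deadServersInProcessing.remove(serverName);
        }
      }

      public static void main(String[] args) throws Exception {
        try {
          processShutdown("regionserver.example.com,60020,1315629000000");
        } catch (IOException e) {
          System.out.println("Shutdown handling failed: " + e.getMessage());
        }
        // Prints false: the finally block cleared the bookkeeping despite the failure.
        System.out.println("Dead servers still in processing? "
            + !deadServersInProcessing.isEmpty());
      }
    }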

Modified:
    hbase/branches/0.90/CHANGES.txt
    hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java

Modified: hbase/branches/0.90/CHANGES.txt
URL: http://svn.apache.org/viewvc/hbase/branches/0.90/CHANGES.txt?rev=1167453&r1=1167452&r2=1167453&view=diff
==============================================================================
--- hbase/branches/0.90/CHANGES.txt (original)
+++ hbase/branches/0.90/CHANGES.txt Sat Sep 10 04:37:32 2011
@@ -32,6 +32,8 @@ Release 0.90.5 - Unreleased
                (Jonathan Hsieh)
    HBASE-4341  HRS#closeAllRegions should take care of HRS#onlineRegions's weak
                consistency (Jieshan Bean)
+   HBASE-4340  Hbase can't balance if ServerShutdownHandler encountered
+               exception (Jinchao)
 
   IMPROVEMENT
    HBASE-4205  Enhance HTable javadoc (Eric Charles)

Modified: hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java?rev=1167453&r1=1167452&r2=1167453&view=diff
==============================================================================
--- hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java (original)
+++ hbase/branches/0.90/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java Sat Sep 10 04:37:32 2011
@@ -149,69 +149,73 @@ public class ServerShutdownHandler exten
     final String serverName = this.hsi.getServerName();
 
     LOG.info("Splitting logs for " + serverName);
-    this.services.getMasterFileSystem().splitLog(serverName);
-
-    // Clean out anything in regions in transition.  Being conservative and
-    // doing after log splitting.  Could do some states before -- OPENING?
-    // OFFLINE? -- and then others after like CLOSING that depend on log
-    // splitting.
-    List<RegionState> regionsInTransition =
-      this.services.getAssignmentManager().processServerShutdown(this.hsi);
-
-    // Assign root and meta if we were carrying them.
-    if (isCarryingRoot()) { // -ROOT-
-      LOG.info("Server " + serverName + " was carrying ROOT. Trying to assign.");
-      verifyAndAssignRootWithRetries();
-    }
-
-    // Carrying meta?
-    if (isCarryingMeta()) {
-      LOG.info("Server " + serverName + " was carrying META. Trying to assign.");
-      this.services.getAssignmentManager().assignMeta();
-    }
-
-    // Wait on meta to come online; we need it to progress.
-    // TODO: Best way to hold strictly here?  We should build this retry logic
-    //       into the MetaReader operations themselves.
-    NavigableMap<HRegionInfo, Result> hris = null;
-    while (!this.server.isStopped()) {
-      try {
-        this.server.getCatalogTracker().waitForMeta();
-        hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(),
-            this.hsi);
-        break;
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        throw new IOException("Interrupted", e);
-      } catch (IOException ioe) {
-        LOG.info("Received exception accessing META during server shutdown of " +
-            serverName + ", retrying META read", ioe);
-      }
-    }
-
-    // Skip regions that were in transition unless CLOSING or PENDING_CLOSE
-    for (RegionState rit : regionsInTransition) {
-      if (!rit.isClosing() && !rit.isPendingClose()) {
-        LOG.debug("Removed " + rit.getRegion().getRegionNameAsString() +
-          " from list of regions to assign because in RIT");
-        hris.remove(rit.getRegion());
-      }
-    }
-
-    LOG.info("Reassigning " + (hris == null? 0: hris.size()) +
-      " region(s) that " + serverName +
-      " was carrying (skipping " + regionsInTransition.size() +
-      " regions(s) that are already in transition)");
-
-    // Iterate regions that were on this server and assign them
-    for (Map.Entry<HRegionInfo, Result> e: hris.entrySet()) {
-      if (processDeadRegion(e.getKey(), e.getValue(),
-          this.services.getAssignmentManager(),
-          this.server.getCatalogTracker())) {
-        this.services.getAssignmentManager().assign(e.getKey(), true);
+    try {
+      this.services.getMasterFileSystem().splitLog(serverName);
+    
+      // Clean out anything in regions in transition.  Being conservative and
+      // doing after log splitting.  Could do some states before -- OPENING?
+      // OFFLINE? -- and then others after like CLOSING that depend on log
+      // splitting.
+      List<RegionState> regionsInTransition =
+        this.services.getAssignmentManager().processServerShutdown(this.hsi);
+    
+      // Assign root and meta if we were carrying them.
+      if (isCarryingRoot()) { // -ROOT-
+        LOG.info("Server " + serverName + " was carrying ROOT. Trying to assign.");
+        verifyAndAssignRootWithRetries();
+      }
+    
+      // Carrying meta?
+      if (isCarryingMeta()) {
+        LOG.info("Server " + serverName + " was carrying META. Trying to assign.");
+        this.services.getAssignmentManager().assignMeta();
+      }
+    
+      // Wait on meta to come online; we need it to progress.
+      // TODO: Best way to hold strictly here?  We should build this retry logic
+      //       into the MetaReader operations themselves.
+      NavigableMap<HRegionInfo, Result> hris = null;
+      while (!this.server.isStopped()) {
+        try {
+          this.server.getCatalogTracker().waitForMeta();
+          hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(),
+              this.hsi);
+          break;
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+          throw new IOException("Interrupted", e);
+        } catch (IOException ioe) {
+          LOG.info("Received exception accessing META during server shutdown of " +
+              serverName + ", retrying META read", ioe);
+        }
+      }
+    
+      // Skip regions that were in transition unless CLOSING or PENDING_CLOSE
+      for (RegionState rit : regionsInTransition) {
+        if (!rit.isClosing() && !rit.isPendingClose()) {
+          LOG.debug("Removed " + rit.getRegion().getRegionNameAsString() +
+            " from list of regions to assign because in RIT");
+          hris.remove(rit.getRegion());
+        }
+      }
+    
+      LOG.info("Reassigning " + (hris == null? 0: hris.size()) +
+        " region(s) that " + serverName +
+        " was carrying (skipping " + regionsInTransition.size() +
+        " regions(s) that are already in transition)");
+    
+      // Iterate regions that were on this server and assign them
+      for (Map.Entry<HRegionInfo, Result> e: hris.entrySet()) {
+        if (processDeadRegion(e.getKey(), e.getValue(),
+            this.services.getAssignmentManager(),
+            this.server.getCatalogTracker())) {
+          this.services.getAssignmentManager().assign(e.getKey(), true);
+        }
       }
+    } finally {
+      this.deadServers.finish(serverName);
     }
-    this.deadServers.finish(serverName);
+    
     LOG.info("Finished processing of shutdown of " + serverName);
   }
 
@@ -363,4 +367,4 @@ public class ServerShutdownHandler exten
       return false;
     }
   }
-}
\ No newline at end of file
+}


