hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From kih...@apache.org
Subject hadoop git commit: HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma Reddy Battula.
Date Thu, 13 Oct 2016 19:55:48 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2e153bc8a -> c5a130370


HDFS-10987. Make Decommission less expensive when lot of blocks present. Contributed by Brahma
Reddy Battula.

(cherry picked from commit 332a61fd74fd2a9874319232c583ab5d2c53ff03)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5a13037
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5a13037
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5a13037

Branch: refs/heads/branch-2
Commit: c5a13037048eb1e3b5a500aeec0e2e953e7d509a
Parents: 2e153bc
Author: Kihwal Lee <kihwal@apache.org>
Authored: Thu Oct 13 14:55:22 2016 -0500
Committer: Kihwal Lee <kihwal@apache.org>
Committed: Thu Oct 13 14:55:22 2016 -0500

----------------------------------------------------------------------
 .../blockmanagement/DecommissionManager.java    | 34 ++++++++++++++++----
 1 file changed, 28 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5a13037/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 78b6a20..10e4c96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -388,17 +388,12 @@ public class DecommissionManager {
      */
     private final int numBlocksPerCheck;
     /**
-<<<<<<< HEAD
      * The maximum number of nodes to check per tick.
      */
     private final int numNodesPerCheck;
     /**
      * The maximum number of nodes to track in decomNodeBlocks. A value of 0
      * means no limit.
-=======
-     * The maximum number of nodes to track in outOfServiceNodeBlocks.
-     * A value of 0 means no limit.
->>>>>>> 9dcbdbd... HDFS-9392. Admins support for maintenance state.
Contributed by Ming Ma.
      */
     private final int maxConcurrentTrackedNodes;
     /**
@@ -406,6 +401,10 @@ public class DecommissionManager {
      */
     private int numBlocksChecked = 0;
     /**
+     * The number of blocks checked after (re)holding lock.
+     */
+    private int numBlocksCheckedPerLock = 0;
+    /**
      * The number of nodes that have been checked on this tick. Used for 
      * testing.
      */
@@ -443,6 +442,7 @@ public class DecommissionManager {
       }
       // Reset the checked count at beginning of each iteration
       numBlocksChecked = 0;
+      numBlocksCheckedPerLock = 0;
       numNodesChecked = 0;
       // Check decom progress
       namesystem.writeLock();
@@ -478,7 +478,8 @@ public class DecommissionManager {
 
       while (it.hasNext()
           && !exceededNumBlocksPerCheck()
-          && !exceededNumNodesPerCheck()) {
+          && !exceededNumNodesPerCheck()
+          && namesystem.isRunning()) {
         numNodesChecked++;
         final Map.Entry<DatanodeDescriptor, AbstractList<BlockInfo>>
             entry = it.next();
@@ -608,7 +609,28 @@ public class DecommissionManager {
       int decommissionOnlyReplicas = 0;
       int underReplicatedInOpenFiles = 0;
       while (it.hasNext()) {
+        if (insufficientlyReplicated == null
+            && numBlocksCheckedPerLock >= numBlocksPerCheck) {
+          // During a full scan, insufficientlyReplicated will NOT be null and
+          // the iterator will be the DN's own iterator, so the lock must not
+          // be yielded; otherwise a ConcurrentModificationException could
+          // occur. Once the full scan is done, the iterator is a copy, so the
+          // lock can safely be yielded.
+          // Yielding is required in case the number of blocks is greater than
+          // the configured per-iteration limit.
+          namesystem.writeUnlock();
+          try {
+            LOG.debug("Yielded lock during decommission check");
+            Thread.sleep(0, 500);
+          } catch (InterruptedException ignored) {
+            return;
+          }
+          // reset
+          numBlocksCheckedPerLock = 0;
+          namesystem.writeLock();
+        }
         numBlocksChecked++;
+        numBlocksCheckedPerLock++;
         final BlockInfo block = it.next();
         // Remove the block from the list if it's no longer in the block map,
         // e.g. the containing file has been deleted


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message