hadoop-common-commits mailing list archives

From: rang...@apache.org
Subject: svn commit: r615490 - in /hadoop/core/trunk: CHANGES.txt src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java src/java/org/apache/hadoop/dfs/FSNamesystem.java
Date: Sat, 26 Jan 2008 21:00:35 GMT
Author: rangadi
Date: Sat Jan 26 13:00:34 2008
New Revision: 615490

URL: http://svn.apache.org/viewvc?rev=615490&view=rev
Log:
HADOOP-2576. Namenode performance degradation over time triggered by
large heartbeat interval. (Raghu Angadi)
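
The degradation comes from how pending deletions were drained on each heartbeat: the namenode pulled up to a fixed chunk of blocks off the front of a per-datanode ArrayList, and ArrayList.remove(0) shifts every remaining element, so each drain costs time proportional to the whole backlog. With a large heartbeat interval that backlog grows between heartbeats, and the namenode slows down over time. The sketch below is not part of the patch; plain long block ids stand in for Block objects and the sizes are made up, but it shows the cost difference between the old list drain and the new set-plus-iterator drain.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeSet;

// Minimal sketch of the drain cost, not code from this commit. Long block
// ids stand in for org.apache.hadoop.dfs.Block; backlog and chunk sizes
// are made up for illustration.
public class InvalidateDrainSketch {
  public static void main(String[] args) {
    final int backlog = 200000;  // deletions accumulated between heartbeats
    final int chunk = 100;       // blocks handed to the datanode per heartbeat

    List<Long> asList = new ArrayList<Long>();
    TreeSet<Long> asSet = new TreeSet<Long>();
    for (long b = 0; b < backlog; b++) {
      asList.add(b);
      asSet.add(b);
    }

    // Old pattern: remove(0) shifts every remaining element, so one
    // heartbeat's drain is O(chunk * backlog).
    long t0 = System.nanoTime();
    for (int i = 0; i < chunk; i++) {
      asList.remove(0);
    }
    long listNanos = System.nanoTime() - t0;

    // New pattern: iterator removal from a TreeSet is O(chunk * log backlog).
    t0 = System.nanoTime();
    Iterator<Long> iter = asSet.iterator();
    for (int i = 0; i < chunk; i++) {
      iter.next();
      iter.remove();
    }
    long setNanos = System.nanoTime() - t0;

    System.out.println("list drain: " + listNanos / 1000 + " us, "
                       + "set drain: " + setNanos / 1000 + " us");
  }
}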

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
    hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=615490&r1=615489&r2=615490&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Sat Jan 26 13:00:34 2008
@@ -588,6 +588,9 @@
     HADOOP-2714. TestDecommission failed on windows because the replication
     request was timing out. (dhruba)
 
+    HADOOP-2576. Namenode performance degradation over time triggered by
+    large heartbeat interval. (Raghu Angadi)
+
 Release 0.15.3 - 2008-01-18
 
   BUG FIXES

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java?rev=615490&r1=615489&r2=615490&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java Sat Jan 26 13:00:34 2008
@@ -46,7 +46,7 @@
   //
   List<Block> replicateBlocks;
   List<DatanodeDescriptor[]> replicateTargetSets;
-  List<Block> invalidateBlocks;
+  Set<Block> invalidateBlocks;
   
   /** Default constructor */
   public DatanodeDescriptor() {
@@ -128,7 +128,7 @@
   private void initWorkLists() {
     replicateBlocks = new ArrayList<Block>();
     replicateTargetSets = new ArrayList<DatanodeDescriptor[]>();
-    invalidateBlocks = new ArrayList<Block>();
+    invalidateBlocks = new TreeSet<Block>();
   }
 
   /**
@@ -308,9 +308,10 @@
       }
       int outnum = Math.min(maxblocks, invalidateBlocks.size());
       Block[] blocklist = new Block[outnum];
+      Iterator<Block> iter = invalidateBlocks.iterator();
       for (int i = 0; i < outnum; i++) {
-        blocklist[i] = invalidateBlocks.get(0);
-        invalidateBlocks.remove(0);
+        blocklist[i] = iter.next();
+        iter.remove();
       }
       assert(blocklist.length > 0);
       xferResults[0] = blocklist;
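
Two side effects of moving invalidateBlocks from a List to a TreeSet are worth noting: the element type must be Comparable, which the real org.apache.hadoop.dfs.Block class satisfies, and a block queued twice between heartbeats is now stored only once. The sketch below is a stand-alone illustration of those Set semantics; SimpleBlock is an invented stand-in, not the real Block class.

import java.util.Iterator;
import java.util.TreeSet;

// Hypothetical stand-in for org.apache.hadoop.dfs.Block, used only to
// illustrate the TreeSet semantics the patch relies on.
class SimpleBlock implements Comparable<SimpleBlock> {
  final long blkid;
  SimpleBlock(long blkid) { this.blkid = blkid; }
  public int compareTo(SimpleBlock o) {
    return blkid < o.blkid ? -1 : (blkid == o.blkid ? 0 : 1);
  }
  public boolean equals(Object o) {
    return (o instanceof SimpleBlock) && ((SimpleBlock) o).blkid == blkid;
  }
  public int hashCode() { return (int) (blkid ^ (blkid >>> 32)); }
  public String toString() { return "blk_" + blkid; }
}

public class InvalidateSetSketch {
  public static void main(String[] args) {
    TreeSet<SimpleBlock> invalidateBlocks = new TreeSet<SimpleBlock>();
    invalidateBlocks.add(new SimpleBlock(7));
    invalidateBlocks.add(new SimpleBlock(3));
    invalidateBlocks.add(new SimpleBlock(7));  // duplicate is collapsed

    System.out.println(invalidateBlocks.size());  // prints 2, not 3

    // Drain the set the way getInvalidateBlocks now does: iterate and remove.
    Iterator<SimpleBlock> iter = invalidateBlocks.iterator();
    while (iter.hasNext()) {
      SimpleBlock b = iter.next();
      iter.remove();
      System.out.println("invalidate " + b);  // blk_3 then blk_7
    }
  }
}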

Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=615490&r1=615489&r2=615490&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Sat Jan 26 13:00:34 2008
@@ -224,6 +224,9 @@
   private long softLimit = LEASE_SOFTLIMIT_PERIOD;
   private long hardLimit = LEASE_HARDLIMIT_PERIOD;
 
+  // Ask Datanode only up to this many blocks to delete.
+  private int blockInvalidateLimit = FSConstants.BLOCK_INVALIDATE_CHUNK;
+
   /**
    * FSNamesystem constructor.
    */
@@ -370,6 +373,8 @@
                                                    5 * 60) * 1000;
     this.defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     this.maxFsObjects = conf.getLong("dfs.max.objects", 0);
+    this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit, 
+                                         20*(int)(heartbeatInterval/1000));
   }
 
   /**
@@ -1319,7 +1324,7 @@
   private void addToInvalidates(Block b, DatanodeInfo n) {
     Collection<Block> invalidateSet = recentInvalidateSets.get(n.getStorageID());
     if (invalidateSet == null) {
-      invalidateSet = new ArrayList<Block>();
+      invalidateSet = new HashSet<Block>();
       recentInvalidateSets.put(n.getStorageID(), invalidateSet);
     }
     invalidateSet.add(b);
@@ -2035,8 +2040,7 @@
           nodeinfo.getReplicationSets(this.maxReplicationStreams - 
                                       xmitsInProgress, xferResults); 
           if (xferResults[0] == null) {
-            nodeinfo.getInvalidateBlocks(FSConstants.BLOCK_INVALIDATE_CHUNK,
-                                         deleteList);
+            nodeinfo.getInvalidateBlocks(blockInvalidateLimit, deleteList);
           }
           return false;
         }
@@ -2420,7 +2424,7 @@
       // thorugh succeeding heartbeat responses.
       //
       if (!isValidBlock(b)) {
-        if (obsolete.size() > FSConstants.BLOCK_INVALIDATE_CHUNK) {
+        if (obsolete.size() > blockInvalidateLimit) {
           addToInvalidates(b, node);
         } else {
           obsolete.add(b);
@@ -2969,15 +2973,13 @@
 
     Iterator<Block> it = null;
     int sendNum = invalidateSet.size();
-    int origSize = sendNum;
     ArrayList<Block> sendBlock = new ArrayList<Block>(sendNum);
 
     //
     // calculate the number of blocks that we send in one message
     //
-    if (sendNum > FSConstants.BLOCK_INVALIDATE_CHUNK) {
-      sendNum =  FSConstants.BLOCK_INVALIDATE_CHUNK;
-    }
+    sendNum = Math.min(sendNum, blockInvalidateLimit);
+    
     //
     // Copy the first chunk into sendBlock
     //
@@ -2992,7 +2994,6 @@
     // into the collection.
     //
     if (it.hasNext()) {
-      assert(origSize > FSConstants.BLOCK_INVALIDATE_CHUNK);
       recentInvalidateSets.put(nodeID.getStorageID(), invalidateSet);
     }
         



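On the FSNamesystem side, the fixed FSConstants.BLOCK_INVALIDATE_CHUNK quota is replaced by blockInvalidateLimit, which scales with the configured heartbeat interval so that clusters running long heartbeat intervals can still schedule deletions fast enough to keep the backlog bounded. Below is a rough sketch of the resulting values, assuming a BLOCK_INVALIDATE_CHUNK of 100 and a 3-second dfs.heartbeat.interval; both defaults are assumptions for illustration, not taken from this diff.

// Sketch of how blockInvalidateLimit scales with the heartbeat interval.
// BLOCK_INVALIDATE_CHUNK is assumed to be 100 here; check FSConstants in
// your tree for the real value.
public class BlockInvalidateLimitSketch {
  static final int BLOCK_INVALIDATE_CHUNK = 100;  // assumed default

  static int blockInvalidateLimit(long heartbeatIntervalMillis) {
    // Mirrors the expression added to the FSNamesystem constructor.
    return Math.max(BLOCK_INVALIDATE_CHUNK,
                    20 * (int) (heartbeatIntervalMillis / 1000));
  }

  public static void main(String[] args) {
    // Assumed 3-second default heartbeat: the limit stays at the old constant.
    System.out.println(blockInvalidateLimit(3000L));   // prints 100
    // A 60-second heartbeat: the per-heartbeat quota grows to compensate.
    System.out.println(blockInvalidateLimit(60000L));  // prints 1200
  }
}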