Author: tomwhite
Date: Wed Mar 14 14:19:34 2007
New Revision: 518327
URL: http://svn.apache.org/viewvc?view=rev&rev=518327
Log:
HADOOP-1117. Fix DFS scalability: when the namenode is restarted it consumes 80% CPU. Contributed
by Dhruba Borthakur.
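In outline, the fix stops re-queuing blocks that have already reached their target replication: once enough replicas are reported, a block is removed from neededReplications (and skipped in pendingTransfers) instead of being rescheduled, so a restarted namenode no longer spins over work that is already done. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real FSNamesystem structures:

    import java.util.HashSet;
    import java.util.Iterator;
    import java.util.Set;

    class ReplicationQueueSketch {
        // Simplified stand-in for a block entry; not the real Hadoop Block class.
        static class BlockInfo {
            int currentReplicas;    // replicas reported so far
            int targetReplication;  // replication factor from the file's inode

            BlockInfo(int current, int target) {
                this.currentReplicas = current;
                this.targetReplication = target;
            }
        }

        private final Set<BlockInfo> neededReplications = new HashSet<BlockInfo>();

        // Called as datanodes report replicas, e.g. while a restarted
        // namenode is rebuilding its block map.
        void onReplicaReported(BlockInfo b) {
            b.currentReplicas++;
            if (b.currentReplicas >= b.targetReplication) {
                // Fully replicated: drop it so later scans skip it entirely,
                // instead of re-queuing it as the old code effectively did.
                neededReplications.remove(b);
            } else {
                neededReplications.add(b);
            }
        }

        // The periodic scan that was burning CPU: with the pruning above
        // (and the it.remove() below) it only visits blocks that still
        // need replication work.
        void scheduleReplications() {
            for (Iterator<BlockInfo> it = neededReplications.iterator(); it.hasNext(); ) {
                BlockInfo b = it.next();
                if (b.currentReplicas >= b.targetReplication) {
                    it.remove();  // stale entry; no point choosing targets
                    continue;
                }
                // ... choose replication targets for b ...
            }
        }
    }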
Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=518327&r1=518326&r2=518327
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Wed Mar 14 14:19:34 2007
@@ -58,6 +58,10 @@
 17. HADOOP-1109. Fix NullPointerException in StreamInputFormat.
     (Koji Noguchi via tomwhite)
 
+18. HADOOP-1117. Fix DFS scalability: when the namenode is
+    restarted it consumes 80% CPU. (Dhruba Borthakur via
+    tomwhite)
+
 
 Release 0.12.0 - 2007-03-02
 
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?view=diff&rev=518327&r1=518326&r2=518327
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Wed Mar 14 14:19:34 2007
@@ -2121,10 +2121,12 @@
       // check whether safe replication is reached for the block
       // only if it is a part of a files
       incrementSafeBlockCount( numCurrentReplica );
-      
+
       // handle underReplication/overReplication
       short fileReplication = fileINode.getReplication();
-      if(numCurrentReplica < fileReplication) {
+      if (numCurrentReplica >= fileReplication) {
+        neededReplications.remove(block);
+      } else {
         neededReplications.update(block, curReplicaDelta, 0);
       }
       proccessOverReplicatedBlock( block, fileReplication );
@@ -2640,17 +2642,21 @@
           filterDecommissionedNodes(containingNodes);
           int numCurrentReplica = nodes.size() +
                                   pendingReplications.getNumReplicas(block);
-          DatanodeDescriptor targets[] = replicator.chooseTarget(
+          if (numCurrentReplica >= fileINode.getReplication()) {
+            it.remove();
+          } else {
+            DatanodeDescriptor targets[] = replicator.chooseTarget(
               Math.min( fileINode.getReplication() - numCurrentReplica,
                         needed),
               datanodeMap.get(srcNode.getStorageID()),
               nodes, null, blockSize);
-          if (targets.length > 0) {
-            // Build items to return
-            replicateBlocks.add(block);
-            numCurrentReplicas.add(new Integer(numCurrentReplica));
-            replicateTargetSets.add(targets);
-            needed -= targets.length;
+            if (targets.length > 0) {
+              // Build items to return
+              replicateBlocks.add(block);
+              numCurrentReplicas.add(new Integer(numCurrentReplica));
+              replicateTargetSets.add(targets);
+              needed -= targets.length;
+            }
           }
         }
       }
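One detail worth noting in the second hunk: the fully replicated block is dropped with it.remove() rather than by calling remove() on the underlying collection, because the fail-fast iterators of the standard java.util collections throw ConcurrentModificationException when the collection is modified behind them mid-loop. A self-contained illustration of the idiom (not Hadoop code):

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    public class IteratorRemoveDemo {
        public static void main(String[] args) {
            List<Integer> replicaCounts = new ArrayList<Integer>();
            replicaCounts.add(1);
            replicaCounts.add(3);
            replicaCounts.add(2);

            final int target = 3;
            // Drop entries that already meet the target while iterating.
            for (Iterator<Integer> it = replicaCounts.iterator(); it.hasNext(); ) {
                if (it.next() >= target) {
                    it.remove();  // replicaCounts.remove(...) here would risk
                                  // a ConcurrentModificationException
                }
            }
            System.out.println(replicaCounts);  // prints [1, 2]
        }
    }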