Subject: svn commit: r1380939 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
Date: Tue, 04 Sep 2012 23:56:11 -0000
To: hdfs-commits@hadoop.apache.org
From: szetszwo@apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Message-Id: <20120904235611.67F5C23889E1@eris.apache.org>

Author: szetszwo
Date: Tue Sep  4 23:56:10 2012
New Revision: 1380939

URL: http://svn.apache.org/viewvc?rev=1380939&view=rev
Log:
HDFS-3888. Clean up BlockPlacementPolicyDefault.  Contributed by Jing Zhao

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1380939&r1=1380938&r2=1380939&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Sep  4 23:56:10 2012
@@ -445,6 +445,8 @@ Branch-2 ( Unreleased changes )
     HDFS-3887. Remove redundant chooseTarget methods in BlockPlacementPolicy.
     (Jing Zhao via szetszwo)
 
+    HDFS-3888. Clean up BlockPlacementPolicyDefault.  (Jing Zhao via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1380939&r1=1380938&r2=1380939&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Tue Sep  4 23:56:10 2012
@@ -27,8 +27,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -55,9 +53,6 @@ import com.google.common.annotations.Vis
 @InterfaceAudience.Private
 public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 
-  private static final Log LOG =
-    LogFactory.getLog(BlockPlacementPolicyDefault.class.getName());
-
   private static final String enableDebugLogging =
     "For more information, please enable DEBUG log level on "
     + LOG.getClass().getName();
@@ -124,7 +119,6 @@ public class BlockPlacementPolicyDefault
         excludedNodes, blocksize);
   }
 
-  /** This is the implementation. */
   DatanodeDescriptor[] chooseTarget(int numOfReplicas,
                                     DatanodeDescriptor writer,
@@ -162,7 +156,8 @@ public class BlockPlacementPolicyDefault
     }
 
     DatanodeDescriptor localNode = chooseTarget(numOfReplicas, writer,
-        excludedNodes, blocksize, maxNodesPerRack, results);
+        excludedNodes, blocksize,
+        maxNodesPerRack, results);
     if (!returnChosenNodes) {
       results.removeAll(chosenNodes);
     }
@@ -455,14 +450,29 @@ public class BlockPlacementPolicyDefault
    * does not have too much load, and the rack does not have too many nodes
    */
   private boolean isGoodTarget(DatanodeDescriptor node,
-                               long blockSize, int maxTargetPerLoc,
+                               long blockSize, int maxTargetPerRack,
                                List<DatanodeDescriptor> results) {
-    return isGoodTarget(node, blockSize, maxTargetPerLoc,
+    return isGoodTarget(node, blockSize, maxTargetPerRack,
                         this.considerLoad, results);
   }
-
+
+  /**
+   * Determine if a node is a good target.
+   *
+   * @param node The target node
+   * @param blockSize Size of block
+   * @param maxTargetPerRack Maximum number of targets per rack. The value of
+   *                         this parameter depends on the number of racks in
+   *                         the cluster and total number of replicas for a block
+   * @param considerLoad whether or not to consider load of the target node
+   * @param results A list containing currently chosen nodes. Used to check if
+   *                too many nodes has been chosen in the target rack.
+   * @return Return true if node has enough space,
+   *         does not have too much load,
+   *         and the rack does not have too many nodes.
+   */
   protected boolean isGoodTarget(DatanodeDescriptor node,
-                                 long blockSize, int maxTargetPerLoc,
+                                 long blockSize, int maxTargetPerRack,
                                  boolean considerLoad,
                                  List<DatanodeDescriptor> results) {
     // check if the node is (being) decommissed
@@ -514,7 +524,7 @@ public class BlockPlacementPolicyDefault
         counter++;
       }
     }
-    if (counter>maxTargetPerLoc) {
+    if (counter>maxTargetPerRack) {
       if(LOG.isDebugEnabled()) {
         threadLocalBuilder.get().append(node.toString()).append(": ")
           .append("Node ").append(NodeBase.getPath(node))
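
For readers following the rename above: maxTargetPerRack bounds how many of a block's chosen replicas may share one rack, and the "if (counter > maxTargetPerRack)" test rejects a candidate once its rack is already full. Below is a minimal, self-contained sketch of that per-rack check in isolation. The Node record and the rackHasRoom method are hypothetical stand-ins for illustration only; they are not the real DatanodeDescriptor or BlockPlacementPolicyDefault APIs.

import java.util.List;

class RackCheckSketch {
  /** Hypothetical stand-in for a datanode tagged with its rack location. */
  record Node(String name, String rack) {}

  /**
   * Mirrors the rack test in isGoodTarget(): counter starts at 1 for the
   * candidate itself, each already-chosen node on the same rack adds one,
   * and the candidate is rejected once counter exceeds maxTargetPerRack.
   */
  static boolean rackHasRoom(Node candidate, List<Node> chosen,
                             int maxTargetPerRack) {
    int counter = 1;
    for (Node n : chosen) {
      if (candidate.rack().equals(n.rack())) {
        counter++;
      }
    }
    return counter <= maxTargetPerRack;
  }

  public static void main(String[] args) {
    List<Node> oneOnRack1 = List.of(new Node("d1", "/rack1"), new Node("d2", "/rack2"));
    List<Node> twoOnRack1 = List.of(new Node("d1", "/rack1"), new Node("d2", "/rack1"));
    // With maxTargetPerRack = 2, a second replica on /rack1 is acceptable...
    System.out.println(rackHasRoom(new Node("d3", "/rack1"), oneOnRack1, 2)); // true
    // ...but a third replica on the same rack is not.
    System.out.println(rackHasRoom(new Node("d3", "/rack1"), twoOnRack1, 2)); // false
  }
}

The parameter's value is computed elsewhere in the policy from the replica count and the cluster topology, which is why the javadoc added in this commit notes that it "depends on the number of racks in the cluster and total number of replicas for a block".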