Subject: svn commit: r1523400 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/test/java/org/apache/hadoop/hdfs/server/balancer/ src/test/java/or...
Date: Sun, 15 Sep 2013 04:15:58 -0000
From: szetszwo@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Message-Id: <20130915041559.07ED6238896F@eris.apache.org>

Author: szetszwo
Date: Sun Sep 15 04:15:57 2013
New Revision: 1523400

URL: http://svn.apache.org/r1523400
Log: HDFS-5188. In BlockPlacementPolicy, reduce the number of chooseTarget(..)
methods; replace HashMap with Map in parameter declarations and cleanup some
related code.
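For readers skimming the diff below: the recurring pattern in this change is to declare the excludedNodes parameter against the java.util.Map interface rather than the concrete HashMap, and to collapse the extra chooseTarget(..) overloads into thin wrappers around the single full-argument method. A minimal, self-contained sketch of that idiom (the Node type and method shapes here are illustrative placeholders, not the actual HDFS signatures):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    abstract class PlacementPolicySketch {
      static class Node { }  // stand-in for a datanode descriptor

      /** The single "full" overload that does the real work. */
      abstract Node[] chooseTarget(String srcPath, int numOfReplicas, Node writer,
          List<Node> chosenNodes, boolean returnChosenNodes,
          Map<Node, Node> excludedNodes,  // interface type: callers may pass any Map
          long blocksize);

      /** Convenience overloads delegate instead of duplicating placement logic. */
      Node[] chooseTarget(String srcPath, int numOfReplicas, Node writer,
          List<Node> chosenNodes, long blocksize) {
        return chooseTarget(srcPath, numOfReplicas, writer, chosenNodes,
            false, new HashMap<Node, Node>(), blocksize);
      }
    }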
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1523400&r1=1523399&r2=1523400&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sun Sep 15 04:15:57 2013
@@ -275,6 +275,10 @@ Release 2.3.0 - UNRELEASED
     HDFS-4096. Add snapshot information to namenode WebUI. (Haohui Mai via jing9)

+    HDFS-5188. In BlockPlacementPolicy, reduce the number of chooseTarget(..)
+    methods; replace HashMap with Map in parameter declarations and cleanup
+    some related code.
(szetszwo) + OPTIMIZATIONS BUG FIXES Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1523400&r1=1523399&r2=1523400&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Sun Sep 15 04:15:57 2013 @@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; /** * This class contains constants for configuration keys used @@ -348,6 +349,8 @@ public class DFSConfigKeys extends Commo public static final String DFS_BLOCK_ACCESS_TOKEN_LIFETIME_KEY = "dfs.block.access.token.lifetime"; public static final long DFS_BLOCK_ACCESS_TOKEN_LIFETIME_DEFAULT = 600L; + public static final String DFS_BLOCK_REPLICATOR_CLASSNAME_KEY = "dfs.block.replicator.classname"; + public static final Class DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT = BlockPlacementPolicyDefault.class; public static final String DFS_REPLICATION_MAX_KEY = "dfs.replication.max"; public static final int DFS_REPLICATION_MAX_DEFAULT = 512; public static final String DFS_DF_INTERVAL_KEY = "dfs.df.interval"; Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1523400&r1=1523399&r2=1523400&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Sun Sep 15 04:15:57 2013 @@ -1256,8 +1256,7 @@ public class BlockManager { namesystem.writeUnlock(); } - HashMap excludedNodes - = new HashMap(); + final Map excludedNodes = new HashMap(); for(ReplicationWork rw : work){ // Exclude all of the containing nodes from being targets. // This list includes decommissioning or corrupt nodes. @@ -1269,9 +1268,7 @@ public class BlockManager { // choose replication targets: NOT HOLDING THE GLOBAL LOCK // It is costly to extract the filename for which chooseTargets is called, // so for now we pass in the block collection itself. 
- rw.targets = blockplacement.chooseTarget(rw.bc, - rw.additionalReplRequired, rw.srcNode, rw.liveReplicaNodes, - excludedNodes, rw.block.getNumBytes()); + rw.chooseTargets(blockplacement, excludedNodes); } namesystem.writeLock(); @@ -3249,6 +3246,13 @@ assert storedBlock.findDatanode(dn) < 0 this.priority = priority; this.targets = null; } + + private void chooseTargets(BlockPlacementPolicy blockplacement, + Map excludedNodes) { + targets = blockplacement.chooseTarget(bc.getName(), + additionalReplRequired, srcNode, liveReplicaNodes, false, + excludedNodes, block.getNumBytes()); + } } /** Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java?rev=1523400&r1=1523399&r2=1523400&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java Sun Sep 15 04:15:57 2013 @@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.bl import java.util.ArrayList; import java.util.Collection; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -27,6 +26,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.LocatedBlock; @@ -55,25 +55,6 @@ public abstract class BlockPlacementPoli * choose numOfReplicas data nodes for writer * to re-replicate a block with size blocksize * If not, return as many as we can. - * - * @param srcPath the file to which this chooseTargets is being invoked. - * @param numOfReplicas additional number of replicas wanted. - * @param writer the writer's machine, null if not in the cluster. - * @param chosenNodes datanodes that have been chosen as targets. - * @param blocksize size of the data to be written. - * @return array of DatanodeDescriptor instances chosen as target - * and sorted as a pipeline. - */ - abstract DatanodeDescriptor[] chooseTarget(String srcPath, - int numOfReplicas, - DatanodeDescriptor writer, - List chosenNodes, - long blocksize); - - /** - * choose numOfReplicas data nodes for writer - * to re-replicate a block with size blocksize - * If not, return as many as we can. * * @param srcPath the file to which this chooseTargets is being invoked. * @param numOfReplicas additional number of replicas wanted. @@ -90,34 +71,8 @@ public abstract class BlockPlacementPoli DatanodeDescriptor writer, List chosenNodes, boolean returnChosenNodes, - HashMap excludedNodes, + Map excludedNodes, long blocksize); - - /** - * choose numOfReplicas data nodes for writer - * If not, return as many as we can. - * The base implemenatation extracts the pathname of the file from the - * specified srcBC, but this could be a costly operation depending on the - * file system implementation. 
Concrete implementations of this class should - * override this method to avoid this overhead. - * - * @param srcBC block collection of file for which chooseTarget is invoked. - * @param numOfReplicas additional number of replicas wanted. - * @param writer the writer's machine, null if not in the cluster. - * @param chosenNodes datanodes that have been chosen as targets. - * @param blocksize size of the data to be written. - * @return array of DatanodeDescriptor instances chosen as target - * and sorted as a pipeline. - */ - DatanodeDescriptor[] chooseTarget(BlockCollection srcBC, - int numOfReplicas, - DatanodeDescriptor writer, - List chosenNodes, - HashMap excludedNodes, - long blocksize) { - return chooseTarget(srcBC.getName(), numOfReplicas, writer, - chosenNodes, false, excludedNodes, blocksize); - } /** * Same as {@link #chooseTarget(String, int, DatanodeDescriptor, List, boolean, @@ -128,7 +83,7 @@ public abstract class BlockPlacementPoli */ DatanodeDescriptor[] chooseTarget(String src, int numOfReplicas, DatanodeDescriptor writer, - HashMap excludedNodes, + Map excludedNodes, long blocksize, List favoredNodes) { // This class does not provide the functionality of placing // a block in favored datanodes. The implementations of this class @@ -183,7 +138,7 @@ public abstract class BlockPlacementPoli /** * Get an instance of the configured Block Placement Policy based on the - * value of the configuration paramater dfs.block.replicator.classname. + * the configuration property {@link DFS_BLOCK_REPLICATOR_CLASSNAME_KEY}. * * @param conf the configuration to be used * @param stats an object that is used to retrieve the load on the cluster @@ -193,12 +148,12 @@ public abstract class BlockPlacementPoli public static BlockPlacementPolicy getInstance(Configuration conf, FSClusterStats stats, NetworkTopology clusterMap) { - Class replicatorClass = - conf.getClass("dfs.block.replicator.classname", - BlockPlacementPolicyDefault.class, - BlockPlacementPolicy.class); - BlockPlacementPolicy replicator = (BlockPlacementPolicy) ReflectionUtils.newInstance( - replicatorClass, conf); + final Class replicatorClass = conf.getClass( + DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, + DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_DEFAULT, + BlockPlacementPolicy.class); + final BlockPlacementPolicy replicator = ReflectionUtils.newInstance( + replicatorClass, conf); replicator.initialize(conf, stats, clusterMap); return replicator; } Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1523400&r1=1523399&r2=1523400&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Sun Sep 15 04:15:57 2013 @@ -22,8 +22,8 @@ import static org.apache.hadoop.util.Tim import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; -import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.TreeSet; @@ -57,6 +57,14 @@ public class 
BlockPlacementPolicyDefault "For more information, please enable DEBUG log level on " + BlockPlacementPolicy.class.getName(); + private static final ThreadLocal debugLoggingBuilder + = new ThreadLocal() { + @Override + protected StringBuilder initialValue() { + return new StringBuilder(); + } + }; + protected boolean considerLoad; private boolean preferLocalNode = true; protected NetworkTopology clusterMap; @@ -95,40 +103,25 @@ public class BlockPlacementPolicyDefault DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT); } - protected ThreadLocal threadLocalBuilder = - new ThreadLocal() { - @Override - protected StringBuilder initialValue() { - return new StringBuilder(); - } - }; - - @Override - public DatanodeDescriptor[] chooseTarget(String srcPath, - int numOfReplicas, - DatanodeDescriptor writer, - List chosenNodes, - long blocksize) { - return chooseTarget(numOfReplicas, writer, chosenNodes, false, - null, blocksize); - } - @Override public DatanodeDescriptor[] chooseTarget(String srcPath, int numOfReplicas, DatanodeDescriptor writer, List chosenNodes, boolean returnChosenNodes, - HashMap excludedNodes, + Map excludedNodes, long blocksize) { return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes, blocksize); } @Override - DatanodeDescriptor[] chooseTarget(String src, int numOfReplicas, - DatanodeDescriptor writer, HashMap excludedNodes, - long blocksize, List favoredNodes) { + DatanodeDescriptor[] chooseTarget(String src, + int numOfReplicas, + DatanodeDescriptor writer, + Map excludedNodes, + long blocksize, + List favoredNodes) { try { if (favoredNodes == null || favoredNodes.size() == 0) { // Favored nodes not specified, fall back to regular block placement. @@ -137,7 +130,7 @@ public class BlockPlacementPolicyDefault excludedNodes, blocksize); } - HashMap favoriteAndExcludedNodes = excludedNodes == null ? + Map favoriteAndExcludedNodes = excludedNodes == null ? new HashMap() : new HashMap(excludedNodes); // Choose favored nodes @@ -181,14 +174,14 @@ public class BlockPlacementPolicyDefault } /** This is the implementation. */ - DatanodeDescriptor[] chooseTarget(int numOfReplicas, + private DatanodeDescriptor[] chooseTarget(int numOfReplicas, DatanodeDescriptor writer, List chosenNodes, boolean returnChosenNodes, - HashMap excludedNodes, + Map excludedNodes, long blocksize) { if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) { - return new DatanodeDescriptor[0]; + return DatanodeDescriptor.EMPTY_ARRAY; } if (excludedNodes == null) { @@ -204,7 +197,6 @@ public class BlockPlacementPolicyDefault for (DatanodeDescriptor node:chosenNodes) { // add localMachine and related nodes to excludedNodes addToExcludedNodes(node, excludedNodes); - adjustExcludedNodes(excludedNodes, node); } if (!clusterMap.contains(writer)) { @@ -239,7 +231,7 @@ public class BlockPlacementPolicyDefault /* choose numOfReplicas from all data nodes */ private DatanodeDescriptor chooseTarget(int numOfReplicas, DatanodeDescriptor writer, - HashMap excludedNodes, + Map excludedNodes, long blocksize, int maxNodesPerRack, List results, @@ -256,7 +248,7 @@ public class BlockPlacementPolicyDefault } // Keep a copy of original excludedNodes - final HashMap oldExcludedNodes = avoidStaleNodes ? + final Map oldExcludedNodes = avoidStaleNodes ? new HashMap(excludedNodes) : null; try { if (numOfResults == 0) { @@ -316,19 +308,19 @@ public class BlockPlacementPolicyDefault return writer; } - /* choose localMachine as the target. + /** + * Choose localMachine as the target. 
* if localMachine is not available, * choose a node on the same rack * @return the chosen node */ - protected DatanodeDescriptor chooseLocalNode( - DatanodeDescriptor localMachine, - HashMap excludedNodes, + protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine, + Map excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) - throws NotEnoughReplicasException { + throws NotEnoughReplicasException { // if no local machine, randomly choose one node if (localMachine == null) return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, @@ -337,11 +329,8 @@ public class BlockPlacementPolicyDefault // otherwise try local machine first Node oldNode = excludedNodes.put(localMachine, localMachine); if (oldNode == null) { // was not in the excluded list - if (isGoodTarget(localMachine, blocksize, maxNodesPerRack, false, - results, avoidStaleNodes)) { - results.add(localMachine); - // add localMachine and related nodes to excludedNode - addToExcludedNodes(localMachine, excludedNodes); + if (addIfIsGoodTarget(localMachine, excludedNodes, blocksize, + maxNodesPerRack, false, results, avoidStaleNodes) >= 0) { return localMachine; } } @@ -358,26 +347,26 @@ public class BlockPlacementPolicyDefault * @return number of new excluded nodes */ protected int addToExcludedNodes(DatanodeDescriptor localMachine, - HashMap excludedNodes) { + Map excludedNodes) { Node node = excludedNodes.put(localMachine, localMachine); return node == null?1:0; } - /* choose one node from the rack that localMachine is on. + /** + * Choose one node from the rack that localMachine is on. * if no such node is available, choose one node from the rack where * a second replica is on. * if still no such node is available, choose a random node * in the cluster. * @return the chosen node */ - protected DatanodeDescriptor chooseLocalRack( - DatanodeDescriptor localMachine, - HashMap excludedNodes, + protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine, + Map excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) - throws NotEnoughReplicasException { + throws NotEnoughReplicasException { // no local machine, so choose a random machine if (localMachine == null) { return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize, @@ -391,9 +380,7 @@ public class BlockPlacementPolicyDefault } catch (NotEnoughReplicasException e1) { // find the second replica DatanodeDescriptor newLocal=null; - for(Iterator iter=results.iterator(); - iter.hasNext();) { - DatanodeDescriptor nextNode = iter.next(); + for(DatanodeDescriptor nextNode : results) { if (nextNode != localMachine) { newLocal = nextNode; break; @@ -416,7 +403,8 @@ public class BlockPlacementPolicyDefault } } - /* choose numOfReplicas nodes from the racks + /** + * Choose numOfReplicas nodes from the racks * that localMachine is NOT on. * if not enough nodes are available, choose the remaining ones * from the local rack @@ -424,12 +412,12 @@ public class BlockPlacementPolicyDefault protected void chooseRemoteRack(int numOfReplicas, DatanodeDescriptor localMachine, - HashMap excludedNodes, + Map excludedNodes, long blocksize, int maxReplicasPerRack, List results, boolean avoidStaleNodes) - throws NotEnoughReplicasException { + throws NotEnoughReplicasException { int oldNumOfReplicas = results.size(); // randomly choose one node from remote racks try { @@ -443,91 +431,59 @@ public class BlockPlacementPolicyDefault } } - /* Randomly choose one target from nodes. 
- * @return the chosen node + /** + * Randomly choose one target from the given scope. + * @return the chosen node, if there is any. */ - protected DatanodeDescriptor chooseRandom( - String nodes, - HashMap excludedNodes, - long blocksize, - int maxNodesPerRack, - List results, - boolean avoidStaleNodes) - throws NotEnoughReplicasException { - int numOfAvailableNodes = - clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet()); - StringBuilder builder = null; - if (LOG.isDebugEnabled()) { - builder = threadLocalBuilder.get(); - builder.setLength(0); - builder.append("["); - } - boolean badTarget = false; - while(numOfAvailableNodes > 0) { - DatanodeDescriptor chosenNode = - (DatanodeDescriptor)(clusterMap.chooseRandom(nodes)); - - Node oldNode = excludedNodes.put(chosenNode, chosenNode); - if (oldNode == null) { // chosenNode was not in the excluded list - numOfAvailableNodes--; - if (isGoodTarget(chosenNode, blocksize, - maxNodesPerRack, results, avoidStaleNodes)) { - results.add(chosenNode); - // add chosenNode and related nodes to excludedNode - addToExcludedNodes(chosenNode, excludedNodes); - adjustExcludedNodes(excludedNodes, chosenNode); - return chosenNode; - } else { - badTarget = true; - } - } - } - - String detail = enableDebugLogging; - if (LOG.isDebugEnabled()) { - if (badTarget && builder != null) { - detail = builder.append("]").toString(); - builder.setLength(0); - } else detail = ""; - } - throw new NotEnoughReplicasException(detail); + protected DatanodeDescriptor chooseRandom(String scope, + Map excludedNodes, + long blocksize, + int maxNodesPerRack, + List results, + boolean avoidStaleNodes) + throws NotEnoughReplicasException { + return chooseRandom(1, scope, excludedNodes, blocksize, maxNodesPerRack, + results, avoidStaleNodes); } - - /* Randomly choose numOfReplicas targets from nodes. + + /** + * Randomly choose numOfReplicas targets from the given scope. + * @return the first chosen node, if there is any. 
*/ - protected void chooseRandom(int numOfReplicas, - String nodes, - HashMap excludedNodes, + protected DatanodeDescriptor chooseRandom(int numOfReplicas, + String scope, + Map excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) - throws NotEnoughReplicasException { + throws NotEnoughReplicasException { - int numOfAvailableNodes = - clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet()); + int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes( + scope, excludedNodes.keySet()); StringBuilder builder = null; if (LOG.isDebugEnabled()) { - builder = threadLocalBuilder.get(); + builder = debugLoggingBuilder.get(); builder.setLength(0); builder.append("["); } boolean badTarget = false; + DatanodeDescriptor firstChosen = null; while(numOfReplicas > 0 && numOfAvailableNodes > 0) { DatanodeDescriptor chosenNode = - (DatanodeDescriptor)(clusterMap.chooseRandom(nodes)); + (DatanodeDescriptor)clusterMap.chooseRandom(scope); Node oldNode = excludedNodes.put(chosenNode, chosenNode); if (oldNode == null) { numOfAvailableNodes--; - if (isGoodTarget(chosenNode, blocksize, - maxNodesPerRack, results, avoidStaleNodes)) { + int newExcludedNodes = addIfIsGoodTarget(chosenNode, excludedNodes, + blocksize, maxNodesPerRack, considerLoad, results, avoidStaleNodes); + if (newExcludedNodes >= 0) { numOfReplicas--; - results.add(chosenNode); - // add chosenNode and related nodes to excludedNode - int newExcludedNodes = addToExcludedNodes(chosenNode, excludedNodes); + if (firstChosen == null) { + firstChosen = chosenNode; + } numOfAvailableNodes -= newExcludedNodes; - adjustExcludedNodes(excludedNodes, chosenNode); } else { badTarget = true; } @@ -544,34 +500,44 @@ public class BlockPlacementPolicyDefault } throw new NotEnoughReplicasException(detail); } + + return firstChosen; } - + /** - * After choosing a node to place replica, adjust excluded nodes accordingly. - * It should do nothing here as chosenNode is already put into exlcudeNodes, - * but it can be overridden in subclass to put more related nodes into - * excludedNodes. - * - * @param excludedNodes - * @param chosenNode - */ - protected void adjustExcludedNodes(HashMap excludedNodes, - Node chosenNode) { - // do nothing here. + * If the given node is a good target, add it to the result list and + * update the excluded node map. + * @return -1 if the given is not a good target; + * otherwise, return the number of excluded nodes added to the map. + */ + int addIfIsGoodTarget(DatanodeDescriptor node, + Map excludedNodes, + long blockSize, + int maxNodesPerRack, + boolean considerLoad, + List results, + boolean avoidStaleNodes) { + if (isGoodTarget(node, blockSize, maxNodesPerRack, considerLoad, + results, avoidStaleNodes)) { + results.add(node); + // add node and related nodes to excludedNode + return addToExcludedNodes(node, excludedNodes); + } else { + return -1; + } } - /* judge if a node is a good target. - * return true if node has enough space, - * does not have too much load, and the rack does not have too many nodes - */ - private boolean isGoodTarget(DatanodeDescriptor node, - long blockSize, int maxTargetPerRack, - List results, - boolean avoidStaleNodes) { - return isGoodTarget(node, blockSize, maxTargetPerRack, this.considerLoad, - results, avoidStaleNodes); + private static void logNodeIsNotChosen(DatanodeDescriptor node, String reason) { + if (LOG.isDebugEnabled()) { + // build the error message for later use. 
+ debugLoggingBuilder.get() + .append(node).append(": ") + .append("Node ").append(NodeBase.getPath(node)) + .append(" is not chosen because ") + .append(reason); + } } - + /** * Determine if a node is a good target. * @@ -588,28 +554,20 @@ public class BlockPlacementPolicyDefault * does not have too much load, * and the rack does not have too many nodes. */ - protected boolean isGoodTarget(DatanodeDescriptor node, + private boolean isGoodTarget(DatanodeDescriptor node, long blockSize, int maxTargetPerRack, boolean considerLoad, List results, boolean avoidStaleNodes) { // check if the node is (being) decommissed if (node.isDecommissionInProgress() || node.isDecommissioned()) { - if(LOG.isDebugEnabled()) { - threadLocalBuilder.get().append(node.toString()).append(": ") - .append("Node ").append(NodeBase.getPath(node)) - .append(" is not chosen because the node is (being) decommissioned "); - } + logNodeIsNotChosen(node, "the node is (being) decommissioned "); return false; } if (avoidStaleNodes) { if (node.isStale(this.staleInterval)) { - if (LOG.isDebugEnabled()) { - threadLocalBuilder.get().append(node.toString()).append(": ") - .append("Node ").append(NodeBase.getPath(node)) - .append(" is not chosen because the node is stale "); - } + logNodeIsNotChosen(node, "the node is stale "); return false; } } @@ -618,11 +576,7 @@ public class BlockPlacementPolicyDefault (node.getBlocksScheduled() * blockSize); // check the remaining capacity of the target machine if (blockSize* HdfsConstants.MIN_BLOCKS_FOR_WRITE>remaining) { - if(LOG.isDebugEnabled()) { - threadLocalBuilder.get().append(node.toString()).append(": ") - .append("Node ").append(NodeBase.getPath(node)) - .append(" is not chosen because the node does not have enough space "); - } + logNodeIsNotChosen(node, "the node does not have enough space "); return false; } @@ -634,11 +588,7 @@ public class BlockPlacementPolicyDefault avgLoad = (double)stats.getTotalLoad()/size; } if (node.getXceiverCount() > (2.0 * avgLoad)) { - if(LOG.isDebugEnabled()) { - threadLocalBuilder.get().append(node.toString()).append(": ") - .append("Node ").append(NodeBase.getPath(node)) - .append(" is not chosen because the node is too busy "); - } + logNodeIsNotChosen(node, "the node is too busy "); return false; } } @@ -646,31 +596,25 @@ public class BlockPlacementPolicyDefault // check if the target rack has chosen too many nodes String rackname = node.getNetworkLocation(); int counter=1; - for(Iterator iter = results.iterator(); - iter.hasNext();) { - Node result = iter.next(); + for(Node result : results) { if (rackname.equals(result.getNetworkLocation())) { counter++; } } if (counter>maxTargetPerRack) { - if(LOG.isDebugEnabled()) { - threadLocalBuilder.get().append(node.toString()).append(": ") - .append("Node ").append(NodeBase.getPath(node)) - .append(" is not chosen because the rack has too many chosen nodes "); - } + logNodeIsNotChosen(node, "the rack has too many chosen nodes "); return false; } return true; } - /* Return a pipeline of nodes. + /** + * Return a pipeline of nodes. * The pipeline is formed finding a shortest path that * starts from the writer and traverses all nodes * This is basically a traveling salesman problem. 
*/ - private DatanodeDescriptor[] getPipeline( - DatanodeDescriptor writer, + private DatanodeDescriptor[] getPipeline(DatanodeDescriptor writer, DatanodeDescriptor[] nodes) { if (nodes.length==0) return nodes; @@ -709,7 +653,7 @@ public class BlockPlacementPolicyDefault int minRacks) { DatanodeInfo[] locs = lBlk.getLocations(); if (locs == null) - locs = new DatanodeInfo[0]; + locs = DatanodeDescriptor.EMPTY_ARRAY; int numRacks = clusterMap.getNumOfRacks(); if(numRacks <= 1) // only one rack return 0; @@ -724,24 +668,18 @@ public class BlockPlacementPolicyDefault @Override public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc, - Block block, - short replicationFactor, - Collection first, - Collection second) { + Block block, short replicationFactor, + Collection first, + Collection second) { long oldestHeartbeat = now() - heartbeatInterval * tolerateHeartbeatMultiplier; DatanodeDescriptor oldestHeartbeatNode = null; long minSpace = Long.MAX_VALUE; DatanodeDescriptor minSpaceNode = null; - // pick replica from the first Set. If first is empty, then pick replicas - // from second set. - Iterator iter = pickupReplicaSet(first, second); - // Pick the node with the oldest heartbeat or with the least free space, // if all hearbeats are within the tolerable heartbeat interval - while (iter.hasNext() ) { - DatanodeDescriptor node = iter.next(); + for(DatanodeDescriptor node : pickupReplicaSet(first, second)) { long free = node.getRemaining(); long lastHeartbeat = node.getLastUpdate(); if(lastHeartbeat < oldestHeartbeat) { @@ -762,12 +700,10 @@ public class BlockPlacementPolicyDefault * replica while second set contains remaining replica nodes. * So pick up first set if not empty. If first is empty, then pick second. */ - protected Iterator pickupReplicaSet( + protected Collection pickupReplicaSet( Collection first, Collection second) { - Iterator iter = - first.isEmpty() ? second.iterator() : first.iterator(); - return iter; + return first.isEmpty() ? 
second : first; } @VisibleForTesting Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java?rev=1523400&r1=1523399&r2=1523400&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java Sun Sep 15 04:15:57 2013 @@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.bl import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; @@ -65,7 +64,7 @@ public class BlockPlacementPolicyWithNod */ @Override protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine, - HashMap excludedNodes, long blocksize, int maxNodesPerRack, + Map excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) throws NotEnoughReplicasException { // if no local machine, randomly choose one node @@ -76,12 +75,8 @@ public class BlockPlacementPolicyWithNod // otherwise try local machine first Node oldNode = excludedNodes.put(localMachine, localMachine); if (oldNode == null) { // was not in the excluded list - if (isGoodTarget(localMachine, blocksize, - maxNodesPerRack, false, results, avoidStaleNodes)) { - results.add(localMachine); - // Nodes under same nodegroup should be excluded. - addNodeGroupToExcludedNodes(excludedNodes, - localMachine.getNetworkLocation()); + if (addIfIsGoodTarget(localMachine, excludedNodes, blocksize, + maxNodesPerRack, false, results, avoidStaleNodes) >= 0) { return localMachine; } } @@ -98,26 +93,10 @@ public class BlockPlacementPolicyWithNod blocksize, maxNodesPerRack, results, avoidStaleNodes); } - @Override - protected void adjustExcludedNodes(HashMap excludedNodes, - Node chosenNode) { - // as node-group aware implementation, it should make sure no two replica - // are placing on the same node group. - addNodeGroupToExcludedNodes(excludedNodes, chosenNode.getNetworkLocation()); - } - // add all nodes under specific nodegroup to excludedNodes. 
- private void addNodeGroupToExcludedNodes(HashMap excludedNodes, - String nodeGroup) { - List leafNodes = clusterMap.getLeaves(nodeGroup); - for (Node node : leafNodes) { - excludedNodes.put(node, node); - } - } - @Override protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine, - HashMap excludedNodes, long blocksize, int maxNodesPerRack, + Map excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) throws NotEnoughReplicasException { // no local machine, so choose a random machine @@ -137,9 +116,7 @@ public class BlockPlacementPolicyWithNod } catch (NotEnoughReplicasException e1) { // find the second replica DatanodeDescriptor newLocal=null; - for(Iterator iter=results.iterator(); - iter.hasNext();) { - DatanodeDescriptor nextNode = iter.next(); + for(DatanodeDescriptor nextNode : results) { if (nextNode != localMachine) { newLocal = nextNode; break; @@ -165,7 +142,7 @@ public class BlockPlacementPolicyWithNod @Override protected void chooseRemoteRack(int numOfReplicas, - DatanodeDescriptor localMachine, HashMap excludedNodes, + DatanodeDescriptor localMachine, Map excludedNodes, long blocksize, int maxReplicasPerRack, List results, boolean avoidStaleNodes) throws NotEnoughReplicasException { int oldNumOfReplicas = results.size(); @@ -192,7 +169,7 @@ public class BlockPlacementPolicyWithNod */ private DatanodeDescriptor chooseLocalNodeGroup( NetworkTopologyWithNodeGroup clusterMap, DatanodeDescriptor localMachine, - HashMap excludedNodes, long blocksize, int maxNodesPerRack, + Map excludedNodes, long blocksize, int maxNodesPerRack, List results, boolean avoidStaleNodes) throws NotEnoughReplicasException { // no local machine, so choose a random machine @@ -209,9 +186,7 @@ public class BlockPlacementPolicyWithNod } catch (NotEnoughReplicasException e1) { // find the second replica DatanodeDescriptor newLocal=null; - for(Iterator iter=results.iterator(); - iter.hasNext();) { - DatanodeDescriptor nextNode = iter.next(); + for(DatanodeDescriptor nextNode : results) { if (nextNode != localMachine) { newLocal = nextNode; break; @@ -248,10 +223,11 @@ public class BlockPlacementPolicyWithNod * within the same nodegroup * @return number of new excluded nodes */ - protected int addToExcludedNodes(DatanodeDescriptor localMachine, - HashMap excludedNodes) { + @Override + protected int addToExcludedNodes(DatanodeDescriptor chosenNode, + Map excludedNodes) { int countOfExcludedNodes = 0; - String nodeGroupScope = localMachine.getNetworkLocation(); + String nodeGroupScope = chosenNode.getNetworkLocation(); List leafNodes = clusterMap.getLeaves(nodeGroupScope); for (Node leafNode : leafNodes) { Node node = excludedNodes.put(leafNode, leafNode); @@ -274,12 +250,12 @@ public class BlockPlacementPolicyWithNod * If first is empty, then pick second. */ @Override - public Iterator pickupReplicaSet( + public Collection pickupReplicaSet( Collection first, Collection second) { // If no replica within same rack, return directly. if (first.isEmpty()) { - return second.iterator(); + return second; } // Split data nodes in the first set into two sets, // moreThanOne contains nodes on nodegroup with more than one replica @@ -312,9 +288,7 @@ public class BlockPlacementPolicyWithNod } } - Iterator iter = - moreThanOne.isEmpty() ? exactlyOne.iterator() : moreThanOne.iterator(); - return iter; + return moreThanOne.isEmpty()? 
exactlyOne : moreThanOne; } } Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java?rev=1523400&r1=1523399&r2=1523400&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java Sun Sep 15 04:15:57 2013 @@ -38,7 +38,8 @@ import org.apache.hadoop.util.Time; @InterfaceAudience.Private @InterfaceStability.Evolving public class DatanodeDescriptor extends DatanodeInfo { - + public static final DatanodeDescriptor[] EMPTY_ARRAY = {}; + // Stores status of decommissioning. // If node is not decommissioning, do not use this object for anything. public DecommissioningStatus decommissioningStatus = new DecommissioningStatus(); Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java?rev=1523400&r1=1523399&r2=1523400&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java Sun Sep 15 04:15:57 2013 @@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFac import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -39,9 +40,11 @@ import org.apache.hadoop.hdfs.NameNodePr import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup; import org.apache.hadoop.net.NetworkTopology; +import org.apache.hadoop.net.NetworkTopologyWithNodeGroup; +import org.junit.Assert; import org.junit.Test; -import junit.framework.Assert; /** * This class tests if a balancer schedules tasks correctly. @@ -75,10 +78,9 @@ public class TestBalancerWithNodeGroup { Configuration conf = new HdfsConfiguration(); TestBalancer.initConf(conf); conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, - "org.apache.hadoop.net.NetworkTopologyWithNodeGroup"); - conf.set("dfs.block.replicator.classname", - "org.apache.hadoop.hdfs.server.blockmanagement." 
+ - "BlockPlacementPolicyWithNodeGroup"); + NetworkTopologyWithNodeGroup.class.getName()); + conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, + BlockPlacementPolicyWithNodeGroup.class.getName()); return conf; } Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java?rev=1523400&r1=1523399&r2=1523400&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java Sun Sep 15 04:15:57 2013 @@ -157,8 +157,8 @@ public class TestRBWBlockInvalidation { // in the context of the test, whereas a random one is more accurate // to what is seen in real clusters (nodes have random amounts of free // space) - conf.setClass("dfs.block.replicator.classname", RandomDeleterPolicy.class, - BlockPlacementPolicy.class); + conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, + RandomDeleterPolicy.class, BlockPlacementPolicy.class); // Speed up the test a bit with faster heartbeats. conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1523400&r1=1523399&r2=1523400&view=diff ============================================================================== --- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original) +++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Sun Sep 15 04:15:57 2013 @@ -136,30 +136,25 @@ public class TestReplicationPolicy { HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, 0, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(0); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, 1, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(1); assertEquals(targets.length, 1); assertEquals(targets[0], dataNodes[0]); - targets = replicator.chooseTarget(filename, - 2, dataNodes[0], new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(2); assertEquals(targets.length, 2); assertEquals(targets[0], dataNodes[0]); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, 3, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(3); assertEquals(targets.length, 3); assertEquals(targets[0], dataNodes[0]); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); assertTrue(cluster.isOnSameRack(targets[1], targets[2])); - targets = replicator.chooseTarget(filename, 4, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(4); 
assertEquals(targets.length, 4); assertEquals(targets[0], dataNodes[0]); assertTrue(cluster.isOnSameRack(targets[1], targets[2]) || @@ -171,15 +166,38 @@ public class TestReplicationPolicy { HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0); } + private static DatanodeDescriptor[] chooseTarget(int numOfReplicas) { + return chooseTarget(numOfReplicas, dataNodes[0]); + } + + private static DatanodeDescriptor[] chooseTarget(int numOfReplicas, + DatanodeDescriptor writer) { + return chooseTarget(numOfReplicas, writer, + new ArrayList()); + } + + private static DatanodeDescriptor[] chooseTarget(int numOfReplicas, + List chosenNodes) { + return chooseTarget(numOfReplicas, dataNodes[0], chosenNodes); + } + + private static DatanodeDescriptor[] chooseTarget(int numOfReplicas, + DatanodeDescriptor writer, List chosenNodes) { + return chooseTarget(numOfReplicas, writer, chosenNodes, null); + } + + private static DatanodeDescriptor[] chooseTarget(int numOfReplicas, + List chosenNodes, Map excludedNodes) { + return chooseTarget(numOfReplicas, dataNodes[0], chosenNodes, excludedNodes); + } + private static DatanodeDescriptor[] chooseTarget( - BlockPlacementPolicyDefault policy, int numOfReplicas, DatanodeDescriptor writer, List chosenNodes, - HashMap excludedNodes, - long blocksize) { - return policy.chooseTarget(numOfReplicas, writer, chosenNodes, false, - excludedNodes, blocksize); + Map excludedNodes) { + return replicator.chooseTarget(filename, numOfReplicas, writer, chosenNodes, + false, excludedNodes, BLOCK_SIZE); } /** @@ -194,28 +212,24 @@ public class TestReplicationPolicy { public void testChooseTarget2() throws Exception { HashMap excludedNodes; DatanodeDescriptor[] targets; - BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator; List chosenNodes = new ArrayList(); excludedNodes = new HashMap(); excludedNodes.put(dataNodes[1], dataNodes[1]); - targets = chooseTarget(repl, 0, dataNodes[0], chosenNodes, excludedNodes, - BLOCK_SIZE); + targets = chooseTarget(0, chosenNodes, excludedNodes); assertEquals(targets.length, 0); excludedNodes.clear(); chosenNodes.clear(); excludedNodes.put(dataNodes[1], dataNodes[1]); - targets = chooseTarget(repl, 1, dataNodes[0], chosenNodes, excludedNodes, - BLOCK_SIZE); + targets = chooseTarget(1, chosenNodes, excludedNodes); assertEquals(targets.length, 1); assertEquals(targets[0], dataNodes[0]); excludedNodes.clear(); chosenNodes.clear(); excludedNodes.put(dataNodes[1], dataNodes[1]); - targets = chooseTarget(repl, 2, dataNodes[0], chosenNodes, excludedNodes, - BLOCK_SIZE); + targets = chooseTarget(2, chosenNodes, excludedNodes); assertEquals(targets.length, 2); assertEquals(targets[0], dataNodes[0]); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); @@ -223,8 +237,7 @@ public class TestReplicationPolicy { excludedNodes.clear(); chosenNodes.clear(); excludedNodes.put(dataNodes[1], dataNodes[1]); - targets = chooseTarget(repl, 3, dataNodes[0], chosenNodes, excludedNodes, - BLOCK_SIZE); + targets = chooseTarget(3, chosenNodes, excludedNodes); assertEquals(targets.length, 3); assertEquals(targets[0], dataNodes[0]); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); @@ -233,8 +246,7 @@ public class TestReplicationPolicy { excludedNodes.clear(); chosenNodes.clear(); excludedNodes.put(dataNodes[1], dataNodes[1]); - targets = chooseTarget(repl, 4, dataNodes[0], chosenNodes, excludedNodes, - BLOCK_SIZE); + targets = chooseTarget(4, chosenNodes, excludedNodes); assertEquals(targets.length, 4); assertEquals(targets[0], 
dataNodes[0]); for(int i=1; i<4; i++) { @@ -248,7 +260,7 @@ public class TestReplicationPolicy { chosenNodes.clear(); excludedNodes.put(dataNodes[1], dataNodes[1]); chosenNodes.add(dataNodes[2]); - targets = repl.chooseTarget(1, dataNodes[0], chosenNodes, true, + targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true, excludedNodes, BLOCK_SIZE); System.out.println("targets=" + Arrays.asList(targets)); assertEquals(2, targets.length); @@ -274,30 +286,25 @@ public class TestReplicationPolicy { (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, 0, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(0); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, 1, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(1); assertEquals(targets.length, 1); assertEquals(targets[0], dataNodes[1]); - targets = replicator.chooseTarget(filename, 2, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(2); assertEquals(targets.length, 2); assertEquals(targets[0], dataNodes[1]); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, 3, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(3); assertEquals(targets.length, 3); assertEquals(targets[0], dataNodes[1]); assertTrue(cluster.isOnSameRack(targets[1], targets[2])); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, 4, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(4); assertEquals(targets.length, 4); assertEquals(targets[0], dataNodes[1]); for(int i=1; i<4; i++) { @@ -330,23 +337,19 @@ public class TestReplicationPolicy { } DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, 0, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(0); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, 1, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(1); assertEquals(targets.length, 1); assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0])); - targets = replicator.chooseTarget(filename, 2, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(2); assertEquals(targets.length, 2); assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0])); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, 3, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(3); assertEquals(targets.length, 3); for(int i=0; i<3; i++) { assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0])); @@ -375,21 +378,17 @@ public class TestReplicationPolicy { DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4"); DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, 0, writerDesc, - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(0, writerDesc); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, 1, writerDesc, - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(1, writerDesc); assertEquals(targets.length, 1); - targets = replicator.chooseTarget(filename, 2, writerDesc, - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(2, writerDesc); assertEquals(targets.length, 2); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, 3, writerDesc, - new ArrayList(), BLOCK_SIZE); 
+ targets = chooseTarget(3, writerDesc); assertEquals(targets.length, 3); assertTrue(cluster.isOnSameRack(targets[1], targets[2])); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); @@ -433,9 +432,7 @@ public class TestReplicationPolicy { // try to choose NUM_OF_DATANODES which is more than actually available // nodes. - DatanodeDescriptor[] targets = replicator.chooseTarget(filename, - NUM_OF_DATANODES, dataNodes[0], new ArrayList(), - BLOCK_SIZE); + DatanodeDescriptor[] targets = chooseTarget(NUM_OF_DATANODES); assertEquals(targets.length, NUM_OF_DATANODES - 2); final List log = appender.getLog(); @@ -478,17 +475,14 @@ public class TestReplicationPolicy { DatanodeDescriptor[] targets; // We set the datanode[0] as stale, thus should choose datanode[1] since // datanode[1] is on the same rack with datanode[0] (writer) - targets = replicator.chooseTarget(filename, 1, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(1); assertEquals(targets.length, 1); assertEquals(targets[0], dataNodes[1]); HashMap excludedNodes = new HashMap(); excludedNodes.put(dataNodes[1], dataNodes[1]); List chosenNodes = new ArrayList(); - BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator; - targets = chooseTarget(repl, 1, dataNodes[0], chosenNodes, excludedNodes, - BLOCK_SIZE); + targets = chooseTarget(1, chosenNodes, excludedNodes); assertEquals(targets.length, 1); assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0])); @@ -515,33 +509,27 @@ public class TestReplicationPolicy { namenode.getNamesystem().getBlockManager() .getDatanodeManager().getHeartbeatManager().heartbeatCheck(); - DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, 0, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + DatanodeDescriptor[] targets = chooseTarget(0); assertEquals(targets.length, 0); // Since we have 6 datanodes total, stale nodes should // not be returned until we ask for more than 3 targets - targets = replicator.chooseTarget(filename, 1, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(1); assertEquals(targets.length, 1); assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2)); - targets = replicator.chooseTarget(filename, 2, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(2); assertEquals(targets.length, 2); assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2)); assertFalse(containsWithinRange(targets[1], dataNodes, 0, 2)); - targets = replicator.chooseTarget(filename, 3, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(3); assertEquals(targets.length, 3); assertTrue(containsWithinRange(targets[0], dataNodes, 3, 5)); assertTrue(containsWithinRange(targets[1], dataNodes, 3, 5)); assertTrue(containsWithinRange(targets[2], dataNodes, 3, 5)); - targets = replicator.chooseTarget(filename, 4, dataNodes[0], - new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(4); assertEquals(targets.length, 4); assertTrue(containsWithinRange(dataNodes[3], targets, 0, 3)); assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3)); @@ -594,7 +582,8 @@ public class TestReplicationPolicy { BlockPlacementPolicy replicator = miniCluster.getNameNode() .getNamesystem().getBlockManager().getBlockPlacementPolicy(); DatanodeDescriptor[] targets = replicator.chooseTarget(filename, 3, - staleNodeInfo, new ArrayList(), BLOCK_SIZE); + staleNodeInfo, new ArrayList(), false, null, BLOCK_SIZE); + assertEquals(targets.length, 3); assertFalse(cluster.isOnSameRack(targets[0], staleNodeInfo)); @@ -618,7 
+607,7 @@ public class TestReplicationPolicy { .getDatanodeManager().shouldAvoidStaleDataNodesForWrite()); // Call chooseTarget targets = replicator.chooseTarget(filename, 3, - staleNodeInfo, new ArrayList(), BLOCK_SIZE); + staleNodeInfo, new ArrayList(), false, null, BLOCK_SIZE); assertEquals(targets.length, 3); assertTrue(cluster.isOnSameRack(targets[0], staleNodeInfo)); @@ -640,8 +629,7 @@ public class TestReplicationPolicy { assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager() .getDatanodeManager().shouldAvoidStaleDataNodesForWrite()); // Call chooseTarget - targets = replicator.chooseTarget(filename, 3, - staleNodeInfo, new ArrayList(), BLOCK_SIZE); + targets = chooseTarget(3, staleNodeInfo); assertEquals(targets.length, 3); assertFalse(cluster.isOnSameRack(targets[0], staleNodeInfo)); } finally { @@ -662,23 +650,19 @@ public class TestReplicationPolicy { chosenNodes.add(dataNodes[0]); DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, - 0, dataNodes[0], chosenNodes, BLOCK_SIZE); + targets = chooseTarget(0, chosenNodes); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, - 1, dataNodes[0], chosenNodes, BLOCK_SIZE); + targets = chooseTarget(1, chosenNodes); assertEquals(targets.length, 1); assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0])); - targets = replicator.chooseTarget(filename, - 2, dataNodes[0], chosenNodes, BLOCK_SIZE); + targets = chooseTarget(2, chosenNodes); assertEquals(targets.length, 2); assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0])); assertFalse(cluster.isOnSameRack(targets[0], targets[1])); - targets = replicator.chooseTarget(filename, - 3, dataNodes[0], chosenNodes, BLOCK_SIZE); + targets = chooseTarget(3, chosenNodes); assertEquals(targets.length, 3); assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0])); assertFalse(cluster.isOnSameRack(targets[0], targets[2])); @@ -698,17 +682,14 @@ public class TestReplicationPolicy { chosenNodes.add(dataNodes[1]); DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, - 0, dataNodes[0], chosenNodes, BLOCK_SIZE); + targets = chooseTarget(0, chosenNodes); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, - 1, dataNodes[0], chosenNodes, BLOCK_SIZE); + targets = chooseTarget(1, chosenNodes); assertEquals(targets.length, 1); assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0])); - targets = replicator.chooseTarget(filename, - 2, dataNodes[0], chosenNodes, BLOCK_SIZE); + targets = chooseTarget(2, chosenNodes); assertEquals(targets.length, 2); assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0])); assertFalse(cluster.isOnSameRack(dataNodes[0], targets[1])); @@ -728,29 +709,24 @@ public class TestReplicationPolicy { chosenNodes.add(dataNodes[2]); DatanodeDescriptor[] targets; - targets = replicator.chooseTarget(filename, - 0, dataNodes[0], chosenNodes, BLOCK_SIZE); + targets = chooseTarget(0, chosenNodes); assertEquals(targets.length, 0); - targets = replicator.chooseTarget(filename, - 1, dataNodes[0], chosenNodes, BLOCK_SIZE); + targets = chooseTarget(1, chosenNodes); assertEquals(targets.length, 1); assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0])); assertFalse(cluster.isOnSameRack(dataNodes[2], targets[0])); - targets = replicator.chooseTarget(filename, - 1, dataNodes[2], chosenNodes, BLOCK_SIZE); + targets = chooseTarget(1, dataNodes[2], chosenNodes); assertEquals(targets.length, 1); assertTrue(cluster.isOnSameRack(dataNodes[2], targets[0])); 
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
-    targets = replicator.chooseTarget(filename,
-        2, dataNodes[0], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(2, chosenNodes);
     assertEquals(targets.length, 2);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
-    targets = replicator.chooseTarget(filename,
-        2, dataNodes[2], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(2, dataNodes[2], chosenNodes);
     assertEquals(targets.length, 2);
     assertTrue(cluster.isOnSameRack(dataNodes[2], targets[0]));
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java?rev=1523400&r1=1523399&r2=1523400&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java Sun Sep 15 04:15:57 2013
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.After;
@@ -101,10 +102,10 @@ public class TestReplicationPolicyWithNo
     FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
     CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
     // Set properties to make HDFS aware of NodeGroup.
-    CONF.set("dfs.block.replicator.classname",
-        "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup");
+    CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+        BlockPlacementPolicyWithNodeGroup.class.getName());
     CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
-        "org.apache.hadoop.net.NetworkTopologyWithNodeGroup");
+        NetworkTopologyWithNodeGroup.class.getName());
     File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
@@ -156,6 +157,35 @@ public class TestReplicationPolicyWithNo
     return true;
   }
+  private DatanodeDescriptor[] chooseTarget(int numOfReplicas) {
+    return chooseTarget(numOfReplicas, dataNodes[0]);
+  }
+
+  private DatanodeDescriptor[] chooseTarget(int numOfReplicas,
+      DatanodeDescriptor writer) {
+    return chooseTarget(numOfReplicas, writer,
+        new ArrayList());
+  }
+
+  private DatanodeDescriptor[] chooseTarget(int numOfReplicas,
+      List chosenNodes) {
+    return chooseTarget(numOfReplicas, dataNodes[0], chosenNodes);
+  }
+
+  private DatanodeDescriptor[] chooseTarget(int numOfReplicas,
+      DatanodeDescriptor writer, List chosenNodes) {
+    return chooseTarget(numOfReplicas, writer, chosenNodes, null);
+  }
+
+  private DatanodeDescriptor[] chooseTarget(
+      int numOfReplicas,
+      DatanodeDescriptor writer,
+      List chosenNodes,
+      Map excludedNodes) {
+    return replicator.chooseTarget(filename, numOfReplicas, writer, chosenNodes,
+        false, excludedNodes, BLOCK_SIZE);
+  }
+
   /**
    * In this testcase, client is dataNodes[0].
   * So the 1st replica should be
   * placed on dataNodes[0], the 2nd replica should be placed on
@@ -172,31 +202,26 @@ public class TestReplicationPolicyWithNo
         HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(0);
     assertEquals(targets.length, 0);
-    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(1);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[0]);
-    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(2);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(3);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameNodeGroup(targets[1], targets[2]));
-    targets = replicator.chooseTarget(filename, 4, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(4);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
@@ -235,7 +260,7 @@ public class TestReplicationPolicyWithNo
     excludedNodes = new HashMap();
     excludedNodes.put(dataNodes[1], dataNodes[1]);
-    targets = repl.chooseTarget(4, dataNodes[0], chosenNodes, false,
+    targets = repl.chooseTarget(filename, 4, dataNodes[0], chosenNodes, false,
         excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
@@ -252,7 +277,7 @@ public class TestReplicationPolicyWithNo
     chosenNodes.clear();
     excludedNodes.put(dataNodes[1], dataNodes[1]);
     chosenNodes.add(dataNodes[2]);
-    targets = repl.chooseTarget(1, dataNodes[0], chosenNodes, true,
+    targets = repl.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
         excludedNodes, BLOCK_SIZE);
     System.out.println("targets=" + Arrays.asList(targets));
     assertEquals(2, targets.length);
@@ -278,30 +303,25 @@ public class TestReplicationPolicyWithNo
         (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(0);
     assertEquals(targets.length, 0);
-    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(1);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[1]);
-    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(2);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[1]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(3);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[1]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-    targets = replicator.chooseTarget(filename, 4, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(4);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[1]);
     assertTrue(cluster.isNodeGroupAware());
@@ -333,23 +353,19 @@ public class TestReplicationPolicyWithNo
     }
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(0);
     assertEquals(targets.length, 0);
-    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(1);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
-    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(2);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(3);
     assertEquals(targets.length, 3);
     for(int i=0; i<3; i++) {
       assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0]));
@@ -371,21 +387,17 @@ public class TestReplicationPolicyWithNo
   public void testChooseTarget5() throws Exception {
     setupDataNodeCapacity();
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename, 0, NODE,
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(0, NODE);
     assertEquals(targets.length, 0);
-    targets = replicator.chooseTarget(filename, 1, NODE,
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(1, NODE);
     assertEquals(targets.length, 1);
-    targets = replicator.chooseTarget(filename, 2, NODE,
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(2, NODE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-    targets = replicator.chooseTarget(filename, 3, NODE,
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(3, NODE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
@@ -406,23 +418,19 @@ public class TestReplicationPolicyWithNo
     chosenNodes.add(dataNodes[0]);
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-        0, dataNodes[0], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(0, chosenNodes);
     assertEquals(targets.length, 0);
-    targets = replicator.chooseTarget(filename,
-        1, dataNodes[0], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(1, chosenNodes);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
-    targets = replicator.chooseTarget(filename,
-        2, dataNodes[0], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(2, chosenNodes);
     assertEquals(targets.length, 2);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-    targets = replicator.chooseTarget(filename,
-        3, dataNodes[0], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(3, chosenNodes);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameNodeGroup(dataNodes[0], targets[0]));
@@ -444,17 +452,14 @@ public class TestReplicationPolicyWithNo
     chosenNodes.add(dataNodes[1]);
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-        0, dataNodes[0], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(0, chosenNodes);
     assertEquals(targets.length, 0);
-    targets = replicator.chooseTarget(filename,
-        1, dataNodes[0], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(1, chosenNodes);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
-    targets = replicator.chooseTarget(filename,
-        2, dataNodes[0], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(2, chosenNodes);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]) &&
         cluster.isOnSameRack(dataNodes[0], targets[1]));
@@ -475,30 +480,26 @@ public class TestReplicationPolicyWithNo
     chosenNodes.add(dataNodes[3]);
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-        0, dataNodes[0], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(0, chosenNodes);
     assertEquals(targets.length, 0);
-    targets = replicator.chooseTarget(filename,
-        1, dataNodes[0], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(1, chosenNodes);
     assertEquals(targets.length, 1);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameRack(dataNodes[3], targets[0]));
-    targets = replicator.chooseTarget(filename,
-        1, dataNodes[3], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(1, dataNodes[3], chosenNodes);
     assertEquals(targets.length, 1);
     assertTrue(cluster.isOnSameRack(dataNodes[3], targets[0]));
     assertFalse(cluster.isOnSameNodeGroup(dataNodes[3], targets[0]));
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
-    targets = replicator.chooseTarget(filename,
-        2, dataNodes[0], chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(2, chosenNodes);
     assertEquals(targets.length, 2);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameNodeGroup(dataNodes[0], targets[0]));
-    targets = replicator.chooseTarget(filename,
-        2, dataNodes[3], chosenNodes, BLOCK_SIZE);
+
+    targets = chooseTarget(2, dataNodes[3], chosenNodes);
     assertEquals(targets.length, 2);
     assertTrue(cluster.isOnSameRack(dataNodes[3], targets[0]));
   }
@@ -584,21 +585,17 @@ public class TestReplicationPolicyWithNo
     }
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename, 0, dataNodesInBoundaryCase[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(0, dataNodesInBoundaryCase[0]);
     assertEquals(targets.length, 0);
-    targets = replicator.chooseTarget(filename, 1, dataNodesInBoundaryCase[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(1, dataNodesInBoundaryCase[0]);
     assertEquals(targets.length, 1);
-    targets = replicator.chooseTarget(filename, 2, dataNodesInBoundaryCase[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(2, dataNodesInBoundaryCase[0]);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-    targets = replicator.chooseTarget(filename, 3, dataNodesInBoundaryCase[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(3, dataNodesInBoundaryCase[0]);
     assertEquals(targets.length, 3);
     assertTrue(checkTargetsOnDifferentNodeGroup(targets));
   }
@@ -621,8 +618,7 @@ public class TestReplicationPolicyWithNo
     chosenNodes.add(dataNodesInBoundaryCase[0]);
     chosenNodes.add(dataNodesInBoundaryCase[5]);
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename, 1, dataNodesInBoundaryCase[0],
-        chosenNodes, BLOCK_SIZE);
+    targets = chooseTarget(1, dataNodesInBoundaryCase[0], chosenNodes);
     assertFalse(cluster.isOnSameNodeGroup(targets[0],
         dataNodesInBoundaryCase[0]));
     assertFalse(cluster.isOnSameNodeGroup(targets[0],
@@ -661,14 +657,12 @@ public class TestReplicationPolicyWithNo
     DatanodeDescriptor[] targets;
     // Test normal case -- 3 replicas
-    targets = replicator.chooseTarget(filename, 3, dataNodesInMoreTargetsCase[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(3, dataNodesInMoreTargetsCase[0]);
     assertEquals(targets.length, 3);
     assertTrue(checkTargetsOnDifferentNodeGroup(targets));
     // Test special case -- replica number over node groups.
-    targets = replicator.chooseTarget(filename, 10, dataNodesInMoreTargetsCase[0],
-        new ArrayList(), BLOCK_SIZE);
+    targets = chooseTarget(10, dataNodesInMoreTargetsCase[0]);
     assertTrue(checkTargetsOnDifferentNodeGroup(targets));
     // Verify it only can find 6 targets for placing replicas.
     assertEquals(targets.length, 6);

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java?rev=1523400&r1=1523399&r2=1523400&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java Sun Sep 15 04:15:57 2013
@@ -96,8 +96,8 @@ public class TestDNFencing {
     // Increase max streams so that we re-replicate quickly.
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 1000);
     // See RandomDeleterPolicy javadoc.
-    conf.setClass("dfs.block.replicator.classname", RandomDeleterPolicy.class,
-        BlockPlacementPolicy.class);
+    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+        RandomDeleterPolicy.class, BlockPlacementPolicy.class);
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(MiniDFSNNTopology.simpleHATopology())