From: rakeshr@apache.org
To: common-commits@hadoop.apache.org
Date: Fri, 17 Feb 2017 14:50:26 -0000
Subject: [46/50] [abbrv] hadoop git commit: HDFS-11293: [SPS]: Local DN should be given preference as source node, when target available in same node. Contributed by Yuanbo Liu and Uma Maheswara Rao G

HDFS-11293: [SPS]: Local DN should be given preference as source node, when target available in same node.
Contributed by Yuanbo Liu and Uma Maheswara Rao G

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01d1b93b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01d1b93b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01d1b93b

Branch: refs/heads/HDFS-10285
Commit: 01d1b93b513b06c118d8b0cc67ffdc47df8d411c
Parents: d875c18
Author: Uma Maheswara Rao G
Authored: Mon Jan 9 14:37:42 2017 -0800
Committer: Rakesh Radhakrishnan
Committed: Fri Feb 17 19:54:42 2017 +0530

----------------------------------------------------------------------
 .../server/namenode/StoragePolicySatisfier.java |  49 ++++++++++++--
 .../namenode/TestStoragePolicySatisfier.java    |  71 ++++++++++++++++++++
 2 files changed, 113 insertions(+), 7 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01d1b93b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index ee59617..b1b1464 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -298,9 +298,25 @@ public class StoragePolicySatisfier implements Runnable {
         new ArrayList<StorageTypeNodePair>();
     List<DatanodeStorageInfo> existingBlockStorages =
         new ArrayList<DatanodeStorageInfo>(Arrays.asList(storages));
+    // if expected type exists in source node already, local movement would be
+    // possible, so lets find such sources first.
+    Iterator<DatanodeStorageInfo> iterator = existingBlockStorages.iterator();
+    while (iterator.hasNext()) {
+      DatanodeStorageInfo datanodeStorageInfo = iterator.next();
+      if (checkSourceAndTargetTypeExists(
+          datanodeStorageInfo.getDatanodeDescriptor(), existing,
+          expectedStorageTypes)) {
+        sourceWithStorageMap
+            .add(new StorageTypeNodePair(datanodeStorageInfo.getStorageType(),
+                datanodeStorageInfo.getDatanodeDescriptor()));
+        iterator.remove();
+        existing.remove(datanodeStorageInfo.getStorageType());
+      }
+    }
+
+    // Let's find sources for existing types left.
     for (StorageType existingType : existing) {
-      Iterator<DatanodeStorageInfo> iterator =
-          existingBlockStorages.iterator();
+      iterator = existingBlockStorages.iterator();
       while (iterator.hasNext()) {
         DatanodeStorageInfo datanodeStorageInfo = iterator.next();
         StorageType storageType = datanodeStorageInfo.getStorageType();
@@ -317,7 +333,7 @@ public class StoragePolicySatisfier implements Runnable {
         findTargetsForExpectedStorageTypes(expectedStorageTypes);

     foundMatchingTargetNodesForBlock |= findSourceAndTargetToMove(
-        blockMovingInfos, blockInfo, existing, sourceWithStorageMap,
+        blockMovingInfos, blockInfo, sourceWithStorageMap,
         expectedStorageTypes, locsForExpectedStorageTypes);
   }
   return foundMatchingTargetNodesForBlock;
@@ -366,8 +382,6 @@ public class StoragePolicySatisfier implements Runnable {
    *          - list of block source and target node pair
    * @param blockInfo
    *          - Block
-   * @param existing
-   *          - Existing storage types of block
    * @param sourceWithStorageList
    *          - Source Datanode with storages list
    * @param expected
@@ -379,7 +393,6 @@ public class StoragePolicySatisfier implements Runnable {
    */
   private boolean findSourceAndTargetToMove(
       List<BlockMovingInfo> blockMovingInfos, BlockInfo blockInfo,
-      List<StorageType> existing,
       List<StorageTypeNodePair> sourceWithStorageList,
       List<StorageType> expected,
       StorageTypeNodeMap locsForExpectedStorageTypes) {
@@ -403,6 +416,7 @@ public class StoragePolicySatisfier implements Runnable {
         targetNodes.add(chosenTarget.dn);
         targetStorageTypes.add(chosenTarget.storageType);
         chosenNodes.add(chosenTarget.dn);
+        expected.remove(chosenTarget.storageType);
         // TODO: We can increment scheduled block count for this node?
       }
     }
@@ -442,16 +456,20 @@ public class StoragePolicySatisfier implements Runnable {
           targetNodes.add(chosenTarget.dn);
           targetStorageTypes.add(chosenTarget.storageType);
           chosenNodes.add(chosenTarget.dn);
+          expected.remove(chosenTarget.storageType);
           // TODO: We can increment scheduled block count for this node?
         } else {
           LOG.warn(
               "Failed to choose target datanode for the required"
                   + " storage types {}, block:{}, existing storage type:{}",
               expected, blockInfo, existingTypeNodePair.storageType);
-          foundMatchingTargetNodesForBlock = false;
         }
       }

+    if (expected.size() > 0) {
+      foundMatchingTargetNodesForBlock = false;
+    }
+
     blockMovingInfos.addAll(getBlockMovingInfos(blockInfo, sourceNodes,
         sourceStorageTypes, targetNodes, targetStorageTypes));
     return foundMatchingTargetNodesForBlock;
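The retry semantics in the hunk above are easy to miss: each chosen target now removes its storage type from the expected list, and the block is flagged as unmatched only when some expected type is still left at the end, instead of on the first failed target choice. A minimal standalone sketch of that rule, with plain strings standing in for StorageType (all class and method names here are illustrative, not Hadoop API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative only: mirrors the success criterion of
// findSourceAndTargetToMove, not the actual SPS implementation.
public class ExpectedTypeMatchingSketch {

  // Each chosen target satisfies one expected slot; the block counts as
  // fully matched only when no expected type remains.
  static boolean matchTargets(List<String> expected, List<String> chosenTargetTypes) {
    for (String targetType : chosenTargetTypes) {
      expected.remove(targetType);
    }
    // Leftover expected types mean the block must be retried later.
    return expected.isEmpty();
  }

  public static void main(String[] args) {
    List<String> expected = new ArrayList<>(Arrays.asList("ARCHIVE", "ARCHIVE"));
    List<String> chosen = Arrays.asList("ARCHIVE"); // only one target found
    System.out.println(matchTargets(expected, chosen)); // false -> retry the block
  }
}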
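Before the test changes, it may help to see the new helper's behavior in isolation: a datanode qualifies for intra-node movement only when it hosts both a storage of a type the block already lives on ("existing") and a storage of a type the policy wants ("expected"). A self-contained sketch under that reading, with strings standing in for StorageType and the descriptor classes (hypothetical names, not Hadoop API):

import java.util.Arrays;
import java.util.List;

// Standalone illustration of checkSourceAndTargetTypeExists' semantics.
public class LocalMoveCheckSketch {

  static boolean sourceAndTargetTypeExist(List<String> dnStorageTypes,
      List<String> existing, List<String> expected) {
    boolean hasExisting = false;
    boolean hasExpected = false;
    for (String type : dnStorageTypes) {
      if (existing.contains(type)) {
        hasExisting = true; // the block already has a replica type this DN hosts
      }
      if (expected.contains(type)) {
        hasExpected = true; // the DN also hosts a type the policy asks for
      }
    }
    return hasExisting && hasExpected;
  }

  public static void main(String[] args) {
    // DN2 from the ONE_SSD test below: [DISK, SSD]; block on DISK, policy wants SSD.
    System.out.println(sourceAndTargetTypeExist(
        Arrays.asList("DISK", "SSD"),
        Arrays.asList("DISK"),
        Arrays.asList("SSD"))); // true -> prefer DN2 as its own source
  }
}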
http://git-wip-us.apache.org/repos/asf/hadoop/blob/01d1b93b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 718dbcb..9abb78d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -580,6 +580,77 @@ public class TestStoragePolicySatisfier {
     }
   }

+  /**
+   * Tests that moving block storage with in the same datanode. Let's say we
+   * have DN1[DISK,ARCHIVE], DN2[DISK, SSD], DN3[DISK,RAM_DISK] when
+   * storagepolicy set to ONE_SSD and request satisfyStoragePolicy, then block
+   * should move to DN2[SSD] successfully.
+   */
+  @Test(timeout = 300000)
+  public void testBlockMoveInSameDatanodeWithONESSD() throws Exception {
+    StorageType[][] diskTypes =
+        new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.DISK, StorageType.SSD},
+            {StorageType.DISK, StorageType.RAM_DISK}};
+    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    try {
+      hdfsCluster = startCluster(config, diskTypes, numOfDatanodes,
+          storagesPerDatanode, capacity);
+      dfs = hdfsCluster.getFileSystem();
+      writeContent(file);
+
+      // Change policy to ONE_SSD
+      dfs.setStoragePolicy(new Path(file), "ONE_SSD");
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      hdfsCluster.triggerHeartbeats();
+      waitExpectedStorageType(file, StorageType.SSD, 1, 30000);
+      waitExpectedStorageType(file, StorageType.DISK, 2, 30000);
+
+    } finally {
+      shutdownCluster();
+    }
+  }
+
+  /**
+   * Tests that moving block storage with in the same datanode and remote node.
+   * Let's say we have DN1[DISK,ARCHIVE], DN2[ARCHIVE, SSD], DN3[DISK,DISK],
+   * DN4[DISK,DISK] when storagepolicy set to WARM and request
+   * satisfyStoragePolicy, then block should move to DN1[ARCHIVE] and
+   * DN2[ARCHIVE] successfully.
+   */
+  @Test(timeout = 300000)
+  public void testBlockMoveInSameAndRemoteDatanodesWithWARM() throws Exception {
+    StorageType[][] diskTypes =
+        new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
+            {StorageType.ARCHIVE, StorageType.SSD},
+            {StorageType.DISK, StorageType.DISK},
+            {StorageType.DISK, StorageType.DISK}};
+
+    config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    try {
+      hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
+          storagesPerDatanode, capacity);
+      dfs = hdfsCluster.getFileSystem();
+      writeContent(file);
+
+      // Change policy to WARM
+      dfs.setStoragePolicy(new Path(file), "WARM");
+      FSNamesystem namesystem = hdfsCluster.getNamesystem();
+      INode inode = namesystem.getFSDirectory().getINode(file);
+
+      namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
+      hdfsCluster.triggerHeartbeats();
+
+      waitExpectedStorageType(file, StorageType.DISK, 1, 30000);
+      waitExpectedStorageType(file, StorageType.ARCHIVE, 2, 30000);
+    } finally {
+      shutdownCluster();
+    }
+  }
+
   private String createFileAndSimulateFavoredNodes(int favoredNodesCount)
       throws IOException {
     ArrayList<DataNode> dns = hdfsCluster.getDataNodes();
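For context on the WARM expectations in the second test: WARM keeps one replica on DISK and the rest on ARCHIVE, so with replication 3 and all replicas initially on DISK, two replicas must move. A rough sketch of that arithmetic using a hypothetical overlap-removal helper (strings in place of StorageType; not code from this patch):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrates why the WARM test waits for 1 DISK + 2 ARCHIVE replicas:
// removing the types common to both lists leaves only the mismatches
// that the satisfier still has to resolve.
public class WarmPolicyExpectationSketch {

  // Removes the common types from both lists, leaving only the mismatches.
  static void removeOverlap(List<String> existing, List<String> expected) {
    for (String type : new ArrayList<>(existing)) {
      if (expected.remove(type)) {
        existing.remove(type);
      }
    }
  }

  public static void main(String[] args) {
    List<String> existing = new ArrayList<>(Arrays.asList("DISK", "DISK", "DISK"));
    List<String> expected = new ArrayList<>(Arrays.asList("DISK", "ARCHIVE", "ARCHIVE"));
    removeOverlap(existing, expected);
    System.out.println(existing); // [DISK, DISK]       -> two replicas must move
    System.out.println(expected); // [ARCHIVE, ARCHIVE] -> onto ARCHIVE storages
  }
}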