From: jing9@apache.org
To: common-commits@hadoop.apache.org
Date: Fri, 30 Oct 2015 18:00:41 -0000
Subject: [28/30] hadoop git commit: HDFS-9279. Decommissioned capacity should not be considered for configured/used capacity. Contributed by Kuhu Shukla.

HDFS-9279. Decommissioned capacity should not be considered for
configured/used capacity. Contributed by Kuhu Shukla.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19a77f54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19a77f54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19a77f54

Branch: refs/heads/HDFS-8966
Commit: 19a77f546657b086af8f41fa631099bdde7e010c
Parents: 2d10cb8
Author: Kihwal Lee
Authored: Wed Oct 28 11:57:56 2015 -0500
Committer: Kihwal Lee
Committed: Wed Oct 28 11:58:51 2015 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../server/blockmanagement/DatanodeStats.java   | 26 ++++++-----
 .../apache/hadoop/hdfs/TestDecommission.java    | 47 +++++++++++++++++---
 3 files changed, 58 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
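In short: a DataNode that is decommissioning or decommissioned no longer contributes to the cluster's configured and used capacity; only in-service nodes do, and a node that is still decommission-in-progress keeps contributing its cache statistics. A minimal, self-contained sketch of the rule the diff below implements (Node and CapacityModel are invented stand-ins for this note, not the real DatanodeDescriptor/DatanodeStats classes):

class Node {
  long capacity, dfsUsed, remaining, blockPoolUsed;
  long cacheCapacity, cacheUsed;
  int xceivers;
  boolean decommissionInProgress, decommissioned;
}

class CapacityModel {
  long capacityTotal, capacityUsed, capacityRemaining, blockPoolUsed;
  long cacheCapacity, cacheUsed;
  int xceiverCount, nodesInService, nodesInServiceXceiverCount;

  void add(Node node) {
    xceiverCount += node.xceivers; // counted for every node, regardless of state
    if (!node.decommissionInProgress && !node.decommissioned) {
      // In-service nodes contribute to all aggregates.
      capacityUsed += node.dfsUsed;
      blockPoolUsed += node.blockPoolUsed;
      capacityTotal += node.capacity;
      capacityRemaining += node.remaining;
      cacheCapacity += node.cacheCapacity;
      cacheUsed += node.cacheUsed;
      nodesInService++;
      nodesInServiceXceiverCount += node.xceivers;
    } else if (!node.decommissioned) {
      // Decommission-in-progress: only cache stats still count.
      cacheCapacity += node.cacheCapacity;
      cacheUsed += node.cacheUsed;
    }
    // Fully decommissioned: nothing beyond xceiverCount is counted.
  }
}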
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a77f54/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 184b743..7f903b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2176,6 +2176,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9302. WebHDFS throws NullPointerException if newLength is not
     provided. (Jagadesh Kiran N via yliu)
 
+    HDFS-9279. Decommissioned capacity should not be considered for
+    configured/used capacity (Contributed by Kuhu Shukla)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a77f54/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
index 3ab0d5c..4c39c41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
@@ -45,19 +45,20 @@ class DatanodeStats {
   private int expiredHeartbeats = 0;
 
   synchronized void add(final DatanodeDescriptor node) {
-    capacityUsed += node.getDfsUsed();
-    blockPoolUsed += node.getBlockPoolUsed();
     xceiverCount += node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed += node.getDfsUsed();
+      blockPoolUsed += node.getBlockPoolUsed();
       nodesInService++;
       nodesInServiceXceiverCount += node.getXceiverCount();
       capacityTotal += node.getCapacity();
       capacityRemaining += node.getRemaining();
-    } else {
-      capacityTotal += node.getDfsUsed();
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity += node.getCacheCapacity();
+      cacheUsed += node.getCacheUsed();
     }
-    cacheCapacity += node.getCacheCapacity();
-    cacheUsed += node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.addStorage(storageInfo, node);
@@ -69,19 +70,20 @@ class DatanodeStats {
   }
 
   synchronized void subtract(final DatanodeDescriptor node) {
-    capacityUsed -= node.getDfsUsed();
-    blockPoolUsed -= node.getBlockPoolUsed();
     xceiverCount -= node.getXceiverCount();
     if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
+      capacityUsed -= node.getDfsUsed();
+      blockPoolUsed -= node.getBlockPoolUsed();
       nodesInService--;
       nodesInServiceXceiverCount -= node.getXceiverCount();
       capacityTotal -= node.getCapacity();
       capacityRemaining -= node.getRemaining();
-    } else {
-      capacityTotal -= node.getDfsUsed();
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
+    } else if (!node.isDecommissioned()) {
+      cacheCapacity -= node.getCacheCapacity();
+      cacheUsed -= node.getCacheUsed();
     }
-    cacheCapacity -= node.getCacheCapacity();
-    cacheUsed -= node.getCacheUsed();
     Set<StorageType> storageTypes = new HashSet<>();
     for (DatanodeStorageInfo storageInfo : node.getStorageInfos()) {
       statsMap.subtractStorage(storageInfo, node);
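Note that subtract() mirrors add() branch for branch; if the two ever disagreed, removing a node in a different admin state than it was added in would leave the aggregates skewed. The mirrored method for the CapacityModel sketch above (same invented names, not the real class):

  void subtract(Node node) {
    xceiverCount -= node.xceivers;
    if (!node.decommissionInProgress && !node.decommissioned) {
      // Undo exactly what add() did for an in-service node.
      capacityUsed -= node.dfsUsed;
      blockPoolUsed -= node.blockPoolUsed;
      capacityTotal -= node.capacity;
      capacityRemaining -= node.remaining;
      cacheCapacity -= node.cacheCapacity;
      cacheUsed -= node.cacheUsed;
      nodesInService--;
      nodesInServiceXceiverCount -= node.xceivers;
    } else if (!node.decommissioned) {
      // Decommission-in-progress: only cache stats were counted.
      cacheCapacity -= node.cacheCapacity;
      cacheUsed -= node.cacheUsed;
    }
  }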
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a77f54/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 3ae9e25..d648bca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
@@ -350,14 +351,13 @@ public class TestDecommission {
 
     for (int i = 0; i < 10; i++) {
       long[] newStats = namenode.getRpcServer().getStats();
 
-      // For decommissioning nodes, ensure capacity of the DN is no longer
-      // counted. Only used space of the DN is counted in cluster capacity
+      // For decommissioning nodes, ensure capacity of the DN and dfsUsed
+      // is no longer counted towards total
       assertEquals(newStats[0],
-          decommissioning ? info.getDfsUsed() : info.getCapacity());
+          decommissioning ? 0 : info.getCapacity());
 
-      // Ensure cluster used capacity is counted for both normal and
-      // decommissioning nodes
-      assertEquals(newStats[1], info.getDfsUsed());
+      // Ensure cluster used capacity is counted for normal nodes only
+      assertEquals(newStats[1], decommissioning ? 0 : info.getDfsUsed());
 
       // For decommissioning nodes, remaining space from the DN is not counted
       assertEquals(newStats[2], decommissioning ? 0 : info.getRemaining());
@@ -1264,4 +1264,39 @@ public class TestDecommission {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testUsedCapacity() throws Exception {
+    int numNamenodes = 1;
+    int numDatanodes = 2;
+
+    startCluster(numNamenodes, numDatanodes, conf);
+    cluster.waitActive();
+    FSNamesystem ns = cluster.getNamesystem(0);
+    BlockManager blockManager = ns.getBlockManager();
+    DatanodeStatistics datanodeStatistics = blockManager.getDatanodeManager()
+        .getDatanodeStatistics();
+
+    long initialUsedCapacity = datanodeStatistics.getCapacityUsed();
+    long initialTotalCapacity = datanodeStatistics.getCapacityTotal();
+    long initialBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
+    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
+        new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
+    namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(numDatanodes));
+    ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
+    // decommission one node
+    DatanodeInfo decomNode = decommissionNode(0, null, decommissionedNodes,
+        AdminStates.DECOMMISSIONED);
+    decommissionedNodes.add(decomNode);
+    long newUsedCapacity = datanodeStatistics.getCapacityUsed();
+    long newTotalCapacity = datanodeStatistics.getCapacityTotal();
+    long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
+
+    assertTrue("DfsUsedCapacity should not be the same after a node has " +
+        "been decommissioned!", initialUsedCapacity != newUsedCapacity);
+    assertTrue("TotalCapacity should not be the same after a node has " +
+        "been decommissioned!", initialTotalCapacity != newTotalCapacity);
+    assertTrue("BlockPoolUsed should not be the same after a node has " +
+        "been decommissioned!", initialBlockPoolUsed != newBlockPoolUsed);
+  }
 }
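To make the effect the new testUsedCapacity() asserts concrete, a small walk-through against the CapacityModel/Node sketch above, with the subtract() method folded in (invented numbers, not output from the real test):

public class CapacityModelDemo {
  public static void main(String[] args) {
    CapacityModel stats = new CapacityModel();

    Node a = node(100, 40);   // capacity, dfsUsed
    Node b = node(100, 70);
    stats.add(a);
    stats.add(b);
    // Both in service: capacityTotal=200, capacityUsed=110.

    // Simulate the start of decommissioning b: remove, flip state, re-add,
    // which is roughly what the heartbeat bookkeeping does on a state change.
    stats.subtract(b);
    b.decommissionInProgress = true;
    stats.add(b);

    // After this patch: capacityTotal=100, capacityUsed=40 (b not counted).
    // Before it, the same sequence gave capacityTotal=170 (100 + b's dfsUsed)
    // and capacityUsed=110.
    System.out.println(stats.capacityTotal + " " + stats.capacityUsed);
  }

  static Node node(long capacity, long dfsUsed) {
    Node n = new Node();
    n.capacity = capacity;
    n.dfsUsed = dfsUsed;
    n.remaining = capacity - dfsUsed;
    return n;
  }
}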