From: kkaranasos@apache.org
To: common-commits@hadoop.apache.org
Date: Tue, 19 Dec 2017 00:09:03 -0000
Subject: [12/50] [abbrv] hadoop git commit: HDFS-12775. [READ] Fix reporting of Provided volumes
HDFS-12775. [READ] Fix reporting of Provided volumes

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b1d3030
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b1d3030
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b1d3030

Branch: refs/heads/YARN-6592
Commit: 3b1d30301bcd35bbe525a7e122d3e5acfab92c88
Parents: e1a28f9
Author: Virajith Jalaparti
Authored: Thu Nov 16 03:52:12 2017 -0800
Committer: Chris Douglas
Committed: Fri Dec 15 17:51:39 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 -
 .../server/blockmanagement/BlockManager.java    |  19 ++-
 .../blockmanagement/DatanodeDescriptor.java     |  24 ++--
 .../blockmanagement/DatanodeStatistics.java     |   3 +
 .../server/blockmanagement/DatanodeStats.java   |   4 +-
 .../blockmanagement/HeartbeatManager.java       |   9 +-
 .../blockmanagement/ProvidedStorageMap.java     |  60 +++++++--
 .../blockmanagement/StorageTypeStats.java       |  33 ++++-
 .../fsdataset/impl/DefaultProvidedVolumeDF.java |  58 ---------
 .../fsdataset/impl/ProvidedVolumeDF.java        |  34 -----
 .../fsdataset/impl/ProvidedVolumeImpl.java      | 101 ++++++++++++---
 .../federation/metrics/FederationMBean.java     |   6 +
 .../federation/metrics/FederationMetrics.java   |   5 +
 .../federation/metrics/NamenodeBeanMetrics.java |  10 ++
 .../resolver/MembershipNamenodeResolver.java    |   1 +
 .../resolver/NamenodeStatusReport.java          |  12 +-
 .../router/NamenodeHeartbeatService.java        |   3 +-
 .../store/records/MembershipStats.java          |   4 +
 .../records/impl/pb/MembershipStatsPBImpl.java  |  10 ++
 .../hdfs/server/namenode/FSNamesystem.java      |  12 ++
 .../hdfs/server/namenode/NameNodeMXBean.java    |  10 +-
 .../namenode/metrics/FSNamesystemMBean.java     |   7 +-
 .../src/main/proto/FederationProtocol.proto     |   1 +
 .../src/main/resources/hdfs-default.xml         |   8 --
 .../src/main/webapps/hdfs/dfshealth.html        |   1 +
 .../blockmanagement/TestProvidedStorageMap.java |  39 +++---
 .../fsdataset/impl/TestProvidedImpl.java        |  55 ++------
 .../metrics/TestFederationMetrics.java          |   2 +
 .../TestNameNodeProvidedImplementation.java     | 125 ++++++++++++++++---
 29 files changed, 425 insertions(+), 232 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index cb57675..fbdc859 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -331,7 +331,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_PROVIDED_ENABLED = "dfs.namenode.provided.enabled";
   public static final boolean DFS_NAMENODE_PROVIDED_ENABLED_DEFAULT = false;
 
-  public static final String DFS_PROVIDER_DF_CLASS = "dfs.provided.df.class";
   public static final String DFS_PROVIDER_STORAGEUUID = "dfs.provided.storage.id";
   public static final String DFS_PROVIDER_STORAGEUUID_DEFAULT = "DS-PROVIDED";
   public static final String DFS_PROVIDED_ALIASMAP_CLASS = "dfs.provided.aliasmap.class";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 07502c1..f92c4e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -103,6 +103,8 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.hdfs.util.FoldedTreeSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.namenode.CacheManager;
@@ -2402,6 +2404,21 @@ public class BlockManager implements BlockStatsMXBean {
     }
   }
 
+  public long getProvidedCapacity() {
+    return providedStorageMap.getCapacity();
+  }
+
+  public void updateHeartbeat(DatanodeDescriptor node, StorageReport[] reports,
+      long cacheCapacity, long cacheUsed, int xceiverCount, int failedVolumes,
+      VolumeFailureSummary volumeFailureSummary) {
+
+    for (StorageReport report: reports) {
+      providedStorageMap.updateStorage(node, report.getStorage());
+    }
+    node.updateHeartbeat(reports, cacheCapacity, cacheUsed, xceiverCount,
+        failedVolumes, volumeFailureSummary);
+  }
+
   /**
    * StatefulBlockInfo is used to build the "toUC" list, which is a list of
    * updates to the information about under-construction blocks.
@@ -2463,7 +2480,7 @@ public class BlockManager implements BlockStatsMXBean {
       // !#! Register DN with provided storage, not with storage owned by DN
       // !#! DN should still have a ref to the DNStorageInfo
       DatanodeStorageInfo storageInfo =
-          providedStorageMap.getStorage(node, storage, context);
+          providedStorageMap.getStorage(node, storage);
       if (storageInfo == null) {
         // We handle this for backwards compatibility.
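The updateHeartbeat hook above is the crux of this change: every storage report is first offered to the ProvidedStorageMap, so a PROVIDED report gets bound to the one shared DatanodeStorageInfo instead of a per-node record. A minimal, standalone sketch of that routing pattern follows; all names are illustrative, not the actual HDFS types.

import java.util.Map;

// Sketch: one shared record for the provided store, per-node records for
// everything else. Hypothetical classes, assumed for illustration only.
class SharedStorageRouter {
  static final String PROVIDED_ID = "DS-PROVIDED";
  private final StorageRecord shared = new StorageRecord(PROVIDED_ID);

  StorageRecord route(String storageId, Map<String, StorageRecord> nodeMap) {
    if (PROVIDED_ID.equals(storageId)) {
      // every datanode reporting the provided store maps to the same record
      nodeMap.putIfAbsent(storageId, shared);
      return shared;
    }
    // local storages get (or create) a per-node record
    return nodeMap.computeIfAbsent(storageId, StorageRecord::new);
  }

  static final class StorageRecord {
    final String id;
    StorageRecord(String id) { this.id = id; }
  }
}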
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index c17ab4c..83c608f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -449,24 +449,24 @@ public class DatanodeDescriptor extends DatanodeInfo {
     this.volumeFailures = volFailures;
     this.volumeFailureSummary = volumeFailureSummary;
     for (StorageReport report : reports) {
-      totalCapacity += report.getCapacity();
-      totalRemaining += report.getRemaining();
-      totalBlockPoolUsed += report.getBlockPoolUsed();
-      totalDfsUsed += report.getDfsUsed();
-      totalNonDfsUsed += report.getNonDfsUsed();
-      // for PROVIDED storages, do not call updateStorage() unless
-      // DatanodeStorageInfo already exists!
-      if (StorageType.PROVIDED.equals(report.getStorage().getStorageType())
-          && storageMap.get(report.getStorage().getStorageID()) == null) {
-        continue;
-      }
-      DatanodeStorageInfo storage = updateStorage(report.getStorage());
+      DatanodeStorageInfo storage =
+          storageMap.get(report.getStorage().getStorageID());
       if (checkFailedStorages) {
         failedStorageInfos.remove(storage);
       }
       storage.receivedHeartbeat(report);
+      // skip accounting for capacity of PROVIDED storages!
+      if (StorageType.PROVIDED.equals(storage.getStorageType())) {
+        continue;
+      }
+
+      totalCapacity += report.getCapacity();
+      totalRemaining += report.getRemaining();
+      totalBlockPoolUsed += report.getBlockPoolUsed();
+      totalDfsUsed += report.getDfsUsed();
+      totalNonDfsUsed += report.getNonDfsUsed();
     }
     // Update total metrics for the node.
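The reordered loop above encodes the accounting rule behind the patch: a PROVIDED storage is one remote store visible from every datanode, so per-node capacity totals must exclude it or the cluster total would count it once per node. A toy illustration of that rule, under the same assumption (hypothetical types):

import java.util.List;

// Per-node totals sum only local storage reports; the shared provided
// report is skipped so the remote store is not multiplied by node count.
final class NodeTotals {
  long capacity;
  long dfsUsed;

  void update(List<Report> reports) {
    capacity = 0;
    dfsUsed = 0;
    for (Report r : reports) {
      if (r.provided) {
        continue; // tracked cluster-wide, not per node
      }
      capacity += r.capacity;
      dfsUsed += r.dfsUsed;
    }
  }

  static final class Report {
    final boolean provided;
    final long capacity;
    final long dfsUsed;
    Report(boolean provided, long capacity, long dfsUsed) {
      this.provided = provided;
      this.capacity = capacity;
      this.dfsUsed = dfsUsed;
    }
  }
}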
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
index 33eca2e..36a9c2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStatistics.java
@@ -77,4 +77,7 @@ public interface DatanodeStatistics {
 
   /** @return Storage Tier statistics*/
   Map<StorageType, StorageTypeStats> getStorageTypeStats();
+
+  /** @return the provided capacity */
+  public long getProvidedCapacity();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
index 8386b27..912d4d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStats.java
@@ -183,7 +183,7 @@ class DatanodeStats {
       StorageTypeStats storageTypeStats =
           storageTypeStatsMap.get(storageType);
       if (storageTypeStats == null) {
-        storageTypeStats = new StorageTypeStats();
+        storageTypeStats = new StorageTypeStats(storageType);
         storageTypeStatsMap.put(storageType, storageTypeStats);
       }
       storageTypeStats.addNode(node);
@@ -194,7 +194,7 @@ class DatanodeStats {
       StorageTypeStats storageTypeStats =
           storageTypeStatsMap.get(info.getStorageType());
       if (storageTypeStats == null) {
-        storageTypeStats = new StorageTypeStats();
+        storageTypeStats = new StorageTypeStats(info.getStorageType());
         storageTypeStatsMap.put(info.getStorageType(), storageTypeStats);
       }
       storageTypeStats.addStorage(info, node);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index a72ad64..1972a6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -195,6 +195,11 @@ class HeartbeatManager implements DatanodeStatistics {
     return stats.getStatsMap();
   }
 
+  @Override
+  public long getProvidedCapacity() {
+    return blockManager.getProvidedCapacity();
+  }
+
   synchronized void register(final DatanodeDescriptor d) {
     if (!d.isAlive()) {
       addDatanode(d);
@@ -232,8 +237,8 @@ class HeartbeatManager implements DatanodeStatistics {
       int xceiverCount, int failedVolumes,
       VolumeFailureSummary volumeFailureSummary) {
     stats.subtract(node);
-    node.updateHeartbeat(reports, cacheCapacity, cacheUsed,
-        xceiverCount, failedVolumes, volumeFailureSummary);
+    blockManager.updateHeartbeat(node, reports, cacheCapacity, cacheUsed,
+        xceiverCount, failedVolumes, volumeFailureSummary);
     stats.add(node);
   }
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index 3d19775..2bc8faa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
-import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.common.BlockAlias;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
@@ -72,6 +71,7 @@ public class ProvidedStorageMap {
   private final ProvidedDescriptor providedDescriptor;
   private final DatanodeStorageInfo providedStorageInfo;
   private boolean providedEnabled;
+  private long capacity;
 
   ProvidedStorageMap(RwLock lock, BlockManager bm, Configuration conf)
       throws IOException {
@@ -112,14 +112,13 @@ public class ProvidedStorageMap {
   /**
    * @param dn datanode descriptor
    * @param s data node storage
-   * @param context the block report context
    * @return the {@link DatanodeStorageInfo} for the specified datanode.
    * If {@code s} corresponds to a provided storage, the storage info
    * representing provided storage is returned.
   * @throws IOException
   */
-  DatanodeStorageInfo getStorage(DatanodeDescriptor dn, DatanodeStorage s,
-      BlockReportContext context) throws IOException {
+  DatanodeStorageInfo getStorage(DatanodeDescriptor dn, DatanodeStorage s)
+      throws IOException {
     if (providedEnabled && storageId.equals(s.getStorageID())) {
       if (StorageType.PROVIDED.equals(s.getStorageType())) {
         if (providedStorageInfo.getState() == State.FAILED
             && s.getState() == State.NORMAL) {
           providedStorageInfo.setState(State.NORMAL);
           LOG.info("Provided storage transitioning to state " + State.NORMAL);
         }
-        processProvidedStorageReport(context);
-        dn.injectStorage(providedStorageInfo);
+        if (dn.getStorageInfo(s.getStorageID()) == null) {
+          dn.injectStorage(providedStorageInfo);
+        }
+        processProvidedStorageReport();
         return providedDescriptor.getProvidedStorage(dn, s);
       }
       LOG.warn("Reserved storage {} reported as non-provided from {}", s, dn);
     }
     return dn.getStorageInfo(s.getStorageID());
   }
 
-  private void processProvidedStorageReport(BlockReportContext context)
+  private void processProvidedStorageReport()
       throws IOException {
     assert lock.hasWriteLock() : "Not holding write lock";
     if (providedStorageInfo.getBlockReportCount() == 0
@@ -172,6 +173,26 @@ public class ProvidedStorageMap {
     }
   }
 
+  public long getCapacity() {
+    if (providedStorageInfo == null) {
+      return 0;
+    }
+    return providedStorageInfo.getCapacity();
+  }
+
+  public void updateStorage(DatanodeDescriptor node, DatanodeStorage storage) {
+    if (providedEnabled && storageId.equals(storage.getStorageID())) {
+      if (StorageType.PROVIDED.equals(storage.getStorageType())) {
+        node.injectStorage(providedStorageInfo);
+        return;
+      } else {
+        LOG.warn("Reserved storage {} reported as non-provided from {}",
+            storage, node);
+      }
+    }
+    node.updateStorage(storage);
+  }
+
   /**
    * Builder used for creating {@link LocatedBlocks} when a block is provided.
   */
@@ -295,10 +316,12 @@ public class ProvidedStorageMap {
    * An abstract DatanodeDescriptor to track datanodes with provided storages.
    * NOTE: never resolved through registerDatanode, so not in the topology.
    */
-  static class ProvidedDescriptor extends DatanodeDescriptor {
+  public static class ProvidedDescriptor extends DatanodeDescriptor {
 
     private final NavigableMap<String, DatanodeDescriptor> dns =
         new ConcurrentSkipListMap<>();
+    public final static String NETWORK_LOCATION = "/REMOTE";
+    public final static String NAME = "PROVIDED";
 
     ProvidedDescriptor() {
       super(new DatanodeID(
@@ -444,6 +467,21 @@ public class ProvidedStorageMap {
     public int hashCode() {
       return super.hashCode();
     }
+
+    @Override
+    public String toString() {
+      return "PROVIDED-LOCATION";
+    }
+
+    @Override
+    public String getNetworkLocation() {
+      return NETWORK_LOCATION;
+    }
+
+    @Override
+    public String getName() {
+      return NAME;
+    }
   }
 
   /**
@@ -480,7 +518,13 @@ public class ProvidedStorageMap {
       super.setState(state);
     }
   }
+
+  @Override
+  public String toString() {
+    return "PROVIDED-STORAGE";
+  }
 }
+
 /**
  * Used to emulate block reports for provided blocks.
 */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
index 978009e..c335ec6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/StorageTypeStats.java
@@ -22,6 +22,7 @@ import java.beans.ConstructorProperties;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.StorageType;
 
 /**
  * Statistics per StorageType.
@@ -36,6 +37,7 @@ public class StorageTypeStats {
   private long capacityRemaining = 0L;
   private long blockPoolUsed = 0L;
   private int nodesInService = 0;
+  private StorageType storageType;
 
   @ConstructorProperties({"capacityTotal", "capacityUsed", "capacityNonDfsUsed",
       "capacityRemaining", "blockPoolUsed", "nodesInService"})
@@ -51,22 +53,47 @@ public class StorageTypeStats {
   }
 
   public long getCapacityTotal() {
+    // for PROVIDED storage, avoid counting the same storage
+    // across multiple datanodes
+    if (storageType == StorageType.PROVIDED && nodesInService > 0) {
+      return capacityTotal/nodesInService;
+    }
     return capacityTotal;
   }
 
   public long getCapacityUsed() {
+    // for PROVIDED storage, avoid counting the same storage
+    // across multiple datanodes
+    if (storageType == StorageType.PROVIDED && nodesInService > 0) {
+      return capacityUsed/nodesInService;
+    }
     return capacityUsed;
   }
 
   public long getCapacityNonDfsUsed() {
+    // for PROVIDED storage, avoid counting the same storage
+    // across multiple datanodes
+    if (storageType == StorageType.PROVIDED && nodesInService > 0) {
+      return capacityNonDfsUsed/nodesInService;
+    }
     return capacityNonDfsUsed;
   }
 
   public long getCapacityRemaining() {
+    // for PROVIDED storage, avoid counting the same storage
+    // across multiple datanodes
+    if (storageType == StorageType.PROVIDED && nodesInService > 0) {
+      return capacityRemaining/nodesInService;
+    }
     return capacityRemaining;
   }
 
   public long getBlockPoolUsed() {
+    // for PROVIDED storage, avoid counting the same storage
+    // across multiple datanodes
+    if (storageType == StorageType.PROVIDED && nodesInService > 0) {
+      return blockPoolUsed/nodesInService;
+    }
     return blockPoolUsed;
   }
 
@@ -74,7 +101,9 @@ public class StorageTypeStats {
     return nodesInService;
   }
 
-  StorageTypeStats() {}
+  StorageTypeStats(StorageType storageType) {
+    this.storageType = storageType;
+  }
 
   StorageTypeStats(StorageTypeStats other) {
     capacityTotal = other.capacityTotal;
@@ -87,6 +116,7 @@ public class StorageTypeStats {
 
   void addStorage(final DatanodeStorageInfo info,
       final DatanodeDescriptor node) {
+    assert storageType == info.getStorageType();
     capacityUsed += info.getDfsUsed();
     capacityNonDfsUsed += info.getNonDfsUsed();
     blockPoolUsed += info.getBlockPoolUsed();
@@ -106,6 +136,7 @@ public class StorageTypeStats {
 
   void subtractStorage(final DatanodeStorageInfo info,
       final DatanodeDescriptor node) {
+    assert storageType == info.getStorageType();
    capacityUsed -= info.getDfsUsed();
    capacityNonDfsUsed -= info.getNonDfsUsed();
    blockPoolUsed -= info.getBlockPoolUsed();
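The division by nodesInService is the namenode-side half of the de-duplication: the per-StorageType aggregate still sums every datanode's report, so for PROVIDED the getters divide the sum back down by the number of reporting nodes. A self-contained sketch of that normalization (illustrative class, not the Hadoop one):

// N in-service nodes each report the one shared provided store; the
// aggregate sum is therefore N times the real value, so divide it back.
final class DedupStats {
  private long capacityTotal;
  private int nodesInService;
  private final boolean shared; // true for a PROVIDED-like storage type

  DedupStats(boolean shared) { this.shared = shared; }

  void addNode(long reportedCapacity) {
    capacityTotal += reportedCapacity;
    nodesInService++;
  }

  long getCapacityTotal() {
    if (shared && nodesInService > 0) {
      return capacityTotal / nodesInService;
    }
    return capacityTotal;
  }
}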
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java
deleted file mode 100644
index 24921c4..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/DefaultProvidedVolumeDF.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * The default usage statistics for a provided volume.
- */
-public class DefaultProvidedVolumeDF
-    implements ProvidedVolumeDF, Configurable {
-
-  @Override
-  public void setConf(Configuration conf) {
-  }
-
-  @Override
-  public Configuration getConf() {
-    return null;
-  }
-
-  @Override
-  public long getCapacity() {
-    return Long.MAX_VALUE;
-  }
-
-  @Override
-  public long getSpaceUsed() {
-    return 0;
-  }
-
-  @Override
-  public long getBlockPoolUsed(String bpid) {
-    return 0;
-  }
-
-  @Override
-  public long getAvailable() {
-    return Long.MAX_VALUE;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeDF.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeDF.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeDF.java
deleted file mode 100644
index 4d28883..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeDF.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
-
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
-
-/**
- * This interface is used to define the usage statistics
- * of the provided storage.
- */
-public interface ProvidedVolumeDF {
-
-  long getCapacity();
-
-  long getSpaceUsed();
-
-  long getBlockPoolUsed(String bpid);
-
-  long getAvailable();
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
index d103b64..65487f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
@@ -26,6 +26,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -89,6 +90,30 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
     return suffix;
   }
 
+  /**
+   * Class to keep track of the capacity usage statistics for provided volumes.
+   */
+  public static class ProvidedVolumeDF {
+
+    private AtomicLong used = new AtomicLong();
+
+    public long getSpaceUsed() {
+      return used.get();
+    }
+
+    public void decDfsUsed(long value) {
+      used.addAndGet(-value);
+    }
+
+    public void incDfsUsed(long value) {
+      used.addAndGet(value);
+    }
+
+    public long getCapacity() {
+      return getSpaceUsed();
+    }
+  }
+
   static class ProvidedBlockPoolSlice {
     private ProvidedVolumeImpl providedVolume;
 
@@ -96,6 +121,8 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
     private Configuration conf;
     private String bpid;
     private ReplicaMap bpVolumeMap;
+    private ProvidedVolumeDF df;
+    private AtomicLong numOfBlocks = new AtomicLong();
 
     ProvidedBlockPoolSlice(String bpid, ProvidedVolumeImpl volume,
         Configuration conf) {
@@ -107,6 +134,7 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
       aliasMap = ReflectionUtils.newInstance(fmt, conf);
       this.conf = conf;
       this.bpid = bpid;
+      this.df = new ProvidedVolumeDF();
       bpVolumeMap.initBlockPool(bpid);
       LOG.info("Created alias map using class: " + aliasMap.getClass());
     }
@@ -155,6 +183,8 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
         if (oldReplica == null) {
           volumeMap.add(bpid, newReplica);
           bpVolumeMap.add(bpid, newReplica);
+          incrNumBlocks();
+          incDfsUsed(region.getBlock().getNumBytes());
         } else {
           throw new IOException("A block with id " + newReplica.getBlockId()
               + " already exists in the volumeMap");
@@ -163,6 +193,10 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
       }
     }
 
+    private void incrNumBlocks() {
+      numOfBlocks.incrementAndGet();
+    }
+
     public boolean isEmpty() {
       return bpVolumeMap.replicas(bpid).size() == 0;
     }
@@ -199,6 +233,18 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
         }
       }
     }
+
+    public long getNumOfBlocks() {
+      return numOfBlocks.get();
+    }
+
+    long getDfsUsed() throws IOException {
+      return df.getSpaceUsed();
+    }
+
+    void incDfsUsed(long value) {
+      df.incDfsUsed(value);
+    }
   }
 
   private URI baseURI;
@@ -217,10 +263,7 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
         "Only provided storages must use ProvidedVolume";
 
     baseURI = getStorageLocation().getUri();
-    Class<? extends ProvidedVolumeDF> dfClass =
-        conf.getClass(DFSConfigKeys.DFS_PROVIDER_DF_CLASS,
-            DefaultProvidedVolumeDF.class, ProvidedVolumeDF.class);
-    df = ReflectionUtils.newInstance(dfClass, conf);
+    df = new ProvidedVolumeDF();
     remoteFS = FileSystem.get(baseURI, conf);
   }
 
@@ -231,34 +274,47 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
 
   @Override
   public long getCapacity() {
-    if (configuredCapacity < 0) {
-      return df.getCapacity();
+    try {
+      // default to whatever is the space used!
+      return getDfsUsed();
+    } catch (IOException e) {
+      LOG.warn("Exception when trying to get capacity of ProvidedVolume: {}",
+          e);
     }
-    return configuredCapacity;
+    return 0L;
   }
 
   @Override
   public long getDfsUsed() throws IOException {
-    return df.getSpaceUsed();
+    long dfsUsed = 0;
+    synchronized(getDataset()) {
+      for(ProvidedBlockPoolSlice s : bpSlices.values()) {
+        dfsUsed += s.getDfsUsed();
+      }
+    }
+    return dfsUsed;
   }
 
   @Override
   long getBlockPoolUsed(String bpid) throws IOException {
-    if (bpSlices.containsKey(bpid)) {
-      return df.getBlockPoolUsed(bpid);
-    } else {
-      throw new IOException("block pool " + bpid + " is not found");
-    }
+    return getProvidedBlockPoolSlice(bpid).getDfsUsed();
   }
 
   @Override
   public long getAvailable() throws IOException {
-    return df.getAvailable();
+    long remaining = getCapacity() - getDfsUsed();
+    // do not report less than 0 remaining space for PROVIDED storage
+    // to prevent marking it as over capacity on NN
+    if (remaining < 0L) {
+      LOG.warn("Volume {} has less than 0 available space", this);
+      return 0L;
+    }
+    return remaining;
   }
 
   @Override
   long getActualNonDfsUsed() throws IOException {
-    return df.getSpaceUsed();
+    return 0L;
   }
 
   @Override
@@ -267,6 +323,21 @@ public class ProvidedVolumeImpl extends FsVolumeImpl {
   }
 
   @Override
+  long getNumBlocks() {
+    long numBlocks = 0;
+    for (ProvidedBlockPoolSlice s : bpSlices.values()) {
+      numBlocks += s.getNumOfBlocks();
+    }
+    return numBlocks;
+  }
+
+  @Override
+  void incDfsUsedAndNumBlocks(String bpid, long value) {
+    throw new UnsupportedOperationException(
+        "ProvidedVolume does not yet support writes");
+  }
+
+  @Override
   public URI getBaseURI() {
     return baseURI;
   }
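With the pluggable ProvidedVolumeDF interface gone, the new inner class simply equates a provided volume's capacity with its tracked usage, which is why getAvailable() above can never legitimately go negative and is clamped at zero. A compact sketch of that accounting model (illustrative, not the Hadoop class):

import java.util.concurrent.atomic.AtomicLong;

// Capacity is defined as whatever is used, so remaining space is zero by
// construction; the clamp guards against transient negative values that
// would otherwise mark the volume as over capacity on the namenode.
final class UsageOnlyDF {
  private final AtomicLong used = new AtomicLong();

  void incUsed(long bytes) { used.addAndGet(bytes); }

  long getCapacity() { return used.get(); } // capacity tracks usage

  long getAvailable() {
    long remaining = getCapacity() - used.get(); // always 0 here
    return Math.max(0L, remaining);              // clamp defensively
  }
}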
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
index cb4245a..8abfc6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java
@@ -65,6 +65,12 @@ public interface FederationMBean {
   long getRemainingCapacity();
 
   /**
+   * Get the total remote storage capacity mounted in the federated cluster.
+   * @return Remote capacity of the federated cluster.
+   */
+  long getProvidedSpace();
+
+  /**
    * Get the number of nameservices in the federation.
    * @return Number of nameservices in the federation.
   */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
index 7844a2e..4582825 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
@@ -272,6 +272,11 @@ public class FederationMetrics implements FederationMBean {
   }
 
   @Override
+  public long getProvidedSpace() {
+    return getNameserviceAggregatedLong(MembershipStats::getProvidedSpace);
+  }
+
+  @Override
   public long getUsedCapacity() {
     return getTotalCapacity() - getRemainingCapacity();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
index 23cd675..c4e5b5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
@@ -169,6 +169,11 @@ public class NamenodeBeanMetrics
   }
 
   @Override
+  public long getProvidedCapacity() {
+    return getFederationMetrics().getProvidedSpace();
+  }
+
+  @Override
   public String getSafemode() {
     // We assume that the global federated view is never in safe mode
     return "";
@@ -450,6 +455,11 @@ public class NamenodeBeanMetrics
   }
 
   @Override
+  public long getProvidedCapacityTotal() {
+    return getProvidedCapacity();
+  }
+
+  @Override
   public long getFilesTotal() {
     return getFederationMetrics().getNumFiles();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
index 98ddd22..b87eeec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
@@ -236,6 +236,7 @@ public class MembershipNamenodeResolver
         report.getNumOfBlocksPendingDeletion());
     stats.setAvailableSpace(report.getAvailableSpace());
     stats.setTotalSpace(report.getTotalSpace());
+    stats.setProvidedSpace(report.getProvidedSpace());
     stats.setNumOfDecommissioningDatanodes(
         report.getNumDecommissioningDatanodes());
     stats.setNumOfActiveDatanodes(report.getNumLiveDatanodes());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
index 555e2ee..d3c6d87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
@@ -58,6 +58,7 @@ public class NamenodeStatusReport {
   private long numOfBlocksUnderReplicated = -1;
   private long numOfBlocksPendingDeletion = -1;
   private long totalSpace = -1;
+  private long providedSpace = -1;
 
   /** If the fields are valid. */
   private boolean registrationValid = false;
@@ -296,7 +297,7 @@ public class NamenodeStatusReport {
   public void setNamesystemInfo(long available, long total,
       long numFiles, long numBlocks, long numBlocksMissing,
       long numBlocksPendingReplication, long numBlocksUnderReplicated,
-      long numBlocksPendingDeletion) {
+      long numBlocksPendingDeletion, long providedSpace) {
     this.totalSpace = total;
     this.availableSpace = available;
     this.numOfBlocks = numBlocks;
@@ -306,6 +307,7 @@ public class NamenodeStatusReport {
     this.numOfBlocksPendingDeletion = numBlocksPendingDeletion;
     this.numOfFiles = numFiles;
     this.statsValid = true;
+    this.providedSpace = providedSpace;
   }
 
   /**
@@ -345,6 +347,14 @@ public class NamenodeStatusReport {
   }
 
   /**
+   * Get the space occupied by provided storage.
+   *
+   * @return the provided capacity.
+   */
+  public long getProvidedSpace() {
+    return this.providedSpace;
+  }
+
+  /**
    * Get the number of missing blocks.
    *
    * @return Number of missing blocks.
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
index 7d69a26..aaf2817 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
@@ -350,7 +350,8 @@ public class NamenodeHeartbeatService extends PeriodicService {
             jsonObject.getLong("MissingBlocks"),
             jsonObject.getLong("PendingReplicationBlocks"),
             jsonObject.getLong("UnderReplicatedBlocks"),
-            jsonObject.getLong("PendingDeletionBlocks"));
+            jsonObject.getLong("PendingDeletionBlocks"),
+            jsonObject.getLong("ProvidedCapacityTotal"));
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java
index 0bd19d9..654140c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipStats.java
@@ -45,6 +45,10 @@ public abstract class MembershipStats extends BaseRecord {
 
   public abstract long getAvailableSpace();
 
+  public abstract void setProvidedSpace(long capacity);
+
+  public abstract long getProvidedSpace();
+
   public abstract void setNumOfFiles(long files);
 
   public abstract long getNumOfFiles();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java
index 9f0a167..3347bc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/impl/pb/MembershipStatsPBImpl.java
@@ -78,6 +78,16 @@ public class MembershipStatsPBImpl extends MembershipStats
   }
 
   @Override
+  public void setProvidedSpace(long capacity) {
+    this.translator.getBuilder().setProvidedSpace(capacity);
+  }
+
+  @Override
+  public long getProvidedSpace() {
+    return this.translator.getProtoOrBuilder().getProvidedSpace();
+  }
+
+  @Override
   public void setNumOfFiles(long files) {
     this.translator.getBuilder().setNumOfFiles(files);
   }
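The federation plumbing above is a straight pass-through: each namenode's ProvidedCapacityTotal (scraped from its JMX bean) lands in that nameservice's MembershipStats record, and the router-level getProvidedSpace() just folds the per-nameservice values into one number. A minimal sketch of that fold; MembershipStats::getProvidedSpace is the real accessor in this patch, while the aggregator itself is an assumed, simplified stand-in for getNameserviceAggregatedLong:

import java.util.List;
import java.util.function.ToLongFunction;

// Sum one long-valued stat across all membership records.
final class StatsAggregator {
  static <T> long aggregate(List<T> records, ToLongFunction<T> stat) {
    return records.stream().mapToLong(stat).sum();
  }
}

// e.g. StatsAggregator.aggregate(memberships, MembershipStats::getProvidedSpace)
// would mirror FederationMetrics.getProvidedSpace() above (usage assumed).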
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index dedb737..286c41c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4166,6 +4166,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return datanodeStatistics.getCapacityRemaining();
   }
 
+  @Override // FSNamesystemMBean
+  @Metric({"ProvidedCapacityTotal",
+      "Total space used in PROVIDED storage in bytes" })
+  public long getProvidedCapacityTotal() {
+    return datanodeStatistics.getProvidedCapacity();
+  }
+
   @Metric({"CapacityRemainingGB", "Remaining capacity in GB"})
   public float getCapacityRemainingGB() {
     return DFSUtil.roundBytesToGB(getCapacityRemaining());
@@ -5730,6 +5737,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   @Override // NameNodeMXBean
+  public long getProvidedCapacity() {
+    return this.getProvidedCapacityTotal();
+  }
+
+  @Override // NameNodeMXBean
   public String getSafemode() {
     if (!this.isInSafeMode())
       return "";
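FSNamesystem exposes the new total twice: once as a @Metric gauge and once through the NameNodeMXBean interface, which is what dfshealth.html and the router's JMX scrape read as "ProvidedCapacityTotal". A minimal, self-contained example of the MXBean half of that pattern (illustrative bean and attribute names, not the NameNode's):

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// An interface ending in "MXBean" marks the implementing class as an
// MXBean; after registration, getProvidedCapacity() is readable as the
// JMX attribute "ProvidedCapacity".
interface CapacityMXBean {
  long getProvidedCapacity();
}

class Capacity implements CapacityMXBean {
  private volatile long provided;

  @Override
  public long getProvidedCapacity() {
    return provided;
  }

  void set(long v) { provided = v; }

  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    Capacity bean = new Capacity();
    bean.set(1L << 40); // e.g. 1 TiB of provided capacity
    server.registerMBean(bean, new ObjectName("example:type=Capacity"));
  }
}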
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
index 82cec33..e4ed3a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
@@ -65,8 +65,14 @@ public interface NameNodeMXBean {
    * @return the total raw bytes including non-dfs used space
    */
   public long getTotal();
-
-
+
+  /**
+   * Gets capacity of the provided storage mounted, in bytes.
+   *
+   * @return the total raw bytes present in the provided storage.
+   */
+  public long getProvidedCapacity();
+
   /**
    * Gets the safemode status
   *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
index ebdbc12..c25bafd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
@@ -69,7 +69,12 @@ public interface FSNamesystemMBean {
    * @return - used capacity in bytes
    */
   public long getCapacityUsed();
-
+
+  /**
+   * Total PROVIDED storage capacity.
+   * @return - total PROVIDED storage capacity in bytes
+   */
+  public long getProvidedCapacityTotal();
 
   /**
    * Total number of files and directories

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/FederationProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/FederationProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/FederationProtocol.proto
index 88acd08..043a21a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/FederationProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/FederationProtocol.proto
@@ -30,6 +30,7 @@ package hadoop.hdfs;
 message NamenodeMembershipStatsRecordProto {
   optional uint64 totalSpace = 1;
   optional uint64 availableSpace = 2;
+  optional uint64 providedSpace = 3;
 
   optional uint64 numOfFiles = 10;
   optional uint64 numOfBlocks = 11;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 835d8c4..655f9cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4630,14 +4630,6 @@
 </property>
 
 <property>
-  <name>dfs.provided.df.class</name>
-  <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.DefaultProvidedVolumeDF</value>
-  <description>
-    The class that is used to measure usage statistics of provided stores.
-  </description>
-</property>
-
-<property>
   <name>dfs.provided.storage.id</name>
   <value>DS-PROVIDED</value>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 6ae3960..45aee1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -162,6 +162,7 @@
     {#nn}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
index 89741b5..1ef2f2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestProvidedStorageMap.java
@@ -63,15 +63,15 @@ public class TestProvidedStorageMap {
 
   private DatanodeDescriptor createDatanodeDescriptor(int port) {
     return DFSTestUtil.getDatanodeDescriptor("127.0.0.1", port, "defaultRack",
-            "localhost");
+        "localhost");
   }
 
   @Test
   public void testProvidedStorageMap() throws IOException {
     ProvidedStorageMap providedMap = new ProvidedStorageMap(
-            nameSystemLock, bm, conf);
+        nameSystemLock, bm, conf);
     DatanodeStorageInfo providedMapStorage =
-            providedMap.getProvidedStorageInfo();
+        providedMap.getProvidedStorageInfo();
     //the provided storage cannot be null
     assertNotNull(providedMapStorage);
@@ -80,41 +80,40 @@ public class TestProvidedStorageMap {
     //associate two storages to the datanode
     DatanodeStorage dn1ProvidedStorage = new DatanodeStorage(
-            providedStorageID,
-            DatanodeStorage.State.NORMAL,
-            StorageType.PROVIDED);
+        providedStorageID,
+        DatanodeStorage.State.NORMAL,
+        StorageType.PROVIDED);
     DatanodeStorage dn1DiskStorage = new DatanodeStorage(
-            "sid-1", DatanodeStorage.State.NORMAL, StorageType.DISK);
+        "sid-1", DatanodeStorage.State.NORMAL, StorageType.DISK);
 
     when(nameSystemLock.hasWriteLock()).thenReturn(true);
-    DatanodeStorageInfo dns1Provided = providedMap.getStorage(dn1,
-        dn1ProvidedStorage, null);
-    DatanodeStorageInfo dns1Disk = providedMap.getStorage(dn1,
-        dn1DiskStorage, null);
+    DatanodeStorageInfo dns1Provided =
+        providedMap.getStorage(dn1, dn1ProvidedStorage);
+    DatanodeStorageInfo dns1Disk = providedMap.getStorage(dn1, dn1DiskStorage);
 
     assertTrue("The provided storages should be equal",
-            dns1Provided == providedMapStorage);
+        dns1Provided == providedMapStorage);
     assertTrue("Disk storage has not yet been registered with block manager",
-            dns1Disk == null);
+        dns1Disk == null);
     //add the disk storage to the datanode.
     DatanodeStorageInfo dnsDisk = new DatanodeStorageInfo(dn1, dn1DiskStorage);
     dn1.injectStorage(dnsDisk);
     assertTrue("Disk storage must match the injected storage info",
-        dnsDisk == providedMap.getStorage(dn1, dn1DiskStorage, null));
+        dnsDisk == providedMap.getStorage(dn1, dn1DiskStorage));
 
     //create a 2nd datanode
     DatanodeDescriptor dn2 = createDatanodeDescriptor(5010);
     //associate a provided storage with the datanode
     DatanodeStorage dn2ProvidedStorage = new DatanodeStorage(
-            providedStorageID,
-            DatanodeStorage.State.NORMAL,
-            StorageType.PROVIDED);
+        providedStorageID,
+        DatanodeStorage.State.NORMAL,
+        StorageType.PROVIDED);
 
     DatanodeStorageInfo dns2Provided = providedMap.getStorage(
-        dn2, dn2ProvidedStorage, null);
+        dn2, dn2ProvidedStorage);
     assertTrue("The provided storages should be equal",
-            dns2Provided == providedMapStorage);
+        dns2Provided == providedMapStorage);
     assertTrue("The DatanodeDescriptor should contain the provided storage",
-            dn2.getStorageInfo(providedStorageID) == providedMapStorage);
+        dn2.getStorageInfo(providedStorageID) == providedMapStorage);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
index ecab06b..52112f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestProvidedImpl.java
@@ -46,7 +46,6 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
@@ -102,6 +101,7 @@ public class TestProvidedImpl {
   private FsDatasetImpl dataset;
   private static Map<Long, String> blkToPathMap;
   private static List<FsVolumeImpl> providedVolumes;
+  private static long spaceUsed = 0;
 
   /**
    * A simple FileRegion iterator for tests.
   */
@@ -142,6 +142,7 @@ public class TestProvidedImpl {
         }
         writer.flush();
         writer.close();
+        spaceUsed += BLK_LEN;
       } catch (IOException e) {
         e.printStackTrace();
       }
@@ -240,39 +241,6 @@ public class TestProvidedImpl {
     }
   }
 
-  public static class TestProvidedVolumeDF
-      implements ProvidedVolumeDF, Configurable {
-
-    @Override
-    public void setConf(Configuration conf) {
-    }
-
-    @Override
-    public Configuration getConf() {
-      return null;
-    }
-
-    @Override
-    public long getCapacity() {
-      return Long.MAX_VALUE;
-    }
-
-    @Override
-    public long getSpaceUsed() {
-      return -1;
-    }
-
-    @Override
-    public long getBlockPoolUsed(String bpid) {
-      return -1;
-    }
-
-    @Override
-    public long getAvailable() {
-      return Long.MAX_VALUE;
-    }
-  }
-
   private static Storage.StorageDirectory createLocalStorageDirectory(
       File root, Configuration conf)
       throws SecurityException, IOException {
@@ -370,6 +338,8 @@ public class TestProvidedImpl {
     when(datanode.getConf()).thenReturn(conf);
     final DNConf dnConf = new DNConf(datanode);
     when(datanode.getDnConf()).thenReturn(dnConf);
+    // reset the space used
+    spaceUsed = 0;
 
     final BlockScanner disabledBlockScanner = new BlockScanner(datanode, conf);
     when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);
@@ -379,8 +349,6 @@ public class TestProvidedImpl {
 
     this.conf.setClass(DFSConfigKeys.DFS_PROVIDED_ALIASMAP_CLASS,
         TestFileRegionBlockAliasMap.class, BlockAliasMap.class);
-    conf.setClass(DFSConfigKeys.DFS_PROVIDER_DF_CLASS,
-        TestProvidedVolumeDF.class, ProvidedVolumeDF.class);
 
     blkToPathMap = new HashMap<Long, String>();
     providedVolumes = new LinkedList<FsVolumeImpl>();
@@ -410,8 +378,6 @@ public class TestProvidedImpl {
     assertEquals(NUM_PROVIDED_INIT_VOLUMES, providedVolumes.size());
     assertEquals(0, dataset.getNumFailedVolumes());
 
-    TestProvidedVolumeDF df = new TestProvidedVolumeDF();
-
     for (int i = 0; i < providedVolumes.size(); i++) {
       //check basic information about provided volume
       assertEquals(DFSConfigKeys.DFS_PROVIDER_STORAGEUUID_DEFAULT,
@@ -419,18 +385,17 @@ public class TestProvidedImpl {
       assertEquals(StorageType.PROVIDED,
           providedVolumes.get(i).getStorageType());
 
+      long space = providedVolumes.get(i).getBlockPoolUsed(
+          BLOCK_POOL_IDS[CHOSEN_BP_ID]);
       //check the df stats of the volume
-      assertEquals(df.getAvailable(), providedVolumes.get(i).getAvailable());
-      assertEquals(df.getBlockPoolUsed(BLOCK_POOL_IDS[CHOSEN_BP_ID]),
-          providedVolumes.get(i).getBlockPoolUsed(
-              BLOCK_POOL_IDS[CHOSEN_BP_ID]));
+      assertEquals(spaceUsed, space);
+      assertEquals(NUM_PROVIDED_BLKS, providedVolumes.get(i).getNumBlocks());
 
       providedVolumes.get(i).shutdownBlockPool(
           BLOCK_POOL_IDS[1 - CHOSEN_BP_ID], null);
       try {
-        assertEquals(df.getBlockPoolUsed(BLOCK_POOL_IDS[1 - CHOSEN_BP_ID]),
-            providedVolumes.get(i).getBlockPoolUsed(
-                BLOCK_POOL_IDS[1 - CHOSEN_BP_ID]));
+        assertEquals(0, providedVolumes.get(i)
+            .getBlockPoolUsed(BLOCK_POOL_IDS[1 - CHOSEN_BP_ID]));
         //should not be triggered
         assertTrue(false);
       } catch (IOException e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java
index 61fda0e..b445df5 100644
---
 a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/metrics/TestFederationMetrics.java
@@ -190,6 +190,8 @@ public class TestFederationMetrics extends TestMetricsBase {
           json.getLong("numOfDecomActiveDatanodes"));
       assertEquals(stats.getNumOfDecomDeadDatanodes(),
           json.getLong("numOfDecomDeadDatanodes"));
+      assertEquals(stats.getProvidedSpace(),
+          json.getLong("providedSpace"));
       nameservicesFound++;
     }
     assertEquals(getNameservices().size(), nameservicesFound);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index 22f00aa..f6d38f6 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.nio.channels.Channels;
 import java.nio.channels.ReadableByteChannel;
+import java.util.Iterator;
 import java.util.Random;
 
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.conf.Configuration;
@@ -44,13 +45,23 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.ProvidedStorageMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.net.NodeBase;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -59,6 +70,7 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.net.NodeBase.PATH_SEPARATOR_STR;
 import static org.junit.Assert.*;
 
 public class TestNameNodeProvidedImplementation {
@@ -79,6 +91,7 @@ public class TestNameNodeProvidedImplementation {
   private final String filePrefix = "file";
   private final String fileSuffix = ".dat";
   private final int baseFileLen = 1024;
+  private long providedDataSize = 0;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b1d3030/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
index 22f00aa..f6d38f6 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeProvidedImplementation.java
@@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.nio.channels.Channels;
 import java.nio.channels.ReadableByteChannel;
+import java.util.Iterator;
 import java.util.Random;
 
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.conf.Configuration;
@@ -44,13 +45,23 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.ProvidedStorageMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.net.NodeBase;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
@@ -59,6 +70,7 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.net.NodeBase.PATH_SEPARATOR_STR;
 import static org.junit.Assert.*;
 
 public class TestNameNodeProvidedImplementation {
@@ -79,6 +91,7 @@ public class TestNameNodeProvidedImplementation {
   private final String filePrefix = "file";
   private final String fileSuffix = ".dat";
   private final int baseFileLen = 1024;
+  private long providedDataSize = 0;
 
   Configuration conf;
   MiniDFSCluster cluster;
@@ -135,6 +148,7 @@ public class TestNameNodeProvidedImplementation {
         }
         writer.flush();
         writer.close();
+        providedDataSize += newFile.length();
       } catch (IOException e) {
         e.printStackTrace();
       }
@@ -206,13 +220,14 @@ public class TestNameNodeProvidedImplementation {
     cluster.waitActive();
   }
 
-  @Test(timeout = 20000)
+  @Test(timeout=20000)
   public void testLoadImage() throws Exception {
     final long seed = r.nextLong();
     LOG.info("NAMEPATH: " + NAMEPATH);
     createImage(new RandomTreeWalk(seed), NNDIRPATH, FixedBlockResolver.class);
-    startCluster(NNDIRPATH, 0, new StorageType[] {StorageType.PROVIDED},
-        null, false);
+    startCluster(NNDIRPATH, 0,
+        new StorageType[] {StorageType.PROVIDED, StorageType.DISK}, null,
+        false);
 
     FileSystem fs = cluster.getFileSystem();
     for (TreePath e : new RandomTreeWalk(seed)) {
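
providedDataSize accumulates the length of every file the test writes into the image staging tree, giving the later capacity assertions a ground truth to compare against. The same number can be recomputed by walking the directory afterwards; a small self-contained sketch (plain java.io, no HDFS API):

import java.io.File;

public class ProvidedSizeSketch {

  // Total bytes of all regular files under dir: the quantity the test
  // tracks incrementally in providedDataSize.
  public static long totalBytes(File dir) {
    File[] children = dir.listFiles();
    if (children == null) {
      return dir.length(); // a regular file (or an unreadable directory)
    }
    long total = 0;
    for (File f : children) {
      total += totalBytes(f);
    }
    return total;
  }
}
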
@@ -231,14 +246,83 @@ public class TestNameNodeProvidedImplementation {
     }
   }
 
-  @Test(timeout=20000)
-  public void testBlockLoad() throws Exception {
+  @Test(timeout=30000)
+  public void testProvidedReporting() throws Exception {
     conf.setClass(ImageWriter.Options.UGI_CLASS,
         SingleUGIResolver.class, UGIResolver.class);
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
         FixedBlockResolver.class);
-    startCluster(NNDIRPATH, 1, new StorageType[] {StorageType.PROVIDED},
-        null, false);
+    int numDatanodes = 10;
+    startCluster(NNDIRPATH, numDatanodes,
+        new StorageType[] {StorageType.PROVIDED, StorageType.DISK}, null,
+        false);
+    long diskCapacity = 1000;
+    // set the DISK capacity for testing
+    for (DataNode dn: cluster.getDataNodes()) {
+      for (FsVolumeSpi ref : dn.getFSDataset().getFsVolumeReferences()) {
+        if (ref.getStorageType() == StorageType.DISK) {
+          ((FsVolumeImpl) ref).setCapacityForTesting(diskCapacity);
+        }
+      }
+    }
+    // trigger heartbeats to update the capacities
+    cluster.triggerHeartbeats();
+    Thread.sleep(10000);
+    // verify namenode stats
+    FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
+    DatanodeStatistics dnStats = namesystem.getBlockManager()
+        .getDatanodeManager().getDatanodeStatistics();
+
+    // total capacity reported includes only the local volumes and
+    // not the provided capacity
+    assertEquals(diskCapacity * numDatanodes, namesystem.getTotal());
+
+    // total storage used should be equal to the totalProvidedStorage
+    // no capacity should be remaining!
+    assertEquals(providedDataSize, dnStats.getProvidedCapacity());
+    assertEquals(providedDataSize, namesystem.getProvidedCapacityTotal());
+    assertEquals(providedDataSize, dnStats.getStorageTypeStats()
+        .get(StorageType.PROVIDED).getCapacityTotal());
+    assertEquals(providedDataSize, dnStats.getStorageTypeStats()
+        .get(StorageType.PROVIDED).getCapacityUsed());
+
+    // verify datanode stats
+    for (DataNode dn: cluster.getDataNodes()) {
+      for (StorageReport report : dn.getFSDataset()
+          .getStorageReports(namesystem.getBlockPoolId())) {
+        if (report.getStorage().getStorageType() == StorageType.PROVIDED) {
+          assertEquals(providedDataSize, report.getCapacity());
+          assertEquals(providedDataSize, report.getDfsUsed());
+          assertEquals(providedDataSize, report.getBlockPoolUsed());
+          assertEquals(0, report.getNonDfsUsed());
+          assertEquals(0, report.getRemaining());
+        }
+      }
+    }
+
+    DFSClient client = new DFSClient(new InetSocketAddress("localhost",
+        cluster.getNameNodePort()), cluster.getConfiguration(0));
+    BlockManager bm = namesystem.getBlockManager();
+    for (int fileId = 0; fileId < numFiles; fileId++) {
+      String filename = "/" + filePrefix + fileId + fileSuffix;
+      LocatedBlocks locatedBlocks = client.getLocatedBlocks(
+          filename, 0, baseFileLen);
+      for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
+        BlockInfo blockInfo =
+            bm.getStoredBlock(locatedBlock.getBlock().getLocalBlock());
+        Iterator<DatanodeStorageInfo> storagesItr =
+            blockInfo.getStorageInfos();
+
+        DatanodeStorageInfo info = storagesItr.next();
+        assertEquals(StorageType.PROVIDED, info.getStorageType());
+        DatanodeDescriptor dnDesc = info.getDatanodeDescriptor();
+        // check the locations that are returned by FSCK have the right name
+        assertEquals(ProvidedStorageMap.ProvidedDescriptor.NETWORK_LOCATION
+            + PATH_SEPARATOR_STR + ProvidedStorageMap.ProvidedDescriptor.NAME,
+            NodeBase.getPath(dnDesc));
+        // no DatanodeStorageInfos should remain
+        assertFalse(storagesItr.hasNext());
+      }
+    }
   }
 
 @@ -250,8 +334,8 @@ public class TestNameNodeProvidedImplementation {
     // make the last Datanode with only DISK
     startCluster(NNDIRPATH, 3, null,
         new StorageType[][] {
-            {StorageType.PROVIDED},
-            {StorageType.PROVIDED},
+            {StorageType.PROVIDED, StorageType.DISK},
+            {StorageType.PROVIDED, StorageType.DISK},
             {StorageType.DISK}},
         false);
 
     // wait for the replication to finish
@@ -308,8 +392,9 @@ public class TestNameNodeProvidedImplementation {
         FsUGIResolver.class, UGIResolver.class);
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
         FixedBlockResolver.class);
-    startCluster(NNDIRPATH, 3, new StorageType[] {StorageType.PROVIDED},
-        null, false);
+    startCluster(NNDIRPATH, 3,
+        new StorageType[] {StorageType.PROVIDED, StorageType.DISK}, null,
+        false);
     FileSystem fs = cluster.getFileSystem();
     Thread.sleep(2000);
     int count = 0;
@@ -371,7 +456,7 @@ public class TestNameNodeProvidedImplementation {
     return fs.getFileBlockLocations(path, 0, fileLen);
   }
 
-  @Test
+  @Test(timeout=30000)
   public void testClusterWithEmptyImage() throws IOException {
     // start a cluster with 2 datanodes without any provided storage
     startCluster(NNDIRPATH, 2, null,
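
Taken together, the testProvidedReporting assertions encode a simple capacity model: PROVIDED storage is counted once for the whole cluster rather than per datanode, it always reports as exactly full (capacity == used, remaining == 0), and it is excluded from the cluster's local total. Spelled out as plain arithmetic in a sketch; nothing below is an HDFS class, the names are illustrative only:

public class ProvidedCapacityModel {

  long diskCapacityPerNode; // e.g. 1000, set via setCapacityForTesting
  int numDatanodes;         // e.g. 10
  long providedDataSize;    // total bytes of the provided files

  // Cluster total counts only local DISK volumes, not PROVIDED.
  long expectedClusterTotal() {
    return diskCapacityPerNode * numDatanodes;
  }

  // The PROVIDED storage is modeled as exactly full...
  long expectedProvidedCapacityAndUsed() {
    return providedDataSize;
  }

  // ...so nothing is remaining and nothing counts as non-DFS use.
  long expectedProvidedRemaining() {
    return 0;
  }
}

With diskCapacity = 1000 and numDatanodes = 10 as in the test, namesystem.getTotal() must come out to 10000 regardless of how much provided data exists.
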
@@ -404,7 +489,7 @@ public class TestNameNodeProvidedImplementation {
    * Tests setting replication of provided files.
    * @throws Exception
    */
-  @Test
+  @Test(timeout=30000)
   public void testSetReplicationForProvidedFiles() throws Exception {
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
         FixedBlockResolver.class);
@@ -441,14 +526,14 @@ public class TestNameNodeProvidedImplementation {
     getAndCheckBlockLocations(client, filename, newReplication);
   }
 
-  @Test
+  @Test(timeout=30000)
   public void testProvidedDatanodeFailures() throws Exception {
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
         FixedBlockResolver.class);
     startCluster(NNDIRPATH, 3, null,
         new StorageType[][] {
-            {StorageType.PROVIDED},
-            {StorageType.PROVIDED},
+            {StorageType.PROVIDED, StorageType.DISK},
+            {StorageType.PROVIDED, StorageType.DISK},
             {StorageType.DISK}},
         false);
 
@@ -511,7 +596,7 @@ public class TestNameNodeProvidedImplementation {
     // 2 Datanodes, 1 PROVIDED and other DISK
     startCluster(NNDIRPATH, 2, null,
         new StorageType[][] {
-            {StorageType.PROVIDED},
+            {StorageType.PROVIDED, StorageType.DISK},
             {StorageType.DISK}},
         false);
 
@@ -540,7 +625,7 @@ public class TestNameNodeProvidedImplementation {
     // 2 Datanodes, 1 PROVIDED and other DISK
     startCluster(NNDIRPATH, 2, null,
         new StorageType[][] {
-            {StorageType.PROVIDED},
+            {StorageType.PROVIDED, StorageType.DISK},
             {StorageType.DISK}},
         false);
 
@@ -570,7 +655,7 @@ public class TestNameNodeProvidedImplementation {
     }
   }
 
-  @Test
+  @Test(timeout=30000)
   public void testSetClusterID() throws Exception {
     String clusterID = "PROVIDED-CLUSTER";
     createImage(new FSTreeWalk(NAMEPATH, conf), NNDIRPATH,
@@ -578,7 +663,7 @@ public class TestNameNodeProvidedImplementation {
     // 2 Datanodes, 1 PROVIDED and other DISK
     startCluster(NNDIRPATH, 2, null,
         new StorageType[][] {
-            {StorageType.PROVIDED},
+            {StorageType.PROVIDED, StorageType.DISK},
             {StorageType.DISK}},
         false);
     NameNode nn = cluster.getNameNode();
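
The remaining hunks all make the same adjustment: since a PROVIDED volume now reports real, fully-used capacity with nothing remaining, a datanode serving it also needs a writable local volume, so every test cluster layout gains a DISK entry next to PROVIDED. The per-datanode shape these arrays express, mirrored in a standalone snippet (the constant name is illustrative; the values come straight from the hunks above):

import org.apache.hadoop.fs.StorageType;

public class StorageLayoutSketch {

  // One row per datanode, one entry per volume; this mirrors the arrays
  // passed to the startCluster test helper above.
  static final StorageType[][] STORAGES_PER_DATANODE = {
      {StorageType.PROVIDED, StorageType.DISK}, // DN0: provided + local
      {StorageType.PROVIDED, StorageType.DISK}, // DN1: provided + local
      {StorageType.DISK},                       // DN2: local disk only
  };
}
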
[dfshealth.html hunk, recovered from stripped markup: the NameNode overview table gains a "Configured Remote Capacity" row backed by the new ProvidedCapacity bean property]
 Configured Capacity:{Total|fmt_bytes}
+Configured Remote Capacity:{ProvidedCapacity|fmt_bytes}
 DFS Used:{Used|fmt_bytes} ({PercentUsed|fmt_percentage})
 Non DFS Used:{NonDfsUsedSpace|fmt_bytes}
 DFS Remaining:{Free|fmt_bytes} ({PercentRemaining|fmt_percentage})

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org