From: arp@apache.org
To: hdfs-commits@hadoop.apache.org
Date: Sat, 23 Aug 2014 06:22:15 -0000
Subject: svn commit: r1619972 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ src/test/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/hadoop/hdfs/server/datano...

Author: arp
Date: Sat Aug 23 06:22:15 2014
New Revision: 1619972

URL: http://svn.apache.org/r1619972
Log:
HDFS-6899. Undo merge of r1619971
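For context: r1619971 had merged HDFS-6899 into branch-2, letting tests vary the number of storage directories per DataNode and cap each volume's visible capacity. Under the now-reverted builder API, a test could have been written roughly as below; the option names come from the diff that follows, but the snippet itself is illustrative rather than taken from any committed test.

    // Hypothetical pre-revert usage: two volumes per DataNode,
    // each reporting at most 1 MB of capacity.
    long capacity = 1024L * 1024L;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .storagesPerDatanode(2)
        .storageCapacities(new long[] { capacity, capacity })
        .build();

The revert removes storagesPerDatanode() and both storageCapacities() overloads and restores the fixed DIRS_PER_DATANODE constant.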

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1619972&r1=1619971&r2=1619972&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sat Aug 23 06:22:15 2014
@@ -159,9 +159,6 @@ Release 2.6.0 - UNRELEASED
     HDFS-6758. block writer should pass the expected block size to
     DataXceiverServer. (Arpit Agarwal)
 
-    HDFS-6899. Allow changing MiniDFSCluster volumes per DN and capacity
-    per volume. (Arpit Agarwal)
-
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java?rev=1619972&r1=1619971&r2=1619972&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java Sat Aug 23 06:22:15 2014
@@ -29,7 +29,6 @@ import java.util.concurrent.ThreadFactor
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
@@ -50,8 +49,7 @@ import com.google.common.util.concurrent
  * It uses the {@link FsDatasetImpl} object for synchronization.
  */
 @InterfaceAudience.Private
-@VisibleForTesting
-public class FsVolumeImpl implements FsVolumeSpi {
+class FsVolumeImpl implements FsVolumeSpi {
   private final FsDatasetImpl dataset;
   private final String storageID;
   private final StorageType storageType;
@@ -60,12 +58,6 @@ public class FsVolumeImpl implements FsV
   private final File currentDir;    // <StorageDirectory>/current
   private final DF usage;
   private final long reserved;
-
-  // Capacity configured. This is useful when we want to
-  // limit the visible capacity for tests. If negative, then we just
-  // query from the filesystem.
-  protected long configuredCapacity;
-
   /**
    * Per-volume worker pool that processes new blocks to cache.
    * The maximum number of workers per volume is bounded (configurable via
@@ -85,26 +77,20 @@ public class FsVolumeImpl implements FsV
     File parent = currentDir.getParentFile();
     this.usage = new DF(parent, conf);
     this.storageType = storageType;
-    this.configuredCapacity = -1;
-    cacheExecutor = initializeCacheExecutor(parent);
-  }
-
-  protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
     final int maxNumThreads = dataset.datanode.getConf().getInt(
         DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_KEY,
-        DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT);
-
+        DFSConfigKeys.DFS_DATANODE_FSDATASETCACHE_MAX_THREADS_PER_VOLUME_DEFAULT
+        );
     ThreadFactory workerFactory = new ThreadFactoryBuilder()
         .setDaemon(true)
         .setNameFormat("FsVolumeImplWorker-" + parent.toString() + "-%d")
         .build();
-    ThreadPoolExecutor executor = new ThreadPoolExecutor(
+    cacheExecutor = new ThreadPoolExecutor(
         1, maxNumThreads,
         60, TimeUnit.SECONDS,
         new LinkedBlockingQueue<Runnable>(),
         workerFactory);
-    executor.allowCoreThreadTimeOut(true);
-    return executor;
+    cacheExecutor.allowCoreThreadTimeOut(true);
   }
 
   File getCurrentDir() {
@@ -143,24 +129,9 @@ public class FsVolumeImpl implements FsV
    * reserved capacity.
    * @return the unreserved number of bytes left in this filesystem. May be zero.
    */
-  @VisibleForTesting
-  public long getCapacity() {
-    if (configuredCapacity < 0) {
-      long remaining = usage.getCapacity() - reserved;
-      return remaining > 0 ? remaining : 0;
-    }
-
-    return configuredCapacity;
-  }
-
-  /**
-   * This function MUST NOT be used outside of tests.
-   *
-   * @param capacity
-   */
-  @VisibleForTesting
-  public void setCapacityForTesting(long capacity) {
-    this.configuredCapacity = capacity;
+  long getCapacity() {
+    long remaining = usage.getCapacity() - reserved;
+    return remaining > 0 ? remaining : 0;
   }
 
   @Override
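The deleted setCapacityForTesting() hook is what allowed tests to shrink a real volume's apparent size; after the revert, getCapacity() always derives capacity from the filesystem (the DF-reported size minus the reserved bytes, clamped at zero). Dropping the hook also lets FsVolumeImpl return to package-private visibility, hence the removed @VisibleForTesting annotations. Before this revert a test could make a live volume look nearly full with something like the following; the cast and method come from the lines deleted here and in MiniDFSCluster below, but the exact snippet is illustrative.

    // Pre-revert test hook (removed by this commit): make the first
    // volume of a DataNode report only 1 MB of capacity.
    FsVolumeImpl volume =
        (FsVolumeImpl) dn.getFSDataset().getVolumes().get(0);
    volume.setCapacityForTesting(1024L * 1024L);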

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1619972&r1=1619971&r2=1619972&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Sat Aug 23 06:22:15 2014
@@ -55,6 +55,7 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.nio.channels.FileChannel;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -90,9 +91,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -133,15 +132,11 @@ public class MiniDFSCluster {
   public static final String DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY
       = DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing";
 
-  // Changing this default may break some tests that assume it is 2.
-  private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
+  // Changing this value may break some tests that assume it is 2.
+  public static final int DIRS_PER_DATANODE = 2;
 
   static { DefaultMetricsSystem.setMiniClusterMode(true); }
 
-  public int getStoragesPerDatanode() {
-    return storagesPerDatanode;
-  }
-
   /**
    * Class to construct instances of MiniDFSClusters with specific options.
    */
@@ -151,8 +146,6 @@ public class MiniDFSCluster {
     private final Configuration conf;
     private int numDataNodes = 1;
     private StorageType[][] storageTypes = null;
-    private StorageType[] storageTypes1D = null;
-    private int storagesPerDatanode = DEFAULT_STORAGES_PER_DATANODE;
     private boolean format = true;
     private boolean manageNameDfsDirs = true;
     private boolean manageNameDfsSharedDirs = true;
@@ -163,8 +156,6 @@ public class MiniDFSCluster {
     private String[] racks = null;
     private String [] hosts = null;
    private long [] simulatedCapacities = null;
-    private long [][] storageCapacities = null;
-    private long [] storageCapacities1D = null;
     private String clusterId = null;
     private boolean waitSafeMode = true;
     private boolean setupHostsFile = false;
@@ -203,20 +194,16 @@ public class MiniDFSCluster {
     }
 
     /**
-     * Default: DEFAULT_STORAGES_PER_DATANODE
-     */
-    public Builder storagesPerDatanode(int numStorages) {
-      this.storagesPerDatanode = numStorages;
-      return this;
-    }
-
-    /**
      * Set the same storage type configuration for each datanode.
      * If storageTypes is uninitialized or passed null then
      * StorageType.DEFAULT is used.
      */
     public Builder storageTypes(StorageType[] types) {
-      this.storageTypes1D = types;
+      assert types.length == DIRS_PER_DATANODE;
+      this.storageTypes = new StorageType[numDataNodes][types.length];
+      for (int i = 0; i < numDataNodes; ++i) {
+        this.storageTypes[i] = types;
+      }
       return this;
     }
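With the revert, storageTypes(StorageType[]) again expands a single per-volume array across every DataNode and asserts that its length equals DIRS_PER_DATANODE (2). Because the expansion reads numDataNodes at call time, numDataNodes(...) must be set before storageTypes(...). A minimal post-revert usage sketch (the storage types are chosen for illustration):

    // Two volumes per DataNode (the fixed default): first DISK, then SSD,
    // replicated across all three DataNodes.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .storageTypes(new StorageType[] { StorageType.DISK, StorageType.SSD })
        .build();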
@@ -231,26 +218,6 @@ public class MiniDFSCluster {
     }
 
     /**
-     * Set the same storage capacity configuration for each datanode.
-     * If storageTypes is uninitialized or passed null then
-     * StorageType.DEFAULT is used.
-     */
-    public Builder storageCapacities(long[] capacities) {
-      this.storageCapacities1D = capacities;
-      return this;
-    }
-
-    /**
-     * Set custom storage capacity configuration for each datanode.
-     * If storageCapacities is uninitialized or passed null then
-     * capacity is limited by available disk space.
-     */
-    public Builder storageCapacities(long[][] capacities) {
-      this.storageCapacities = capacities;
-      return this;
-    }
-
-    /**
      * Default: true
      */
     public Builder format(boolean val) {
@@ -323,11 +290,6 @@ public class MiniDFSCluster {
     }
 
     /**
-     * Use SimulatedFSDataset and limit the capacity of each DN per
-     * the values passed in val.
-     *
-     * For limiting the capacity of volumes with real storage, see
-     * {@link FsVolumeImpl#setCapacityForTesting}
      * Default: null
      */
     public Builder simulatedCapacities(long[] val) {
@@ -430,28 +392,7 @@ public class MiniDFSCluster {
     LOG.info("starting cluster: numNameNodes=" + numNameNodes
         + ", numDataNodes=" + builder.numDataNodes);
     nameNodes = new NameNodeInfo[numNameNodes];
-    this.storagesPerDatanode = builder.storagesPerDatanode;
-
-    // Duplicate the storageType setting for each DN.
-    if (builder.storageTypes == null && builder.storageTypes1D != null) {
-      assert builder.storageTypes1D.length == storagesPerDatanode;
-      builder.storageTypes = new StorageType[builder.numDataNodes][storagesPerDatanode];
-      for (int i = 0; i < builder.numDataNodes; ++i) {
-        builder.storageTypes[i] = builder.storageTypes1D;
-      }
-    }
-
-    // Duplicate the storageCapacity setting for each DN.
-    if (builder.storageCapacities == null && builder.storageCapacities1D != null) {
-      assert builder.storageCapacities1D.length == storagesPerDatanode;
-      builder.storageCapacities = new long[builder.numDataNodes][storagesPerDatanode];
-
-      for (int i = 0; i < builder.numDataNodes; ++i) {
-        builder.storageCapacities[i] = builder.storageCapacities1D;
-      }
-    }
-
     initMiniDFSCluster(builder.conf,
                        builder.numDataNodes,
                        builder.storageTypes,
@@ -464,7 +405,6 @@ public class MiniDFSCluster {
                        builder.dnOption,
                        builder.racks,
                        builder.hosts,
-                       builder.storageCapacities,
                        builder.simulatedCapacities,
                        builder.clusterId,
                        builder.waitSafeMode,
@@ -507,7 +447,6 @@ public class MiniDFSCluster {
   private boolean waitSafeMode = true;
   private boolean federation;
   private boolean checkExitOnShutdown = true;
-  protected final int storagesPerDatanode;
 
   /**
    * A unique instance identifier for the cluster. This
@@ -546,7 +485,6 @@ public class MiniDFSCluster {
    */
   public MiniDFSCluster() {
     nameNodes = new NameNodeInfo[0]; // No namenode in the cluster
-    storagesPerDatanode = DEFAULT_STORAGES_PER_DATANODE;
     synchronized (MiniDFSCluster.class) {
       instanceId = instanceCount++;
     }
   }
@@ -721,12 +659,11 @@ public class MiniDFSCluster {
                         String[] racks, String hosts[],
                         long[] simulatedCapacities) throws IOException {
     this.nameNodes = new NameNodeInfo[1]; // Single namenode in the cluster
-    this.storagesPerDatanode = DEFAULT_STORAGES_PER_DATANODE;
     initMiniDFSCluster(conf, numDataNodes, null, format,
-        manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
-        operation, null, racks, hosts,
-        null, simulatedCapacities, null, true, false,
-        MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false, null);
+                       manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
+                       operation, null, racks, hosts,
+                       simulatedCapacities, null, true, false,
+                       MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false, null);
   }
 
   private void initMiniDFSCluster(
@@ -735,8 +672,7 @@ public class MiniDFSCluster {
       boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
       boolean manageDataDfsDirs, StartupOption startOpt,
       StartupOption dnStartOpt, String[] racks,
-      String[] hosts,
-      long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
+      String[] hosts, long[] simulatedCapacities, String clusterId,
       boolean waitSafeMode, boolean setupHostsFile,
       MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
       boolean checkDataNodeAddrConfig,
@@ -810,7 +746,7 @@ public class MiniDFSCluster {
     // Start the DataNodes
     startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
         dnStartOpt != null ? dnStartOpt : startOpt,
-        racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
+        racks, hosts, simulatedCapacities, setupHostsFile,
         checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
     waitClusterUp();
     //make sure ProxyUsers uses the latest conf
@@ -1185,8 +1121,8 @@ public class MiniDFSCluster {
   String makeDataNodeDirs(int dnIndex, StorageType[] storageTypes) throws IOException {
     StringBuilder sb = new StringBuilder();
-    assert storageTypes == null || storageTypes.length == storagesPerDatanode;
-    for (int j = 0; j < storagesPerDatanode; ++j) {
+    assert storageTypes == null || storageTypes.length == DIRS_PER_DATANODE;
+    for (int j = 0; j < DIRS_PER_DATANODE; ++j) {
       File dir = getInstanceStorageDir(dnIndex, j);
       dir.mkdirs();
       if (!dir.isDirectory()) {
@@ -1262,7 +1198,7 @@ public class MiniDFSCluster {
                              long[] simulatedCapacities,
                              boolean setupHostsFile) throws IOException {
     startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
-        null, simulatedCapacities, setupHostsFile, false, false, null);
+        simulatedCapacities, setupHostsFile, false, false, null);
   }
 
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
@@ -1272,7 +1208,7 @@ public class MiniDFSCluster {
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig) throws IOException {
     startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
-        null, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null);
+        simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null);
   }
 
   /**
@@ -1306,15 +1242,12 @@ public class MiniDFSCluster {
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks,
      String[] hosts,
-      long[][] storageCapacities,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig,
       Configuration[] dnConfOverlays) throws IOException {
-    assert storageCapacities == null || simulatedCapacities == null;
     assert storageTypes == null || storageTypes.length == numDataNodes;
-    assert storageCapacities == null || storageCapacities.length == numDataNodes;
 
     if (operation == StartupOption.RECOVER) {
       return;
     }
@@ -1367,7 +1300,7 @@ public class MiniDFSCluster {
         operation != StartupOption.ROLLBACK) ?
         null : new String[] {operation.getName()};
 
-    DataNode[] dns = new DataNode[numDataNodes];
+
     for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
       Configuration dnConf = new HdfsConfiguration(conf);
       if (dnConfOverlays != null) {
@@ -1458,24 +1391,10 @@ public class MiniDFSCluster {
       dn.runDatanodeDaemon();
       dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs,
                                            secureResources, dn.getIpcPort()));
-      dns[i - curDatanodesNum] = dn;
     }
     curDatanodesNum += numDataNodes;
     this.numDataNodes += numDataNodes;
     waitActive();
-
-    if (storageCapacities != null) {
-      for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; ++i) {
-        List<? extends FsVolumeSpi> volumes = dns[i].getFSDataset().getVolumes();
-        assert storageCapacities[i].length == storagesPerDatanode;
-        assert volumes.size() == storagesPerDatanode;
-
-        for (int j = 0; j < volumes.size(); ++j) {
-          FsVolumeImpl volume = (FsVolumeImpl) volumes.get(j);
-          volume.setCapacityForTesting(storageCapacities[i][j]);
-        }
-      }
-    }
   }
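The revert also drops the asserts that kept storageCapacities and simulatedCapacities mutually exclusive, together with the post-waitActive() loop that pushed per-volume capacities into each newly registered DataNode. Tests that need bounded DataNode capacity are back to the simulated-dataset path through the retained simulatedCapacities() option; a sketch, assuming the usual SimulatedFSDataset test setup:

    // Bound capacity with SimulatedFSDataset instead: two DataNodes,
    // 2 GB of simulated capacity each.
    SimulatedFSDataset.setFactory(conf);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2)
        .simulatedCapacities(new long[] { 2L << 30, 2L << 30 })
        .build();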

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java?rev=1619972&r1=1619971&r2=1619972&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java Sat Aug 23 06:22:15 2014
@@ -22,7 +22,6 @@ import static org.apache.hadoop.hdfs.ser
 import java.io.File;
 import java.io.IOException;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -32,8 +31,6 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -55,15 +52,11 @@ public class MiniDFSClusterWithNodeGroup
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] nodeGroups, String[] hosts,
-      long[][] storageCapacities,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig) throws IOException {
-
-    assert storageCapacities == null || simulatedCapacities == null;
     assert storageTypes == null || storageTypes.length == numDataNodes;
-    assert storageCapacities == null || storageCapacities.length == numDataNodes;
 
     if (operation == StartupOption.RECOVER) {
       return;
     }
@@ -116,7 +109,6 @@ public class MiniDFSClusterWithNodeGroup
         operation != StartupOption.ROLLBACK) ?
         null : new String[] {operation.getName()};
 
-    DataNode[] dns = new DataNode[numDataNodes];
     for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
       Configuration dnConf = new HdfsConfiguration(conf);
       // Set up datanode address
@@ -189,23 +181,10 @@ public class MiniDFSClusterWithNodeGroup
       }
       dn.runDatanodeDaemon();
       dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
-      dns[i - curDatanodesNum] = dn;
     }
     curDatanodesNum += numDataNodes;
     this.numDataNodes += numDataNodes;
     waitActive();
-
-    if (storageCapacities != null) {
-      for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; ++i) {
-        List<? extends FsVolumeSpi> volumes = dns[i].getFSDataset().getVolumes();
-        assert volumes.size() == storagesPerDatanode;
-
-        for (int j = 0; j < volumes.size(); ++j) {
-          FsVolumeImpl volume = (FsVolumeImpl) volumes.get(j);
-          volume.setCapacityForTesting(storageCapacities[i][j]);
-        }
-      }
-    }
   }
 
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
@@ -214,7 +193,7 @@ public class MiniDFSClusterWithNodeGroup
       long[] simulatedCapacities,
       boolean setupHostsFile) throws IOException {
     startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, nodeGroups,
-        hosts, null, simulatedCapacities, setupHostsFile, false, false);
+        hosts, simulatedCapacities, setupHostsFile, false, false);
   }
 
   public void startDataNodes(Configuration conf, int numDataNodes,
@@ -230,14 +209,13 @@ public class MiniDFSClusterWithNodeGroup
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
       String[] racks, String[] hosts,
-      long[][] storageCapacities,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig,
       Configuration[] dnConfOverlays) throws IOException {
     startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks,
-        NODE_GROUPS, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
+        NODE_GROUPS, hosts, simulatedCapacities, setupHostsFile,
         checkDataNodeAddrConfig, checkDataNodeHostConfig);
   }

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1619972&r1=1619971&r2=1619972&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java Sat Aug 23 06:22:15 2014
@@ -213,7 +213,7 @@ public class TestSafeMode {
       @Override
       public Boolean get() {
         return getLongCounter("StorageBlockReportOps", getMetrics(NN_METRICS)) ==
-          cluster.getStoragesPerDatanode();
+          MiniDFSCluster.DIRS_PER_DATANODE;
       }
     }, 10, 10000);
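The TestSafeMode change restores the expectation that StorageBlockReportOps settles at the fixed per-DataNode storage count: every storage directory sends its own block report, so one DataNode with DIRS_PER_DATANODE == 2 produces two ops. A standalone assertion equivalent to the wait condition above could look like this (a sketch using the MetricsAsserts helpers the test already imports):

    // Each DataNode sends one block report per storage directory.
    long expected =
        (long) cluster.getDataNodes().size() * MiniDFSCluster.DIRS_PER_DATANODE;
    assertEquals(expected,
        getLongCounter("StorageBlockReportOps", getMetrics(NN_METRICS)));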

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java?rev=1619972&r1=1619971&r2=1619972&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java Sat Aug 23 06:22:15 2014
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
@@ -105,7 +106,7 @@ public class TestBlockHasMultipleReplica
     DataNode dn = cluster.getDataNodes().get(0);
     DatanodeRegistration dnReg = dn.getDNRegistrationForBP(bpid);
     StorageBlockReport reports[] =
-        new StorageBlockReport[cluster.getStoragesPerDatanode()];
+        new StorageBlockReport[MiniDFSCluster.DIRS_PER_DATANODE];
 
     ArrayList<Block> blocks = new ArrayList<Block>();
 
@@ -113,7 +114,7 @@ public class TestBlockHasMultipleReplica
       blocks.add(locatedBlock.getBlock().getLocalBlock());
     }
 
-    for (int i = 0; i < cluster.getStoragesPerDatanode(); ++i) {
+    for (int i = 0; i < MiniDFSCluster.DIRS_PER_DATANODE; ++i) {
       BlockListAsLongs bll = new BlockListAsLongs(blocks, null);
       FsVolumeSpi v = dn.getFSDataset().getVolumes().get(i);
       DatanodeStorage dns = new DatanodeStorage(v.getStorageID());
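In that loop the test fabricates one report per storage directory, reusing the same block list under each storage ID so the NameNode sees every block replicated on both storages of a single DataNode. The hunk above is truncated before the loop body completes; a plausible completion is sketched below, where the reports[i] line is an assumption about the surrounding test code rather than part of this diff.

    for (int i = 0; i < MiniDFSCluster.DIRS_PER_DATANODE; ++i) {
      BlockListAsLongs bll = new BlockListAsLongs(blocks, null);
      FsVolumeSpi v = dn.getFSDataset().getVolumes().get(i);
      DatanodeStorage dns = new DatanodeStorage(v.getStorageID());
      // Assumed continuation: wrap the same block list in a report
      // for this storage, so every storage claims the same replicas.
      reports[i] = new StorageBlockReport(dns, bll.getBlockListAsLongs());
    }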

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java?rev=1619972&r1=1619971&r2=1619972&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java Sat Aug 23 06:22:15 2014
@@ -130,7 +130,7 @@ public class TestDnRespectsBlockReportSp
     ArgumentCaptor<StorageBlockReport[]> captor =
         ArgumentCaptor.forClass(StorageBlockReport[].class);
 
-    Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
+    Mockito.verify(nnSpy, times(MiniDFSCluster.DIRS_PER_DATANODE)).blockReport(
         any(DatanodeRegistration.class),
         anyString(),
         captor.capture());
@@ -167,7 +167,7 @@ public class TestDnRespectsBlockReportSp
         anyString(),
         captor.capture());
 
-    verifyCapturedArguments(captor, cluster.getStoragesPerDatanode(), BLOCKS_IN_FILE);
+    verifyCapturedArguments(captor, MiniDFSCluster.DIRS_PER_DATANODE, BLOCKS_IN_FILE);
   }
 
   /**
@@ -194,7 +194,7 @@ public class TestDnRespectsBlockReportSp
     ArgumentCaptor<StorageBlockReport[]> captor =
         ArgumentCaptor.forClass(StorageBlockReport[].class);
 
-    Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
+    Mockito.verify(nnSpy, times(MiniDFSCluster.DIRS_PER_DATANODE)).blockReport(
         any(DatanodeRegistration.class),
         anyString(),
         captor.capture());

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1619972&r1=1619971&r2=1619972&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Sat Aug 23 06:22:15 2014
@@ -444,7 +444,7 @@ public class TestNameNodeMetrics {
     assertCounter("SyncsNumOps", 1L, rb);
     // Each datanode reports in when the cluster comes up
     assertCounter("BlockReportNumOps",
-        (long)DATANODE_COUNT * cluster.getStoragesPerDatanode(), rb);
+        (long)DATANODE_COUNT*MiniDFSCluster.DIRS_PER_DATANODE, rb);
 
     // Sleep for an interval+slop to let the percentiles rollover
     Thread.sleep((PERCENTILES_INTERVAL+1)*1000);