From: arp@apache.org
To: common-commits@hadoop.apache.org
Date: Fri, 11 Mar 2016 22:22:44 -0000
Message-Id: <6dc290b920d84c2dabc14be10100779a@git.apache.org>
Subject: [26/50] [abbrv] hadoop git commit: HDFS-1477. Support reconfiguring
 dfs.heartbeat.interval and dfs.namenode.heartbeat.recheck-interval without
 NN restart. (Contributed by Xiaobing Zhou)

HDFS-1477. Support reconfiguring dfs.heartbeat.interval and
dfs.namenode.heartbeat.recheck-interval without NN restart. (Contributed by
Xiaobing Zhou)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e01c6ea6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e01c6ea6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e01c6ea6

Branch: refs/heads/HDFS-7240
Commit: e01c6ea688e62f25c4310e771a0cd85b53a5fb87
Parents: adf1cdf
Author: Arpit Agarwal <arp@apache.org>
Authored: Thu Mar 10 19:03:55 2016 -0800
Committer: Arpit Agarwal <arp@apache.org>
Committed: Thu Mar 10 19:03:55 2016 -0800

----------------------------------------------------------------------
 .../server/blockmanagement/BlockManager.java    |   3 +-
 .../server/blockmanagement/DatanodeManager.java |  44 +++++-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   1 +
 .../hadoop/hdfs/server/namenode/NameNode.java   | 103 ++++++++++++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  28 ++--
 .../hdfs/server/namenode/NamenodeFsck.java      |   2 +-
 .../TestComputeInvalidateWork.java              |   2 +-
 .../namenode/TestNameNodeReconfigure.java       | 126 +++++++++++++++++
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  | 134 +++++++++++--------
 9 files changed, 357 insertions(+), 86 deletions(-)
----------------------------------------------------------------------
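The change plugs the NameNode into the same dfsadmin-driven reconfiguration
workflow the DataNode already supports. Roughly, an operator would drive it
like this once the patch lands (nn-host:8020 is a placeholder for the
NameNode RPC address):

    # list the properties the NameNode will accept at runtime
    hdfs dfsadmin -reconfig namenode nn-host:8020 properties

    # edit dfs.heartbeat.interval and/or
    # dfs.namenode.heartbeat.recheck-interval in hdfs-site.xml,
    # then kick off an asynchronous reload
    hdfs dfsadmin -reconfig namenode nn-host:8020 start

    # poll until the background task reports it has finished
    hdfs dfsadmin -reconfig namenode nn-host:8020 status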
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e01c6ea6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f12ea1b..6ed102c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -332,7 +332,8 @@ public class BlockManager implements BlockStatsMXBean {
         DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
         DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
     invalidateBlocks = new InvalidateBlocks(
-        datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs);
+        datanodeManager.getBlockInvalidateLimit(),
+        startupDelayBlockDeletionInMs);
 
     // Compute the map capacity by allocating 2% of total memory
     blocksMap = new BlocksMap(

----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e01c6ea6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 3072fc0..53c7c16 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.util.Time.monotonicNow;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.net.InetAddresses;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -70,6 +71,8 @@ public class DatanodeManager {
   private final HeartbeatManager heartbeatManager;
   private final FSClusterStats fsClusterStats;
 
+  private volatile long heartbeatIntervalSeconds;
+  private volatile int heartbeatRecheckInterval;
   /**
    * Stores the datanode -> block map.
    * <p>
@@ -113,7 +116,7 @@ public class DatanodeManager {
 
   /** The period to wait for datanode heartbeat.*/
   private long heartbeatExpireInterval;
   /** Ask Datanode only up to this many blocks to delete. */
-  final int blockInvalidateLimit;
+  private volatile int blockInvalidateLimit;
   /** The interval for judging stale DataNodes for read/write */
   private final long staleInterval;
@@ -227,10 +230,10 @@ public class DatanodeManager {
       dnsToSwitchMapping.resolve(locations);
     }
 
-    final long heartbeatIntervalSeconds = conf.getLong(
+    heartbeatIntervalSeconds = conf.getLong(
         DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
         DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
-    final int heartbeatRecheckInterval = conf.getInt(
+    heartbeatRecheckInterval = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
     this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
@@ -348,6 +351,10 @@ public class DatanodeManager {
     return fsClusterStats;
   }
 
+  int getBlockInvalidateLimit() {
+    return blockInvalidateLimit;
+  }
+
   /** @return the datanode statistics. */
   public DatanodeStatistics getDatanodeStatistics() {
     return heartbeatManager;
@@ -1103,6 +1110,14 @@ public class DatanodeManager {
     return staleInterval;
   }
 
+  public long getHeartbeatInterval() {
+    return this.heartbeatIntervalSeconds;
+  }
+
+  public long getHeartbeatRecheckInterval() {
+    return this.heartbeatRecheckInterval;
+  }
+
   /**
    * Set the number of current stale DataNodes. The HeartbeatManager got this
    * number based on DataNodes' heartbeats.
@@ -1667,5 +1682,28 @@ public class DatanodeManager {
       }
     };
   }
+
+  public void setHeartbeatInterval(long intervalSeconds) {
+    setHeartbeatInterval(intervalSeconds,
+        this.heartbeatRecheckInterval);
+  }
+
+  public void setHeartbeatRecheckInterval(int recheckInterval) {
+    setHeartbeatInterval(this.heartbeatIntervalSeconds,
+        recheckInterval);
+  }
+
+  /**
+   * Set parameters derived from heartbeat interval.
+   */
+  private void setHeartbeatInterval(long intervalSeconds,
+      int recheckInterval) {
+    this.heartbeatIntervalSeconds = intervalSeconds;
+    this.heartbeatRecheckInterval = recheckInterval;
+    this.heartbeatExpireInterval = 2L * recheckInterval + 10 * 1000
+        * intervalSeconds;
+    this.blockInvalidateLimit = Math.max(20 * (int) (intervalSeconds),
+        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
+  }
 }
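The private setHeartbeatInterval above recomputes the two derived values
whenever either property changes. A small self-contained sketch of that
arithmetic with the stock defaults plugged in (the class and constant names
here are illustrative, not from the patch; 1000 is the default for
dfs.block.invalidate.limit):

    // Mirrors the math in DatanodeManager#setHeartbeatInterval(long, int).
    public class HeartbeatMath {
      static final int BLOCK_INVALIDATE_LIMIT_DEFAULT = 1000;

      // A datanode is declared dead after two recheck periods
      // plus ten missed heartbeats.
      static long expireIntervalMs(long heartbeatSec, int recheckMs) {
        return 2L * recheckMs + 10 * 1000 * heartbeatSec;
      }

      // Up to 20 invalidated blocks per heartbeat second,
      // floored at the configured default.
      static int blockInvalidateLimit(long heartbeatSec) {
        return Math.max(20 * (int) heartbeatSec, BLOCK_INVALIDATE_LIMIT_DEFAULT);
      }

      public static void main(String[] args) {
        // Defaults: 3 s heartbeats, 5 min (300000 ms) recheck.
        System.out.println(expireIntervalMs(3, 300000)); // 630000 ms = 10.5 min
        // After reconfiguring dfs.heartbeat.interval to 6:
        System.out.println(expireIntervalMs(6, 300000)); // 660000 ms = 11 min
        System.out.println(blockInvalidateLimit(6));     // max(120, 1000) = 1000
      }
    }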

----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e01c6ea6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 2362610..989afbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2995,6 +2995,7 @@ public class DataNode extends ReconfigurableBase
 
   @Override // ClientDatanodeProtocol & ReconfigurationProtocol
   public List<String> listReconfigurableProperties() throws IOException {
+    checkSuperuserPrivilege();
     return RECONFIGURABLE_PROPERTIES;
   }
 
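Note that the DataNode side now gates even the read-only listing behind
checkSuperuserPrivilege(). If I read that check right, a non-superuser call
such as

    hdfs dfsadmin -reconfig datanode dn-host:ipc-port properties

should now fail with an AccessControlException instead of returning the
list.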
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e01c6ea6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index e8900ee..148626b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -21,11 +21,14 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurableBase;
+import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -41,6 +44,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -93,6 +97,7 @@ import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
@@ -141,6 +146,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
 
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
@@ -182,7 +191,8 @@ import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
  * NameNode state, for example partial blocksMap etc.
  **********************************************************/
 @InterfaceAudience.Private
-public class NameNode implements NameNodeStatusMXBean {
+public class NameNode extends ReconfigurableBase implements
+    NameNodeStatusMXBean {
   static{
     HdfsConfiguration.init();
   }
@@ -260,7 +270,12 @@ public class NameNode implements NameNodeStatusMXBean {
   public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
     DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
-
+
+  /** A list of properties that are reconfigurable at runtime. */
+  static final List<String> RECONFIGURABLE_PROPERTIES = Collections
+      .unmodifiableList(Arrays.asList(DFS_HEARTBEAT_INTERVAL_KEY,
+          DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY));
+
   private static final String USAGE = "Usage: hdfs namenode ["
       + StartupOption.BACKUP.getName() + "] | \n\t["
       + StartupOption.CHECKPOINT.getName() + "] | \n\t["
@@ -329,7 +344,6 @@ public class NameNode implements NameNodeStatusMXBean {
       LogFactory.getLog("NameNodeMetricsLog");
 
   protected FSNamesystem namesystem;
-  protected final Configuration conf;
   protected final NamenodeRole role;
   private volatile HAState state;
   private final boolean haEnabled;
@@ -864,12 +878,12 @@ public class NameNode implements NameNodeStatusMXBean {
 
   protected NameNode(Configuration conf, NamenodeRole role)
       throws IOException {
+    super(conf);
     this.tracer = new Tracer.Builder("NameNode").
         conf(TraceUtils.wrapHadoopConf(NAMENODE_HTRACE_PREFIX, conf)).
         build();
     this.tracerConfigurationManager =
         new TracerConfigurationManager(NAMENODE_HTRACE_PREFIX, conf);
-    this.conf = conf;
     this.role = role;
     setClientNamenodeAddress(conf);
     String nsId = getNameServiceId(conf);
@@ -880,7 +894,7 @@ public class NameNode implements NameNodeStatusMXBean {
     this.haContext = createHAContext();
     try {
       initializeGenericKeys(conf, nsId, namenodeId);
-      initialize(conf);
+      initialize(getConf());
       try {
         haContext.writeLock();
         state.prepareToEnterState(haContext);
@@ -1804,7 +1818,7 @@ public class NameNode implements NameNodeStatusMXBean {
   public void startActiveServices() throws IOException {
     try {
       namesystem.startActiveServices();
-      startTrashEmptier(conf);
+      startTrashEmptier(getConf());
     } catch (Throwable t) {
       doImmediateShutdown(t);
     }
@@ -1825,7 +1839,7 @@ public class NameNode implements NameNodeStatusMXBean {
   @Override
   public void startStandbyServices() throws IOException {
     try {
-      namesystem.startStandbyServices(conf);
+      namesystem.startStandbyServices(getConf());
     } catch (Throwable t) {
       doImmediateShutdown(t);
     }
@@ -1902,8 +1916,8 @@ public class NameNode implements NameNodeStatusMXBean {
    */
   void checkHaStateChange(StateChangeRequestInfo req)
       throws AccessControlException {
-    boolean autoHaEnabled = conf.getBoolean(DFS_HA_AUTO_FAILOVER_ENABLED_KEY,
-        DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
+    boolean autoHaEnabled = getConf().getBoolean(
+        DFS_HA_AUTO_FAILOVER_ENABLED_KEY, DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT);
     switch (req.getSource()) {
     case REQUEST_BY_USER:
       if (autoHaEnabled) {
@@ -1930,4 +1944,75 @@ public class NameNode implements NameNodeStatusMXBean {
       break;
     }
   }
+
+  /*
+   * {@inheritDoc}
+   * */
+  @Override // ReconfigurableBase
+  public Collection<String> getReconfigurableProperties() {
+    return RECONFIGURABLE_PROPERTIES;
+  }
+
+  /*
+   * {@inheritDoc}
+   * */
+  @Override // ReconfigurableBase
+  protected String reconfigurePropertyImpl(String property, String newVal)
+      throws ReconfigurationException {
+    final DatanodeManager datanodeManager = namesystem.getBlockManager()
+        .getDatanodeManager();
+
+    switch (property) {
+    case DFS_HEARTBEAT_INTERVAL_KEY:
+      namesystem.writeLock();
+      try {
+        if (newVal == null) {
+          // set to default
+          datanodeManager.setHeartbeatInterval(DFS_HEARTBEAT_INTERVAL_DEFAULT);
+          return String.valueOf(DFS_HEARTBEAT_INTERVAL_DEFAULT);
+        } else {
+          datanodeManager.setHeartbeatInterval(Long.parseLong(newVal));
+          return String.valueOf(datanodeManager.getHeartbeatInterval());
+        }
+      } catch (NumberFormatException nfe) {
+        throw new ReconfigurationException(property, newVal, getConf().get(
+            property), nfe);
+      } finally {
+        namesystem.writeUnlock();
+        LOG.info("RECONFIGURE* changed heartbeatInterval to "
+            + datanodeManager.getHeartbeatInterval());
+      }
+    case DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY:
+      namesystem.writeLock();
+      try {
+        if (newVal == null) {
+          // set to default
+          datanodeManager
+              .setHeartbeatRecheckInterval(
+                  DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT);
+          return String
+              .valueOf(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT);
+        } else {
+          datanodeManager.setHeartbeatRecheckInterval(Integer.parseInt(newVal));
+          return String.valueOf(datanodeManager.getHeartbeatRecheckInterval());
+        }
+      } catch (NumberFormatException nfe) {
+        throw new ReconfigurationException(property, newVal, getConf().get(
+            property), nfe);
+      } finally {
+        namesystem.writeUnlock();
+        LOG.info("RECONFIGURE* changed heartbeatRecheckInterval to "
+            + datanodeManager.getHeartbeatRecheckInterval());
+      }
+    default:
+      break;
+    }
+    throw new ReconfigurationException(property, newVal, getConf()
+        .get(property));
+  }
+
+  @Override // ReconfigurableBase
+  protected Configuration getNewConf() {
+    return new HdfsConfiguration();
+  }
 }
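For completeness, the same flow can be driven programmatically. A minimal
sketch (not part of this patch; the address is a placeholder) using the
stock DFSAdmin tool:

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class ReconfigureHeartbeat {
      public static void main(String[] args) throws Exception {
        String nn = "nn-host:8020"; // placeholder NameNode RPC address
        DFSAdmin admin = new DFSAdmin(new HdfsConfiguration());
        // Kick off the async reconfiguration task on the NameNode...
        ToolRunner.run(admin, new String[] {"-reconfig", "namenode", nn, "start"});
        // ...then poll it; per the tests below, the status output's first
        // line contains "finished" once the task completes.
        ToolRunner.run(admin, new String[] {"-reconfig", "namenode", nn, "status"});
      }
    }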

----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e01c6ea6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 0c4a440..6dff1bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -43,7 +43,6 @@ import com.google.common.collect.Lists;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.conf.ReconfigurationTaskStatus;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
@@ -2109,7 +2108,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     checkNNStartup();
     namesystem.checkOperation(OperationCategory.READ); // only active
     namesystem.checkSuperuserPrivilege();
-    int maxEventsPerRPC = nn.conf.getInt(
+    int maxEventsPerRPC = nn.getConf().getInt(
         DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_KEY,
         DFSConfigKeys.DFS_NAMENODE_INOTIFY_MAX_EVENTS_PER_RPC_DEFAULT);
     FSEditLog log = namesystem.getFSImage().getEditLog();
@@ -2224,23 +2223,24 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // ReconfigurationProtocol
-  public void startReconfiguration() {
-    throw new UnsupportedOperationException(
-        "Namenode startReconfiguration is not implemented.",
-        new ReconfigurationException());
+  public void startReconfiguration() throws IOException {
+    checkNNStartup();
+    namesystem.checkSuperuserPrivilege();
+    nn.startReconfigurationTask();
   }
 
   @Override // ReconfigurationProtocol
-  public ReconfigurationTaskStatus getReconfigurationStatus() {
-    throw new UnsupportedOperationException(
-        " Namenode getReconfigurationStatus is not implemented.",
-        new ReconfigurationException());
+  public ReconfigurationTaskStatus getReconfigurationStatus()
+      throws IOException {
+    checkNNStartup();
+    namesystem.checkSuperuserPrivilege();
+    return nn.getReconfigurationTaskStatus();
   }
 
   @Override // ReconfigurationProtocol
-  public List<String> listReconfigurableProperties() {
-    throw new UnsupportedOperationException(
-        " Namenode listReconfigurableProperties is not implemented.",
-        new ReconfigurationException());
+  public List<String> listReconfigurableProperties() throws IOException {
+    checkNNStartup();
+    namesystem.checkSuperuserPrivilege();
+    return NameNode.RECONFIGURABLE_PROPERTIES;
   }
 }

----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e01c6ea6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 647dd83..d3be9b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -938,7 +938,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
         setInetSocketAddress(targetAddr).
         setCachingStrategy(CachingStrategy.newDropBehind()).
         setClientCacheContext(dfs.getClientContext()).
-        setConfiguration(namenode.conf).
+        setConfiguration(namenode.getConf()).
         setTracer(tracer).
         setRemotePeerFactory(new RemotePeerFactory() {
           @Override

----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e01c6ea6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
index c33161f..033f4d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
@@ -81,7 +81,7 @@ public class TestComputeInvalidateWork {
   @Test(timeout=120000)
   public void testCompInvalidate() throws Exception {
     final int blockInvalidateLimit = bm.getDatanodeManager()
-        .blockInvalidateLimit;
+        .getBlockInvalidateLimit();
     namesystem.writeLock();
     try {
       for (int i=0; i<nodes.length; i++) {

----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e01c6ea6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ ... @@ public class TestDFSAdmin {
-  private void startReconfiguration(String nodeType, String address,
-      final List<String> outs, final List<String> errs) throws IOException {
-    reconfigurationOutErrFormatter("startReconfiguration", nodeType,
-        address, outs, errs);
-  }
-
   private void getReconfigurableProperties(String nodeType, String address,
       final List<String> outs, final List<String> errs) throws IOException {
     reconfigurationOutErrFormatter("getReconfigurableProperties", nodeType,
@@ -151,9 +151,10 @@ public class TestDFSAdmin {
    * @param expectedSuccuss set true if the reconfiguration task should succeed.
    * @throws IOException
    * @throws InterruptedException
+   * @throws TimeoutException
    */
   private void testDataNodeGetReconfigurationStatus(boolean expectedSuccuss)
-      throws IOException, InterruptedException {
+      throws IOException, InterruptedException, TimeoutException {
     ReconfigurationUtil ru = mock(ReconfigurationUtil.class);
     datanode.setReconfigurationUtil(ru);
 
@@ -179,21 +180,10 @@ public class TestDFSAdmin {
 
     assertThat(admin.startReconfiguration("datanode", address), is(0));
 
-    int count = 100;
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
-    while (count > 0) {
-      outs.clear();
-      errs.clear();
-      getReconfigurationStatus("datanode", address, outs, errs);
-      if (!outs.isEmpty() && outs.get(0).contains("finished")) {
-        break;
-      }
-      count--;
-      Thread.sleep(100);
-    }
-    LOG.info(String.format("count=%d", count));
-    assertTrue(count > 0);
+    awaitReconfigurationFinished("datanode", address, outs, errs);
+
     if (expectedSuccuss) {
       assertThat(outs.size(), is(4));
     } else {
@@ -232,59 +222,89 @@ public class TestDFSAdmin {
 
   @Test(timeout = 30000)
   public void testDataNodeGetReconfigurationStatus() throws IOException,
-      InterruptedException {
+      InterruptedException, TimeoutException {
     testDataNodeGetReconfigurationStatus(true);
     restartCluster();
     testDataNodeGetReconfigurationStatus(false);
   }
 
   @Test(timeout = 30000)
-  public void testNameNodeStartReconfiguration() throws IOException {
-    final String address = namenode.getHostAndPort();
-    final List<String> outs = Lists.newArrayList();
-    final List<String> errs = Lists.newArrayList();
-    startReconfiguration("namenode", address, outs, errs);
-    assertEquals(0, outs.size());
-    assertTrue(errs.size() > 1);
-    assertThat(
-        errs.get(0),
-        is(allOf(containsString("Namenode"), containsString("reconfiguring:"),
-            containsString("startReconfiguration"),
-            containsString("is not implemented"),
-            containsString("UnsupportedOperationException"))));
-  }
-
-  @Test(timeout = 30000)
   public void testNameNodeGetReconfigurableProperties() throws IOException {
     final String address = namenode.getHostAndPort();
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("namenode", address, outs, errs);
-    assertEquals(0, outs.size());
-    assertTrue(errs.size() > 1);
-    assertThat(
-        errs.get(0),
-        is(allOf(containsString("Namenode"),
-            containsString("reconfiguration:"),
-            containsString("listReconfigurableProperties"),
-            containsString("is not implemented"),
-            containsString("UnsupportedOperationException"))));
+    assertEquals(3, outs.size());
+    assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(1));
+    assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(2));
+    assertEquals(errs.size(), 0);
+  }
+
+  void awaitReconfigurationFinished(final String nodeType,
+      final String address, final List<String> outs, final List<String> errs)
+      throws TimeoutException, IOException, InterruptedException {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        outs.clear();
+        errs.clear();
+        try {
+          getReconfigurationStatus(nodeType, address, outs, errs);
+        } catch (IOException e) {
+          LOG.error(String.format(
+              "call getReconfigurationStatus on %s[%s] failed.", nodeType,
+              address), e);
+        }
+        return !outs.isEmpty() && outs.get(0).contains("finished");
+
+      }
+    }, 100, 100 * 100);
   }
 
   @Test(timeout = 30000)
-  public void testNameNodeGetReconfigurationStatus() throws IOException {
+  public void testNameNodeGetReconfigurationStatus() throws IOException,
+      InterruptedException, TimeoutException {
+    ReconfigurationUtil ru = mock(ReconfigurationUtil.class);
+    namenode.setReconfigurationUtil(ru);
     final String address = namenode.getHostAndPort();
+
+    List<ReconfigurationUtil.PropertyChange> changes =
+        new ArrayList<>();
+    changes.add(new ReconfigurationUtil.PropertyChange(
+        DFS_HEARTBEAT_INTERVAL_KEY, String.valueOf(6),
+        namenode.getConf().get(DFS_HEARTBEAT_INTERVAL_KEY)));
+    changes.add(new ReconfigurationUtil.PropertyChange(
+        "randomKey", "new123", "old456"));
+    when(ru.parseChangedProperties(any(Configuration.class),
+        any(Configuration.class))).thenReturn(changes);
+    assertThat(admin.startReconfiguration("namenode", address), is(0));
+
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
-    getReconfigurationStatus("namenode", address, outs, errs);
-    assertEquals(0, outs.size());
-    assertTrue(errs.size() > 1);
-    assertThat(
-        errs.get(0),
-        is(allOf(containsString("Namenode"),
-            containsString("reloading configuration:"),
-            containsString("getReconfigurationStatus"),
-            containsString("is not implemented"),
-            containsString("UnsupportedOperationException"))));
+    awaitReconfigurationFinished("namenode", address, outs, errs);
+
+    // verify change
+    assertEquals(
+        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value",
+        6,
+        namenode
+          .getConf()
+          .getLong(DFS_HEARTBEAT_INTERVAL_KEY,
+              DFS_HEARTBEAT_INTERVAL_DEFAULT));
+    assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value",
+        6,
+        namenode
+          .getNamesystem()
+          .getBlockManager()
+          .getDatanodeManager()
+          .getHeartbeatInterval());
+
+    int offset = 1;
+    assertThat(outs.get(offset), containsString("SUCCESS: Changed property "
+        + DFS_HEARTBEAT_INTERVAL_KEY));
+    assertThat(outs.get(offset + 1),
+        is(allOf(containsString("From:"), containsString("3"))));
+    assertThat(outs.get(offset + 2),
+        is(allOf(containsString("To:"), containsString("6"))));
   }
 }
\ No newline at end of file
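For reference, piecing the asserted substrings together, a successful
namenode status run should print something like the following (paraphrased
from the assertions above, not captured from a real run; only the quoted
substrings are pinned down by the test):

    Reconfiguring status for node [nn-host:8020]: ... finished ...
    SUCCESS: Changed property dfs.heartbeat.interval
        From: "3"
        To: "6"

and, per testNameNodeGetReconfigurableProperties, the properties listing
comes back as a header line followed by dfs.heartbeat.interval and
dfs.namenode.heartbeat.recheck-interval.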