From: wheat9@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-9010. Replace NameNode.DEFAULT_PORT with HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT config key. Contributed by Mingliang Liu.
Date: Tue, 15 Sep 2015 01:24:53 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 7b5cf5352 -> 76957a485


HDFS-9010. Replace NameNode.DEFAULT_PORT with HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT config key. Contributed by Mingliang Liu.
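The substance of the change: client code should no longer need the server-side NameNode class just to learn the default NameNode RPC port (8020). The same value is now available from the client-side HdfsClientConfigKeys class, and NameNode.DEFAULT_PORT survives only as a deprecated alias. A minimal migration sketch for downstream code (the class name and host below are illustrative, not part of this commit):

    import java.net.InetSocketAddress;

    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
    import org.apache.hadoop.net.NetUtils;

    public class DefaultPortMigration {
      public static void main(String[] args) {
        // Before this commit (now deprecated, and it dragged the server-side
        // NameNode class onto the client classpath):
        //   int port = NameNode.DEFAULT_PORT;

        // After this commit: the same value (8020), from the client API.
        int port = HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;

        // Typical use, mirroring NameNode.getAddress(): fall back to the
        // default RPC port when an address string carries no explicit port.
        InetSocketAddress addr =
            NetUtils.createSocketAddr("nn.example.com", port);
        System.out.println(addr.getPort()); // 8020
      }
    }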
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76957a48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76957a48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76957a48

Branch: refs/heads/trunk
Commit: 76957a485b526468498f93e443544131a88b5684
Parents: 7b5cf53
Author: Haohui Mai
Authored: Mon Sep 14 18:22:52 2015 -0700
Committer: Haohui Mai
Committed: Mon Sep 14 18:22:52 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt          |  3 +++
 .../src/main/java/org/apache/hadoop/fs/Hdfs.java     |  7 ++++---
 .../apache/hadoop/hdfs/DistributedFileSystem.java    |  4 ++--
 .../org/apache/hadoop/hdfs/NameNodeProxies.java      |  3 ++-
 .../hadoop/hdfs/server/namenode/NameNode.java        | 15 +++++++++++----
 .../apache/hadoop/hdfs/tools/NNHAServiceTarget.java  |  3 ++-
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java      |  3 ++-
 .../apache/hadoop/hdfs/TestDFSClientFailover.java    |  3 ++-
 .../apache/hadoop/hdfs/TestDefaultNameNodePort.java  | 16 ++++++++++------
 .../balancer/TestBalancerWithHANameNodes.java        |  4 ++--
 .../hdfs/server/namenode/TestFileTruncate.java       |  5 +++--
 11 files changed, 43 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 270f30b..35dcd80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -909,6 +909,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8996. Consolidate validateLog and scanLog in FJM#EditLogFile (Zhe
     Zhang via Colin P. McCabe)
 
+    HDFS-9010. Replace NameNode.DEFAULT_PORT with HdfsClientConfigKeys.
+    DFS_NAMENODE_RPC_PORT_DEFAULT config key. (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index ba5687c..1d37aa9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
@@ -49,7 +50,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -77,7 +77,8 @@ public class Hdfs extends AbstractFileSystem {
    * @throws IOException
    */
   Hdfs(final URI theUri, final Configuration conf) throws IOException,
       URISyntaxException {
-    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
+    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true,
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
 
     if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
       throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
@@ -92,7 +93,7 @@ public class Hdfs extends AbstractFileSystem {
 
   @Override
   public int getUriDefaultPort() {
-    return NameNode.DEFAULT_PORT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index ebed3c2..f4cf4c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -84,7 +85,6 @@ import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
@@ -1497,7 +1497,7 @@ public class DistributedFileSystem extends FileSystem {
 
   @Override
   protected int getDefaultPort() {
-    return NameNode.DEFAULT_PORT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index d873526..7a4ec4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -509,7 +509,8 @@ public class NameNodeProxies {
     // Check the port in the URI, if it is logical.
     if (checkPort && providerNN.useLogicalURI()) {
       int port = nameNodeUri.getPort();
-      if (port > 0 && port != NameNode.DEFAULT_PORT) {
+      if (port > 0 &&
+          port != HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
         // Throwing here without any cleanup is fine since we have not
         // actually created the underlying proxies yet.
         throw new IOException("Port " + port + " specified in URI "
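The NameNodeProxies hunk above carries the one behavioral subtlety in the patch: for a logical (HA nameservice) URI, an explicit port makes no sense because the real NameNode addresses come from configuration, so any port other than the default is rejected. A standalone illustration of that guard (the nameservice name is hypothetical and the exception message paraphrases the real one, which is truncated in this email):

    import java.io.IOException;
    import java.net.URI;

    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class LogicalUriPortCheck {
      // Mirrors the guard in NameNodeProxies for logical HA URIs.
      static void checkLogicalUriPort(URI nameNodeUri) throws IOException {
        int port = nameNodeUri.getPort();
        if (port > 0 &&
            port != HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
          throw new IOException("Port " + port + " specified in URI "
              + nameNodeUri + " is not usable with a logical (HA) namenode");
        }
      }

      public static void main(String[] args) throws IOException {
        checkLogicalUriPort(URI.create("hdfs://mycluster"));      // ok: no port
        checkLogicalUriPort(URI.create("hdfs://mycluster:8020")); // ok: default
        checkLogicalUriPort(URI.create("hdfs://mycluster:9000")); // throws
      }
    }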

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 6e32066..683112b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -39,9 +39,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -111,6 +111,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY;
@@ -314,7 +315,12 @@ public class NameNode implements NameNodeStatusMXBean {
     }
   }
 
-  public static final int DEFAULT_PORT = 8020;
+  /**
+   * @deprecated Use {@link HdfsClientConfigKeys#DFS_NAMENODE_RPC_PORT_DEFAULT}
+   * instead.
+   */
+  @Deprecated
+  public static final int DEFAULT_PORT = DFS_NAMENODE_RPC_PORT_DEFAULT;
   public static final Logger LOG =
       LoggerFactory.getLogger(NameNode.class.getName());
   public static final Logger stateChangeLog =
@@ -452,7 +458,7 @@ public class NameNode implements NameNodeStatusMXBean {
   }
 
   public static InetSocketAddress getAddress(String address) {
-    return NetUtils.createSocketAddr(address, DEFAULT_PORT);
+    return NetUtils.createSocketAddr(address, DFS_NAMENODE_RPC_PORT_DEFAULT);
   }
 
   /**
@@ -509,7 +515,8 @@ public class NameNode implements NameNodeStatusMXBean {
 
   public static URI getUri(InetSocketAddress namenode) {
     int port = namenode.getPort();
-    String portString = port == DEFAULT_PORT ? "" : (":"+port);
+    String portString = (port == DFS_NAMENODE_RPC_PORT_DEFAULT) ?
+        "" : (":" + port);
     return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
         + namenode.getHostName()+portString);
   }
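Two things are worth noting in the NameNode.java hunk above: the old constant stays behind as a @Deprecated alias with the same value, so existing callers keep compiling, and getUri() still elides the port from the generated URI exactly when it equals the default. A small sketch of that preserved behavior (the host name is illustrative):

    import java.net.InetSocketAddress;
    import java.net.URI;

    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    public class GetUriExample {
      public static void main(String[] args) {
        // Old and new constants are the same value (8020); only the
        // recommended access path changed.
        System.out.println(NameNode.DEFAULT_PORT
            == HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT); // true

        // The default port is elided from the URI...
        URI plain = NameNode.getUri(new InetSocketAddress("foo.example.com",
            HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT));
        System.out.println(plain); // hdfs://foo.example.com

        // ...while a non-default port stays explicit.
        URI withPort = NameNode.getUri(
            new InetSocketAddress("foo.example.com", 555));
        System.out.println(withPort); // hdfs://foo.example.com:555
      }
    }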

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
index 38f5123..6615a4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.ha.NodeFencer;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetUtils;
 
@@ -77,7 +78,7 @@ public class NNHAServiceTarget extends HAServiceTarget {
           "Unable to determine service address for namenode '" + nnId + "'");
     }
     this.addr = NetUtils.createSocketAddr(serviceAddr,
-        NameNode.DEFAULT_PORT);
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
 
     this.autoFailoverEnabled = targetConf.getBoolean(
         DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index 9a09987..a1169c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
@@ -89,7 +90,7 @@ public class TestAppendSnapshotTruncate {
     cluster = new MiniDFSCluster.Builder(conf)
         .format(true)
         .numDataNodes(DATANODE_NUM)
-        .nameNodePort(NameNode.DEFAULT_PORT)
+        .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
         .waitSafeMode(true)
         .build();
     dfs = cluster.getFileSystem();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
index 644d66d..ff5554a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
@@ -115,7 +115,8 @@ public class TestDFSClientFailover {
     // to include a port number.
     Path withPort = new Path("hdfs://" +
         HATestUtil.getLogicalHostname(cluster) + ":" +
-        NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT + "/" +
+        TEST_FILE.toUri().getPath());
     FileSystem fs2 = withPort.getFileSystem(fs.getConf());
     assertTrue(fs2.exists(withPort));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
index 27f13e3..38be3c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
@@ -24,7 +24,9 @@ import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+
 import org.junit.Test;
 
 /** Test NameNode port defaulting code. */
@@ -33,9 +35,9 @@ public class TestDefaultNameNodePort {
   @Test
   public void testGetAddressFromString() throws Exception {
     assertEquals(NameNode.getAddress("foo").getPort(),
-        NameNode.DEFAULT_PORT);
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
-        NameNode.DEFAULT_PORT);
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
         555);
     assertEquals(NameNode.getAddress("foo:555").getPort(),
@@ -46,11 +48,13 @@ public class TestDefaultNameNodePort {
   public void testGetAddressFromConf() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
-    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress(conf).getPort(),
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
     assertEquals(NameNode.getAddress(conf).getPort(), 555);
     FileSystem.setDefaultUri(conf, "foo");
-    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress(conf).getPort(),
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
   }
 
   @Test
@@ -58,7 +62,7 @@ public class TestDefaultNameNodePort {
     assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
         URI.create("hdfs://foo:555"));
     assertEquals(NameNode.getUri(new InetSocketAddress("foo",
-        NameNode.DEFAULT_PORT)),
-        URI.create("hdfs://foo"));
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)),
+        URI.create("hdfs://foo"));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index bd91366..7559de4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
 import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.junit.Test;
 
@@ -67,7 +67,7 @@ public class TestBalancerWithHANameNodes {
     assertEquals(capacities.length, racks.length);
     int numOfDatanodes = capacities.length;
     NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
-    nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
+    nn1Conf.setIpcPort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     Configuration copiedConf = new Configuration(conf);
     cluster = new MiniDFSCluster.Builder(copiedConf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 70fa222..711db2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -99,7 +100,7 @@ public class TestFileTruncate {
     cluster = new MiniDFSCluster.Builder(conf)
         .format(true)
         .numDataNodes(DATANODE_NUM)
-        .nameNodePort(NameNode.DEFAULT_PORT)
+        .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
        .waitSafeMode(true)
         .build();
     fs = cluster.getFileSystem();
@@ -1224,7 +1225,7 @@ public class TestFileTruncate {
       NameNode.doRollback(conf, false);
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM)
           .format(false)
-          .nameNodePort(NameNode.DEFAULT_PORT)
+          .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
          .startupOption(o==StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
          .dnStartupOption(o!=StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
           .build();