From: wheat9@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-9022. Move NameNode.getAddress() and NameNode.getUri() to hadoop-hdfs-client. Contributed by Mingliang Liu.
Date: Thu, 17 Sep 2015 21:17:37 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4861fadb1 -> cadde8c1e


HDFS-9022. Move NameNode.getAddress() and NameNode.getUri() to
hadoop-hdfs-client. Contributed by Mingliang Liu.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cadde8c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cadde8c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cadde8c1

Branch: refs/heads/branch-2
Commit: cadde8c1e5fd75319e3914c82252e94e0f75405c
Parents: 4861fad
Author: Haohui Mai
Authored: Thu Sep 17 13:41:18 2015 -0700
Committer: Haohui Mai
Committed: Thu Sep 17 14:17:30 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   | 41 ++++++++++++++++
 .../hadoop/hdfs/nfs/mount/RpcProgramMountd.java |  4 +-
 .../hadoop/hdfs/nfs/nfs3/DFSClientCache.java    |  4 +-
 .../apache/hadoop/hdfs/nfs/nfs3/TestWrites.java |  7 +--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  7 ++-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    | 10 ++--
 .../org/apache/hadoop/hdfs/NameNodeProxies.java | 12 ++---
 .../hadoop/hdfs/server/namenode/DfsServlet.java |  3 +-
 .../hdfs/server/namenode/ImageServlet.java      |  5 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   | 49 ++++----------------
 .../hdfs/server/namenode/NamenodeFsck.java      |  2 +-
 .../server/namenode/ha/BootstrapStandby.java    |  3 +-
 .../namenode/ha/IPFailoverProxyProvider.java    |  4 +-
 .../hdfs/tools/DFSZKFailoverController.java     |  3 +-
 .../org/apache/hadoop/hdfs/tools/GetGroups.java |  4 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |  4 +-
 .../hadoop/hdfs/TestDFSClientFailover.java      |  2 +-
 .../hadoop/hdfs/TestDFSShellGenericOptions.java | 13 +++---
 .../hadoop/hdfs/TestDefaultNameNodePort.java    | 18 +++----
 .../org/apache/hadoop/hdfs/TestFileStatus.java  |  4 +-
 .../org/apache/hadoop/hdfs/TestGetBlocks.java   |  7 ++-
 .../apache/hadoop/hdfs/TestPersistBlocks.java   |  5 +-
 .../datanode/TestDataNodeRollingUpgrade.java    |  8 ++--
 .../server/namenode/NNThroughputBenchmark.java  |  3 +-
 .../hdfs/server/namenode/TestINodeFile.java     |  4 +-
 .../hdfs/server/namenode/TestStreamFile.java    |  4 +-
 .../namenode/ha/TestFailureToReadEdits.java     |  3 +-
 28 files changed, 127 insertions(+), 109 deletions(-)
----------------------------------------------------------------------
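For readers skimming the diff, the substance of the change is small: the
static helpers NameNode.getAddress(...) and NameNode.getUri(...) move from
the server-side hadoop-hdfs module into the client-side hadoop-hdfs-client
module as DFSUtilClient.getNNAddress(...) and DFSUtilClient.getNNUri(...),
and every call site below is renamed accordingly. A minimal sketch of the
post-move API follows; the class name and the example host are hypothetical,
and the fs.defaultFS value is illustrative only:

  import java.net.InetSocketAddress;
  import java.net.URI;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSUtilClient;

  public class NNAddressSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      conf.set("fs.defaultFS", "hdfs://nn.example.com:9999");

      // Resolve the NameNode RPC address from fs.defaultFS (the default
      // RPC port is used when the URI carries no port).
      InetSocketAddress addr = DFSUtilClient.getNNAddress(conf);

      // Map the address back to an hdfs:// URI; getNNUri() elides the
      // port when it equals the default RPC port.
      URI uri = DFSUtilClient.getNNUri(addr);
      System.out.println(addr + " -> " + uri);
    }
  }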

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index b032250..359886e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.BasicInetPeer;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
@@ -587,4 +589,43 @@ public class DFSUtilClient {
       }
     }
   }
+
+  public static InetSocketAddress getNNAddress(String address) {
+    return NetUtils.createSocketAddr(address,
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+  }
+
+  public static InetSocketAddress getNNAddress(Configuration conf) {
+    URI filesystemURI = FileSystem.getDefaultUri(conf);
+    return getNNAddress(filesystemURI);
+  }
+
+  /**
+   * @return address of file system
+   */
+  public static InetSocketAddress getNNAddress(URI filesystemURI) {
+    String authority = filesystemURI.getAuthority();
+    if (authority == null) {
+      throw new IllegalArgumentException(String.format(
+          "Invalid URI for NameNode address (check %s): %s has no authority.",
+          FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
+    }
+    if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
+        filesystemURI.getScheme())) {
+      throw new IllegalArgumentException(String.format(
+          "Invalid URI for NameNode address (check %s): " +
+          "%s is not of scheme '%s'.", FileSystem.FS_DEFAULT_NAME_KEY,
+          filesystemURI.toString(), HdfsConstants.HDFS_URI_SCHEME));
+    }
+    return getNNAddress(authority);
+  }
+
+  public static URI getNNUri(InetSocketAddress namenode) {
+    int port = namenode.getPort();
+    String portString =
+        (port == HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) ?
+        "" : (":" + port);
+    return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+        + namenode.getHostName() + portString);
+  }
 }
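
The scheme and authority checks in the relocated getNNAddress(URI) above are
worth noting, since they are what a misconfigured fs.defaultFS trips over. A
short sketch of that validation behavior; the class name is hypothetical and
the URIs are only examples:

  import java.net.URI;

  import org.apache.hadoop.hdfs.DFSUtilClient;

  public class NNUriValidationSketch {
    public static void main(String[] args) {
      // Valid: hdfs scheme with an authority; resolves host and port.
      System.out.println(
          DFSUtilClient.getNNAddress(URI.create("hdfs://foo:555")));

      try {
        // Invalid: non-hdfs scheme is rejected with a pointer to
        // fs.defaultFS in the message.
        DFSUtilClient.getNNAddress(URI.create("webhdfs://foo:50070"));
      } catch (IllegalArgumentException e) {
        System.out.println(e.getMessage());
      }
    }
  }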

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
index 2814cb0..869fb73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
@@ -27,10 +27,10 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mount.MountEntry;
 import org.apache.hadoop.mount.MountInterface;
 import org.apache.hadoop.mount.MountResponse;
@@ -90,7 +90,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     UserGroupInformation.setConfiguration(config);
     SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
         NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
-    this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
+    this.dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
index 79072f4..b946bce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
@@ -33,8 +33,8 @@ import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ShutdownHookManager;
@@ -173,7 +173,7 @@ class DFSClientCache {
     return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
       @Override
       public DFSClient run() throws IOException {
-        return new DFSClient(NameNode.getAddress(config), config);
+        return new DFSClient(DFSUtilClient.getNNAddress(config), config);
       }
     });
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
index 56603b9..3c193ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
@@ -28,6 +28,7 @@ import java.util.Arrays;
 import java.util.concurrent.ConcurrentNavigableMap;
 
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
@@ -35,7 +36,6 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
@@ -480,7 +480,7 @@ public class TestWrites {
     try {
       cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
       cluster.waitActive();
-      client = new DFSClient(NameNode.getAddress(config), config);
+      client = new DFSClient(DFSUtilClient.getNNAddress(config), config);
 
       // Use emphral port in case tests are running in parallel
       config.setInt("nfs3.mountd.port", 0);
@@ -596,7 +596,8 @@ public class TestWrites {
       nfs3.startServiceInternal(false);
       nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
 
-      DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
+      DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config),
+          config);
       HdfsFileStatus status = dfsClient.getFileInfo("/");
       FileHandle rootHandle = new FileHandle(status.getFileId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7156a51..1673708 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -581,6 +581,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7995. Implement chmod in the HDFS Web UI. (Ravi Prakash and Haohui
     Mai via wheat9)
 
+    HDFS-9022. Move NameNode.getAddress() and NameNode.getUri() to
+    hadoop-hdfs-client. (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
    HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 69a8532..ef9bf48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -154,7 +154,6 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.DataOutputBuffer;
@@ -256,17 +255,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       = new HashMap();
 
   /**
-   * Same as this(NameNode.getAddress(conf), conf);
+   * Same as this(NameNode.getNNAddress(conf), conf);
    * @see #DFSClient(InetSocketAddress, Configuration)
    * @deprecated Deprecated at 0.21
    */
   @Deprecated
   public DFSClient(Configuration conf) throws IOException {
-    this(NameNode.getAddress(conf), conf);
+    this(DFSUtilClient.getNNAddress(conf), conf);
   }
 
   public DFSClient(InetSocketAddress address, Configuration conf) throws IOException {
-    this(NameNode.getUri(address), conf);
+    this(DFSUtilClient.getNNUri(address), conf);
   }
 
   /**
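
The deprecated DFSClient(Configuration) constructor above keeps working; it
now simply resolves the address through DFSUtilClient before delegating. An
illustrative sketch of the two equivalent spellings (DFSClient is an internal
API, and the class name and host here are hypothetical):

  import java.io.IOException;
  import java.net.InetSocketAddress;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DFSClient;
  import org.apache.hadoop.hdfs.DFSUtilClient;

  public class DFSClientConstructionSketch {
    public static void main(String[] args) throws IOException {
      Configuration conf = new Configuration();
      conf.set("fs.defaultFS", "hdfs://nn.example.com:8020");

      // Deprecated since 0.21; after this patch it resolves the address
      // via DFSUtilClient.getNNAddress(conf) internally.
      @SuppressWarnings("deprecation")
      DFSClient viaConf = new DFSClient(conf);

      // The explicit form this patch substitutes at call sites.
      InetSocketAddress addr = DFSUtilClient.getNNAddress(conf);
      DFSClient viaAddr = new DFSClient(addr, conf);

      viaConf.close();
      viaAddr.close();
    }
  }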

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 76f1d18..fee7138 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -414,7 +414,7 @@ public class DFSUtil {
         NameNode.initializeGenericKeys(confForNn, nsId, nnId);
         String principal = SecurityUtil.getServerPrincipal(confForNn
             .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
-            NameNode.getAddress(confForNn).getHostName());
+            DFSUtilClient.getNNAddress(confForNn).getHostName());
         principals.add(principal);
       }
     } else {
@@ -422,7 +422,7 @@ public class DFSUtil {
       NameNode.initializeGenericKeys(confForNn, nsId, null);
       String principal = SecurityUtil.getServerPrincipal(confForNn
           .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
-          NameNode.getAddress(confForNn).getHostName());
+          DFSUtilClient.getNNAddress(confForNn).getHostName());
       principals.add(principal);
     }
 
@@ -498,7 +498,8 @@ public class DFSUtil {
     // Use default address as fall back
     String defaultAddress;
     try {
-      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
+      defaultAddress = NetUtils.getHostPortString(
+          DFSUtilClient.getNNAddress(conf));
     } catch (IllegalArgumentException e) {
       defaultAddress = null;
     }
@@ -534,7 +535,8 @@ public class DFSUtil {
     // Use default address as fall back
     String defaultAddress;
     try {
-      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
+      defaultAddress = NetUtils.getHostPortString(
+          DFSUtilClient.getNNAddress(conf));
     } catch (IllegalArgumentException e) {
       defaultAddress = null;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index 87ac437..a5039a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -165,8 +165,8 @@ public class NameNodeProxies {
 
     if (failoverProxyProvider == null) {
       // Non-HA case
-      return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
-          UserGroupInformation.getCurrentUser(), true,
+      return createNonHAProxy(conf, DFSUtilClient.getNNAddress(nameNodeUri),
+          xface, UserGroupInformation.getCurrentUser(), true,
           fallbackToSimpleAuth);
     } else {
       // HA case
@@ -183,10 +183,10 @@ public class NameNodeProxies {
             HdfsConstants.HDFS_URI_SCHEME);
       } else {
         dtService = SecurityUtil.buildTokenService(
-            NameNode.getAddress(nameNodeUri));
+            DFSUtilClient.getNNAddress(nameNodeUri));
       }
       return new ProxyAndInfo(proxy, dtService,
-          NameNode.getAddress(nameNodeUri));
+          DFSUtilClient.getNNAddress(nameNodeUri));
     }
   }
 
@@ -249,10 +249,10 @@ public class NameNodeProxies {
           HdfsConstants.HDFS_URI_SCHEME);
       } else {
         dtService = SecurityUtil.buildTokenService(
-            NameNode.getAddress(nameNodeUri));
+            DFSUtilClient.getNNAddress(nameNodeUri));
       }
       return new ProxyAndInfo(proxy, dtService,
-          NameNode.getAddress(nameNodeUri));
+          DFSUtilClient.getNNAddress(nameNodeUri));
     } else {
       LOG.warn("Currently creating proxy using " +
           "LossyRetryInvocationHandler requires NN HA setup");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
index 402dcdd..8edaed6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
@@ -26,6 +26,7 @@ import javax.servlet.http.HttpServletRequest;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -77,7 +78,7 @@ abstract class DfsServlet extends HttpServlet {
         NameNodeHttpServer.getNameNodeAddressFromContext(context);
     Configuration conf = new HdfsConfiguration(
         NameNodeHttpServer.getConfFromContext(context));
-    return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
+    return NameNodeProxies.createProxy(conf, DFSUtilClient.getNNUri(nnAddr),
         ClientProtocol.class).getProxy();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
index c565eb5..9f4b20c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
@@ -31,6 +31,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -233,7 +234,7 @@ public class ImageServlet extends HttpServlet {
 
       validRequestors.add(SecurityUtil.getServerPrincipal(conf
           .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
-          NameNode.getAddress(conf).getHostName()));
+          DFSUtilClient.getNNAddress(conf).getHostName()));
       try {
         validRequestors.add(
             SecurityUtil.getServerPrincipal(conf
@@ -256,7 +257,7 @@ public class ImageServlet extends HttpServlet {
         Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
         validRequestors.add(SecurityUtil.getServerPrincipal(otherNnConf
             .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
-            NameNode.getAddress(otherNnConf).getHostName()));
+            DFSUtilClient.getNNAddress(otherNnConf).getHostName()));
       }
 
       for (String v : validRequestors) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index aec2e98..c361ac9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -445,10 +445,6 @@ public class NameNode implements NameNodeStatusMXBean {
     return clientNamenodeAddress;
   }
 
-  public static InetSocketAddress getAddress(String address) {
-    return NetUtils.createSocketAddr(address, DFS_NAMENODE_RPC_PORT_DEFAULT);
-  }
-
   /**
    * Set the configuration property for the service rpc address
    * to address
@@ -470,45 +466,18 @@ public class NameNode implements NameNodeStatusMXBean {
       boolean fallback) {
     String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
     if (addr == null || addr.isEmpty()) {
-      return fallback ? getAddress(conf) : null;
+      return fallback ? DFSUtilClient.getNNAddress(conf) : null;
     }
-    return getAddress(addr);
-  }
-
-  public static InetSocketAddress getAddress(Configuration conf) {
-    URI filesystemURI = FileSystem.getDefaultUri(conf);
-    return getAddress(filesystemURI);
+    return DFSUtilClient.getNNAddress(addr);
   }
-
+
+  @Deprecated
   /**
-   * @return address of file system
+   * @deprecated Use {@link DFSUtilClient#getNNUri(InetSocketAddress)} instead.
    */
-  public static InetSocketAddress getAddress(URI filesystemURI) {
-    String authority = filesystemURI.getAuthority();
-    if (authority == null) {
-      throw new IllegalArgumentException(String.format(
-          "Invalid URI for NameNode address (check %s): %s has no authority.",
-          FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
-    }
-    if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
-        filesystemURI.getScheme())) {
-      throw new IllegalArgumentException(String.format(
-          "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
-          FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
-          HdfsConstants.HDFS_URI_SCHEME));
-    }
-    return getAddress(authority);
-  }
-
   public static URI getUri(InetSocketAddress namenode) {
-    int port = namenode.getPort();
-    String portString = (port == DFS_NAMENODE_RPC_PORT_DEFAULT) ?
-        "" : (":" + port);
-    return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
-        + namenode.getHostName()+portString);
+    return DFSUtilClient.getNNUri(namenode);
   }
-
   //
   // Common NameNode methods implementation for the active name-node role.
   //
@@ -529,7 +498,7 @@ public class NameNode implements NameNodeStatusMXBean {
   }
 
   protected InetSocketAddress getRpcServerAddress(Configuration conf) {
-    return getAddress(conf);
+    return DFSUtilClient.getNNAddress(conf);
   }
 
   /** Given a configuration get the bind host of the service rpc server
@@ -564,7 +533,7 @@ public class NameNode implements NameNodeStatusMXBean {
 
   protected void setRpcServerAddress(Configuration conf,
       InetSocketAddress rpcAddress) {
-    FileSystem.setDefaultUri(conf, getUri(rpcAddress));
+    FileSystem.setDefaultUri(conf, DFSUtilClient.getNNUri(rpcAddress));
   }
 
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
@@ -1014,7 +983,7 @@ public class NameNode implements NameNodeStatusMXBean {
     checkAllowFormat(conf);
 
     if (UserGroupInformation.isSecurityEnabled()) {
-      InetSocketAddress socAddr = getAddress(conf);
+      InetSocketAddress socAddr = DFSUtilClient.getNNAddress(conf);
       SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
           DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
     }
@@ -1117,7 +1086,7 @@ public class NameNode implements NameNodeStatusMXBean {
     }
 
     if (UserGroupInformation.isSecurityEnabled()) {
-      InetSocketAddress socAddr = getAddress(conf);
+      InetSocketAddress socAddr = DFSUtilClient.getNNAddress(conf);
       SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
           DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
     }
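
Note the asymmetry in the NameNode.java hunk above: the getAddress()
overloads are deleted outright, while getUri() survives as a deprecated shim
that forwards to DFSUtilClient, so downstream server-side callers keep
compiling during the transition. A hypothetical downstream snippet (the class
name and host are invented) behaves identically either way:

  import java.net.InetSocketAddress;
  import java.net.URI;

  import org.apache.hadoop.hdfs.DFSUtilClient;
  import org.apache.hadoop.hdfs.server.namenode.NameNode;

  public class ShimSketch {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
      InetSocketAddress addr = new InetSocketAddress("foo", 555);
      // Old entry point: still compiles, but warns and only forwards.
      URI legacy = NameNode.getUri(addr);
      // New entry point in hadoop-hdfs-client.
      URI current = DFSUtilClient.getNNUri(addr);
      System.out.println(legacy.equals(current)); // true
    }
  }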

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 315ab1e..69785b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -734,7 +734,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 
   private void copyBlocksToLostFound(String parent, HdfsFileStatus file,
       LocatedBlocks blocks) throws IOException {
-    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
+    final DFSClient dfs = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
     final String fullName = file.getFullName(parent);
     OutputStream fos = null;
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index 7038dc0..f3fbcae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -103,7 +104,7 @@ public class BootstrapStandby implements Tool, Configurable {
     parseConfAndFindOtherNN();
     NameNode.checkAllowFormat(conf);
 
-    InetSocketAddress myAddr = NameNode.getAddress(conf);
+    InetSocketAddress myAddr = DFSUtilClient.getNNAddress(conf);
     SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
         DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
index bc4e726..4e1cb9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/IPFailoverProxyProvider.java
@@ -24,9 +24,9 @@ import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -91,7 +91,7 @@ public class IPFailoverProxyProvider extends
     if (nnProxyInfo == null) {
       try {
         // Create a proxy that is not wrapped in RetryProxy
-        InetSocketAddress nnAddr = NameNode.getAddress(nameNodeUri);
+        InetSocketAddress nnAddr = DFSUtilClient.getNNAddress(nameNodeUri);
         nnProxyInfo = new ProxyInfo(NameNodeProxies.createNonHAProxy(
             conf, nnAddr, xface, UserGroupInformation.getCurrentUser(),
             false).getProxy(), nnAddr.toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
index f125a27..ff3fd68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.ha.HealthMonitor;
 import org.apache.hadoop.ha.ZKFailoverController;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -164,7 +165,7 @@ public class DFSZKFailoverController extends ZKFailoverController {
 
   @Override
   public void loginAsFCUser() throws IOException {
-    InetSocketAddress socAddr = NameNode.getAddress(conf);
+    InetSocketAddress socAddr = DFSUtilClient.getNNAddress(conf);
     SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
         DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
index 3c6d4c5..e03e787 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
@@ -29,9 +29,9 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.tools.GetGroupsBase;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.util.ToolRunner;
@@ -63,7 +63,7 @@ public class GetGroups extends GetGroupsBase {
 
   @Override
   protected InetSocketAddress getProtocolAddress(Configuration conf)
       throws IOException {
-    return NameNode.getAddress(conf);
+    return DFSUtilClient.getNNAddress(conf);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 5a3a25f..e324557 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -1805,8 +1805,8 @@ public class DFSTestUtil {
       URI nameNodeUri, UserGroupInformation ugi) throws IOException {
     return NameNodeProxies.createNonHAProxy(conf,
-        NameNode.getAddress(nameNodeUri), NamenodeProtocol.class, ugi, false).
-        getProxy();
+        DFSUtilClient.getNNAddress(nameNodeUri), NamenodeProtocol.class, ugi,
+        false).getProxy();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
index ff5554a..b098711 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
@@ -300,7 +300,7 @@ public class TestDFSClientFailover {
         Class xface) {
       try {
         this.proxy = NameNodeProxies.createNonHAProxy(conf,
-            NameNode.getAddress(uri), xface,
+            DFSUtilClient.getNNAddress(uri), xface,
             UserGroupInformation.getCurrentUser(), false).getProxy();
         this.xface = xface;
       } catch (IOException ioe) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
index 2b37e2e..282dcf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 
@@ -53,7 +52,7 @@ public class TestDFSShellGenericOptions {
     }
   }
 
-  private void testFsOption(String [] args, String namenode) {        
+  private void testFsOption(String [] args, String namenode) {
     // prepare arguments to create a directory /data
     args[0] = "-fs";
     args[1] = namenode;
@@ -81,7 +80,7 @@ public class TestDFSShellGenericOptions {
       // prepare arguments to create a directory /data
       args[0] = "-conf";
       args[1] = siteFile.getPath();
-      execute(args, namenode); 
+      execute(args, namenode);
     } catch (FileNotFoundException e) {
       e.printStackTrace();
     } finally {
@@ -94,7 +93,7 @@ public class TestDFSShellGenericOptions {
     // prepare arguments to create a directory /data
     args[0] = "-D";
     args[1] = "fs.defaultFS="+namenode;
-    execute(args, namenode);  
+    execute(args, namenode);
   }
 
   private void execute(String [] args, String namenode) {
     FsShell shell=new FsShell();
     FileSystem fs=null;
     try {
       ToolRunner.run(shell, args);
-      fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
-          shell.getConf());
-      assertTrue("Directory does not get created", 
+      fs = FileSystem.get(DFSUtilClient.getNNUri(
+          DFSUtilClient.getNNAddress(namenode)), shell.getConf());
+      assertTrue("Directory does not get created",
           fs.isDirectory(new Path("/data")));
       fs.delete(new Path("/data"), true);
     } catch (Exception e) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
index 38be3c8..1d8d289 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
@@ -34,13 +34,13 @@ public class TestDefaultNameNodePort {
 
   @Test
   public void testGetAddressFromString() throws Exception {
-    assertEquals(NameNode.getAddress("foo").getPort(),
+    assertEquals(DFSUtilClient.getNNAddress("foo").getPort(),
                  HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
-    assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
+    assertEquals(DFSUtilClient.getNNAddress("hdfs://foo/").getPort(),
                  HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
-    assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
+    assertEquals(DFSUtilClient.getNNAddress("hdfs://foo:555").getPort(),
                  555);
-    assertEquals(NameNode.getAddress("foo:555").getPort(),
+    assertEquals(DFSUtilClient.getNNAddress("foo:555").getPort(),
                  555);
   }
 
@@ -48,20 +48,20 @@ public class TestDefaultNameNodePort {
   public void testGetAddressFromConf() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
-    assertEquals(NameNode.getAddress(conf).getPort(),
+    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(),
         HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
-    assertEquals(NameNode.getAddress(conf).getPort(), 555);
+    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(), 555);
     FileSystem.setDefaultUri(conf, "foo");
-    assertEquals(NameNode.getAddress(conf).getPort(),
+    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(),
         HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
   }
 
   @Test
   public void testGetUri() {
-    assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
+    assertEquals(DFSUtilClient.getNNUri(new InetSocketAddress("foo", 555)),
         URI.create("hdfs://foo:555"));
-    assertEquals(NameNode.getUri(new InetSocketAddress("foo",
+    assertEquals(DFSUtilClient.getNNUri(new InetSocketAddress("foo",
         HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)),
         URI.create("hdfs://foo"));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
index 8b1223d..8320540 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
@@ -38,9 +38,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -78,7 +76,7 @@ public class TestFileStatus {
     fs = cluster.getFileSystem();
     fc = FileContext.getFileContext(cluster.getURI(0), conf);
     hftpfs = cluster.getHftpFileSystem(0);
-    dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
+    dfsClient = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
     file1 = new Path("filestatus.dat");
     writeFile(fs, file1, 1, fileSize, blockSize);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index c5320cc..9abf754 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.ipc.RemoteException;
@@ -209,8 +208,8 @@ public class TestGetBlocks {
     DatanodeInfo[] dataNodes = null;
     boolean notWritten;
     do {
-      final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF),
-          CONF);
+      final DFSClient dfsclient = new DFSClient(
+          DFSUtilClient.getNNAddress(CONF), CONF);
       locatedBlocks = dfsclient.getNamenode()
           .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
       assertEquals(2, locatedBlocks.size());
@@ -232,7 +231,7 @@ public class TestGetBlocks {
     InetSocketAddress addr = new InetSocketAddress("localhost",
         cluster.getNameNodePort());
     NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF,
-        NameNode.getUri(addr), NamenodeProtocol.class).getProxy();
+        DFSUtilClient.getNNUri(addr), NamenodeProtocol.class).getProxy();
 
     // get blocks of size fileLen from dataNodes[0]
     BlockWithLocations[] locs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
index beabfc3..2d25031 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
@@ -207,7 +206,7 @@ public class TestPersistBlocks {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       FileSystem fs = cluster.getFileSystem();
-      NameNode.getAddress(conf).getPort();
+      DFSUtilClient.getNNAddress(conf).getPort();
       // Creating a file with 4096 blockSize to write multiple blocks
       stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
       stream.write(DATA_BEFORE_RESTART);
@@ -256,7 +255,7 @@ public class TestPersistBlocks {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       FileSystem fs = cluster.getFileSystem();
-      NameNode.getAddress(conf).getPort();
+      DFSUtilClient.getNNAddress(conf).getPort();
       // Creating a file with 4096 blockSize to write multiple blocks
       stream = fs.create(FILE_PATH, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
       stream.write(DATA_BEFORE_RESTART, 0, DATA_BEFORE_RESTART.length / 2);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
index 57fee06..7e56988 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java
@@ -34,13 +34,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.Builder;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.TestRollingUpgrade;
-import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -294,9 +294,9 @@ public class TestDataNodeRollingUpgrade {
     String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
     String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";
 
-    DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
-    DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
-    DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);
+    DFSClient client1 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
+    DFSClient client2 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
+    DFSClient client3 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
 
     DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
     DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 39894b5..b963d8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -1497,7 +1498,7 @@ public class NNThroughputBenchmark implements Tool {
           UserGroupInformation.getCurrentUser());
       clientProto = dfs.getClient().getNamenode();
       dataNodeProto = new DatanodeProtocolClientSideTranslatorPB(
-          NameNode.getAddress(nnUri), config);
+          DFSUtilClient.getNNAddress(nnUri), config);
       refreshUserMappingsProto =
           DFSTestUtil.getRefreshUserMappingsProtocolProxy(config, nnUri);
       getBlockPoolId(dfs);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index f6fca0f..3dfe9d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -52,9 +52,9 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -937,7 +937,7 @@ public class TestINodeFile {
       long parentId = fsdir.getINode("/").getId();
       String testPath = "/.reserved/.inodes/" + dirId + "/..";
 
-      client = new DFSClient(NameNode.getAddress(conf), conf);
+      client = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
       HdfsFileStatus status = client.getFileInfo(testPath);
       assertTrue(parentId == status.getFileId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
index c7bd5eb..f887034 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
@@ -272,7 +273,8 @@ public class TestStreamFile {
 
     Mockito.doReturn(CONF).when(mockServletContext).getAttribute(
         JspHelper.CURRENT_CONF);
-    Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF)))
+    Mockito.doReturn(NetUtils.getHostPortString(DFSUtilClient.getNNAddress
+        (CONF)))
         .when(mockHttpServletRequest).getParameter("nnaddr");
     Mockito.doReturn(testFile.toString()).when(mockHttpServletRequest)
         .getPathInfo();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cadde8c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
index 5ddc635..f5a72ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
@@ -246,7 +247,7 @@ public class TestFailureToReadEdits {
     FileSystem fs0 = null;
     try {
       // Make sure that when the active restarts, it loads all the edits.
-      fs0 = FileSystem.get(NameNode.getUri(nn0.getNameNodeAddress()),
+      fs0 = FileSystem.get(DFSUtilClient.getNNUri(nn0.getNameNodeAddress()),
           conf);
 
       assertTrue(fs0.exists(new Path(TEST_DIR1)));