From: suresh@apache.org
To: common-commits@hadoop.apache.org
Subject: svn commit: r1190089 - in /hadoop/common/branches/branch-0.20-security: ./ src/core/org/apache/hadoop/fs/ src/core/org/apache/hadoop/net/ src/hdfs/org/apache/hadoop/hdfs/ src/test/org/apache/hadoop/fs/ src/test/org/apache/hadoop/net/
Date: Thu, 27 Oct 2011 23:27:04 -0000
Message-Id: <20111027232705.192A72388A9B@eris.apache.org>

Author: suresh
Date: Thu Oct 27 23:27:04 2011
New Revision: 1190089

URL: http://svn.apache.org/viewvc?rev=1190089&view=rev
Log:
HDFS-2450. Filesystem supports path with both short names and FQDN.
Contributed by Daryn Sharp.

Modified:
    hadoop/common/branches/branch-0.20-security/CHANGES.txt
    hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/fs/FileSystem.java
    hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/net/NetUtils.java
    hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
    hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/fs/TestFileSystem.java
    hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/net/TestNetUtils.java

Modified: hadoop/common/branches/branch-0.20-security/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/CHANGES.txt?rev=1190089&r1=1190088&r2=1190089&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.20-security/CHANGES.txt Thu Oct 27 23:27:04 2011
@@ -63,6 +63,10 @@ Release 0.20.205.1 - unreleased
     HADOOP-7728. Enable task memory management to be configurable in hadoop
     config setup script. (ramya)
 
+    HDFS-2450. Filesystem supports path with both short names and FQDN.
+    (Daryn Sharp via suresh)
+
+
   BUG FIXES
 
     HDFS-2441. Remove the Content-Type set by HttpServer.QuotingInputFilter in
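
In practical terms, the entry above means a client can now address the same filesystem by short hostname, by fully qualified hostname, or with the default port written out, and FileSystem#checkPath accepts all of them. A rough sketch of the intended effect; the hdfs://nn authority, the nn.example.com FQDN and the 8020 default port are illustrative assumptions, not part of this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ShortNameVsFqdn {
  public static void main(String[] args) throws Exception {
    // Assumes fs.default.name = hdfs://nn.example.com:8020 and that "nn"
    // resolves to nn.example.com on this machine.
    FileSystem fs = FileSystem.get(new Configuration());

    // Each of these now passes checkPath (makeQualified invokes it):
    fs.makeQualified(new Path("hdfs://nn/data"));
    fs.makeQualified(new Path("hdfs://nn.example.com/data"));
    fs.makeQualified(new Path("hdfs://nn.example.com:8020/data"));

    // A different port is still rejected:
    try {
      fs.makeQualified(new Path("hdfs://nn:9000/data"));
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage()); // "Wrong FS: ..."
    }
  }
}
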
Modified: hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/fs/FileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/fs/FileSystem.java?rev=1190089&r1=1190088&r2=1190089&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/fs/FileSystem.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/fs/FileSystem.java Thu Oct 27 23:27:04 2011
@@ -40,6 +40,7 @@ import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -159,6 +160,15 @@ public abstract class FileSystem extends
   public abstract URI getUri();
 
   /**
+   * Resolve the uri's hostname and add the default port if not in the uri
+   * @return URI
+   * @see NetUtils#getCanonicalUri(URI, int)
+   */
+  protected URI getCanonicalUri() {
+    return NetUtils.getCanonicalUri(getUri(), getDefaultPort());
+  }
+
+  /**
    * Get the default port for this file system.
    * @return the default port or 0 if there isn't one
    */
@@ -340,32 +350,31 @@ public abstract class FileSystem extends
   /** Check that a Path belongs to this FileSystem. */
   protected void checkPath(Path path) {
     URI uri = path.toUri();
-    if (uri.getScheme() == null)                // fs is relative
-      return;
-    String thisScheme = this.getUri().getScheme();
     String thatScheme = uri.getScheme();
-    String thisAuthority = this.getUri().getAuthority();
-    String thatAuthority = uri.getAuthority();
+    if (thatScheme == null)                // fs is relative
+      return;
+    URI thisUri = getCanonicalUri();
+    String thisScheme = thisUri.getScheme();
     //authority and scheme are not case sensitive
     if (thisScheme.equalsIgnoreCase(thatScheme)) {// schemes match
-      if (thisAuthority == thatAuthority ||       // & authorities match
-          (thisAuthority != null &&
-           thisAuthority.equalsIgnoreCase(thatAuthority)))
-        return;
-
+      String thisAuthority = thisUri.getAuthority();
+      String thatAuthority = uri.getAuthority();
       if (thatAuthority == null &&                // path's authority is null
           thisAuthority != null) {                // fs has an authority
-        URI defaultUri = getDefaultUri(getConf()); // & is the conf default
-        if (thisScheme.equalsIgnoreCase(defaultUri.getScheme()) &&
-            thisAuthority.equalsIgnoreCase(defaultUri.getAuthority()))
-          return;
-        try {                                     // or the default fs's uri
-          defaultUri = get(getConf()).getUri();
-        } catch (IOException e) {
-          throw new RuntimeException(e);
+        URI defaultUri = getDefaultUri(getConf());
+        if (thisScheme.equalsIgnoreCase(defaultUri.getScheme())) {
+          uri = defaultUri; // schemes match, so use this uri instead
+        } else {
+          uri = null; // can't determine auth of the path
         }
-        if (thisScheme.equalsIgnoreCase(defaultUri.getScheme()) &&
-            thisAuthority.equalsIgnoreCase(defaultUri.getAuthority()))
+      }
+      if (uri != null) {
+        // canonicalize uri before comparing with this fs
+        uri = NetUtils.getCanonicalUri(uri, getDefaultPort());
+        thatAuthority = uri.getAuthority();
+        if (thisAuthority == thatAuthority ||       // authorities match
+            (thisAuthority != null &&
+             thisAuthority.equalsIgnoreCase(thatAuthority)))
           return;
       }
     }
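
The checkPath rewrite leans on the new protected getCanonicalUri() hook, which any FileSystem implementation inherits simply by reporting its URI and default port. A minimal sketch from an implementor's point of view; the myfs scheme, host names and port 123 mirror the tests added further down, and the host-to-FQDN expansion is an assumption about the local resolver:

import java.net.URI;
import org.apache.hadoop.fs.FileSystem;

// Left abstract on purpose: only the two methods relevant to canonicalization
// are shown; a real implementation must also provide open/create/etc.
abstract class CanonicalAwareFs extends FileSystem {
  @Override
  public URI getUri() {
    return URI.create("myfs://host");
  }

  @Override
  protected int getDefaultPort() {
    return 123;
  }

  // The inherited protected getCanonicalUri() would now return
  // myfs://host.a.b:123, assuming the resolver expands "host" to "host.a.b",
  // and checkPath() compares authorities against that canonical form.
}
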
Modified: hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/net/NetUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/net/NetUtils.java?rev=1190089&r1=1190088&r2=1190089&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/net/NetUtils.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/core/org/apache/hadoop/net/NetUtils.java Thu Oct 27 23:27:04 2011
@@ -33,6 +33,7 @@ import java.net.ConnectException;
 import java.nio.channels.SocketChannel;
 import java.util.Map.Entry;
 import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;
 
 import javax.net.SocketFactory;
 
@@ -358,9 +359,60 @@ public class NetUtils {
   /**
    * This is for testing only!
    */
-  static void setHostResolver(QualifiedHostResolver newResolver) {
+  static void setHostResolver(HostResolver newResolver) {
     hostResolver = newResolver;
   }
+
+  /**
+   * Resolve the uri's hostname and add the default port if not in the uri
+   * @param uri to resolve
+   * @param defaultPort if none is given
+   * @return URI
+   * @throws UnknownHostException
+   */
+  public static URI getCanonicalUri(URI uri, int defaultPort) {
+    // skip if there is no authority, ie. "file" scheme or relative uri
+    String host = uri.getHost();
+    if (host == null) {
+      return uri;
+    }
+    String fqHost = canonicalizeHost(host);
+    int port = uri.getPort();
+    // short out if already canonical with a port
+    if (host.equals(fqHost) && port != -1) {
+      return uri;
+    }
+    // reconstruct the uri with the canonical host and port
+    try {
+      uri = new URI(uri.getScheme(), uri.getUserInfo(),
+          fqHost, (port == -1) ? defaultPort : port,
+          uri.getPath(), uri.getQuery(), uri.getFragment());
+    } catch (URISyntaxException e) {
+      throw new IllegalArgumentException(e);
+    }
+    return uri;
+  }
+
+  // cache the canonicalized hostnames; the cache currently isn't expired,
+  // but the canonicals will only change if the host's resolver configuration
+  // changes
+  private static ConcurrentHashMap<String, String> canonicalizedHostCache =
+      new ConcurrentHashMap<String, String>();
+
+  private static String canonicalizeHost(String host) {
+    // check if the host has already been canonicalized
+    String fqHost = canonicalizedHostCache.get(host);
+    if (fqHost == null) {
+      try {
+        fqHost = hostResolver.getByName(host).getHostName();
+        // slight race condition, but won't hurt
        canonicalizedHostCache.put(host, fqHost);
+      } catch (UnknownHostException e) {
+        fqHost = host;
+      }
+    }
+    return fqHost;
+  }
 
   /**
    * Handle the transition from pairs of attributes specifying a host and port
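
The new helper can also be called directly; the expected results below mirror the TestNetUtils cases at the end of this commit and assume a resolver that expands "host" to "host.a.b" (on a real machine the FQDN depends on the local DNS search domains):

import java.net.URI;
import org.apache.hadoop.net.NetUtils;

public class GetCanonicalUriDemo {
  public static void main(String[] args) {
    // Host without a port: resolve to the FQDN and append the default port.
    System.out.println(NetUtils.getCanonicalUri(
        URI.create("myfs://host/path"), 123));         // myfs://host.a.b:123/path

    // Already an FQDN with an explicit port: returned unchanged.
    System.out.println(NetUtils.getCanonicalUri(
        URI.create("myfs://host.a.b:123/path"), 456)); // myfs://host.a.b:123/path

    // No host to resolve (relative or authority-less URI): returned unchanged.
    System.out.println(NetUtils.getCanonicalUri(
        URI.create("/just/a/path"), 123));              // /just/a/path
  }
}
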
Modified: hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java?rev=1190089&r1=1190088&r2=1190089&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java Thu Oct 27 23:27:04 2011
@@ -87,43 +87,10 @@ public class DistributedFileSystem exten
     InetSocketAddress namenode = NameNode.getAddress(uri.getAuthority());
     this.dfs = new DFSClient(namenode, conf, statistics);
-    this.uri = NameNode.getUri(namenode);
+    this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority());
     this.workingDir = getHomeDirectory();
   }
 
-  /** Permit paths which explicitly specify the default port. */
-  protected void checkPath(Path path) {
-    URI thisUri = this.getUri();
-    URI thatUri = path.toUri();
-    String thatAuthority = thatUri.getAuthority();
-    if (thatUri.getScheme() != null
-        && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
-        && thatUri.getPort() == NameNode.DEFAULT_PORT
-        && thisUri.getPort() == -1
-        && thatAuthority.substring(0,thatAuthority.indexOf(":"))
-        .equalsIgnoreCase(thisUri.getAuthority()))
-      return;
-    super.checkPath(path);
-  }
-
-  /** Normalize paths that explicitly specify the default port. */
-  public Path makeQualified(Path path) {
-    URI thisUri = this.getUri();
-    URI thatUri = path.toUri();
-    String thatAuthority = thatUri.getAuthority();
-    if (thatUri.getScheme() != null
-        && thatUri.getScheme().equalsIgnoreCase(thisUri.getScheme())
-        && thatUri.getPort() == NameNode.DEFAULT_PORT
-        && thisUri.getPort() == -1
-        && thatAuthority.substring(0,thatAuthority.indexOf(":"))
-        .equalsIgnoreCase(thisUri.getAuthority())) {
-      path = new Path(thisUri.getScheme(), thisUri.getAuthority(),
-                      thatUri.getPath());
-    }
-    return super.makeQualified(path);
-  }
-
-
   public Path getWorkingDirectory() {
     return workingDir;
   }
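
With its private checkPath/makeQualified overrides gone, DistributedFileSystem relies entirely on the canonicalizing logic in the base class, and its getUri() now reports the authority exactly as the client gave it rather than one rebuilt from the resolved NameNode address. A small sketch of the visible difference; the hdfs://nn authority is an assumption for illustration:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class DfsUriExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://nn"), conf);

    // The authority is preserved as given (previously it was rebuilt via
    // NameNode.getUri from the resolved socket address).
    System.out.println(fs.getUri()); // hdfs://nn

    // Canonical comparisons in checkPath still resolve "nn" and apply the
    // default port through the protected getCanonicalUri() hook.
  }
}
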
Modified: hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/fs/TestFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/fs/TestFileSystem.java?rev=1190089&r1=1190088&r2=1190089&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/fs/TestFileSystem.java (original)
+++ hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/fs/TestFileSystem.java Thu Oct 27 23:27:04 2011
@@ -18,34 +18,38 @@
 package org.apache.hadoop.fs;
 
+import static org.mockito.Mockito.mock;
+
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.URI;
 import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Random;
-import java.util.List;
 import java.util.ArrayList;
-import java.util.Set;
+import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
-import java.util.HashMap;
-import java.net.InetSocketAddress;
-import java.net.URI;
+import java.util.Random;
+import java.util.Set;
 
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.shell.CommandFormat;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.UTF8;
 import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.mapred.FileInputFormat;
 import org.apache.hadoop.mapred.FileOutputFormat;
 import org.apache.hadoop.mapred.JobClient;
@@ -55,13 +59,13 @@ import org.apache.hadoop.mapred.OutputCo
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.lib.LongSumReducer;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.NetUtilsTestResolver;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import java.security.PrivilegedAction;
-import java.security.PrivilegedActionException;
-import java.security.PrivilegedExceptionAction;
-import static org.mockito.Mockito.mock;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Test;
 
 public class TestFileSystem extends TestCase {
   private static final Log LOG = FileSystem.LOG;
@@ -697,4 +701,343 @@ public class TestFileSystem extends Test
     });
     assertNotSame(fsA, fsA1);
   }
+
+  // canonicalizing!
+
+  static String[] authorities = {
+    "myfs://host",
+    "myfs://host.a",
+    "myfs://host.a.b",
+  };
+
+  static String[] ips = {
+    "myfs://127.0.0.1"
+  };
+
+
+  @Test
+  public void testSetupResolver() throws Exception {
+    NetUtilsTestResolver.install();
+  }
+
+  // no ports
+
+  @Test
+  public void testShortAuthority() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://host", "myfs://host.a.b:123");
+    verifyPaths(fs, authorities, -1, true);
+    verifyPaths(fs, authorities, 123, true);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  @Test
+  public void testPartialAuthority() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://host.a", "myfs://host.a.b:123");
+    verifyPaths(fs, authorities, -1, true);
+    verifyPaths(fs, authorities, 123, true);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  @Test
+  public void testFullAuthority() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://host.a.b", "myfs://host.a.b:123");
+    verifyPaths(fs, authorities, -1, true);
+    verifyPaths(fs, authorities, 123, true);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  // with default ports
+
+  @Test
+  public void testShortAuthorityWithDefaultPort() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://host:123", "myfs://host.a.b:123");
+    verifyPaths(fs, authorities, -1, true);
+    verifyPaths(fs, authorities, 123, true);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  @Test
+  public void testPartialAuthorityWithDefaultPort() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://host.a:123", "myfs://host.a.b:123");
+    verifyPaths(fs, authorities, -1, true);
+    verifyPaths(fs, authorities, 123, true);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  @Test
+  public void testFullAuthorityWithDefaultPort() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://host.a.b:123", "myfs://host.a.b:123");
+    verifyPaths(fs, authorities, -1, true);
+    verifyPaths(fs, authorities, 123, true);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  // with non-standard ports
+
+  @Test
+  public void testShortAuthorityWithOtherPort() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://host:456", "myfs://host.a.b:456");
+    verifyPaths(fs, authorities, -1, false);
+    verifyPaths(fs, authorities, 123, false);
+    verifyPaths(fs, authorities, 456, true);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  @Test
+  public void testPartialAuthorityWithOtherPort() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://host.a:456", "myfs://host.a.b:456");
+    verifyPaths(fs, authorities, -1, false);
+    verifyPaths(fs, authorities, 123, false);
+    verifyPaths(fs, authorities, 456, true);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  @Test
+  public void testFullAuthorityWithOtherPort() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://host.a.b:456", "myfs://host.a.b:456");
+    verifyPaths(fs, authorities, -1, false);
+    verifyPaths(fs, authorities, 123, false);
+    verifyPaths(fs, authorities, 456, true);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  // ips
+
+  @Test
+  public void testIpAuthority() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://127.0.0.1", "myfs://127.0.0.1:123");
+    verifyPaths(fs, authorities, -1, false);
+    verifyPaths(fs, authorities, 123, false);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, true);
+    verifyPaths(fs, ips, 123, true);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  @Test
+  public void testIpAuthorityWithDefaultPort() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://127.0.0.1:123", "myfs://127.0.0.1:123");
+    verifyPaths(fs, authorities, -1, false);
+    verifyPaths(fs, authorities, 123, false);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, true);
+    verifyPaths(fs, ips, 123, true);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  @Test
+  public void testIpAuthorityWithOtherPort() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://127.0.0.1:456", "myfs://127.0.0.1:456");
+    verifyPaths(fs, authorities, -1, false);
+    verifyPaths(fs, authorities, 123, false);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, true);
+  }
+
+  // bad stuff
+
+  @Test
+  public void testMismatchedSchemes() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs2://simple", "myfs2://simple:123");
+    verifyPaths(fs, authorities, -1, false);
+    verifyPaths(fs, authorities, 123, false);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  @Test
+  public void testMismatchedHosts() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs://simple", "myfs://simple:123");
+    verifyPaths(fs, authorities, -1, false);
+    verifyPaths(fs, authorities, 123, false);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  @Test
+  public void testNullAuthority() throws Exception {
+    FileSystem fs = getVerifiedFS("myfs:///", "myfs:///");
+    verifyPaths(fs, new String[]{ "myfs://" }, -1, true);
+    verifyPaths(fs, authorities, -1, false);
+    verifyPaths(fs, authorities, 123, false);
+    verifyPaths(fs, authorities, 456, false);
+    verifyPaths(fs, ips, -1, false);
+    verifyPaths(fs, ips, 123, false);
+    verifyPaths(fs, ips, 456, false);
+  }
+
+  @Test
+  public void testAuthorityFromDefaultFS() throws Exception {
+    Configuration config = new Configuration();
+
+    FileSystem fs = getVerifiedFS("myfs://host", "myfs://host.a.b:123", config);
+    verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
+
+    config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "myfs://host");
+    verifyPaths(fs, new String[]{ "myfs://" }, -1, true);
+
+    config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "myfs2://host");
+    verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
+
+    config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "myfs://host:123");
+    verifyPaths(fs, new String[]{ "myfs://" }, -1, true);
+
+    config.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "myfs://host:456");
+    verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
+  }
+
+  FileSystem getVerifiedFS(String authority, String canonical) throws Exception {
+    return getVerifiedFS(authority, canonical, new Configuration());
+  }
+
+  // create a fs from the authority, then check its uri against the given uri
+  // and the canonical.  then try to fetch paths using the canonical
+  FileSystem getVerifiedFS(String authority, String canonical, Configuration conf)
+  throws Exception {
+    URI uri = URI.create(authority);
+    URI canonicalUri = URI.create(canonical);
+
+    FileSystem fs = new DummyFileSystem(uri, conf);
+    assertEquals(uri, fs.getUri());
+    assertEquals(canonicalUri, fs.getCanonicalUri());
+    verifyCheckPath(fs, "/file", true);
+    return fs;
+  }
+
+  void verifyPaths(FileSystem fs, String[] uris, int port, boolean shouldPass) {
+    for (String uri : uris) {
+      if (port != -1) uri += ":"+port;
+      verifyCheckPath(fs, uri+"/file", shouldPass);
+    }
+  }
+
+  void verifyCheckPath(FileSystem fs, String path, boolean shouldPass) {
+    Path rawPath = new Path(path);
+    Path fqPath = null;
+    Exception e = null;
+    try {
+      fqPath = fs.makeQualified(rawPath);
+    } catch (IllegalArgumentException iae) {
+      e = iae;
+    }
+    if (shouldPass) {
+      assertEquals(null, e);
+      String pathAuthority = rawPath.toUri().getAuthority();
+      if (pathAuthority == null) {
+        pathAuthority = fs.getUri().getAuthority();
+      }
+      assertEquals(pathAuthority, fqPath.toUri().getAuthority());
+    } else {
+      assertNotNull("did not fail", e);
+      assertEquals("Wrong FS: "+rawPath+", expected: "+fs.getUri(),
+          e.getMessage());
+    }
+  }
+
+  static class DummyFileSystem extends FileSystem {
+    URI uri;
+    static int defaultPort = 123;
+
+    DummyFileSystem(URI uri, Configuration conf) throws IOException {
+      this.uri = uri;
+      setConf(conf);
+    }
+
+    @Override
+    public URI getUri() {
+      return uri;
+    }
+
+    @Override
+    protected int getDefaultPort() {
+      return defaultPort;
+    }
+
+    @Override
+    public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+      throw new IOException("not supposed to be here");
+    }
+
+    @Override
+    public FSDataOutputStream create(Path f, FsPermission permission,
+        boolean overwrite, int bufferSize, short replication, long blockSize,
+        Progressable progress) throws IOException {
+      throw new IOException("not supposed to be here");
+    }
+
+    @Override
+    public FSDataOutputStream append(Path f, int bufferSize,
+        Progressable progress) throws IOException {
+      throw new IOException("not supposed to be here");
+    }
+
+    @Override
+    public boolean rename(Path src, Path dst) throws IOException {
+      throw new IOException("not supposed to be here");
+    }
+
+    @Override
+    public boolean delete(Path f) throws IOException {
+      throw new IOException("not supposed to be here");
+    }
+
+    @Override
+    public boolean delete(Path f, boolean recursive) throws IOException {
supposed to be here"); + } + + @Override + public FileStatus[] listStatus(Path f) throws IOException { + throw new IOException("not supposed to be here"); + } + + @Override + public void setWorkingDirectory(Path new_dir) { + } + + @Override + public Path getWorkingDirectory() { + return new Path("/"); + } + + @Override + public boolean mkdirs(Path f, FsPermission permission) throws IOException { + throw new IOException("not supposed to be here"); + } + + @Override + public FileStatus getFileStatus(Path f) throws IOException { + throw new IOException("not supposed to be here"); + } + } } Modified: hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/net/TestNetUtils.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/net/TestNetUtils.java?rev=1190089&r1=1190088&r2=1190089&view=diff ============================================================================== --- hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/net/TestNetUtils.java (original) +++ hadoop/common/branches/branch-0.20-security/src/test/org/apache/hadoop/net/TestNetUtils.java Thu Oct 27 23:27:04 2011 @@ -17,25 +17,25 @@ */ package org.apache.hadoop.net; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; -import java.net.InetAddress; -import java.net.Socket; import java.net.ConnectException; +import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.Socket; import java.net.SocketException; +import java.net.URI; import java.net.UnknownHostException; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.net.NetUtils.QualifiedHostResolver; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; public class TestNetUtils { @@ -70,21 +70,18 @@ public class TestNetUtils { } } - static TestNetUtilsResolver resolver; + static NetUtilsTestResolver resolver; + static Configuration config; @BeforeClass public static void setupResolver() { - resolver = new TestNetUtilsResolver(); - resolver.setSearchDomains("a.b", "b", "c"); - resolver.addResolvedHost("host.a.b.", "1.1.1.1"); - resolver.addResolvedHost("b-host.b.", "2.2.2.2"); - resolver.addResolvedHost("simple.", "3.3.3.3"); - NetUtils.setHostResolver(resolver); + resolver = NetUtilsTestResolver.install(); } @Before public void resetResolver() { resolver.reset(); + config = new Configuration(); } // getByExactName @@ -243,37 +240,79 @@ public class TestNetUtils { } // - - static class TestNetUtilsResolver extends QualifiedHostResolver { - Map resolvedHosts = new HashMap(); - List hostSearches = new LinkedList(); - void addResolvedHost(String host, String ip) { - InetAddress addr; - try { - addr = InetAddress.getByName(ip); - addr = InetAddress.getByAddress(host, addr.getAddress()); - } catch (UnknownHostException e) { - throw new IllegalArgumentException("not an ip:"+ip); - } - resolvedHosts.put(host, addr); - } + @Test + public void testCanonicalUriWithPort() { + URI uri; + + uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123"), 456); + assertEquals("scheme://host.a.b:123", uri.toString()); + + 
+    uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123/"), 456);
+    assertEquals("scheme://host.a.b:123/", uri.toString());
+
+    uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123/path"), 456);
+    assertEquals("scheme://host.a.b:123/path", uri.toString());
+
+    uri = NetUtils.getCanonicalUri(URI.create("scheme://host:123/path?q#frag"), 456);
+    assertEquals("scheme://host.a.b:123/path?q#frag", uri.toString());
+  }
+
+  @Test
+  public void testCanonicalUriWithDefaultPort() {
+    URI uri;
-    InetAddress getInetAddressByName(String host) throws UnknownHostException {
-      hostSearches.add(host);
-      if (!resolvedHosts.containsKey(host)) {
-        throw new UnknownHostException(host);
-      }
-      return resolvedHosts.get(host);
-    }
+    uri = NetUtils.getCanonicalUri(URI.create("scheme://host"), 123);
+    assertEquals("scheme://host.a.b:123", uri.toString());
-    String[] getHostSearches() {
-      return hostSearches.toArray(new String[0]);
-    }
+    uri = NetUtils.getCanonicalUri(URI.create("scheme://host/"), 123);
+    assertEquals("scheme://host.a.b:123/", uri.toString());
-    void reset() {
-      hostSearches.clear();
-    }
+    uri = NetUtils.getCanonicalUri(URI.create("scheme://host/path"), 123);
+    assertEquals("scheme://host.a.b:123/path", uri.toString());
+
+    uri = NetUtils.getCanonicalUri(URI.create("scheme://host/path?q#frag"), 123);
+    assertEquals("scheme://host.a.b:123/path?q#frag", uri.toString());
+  }
+
+  @Test
+  public void testCanonicalUriWithPath() {
+    URI uri;
+
+    uri = NetUtils.getCanonicalUri(URI.create("path"), 2);
+    assertEquals("path", uri.toString());
+
+    uri = NetUtils.getCanonicalUri(URI.create("/path"), 2);
+    assertEquals("/path", uri.toString());
+  }
+
+  @Test
+  public void testCanonicalUriWithNoAuthority() {
+    URI uri;
+
+    uri = NetUtils.getCanonicalUri(URI.create("scheme:/"), 2);
+    assertEquals("scheme:/", uri.toString());
+
+    uri = NetUtils.getCanonicalUri(URI.create("scheme:/path"), 2);
+    assertEquals("scheme:/path", uri.toString());
+
+    uri = NetUtils.getCanonicalUri(URI.create("scheme:///"), 2);
+    assertEquals("scheme:///", uri.toString());
+
+    uri = NetUtils.getCanonicalUri(URI.create("scheme:///path"), 2);
+    assertEquals("scheme:///path", uri.toString());
+  }
+
+  @Test
+  public void testCanonicalUriWithNoHost() {
+    URI uri = NetUtils.getCanonicalUri(URI.create("scheme://:123/path"), 2);
+    assertEquals("scheme://:123/path", uri.toString());
+  }
+
+  @Test
+  public void testCanonicalUriWithNoPortNoDefaultPort() {
+    URI uri = NetUtils.getCanonicalUri(URI.create("scheme://host/path"), -1);
+    assertEquals("scheme://host.a.b/path", uri.toString());
   }
 
   private <T> void assertBetterArrayEquals(T[] expect, T[]got) {