hadoop-hdfs-commits mailing list archives

From: a..@apache.org
Subject: svn commit: r1536921 [2/2] - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/main/java/org/apache/hadoop/hdfs/tools/ src/main/java/org/apache/ha...
Date: Tue, 29 Oct 2013 22:44:35 GMT
Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java?rev=1536921&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java
(added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpFileSystem.java
Tue Oct 29 22:44:34 2013
@@ -0,0 +1,432 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.Random;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.util.ServletUtil;
+import org.apache.log4j.Level;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestHftpFileSystem {
+  private static final Random RAN = new Random();
+
+  private static Configuration config = null;
+  private static MiniDFSCluster cluster = null;
+  private static String blockPoolId = null;
+  private static String hftpUri = null;
+  private FileSystem hdfs = null;
+  private HftpFileSystem hftpFs = null;
+
+  private static final Path[] TEST_PATHS = new Path[] {
+      // URI does not encode, Request#getPathInfo returns /foo
+      new Path("/foo;bar"),
+
+      // URI does not encode, Request#getPathInfo returns verbatim
+      new Path("/foo+"),
+      new Path("/foo+bar/foo+bar"),
+      new Path("/foo=bar/foo=bar"),
+      new Path("/foo,bar/foo,bar"),
+      new Path("/foo@bar/foo@bar"),
+      new Path("/foo&bar/foo&bar"),
+      new Path("/foo$bar/foo$bar"),
+      new Path("/foo_bar/foo_bar"),
+      new Path("/foo~bar/foo~bar"),
+      new Path("/foo.bar/foo.bar"),
+      new Path("/foo../bar/foo../bar"),
+      new Path("/foo.../bar/foo.../bar"),
+      new Path("/foo'bar/foo'bar"),
+      new Path("/foo#bar/foo#bar"),
+      new Path("/foo!bar/foo!bar"),
+      // HDFS file names may not contain ":"
+
+      // URI percent encodes, Request#getPathInfo decodes
+      new Path("/foo bar/foo bar"),
+      new Path("/foo?bar/foo?bar"),
+      new Path("/foo\">bar/foo\">bar"),
+    };
+
+  @BeforeClass
+  public static void setUp() throws IOException {
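+    // Log everything the hftp client does; useful when a test fails.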
+    ((Log4JLogger)HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);
+
+    final long seed = RAN.nextLong();
+    System.out.println("seed=" + seed);
+    RAN.setSeed(seed);
+
+    config = new Configuration();
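+    // Two datanodes, so the redirect tests have more than one possible target.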
+    cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
+    blockPoolId = cluster.getNamesystem().getBlockPoolId();
+    hftpUri =
+      "hftp://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Before
+  public void initFileSystems() throws IOException {
+    hdfs = cluster.getFileSystem();
+    hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(config);
+    // clear out the namespace
+    for (FileStatus stat : hdfs.listStatus(new Path("/"))) {
+      hdfs.delete(stat.getPath(), true);
+    }
+  }
+
+  @After
+  public void resetFileSystems() throws IOException {
+    FileSystem.closeAll();
+  }
+
+  /**
+   * Test file creation and access with file names that need encoding.
+   */
+  @Test
+  public void testFileNameEncoding() throws IOException, URISyntaxException {
+    for (Path p : TEST_PATHS) {
+      // Create and access the path (data and streamFile servlets)
+      FSDataOutputStream out = hdfs.create(p, true);
+      out.writeBytes("0123456789");
+      out.close();
+      FSDataInputStream in = hftpFs.open(p);
+      assertEquals('0', in.read());
+      in.close();
+
+      // Check the file status matches the path. Hftp returns a FileStatus
+      // with the entire URI, extract the path part.
+      assertEquals(p, new Path(hftpFs.getFileStatus(p).getPath().toUri().getPath()));
+
+      // Test list status (listPath servlet)
+      assertEquals(1, hftpFs.listStatus(p).length);
+
+      // Test content summary (contentSummary servlet)
+      assertNotNull("No content summary", hftpFs.getContentSummary(p));
+
+      // Test checksums (fileChecksum and getFileChecksum servlets)
+      assertNotNull("No file checksum", hftpFs.getFileChecksum(p));
+    }
+  }
+
+  private void testDataNodeRedirect(Path path) throws IOException {
+    // Create the file
+    if (hdfs.exists(path)) {
+      hdfs.delete(path, true);
+    }
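+    // Write with a single replica so exactly one datanode holds the block.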
+    FSDataOutputStream out = hdfs.create(path, (short)1);
+    out.writeBytes("0123456789");
+    out.close();
+
+    // Get the path's block location so we can determine
+    // if we were redirected to the right DN.
+    BlockLocation[] locations =
+        hdfs.getFileBlockLocations(path, 0, 10);
+    String xferAddr = locations[0].getNames()[0];
+
+    // Connect to the NN to get redirected
+    URL u = hftpFs.getNamenodeURL(
+        "/data" + ServletUtil.encodePath(path.toUri().getPath()),
+        "ugi=userx,groupy");
+    HttpURLConnection conn = (HttpURLConnection)u.openConnection();
+    HttpURLConnection.setFollowRedirects(true);
+    conn.connect();
+    conn.getInputStream();
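+    // Redirects are followed, so conn.getURL() now reflects the datanode
+    // that actually served the data.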
+
+    boolean checked = false;
+    // Find the datanode that has the block according to locations
+    // and check that the URL was redirected to this DN's info port
+    for (DataNode node : cluster.getDataNodes()) {
+      DatanodeRegistration dnR =
+        DataNodeTestUtils.getDNRegistrationForBP(node, blockPoolId);
+      if (dnR.getXferAddr().equals(xferAddr)) {
+        checked = true;
+        assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
+      }
+    }
+    assertTrue("The test never checked that location of " +
+               "the block and hftp desitnation are the same", checked);
+  }
+
+  /**
+   * Test that clients are redirected to the appropriate DN.
+   */
+  @Test
+  public void testDataNodeRedirect() throws IOException {
+    for (Path p : TEST_PATHS) {
+      testDataNodeRedirect(p);
+    }
+  }
+
+  /**
+   * Tests getPos() functionality.
+   */
+  @Test
+  public void testGetPos() throws IOException {
+    final Path testFile = new Path("/testfile+1");
+    // Write a test file.
+    FSDataOutputStream out = hdfs.create(testFile, true);
+    out.writeBytes("0123456789");
+    out.close();
+
+    FSDataInputStream in = hftpFs.open(testFile);
+
+    // Test read().
+    for (int i = 0; i < 5; ++i) {
+      assertEquals(i, in.getPos());
+      in.read();
+    }
+
+    // Test read(b, off, len).
+    assertEquals(5, in.getPos());
+    byte[] buffer = new byte[10];
+    assertEquals(2, in.read(buffer, 0, 2));
+    assertEquals(7, in.getPos());
+
+    // Test read(b).
+    int bytesRead = in.read(buffer);
+    assertEquals(7 + bytesRead, in.getPos());
+
+    // Test EOF.
+    for (int i = 0; i < 100; ++i) {
+      in.read();
+    }
+    assertEquals(10, in.getPos());
+    in.close();
+  }
+
+  /**
+   * Tests seek().
+   */
+  @Test
+  public void testSeek() throws IOException {
+    final Path testFile = new Path("/testfile+1");
+    FSDataOutputStream out = hdfs.create(testFile, true);
+    out.writeBytes("0123456789");
+    out.close();
+    FSDataInputStream in = hftpFs.open(testFile);
+    in.seek(7);
+    assertEquals('7', in.read());
+    in.close();
+  }
+
+  @Test
+  public void testReadClosedStream() throws IOException {
+    final Path testFile = new Path("/testfile+2");
+    FSDataOutputStream os = hdfs.create(testFile, true);
+    os.writeBytes("0123456789");
+    os.close();
+
+    // ByteRangeInputStream delays opening the connection until the first
+    // read.  Make sure it doesn't open a closed stream that was never opened.
+    FSDataInputStream in = hftpFs.open(testFile);
+    in.close();
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+
+    // force the stream to connect and then close it
+    in = hftpFs.open(testFile);
+    int ch = in.read();
+    assertEquals('0', ch);
+    in.close();
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+
+    // make sure seeking doesn't automagically reopen the stream
+    in.seek(4);
+    checkClosedStream(in);
+    checkClosedStream(in.getWrappedStream());
+  }
+
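+  // A closed stream must throw "Stream closed" on read rather than
+  // transparently reopening the underlying HTTP connection.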
+  private void checkClosedStream(InputStream is) {
+    IOException ioe = null;
+    try {
+      is.read();
+    } catch (IOException e) {
+      ioe = e;
+    }
+    assertNotNull("No exception on closed read", ioe);
+    assertEquals("Stream closed", ioe.getMessage());
+  }
+
+  @Test
+  public void testHftpDefaultPorts() throws IOException {
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hftp://localhost");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
+
+    assertEquals(uri, fs.getUri());
+
+    // HFTP uses http to get the token so canonical service name should
+    // return the http port.
+    assertEquals(
+        "127.0.0.1:" + DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT,
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHftpCustomDefaultPorts() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+
+    URI uri = URI.create("hftp://localhost");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(123, fs.getDefaultPort());
+
+    assertEquals(uri, fs.getUri());
+
+    // HFTP uses http to get the token so canonical service name should
+    // return the http port.
+    assertEquals(
+        "127.0.0.1:123",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHftpCustomUriPortWithDefaultPorts() throws IOException {
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hftp://localhost:123");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:123",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+
+    URI uri = URI.create("hftp://localhost:789");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(123, fs.getDefaultPort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:789",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  // The same port tests as above, repeated for HsftpFileSystem over https.
+
+  @Test
+  public void testHsftpDefaultPorts() throws IOException {
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hsftp://localhost");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomDefaultPorts() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
+
+    URI uri = URI.create("hsftp://localhost");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(456, fs.getDefaultPort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:456",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hsftp://localhost:123");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:123",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY, 123);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 456);
+
+    URI uri = URI.create("hsftp://localhost:789");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(456, fs.getDefaultPort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:789",
+        fs.getCanonicalServiceName()
+    );
+  }
+}

Added: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpURLTimeouts.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpURLTimeouts.java?rev=1536921&view=auto
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpURLTimeouts.java
(added)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHftpURLTimeouts.java
Tue Oct 29 22:44:34 2013
@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.InetAddress;
+import java.net.ServerSocket;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.junit.Test;
+
+public class TestHftpURLTimeouts {
+
+  @Test
+  public void testHftpSocketTimeout() throws Exception {
+    Configuration conf = new Configuration();
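+    // A server socket with a backlog of 1 that never accepts: the first
+    // connection fills the backlog and subsequent I/O hangs until timeout.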
+    ServerSocket socket = new ServerSocket(0, 1);
+    URI uri = new URI("hftp", null,
+        InetAddress.getByName(null).getHostAddress(),
+        socket.getLocalPort(),
+        null, null, null);
+
+    HftpFileSystem fs = (HftpFileSystem)FileSystem.get(uri, conf);
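+    // Swap in a 5 ms socket timeout so connects and reads fail fast.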
+    fs.connectionFactory = new URLConnectionFactory(5);
+
+    boolean timedout = false;
+    try {
+      HttpURLConnection conn = fs.openConnection("/", "");
+      try {
+        // this will consume the only slot in the backlog
+        conn.getInputStream();
+      } catch (SocketTimeoutException ste) {
+        timedout = true;
+        assertEquals("Read timed out", ste.getMessage());
+      } finally {
+        if (conn != null) conn.disconnect();
+      }
+      assertTrue("read timedout", timedout);
+      assertTrue("connect timedout", checkConnectTimeout(fs, false));
+    } finally {
+      fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+      fs.close();
+      socket.close();
+    }
+  }
+
+  @Test
+  public void testHsftpSocketTimeout() throws Exception {
+    Configuration conf = new Configuration();
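+    // Same never-accepting server socket setup as the hftp test, over https.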
+    ServerSocket socket = new ServerSocket(0, 1);
+    URI uri = new URI("hsftp", null,
+        InetAddress.getByName(null).getHostAddress(),
+        socket.getLocalPort(),
+        null, null, null);
+    boolean timedout = false;
+
+    HsftpFileSystem fs = (HsftpFileSystem)FileSystem.get(uri, conf);
+    fs.connectionFactory = new URLConnectionFactory(5);
+
+    try {
+      HttpURLConnection conn = null;
+      try {
+        // this will consume the only slot in the backlog
+        conn = fs.openConnection("/", "");
+      } catch (SocketTimeoutException ste) {
+        // SSL expects a negotiation, so it will time out on read, unlike hftp
+        timedout = true;
+        assertEquals("Read timed out", ste.getMessage());
+      } finally {
+        if (conn != null) conn.disconnect();
+      }
+      assertTrue("ssl read connect timedout", timedout);
+      assertTrue("connect timedout", checkConnectTimeout(fs, true));
+    } finally {
+      fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
+      fs.close();
+      socket.close();
+    }
+  }
+
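+  // Opens connections until one fails with "connect timed out"; returns
+  // whether a connect timeout was observed.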
+  private boolean checkConnectTimeout(HftpFileSystem fs, boolean ignoreReadTimeout)
+      throws IOException {
+    boolean timedout = false;
+    List<HttpURLConnection> conns = new LinkedList<HttpURLConnection>();
+    try {
+      // With a listen backlog of 1, one connection should be enough to
+      // trigger a connect timeout.  However, Linux doesn't strictly honor
+      // the socket's listen backlog, so we have to try a number of times.
+      for (int n=32; !timedout && n > 0; n--) {
+        try {
+          conns.add(fs.openConnection("/", ""));
+        } catch (SocketTimeoutException ste) {
+          String message = ste.getMessage();
+          assertNotNull(message);
+          // https will get a read timeout due to SSL negotiation, but
+          // a normal http will not, so need to ignore SSL read timeouts
+          // until a connect timeout occurs
+          if (!(ignoreReadTimeout && "Read timed out".equals(message))) {
+            timedout = true;
+            assertEquals("connect timed out", message);
+          }
+        }
+      }
+    } finally {
+      for (HttpURLConnection conn : conns) {
+        conn.disconnect();
+      }
+    }
+    return timedout;
+  }
+}

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java?rev=1536921&r1=1536920&r2=1536921&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
(original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenRemoteFetcher.java
Tue Oct 29 22:44:34 2013
@@ -34,9 +34,9 @@ import java.util.concurrent.Executors;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
+import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;


