hadoop-common-commits mailing list archives

From: whe...@apache.org
Subject: [3/4] hadoop git commit: HDFS-9168. Move client side unit test to hadoop-hdfs-client. Contributed by Haohui Mai.
Date: Wed, 28 Oct 2015 22:56:41 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
new file mode 100644
index 0000000..36fd821
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestTokenAspect.java
@@ -0,0 +1,316 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DelegationTokenRenewer;
+import org.apache.hadoop.fs.DelegationTokenRenewer.RenewAction;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
+
+public class TestTokenAspect {
+
+  private static class DummyFs extends FileSystem implements
+      DelegationTokenRenewer.Renewable, TokenAspect.TokenManagementDelegator {
+
+    private static final Text TOKEN_KIND = new Text("DummyFS Token");
+    private boolean emulateSecurityEnabled;
+    private TokenAspect<DummyFs> tokenAspect;
+    private final UserGroupInformation ugi = UserGroupInformation
+        .createUserForTesting("foo", new String[] { "bar" });
+    private URI uri;
+
+    @Override
+    public FSDataOutputStream append(Path f, int bufferSize,
+        Progressable progress) throws IOException {
+      return null;
+    }
+
+    @Override
+    public void cancelDelegationToken(Token<?> token) throws IOException {
+    }
+
+    @Override
+    public FSDataOutputStream create(Path f, FsPermission permission,
+        boolean overwrite, int bufferSize, short replication, long blockSize,
+        Progressable progress) throws IOException {
+      return null;
+    }
+
+    @Override
+    public boolean delete(Path f, boolean recursive) throws IOException {
+      return false;
+    }
+
+    @Override
+    public URI getCanonicalUri() {
+      return super.getCanonicalUri();
+    }
+
+    @Override
+    public FileStatus getFileStatus(Path f) throws IOException {
+      return null;
+    }
+
+    @Override
+    public Token<?> getRenewToken() {
+      return null;
+    }
+
+    @Override
+    public URI getUri() {
+      return uri;
+    }
+
+    @Override
+    public Path getWorkingDirectory() {
+      return null;
+    }
+
+    @Override
+    public void initialize(URI name, Configuration conf) throws IOException {
+      super.initialize(name, conf);
+      setConf(conf);
+      this.uri = URI.create(name.getScheme() + "://" + name.getAuthority());
+      tokenAspect = new TokenAspect<DummyFs>(this,
+          SecurityUtil.buildTokenService(uri), TOKEN_KIND);
+      if (emulateSecurityEnabled || UserGroupInformation.isSecurityEnabled()) {
+        tokenAspect.initDelegationToken(ugi);
+      }
+    }
+
+    @Override
+    public FileStatus[] listStatus(Path f) throws FileNotFoundException,
+        IOException {
+      return null;
+    }
+
+    @Override
+    public boolean mkdirs(Path f, FsPermission permission) throws IOException {
+      return false;
+    }
+
+    @Override
+    public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+      return null;
+    }
+
+    @Override
+    public boolean rename(Path src, Path dst) throws IOException {
+      return false;
+    }
+
+    @Override
+    public long renewDelegationToken(Token<?> token) throws IOException {
+      return 0;
+    }
+
+    @Override
+    public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
+    }
+
+    @Override
+    public void setWorkingDirectory(Path new_dir) {
+    }
+  }
+
+  private static RenewAction<?> getActionFromTokenAspect(
+      TokenAspect<DummyFs> tokenAspect) {
+    return (RenewAction<?>) Whitebox.getInternalState(tokenAspect, "action");
+  }
+
+  @Test
+  public void testCachedInitialization() throws IOException, URISyntaxException {
+    Configuration conf = new Configuration();
+    DummyFs fs = spy(new DummyFs());
+    Token<TokenIdentifier> token = new Token<TokenIdentifier>(new byte[0],
+        new byte[0], DummyFs.TOKEN_KIND, new Text("127.0.0.1:1234"));
+
+    doReturn(token).when(fs).getDelegationToken(anyString());
+    doReturn(token).when(fs).getRenewToken();
+
+    fs.emulateSecurityEnabled = true;
+    fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
+
+    fs.tokenAspect.ensureTokenInitialized();
+    verify(fs, times(1)).getDelegationToken(null);
+    verify(fs, times(1)).setDelegationToken(token);
+
+    // For the second iteration, the token should be cached.
+    fs.tokenAspect.ensureTokenInitialized();
+    verify(fs, times(1)).getDelegationToken(null);
+    verify(fs, times(1)).setDelegationToken(token);
+  }
+
+  @Test
+  public void testGetRemoteToken() throws IOException, URISyntaxException {
+    Configuration conf = new Configuration();
+    DummyFs fs = spy(new DummyFs());
+    Token<TokenIdentifier> token = new Token<TokenIdentifier>(new byte[0],
+        new byte[0], DummyFs.TOKEN_KIND, new Text("127.0.0.1:1234"));
+
+    doReturn(token).when(fs).getDelegationToken(anyString());
+    doReturn(token).when(fs).getRenewToken();
+
+    fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
+
+    fs.tokenAspect.ensureTokenInitialized();
+
+    // Select a token, store and renew it
+    verify(fs).setDelegationToken(token);
+    assertNotNull(Whitebox.getInternalState(fs.tokenAspect, "dtRenewer"));
+    assertNotNull(Whitebox.getInternalState(fs.tokenAspect, "action"));
+  }
+
+  @Test
+  public void testGetRemoteTokenFailure() throws IOException,
+      URISyntaxException {
+    Configuration conf = new Configuration();
+    DummyFs fs = spy(new DummyFs());
+    IOException e = new IOException();
+    doThrow(e).when(fs).getDelegationToken(anyString());
+
+    fs.emulateSecurityEnabled = true;
+    fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
+    try {
+      fs.tokenAspect.ensureTokenInitialized();
+    } catch (IOException exc) {
+      assertEquals(e, exc);
+    }
+  }
+
+  @Test
+  public void testInitWithNoTokens() throws IOException, URISyntaxException {
+    Configuration conf = new Configuration();
+    DummyFs fs = spy(new DummyFs());
+    doReturn(null).when(fs).getDelegationToken(anyString());
+    fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
+
+    fs.tokenAspect.ensureTokenInitialized();
+
+    // No token will be selected.
+    verify(fs, never()).setDelegationToken(
+        Mockito.<Token<? extends TokenIdentifier>> any());
+  }
+
+  @Test
+  public void testInitWithUGIToken() throws IOException, URISyntaxException {
+    Configuration conf = new Configuration();
+    DummyFs fs = spy(new DummyFs());
+    doReturn(null).when(fs).getDelegationToken(anyString());
+
+    Token<TokenIdentifier> token = new Token<TokenIdentifier>(new byte[0],
+        new byte[0], DummyFs.TOKEN_KIND, new Text("127.0.0.1:1234"));
+    fs.ugi.addToken(token);
+    fs.ugi.addToken(new Token<TokenIdentifier>(new byte[0], new byte[0],
+        new Text("Other token"), new Text("127.0.0.1:8021")));
+    assertEquals("wrong tokens in user", 2, fs.ugi.getTokens().size());
+
+    fs.emulateSecurityEnabled = true;
+    fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf);
+    fs.tokenAspect.ensureTokenInitialized();
+
+    // Select a token from ugi (not from the remote host), store it but don't
+    // renew it
+    verify(fs).setDelegationToken(token);
+    verify(fs, never()).getDelegationToken(anyString());
+    assertNull(Whitebox.getInternalState(fs.tokenAspect, "dtRenewer"));
+    assertNull(Whitebox.getInternalState(fs.tokenAspect, "action"));
+  }
+
+  @Test
+  public void testRenewal() throws Exception {
+    Configuration conf = new Configuration();
+    Token<?> token1 = mock(Token.class);
+    Token<?> token2 = mock(Token.class);
+    final long renewCycle = 100;
+    DelegationTokenRenewer.renewCycle = renewCycle;
+
+    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("foo",
+        new String[] { "bar" });
+    DummyFs fs = spy(new DummyFs());
+
+    doReturn(token1).doReturn(token2).when(fs).getDelegationToken(null);
+    doReturn(token1).when(fs).getRenewToken();
+    // cause token renewer to abandon the token
+    doThrow(new IOException("renew failed")).when(token1).renew(conf);
+    doThrow(new IOException("get failed")).when(fs).addDelegationTokens(null,
+        null);
+
+    final URI uri = new URI("dummyfs://127.0.0.1:1234");
+    TokenAspect<DummyFs> tokenAspect = new TokenAspect<DummyFs>(fs,
+        SecurityUtil.buildTokenService(uri), DummyFs.TOKEN_KIND);
+    fs.initialize(uri, conf);
+    tokenAspect.initDelegationToken(ugi);
+
+    // trigger token acquisition
+    tokenAspect.ensureTokenInitialized();
+    DelegationTokenRenewer.RenewAction<?> action = getActionFromTokenAspect(tokenAspect);
+    verify(fs).setDelegationToken(token1);
+    assertTrue(action.isValid());
+
+    // upon renewal, token will go bad based on above stubbing
+    Thread.sleep(renewCycle * 2);
+    assertSame(action, getActionFromTokenAspect(tokenAspect));
+    assertFalse(action.isValid());
+
+    // now that token is invalid, should get a new one
+    tokenAspect.ensureTokenInitialized();
+    verify(fs, times(2)).getDelegationToken(anyString());
+    verify(fs).setDelegationToken(token2);
+    assertNotSame(action, getActionFromTokenAspect(tokenAspect));
+
+    action = getActionFromTokenAspect(tokenAspect);
+    assertTrue(action.isValid());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
new file mode 100644
index 0000000..997e9ca
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestURLConnectionFactory.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.util.List;
+
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public final class TestURLConnectionFactory {
+
+  @Test
+  public void testConnConfiguratior() throws IOException {
+    final URL u = new URL("http://localhost");
+    final List<HttpURLConnection> conns = Lists.newArrayList();
+    URLConnectionFactory fc = new URLConnectionFactory(new ConnectionConfigurator() {
+      @Override
+      public HttpURLConnection configure(HttpURLConnection conn)
+          throws IOException {
+        Assert.assertEquals(u, conn.getURL());
+        conns.add(conn);
+        return conn;
+      }
+    });
+
+    fc.openConnection(u);
+    Assert.assertEquals(1, conns.size());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
new file mode 100644
index 0000000..032fff0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSOAuth2.java
@@ -0,0 +1,217 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.web.oauth2.ConfCredentialBasedAccessTokenProvider;
+import org.apache.hadoop.hdfs.web.oauth2.CredentialBasedAccessTokenProvider;
+import org.apache.hadoop.hdfs.web.oauth2.OAuth2ConnectionConfigurator;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockserver.client.server.MockServerClient;
+import org.mockserver.integration.ClientAndServer;
+import org.mockserver.model.Header;
+import org.mockserver.model.HttpRequest;
+import org.mockserver.model.HttpResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ACCESS_TOKEN_PROVIDER_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
+import static org.junit.Assert.assertEquals;
+import static org.mockserver.integration.ClientAndServer.startClientAndServer;
+import static org.mockserver.matchers.Times.exactly;
+import static org.mockserver.model.HttpRequest.request;
+import static org.mockserver.model.HttpResponse.response;
+
+public class TestWebHDFSOAuth2 {
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestWebHDFSOAuth2.class);
+
+  private ClientAndServer mockWebHDFS;
+  private ClientAndServer mockOAuthServer;
+
+  public final static int WEBHDFS_PORT = 7552;
+  public final static int OAUTH_PORT = 7553;
+
+  public final static Header CONTENT_TYPE_APPLICATION_JSON = new Header("Content-Type", "application/json");
+
+  public final static String AUTH_TOKEN = "0123456789abcdef";
+  public final static Header AUTH_TOKEN_HEADER = new Header("AUTHORIZATION", OAuth2ConnectionConfigurator.HEADER + AUTH_TOKEN);
+
+  @Before
+  public void startMockOAuthServer() {
+    mockOAuthServer = startClientAndServer(OAUTH_PORT);
+  }
+  @Before
+  public void startMockWebHDFSServer() {
+    System.setProperty("hadoop.home.dir", System.getProperty("user.dir"));
+
+    mockWebHDFS = startClientAndServer(WEBHDFS_PORT);
+  }
+
+  @Test
+  public void listStatusReturnsAsExpected() throws URISyntaxException, IOException {
+    MockServerClient mockWebHDFSServerClient = new MockServerClient("localhost", WEBHDFS_PORT);
+    MockServerClient mockOAuthServerClient = new MockServerClient("localhost", OAUTH_PORT);
+
+    HttpRequest oauthServerRequest = getOAuthServerMockRequest(mockOAuthServerClient);
+
+    HttpRequest fileSystemRequest = request()
+        .withMethod("GET")
+        .withPath(WebHdfsFileSystem.PATH_PREFIX + "/test1/test2")
+        .withHeader(AUTH_TOKEN_HEADER);
+
+    try {
+      mockWebHDFSServerClient.when(fileSystemRequest,
+          exactly(1)
+      )
+          .respond(
+              response()
+                  .withStatusCode(HttpStatus.SC_OK)
+                  .withHeaders(
+                      CONTENT_TYPE_APPLICATION_JSON
+                  )
+                  .withBody("{\n" +
+                      "  \"FileStatuses\":\n" +
+                      "  {\n" +
+                      "    \"FileStatus\":\n" +
+                      "    [\n" +
+                      "      {\n" +
+                      "        \"accessTime\"      : 1320171722771,\n" +
+                      "        \"blockSize\"       : 33554432,\n" +
+                      "        \"group\"           : \"supergroup\",\n" +
+                      "        \"length\"          : 24930,\n" +
+                      "        \"modificationTime\": 1320171722771,\n" +
+                      "        \"owner\"           : \"webuser\",\n" +
+                      "        \"pathSuffix\"      : \"a.patch\",\n" +
+                      "        \"permission\"      : \"644\",\n" +
+                      "        \"replication\"     : 1,\n" +
+                      "        \"type\"            : \"FILE\"\n" +
+                      "      },\n" +
+                      "      {\n" +
+                      "        \"accessTime\"      : 0,\n" +
+                      "        \"blockSize\"       : 0,\n" +
+                      "        \"group\"           : \"supergroup\",\n" +
+                      "        \"length\"          : 0,\n" +
+                      "        \"modificationTime\": 1320895981256,\n" +
+                      "        \"owner\"           : \"szetszwo\",\n" +
+                      "        \"pathSuffix\"      : \"bar\",\n" +
+                      "        \"permission\"      : \"711\",\n" +
+                      "        \"replication\"     : 0,\n" +
+                      "        \"type\"            : \"DIRECTORY\"\n" +
+                      "      }\n" +
+                      "    ]\n" +
+                      "  }\n" +
+                      "}\n")
+          );
+
+      FileSystem fs = new WebHdfsFileSystem();
+      Configuration conf = getConfiguration();
+      conf.set(OAUTH_REFRESH_URL_KEY, "http://localhost:" + OAUTH_PORT + "/refresh");
+      conf.set(CredentialBasedAccessTokenProvider.OAUTH_CREDENTIAL_KEY, "credential");
+
+      URI uri = new URI("webhdfs://localhost:" + WEBHDFS_PORT);
+      fs.initialize(uri, conf);
+
+      FileStatus[] ls = fs.listStatus(new Path("/test1/test2"));
+
+      mockOAuthServer.verify(oauthServerRequest);
+      mockWebHDFSServerClient.verify(fileSystemRequest);
+
+      assertEquals(2, ls.length);
+      assertEquals("a.patch", ls[0].getPath().getName());
+      assertEquals("bar", ls[1].getPath().getName());
+
+      fs.close();
+    } finally {
+      mockWebHDFSServerClient.clear(fileSystemRequest);
+      mockOAuthServerClient.clear(oauthServerRequest);
+    }
+  }
+
+  private HttpRequest getOAuthServerMockRequest(MockServerClient mockServerClient) throws IOException {
+    HttpRequest expectedRequest = request()
+        .withMethod("POST")
+        .withPath("/refresh")
+        .withBody("client_secret=credential&grant_type=client_credentials&client_id=MY_CLIENTID");
+    
+    Map<String, Object> map = new TreeMap<>();
+    
+    map.put(EXPIRES_IN, "0987654321");
+    map.put(TOKEN_TYPE, "bearer");
+    map.put(ACCESS_TOKEN, AUTH_TOKEN);
+
+    ObjectMapper mapper = new ObjectMapper();
+    
+    HttpResponse resp = response()
+        .withStatusCode(HttpStatus.SC_OK)
+        .withHeaders(
+            CONTENT_TYPE_APPLICATION_JSON
+        )
+        .withBody(mapper.writeValueAsString(map));
+
+    mockServerClient
+        .when(expectedRequest, exactly(1))
+        .respond(resp);
+
+    return expectedRequest;
+  }
+
+  public Configuration getConfiguration() {
+    Configuration conf = new Configuration();
+
+    // Configs for OAuth2
+    conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY, true);
+    conf.set(OAUTH_CLIENT_ID_KEY, "MY_CLIENTID");
+
+    conf.set(ACCESS_TOKEN_PROVIDER_KEY,
+        ConfCredentialBasedAccessTokenProvider.class.getName());
+
+    return conf;
+
+  }
+
+  @After
+  public void stopMockWebHDFSServer() {
+      mockWebHDFS.stop();
+  }
+
+  @After
+  public void stopMockOAuthServer() {
+    mockOAuthServer.stop();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java
new file mode 100644
index 0000000..ba99f7a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.net.NetUtils;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+
+public class TestWebHdfsContentLength {
+  private static ServerSocket listenSocket;
+  private static String bindAddr;
+  private static Path p;
+  private static FileSystem fs;
+
+  private static final Pattern contentLengthPattern = Pattern.compile(
+      "^(Content-Length|Transfer-Encoding):\\s*(.*)", Pattern.MULTILINE);
+
+  private static String errResponse =
+      "HTTP/1.1 500 Boom\r\n" +
+      "Content-Length: 0\r\n" +
+      "Connection: close\r\n\r\n";
+  private static String redirectResponse;
+
+  private static ExecutorService executor;
+
+  @BeforeClass
+  public static void setup() throws IOException {
+    listenSocket = new ServerSocket();
+    listenSocket.bind(null);
+    bindAddr = NetUtils.getHostPortString(
+        (InetSocketAddress)listenSocket.getLocalSocketAddress());
+    redirectResponse =
+        "HTTP/1.1 307 Redirect\r\n" +
+        "Location: http://"+bindAddr+"/path\r\n" +
+        "Connection: close\r\n\r\n";
+
+    p = new Path("webhdfs://"+bindAddr+"/path");
+    fs = p.getFileSystem(new Configuration());
+    executor = Executors.newSingleThreadExecutor();    
+  }
+  
+  @AfterClass
+  public static void teardown() throws IOException {
+    if (listenSocket != null) {
+      listenSocket.close();
+    }
+    if (executor != null) {
+      executor.shutdownNow();
+    }
+  }
+  
+  @Test
+  public void testGetOp() throws Exception {
+    Future<String> future = contentLengthFuture(errResponse);
+    try {
+      fs.getFileStatus(p);
+      Assert.fail();
+    } catch (IOException ioe) {} // expected
+    Assert.assertEquals(null, getContentLength(future));
+  }
+
+  @Test
+  public void testGetOpWithRedirect() {
+    Future<String> future1 = contentLengthFuture(redirectResponse);
+    Future<String> future2 = contentLengthFuture(errResponse);
+    try {
+      fs.open(p).read();
+      Assert.fail();
+    } catch (IOException ioe) {} // expected
+    Assert.assertEquals(null, getContentLength(future1));
+    Assert.assertEquals(null, getContentLength(future2));
+  }
+  
+  @Test
+  public void testPutOp() {
+    Future<String> future = contentLengthFuture(errResponse);
+    try {
+      fs.mkdirs(p);
+      Assert.fail();
+    } catch (IOException ioe) {} // expected
+    Assert.assertEquals("0", getContentLength(future));
+  }
+
+  @Test
+  public void testPutOpWithRedirect() {
+    Future<String> future1 = contentLengthFuture(redirectResponse);
+    Future<String> future2 = contentLengthFuture(errResponse);
+    try {
+      FSDataOutputStream os = fs.create(p);
+      os.write(new byte[]{0});
+      os.close();
+      Assert.fail();
+    } catch (IOException ioe) {} // expected
+    Assert.assertEquals("0", getContentLength(future1));
+    Assert.assertEquals("chunked", getContentLength(future2));
+  }
+  
+  @Test
+  public void testPostOp() {  
+    Future<String> future = contentLengthFuture(errResponse);
+    try {
+      fs.concat(p, new Path[]{p});
+      Assert.fail();
+    } catch (IOException ioe) {} // expected
+    Assert.assertEquals("0", getContentLength(future));
+  }
+  
+  @Test
+  public void testPostOpWithRedirect() {
+    // POST operation with redirect
+    Future<String> future1 = contentLengthFuture(redirectResponse);
+    Future<String> future2 = contentLengthFuture(errResponse);
+    try {
+      FSDataOutputStream os = fs.append(p);
+      os.write(new byte[]{0});
+      os.close();
+      Assert.fail();
+    } catch (IOException ioe) {} // expected
+    Assert.assertEquals("0", getContentLength(future1));
+    Assert.assertEquals("chunked", getContentLength(future2));
+  }
+  
+  @Test
+  public void testDelete() {
+    Future<String> future = contentLengthFuture(errResponse);
+    try {
+      fs.delete(p, false);
+      Assert.fail();
+    } catch (IOException ioe) {} // expected
+    Assert.assertEquals(null, getContentLength(future));
+  }  
+
+  private String getContentLength(Future<String> future)  {
+    String request = null;
+    try {
+      request = future.get(2, TimeUnit.SECONDS);
+    } catch (Exception e) {
+      Assert.fail(e.toString());
+    }
+    Matcher matcher = contentLengthPattern.matcher(request);
+    return matcher.find() ? matcher.group(2) : null;
+  }
+  
+  private Future<String> contentLengthFuture(final String response) {
+    return executor.submit(new Callable<String>() {
+      @Override
+      public String call() throws Exception {
+        Socket client = listenSocket.accept();
+        client.setSoTimeout(2000);
+        try {
+          client.getOutputStream().write(response.getBytes());
+          client.shutdownOutput();
+          byte[] buf = new byte[4*1024]; // much bigger than request
+          int n = client.getInputStream().read(buf);
+          return new String(buf, 0, n);
+        } finally {
+          client.close();
+        }
+      }
+    });
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
new file mode 100644
index 0000000..c387b1e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestAccessTokenTimer.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.util.Timer;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class TestAccessTokenTimer {
+  @Test
+  public void expireConversionWorks() {
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now())
+        .thenReturn(5l);
+    
+    AccessTokenTimer timer = new AccessTokenTimer(mockTimer);
+    
+    timer.setExpiresIn("3");
+    assertEquals(3005, timer.getNextRefreshMSSinceEpoch());
+    
+    assertTrue(timer.shouldRefresh());
+  }
+  
+  @Test
+  public void shouldRefreshIsCorrect() {
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now())
+        .thenReturn(500l)
+        .thenReturn(1000000l + 500l);
+    
+    AccessTokenTimer timer = new AccessTokenTimer(mockTimer);
+    
+    timer.setExpiresInMSSinceEpoch("1000000");
+    
+    assertFalse(timer.shouldRefresh());
+    assertTrue(timer.shouldRefresh());
+    
+    verify(mockTimer, times(2)).now();
+  } 
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
new file mode 100644
index 0000000..d796753
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestClientCredentialTimeBasedTokenRefresher.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.ServerSocketUtil;
+import org.apache.hadoop.util.Timer;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Test;
+import org.mockserver.client.server.MockServerClient;
+import org.mockserver.integration.ClientAndServer;
+import org.mockserver.model.Header;
+import org.mockserver.model.HttpRequest;
+import org.mockserver.model.HttpResponse;
+import org.mockserver.model.Parameter;
+import org.mockserver.model.ParameterBody;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ACCESS_TOKEN_PROVIDER_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_CREDENTIALS;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_ID;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_SECRET;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.mockserver.integration.ClientAndServer.startClientAndServer;
+import static org.mockserver.matchers.Times.exactly;
+import static org.mockserver.model.HttpRequest.request;
+import static org.mockserver.model.HttpResponse.response;
+
+public class TestClientCredentialTimeBasedTokenRefresher {
+  public final static Header CONTENT_TYPE_APPLICATION_JSON
+      = new Header("Content-Type", "application/json");
+
+  public final static String CLIENT_ID_FOR_TESTING = "joebob";
+
+  public Configuration buildConf(String credential, String tokenExpires,
+                                 String clientId, String refreshURL) {
+    // Configurations are simple enough that it's not worth mocking them out.
+    Configuration conf = new Configuration();
+    conf.set(CredentialBasedAccessTokenProvider.OAUTH_CREDENTIAL_KEY,
+        credential);
+    conf.set(ACCESS_TOKEN_PROVIDER_KEY,
+        ConfCredentialBasedAccessTokenProvider.class.getName());
+    conf.set(OAUTH_CLIENT_ID_KEY, clientId);
+    conf.set(OAUTH_REFRESH_URL_KEY, refreshURL);
+    return conf;
+  }
+
+  @Test
+  public void refreshUrlIsCorrect() throws IOException {
+    final int PORT = ServerSocketUtil.getPort(0, 20);
+    final String REFRESH_ADDRESS = "http://localhost:" + PORT + "/refresh";
+
+    long tokenExpires = 0;
+
+    Configuration conf = buildConf("myreallycoolcredential",
+        Long.toString(tokenExpires),
+        CLIENT_ID_FOR_TESTING,
+        REFRESH_ADDRESS);
+
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now()).thenReturn(tokenExpires + 1000l);
+
+    AccessTokenProvider credProvider =
+        new ConfCredentialBasedAccessTokenProvider(mockTimer);
+    credProvider.setConf(conf);
+    
+    // Build mock server to receive refresh request
+    ClientAndServer mockServer  = startClientAndServer(PORT);
+
+    HttpRequest expectedRequest = request()
+        .withMethod("POST")
+        .withPath("/refresh")
+        .withBody( 
+        // Note, OkHttp does not sort the param values, so we need to do
+        // it ourselves via the ordering provided to ParameterBody...
+            ParameterBody.params(
+                Parameter.param(CLIENT_SECRET, "myreallycoolcredential"),
+                Parameter.param(GRANT_TYPE, CLIENT_CREDENTIALS),
+                Parameter.param(CLIENT_ID, CLIENT_ID_FOR_TESTING)
+                ));
+
+    MockServerClient mockServerClient = new MockServerClient("localhost", PORT);
+
+    // https://tools.ietf.org/html/rfc6749#section-5.1
+    Map<String, Object> map = new TreeMap<>();
+    
+    map.put(EXPIRES_IN, "0987654321");
+    map.put(TOKEN_TYPE, "bearer");
+    map.put(ACCESS_TOKEN, "new access token");
+
+    ObjectMapper mapper = new ObjectMapper();
+    
+    HttpResponse resp = response()
+        .withStatusCode(HttpStatus.SC_OK)
+        .withHeaders(
+            CONTENT_TYPE_APPLICATION_JSON
+        )
+        .withBody(mapper.writeValueAsString(map));
+
+    mockServerClient
+        .when(expectedRequest, exactly(1))
+        .respond(resp);
+
+    assertEquals("new access token", credProvider.getAccessToken());
+
+    mockServerClient.verify(expectedRequest);
+
+    mockServerClient.clear(expectedRequest);
+    mockServer.stop();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
new file mode 100644
index 0000000..889ad0e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/oauth2/TestRefreshTokenTimeBasedTokenRefresher.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hdfs.web.oauth2;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.Timer;
+import org.apache.http.HttpStatus;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.Test;
+import org.mockserver.client.server.MockServerClient;
+import org.mockserver.integration.ClientAndServer;
+import org.mockserver.model.Header;
+import org.mockserver.model.HttpRequest;
+import org.mockserver.model.HttpResponse;
+import org.mockserver.model.Parameter;
+import org.mockserver.model.ParameterBody;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_CLIENT_ID_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.OAUTH_REFRESH_URL_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider.OAUTH_REFRESH_TOKEN_EXPIRES_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.ConfRefreshTokenBasedAccessTokenProvider.OAUTH_REFRESH_TOKEN_KEY;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.ACCESS_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.BEARER;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.CLIENT_ID;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.EXPIRES_IN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.GRANT_TYPE;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.REFRESH_TOKEN;
+import static org.apache.hadoop.hdfs.web.oauth2.OAuth2Constants.TOKEN_TYPE;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.mockserver.integration.ClientAndServer.startClientAndServer;
+import static org.mockserver.matchers.Times.exactly;
+import static org.mockserver.model.HttpRequest.request;
+import static org.mockserver.model.HttpResponse.response;
+
+public class TestRefreshTokenTimeBasedTokenRefresher {
+
+  public final static Header CONTENT_TYPE_APPLICATION_JSON
+      = new Header("Content-Type", "application/json");
+
+  public Configuration buildConf(String refreshToken, String tokenExpires,
+                                 String clientId, String refreshURL) {
+    // Configurations are simple enough that it's not worth mocking them out.
+    Configuration conf = new Configuration();
+    conf.set(OAUTH_REFRESH_TOKEN_KEY, refreshToken);
+    conf.set(OAUTH_REFRESH_TOKEN_EXPIRES_KEY, tokenExpires);
+    conf.set(OAUTH_CLIENT_ID_KEY, clientId);
+    conf.set(OAUTH_REFRESH_URL_KEY, refreshURL);
+
+    return conf;
+  }
+
+  @Test
+  public void refreshUrlIsCorrect() throws IOException {
+    final int PORT = 7552;
+    final String REFRESH_ADDRESS = "http://localhost:" + PORT + "/refresh";
+
+    long tokenExpires = 0;
+
+    Configuration conf = buildConf("refresh token key",
+        Long.toString(tokenExpires),
+        "joebob",
+        REFRESH_ADDRESS);
+
+    Timer mockTimer = mock(Timer.class);
+    when(mockTimer.now()).thenReturn(tokenExpires + 1000l);
+
+    AccessTokenProvider tokenProvider =
+        new ConfRefreshTokenBasedAccessTokenProvider(mockTimer);
+    tokenProvider.setConf(conf);
+
+    // Build mock server to receive refresh request
+
+    ClientAndServer mockServer  = startClientAndServer(PORT);
+
+    HttpRequest expectedRequest = request()
+        .withMethod("POST")
+        .withPath("/refresh")
+        // Note, OkHttp does not sort the param values, so we need to
+        // do it ourselves via the ordering provided to ParameterBody...
+        .withBody(
+            ParameterBody.params(
+                Parameter.param(CLIENT_ID, "joebob"),
+                Parameter.param(GRANT_TYPE, REFRESH_TOKEN),
+                Parameter.param(REFRESH_TOKEN, "refresh token key")));
+
+    MockServerClient mockServerClient = new MockServerClient("localhost", PORT);
+
+    // https://tools.ietf.org/html/rfc6749#section-5.1
+    Map<String, Object> map = new TreeMap<>();
+
+    map.put(EXPIRES_IN, "0987654321");
+    map.put(TOKEN_TYPE, BEARER);
+    map.put(ACCESS_TOKEN, "new access token");
+
+    ObjectMapper mapper = new ObjectMapper();
+    
+    HttpResponse resp = response()
+        .withStatusCode(HttpStatus.SC_OK)
+        .withHeaders(
+            CONTENT_TYPE_APPLICATION_JSON
+        )
+        .withBody(mapper.writeValueAsString(map));
+
+    mockServerClient
+        .when(expectedRequest, exactly(1))
+        .respond(resp);
+
+    assertEquals("new access token", tokenProvider.getAccessToken());
+
+    mockServerClient.verify(expectedRequest);
+
+    mockServerClient.clear(expectedRequest);
+    mockServer.stop();
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f293c09..06c393d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1641,6 +1641,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9297. Update TestBlockMissingException to use corruptBlockOnDataNodesByDeletingBlockFile().
     (Tony Wu via lei)
 
+    HDFS-9168. Move client side unit test to hadoop-hdfs-client. (wheat9)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 0798248..8625a04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -211,12 +211,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>leveldbjni-all</artifactId>
       <version>1.8</version>
     </dependency>
-    <dependency>
-      <groupId>org.mock-server</groupId>
-      <artifactId>mockserver-netty</artifactId>
-      <version>3.9.2</version>
-      <scope>test</scope>
-    </dependency>
     <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
     <dependency>
       <groupId>org.bouncycastle</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java
deleted file mode 100644
index 910fee2..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandlerFactory.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs;
-
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.Random;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Test of the URL stream handler factory.
- */
-public class TestUrlStreamHandlerFactory {
-
-  private static final int RUNS = 20;
-  private static final int THREADS = 10;
-  private static final int TASKS = 200;
-  private static final int TIMEOUT = 30;
-
-  @Test
-  public void testConcurrency() throws Exception {
-    for (int i = 0; i < RUNS; i++) {
-      singleRun();
-    }
-  }
-
-  private void singleRun() throws Exception {
-    final FsUrlStreamHandlerFactory factory = new FsUrlStreamHandlerFactory();
-    final Random random = new Random();
-    ExecutorService executor = Executors.newFixedThreadPool(THREADS);
-    ArrayList<Future<?>> futures = new ArrayList<Future<?>>(TASKS);
-
-    for (int i = 0; i < TASKS ; i++) {
-      final int aux = i;
-      futures.add(executor.submit(new Runnable() {
-        @Override
-        public void run() {
-          int rand = aux + random.nextInt(3);
-          factory.createURLStreamHandler(String.valueOf(rand));
-        }
-      }));
-    }
-
-    executor.shutdown();
-    try {
-      executor.awaitTermination(TIMEOUT, TimeUnit.SECONDS);
-      executor.shutdownNow();
-    } catch (InterruptedException e) {
-      // pass
-    }
-
-    // check for exceptions
-    for (Future future : futures) {
-      if (!future.isDone()) {
-        break; // timed out
-      }
-      future.get();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
deleted file mode 100644
index e47658d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotSame;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Tests for <code>XAttr</code> objects.
- */
-public class TestXAttr {
-  private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4, XATTR5;
-  
-  @BeforeClass
-  public static void setUp() throws Exception {
-    byte[] value = {0x31, 0x32, 0x33};
-    XATTR = new XAttr.Builder()
-      .setName("name")
-      .setValue(value)
-      .build();
-    XATTR1 = new XAttr.Builder()
-      .setNameSpace(XAttr.NameSpace.USER)
-      .setName("name")
-      .setValue(value)
-      .build();
-    XATTR2 = new XAttr.Builder()
-      .setNameSpace(XAttr.NameSpace.TRUSTED)
-      .setName("name")
-      .setValue(value)
-      .build();
-    XATTR3 = new XAttr.Builder()
-      .setNameSpace(XAttr.NameSpace.SYSTEM)
-      .setName("name")
-      .setValue(value)
-      .build();
-    XATTR4 = new XAttr.Builder()
-      .setNameSpace(XAttr.NameSpace.SECURITY)
-      .setName("name")
-      .setValue(value)
-      .build();
-    XATTR5 = new XAttr.Builder()
-      .setNameSpace(XAttr.NameSpace.RAW)
-      .setName("name")
-      .setValue(value)
-      .build();
-  }
-  
-  @Test
-  public void testXAttrEquals() {
-    assertNotSame(XATTR1, XATTR2);
-    assertNotSame(XATTR2, XATTR3);
-    assertNotSame(XATTR3, XATTR4);
-    assertNotSame(XATTR4, XATTR5);
-    assertEquals(XATTR, XATTR1);
-    assertEquals(XATTR1, XATTR1);
-    assertEquals(XATTR2, XATTR2);
-    assertEquals(XATTR3, XATTR3);
-    assertEquals(XATTR4, XATTR4);
-    assertEquals(XATTR5, XATTR5);
-    assertFalse(XATTR1.equals(XATTR2));
-    assertFalse(XATTR2.equals(XATTR3));
-    assertFalse(XATTR3.equals(XATTR4));
-    assertFalse(XATTR4.equals(XATTR5));
-  }
-  
-  @Test
-  public void testXAttrHashCode() {
-    assertEquals(XATTR.hashCode(), XATTR1.hashCode());
-    assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
-    assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
-    assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
-    assertFalse(XATTR4.hashCode() == XATTR5.hashCode());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
deleted file mode 100755
index 77957bc..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.util.Random;
-import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.htrace.core.SpanId;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class TestDFSPacket {
-  private static final int chunkSize = 512;
-  private static final int checksumSize = 4;
-  private static final int maxChunksPerPacket = 4;
-
-  @Test
-  public void testPacket() throws Exception {
-    Random r = new Random(12345L);
-    byte[] data =  new byte[chunkSize];
-    r.nextBytes(data);
-    byte[] checksum = new byte[checksumSize];
-    r.nextBytes(checksum);
-
-    DataOutputBuffer os =  new DataOutputBuffer(data.length * 2);
-
-    byte[] packetBuf = new byte[data.length * 2];
-    DFSPacket p = new DFSPacket(packetBuf, maxChunksPerPacket,
-                                0, 0, checksumSize, false);
-    p.setSyncBlock(true);
-    p.writeData(data, 0, data.length);
-    p.writeChecksum(checksum, 0, checksum.length);
-    p.writeTo(os);
-
-    //we have set syncBlock to true, so the header has the maximum length
-    int headerLen = PacketHeader.PKT_MAX_HEADER_LEN;
-    byte[] readBuf = os.getData();
-
-    assertArrayRegionsEqual(readBuf, headerLen, checksum, 0, checksum.length);
-    assertArrayRegionsEqual(readBuf, headerLen + checksum.length, data, 0, data.length);
-
-  }
-
-  public static void assertArrayRegionsEqual(byte []buf1, int off1, byte []buf2,
-                                             int off2, int len) {
-    for (int i = 0; i < len; i++) {
-      if (buf1[off1 + i] != buf2[off2 + i]) {
-        Assert.fail("arrays differ at byte " + i + ". " +
-            "The first array has " + (int) buf1[off1 + i] +
-            ", but the second array has " + (int) buf2[off2 + i]);
-      }
-    }
-  }
-
-  @Test
-  public void testAddParentsGetParents() throws Exception {
-    DFSPacket p = new DFSPacket(null, maxChunksPerPacket,
-                                0, 0, checksumSize, false);
-    SpanId parents[] = p.getTraceParents();
-    Assert.assertEquals(0, parents.length);
-    p.addTraceParent(new SpanId(0, 123));
-    p.addTraceParent(new SpanId(0, 123));
-    parents = p.getTraceParents();
-    Assert.assertEquals(1, parents.length);
-    Assert.assertEquals(new SpanId(0, 123), parents[0]);
-    parents = p.getTraceParents(); // test calling 'get' again.
-    Assert.assertEquals(1, parents.length);
-    Assert.assertEquals(new SpanId(0, 123), parents[0]);
-    p.addTraceParent(new SpanId(0, 1));
-    p.addTraceParent(new SpanId(0, 456));
-    p.addTraceParent(new SpanId(0, 789));
-    parents = p.getTraceParents();
-    Assert.assertEquals(4, parents.length);
-    Assert.assertEquals(new SpanId(0, 1), parents[0]);
-    Assert.assertEquals(new SpanId(0, 123), parents[1]);
-    Assert.assertEquals(new SpanId(0, 456), parents[2]);
-    Assert.assertEquals(new SpanId(0, 789), parents[3]);
-  }
-}
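For context, a rough sketch of the DFSPacket write path the moved test exercises. DFSPacket is package-private, so the sketch assumes a caller in the org.apache.hadoop.hdfs package, and the constructor arguments follow the order used in the test above (buffer, max chunks per packet, offset in block, seqno, checksum size, last-packet flag):

    package org.apache.hadoop.hdfs;

    import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
    import org.apache.hadoop.io.DataOutputBuffer;

    public class DFSPacketSketch {
      public static void main(String[] args) throws Exception {
        byte[] data = new byte[512];       // one chunk of payload (sizes are illustrative)
        byte[] checksum = new byte[4];
        byte[] packetBuf = new byte[data.length * 2];

        DFSPacket p = new DFSPacket(packetBuf, 4, 0, 0, checksum.length, false);
        p.setSyncBlock(true);              // sync packets carry the maximum-length header
        p.writeData(data, 0, data.length);
        p.writeChecksum(checksum, 0, checksum.length);

        DataOutputBuffer out = new DataOutputBuffer(packetBuf.length);
        p.writeTo(out);

        // On the wire the checksums follow the header and the data follows the
        // checksums, which is the layout the test verifies byte-for-byte.
        System.out.println(out.getLength() - PacketHeader.PKT_MAX_HEADER_LEN);
      }
    }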

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
deleted file mode 100644
index 1d8d289..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import static org.junit.Assert.assertEquals;
-
-import java.net.InetSocketAddress;
-import java.net.URI;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-
-import org.junit.Test;
-
-/** Test NameNode port defaulting code. */
-public class TestDefaultNameNodePort {
-
-  @Test
-  public void testGetAddressFromString() throws Exception {
-    assertEquals(DFSUtilClient.getNNAddress("foo").getPort(),
-                 HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
-    assertEquals(DFSUtilClient.getNNAddress("hdfs://foo/").getPort(),
-                 HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
-    assertEquals(DFSUtilClient.getNNAddress("hdfs://foo:555").getPort(),
-                 555);
-    assertEquals(DFSUtilClient.getNNAddress("foo:555").getPort(),
-                 555);
-  }
-
-  @Test
-  public void testGetAddressFromConf() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    FileSystem.setDefaultUri(conf, "hdfs://foo/");
-    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(),
-        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
-    FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
-    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(), 555);
-    FileSystem.setDefaultUri(conf, "foo");
-    assertEquals(DFSUtilClient.getNNAddress(conf).getPort(),
-        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
-  }
-
-  @Test
-  public void testGetUri() {
-    assertEquals(DFSUtilClient.getNNUri(new InetSocketAddress("foo", 555)),
-                 URI.create("hdfs://foo:555"));
-    assertEquals(DFSUtilClient.getNNUri(new InetSocketAddress("foo",
-            HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)),
-        URI.create("hdfs://foo"));
-  }
-}
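For context, the port-defaulting behaviour checked by the moved test reduces to the sketch below; it assumes the DFSUtilClient helpers keep the signatures used in the test:

    import java.net.InetSocketAddress;
    import java.net.URI;

    import org.apache.hadoop.hdfs.DFSUtilClient;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class NameNodePortDefaultingExample {
      public static void main(String[] args) {
        // A bare host string falls back to the default NameNode RPC port...
        int port = DFSUtilClient.getNNAddress("foo").getPort();
        System.out.println(port == HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT); // true

        // ...while an explicit port is preserved.
        System.out.println(DFSUtilClient.getNNAddress("hdfs://foo:555").getPort());     // 555

        // Going the other way, the default port is omitted from the URI.
        URI uri = DFSUtilClient.getNNUri(new InetSocketAddress("foo",
            HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT));
        System.out.println(uri);                                                        // hdfs://foo
      }
    }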

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
deleted file mode 100644
index 243d09b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java
+++ /dev/null
@@ -1,293 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.ReadableByteChannel;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.net.unix.DomainSocket;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-import com.google.common.collect.HashMultiset;
-
-public class TestPeerCache {
-  static final Log LOG = LogFactory.getLog(TestPeerCache.class);
-
-  private static class FakePeer implements Peer {
-    private boolean closed = false;
-    private final boolean hasDomain;
-
-    private final DatanodeID dnId;
-
-    public FakePeer(DatanodeID dnId, boolean hasDomain) {
-      this.dnId = dnId;
-      this.hasDomain = hasDomain;
-    }
-
-    @Override
-    public ReadableByteChannel getInputStreamChannel() {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void setReadTimeout(int timeoutMs) throws IOException {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public int getReceiveBufferSize() throws IOException {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public boolean getTcpNoDelay() throws IOException {
-      return false;
-    }
-
-    @Override
-    public void setWriteTimeout(int timeoutMs) throws IOException {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public boolean isClosed() {
-      return closed;
-    }
-  
-    @Override
-    public void close() throws IOException {
-      closed = true;
-    }
-
-    @Override
-    public String getRemoteAddressString() {
-      return dnId.getInfoAddr();
-    }
-
-    @Override
-    public String getLocalAddressString() {
-      return "127.0.0.1:123";
-    }
-
-    @Override
-    public InputStream getInputStream() throws IOException {
-      throw new UnsupportedOperationException();
-    }
-  
-    @Override
-    public OutputStream getOutputStream() throws IOException {
-      throw new UnsupportedOperationException();
-    }
-  
-    @Override
-    public boolean isLocal() {
-      return true;
-    }
-  
-    @Override
-    public String toString() {
-      return "FakePeer(dnId=" + dnId + ")";
-    }
-
-    @Override
-    public DomainSocket getDomainSocket() {
-      if (!hasDomain) return null;
-      // Return a mock which throws an exception whenever any function is
-      // called.
-      return Mockito.mock(DomainSocket.class,
-          new Answer<Object>() {
-            @Override
-            public Object answer(InvocationOnMock invocation)
-                throws Throwable {
-              throw new RuntimeException("injected fault.");
-          } });
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (!(o instanceof FakePeer)) return false;
-      FakePeer other = (FakePeer)o;
-      return hasDomain == other.hasDomain &&
-          dnId.equals(other.dnId);
-    }
-
-    @Override
-    public int hashCode() {
-      return dnId.hashCode() ^ (hasDomain ? 1 : 0);
-    }
-
-    @Override
-    public boolean hasSecureChannel() {
-      return false;
-    }
-  }
-
-  @Test
-  public void testAddAndRetrieve() throws Exception {
-    PeerCache cache = new PeerCache(3, 100000);
-    DatanodeID dnId = new DatanodeID("192.168.0.1",
-          "fakehostname", "fake_datanode_id",
-          100, 101, 102, 103);
-    FakePeer peer = new FakePeer(dnId, false);
-    cache.put(dnId, peer);
-    assertTrue(!peer.isClosed());
-    assertEquals(1, cache.size());
-    assertEquals(peer, cache.get(dnId, false));
-    assertEquals(0, cache.size());
-    cache.close();
-  }
-
-  @Test
-  public void testExpiry() throws Exception {
-    final int CAPACITY = 3;
-    final int EXPIRY_PERIOD = 10;
-    PeerCache cache = new PeerCache(CAPACITY, EXPIRY_PERIOD);
-    DatanodeID dnIds[] = new DatanodeID[CAPACITY];
-    FakePeer peers[] = new FakePeer[CAPACITY];
-    for (int i = 0; i < CAPACITY; ++i) {
-      dnIds[i] = new DatanodeID("192.168.0.1",
-          "fakehostname_" + i, "fake_datanode_id",
-          100, 101, 102, 103);
-      peers[i] = new FakePeer(dnIds[i], false);
-    }
-    for (int i = 0; i < CAPACITY; ++i) {
-      cache.put(dnIds[i], peers[i]);
-    }
-
-    // Wait for the peers to expire
-    Thread.sleep(EXPIRY_PERIOD * 50);
-    assertEquals(0, cache.size());
-
-    // make sure that the peers were closed when they were expired
-    for (int i = 0; i < CAPACITY; ++i) {
-      assertTrue(peers[i].isClosed());
-    }
-
-    // sleep for another second and see if 
-    // the daemon thread runs fine on empty cache
-    Thread.sleep(EXPIRY_PERIOD * 50);
-    cache.close();
-  }
-
-  @Test
-  public void testEviction() throws Exception {
-    final int CAPACITY = 3;
-    PeerCache cache = new PeerCache(CAPACITY, 100000);
-    DatanodeID dnIds[] = new DatanodeID[CAPACITY + 1];
-    FakePeer peers[] = new FakePeer[CAPACITY + 1];
-    for (int i = 0; i < dnIds.length; ++i) {
-      dnIds[i] = new DatanodeID("192.168.0.1",
-          "fakehostname_" + i, "fake_datanode_id_" + i,
-          100, 101, 102, 103);
-      peers[i] = new FakePeer(dnIds[i], false);
-    }
-    for (int i = 0; i < CAPACITY; ++i) {
-      cache.put(dnIds[i], peers[i]);
-    }
-    // Check that the peers are cached
-    assertEquals(CAPACITY, cache.size());
-
-    // Add another entry and check that the first entry was evicted
-    cache.put(dnIds[CAPACITY], peers[CAPACITY]);
-    assertEquals(CAPACITY, cache.size());
-    assertSame(null, cache.get(dnIds[0], false));
-
-    // Make sure that the other entries are still there
-    for (int i = 1; i < CAPACITY; ++i) {
-      Peer peer = cache.get(dnIds[i], false);
-      assertSame(peers[i], peer);
-      assertTrue(!peer.isClosed());
-      peer.close();
-    }
-    assertEquals(1, cache.size());
-    cache.close();
-  }
-
-  @Test
-  public void testMultiplePeersWithSameKey() throws Exception {
-    final int CAPACITY = 3;
-    PeerCache cache = new PeerCache(CAPACITY, 100000);
-    DatanodeID dnId = new DatanodeID("192.168.0.1",
-          "fakehostname", "fake_datanode_id",
-          100, 101, 102, 103);
-    HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
-    for (int i = 0; i < CAPACITY; ++i) {
-      FakePeer peer = new FakePeer(dnId, false);
-      peers.add(peer);
-      cache.put(dnId, peer);
-    }
-    // Check that all of the peers ended up in the cache
-    assertEquals(CAPACITY, cache.size());
-    while (!peers.isEmpty()) {
-      Peer peer = cache.get(dnId, false);
-      assertTrue(peer != null);
-      assertTrue(!peer.isClosed());
-      peers.remove(peer);
-    }
-    assertEquals(0, cache.size());
-    cache.close();
-  }
-
-  @Test
-  public void testDomainSocketPeers() throws Exception {
-    final int CAPACITY = 3;
-    PeerCache cache = new PeerCache(CAPACITY, 100000);
-    DatanodeID dnId = new DatanodeID("192.168.0.1",
-          "fakehostname", "fake_datanode_id",
-          100, 101, 102, 103);
-    HashMultiset<FakePeer> peers = HashMultiset.create(CAPACITY);
-    for (int i = 0; i < CAPACITY; ++i) {
-      FakePeer peer = new FakePeer(dnId, i == CAPACITY - 1);
-      peers.add(peer);
-      cache.put(dnId, peer);
-    }
-    // Check that all of the peers ended up in the cache
-    assertEquals(CAPACITY, cache.size());
-    // Test that get(requireDomainPeer=true) finds the peer with the 
-    // domain socket.
-    Peer peer = cache.get(dnId, true);
-    assertTrue(peer.getDomainSocket() != null);
-    peers.remove(peer);
-    // Test that get(requireDomainPeer=true) returns null when there are
-    // no more peers with domain sockets.
-    peer = cache.get(dnId, true);
-    assertTrue(peer == null);
-    // Check that all of the other peers ended up in the cache.
-    while (!peers.isEmpty()) {
-      peer = cache.get(dnId, false);
-      assertTrue(peer != null);
-      assertTrue(!peer.isClosed());
-      peers.remove(peer);
-    }
-    assertEquals(0, cache.size());
-    cache.close();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
deleted file mode 100644
index a4e00d9..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestLeaseRenewer.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.client.impl;
-
-import static org.junit.Assert.assertSame;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-import com.google.common.base.Supplier;
-
-public class TestLeaseRenewer {
-  private final String FAKE_AUTHORITY="hdfs://nn1/";
-  private final UserGroupInformation FAKE_UGI_A =
-    UserGroupInformation.createUserForTesting(
-      "myuser", new String[]{"group1"});
-  private final UserGroupInformation FAKE_UGI_B =
-    UserGroupInformation.createUserForTesting(
-      "myuser", new String[]{"group1"});
-
-  private DFSClient MOCK_DFSCLIENT;
-  private LeaseRenewer renewer;
-
-  /** Cause renewals often so test runs quickly. */
-  private static final long FAST_GRACE_PERIOD = 100L;
-
-  @Before
-  public void setupMocksAndRenewer() throws IOException {
-    MOCK_DFSCLIENT = createMockClient();
-
-    renewer = LeaseRenewer.getInstance(
-        FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
-    renewer.setGraceSleepPeriod(FAST_GRACE_PERIOD);
-}
-
-  private DFSClient createMockClient() {
-    final DfsClientConf mockConf = Mockito.mock(DfsClientConf.class);
-    Mockito.doReturn((int)FAST_GRACE_PERIOD).when(mockConf).getHdfsTimeout();
-
-    DFSClient mock = Mockito.mock(DFSClient.class);
-    Mockito.doReturn(true).when(mock).isClientRunning();
-    Mockito.doReturn(mockConf).when(mock).getConf();
-    Mockito.doReturn("myclient").when(mock).getClientName();
-    return mock;
-  }
-
-  @Test
-  public void testInstanceSharing() throws IOException {
-    // Two lease renewers with the same UGI should return
-    // the same instance
-    LeaseRenewer lr = LeaseRenewer.getInstance(
-        FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
-    LeaseRenewer lr2 = LeaseRenewer.getInstance(
-        FAKE_AUTHORITY, FAKE_UGI_A, MOCK_DFSCLIENT);
-    Assert.assertSame(lr, lr2);
-
-    // But a different UGI should return a different instance
-    LeaseRenewer lr3 = LeaseRenewer.getInstance(
-        FAKE_AUTHORITY, FAKE_UGI_B, MOCK_DFSCLIENT);
-    Assert.assertNotSame(lr, lr3);
-
-    // A different authority with same UGI should also be a different
-    // instance.
-    LeaseRenewer lr4 = LeaseRenewer.getInstance(
-        "someOtherAuthority", FAKE_UGI_B, MOCK_DFSCLIENT);
-    Assert.assertNotSame(lr, lr4);
-    Assert.assertNotSame(lr3, lr4);
-  }
-
-  @Test
-  public void testRenewal() throws Exception {
-    // Keep track of how many times the lease gets renewed
-    final AtomicInteger leaseRenewalCount = new AtomicInteger();
-    Mockito.doAnswer(new Answer<Boolean>() {
-      @Override
-      public Boolean answer(InvocationOnMock invocation) throws Throwable {
-        leaseRenewalCount.incrementAndGet();
-        return true;
-      }
-    }).when(MOCK_DFSCLIENT).renewLease();
-
-
-    // Set up a file so that we start renewing our lease.
-    DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
-    long fileId = 123L;
-    renewer.put(fileId, mockStream, MOCK_DFSCLIENT);
-
-    // Wait for lease to get renewed
-    long failTime = Time.monotonicNow() + 5000;
-    while (Time.monotonicNow() < failTime &&
-        leaseRenewalCount.get() == 0) {
-      Thread.sleep(50);
-    }
-    if (leaseRenewalCount.get() == 0) {
-      Assert.fail("Did not renew lease at all!");
-    }
-
-    renewer.closeFile(fileId, MOCK_DFSCLIENT);
-  }
-
-  /**
-   * Regression test for HDFS-2810. In this bug, the LeaseRenewer has handles
-   * to several DFSClients with the same name, the first of which has no files
-   * open. Previously, this was causing the lease to not get renewed.
-   */
-  @Test
-  public void testManyDfsClientsWhereSomeNotOpen() throws Exception {
-    // First DFSClient has no files open so doesn't renew leases.
-    final DFSClient mockClient1 = createMockClient();
-    Mockito.doReturn(false).when(mockClient1).renewLease();
-    assertSame(renewer, LeaseRenewer.getInstance(
-        FAKE_AUTHORITY, FAKE_UGI_A, mockClient1));
-
-    // Set up a file so that we start renewing our lease.
-    DFSOutputStream mockStream1 = Mockito.mock(DFSOutputStream.class);
-    long fileId = 456L;
-    renewer.put(fileId, mockStream1, mockClient1);
-
-    // Second DFSClient does renew lease
-    final DFSClient mockClient2 = createMockClient();
-    Mockito.doReturn(true).when(mockClient2).renewLease();
-    assertSame(renewer, LeaseRenewer.getInstance(
-        FAKE_AUTHORITY, FAKE_UGI_A, mockClient2));
-
-    // Set up a file so that we start renewing our lease.
-    DFSOutputStream mockStream2 = Mockito.mock(DFSOutputStream.class);
-    renewer.put(fileId, mockStream2, mockClient2);
-
-
-    // Wait for lease to get renewed
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        try {
-          Mockito.verify(mockClient1, Mockito.atLeastOnce()).renewLease();
-          Mockito.verify(mockClient2, Mockito.atLeastOnce()).renewLease();
-          return true;
-        } catch (AssertionError err) {
-          LeaseRenewer.LOG.warn("Not yet satisfied", err);
-          return false;
-        } catch (IOException e) {
-          // should not throw!
-          throw new RuntimeException(e);
-        }
-      }
-    }, 100, 10000);
-
-    renewer.closeFile(fileId, mockClient1);
-    renewer.closeFile(fileId, mockClient2);
-  }
-
-  @Test
-  public void testThreadName() throws Exception {
-    DFSOutputStream mockStream = Mockito.mock(DFSOutputStream.class);
-    long fileId = 789L;
-    Assert.assertFalse("Renewer not initially running",
-        renewer.isRunning());
-
-    // Pretend to open a file
-    renewer.put(fileId, mockStream, MOCK_DFSCLIENT);
-
-    Assert.assertTrue("Renewer should have started running",
-        renewer.isRunning());
-
-    // Check the thread name is reasonable
-    String threadName = renewer.getDaemonName();
-    Assert.assertEquals("LeaseRenewer:myuser@hdfs://nn1/", threadName);
-
-    // Pretend to close the file
-    renewer.closeFile(fileId, MOCK_DFSCLIENT);
-    renewer.setEmptyTime(Time.monotonicNow());
-
-    // Should stop the renewer running within a few seconds
-    long failTime = Time.monotonicNow() + 5000;
-    while (renewer.isRunning() && Time.monotonicNow() < failTime) {
-      Thread.sleep(50);
-    }
-    Assert.assertFalse(renewer.isRunning());
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
deleted file mode 100644
index 10c1671..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-import org.junit.Test;
-
-
-public class TestExtendedBlock {
-  static final String POOL_A = "blockpool-a";
-  static final String POOL_B = "blockpool-b";
-  static final Block BLOCK_1_GS1 = new Block(1L, 100L, 1L);
-  static final Block BLOCK_1_GS2 = new Block(1L, 100L, 2L);
-  static final Block BLOCK_2_GS1 = new Block(2L, 100L, 1L);
-  
-  @Test
-  public void testEquals() {
-    // Same block -> equal
-    assertEquals(
-        new ExtendedBlock(POOL_A, BLOCK_1_GS1),
-        new ExtendedBlock(POOL_A, BLOCK_1_GS1));
-    // Different pools, same block id -> not equal
-    assertNotEquals(
-        new ExtendedBlock(POOL_A, BLOCK_1_GS1),
-        new ExtendedBlock(POOL_B, BLOCK_1_GS1));
-    // Same pool, different block id -> not equal
-    assertNotEquals(
-        new ExtendedBlock(POOL_A, BLOCK_1_GS1),
-        new ExtendedBlock(POOL_A, BLOCK_2_GS1));
-    // Same block, different genstamps -> equal
-    assertEquals(
-        new ExtendedBlock(POOL_A, BLOCK_1_GS1),
-        new ExtendedBlock(POOL_A, BLOCK_1_GS2));
-  }
-  
-  @Test
-  public void testHashcode() {
-    
-    // Different pools, same block id -> different hashcode
-    assertNotEquals(
-        new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode(),
-        new ExtendedBlock(POOL_B, BLOCK_1_GS1).hashCode());
-    
-    // Same pool, different block id -> different hashcode
-    assertNotEquals(
-        new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode(),
-        new ExtendedBlock(POOL_A, BLOCK_2_GS1).hashCode());
-    
-    // Same block -> same hashcode
-    assertEquals(
-        new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode(),
-        new ExtendedBlock(POOL_A, BLOCK_1_GS1).hashCode());
-
-  }
-
-  private static void assertNotEquals(Object a, Object b) {
-    assertFalse("expected not equal: '" + a + "' and '" + b + "'",
-        a.equals(b));
-  }
-}
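For context, a short sketch of the equality contract the moved test pins down: block pool id and block id participate in equals()/hashCode(), the generation stamp does not. Class locations are as used by the test:

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

    public class ExtendedBlockEqualityExample {
      public static void main(String[] args) {
        Block gs1 = new Block(1L, 100L, 1L);   // block id, length, generation stamp
        Block gs2 = new Block(1L, 100L, 2L);   // same block, newer generation stamp

        ExtendedBlock a = new ExtendedBlock("blockpool-a", gs1);
        ExtendedBlock b = new ExtendedBlock("blockpool-a", gs2);
        ExtendedBlock c = new ExtendedBlock("blockpool-b", gs1);

        System.out.println(a.equals(b));   // true: generation stamp is ignored
        System.out.println(a.equals(c));   // false: different block pools
      }
    }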

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65f53f24/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
index 62b5f8f..168b9d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import com.google.common.collect.Lists;
-import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -259,7 +259,7 @@ public class TestAuditLogger {
       auditlog.clearOutput();
 
       // long context is truncated
-      final String longContext = RandomStringUtils.randomAscii(200);
+      final String longContext = StringUtils.repeat("foo", 100);
       context = new CallerContext.Builder(longContext)
           .setSignature("L".getBytes(CallerContext.SIGNATURE_ENCODING))
           .build();

