hadoop-common-commits mailing list archives

From: whe...@apache.org
Subject: [2/2] hadoop git commit: HDFS-8185. Separate client related routines in HAUtil into a new class. Contributed by Haohui Mai.
Date: Wed, 22 Apr 2015 05:09:28 GMT
HDFS-8185. Separate client related routines in HAUtil into a new class. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f8003dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f8003dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f8003dc

Branch: refs/heads/trunk
Commit: 6f8003dc7bc9e8be7b0512c514d370c303faf003
Parents: 674c7ef
Author: Haohui Mai <wheat9@apache.org>
Authored: Thu Apr 16 15:45:46 2015 -0700
Committer: Haohui Mai <wheat9@apache.org>
Committed: Tue Apr 21 21:59:47 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   | 172 +++++++++++++++
 .../org/apache/hadoop/hdfs/HAUtilClient.java    |  95 ++++++++
 .../hdfs/client/HdfsClientConfigKeys.java       |   6 +
 .../hdfs/protocol/HdfsConstantsClient.java      |   6 +
 .../hadoop/hdfs/web/WebHdfsConstants.java       |   3 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   8 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  17 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    | 217 +++----------------
 .../hadoop/hdfs/DistributedFileSystem.java      |   2 +-
 .../java/org/apache/hadoop/hdfs/HAUtil.java     |  74 +------
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |   8 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java     |   7 -
 .../datanode/web/webhdfs/ParameterParser.java   |   8 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   4 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  24 +-
 .../apache/hadoop/hdfs/tools/DFSHAAdmin.java    |   5 +-
 .../hdfs/tools/DelegationTokenFetcher.java      |   7 +-
 .../hadoop/hdfs/web/SWebHdfsFileSystem.java     |   4 +-
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  10 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      |  20 +-
 .../org/apache/hadoop/fs/TestSymlinkHdfs.java   |   3 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |   2 +-
 .../hadoop/hdfs/TestDFSClientRetries.java       |   6 +-
 .../org/apache/hadoop/hdfs/TestDFSUtil.java     |   6 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  |   4 +-
 .../apache/hadoop/hdfs/TestEncryptionZones.java |   7 +-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |   6 +-
 .../sasl/SaslDataTransferTestCase.java          |   1 -
 .../hdfs/security/TestDelegationToken.java      |   3 +-
 .../TestDelegationTokenForProxyUser.java        |   3 +-
 .../blockmanagement/TestReplicationPolicy.java  |   2 -
 .../web/webhdfs/TestParameterParser.java        |   7 +-
 .../hdfs/server/namenode/TestAuditLogs.java     |   9 +-
 .../hdfs/server/namenode/TestMalformedURLs.java |   4 +-
 .../TestNameNodeRespectsBindHostKeys.java       |   3 +-
 .../namenode/ha/TestDelegationTokensWithHA.java |  12 +-
 .../server/namenode/ha/TestHAConfiguration.java |   2 +-
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java       |   4 +-
 .../hdfs/web/TestFSMainOperationsWebHdfs.java   |   2 +-
 .../hadoop/hdfs/web/TestHttpsFileSystem.java    |   1 -
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java |  27 ++-
 .../apache/hadoop/hdfs/web/TestWebHDFSAcl.java  |   5 +-
 .../hadoop/hdfs/web/TestWebHDFSForHA.java       |   3 +-
 .../hadoop/hdfs/web/TestWebHDFSXAttr.java       |   2 +-
 .../hdfs/web/TestWebHdfsFileSystemContract.java |   5 +-
 .../hadoop/hdfs/web/TestWebHdfsTimeouts.java    |   2 +-
 .../apache/hadoop/hdfs/web/TestWebHdfsUrl.java  |   2 +-
 .../web/TestWebHdfsWithMultipleNameNodes.java   |   3 +-
 .../apache/hadoop/hdfs/web/WebHdfsTestUtil.java |  13 +-
 50 files changed, 448 insertions(+), 401 deletions(-)
----------------------------------------------------------------------
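For orientation, here is a minimal client-side sketch (illustrative only, not part of this commit; the nameservice name, hostnames and class name are hypothetical) of how the new DFSUtilClient and HAUtilClient entry points introduced in the diff below can be exercised:

import java.net.InetSocketAddress;
import java.net.URI;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HAUtilClient;

public class ClientSideUsageSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical HA configuration for a nameservice called "mycluster".
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.http-address.mycluster.nn1", "nn1.example.com:50070");
    conf.set("dfs.namenode.http-address.mycluster.nn2", "nn2.example.com:50070");

    // nameserviceId -> (namenodeId -> HTTP address), resolved entirely on the client side.
    Map<String, Map<String, InetSocketAddress>> addrs =
        DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");
    System.out.println(addrs);

    // The logical-URI check now lives in HAUtilClient instead of HAUtil.
    System.out.println(HAUtilClient.isLogicalUri(conf, URI.create("hdfs://mycluster")));
  }
}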


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 2817b66..84fb12c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -17,11 +17,28 @@
  */
 package org.apache.hadoop.hdfs;
 
+import com.google.common.base.Joiner;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.UnsupportedEncodingException;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
 
 public class DFSUtilClient {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      DFSUtilClient.class);
   /**
    * Converts a byte array to a string using UTF8 encoding.
    */
@@ -45,6 +62,58 @@ public class DFSUtilClient {
   }
 
   /**
+   * Returns collection of nameservice Ids from the configuration.
+   * @param conf configuration
+   * @return collection of nameservice Ids, or null if not specified
+   */
+  public static Collection<String> getNameServiceIds(Configuration conf) {
+    return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
+  }
+
+  /**
+   * Namenode HighAvailability related configuration.
+   * Returns collection of namenode Ids from the configuration. One logical id
+   * for each namenode in the HA setup.
+   *
+   * @param conf configuration
+   * @param nsId the nameservice ID to look at, or null for non-federated
+   * @return collection of namenode Ids
+   */
+  public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
+    String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId);
+    return conf.getTrimmedStringCollection(key);
+  }
+
+  /** Add non empty and non null suffix to a key */
+  static String addSuffix(String key, String suffix) {
+    if (suffix == null || suffix.isEmpty()) {
+      return key;
+    }
+    assert !suffix.startsWith(".") :
+      "suffix '" + suffix + "' should not already have '.' prepended.";
+    return key + "." + suffix;
+  }
+
+  /**
+   * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
+   * the configuration.
+   *
+   * @return list of InetSocketAddresses
+   */
+  public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
+      Configuration conf, String scheme) {
+    if (WebHdfsConstants.WEBHDFS_SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          HdfsClientConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    } else if (WebHdfsConstants.SWEBHDFS_SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
+    } else {
+      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
+    }
+  }
+
+  /**
    * Decode a specific range of bytes of the given byte array to a string
    * using UTF8.
    *
@@ -62,4 +131,107 @@ public class DFSUtilClient {
     return null;
   }
 
+  /**
+   * @return <code>coll</code> if it is non-null and non-empty. Otherwise,
+   * returns a list with a single null value.
+   */
+  static Collection<String> emptyAsSingletonNull(Collection<String> coll) {
+    if (coll == null || coll.isEmpty()) {
+      return Collections.singletonList(null);
+    } else {
+      return coll;
+    }
+  }
+
+  /** Concatenate list of suffix strings '.' separated */
+  static String concatSuffixes(String... suffixes) {
+    if (suffixes == null) {
+      return null;
+    }
+    return Joiner.on(".").skipNulls().join(suffixes);
+  }
+
+  /**
+   * Returns the configured address for all NameNodes in the cluster.
+   * @param conf configuration
+   * @param defaultAddress default address to return in case key is not found.
+   * @param keys Set of keys to look for in the order of preference
+   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
+   */
+  static Map<String, Map<String, InetSocketAddress>>
+    getAddresses(Configuration conf, String defaultAddress, String... keys) {
+    Collection<String> nameserviceIds = getNameServiceIds(conf);
+    return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys);
+  }
+
+  /**
+   * Returns the configured address for all NameNodes in the cluster.
+   * @param conf configuration
+   * @param defaultAddress default address to return in case key is not found.
+   * @param keys Set of keys to look for in the order of preference
+   *
+   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
+   */
+  static Map<String, Map<String, InetSocketAddress>>
+    getAddressesForNsIds(
+      Configuration conf, Collection<String> nsIds, String defaultAddress,
+      String... keys) {
+    // Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
+    // across all of the configured nameservices and namenodes.
+    Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap();
+    for (String nsId : emptyAsSingletonNull(nsIds)) {
+      Map<String, InetSocketAddress> isas =
+        getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
+      if (!isas.isEmpty()) {
+        ret.put(nsId, isas);
+      }
+    }
+    return ret;
+  }
+
+  static Map<String, InetSocketAddress> getAddressesForNameserviceId(
+      Configuration conf, String nsId, String defaultValue, String... keys) {
+    Collection<String> nnIds = getNameNodeIds(conf, nsId);
+    Map<String, InetSocketAddress> ret = Maps.newHashMap();
+    for (String nnId : emptyAsSingletonNull(nnIds)) {
+      String suffix = concatSuffixes(nsId, nnId);
+      String address = getConfValue(defaultValue, suffix, conf, keys);
+      if (address != null) {
+        InetSocketAddress isa = NetUtils.createSocketAddr(address);
+        if (isa.isUnresolved()) {
+          LOG.warn("Namenode for " + nsId +
+                       " remains unresolved for ID " + nnId +
+                   ".  Check your hdfs-site.xml file to " +
+                   "ensure namenodes are configured properly.");
+        }
+        ret.put(nnId, isa);
+      }
+    }
+    return ret;
+  }
+
+  /**
+   * Given a list of keys in the order of preference, returns a value
+   * for the key in the given order from the configuration.
+   * @param defaultValue default value to return, when key was not found
+   * @param keySuffix suffix to add to the key, if it is not null
+   * @param conf Configuration
+   * @param keys list of keys in the order of preference
+   * @return value of the key or default if a key was not found in configuration
+   */
+  private static String getConfValue(String defaultValue, String keySuffix,
+      Configuration conf, String... keys) {
+    String value = null;
+    for (String key : keys) {
+      key = addSuffix(key, keySuffix);
+      value = conf.get(key);
+      if (value != null) {
+        break;
+      }
+    }
+    if (value == null) {
+      value = defaultValue;
+    }
+    return value;
+  }
 }
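
As a reading aid, the key-lookup order implemented by the getConfValue/addSuffix pair above can be sketched standalone (a hypothetical helper, not part of the patch): for each key in preference order, try <key>.<suffix> before falling back to the default.

import org.apache.hadoop.conf.Configuration;

class ConfValueSketch {
  // Mirrors DFSUtilClient#getConfValue: the first configured "<key>.<suffix>" wins,
  // otherwise the supplied default is returned.
  static String resolve(Configuration conf, String defaultValue,
      String suffix, String... keys) {
    for (String key : keys) {
      String fullKey = (suffix == null || suffix.isEmpty())
          ? key : key + "." + suffix;   // same behaviour as addSuffix()
      String value = conf.get(fullKey);
      if (value != null) {
        return value;
      }
    }
    return defaultValue;
  }
}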

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
new file mode 100644
index 0000000..7956838
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+
+import java.net.URI;
+
+import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.HA_DT_SERVICE_PREFIX;
+
+@InterfaceAudience.Private
+public class HAUtilClient {
+  /**
+   * @return true if the given nameNodeUri appears to be a logical URI.
+   */
+  public static boolean isLogicalUri(
+      Configuration conf, URI nameNodeUri) {
+    String host = nameNodeUri.getHost();
+    // A logical name must be one of the service IDs.
+    return DFSUtilClient.getNameServiceIds(conf).contains(host);
+  }
+
+  /**
+   * Check whether the client has a failover proxy provider configured
+   * for the namenode/nameservice.
+   *
+   * @param conf Configuration
+   * @param nameNodeUri The URI of namenode
+   * @return true if failover is configured.
+   */
+  public static boolean isClientFailoverConfigured(
+      Configuration conf, URI nameNodeUri) {
+    String host = nameNodeUri.getHost();
+    String configKey = HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX
+        + "." + host;
+    return conf.get(configKey) != null;
+  }
+
+  /**
+   * Get the service name used in the delegation token for the given logical
+   * HA service.
+   * @param uri the logical URI of the cluster
+   * @param scheme the scheme of the corresponding FileSystem
+   * @return the service name
+   */
+  public static Text buildTokenServiceForLogicalUri(final URI uri,
+      final String scheme) {
+    return new Text(buildTokenServicePrefixForLogicalUri(scheme)
+        + uri.getHost());
+  }
+
+  public static String buildTokenServicePrefixForLogicalUri(String scheme) {
+    return HA_DT_SERVICE_PREFIX + scheme + ":";
+  }
+
+  /**
+   * Parse the file system URI out of the provided token.
+   */
+  public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
+    String tokStr = token.getService().toString();
+    final String prefix = buildTokenServicePrefixForLogicalUri(
+        scheme);
+    if (tokStr.startsWith(prefix)) {
+      tokStr = tokStr.replaceFirst(prefix, "");
+    }
+    return URI.create(scheme + "://" + tokStr);
+  }
+
+  /**
+   * @return true if this token corresponds to a logical nameservice
+   * rather than a specific namenode.
+   */
+  public static boolean isTokenForLogicalUri(Token<?> token) {
+    return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
+  }
+}
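
A small usage sketch for the token-service helpers moved into HAUtilClient above (illustrative; the URI is hypothetical): for the logical URI hdfs://mycluster the delegation-token service becomes "ha-hdfs:mycluster", and getServiceUriFromToken reverses that mapping.

import java.net.URI;

import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.io.Text;

class TokenServiceSketch {
  public static void main(String[] args) {
    URI logicalUri = URI.create("hdfs://mycluster");

    // HA_DT_SERVICE_PREFIX + scheme + ":" + host  =>  "ha-hdfs:mycluster"
    Text service = HAUtilClient.buildTokenServiceForLogicalUri(logicalUri, "hdfs");
    System.out.println(service);
  }
}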

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index f9965b4..d11922d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -31,6 +31,12 @@ public interface HdfsClientConfigKeys {
       "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
 
   static final String PREFIX = "dfs.client.";
+  String  DFS_NAMESERVICES = "dfs.nameservices";
+  int     DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
+  String  DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
+  int     DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470;
+  String  DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
+  String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
 
   /** dfs.client.retry configuration properties */
   interface Retry {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
index 40c3203..ab4310e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
@@ -32,4 +32,10 @@ public interface HdfsConstantsClient {
    */
   long GRANDFATHER_INODE_ID = 0;
   byte BLOCK_STORAGE_POLICY_ID_UNSPECIFIED = 0;
+  /**
+   * A prefix put before the namenode URI inside the "service" field
+   * of a delegation token, indicating that the URI is a logical (HA)
+   * URI.
+   */
+  String HA_DT_SERVICE_PREFIX = "ha-";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
index 25f3cfd..50da899 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
@@ -23,7 +23,8 @@ import org.apache.hadoop.io.Text;
 
 @InterfaceAudience.Private
 public class WebHdfsConstants {
-  /** Delegation token kind */
+  public static final String WEBHDFS_SCHEME = "webhdfs";
+  public static final String SWEBHDFS_SCHEME = "swebhdfs";
   public static final Text WEBHDFS_TOKEN_KIND = new Text("WEBHDFS delegation");
   public static final Text SWEBHDFS_TOKEN_KIND = new Text("SWEBHDFS delegation");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e162d28..03c5228 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -455,6 +455,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8133. Improve readability of deleted block check (Daryn Sharp via
     Colin P. McCabe)
 
+    HDFS-8185. Separate client related routines in HAUtil into a new class.
+    (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 6a73a93..6721412 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -806,10 +806,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     private static ClientProtocol getNNProxy(
         Token<DelegationTokenIdentifier> token, Configuration conf)
         throws IOException {
-      URI uri = HAUtil.getServiceUriFromToken(HdfsConstants.HDFS_URI_SCHEME,
-              token);
-      if (HAUtil.isTokenForLogicalUri(token) &&
-          !HAUtil.isLogicalUri(conf, uri)) {
+      URI uri = HAUtilClient.getServiceUriFromToken(
+          HdfsConstants.HDFS_URI_SCHEME, token);
+      if (HAUtilClient.isTokenForLogicalUri(token) &&
+          !HAUtilClient.isLogicalUri(conf, uri)) {
         // If the token is for a logical nameservice, but the configuration
         // we have disagrees about that, we can't actually renew it.
         // This can be the case in MR, for example, if the RM doesn't

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9cb00a0..9a754cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -102,8 +102,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
     "dfs.namenode.path.based.cache.block.map.allocation.percent";
   public static final float    DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT = 0.25f;
 
-  public static final int     DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
-  public static final String  DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
+  public static final int     DFS_NAMENODE_HTTP_PORT_DEFAULT =
+      HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
+  public static final String  DFS_NAMENODE_HTTP_ADDRESS_KEY =
+      HdfsClientConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
   public static final String  DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTP_PORT_DEFAULT;
   public static final String  DFS_NAMENODE_HTTP_BIND_HOST_KEY = "dfs.namenode.http-bind-host";
   public static final String  DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address";
@@ -302,8 +304,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   //Following keys have no defaults
   public static final String  DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir";
-  public static final int     DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470;
-  public static final String  DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
+  public static final int     DFS_NAMENODE_HTTPS_PORT_DEFAULT =
+      HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
+  public static final String  DFS_NAMENODE_HTTPS_ADDRESS_KEY =
+      HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
   public static final String  DFS_NAMENODE_HTTPS_BIND_HOST_KEY = "dfs.namenode.https-bind-host";
   public static final String  DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTPS_PORT_DEFAULT;
   public static final String  DFS_NAMENODE_NAME_DIR_KEY = "dfs.namenode.name.dir";
@@ -485,7 +489,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
   public static final int     DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
   public static final String  DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY = "dfs.namenode.legacy-oiv-image.dir";
-  
+
   public static final String  DFS_NAMESERVICES = "dfs.nameservices";
   public static final String  DFS_NAMESERVICE_ID = "dfs.nameservice.id";
   public static final String  DFS_INTERNAL_NAMESERVICES_KEY = "dfs.internal.nameservices";
@@ -513,7 +517,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_QUOTA_BY_STORAGETYPE_ENABLED_DEFAULT = true;
 
   // HA related configuration
-  public static final String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
+  public static final String DFS_HA_NAMENODES_KEY_PREFIX =
+      HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
   public static final String DFS_HA_NAMENODE_ID_KEY = "dfs.ha.namenode.id";
   public static final String  DFS_HA_STANDBY_CHECKPOINTS_KEY = "dfs.ha.standby.checkpoints";
   public static final boolean DFS_HA_STANDBY_CHECKPOINTS_DEFAULT = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index a2c16b7..60a496f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
@@ -31,7 +30,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
@@ -48,7 +46,6 @@ import java.security.SecureRandom;
 import java.text.SimpleDateFormat;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.Comparator;
 import java.util.Date;
 import java.util.HashSet;
@@ -87,8 +84,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -106,7 +101,6 @@ import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import com.google.common.primitives.SignedBytes;
 import com.google.protobuf.BlockingService;
 
@@ -518,7 +512,7 @@ public class DFSUtil {
       for (int hCnt = 0; hCnt < locations.length; hCnt++) {
         hosts[hCnt] = locations[hCnt].getHostName();
         xferAddrs[hCnt] = locations[hCnt].getXferAddr();
-        NodeBase node = new NodeBase(xferAddrs[hCnt], 
+        NodeBase node = new NodeBase(xferAddrs[hCnt],
                                      locations[hCnt].getNetworkLocation());
         racks[hCnt] = node.toString();
       }
@@ -538,128 +532,14 @@ public class DFSUtil {
   }
 
   /**
-   * Returns collection of nameservice Ids from the configuration.
-   * @param conf configuration
-   * @return collection of nameservice Ids, or null if not specified
-   */
-  public static Collection<String> getNameServiceIds(Configuration conf) {
-    return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
-  }
-
-  /**
-   * @return <code>coll</code> if it is non-null and non-empty. Otherwise,
-   * returns a list with a single null value.
-   */
-  private static Collection<String> emptyAsSingletonNull(Collection<String> coll) {
-    if (coll == null || coll.isEmpty()) {
-      return Collections.singletonList(null);
-    } else {
-      return coll;
-    }
-  }
-  
-  /**
-   * Namenode HighAvailability related configuration.
-   * Returns collection of namenode Ids from the configuration. One logical id
-   * for each namenode in the in the HA setup.
-   * 
-   * @param conf configuration
-   * @param nsId the nameservice ID to look at, or null for non-federated 
-   * @return collection of namenode Ids
-   */
-  public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
-    String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId);
-    return conf.getTrimmedStringCollection(key);
-  }
-  
-  /**
-   * Given a list of keys in the order of preference, returns a value
-   * for the key in the given order from the configuration.
-   * @param defaultValue default value to return, when key was not found
-   * @param keySuffix suffix to add to the key, if it is not null
-   * @param conf Configuration
-   * @param keys list of keys in the order of preference
-   * @return value of the key or default if a key was not found in configuration
-   */
-  private static String getConfValue(String defaultValue, String keySuffix,
-      Configuration conf, String... keys) {
-    String value = null;
-    for (String key : keys) {
-      key = addSuffix(key, keySuffix);
-      value = conf.get(key);
-      if (value != null) {
-        break;
-      }
-    }
-    if (value == null) {
-      value = defaultValue;
-    }
-    return value;
-  }
-  
-  /** Add non empty and non null suffix to a key */
-  private static String addSuffix(String key, String suffix) {
-    if (suffix == null || suffix.isEmpty()) {
-      return key;
-    }
-    assert !suffix.startsWith(".") :
-      "suffix '" + suffix + "' should not already have '.' prepended.";
-    return key + "." + suffix;
-  }
-  
-  /** Concatenate list of suffix strings '.' separated */
-  private static String concatSuffixes(String... suffixes) {
-    if (suffixes == null) {
-      return null;
-    }
-    return Joiner.on(".").skipNulls().join(suffixes);
-  }
-  
-  /**
    * Return configuration key of format key.suffix1.suffix2...suffixN
    */
   public static String addKeySuffixes(String key, String... suffixes) {
-    String keySuffix = concatSuffixes(suffixes);
-    return addSuffix(key, keySuffix);
-  }
-
-  /**
-   * Returns the configured address for all NameNodes in the cluster.
-   * @param conf configuration
-   * @param defaultAddress default address to return in case key is not found.
-   * @param keys Set of keys to look for in the order of preference
-   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
-   */
-  private static Map<String, Map<String, InetSocketAddress>>
-    getAddresses(Configuration conf, String defaultAddress, String... keys) {
-    Collection<String> nameserviceIds = getNameServiceIds(conf);
-    return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys);
+    String keySuffix = DFSUtilClient.concatSuffixes(suffixes);
+    return DFSUtilClient.addSuffix(key, keySuffix);
   }
 
   /**
-   * Returns the configured address for all NameNodes in the cluster.
-   * @param conf configuration
-   * @param nsIds
-   *@param defaultAddress default address to return in case key is not found.
-   * @param keys Set of keys to look for in the order of preference   @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
-   */
-  private static Map<String, Map<String, InetSocketAddress>>
-    getAddressesForNsIds(Configuration conf, Collection<String> nsIds,
-                         String defaultAddress, String... keys) {
-    // Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
-    // across all of the configured nameservices and namenodes.
-    Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap();
-    for (String nsId : emptyAsSingletonNull(nsIds)) {
-      Map<String, InetSocketAddress> isas =
-        getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
-      if (!isas.isEmpty()) {
-        ret.put(nsId, isas);
-      }
-    }
-    return ret;
-  }
-  
-  /**
    * Get all of the RPC addresses of the individual NNs in a given nameservice.
    * 
    * @param conf Configuration
@@ -669,30 +549,8 @@ public class DFSUtil {
    */
   public static Map<String, InetSocketAddress> getRpcAddressesForNameserviceId(
       Configuration conf, String nsId, String defaultValue) {
-    return getAddressesForNameserviceId(conf, nsId, defaultValue,
-        DFS_NAMENODE_RPC_ADDRESS_KEY);
-  }
-
-  private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
-      Configuration conf, String nsId, String defaultValue,
-      String... keys) {
-    Collection<String> nnIds = getNameNodeIds(conf, nsId);
-    Map<String, InetSocketAddress> ret = Maps.newHashMap();
-    for (String nnId : emptyAsSingletonNull(nnIds)) {
-      String suffix = concatSuffixes(nsId, nnId);
-      String address = getConfValue(defaultValue, suffix, conf, keys);
-      if (address != null) {
-        InetSocketAddress isa = NetUtils.createSocketAddr(address);
-        if (isa.isUnresolved()) {
-          LOG.warn("Namenode for " + nsId +
-                   " remains unresolved for ID " + nnId +
-                   ".  Check your hdfs-site.xml file to " +
-                   "ensure namenodes are configured properly.");
-        }
-        ret.put(nnId, isa);
-      }
-    }
-    return ret;
+    return DFSUtilClient.getAddressesForNameserviceId(conf, nsId, defaultValue,
+                                                      DFS_NAMENODE_RPC_ADDRESS_KEY);
   }
 
   /**
@@ -700,9 +558,9 @@ public class DFSUtil {
    */
   public static Set<String> getAllNnPrincipals(Configuration conf) throws IOException {
     Set<String> principals = new HashSet<String>();
-    for (String nsId : DFSUtil.getNameServiceIds(conf)) {
+    for (String nsId : DFSUtilClient.getNameServiceIds(conf)) {
       if (HAUtil.isHAEnabled(conf, nsId)) {
-        for (String nnId : DFSUtil.getNameNodeIds(conf, nsId)) {
+        for (String nnId : DFSUtilClient.getNameNodeIds(conf, nsId)) {
           Configuration confForNn = new Configuration(conf);
           NameNode.initializeGenericKeys(confForNn, nsId, nnId);
           String principal = SecurityUtil.getServerPrincipal(confForNn
@@ -732,26 +590,8 @@ public class DFSUtil {
    */
   public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
       Configuration conf) {
-    return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-  }
-
-  /**
-   * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from
-   * the configuration.
-   *
-   * @return list of InetSocketAddresses
-   */
-  public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
-      Configuration conf, String scheme) {
-    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
-      return getAddresses(conf, null,
-          DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
-      return getAddresses(conf, null,
-          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
-    } else {
-      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
-    }
+    return DFSUtilClient.getAddresses(conf, null,
+                                      DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
   }
 
   /**
@@ -764,8 +604,8 @@ public class DFSUtil {
    */
   public static Map<String, Map<String, InetSocketAddress>> getBackupNodeAddresses(
       Configuration conf) throws IOException {
-    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf,
-        null, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+    Map<String, Map<String, InetSocketAddress>> addressList = DFSUtilClient.getAddresses(
+        conf, null, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
     if (addressList.isEmpty()) {
       throw new IOException("Incorrect configuration: backup node address "
           + DFS_NAMENODE_BACKUP_ADDRESS_KEY + " is not configured.");
@@ -783,8 +623,8 @@ public class DFSUtil {
    */
   public static Map<String, Map<String, InetSocketAddress>> getSecondaryNameNodeAddresses(
       Configuration conf) throws IOException {
-    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf, null,
-        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+    Map<String, Map<String, InetSocketAddress>> addressList = DFSUtilClient.getAddresses(
+        conf, null, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
     if (addressList.isEmpty()) {
       throw new IOException("Incorrect configuration: secondary namenode address "
           + DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + " is not configured.");
@@ -815,8 +655,9 @@ public class DFSUtil {
     }
     
     Map<String, Map<String, InetSocketAddress>> addressList =
-      getAddresses(conf, defaultAddress,
-        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+      DFSUtilClient.getAddresses(conf, defaultAddress,
+                                 DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                                 DFS_NAMENODE_RPC_ADDRESS_KEY);
     if (addressList.isEmpty()) {
       throw new IOException("Incorrect configuration: namenode address "
           + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "  
@@ -868,8 +709,10 @@ public class DFSUtil {
     }
 
     Map<String, Map<String, InetSocketAddress>> addressList =
-            getAddressesForNsIds(conf, parentNameServices, defaultAddress,
-                    DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+            DFSUtilClient.getAddressesForNsIds(conf, parentNameServices,
+                                               defaultAddress,
+                                               DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                                               DFS_NAMENODE_RPC_ADDRESS_KEY);
     if (addressList.isEmpty()) {
       throw new IOException("Incorrect configuration: namenode address "
               + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
@@ -1001,7 +844,7 @@ public class DFSUtil {
     // keep track of non-preferred keys here.
     Set<URI> nonPreferredUris = new HashSet<URI>();
     
-    for (String nsId : getNameServiceIds(conf)) {
+    for (String nsId : DFSUtilClient.getNameServiceIds(conf)) {
       if (HAUtil.isHAEnabled(conf, nsId)) {
         // Add the logical URI of the nameservice.
         try {
@@ -1013,7 +856,7 @@ public class DFSUtil {
         // Add the URI corresponding to the address of the NN.
         boolean uriFound = false;
         for (String key : keys) {
-          String addr = conf.get(concatSuffixes(key, nsId));
+          String addr = conf.get(DFSUtilClient.concatSuffixes(key, nsId));
           if (addr != null) {
             URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME,
                 NetUtils.createSocketAddr(addr));
@@ -1311,7 +1154,7 @@ public class DFSUtil {
     if (nameserviceId != null) {
       return nameserviceId;
     }
-    Collection<String> nsIds = getNameServiceIds(conf);
+    Collection<String> nsIds = DFSUtilClient.getNameServiceIds(conf);
     if (1 == nsIds.size()) {
       return nsIds.toArray(new String[1])[0];
     }
@@ -1342,14 +1185,14 @@ public class DFSUtil {
     String namenodeId = null;
     int found = 0;
     
-    Collection<String> nsIds = getNameServiceIds(conf);
-    for (String nsId : emptyAsSingletonNull(nsIds)) {
+    Collection<String> nsIds = DFSUtilClient.getNameServiceIds(conf);
+    for (String nsId : DFSUtilClient.emptyAsSingletonNull(nsIds)) {
       if (knownNsId != null && !knownNsId.equals(nsId)) {
         continue;
       }
       
-      Collection<String> nnIds = getNameNodeIds(conf, nsId);
-      for (String nnId : emptyAsSingletonNull(nnIds)) {
+      Collection<String> nnIds = DFSUtilClient.getNameNodeIds(conf, nsId);
+      for (String nnId : DFSUtilClient.emptyAsSingletonNull(nnIds)) {
         if (LOG.isTraceEnabled()) {
           LOG.trace(String.format("addressKey: %s nsId: %s nnId: %s",
               addressKey, nsId, nnId));
@@ -1453,10 +1296,10 @@ public class DFSUtil {
       nsId = getOnlyNameServiceIdOrNull(conf);
     }
 
-    String serviceAddrKey = concatSuffixes(
+    String serviceAddrKey = DFSUtilClient.concatSuffixes(
         DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId);
 
-    String addrKey = concatSuffixes(
+    String addrKey = DFSUtilClient.concatSuffixes(
         DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);
 
     String serviceRpcAddr = conf.get(serviceAddrKey);
@@ -1471,7 +1314,7 @@ public class DFSUtil {
    * name of that nameservice. If it refers to 0 or more than 1, return null.
    */
   public static String getOnlyNameServiceIdOrNull(Configuration conf) {
-    Collection<String> nsIds = getNameServiceIds(conf);
+    Collection<String> nsIds = DFSUtilClient.getNameServiceIds(conf);
     if (1 == nsIds.size()) {
       return nsIds.toArray(new String[1])[0];
     } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 8e7daf3..8d6a8fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1534,7 +1534,7 @@ public class DistributedFileSystem extends FileSystem {
   
   @Override
   protected URI canonicalizeUri(URI uri) {
-    if (HAUtil.isLogicalUri(getConf(), uri)) {
+    if (HAUtilClient.isLogicalUri(getConf(), uri)) {
       // Don't try to DNS-resolve logical URIs, since the 'authority'
       // portion isn't a proper hostname
       return uri;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 240256c..c967c69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
-import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -38,7 +37,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -151,7 +149,7 @@ public class HAUtil {
         "machine is one of the machines listed as a NN RPC address, " +
         "or configure " + DFSConfigKeys.DFS_NAMESERVICE_ID);
     
-    Collection<String> nnIds = DFSUtil.getNameNodeIds(conf, nsId);
+    Collection<String> nnIds = DFSUtilClient.getNameNodeIds(conf, nsId);
     String myNNId = conf.get(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY);
     Preconditions.checkArgument(nnIds != null,
         "Could not determine namenode ids in namespace '%s'. " +
@@ -205,32 +203,6 @@ public class HAUtil {
   public static void setAllowStandbyReads(Configuration conf, boolean val) {
     conf.setBoolean("dfs.ha.allow.stale.reads", val);
   }
- 
-  /**
-   * @return true if the given nameNodeUri appears to be a logical URI.
-   */
-  public static boolean isLogicalUri(
-      Configuration conf, URI nameNodeUri) {
-    String host = nameNodeUri.getHost();
-    // A logical name must be one of the service IDs.
-    return DFSUtil.getNameServiceIds(conf).contains(host);
-  }
-
-  /**
-   * Check whether the client has a failover proxy provider configured
-   * for the namenode/nameservice.
-   *
-   * @param conf Configuration
-   * @param nameNodeUri The URI of namenode
-   * @return true if failover is configured.
-   */
-  public static boolean isClientFailoverConfigured(
-      Configuration conf, URI nameNodeUri) {
-    String host = nameNodeUri.getHost();
-    String configKey = HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX
-        + "." + host;
-    return conf.get(configKey) != null;
-  }
 
   /**
    * Check whether logical URI is needed for the namenode and
@@ -257,43 +229,6 @@ public class HAUtil {
   }
 
   /**
-   * Parse the file system URI out of the provided token.
-   */
-  public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
-    String tokStr = token.getService().toString();
-    final String prefix = buildTokenServicePrefixForLogicalUri(scheme);
-    if (tokStr.startsWith(prefix)) {
-      tokStr = tokStr.replaceFirst(prefix, "");
-    }
-    return URI.create(scheme + "://" + tokStr);
-  }
-  
-  /**
-   * Get the service name used in the delegation token for the given logical
-   * HA service.
-   * @param uri the logical URI of the cluster
-   * @param scheme the scheme of the corresponding FileSystem
-   * @return the service name
-   */
-  public static Text buildTokenServiceForLogicalUri(final URI uri,
-      final String scheme) {
-    return new Text(buildTokenServicePrefixForLogicalUri(scheme)
-        + uri.getHost());
-  }
-  
-  /**
-   * @return true if this token corresponds to a logical nameservice
-   * rather than a specific namenode.
-   */
-  public static boolean isTokenForLogicalUri(Token<?> token) {
-    return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
-  }
-
-  public static String buildTokenServicePrefixForLogicalUri(String scheme) {
-    return HA_DT_SERVICE_PREFIX + scheme + ":";
-  }
-
-  /**
    * Locate a delegation token associated with the given HA cluster URI, and if
    * one is found, clone it to also represent the underlying namenode address.
    * @param ugi the UGI to modify
@@ -305,8 +240,8 @@ public class HAUtil {
       UserGroupInformation ugi, URI haUri,
       Collection<InetSocketAddress> nnAddrs) {
     // this cloning logic is only used by hdfs
-    Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
-        HdfsConstants.HDFS_URI_SCHEME);
+    Text haService = HAUtilClient.buildTokenServiceForLogicalUri(haUri,
+                                                                 HdfsConstants.HDFS_URI_SCHEME);
     Token<DelegationTokenIdentifier> haToken =
         tokenSelector.selectToken(haService, ugi.getTokens());
     if (haToken != null) {
@@ -318,7 +253,8 @@ public class HAUtil {
             new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
         SecurityUtil.setTokenService(specificToken, singleNNAddr);
         Text alias = new Text(
-            buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
+            HAUtilClient.buildTokenServicePrefixForLogicalUri(
+                HdfsConstants.HDFS_URI_SCHEME)
                 + "//" + specificToken.getService());
         ugi.addToken(alias, specificToken);
         if (LOG.isDebugEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index ee3290b..0da7a4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -179,8 +179,8 @@ public class NameNodeProxies {
 
       Text dtService;
       if (failoverProxyProvider.useLogicalURI()) {
-        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
-            HdfsConstants.HDFS_URI_SCHEME);
+        dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri,
+                                                                HdfsConstants.HDFS_URI_SCHEME);
       } else {
         dtService = SecurityUtil.buildTokenService(
             NameNode.getAddress(nameNodeUri));
@@ -245,8 +245,8 @@ public class NameNodeProxies {
           new Class[] { xface }, dummyHandler);
       Text dtService;
       if (failoverProxyProvider.useLogicalURI()) {
-        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
-            HdfsConstants.HDFS_URI_SCHEME);
+        dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri,
+                                                                HdfsConstants.HDFS_URI_SCHEME);
       } else {
         dtService = SecurityUtil.buildTokenService(
             NameNode.getAddress(nameNodeUri));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 7cf8a47..52259b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -120,13 +120,6 @@ public class HdfsConstants {
    */
   public static final String HDFS_URI_SCHEME = "hdfs";
 
-  /**
-   * A prefix put before the namenode URI inside the "service" field
-   * of a delgation token, indicating that the URI is a logical (HA)
-   * URI.
-   */
-  public static final String HA_DT_SERVICE_PREFIX = "ha-";
-
 
   /**
    * Current layout version for NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
index 0ebf3dc..c6f8489 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java
@@ -21,7 +21,7 @@ import io.netty.handler.codec.http.QueryStringDecoder;
 import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
@@ -112,10 +112,10 @@ class ParameterParser {
       Token<DelegationTokenIdentifier>();
     token.decodeFromUrlString(delegation);
     URI nnUri = URI.create(HDFS_URI_SCHEME + "://" + namenodeId());
-    boolean isLogical = HAUtil.isLogicalUri(conf, nnUri);
+    boolean isLogical = HAUtilClient.isLogicalUri(conf, nnUri);
     if (isLogical) {
-      token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri,
-        HDFS_URI_SCHEME));
+      token.setService(
+          HAUtilClient.buildTokenServiceForLogicalUri(nnUri, HDFS_URI_SCHEME));
     } else {
       token.setService(SecurityUtil.buildTokenService(nnUri));
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 4575d64..1e94923 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -73,7 +74,6 @@ import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.JvmPauseMonitor;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.LogManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -404,7 +404,7 @@ public class NameNode implements NameNodeStatusMXBean {
       return;
     }
 
-    if (DFSUtil.getNameServiceIds(conf).contains(nnHost)) {
+    if (DFSUtilClient.getNameServiceIds(conf).contains(nnHost)) {
       // host name is logical
       clientNamenodeAddress = nnHost;
     } else if (nnUri.getPort() > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index b8dcbbf..d608281 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -71,7 +72,6 @@ import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
-import org.apache.hadoop.ipc.GenericRefreshProtocol;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
@@ -574,7 +574,7 @@ public class DFSAdmin extends FsShell {
     DistributedFileSystem dfs = getDFS();
     Configuration dfsConf = dfs.getConf();
     URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
 
     if (isHaEnabled) {
       String nsId = dfsUri.getHost();
@@ -719,7 +719,7 @@ public class DFSAdmin extends FsShell {
     }
 
     URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
     if (isHaEnabled) {
       String nsId = dfsUri.getHost();
       List<ProxyAndInfo<ClientProtocol>> proxies =
@@ -770,7 +770,7 @@ public class DFSAdmin extends FsShell {
     DistributedFileSystem dfs = getDFS();
     Configuration dfsConf = dfs.getConf();
     URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
 
     if (isHaEnabled) {
       String nsId = dfsUri.getHost();
@@ -803,7 +803,7 @@ public class DFSAdmin extends FsShell {
     DistributedFileSystem dfs = getDFS();
     Configuration dfsConf = dfs.getConf();
     URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
 
     if (isHaEnabled) {
       String nsId = dfsUri.getHost();
@@ -854,7 +854,7 @@ public class DFSAdmin extends FsShell {
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
     Configuration dfsConf = dfs.getConf();
     URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
 
     if (isHaEnabled) {
       String nsId = dfsUri.getHost();
@@ -1148,7 +1148,7 @@ public class DFSAdmin extends FsShell {
     
     Configuration dfsConf = dfs.getConf();
     URI dfsUri = dfs.getUri();
-    boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri);
+    boolean isHaAndLogicalUri = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
     if (isHaAndLogicalUri) {
       // In the case of HA and logical URI, run finalizeUpgrade for all
       // NNs in this nameservice.
@@ -1188,7 +1188,7 @@ public class DFSAdmin extends FsShell {
     DistributedFileSystem dfs = getDFS();
     Configuration dfsConf = dfs.getConf();
     URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
 
     if (isHaEnabled) {
       String nsId = dfsUri.getHost();
@@ -1275,7 +1275,7 @@ public class DFSAdmin extends FsShell {
 
     DistributedFileSystem dfs = getDFS();
     URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+    boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
 
     if (isHaEnabled) {
       // Run refreshServiceAcl for all NNs if HA is enabled
@@ -1318,7 +1318,7 @@ public class DFSAdmin extends FsShell {
 
     DistributedFileSystem dfs = getDFS();
     URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+    boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
 
     if (isHaEnabled) {
       // Run refreshUserToGroupsMapings for all NNs if HA is enabled
@@ -1363,7 +1363,7 @@ public class DFSAdmin extends FsShell {
 
     DistributedFileSystem dfs = getDFS();
     URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+    boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
 
     if (isHaEnabled) {
       // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
@@ -1402,7 +1402,7 @@ public class DFSAdmin extends FsShell {
 
     DistributedFileSystem dfs = getDFS();
     URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+    boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri);
 
     if (isHaEnabled) {
       // Run refreshCallQueue for all NNs if HA is enabled
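
The DFSAdmin hunks above all follow the same HA-aware shape, condensed in the rough sketch below; it is illustrative only, and the two run* helpers are hypothetical placeholders rather than Hadoop APIs.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtilClient;

class HaAwareAdminSketch {
  // Mirrors the repeated DFSAdmin pattern: a logical (HA) URI means the
  // operation must be applied to every NameNode in the nameservice, whose
  // id is the host part of the URI.
  static void runAdminOp(DistributedFileSystem dfs) {
    Configuration dfsConf = dfs.getConf();
    URI dfsUri = dfs.getUri();
    if (HAUtilClient.isLogicalUri(dfsConf, dfsUri)) {
      String nsId = dfsUri.getHost();
      runOnEachNameNode(dfsConf, nsId);   // hypothetical helper
    } else {
      runOnActiveNameNode(dfs);           // hypothetical helper
    }
  }

  private static void runOnEachNameNode(Configuration conf, String nsId) {
    // In DFSAdmin this builds a ClientProtocol proxy per NameNode and
    // invokes the operation on each one.
  }

  private static void runOnActiveNameNode(DistributedFileSystem dfs) {
    // Non-HA case: talk to the single configured NameNode.
  }
}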

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index e9c611d..aa69dca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.ha.HAAdmin;
 import org.apache.hadoop.ha.HAServiceTarget;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -125,7 +126,9 @@ public class DFSHAAdmin extends HAAdmin {
    */
   @Override
   protected Collection<String> getTargetIds(String namenodeToActivate) {
-    return DFSUtil.getNameNodeIds(getConf(), (nameserviceId != null)? nameserviceId : DFSUtil.getNamenodeNameServiceId(getConf()));
+    return DFSUtilClient.getNameNodeIds(getConf(),
+                                        (nameserviceId != null) ? nameserviceId : DFSUtil.getNamenodeNameServiceId(
+                                            getConf()));
   }
   
   public static void main(String[] argv) throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index 4b35a75..6376243 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -38,8 +38,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -139,8 +138,8 @@ public class DelegationTokenFetcher {
 
     // For backward compatibility
     URI fsUri = URI.create(
-            url.replaceFirst("^http://", WebHdfsFileSystem.SCHEME + "://")
-               .replaceFirst("^https://", SWebHdfsFileSystem.SCHEME + "://"));
+            url.replaceFirst("^http://", WebHdfsConstants.WEBHDFS_SCHEME + "://")
+               .replaceFirst("^https://", WebHdfsConstants.SWEBHDFS_SCHEME + "://"));
 
     return FileSystem.get(fsUri, conf);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
index ffca099..73c10d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
@@ -22,11 +22,9 @@ import org.apache.hadoop.io.Text;
 
 public class SWebHdfsFileSystem extends WebHdfsFileSystem {
 
-  public static final String SCHEME = "swebhdfs";
-
   @Override
   public String getScheme() {
-    return SCHEME;
+    return WebHdfsConstants.SWEBHDFS_SCHEME;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
index e5e755a..bc3eb4b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.DelegationTokenRenewer.Renewable;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
@@ -74,8 +74,8 @@ final class TokenAspect<T extends FileSystem & Renewable> {
             throws IOException {
       final URI uri;
       final String scheme = getSchemeByKind(token.getKind());
-      if (HAUtil.isTokenForLogicalUri(token)) {
-        uri = HAUtil.getServiceUriFromToken(scheme, token);
+      if (HAUtilClient.isTokenForLogicalUri(token)) {
+        uri = HAUtilClient.getServiceUriFromToken(scheme, token);
       } else {
         final InetSocketAddress address = SecurityUtil.getTokenServiceAddr
                 (token);
@@ -86,9 +86,9 @@ final class TokenAspect<T extends FileSystem & Renewable> {
 
     private static String getSchemeByKind(Text kind) {
       if (kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)) {
-        return WebHdfsFileSystem.SCHEME;
+        return WebHdfsConstants.WEBHDFS_SCHEME;
       } else if (kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND)) {
-        return SWebHdfsFileSystem.SCHEME;
+        return WebHdfsConstants.SWEBHDFS_SCHEME;
       } else {
         throw new IllegalArgumentException("Unsupported scheme");
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 6f944c7..acdd8e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -58,7 +58,8 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.HAUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -91,12 +92,10 @@ import com.google.common.collect.Lists;
 public class WebHdfsFileSystem extends FileSystem
     implements DelegationTokenRenewer.Renewable, TokenAspect.TokenManagementDelegator {
   public static final Log LOG = LogFactory.getLog(WebHdfsFileSystem.class);
-  /** File System URI: {SCHEME}://namenode:port/path/to/file */
-  public static final String SCHEME = "webhdfs";
   /** WebHdfs version. */
   public static final int VERSION = 1;
   /** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */
-  public static final String PATH_PREFIX = "/" + SCHEME + "/v" + VERSION;
+  public static final String PATH_PREFIX = "/" + WebHdfsConstants.WEBHDFS_SCHEME + "/v" + VERSION;
 
   /** Default connection factory may be overridden in tests to use smaller timeout values */
   protected URLConnectionFactory connectionFactory;
@@ -125,7 +124,7 @@ public class WebHdfsFileSystem extends FileSystem
    */
   @Override
   public String getScheme() {
-    return SCHEME;
+    return WebHdfsConstants.WEBHDFS_SCHEME;
   }
 
   /**
@@ -157,13 +156,13 @@ public class WebHdfsFileSystem extends FileSystem
     this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
     this.nnAddrs = resolveNNAddr();
 
-    boolean isHA = HAUtil.isClientFailoverConfigured(conf, this.uri);
-    boolean isLogicalUri = isHA && HAUtil.isLogicalUri(conf, this.uri);
+    boolean isHA = HAUtilClient.isClientFailoverConfigured(conf, this.uri);
+    boolean isLogicalUri = isHA && HAUtilClient.isLogicalUri(conf, this.uri);
     // In non-HA or non-logical URI case, the code needs to call
     // getCanonicalUri() in order to handle the case where no port is
     // specified in the URI
     this.tokenServiceName = isLogicalUri ?
-        HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
+        HAUtilClient.buildTokenServiceForLogicalUri(uri, getScheme())
         : SecurityUtil.buildTokenService(getCanonicalUri());
 
     if (!isHA) {
@@ -888,7 +887,6 @@ public class WebHdfsFileSystem extends FileSystem
 
   /**
    * Create a symlink pointing to the destination path.
-   * @see org.apache.hadoop.fs.Hdfs#createSymlink(Path, Path, boolean) 
    */
   public void createSymlink(Path destination, Path f, boolean createParent
       ) throws IOException {
@@ -1432,13 +1430,13 @@ public class WebHdfsFileSystem extends FileSystem
 
     ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();
 
-    if (!HAUtil.isLogicalUri(conf, uri)) {
+    if (!HAUtilClient.isLogicalUri(conf, uri)) {
       InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
           getDefaultPort());
       ret.add(addr);
 
     } else {
-      Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
+      Map<String, Map<String, InetSocketAddress>> addresses = DFSUtilClient
           .getHaNnWebHdfsAddresses(conf, scheme);
 
       // Extract the entry corresponding to the logical name.
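
With the SCHEME constants removed from WebHdfsFileSystem and SWebHdfsFileSystem, callers (see the test updates below) build URIs from WebHdfsConstants instead; roughly as in this illustrative sketch, which is not part of the commit.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;

class WebHdfsSchemeSketch {
  // nnHttpAddr is assumed to be the NameNode's HTTP "host:port" address.
  static FileSystem openWebHdfs(Configuration conf, String nnHttpAddr)
      throws IOException {
    URI uri = URI.create(WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnHttpAddr);
    return FileSystem.get(uri, conf);
  }
}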

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
index 2ff7050..c98ba66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.ipc.RemoteException;
@@ -88,7 +89,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
     conf.set(FsPermission.UMASK_LABEL, "000");
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
     cluster = new MiniDFSCluster.Builder(conf).build();
-    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
+    webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
     dfs = cluster.getFileSystem();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 5ef2f73..681e9bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -232,7 +232,7 @@ public class DFSTestUtil {
     } else { // append the nsid
       conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsIds + "," + logicalName);
     }
-    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
+    conf.set(DFSUtil.addKeySuffixes(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
             logicalName), "nn1,nn2");
     conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX +
             "." + logicalName,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index d16ce1c..68cc155 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -75,7 +75,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
@@ -890,7 +890,7 @@ public class TestDFSClientRetries {
       cluster.waitActive();
       final DistributedFileSystem dfs = cluster.getFileSystem();
       final FileSystem fs = isWebHDFS ? WebHdfsTestUtil.getWebHdfsFileSystem(
-          conf, WebHdfsFileSystem.SCHEME) : dfs;
+          conf, WebHdfsConstants.WEBHDFS_SCHEME) : dfs;
       final URI uri = dfs.getUri();
       assertTrue(HdfsUtils.isHealthy(uri));
 
@@ -1094,7 +1094,7 @@ public class TestDFSClientRetries {
     final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
         username, new String[]{"supergroup"});
 
-    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME)
+    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsConstants.WEBHDFS_SCHEME)
         : DFSTestUtil.getFileSystemAs(ugi, conf);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index ed913f1..7f31f33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -216,13 +216,13 @@ public class TestDFSUtil {
   }
 
   /**
-   * Test {@link DFSUtil#getNameServiceIds(Configuration)}
+   * Test {@link DFSUtilClient#getNameServiceIds(Configuration)}
    */
   @Test
   public void testGetNameServiceIds() {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set(DFS_NAMESERVICES, "nn1,nn2");
-    Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
+    Collection<String> nameserviceIds = DFSUtilClient.getNameServiceIds(conf);
     Iterator<String> it = nameserviceIds.iterator();
     assertEquals(2, nameserviceIds.size());
     assertEquals("nn1", it.next().toString());
@@ -587,7 +587,7 @@ public class TestDFSUtil {
     Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
 
     Map<String, Map<String, InetSocketAddress>> map =
-        DFSUtil.getHaNnWebHdfsAddresses(conf, "webhdfs");
+        DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");
 
     assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
     assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 9a10206..0689a53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -67,7 +67,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -533,7 +533,7 @@ public class TestDistributedFileSystem {
     }
 
     //webhdfs
-    final String webhdfsuri = WebHdfsFileSystem.SCHEME  + "://" + nnAddr;
+    final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr;
     System.out.println("webhdfsuri=" + webhdfsuri);
     final FileSystem webhdfs = ugi.doAs(
         new PrivilegedExceptionAction<FileSystem>() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
index e43593d..b211ffb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -21,10 +21,8 @@ import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
-import java.io.PrintWriter;
 import java.io.RandomAccessFile;
 import java.io.StringReader;
-import java.io.StringWriter;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -44,7 +42,6 @@ import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
@@ -72,7 +69,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.security.AccessControlException;
@@ -612,7 +609,7 @@ public class TestEncryptionZones {
     final HdfsAdmin dfsAdmin =
         new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
     final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
-        WebHdfsFileSystem.SCHEME);
+        WebHdfsConstants.WEBHDFS_SCHEME);
 
     final Path zone = new Path("/zone");
     fs.mkdirs(zone);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 163378c..4541e69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Assert;
@@ -858,7 +858,7 @@ public class TestQuota {
     DFSAdmin admin = new DFSAdmin(conf);
 
     final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    final String webhdfsuri = WebHdfsFileSystem.SCHEME  + "://" + nnAddr;
+    final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr;
     System.out.println("webhdfsuri=" + webhdfsuri);
     final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf);
 
@@ -922,7 +922,7 @@ public class TestQuota {
     DFSAdmin admin = new DFSAdmin(conf);
 
     final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
-    final String webhdfsuri = WebHdfsFileSystem.SCHEME  + "://" + nnAddr;
+    final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr;
     System.out.println("webhdfsuri=" + webhdfsuri);
     final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf);
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
index 98e4b1e..403b21e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.junit.AfterClass;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
index dff8fa4..13d34d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
@@ -169,7 +170,7 @@ public class TestDelegationToken {
   @Test
   public void testDelegationTokenWebHdfsApi() throws Exception {
     ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
-    final String uri = WebHdfsFileSystem.SCHEME  + "://"
+    final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://"
         + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
     //get file system as JobTracker
     final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f8003dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
index 3bbd6f1..9d0301c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.security.TestDoAsEffectiveUser;
@@ -149,7 +150,7 @@ public class TestDelegationTokenForProxyUser {
   public void testWebHdfsDoAs() throws Exception {
     WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
     WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
-    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config, WebHdfsFileSystem.SCHEME);
+    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config, WebHdfsConstants.WEBHDFS_SCHEME);
     
     final Path root = new Path("/");
     cluster.getFileSystem().setPermission(root, new FsPermission((short)0777));

