hadoop-common-commits mailing list archives

From weic...@apache.org
Subject hadoop git commit: HDFS-8897. Balancer should handle fs.defaultFS trailing slash in HA. Contributed by John Zhuge.
Date Thu, 11 Aug 2016 05:30:18 GMT
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7a1a3f775 -> 65e365714


HDFS-8897. Balancer should handle fs.defaultFS trailing slash in HA. Contributed by John Zhuge.

(cherry picked from commit f1d5a95cf31f593f362a4e9c0afa7587f6e14957)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65e36571
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65e36571
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65e36571

Branch: refs/heads/branch-2.8
Commit: 65e36571494d6118fde9026c22a697a3701df52e
Parents: 7a1a3f7
Author: Wei-Chiu Chuang <weichiu@apache.org>
Authored: Wed Aug 10 22:24:04 2016 -0700
Committer: Wei-Chiu Chuang <weichiu@apache.org>
Committed: Wed Aug 10 22:30:06 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    | 55 +++++++++++++-------
 .../org/apache/hadoop/hdfs/TestDFSUtil.java     | 54 ++++++++++++-------
 2 files changed, 71 insertions(+), 38 deletions(-)
----------------------------------------------------------------------
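For context on the fix: java.net.URI equality is component-wise, so "hdfs://ns1/" (path "/") and "hdfs://ns1" (empty path) are distinct URIs. Before this patch, a fs.defaultFS of "hdfs://ns1/" could therefore show up as an extra URI alongside the HA nameservice URI instead of collapsing into it. The sketch below is not part of the commit; TrailingSlashDemo and normalize() are hypothetical names that mirror the trimUri() logic added to DFSUtil, which drops the path component from hdfs:// URIs.

    import java.net.URI;
    import java.net.URISyntaxException;

    public class TrailingSlashDemo {
      // Hypothetical helper mirroring the trimUri() logic in this patch:
      // strip any path component from an hdfs:// URI so that
      // "hdfs://ns1/" and "hdfs://ns1" compare equal.
      static URI normalize(URI uri) throws URISyntaxException {
        String path = uri.getPath();
        if ("hdfs".equals(uri.getScheme()) && path != null && !path.isEmpty()) {
          return new URI(uri.getScheme(), null, uri.getHost(), uri.getPort(),
              null, null, null);
        }
        return uri;
      }

      public static void main(String[] args) throws URISyntaxException {
        URI withSlash = new URI("hdfs://ns1/");
        URI bare = new URI("hdfs://ns1");
        System.out.println(withSlash.equals(bare));            // false
        System.out.println(normalize(withSlash).equals(bare)); // true
      }
    }

With the path trimmed, getInternalNsRpcUris() returns a single "hdfs://ns1" entry for an HA defaultFS with a trailing slash, which is what the new testGetNNUris2 test below asserts.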


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65e36571/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index a290564..3d01e2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -760,12 +760,7 @@ public class DFSUtil {
     Set<URI> nonPreferredUris = new HashSet<URI>();
     
     for (String nsId : nameServices) {
-      URI nsUri;
-      try {
-        nsUri = new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId);
-      } catch (URISyntaxException ue) {
-        throw new IllegalArgumentException(ue);
-      }
+      URI nsUri = createUri(HdfsConstants.HDFS_URI_SCHEME, nsId, -1);
       /**
        * Determine whether the logical URI of the name service can be resolved
        * by the configured failover proxy provider. If not, we should try to
@@ -805,7 +800,8 @@ public class DFSUtil {
     for (String key : keys) {
       String addr = conf.get(key);
       if (addr != null) {
-        URI uri = createUri("hdfs", NetUtils.createSocketAddr(addr));
+        URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME,
+            NetUtils.createSocketAddr(addr));
         if (!uriFound) {
           uriFound = true;
           ret.add(uri);
@@ -823,19 +819,21 @@ public class DFSUtil {
     // nor the rpc-address (which overrides defaultFS) is given.
     if (!uriFound) {
       URI defaultUri = FileSystem.getDefaultUri(conf);
+      if (defaultUri != null) {
+        // checks if defaultUri is ip:port format
+        // and convert it to hostname:port format
+        if (defaultUri.getPort() != -1) {
+          defaultUri = createUri(defaultUri.getScheme(),
+              NetUtils.createSocketAddr(defaultUri.getHost(),
+                  defaultUri.getPort()));
+        }
 
-      // checks if defaultUri is ip:port format
-      // and convert it to hostname:port format
-      if (defaultUri != null && (defaultUri.getPort() != -1)) {
-        defaultUri = createUri(defaultUri.getScheme(),
-            NetUtils.createSocketAddr(defaultUri.getHost(),
-                defaultUri.getPort()));
-      }
+        defaultUri = trimUri(defaultUri);
 
-      if (defaultUri != null &&
-          HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
-          !nonPreferredUris.contains(defaultUri)) {
-        ret.add(defaultUri);
+        if (HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
+            !nonPreferredUris.contains(defaultUri)) {
+          ret.add(defaultUri);
+        }
       }
     }
     
@@ -1168,7 +1166,26 @@ public class DFSUtil {
   public static URI createUri(String scheme, InetSocketAddress address) {
     return DFSUtilClient.createUri(scheme, address);
   }
-  
+
+  /** Create an URI from scheme, host, and port. */
+  public static URI createUri(String scheme, String host, int port) {
+    try {
+      return new URI(scheme, null, host, port, null, null, null);
+    } catch (URISyntaxException x) {
+      throw new IllegalArgumentException(x.getMessage(), x);
+    }
+  }
+
+  /** Remove unnecessary path from HDFS URI. */
+  static URI trimUri(URI uri) {
+    String path = uri.getPath();
+    if (HdfsConstants.HDFS_URI_SCHEME.equals(uri.getScheme()) &&
+        path != null && !path.isEmpty()) {
+      uri = createUri(uri.getScheme(), uri.getHost(), uri.getPort());
+    }
+    return uri;
+  }
+
   /**
    * Add protobuf based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}
    * @param conf configuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65e36571/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index ae04722..b6b3453 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -82,7 +82,11 @@ import org.junit.Test;
 import com.google.common.collect.Sets;
 
 public class TestDFSUtil {
-  
+
+  static final String NS1_NN_ADDR    = "ns1-nn.example.com:9820";
+  static final String NS1_NN1_ADDR   = "ns1-nn1.example.com:9820";
+  static final String NS1_NN2_ADDR   = "ns1-nn2.example.com:9820";
+
   /**
    * Reset to default UGI settings since some tests change them.
    */
@@ -588,8 +592,6 @@ public class TestDFSUtil {
   @Test
   public void testGetHaNnHttpAddresses() throws IOException {
     final String LOGICAL_HOST_NAME = "ns1";
-    final String NS1_NN1_ADDR      = "ns1-nn1.example.com:8020";
-    final String NS1_NN2_ADDR      = "ns1-nn2.example.com:8020";
 
     Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
 
@@ -638,9 +640,6 @@ public class TestDFSUtil {
   public void testGetNNUris() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
 
-    final String NS1_NN_ADDR   = "ns1-nn.example.com:8020";
-    final String NS1_NN1_ADDR   = "ns1-nn1.example.com:8020";
-    final String NS1_NN2_ADDR   = "ns1-nn2.example.com:8020";
     final String NS2_NN_ADDR    = "ns2-nn.example.com:8020";
     final String NN1_ADDR       = "nn.example.com:8020";
     final String NN1_SRVC_ADDR  = "nn.example.com:8021";
@@ -649,12 +648,10 @@ public class TestDFSUtil {
     conf.set(DFS_NAMESERVICES, "ns1");
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1"), NS1_NN1_ADDR);
-
     conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "hdfs://" + NN2_ADDR);
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
 
     Collection<URI> uris = DFSUtil.getInternalNsRpcUris(conf);
-
     assertEquals("Incorrect number of URIs returned", 2, uris.size());
     assertTrue("Missing URI for name service ns1",
         uris.contains(new URI("hdfs://" + NS1_NN1_ADDR)));
@@ -669,15 +666,11 @@ public class TestDFSUtil {
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_ADDR);
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_ADDR);
-
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1"), NS1_NN_ADDR);
-
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns2"), NS2_NN_ADDR);
-
     conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "hdfs://" + NN1_ADDR);
-
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN2_ADDR);
 
     /**
@@ -715,7 +708,6 @@ public class TestDFSUtil {
         + "ConfiguredFailoverProxyProvider");
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
-
     assertEquals("Incorrect number of URIs returned", 3, uris.size());
     assertTrue("Missing URI for name service ns1",
         uris.contains(new URI("hdfs://ns1")));
@@ -729,7 +721,6 @@ public class TestDFSUtil {
         "viewfs://vfs-name.example.com");
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
-
     assertEquals("Incorrect number of URIs returned", 3, uris.size());
     assertTrue("Missing URI for name service ns1",
         uris.contains(new URI("hdfs://ns1")));
@@ -743,7 +734,6 @@ public class TestDFSUtil {
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
     
     uris = DFSUtil.getInternalNsRpcUris(conf);
-
     assertEquals("Incorrect number of URIs returned", 3, uris.size());
     assertTrue("Missing URI for name service ns1",
         uris.contains(new URI("hdfs://ns1")));
@@ -757,7 +747,6 @@ public class TestDFSUtil {
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
-
     assertEquals("Incorrect number of URIs returned", 1, uris.size());
     assertTrue("Missing URI for RPC address (defaultFS)",
         uris.contains(new URI("hdfs://" + NN1_ADDR)));
@@ -767,7 +756,6 @@ public class TestDFSUtil {
     conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN2_ADDR);
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
-
     assertEquals("Incorrect number of URIs returned", 1, uris.size());
     assertTrue("Missing URI for RPC address",
         uris.contains(new URI("hdfs://" + NN2_ADDR)));
@@ -779,7 +767,6 @@ public class TestDFSUtil {
     conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_ADDR);
 
     uris = DFSUtil.getInternalNsRpcUris(conf);
-
     assertEquals("Incorrect number of URIs returned", 1, uris.size());
     assertTrue("Missing URI for service ns1",
         uris.contains(new URI("hdfs://" + NN1_ADDR)));
@@ -791,12 +778,41 @@ public class TestDFSUtil {
     conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR);
     
     uris = DFSUtil.getInternalNsRpcUris(conf);
-
     assertEquals("Incorrect number of URIs returned", 1, uris.size());
     assertTrue("Missing URI for service address",
         uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
   }
 
+  @Test
+  public void testGetNNUris2() throws Exception {
+    // Make sure that an HA URI plus a slash being the default URI doesn't
+    // result in multiple entries being returned.
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_NAMESERVICES, "ns1");
+    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),
+        "nn1,nn2");
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_ADDR);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_ADDR);
+
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1"), NS1_NN_ADDR);
+
+    String proxyProviderKey = HdfsClientConfigKeys.Failover.
+        PROXY_PROVIDER_KEY_PREFIX + ".ns1";
+    conf.set(proxyProviderKey, "org.apache.hadoop.hdfs.server.namenode.ha."
+        + "ConfiguredFailoverProxyProvider");
+
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1/");
+
+    Collection<URI> uris = DFSUtil.getInternalNsRpcUris(conf);
+
+    assertEquals("Incorrect number of URIs returned", 1, uris.size());
+    assertTrue("Missing URI for name service ns1",
+        uris.contains(new URI("hdfs://ns1")));
+  }
+
   @Test (timeout=15000)
   public void testLocalhostReverseLookup() {
     // 127.0.0.1 -> localhost reverse resolution does not happen on Windows.

