From: eclark@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Fri, 02 Oct 2015 15:57:31 -0000
Subject: [52/52] [abbrv] hadoop git commit: HADOOP-12432 Add support for
 include/exclude lists on IPv6 setup

HADOOP-12432 Add support for include/exclude lists on IPv6 setup

Signed-off-by: Elliott Clark

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f33be9aa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f33be9aa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f33be9aa

Branch: refs/heads/HADOOP-11890
Commit: f33be9aa619cae48ba56ef172bf146cd805c1ba2
Parents: 6e3702c
Author: Nemanja Matkovic
Authored: Thu Oct 1 17:54:20 2015 -0700
Committer: Elliott Clark
Committed: Fri Oct 2 08:55:47 2015 -0700

----------------------------------------------------------------------
 .../server/blockmanagement/HostFileManager.java | 13 +++++-----
 .../blockmanagement/TestHostFileManager.java    | 25 ++++++++++++++------
 .../hdfs/server/namenode/TestHostsFiles.java    | 10 ++++----
 3 files changed, 29 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f33be9aa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
index e05ef9a..352fc95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
@@ -25,17 +25,18 @@ import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.UnmodifiableIterator;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.HostsFileReader;

 import javax.annotation.Nullable;
+
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -83,16 +84,14 @@ class HostFileManager {
   @VisibleForTesting
   static InetSocketAddress parseEntry(String type, String fn, String line) {
     try {
-      URI uri = new URI("dummy", line, null, null, null);
-      int port = uri.getPort() == -1 ? 0 : uri.getPort();
-      InetSocketAddress addr = new InetSocketAddress(uri.getHost(), port);
+      InetSocketAddress addr = NetUtils.createSocketAddr(line, 0);
       if (addr.isUnresolved()) {
         LOG.warn(String.format("Failed to resolve address `%s` in `%s`. " +
             "Ignoring in the %s list.", line, fn, type));
         return null;
       }
       return addr;
-    } catch (URISyntaxException e) {
+    } catch (IllegalArgumentException e) {
       LOG.warn(String.format("Failed to parse `%s` in `%s`. " + "Ignoring in " +
           "the %s list.", line, fn, type));
     }
@@ -227,7 +226,7 @@ class HostFileManager {
       @Override
       public String apply(@Nullable InetSocketAddress addr) {
         assert addr != null;
-        return addr.getAddress().getHostAddress() + ":" + addr.getPort();
+        return NetUtils.getSocketAddressString(addr);
       }
     }));
     return sb.append(")").toString();
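
The substantive change above is that parseEntry now delegates to
NetUtils.createSocketAddr, so an include/exclude entry may be a bracketed
IPv6 literal as well as the existing host:port and bare-host forms, and
malformed entries surface as IllegalArgumentException rather than
URISyntaxException (hence the new catch clause). A minimal sketch of the
new parsing path follows; the harness class and sample entries are ours,
not part of the commit, and it assumes createSocketAddr on this branch
accepts bracketed IPv6 literals:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.net.NetUtils;

    public class ParseEntryDemo {
      public static void main(String[] args) {
        // Entry shapes a host file may now contain: IPv4 with a port,
        // hostname with a port, and a bracketed IPv6 literal with a port.
        String[] lines = {"127.0.0.1:12345", "localhost:12345", "[::1]:42"};
        for (String line : lines) {
          // Default port 0, mirroring the parseEntry call in the diff above.
          InetSocketAddress addr = NetUtils.createSocketAddr(line, 0);
          System.out.println(line + " -> " + addr);
        }
      }
    }
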
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f33be9aa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
index c65b580..4871cc5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java
@@ -111,13 +111,19 @@ public class TestHostFileManager {
     includedNodes.add(entry("127.0.0.1:12345"));
     includedNodes.add(entry("localhost:12345"));
     includedNodes.add(entry("127.0.0.1:12345"));
+
+    includedNodes.add(entry("[::1]:42"));
+    includedNodes.add(entry("[0:0:0:0:0:0:0:1]:42"));
+    includedNodes.add(entry("[::1]:42"));
+
     includedNodes.add(entry("127.0.0.2"));

     excludedNodes.add(entry("127.0.0.1:12346"));
     excludedNodes.add(entry("127.0.30.1:12346"));
+    excludedNodes.add(entry("[::1]:24"));

-    Assert.assertEquals(2, includedNodes.size());
-    Assert.assertEquals(2, excludedNodes.size());
+    Assert.assertEquals(3, includedNodes.size());
+    Assert.assertEquals(3, excludedNodes.size());

     hm.refresh(includedNodes, excludedNodes);
@@ -126,20 +132,25 @@ public class TestHostFileManager {
     Map<String, DatanodeDescriptor> dnMap = (Map<String,
         DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");

-    // After the de-duplication, there should be only one DN from the included
+    // After the de-duplication, there should be three DNs from the included
     // nodes declared as dead.
-    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
+    Assert.assertEquals(3, dm.getDatanodeListForReport(HdfsConstants
         .DatanodeReportType.ALL).size());
-    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
+    Assert.assertEquals(3, dm.getDatanodeListForReport(HdfsConstants
         .DatanodeReportType.DEAD).size());
     dnMap.put("uuid-foo", new DatanodeDescriptor(new DatanodeID("127.0.0.1",
         "localhost", "uuid-foo", 12345, 1020, 1021, 1022)));
-    Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
+    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
         .DatanodeReportType.DEAD).size());
     dnMap.put("uuid-bar", new DatanodeDescriptor(new DatanodeID("127.0.0.2",
         "127.0.0.2", "uuid-bar", 12345, 1020, 1021, 1022)));
-    Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
+    Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
         .DatanodeReportType.DEAD).size());
+    dnMap.put("uuid-baz", new DatanodeDescriptor(new DatanodeID("[::1]",
+        "localhost", "uuid-baz", 42, 1020, 1021, 1022)));
+    Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
+        .DatanodeReportType.DEAD).size());
+
     DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0" +
         ".3", "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
     DFSTestUtil.setDatanodeDead(spam);
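
Note the pair of loopback spellings in the test: "[::1]:42" and
"[0:0:0:0:0:0:0:1]:42" name the same 128-bit address, so the three IPv6
add() calls contribute a single de-duplicated entry and the include set
grows from two unique addresses to three. That de-duplication falls out of
InetSocketAddress equality on the resolved address bytes, which a
plain-JDK check illustrates (class name and harness are ours):

    import java.net.InetSocketAddress;

    public class LoopbackDedupDemo {
      public static void main(String[] args) {
        // Both literals resolve to the same Inet6Address bytes, so the
        // socket addresses compare equal and a set keeps only one of them.
        InetSocketAddress shortForm = new InetSocketAddress("::1", 42);
        InetSocketAddress longForm =
            new InetSocketAddress("0:0:0:0:0:0:0:1", 42);
        System.out.println(shortForm.equals(longForm)); // prints: true
      }
    }
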
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f33be9aa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
index a93cc2a..380f998 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
@@ -112,8 +112,7 @@ public class TestHostsFiles {
       BlockLocation locs[] = fs.getFileBlockLocations(
           fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
       String name = locs[0].getNames()[0];
-      String names = name + "\n" + "localhost:42\n";
-      LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
+      LOG.info("adding '" + name + "' to exclude file " + excludeFile.toUri().getPath());
       DFSTestUtil.writeFile(localFileSys, excludeFile, name);
       ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
       DFSTestUtil.waitForDecommission(fs, name);
@@ -150,7 +149,8 @@ public class TestHostsFiles {
     assertTrue(localFileSys.mkdirs(dir));
     StringBuilder includeHosts = new StringBuilder();
     includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
-        .append("\n");
+        .append("\n").append("[::1]:42").append("\n")
+        .append("[0:0:0:0:0:0:0:1]:24").append("\n");
     DFSTestUtil.writeFile(localFileSys, excludeFile, "");
     DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
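
Written to disk, the include file built by the StringBuilder above contains
one host per line, with IPv6 literals bracketed so the trailing :port stays
unambiguous:

    localhost:52
    127.0.0.1:7777
    [::1]:42
    [0:0:0:0:0:0:0:1]:24

The next hunk then expects four dead DataNodes, one per include entry,
since the mini cluster is started with no live DataNodes at all.
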
@@ -160,7 +160,7 @@ public class TestHostsFiles {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       final FSNamesystem ns = cluster.getNameNode().getNamesystem();
-      assertTrue(ns.getNumDeadDataNodes() == 2);
+      assertTrue(ns.getNumDeadDataNodes() == 4);
       assertTrue(ns.getNumLiveDataNodes() == 0);

       // Testing using MBeans
@@ -168,7 +168,7 @@ public class TestHostsFiles {
       ObjectName mxbeanName = new ObjectName(
           "Hadoop:service=NameNode,name=FSNamesystemState");
       String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
-      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
+      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 4);
       assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
     } finally {
       if (cluster != null) {
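
As a usage note, the MBean half of the test mirrors what one could query
against a running NameNode. A sketch of the same read using only the JDK's
JMX API; the bean name and attributes come from the test itself, the class
name is ours, and it assumes it runs in the NameNode's JVM as the test does:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class DeadNodesProbe {
      public static void main(String[] args) throws Exception {
        // Same bean and attributes the test asserts on.
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName fsState =
            new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
        Integer dead = (Integer) mbs.getAttribute(fsState, "NumDeadDataNodes");
        Integer live = (Integer) mbs.getAttribute(fsState, "NumLiveDataNodes");
        System.out.println("dead=" + dead + " live=" + live);
      }
    }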