From: cmccabe@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Subject: svn commit: r1589908 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/CHANGES.txt hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
Date: Thu, 24 Apr 2014 23:09:04 -0000
Message-Id: <20140424230905.0513823889EC@eris.apache.org>

Author: cmccabe
Date: Thu Apr 24 23:09:04 2014
New Revision: 1589908

URL: http://svn.apache.org/r1589908
Log:
HDFS-6282. Re-add testIncludeByRegistrationName (cmccabe)

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project:r1589907

Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1589907

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1589908&r1=1589907&r2=1589908&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Apr 24 23:09:04 2014
@@ -67,6 +67,8 @@ Release 2.5.0 - UNRELEASED
     HDFS-6273. Config options to allow wildcard endpoints for namenode HTTP
     and HTTPS servers. (Arpit Agarwal)
 
+    HDFS-6282. Re-add testIncludeByRegistrationName. (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java?rev=1589908&r1=1589907&r2=1589908&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java Thu Apr 24 23:09:04 2014
@@ -884,4 +884,76 @@ public class TestDecommission {
     startCluster(numNamenodes, numDatanodes, conf);
     cluster.shutdown();
   }
+
+  /**
+   * Test using a "registration name" in a host include file.
+   *
+   * Registration names are DataNode names specified in the configuration by
+   * dfs.datanode.hostname.  The DataNode will send this name to the NameNode
+   * as part of its registration.  Registration names are helpful when you
+   * want to override the normal first result of DNS resolution on the
+   * NameNode.  For example, a given datanode IP may map to two hostnames,
+   * and you may want to choose which hostname is used internally in the
+   * cluster.
+   *
+   * It is not recommended to use a registration name which is not also a
+   * valid DNS hostname for the DataNode.  See HDFS-5237 for background.
+   */
+  @Test(timeout=360000)
+  public void testIncludeByRegistrationName() throws IOException,
+      InterruptedException {
+    Configuration hdfsConf = new Configuration(conf);
+    // Any IPv4 address starting with 127 functions as a "loopback" address
+    // which is connected to the current host.  So by choosing 127.0.0.100
+    // as our registration name, we have chosen a name which is also a valid
+    // way of reaching the local DataNode we're going to start.
+    // Typically, a registration name would be a hostname, but we don't want
+    // to deal with DNS in this test.
+    final String registrationName = "127.0.0.100";
+    final String nonExistentDn = "127.0.0.10";
+    hdfsConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, registrationName);
+    cluster = new MiniDFSCluster.Builder(hdfsConf)
+        .numDataNodes(1).checkDataNodeHostConfig(true)
+        .setupHostsFile(true).build();
+    cluster.waitActive();
+
+    // Set up an includes file that doesn't have our datanode.
+    ArrayList<String> nodes = new ArrayList<String>();
+    nodes.add(nonExistentDn);
+    writeConfigFile(hostsFile, nodes);
+    refreshNodes(cluster.getNamesystem(0), hdfsConf);
+
+    // Wait for the DN to be marked dead.
+    DFSClient client = getDfsClient(cluster.getNameNode(0), hdfsConf);
+    while (true) {
+      DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.DEAD);
+      if (info.length == 1) {
+        break;
+      }
+      LOG.info("Waiting for datanode to be marked dead");
+      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
+    }
+
+    // Use a non-empty include file with our registration name.
+    // It should work.
+    int dnPort = cluster.getDataNodes().get(0).getXferPort();
+    nodes = new ArrayList<String>();
+    nodes.add(registrationName + ":" + dnPort);
+    writeConfigFile(hostsFile, nodes);
+    refreshNodes(cluster.getNamesystem(0), hdfsConf);
+    cluster.restartDataNode(0);
+
+    // Wait for the DN to come back.
+    while (true) {
+      DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
+      if (info.length == 1) {
+        Assert.assertFalse(info[0].isDecommissioned());
+        Assert.assertFalse(info[0].isDecommissionInProgress());
+        assertEquals(registrationName, info[0].getHostName());
+        break;
+      }
+      LOG.info("Waiting for datanode to come back");
+      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
+    }
+  }
 }
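
Note: DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, used above, is the constant
for the dfs.datanode.hostname key. A minimal stand-alone sketch of setting a
registration name through the public Configuration API (the class and main()
below are illustrative, not part of this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class RegistrationNameExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Resolves to "dfs.datanode.hostname".  The DataNode sends this
        // name to the NameNode when it registers, overriding the first
        // result of DNS resolution on the NameNode side.
        conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "127.0.0.100");
        System.out.println(
            conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
      }
    }

In a deployed cluster the same key would normally be set per-datanode in
hdfs-site.xml rather than programmatically.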
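Note: writeConfigFile() and hostsFile are helpers defined elsewhere in
TestDecommission. The include file they produce follows the dfs.hosts format:
one datanode per line, either a bare hostname/IP or name:port as used above.
A hypothetical stand-alone sketch of that file format (the HostsFileWriter
name is invented for illustration):

    import java.io.File;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.util.List;

    final class HostsFileWriter {
      // Write one include entry per line, e.g. "127.0.0.100:9866".
      static void write(File hostsFile, List<String> nodes)
          throws IOException {
        Files.write(hostsFile.toPath(), nodes, StandardCharsets.UTF_8);
      }
    }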
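Note: both wait loops in the test poll DFSClient.datanodeReport() once per
heartbeat interval and rely on the @Test timeout as their only bound. A
hypothetical bounded variant of the same polling pattern (the method name and
deadline parameter are illustrative, not part of the test):

    // Polls for the expected number of nodes in the given report state,
    // failing after deadlineMillis instead of relying on a test-level
    // timeout.
    static DatanodeInfo[] waitForReport(DFSClient client,
        DatanodeReportType type, int expected, long deadlineMillis)
        throws IOException, InterruptedException {
      long end = System.currentTimeMillis() + deadlineMillis;
      while (System.currentTimeMillis() < end) {
        DatanodeInfo[] info = client.datanodeReport(type);
        if (info.length == expected) {
          return info;
        }
        Thread.sleep(1000);
      }
      throw new IOException("timed out waiting for " + expected
          + " node(s) in state " + type);
    }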