Subject: svn commit: r1364662 - in /hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
Date: Mon, 23 Jul 2012 14:50:14 -0000
To: hdfs-commits@hadoop.apache.org
From: daryn@apache.org
Message-Id: <20120723145014.D646223888CD@eris.apache.org>

Author: daryn
Date: Mon Jul 23 14:50:14 2012
New Revision: 1364662

URL: http://svn.apache.org/viewvc?rev=1364662&view=rev
Log:
HDFS-3688. Namenode loses datanode hostname if datanode re-registers
(Jason Lowe via daryn)

Modified:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1364662&r1=1364661&r2=1364662&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Jul 23 14:50:14 2012
@@ -78,6 +78,9 @@ Release 0.23.3 - UNRELEASED
     HDFS-3646. LeaseRenewer can hold reference to inactive DFSClient
     instances forever (Kihwal Lee via daryn)
 
+    HDFS-3688. Namenode loses datanode hostname if datanode re-registers
+    (Jason Lowe via daryn)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1364662&r1=1364661&r2=1364662&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Jul 23 14:50:14 2012
@@ -35,7 +35,6 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
@@ -374,6 +373,7 @@ public class DataNode extends Configured
   private InetSocketAddress selfAddr;
 
   private volatile String hostName; // Host name of this datanode
+  private final String confHostName;
 
   boolean isBlockTokenEnabled;
   BlockPoolTokenSecretManager blockPoolTokenSecretManager;
@@ -414,7 +414,8 @@ public class DataNode extends Configured
     this.userWithLocalPathAccess = conf
         .get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
     try {
-      hostName = getHostName(conf);
+      confHostName = getHostName(conf);
+      hostName = confHostName;
       startDataNode(conf, dataDirs, resources);
     } catch (IOException ie) {
       shutdown();
@@ -830,7 +831,9 @@ public class DataNode extends Configured
    * before we can load any specific block pool.
    */
  private DatanodeRegistration createUnknownBPRegistration() {
-    DatanodeRegistration reg = new DatanodeRegistration(getMachineName());
+    DatanodeRegistration reg = new DatanodeRegistration(
+        confHostName + ":" + getPort());
+    reg.setInfoPort(infoServer.getPort());
     reg.setIpcPort(getIpcPort());
     return reg;

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java?rev=1364662&r1=1364661&r2=1364662&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRegister.java Mon Jul 23 14:50:14 2012
@@ -18,13 +18,18 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import java.net.InetSocketAddress;
 
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.*;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -62,4 +67,32 @@ public class TestDatanodeRegister {
       LOG.info("register() returned correct Exception: IncorrectVersionException");
     }
   }
+
+  @Test
+  public void testDataNodeReregister() throws Exception {
+
+    final String hostname = "somehostname";
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "127.0.0.1:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hostname);
+    DataNode dn = new DataNode(conf, null);
+    try {
+      NamespaceInfo fakeNSInfo = mock(NamespaceInfo.class);
+      when(fakeNSInfo.getBuildVersion()).thenReturn("NSBuildVersion");
+      DatanodeRegistration bpReg = dn.createBPRegistration(fakeNSInfo);
+      assertEquals("Bad hostname in registration", hostname, bpReg.getHost());
+
+      // set the datanode name to an IP address and verify the symbolic name
+      // is still used during registration
+      bpReg.setName("127.0.0.1:0");
+      dn.bpRegistrationSucceeded(bpReg, null);
+      bpReg = dn.createBPRegistration(fakeNSInfo);
+      assertEquals("Bad hostname in re-registration", hostname, bpReg.getHost());
+    } finally {
+      dn.shutdown();
+    }
+  }
 }
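
For readers skimming the patch, the core of the fix is that the DataNode now keeps the hostname it resolved from configuration in a new final field, confHostName, and builds registration names from it, so a DataNode that re-registers reports its symbolic hostname to the NameNode rather than whatever name (possibly an IP address) the previous registration carried. The following is a minimal, self-contained sketch of that idea, not code from the commit; the class and method names (MiniDataNode, createRegistrationName) are hypothetical and exist only for this illustration.

    // Sketch of the HDFS-3688 idea: derive registration names from an immutable,
    // configuration-supplied hostname so later updates cannot leak an IP address
    // into a re-registration. Hypothetical names, not the actual HDFS classes.
    public class MiniDataNode {
        private final String confHostName;   // hostname from configuration, never changes
        private volatile String hostName;    // working hostname, may be overwritten at runtime

        public MiniDataNode(String configuredHostName) {
            this.confHostName = configuredHostName;
            this.hostName = configuredHostName;
        }

        // Registration names are always built from the configured hostname.
        public String createRegistrationName(int port) {
            return confHostName + ":" + port;
        }

        public static void main(String[] args) {
            MiniDataNode dn = new MiniDataNode("somehostname");
            String first = dn.createRegistrationName(50010);

            // Simulate the failure mode: something overwrites the working
            // hostname with an IP address after the first registration...
            dn.hostName = "127.0.0.1";

            // ...yet a re-registration still carries the configured symbolic name.
            String second = dn.createRegistrationName(50010);
            System.out.println(first + " == " + second + " : " + first.equals(second)); // true
        }
    }

Keeping the configured name in a final field means no later code path can clobber it, which is what the new testDataNodeReregister test asserts by checking getHost() again after setName("127.0.0.1:0") and bpRegistrationSucceeded().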