hadoop-hdfs-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From s..@apache.org
Subject svn commit: r1061600 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Date Fri, 21 Jan 2011 02:31:59 GMT
Author: shv
Date: Fri Jan 21 02:31:59 2011
New Revision: 1061600

URL: http://svn.apache.org/viewvc?rev=1061600&view=rev
Log:
HDFS-1561. BackupNode listens on the default host. Contributed by Konstantin Shvachko

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1061600&r1=1061599&r2=1061600&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Jan 21 02:31:59 2011
@@ -495,6 +495,8 @@ Release 0.22.0 - Unreleased
     HDFS-1572. Checkpointer should trigger checkpoint with specified period.
     (jghoman)
 
+    HDFS-1561. BackupNode listens on the default host. (shv)
+
 Release 0.21.1 - Unreleased
 
   IMPROVEMENTS

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1061600&r1=1061599&r2=1061600&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Fri Jan 21 02:31:59 2011
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 
 /**
@@ -79,9 +78,7 @@ public class BackupNode extends NameNode
   @Override // NameNode
   protected InetSocketAddress getRpcServerAddress(Configuration conf) throws IOException
{
     String addr = conf.get(BN_ADDRESS_NAME_KEY, BN_ADDRESS_DEFAULT);
-    int port = NetUtils.createSocketAddr(addr).getPort();
-    String hostName = DNS.getDefaultHost("default");
-    return new InetSocketAddress(hostName, port);
+    return NetUtils.createSocketAddr(addr);
   }
   
   @Override
@@ -90,9 +87,7 @@ public class BackupNode extends NameNode
     if (addr == null || addr.isEmpty()) {
       return null;
     }
-    int port = NetUtils.createSocketAddr(addr).getPort();
-    String hostName = DNS.getDefaultHost("default");
-    return new InetSocketAddress(hostName, port);
+    return NetUtils.createSocketAddr(addr);
   }
 
   @Override // NameNode
@@ -107,14 +102,9 @@ public class BackupNode extends NameNode
 
   @Override // NameNode
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
-    // It is necessary to resolve the hostname at this point in order
-    // to ensure that the server address that is sent to the namenode
-    // is correct.
     assert rpcAddress != null : "rpcAddress should be calculated first";
     String addr = conf.get(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
-    int port = NetUtils.createSocketAddr(addr).getPort();
-    String hostName = rpcAddress.getHostName();
-    return new InetSocketAddress(hostName, port);
+    return NetUtils.createSocketAddr(addr);
   }
 
   @Override // NameNode

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=1061600&r1=1061599&r2=1061600&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Fri Jan 21 02:31:59 2011
@@ -49,8 +49,8 @@ import org.apache.hadoop.net.DNS;
 public class TestHDFSServerPorts extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestHDFSServerPorts.class);
   
-  public static final String NAME_NODE_HOST = "localhost:";
-  public static final String NAME_NODE_HTTP_HOST = getFullHostName() + ":";
+  // reset default 0.0.0.0 addresses in order to avoid IPv6 problem
+  static final String THIS_HOST = getFullHostName() + ":0";
 
   Configuration config;
   File hdfsDir;
@@ -66,7 +66,7 @@ public class TestHDFSServerPorts extends
    * 
    * @return Fully qualified hostname, or 127.0.0.1 if can't determine
    */
-  private static String getFullHostName() {
+  public static String getFullHostName() {
     try {
       return DNS.getDefaultHost("default");
     } catch (UnknownHostException e) {
@@ -98,11 +98,11 @@ public class TestHDFSServerPorts extends
     config = new HdfsConfiguration();
     config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         fileAsURI(new File(hdfsDir, "name1")).toString());
-    FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
+    FileSystem.setDefaultUri(config, "hdfs://" + THIS_HOST);
     if (withService) {
-      NameNode.setServiceAddress(config, NAME_NODE_HOST + "0");      
+      NameNode.setServiceAddress(config, THIS_HOST);      
     }
-    config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
+    config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
     NameNode.format(config);
 
     String[] args = new String[] {};
@@ -266,24 +266,24 @@ public class TestHDFSServerPorts extends
       assertFalse(started); // should fail
 
       // start on a different main port
-      FileSystem.setDefaultUri(conf2, "hdfs://"+NAME_NODE_HOST + "0");
+      FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
       started = canStartNameNode(conf2);
       assertFalse(started); // should fail again
 
       // reset conf2 since NameNode modifies it
-      FileSystem.setDefaultUri(conf2, "hdfs://"+NAME_NODE_HOST + "0");
+      FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
       // different http port
-      conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
       started = canStartNameNode(conf2);
 
       if (withService) {
         assertFalse("Should've failed on service port", started);
 
         // reset conf2 since NameNode modifies it
-        FileSystem.setDefaultUri(conf2, "hdfs://"+NAME_NODE_HOST + "0");
-        conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
+        FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
         // Set Service address      
-        conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NAME_NODE_HOST + "0");
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,  THIS_HOST);
         started = canStartNameNode(conf2);        
       }
       assertTrue(started);
@@ -305,21 +305,21 @@ public class TestHDFSServerPorts extends
       conf2.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
       conf2.set("dfs.datanode.address",
                 FileSystem.getDefaultUri(config).getAuthority());
-      conf2.set("dfs.datanode.http.address", NAME_NODE_HTTP_HOST + "0");
+      conf2.set("dfs.datanode.http.address", THIS_HOST);
       boolean started = canStartDataNode(conf2);
       assertFalse(started); // should fail
 
       // bind http server to the same port as name-node
-      conf2.set("dfs.datanode.address", NAME_NODE_HOST + "0");
+      conf2.set("dfs.datanode.address", THIS_HOST);
       conf2.set("dfs.datanode.http.address", 
                 config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
       started = canStartDataNode(conf2);
       assertFalse(started); // should fail
     
       // both ports are different from the name-node ones
-      conf2.set("dfs.datanode.address", NAME_NODE_HOST + "0");
-      conf2.set("dfs.datanode.http.address", NAME_NODE_HTTP_HOST + "0");
-      conf2.set("dfs.datanode.ipc.address", NAME_NODE_HOST + "0");
+      conf2.set("dfs.datanode.address", THIS_HOST);
+      conf2.set("dfs.datanode.http.address", THIS_HOST);
+      conf2.set("dfs.datanode.ipc.address", THIS_HOST);
       started = canStartDataNode(conf2);
       assertTrue(started); // should start now
     } finally {
@@ -345,7 +345,7 @@ public class TestHDFSServerPorts extends
       assertFalse(started); // should fail
 
       // bind http server to a different port
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, THIS_HOST);
       LOG.info("= Starting 2 on: " + 
                                  conf2.get(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY));
       started = canStartSecondaryNode(conf2);
@@ -363,21 +363,24 @@ public class TestHDFSServerPorts extends
       try {
         nn = startNameNode();
 
-        // bind http server to the same port as name-node
         Configuration backup_config = new HdfsConfiguration(config);
+        backup_config.set(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
+        // bind http server to the same port as name-node
         backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
-                                        backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
+            backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
 
-        LOG.info("= Starting 1 on: " + 
-                                  backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
+        LOG.info("= Starting 1 on: " + backup_config.get(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
 
         assertFalse("Backup started on same port as Namenode", 
                            canStartBackupNode(backup_config)); // should fail
 
         // bind http server to a different port
+        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
-        LOG.info("= Starting 2 on: " + 
-                                  backup_config.get(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
+        backup_config.set(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
+        LOG.info("= Starting 2 on: " + backup_config.get(
+            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
 
         boolean started = canStartBackupNode(backup_config);
         assertTrue("Backup Namenode should've started", started); // should start now

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1061600&r1=1061599&r2=1061600&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Fri Jan 21 02:31:59 2011
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestHDFSServerPorts;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -37,6 +38,8 @@ import junit.framework.TestCase;
 public class TestBackupNode extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
 
+  // reset default 0.0.0.0 addresses in order to avoid IPv6 problem
+  static final String THIS_HOST = TestHDFSServerPorts.getFullHostName();
   static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
 
   protected void setUp() throws Exception {
@@ -135,6 +138,10 @@ public class TestBackupNode extends Test
       //
       // Take a checkpoint
       //
+      conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
+          THIS_HOST + ":0");
+      conf.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+          THIS_HOST + ":0");
       backup = startBackupNode(conf, op, 1);
       waitCheckpointDone(backup);
     } catch(IOException e) {
@@ -217,13 +224,17 @@ public class TestBackupNode extends Test
     try {
       // start name-node and backup node 1
       cluster = new MiniDFSCluster.Builder(conf1).numDataNodes(0).build();
-      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7771");
-      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7775");
+      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
+                THIS_HOST + ":7771");
+      conf1.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+                THIS_HOST + ":7775");
       backup1 = startBackupNode(conf1, StartupOption.BACKUP, 1);
       // try to start backup node 2
       conf2 = new HdfsConfiguration(conf1);
-      conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "0.0.0.0:7772");
-      conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "0.0.0.0:7776");
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
+                THIS_HOST + ":7772");
+      conf2.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+                THIS_HOST + ":7776");
       try {
         backup2 = startBackupNode(conf2, StartupOption.BACKUP, 2);
         backup2.stop();



Mime
View raw message