hadoop-hdfs-commits mailing list archives

From ma...@apache.org
Subject svn commit: r1133181 - in /hadoop/hdfs/trunk: CHANGES.txt src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java src/test/hdfs/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
Date Tue, 07 Jun 2011 22:26:52 GMT
Author: mattf
Date: Tue Jun  7 22:26:52 2011
New Revision: 1133181

URL: http://svn.apache.org/viewvc?rev=1133181&view=rev
Log:
HDFS-1875. MiniDFSCluster hard-codes dfs.datanode.address to localhost. Contributed by Eric Payne.
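The gist of the change: MiniDFSCluster previously always overwrote the datanode address keys with localhost; with the new checkDataNodeAddrConfig flag, values already present in the configuration are preserved via Configuration.setIfUnset. A minimal sketch of the pattern, paraphrasing the setupDatanodeAddress change below (not the committed code verbatim):

    import org.apache.hadoop.conf.Configuration;

    public class AddrDefaultingSketch {
      // With checkDataNodeAddrConfig set, a pre-configured address wins;
      // otherwise the legacy hard-coded localhost binding is applied.
      static void applyDefault(Configuration conf, boolean checkDataNodeAddrConfig) {
        if (checkDataNodeAddrConfig) {
          conf.setIfUnset("dfs.datanode.address", "127.0.0.1:0");
        } else {
          conf.set("dfs.datanode.address", "127.0.0.1:0");
        }
      }
    }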

Added:
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1133181&r1=1133180&r2=1133181&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Tue Jun  7 22:26:52 2011
@@ -287,6 +287,9 @@ Trunk (unreleased changes)
 
   IMPROVEMENTS
 
+    HDFS-1875. MiniDFSCluster hard-codes dfs.datanode.address to localhost
+    (Eric Payne via mattf)
+
     HDFS-2019. Fix all the places where Java method File.list is used with
     FileUtil.list API (Bharath Mundlapudi via mattf)
 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java?rev=1133181&r1=1133180&r2=1133181&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DataNodeCluster.java Tue Jun  7 22:26:52 2011
@@ -75,10 +75,12 @@ public class DataNodeCluster {
     " [-inject startingBlockId numBlocksPerDN]" +
     " [-r replicationFactorForInjectedBlocks]" +
     " [-d dataNodeDirs]\n" + 
+    " [-checkDataNodeAddrConfig]\n" +
     "      Default datanode direcory is " + DATANODE_DIRS + "\n" +
     "      Default replication factor for injected blocks is 1\n" +
     "      Defaul rack is used if -racks is not specified\n" +
-    "      Data nodes are simulated if -simulated OR conf file specifies simulated\n";
+    "      Data nodes are simulated if -simulated OR conf file specifies simulated\n" +
+    "      -checkDataNodeAddrConfig tells DataNodeConf to use data node addresses from conf
file, if it is set. If not set, use .localhost'.";
   
   
   static void printUsageExit() {
@@ -97,6 +99,7 @@ public class DataNodeCluster {
     long startingBlockId = 1;
     int numBlocksPerDNtoInject = 0;
     int replication = 1;
+    boolean checkDataNodeAddrConfig = false;
     
     Configuration conf = new HdfsConfiguration();
 
@@ -139,6 +142,8 @@ public class DataNodeCluster {
          printUsageExit("Missing number of blocks to inject");
        }
        numBlocksPerDNtoInject = Integer.parseInt(args[i]);      
+      } else if (args[i].equals("-checkDataNodeAddrConfig")) {
+        checkDataNodeAddrConfig = true;
       } else {
         printUsageExit();
       }
@@ -186,7 +191,7 @@ public class DataNodeCluster {
     }
     try {
       mc.startDataNodes(conf, numDataNodes, true, StartupOption.REGULAR,
-          rack4DataNode);
+          rack4DataNode, null, null, false, checkDataNodeAddrConfig);
       if (inject) {
         long blockSize = 10;
         System.out.println("Injecting " + numBlocksPerDNtoInject +

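For context, a hedged example of driving the DataNodeCluster test driver with the new flag. The -n option for the data node count is assumed from the driver's full usage string, which this hunk does not show:

    import org.apache.hadoop.hdfs.DataNodeCluster;

    public class DataNodeClusterLauncher {
      public static void main(String[] argv) throws Exception {
        // Hypothetical invocation: start two data nodes and let any
        // dfs.datanode.* addresses already present in the conf take
        // precedence over the hard-coded localhost defaults.
        DataNodeCluster.main(new String[] { "-n", "2", "-checkDataNodeAddrConfig" });
      }
    }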
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1133181&r1=1133180&r2=1133181&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Jun  7 22:26:52 2011
@@ -752,7 +752,41 @@ public class MiniDFSCluster {
                              String[] racks, String[] hosts,
                              long[] simulatedCapacities,
                              boolean setupHostsFile) throws IOException {
+    startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts,
+                   simulatedCapacities, setupHostsFile, false);
+  }
 
+  /**
+   * Modify the config and start up additional DataNodes.  The info port for
+   * DataNodes is guaranteed to use a free port.
+   *  
+   *  Data nodes can run with the name node in the mini cluster or with
+   *  a real name node; for example, simulated data nodes can be run
+   *  against a real name node.
+   *  If the mini cluster's name node is null, it is assumed that the conf
+   *  has been set with the correct address:port of the name node.
+   *
+   * @param conf the base configuration to use in starting the DataNodes.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param manageDfsDirs if true, the data directories for DataNodes will be
+   *          created and dfs.datanode.data.dir will be set in the conf
+   * @param operation the operation with which to start the DataNodes.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   * @param racks array of strings indicating the rack that each DataNode is on
+   * @param hosts array of strings indicating the hostnames for each DataNode
+   * @param simulatedCapacities array of capacities of the simulated data nodes
+   * @param setupHostsFile add new nodes to dfs hosts files
+   * @param checkDataNodeAddrConfig if true, only set DataNode port addresses if not already set in config
+   *
+   * @throws IllegalStateException if NameNode has been shutdown
+   */
+  public synchronized void startDataNodes(Configuration conf, int numDataNodes,
+                             boolean manageDfsDirs, StartupOption operation, 
+                             String[] racks, String[] hosts,
+                             long[] simulatedCapacities,
+                             boolean setupHostsFile,
+                             boolean checkDataNodeAddrConfig) throws IOException {
     int curDatanodesNum = dataNodes.size();
     // for the minicluster, the default initialDelay for block reports (BRs) is 0
     if (conf.get(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
@@ -792,7 +826,7 @@ public class MiniDFSCluster {
     for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
       Configuration dnConf = new HdfsConfiguration(conf);
       // Set up datanode address
-      setupDatanodeAddress(dnConf, setupHostsFile);
+      setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
       if (manageDfsDirs) {
         File dir1 = getStorageDir(i, 0);
         File dir2 = getStorageDir(i, 1);
@@ -1791,7 +1825,8 @@ public class MiniDFSCluster {
     return port;
   }
   
-  private void setupDatanodeAddress(Configuration conf, boolean setupHostsFile) throws IOException {
+  private void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
+                           boolean checkDataNodeAddrConfig) throws IOException {
     if (setupHostsFile) {
       String hostsFile = conf.get(DFSConfigKeys.DFS_HOSTS, "").trim();
       if (hostsFile.length() == 0) {
@@ -1799,13 +1834,23 @@ public class MiniDFSCluster {
       }
       // Setup datanode in the include file, if it is defined in the conf
       String address = "127.0.0.1:" + getFreeSocketPort();
-      conf.set("dfs.datanode.address", address);
+      if (checkDataNodeAddrConfig) {
+        conf.setIfUnset("dfs.datanode.address", address);
+      } else {
+        conf.set("dfs.datanode.address", address);
+      }
       addToFile(hostsFile, address);
       LOG.info("Adding datanode " + address + " to hosts file " + hostsFile);
     } else {
-      conf.set("dfs.datanode.address", "127.0.0.1:0");
-      conf.set("dfs.datanode.http.address", "127.0.0.1:0");
-      conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
+      if (checkDataNodeAddrConfig) {
+        conf.setIfUnset("dfs.datanode.address", "127.0.0.1:0");
+        conf.setIfUnset("dfs.datanode.http.address", "127.0.0.1:0");
+        conf.setIfUnset("dfs.datanode.ipc.address", "127.0.0.1:0");
+      } else {
+        conf.set("dfs.datanode.address", "127.0.0.1:0");
+        conf.set("dfs.datanode.http.address", "127.0.0.1:0");
+        conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
+      }
     }
   }
   

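Putting the new overload together: a caller that wants data nodes bound to something other than the hard-coded 127.0.0.1 can pre-set the three address keys and pass true for the trailing checkDataNodeAddrConfig parameter. A minimal sketch mirroring what the new test below does, assuming a cluster built with zero initial data nodes:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;

    public class WildcardAddrSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Pre-set the address keys; with checkDataNodeAddrConfig=true they
        // survive startDataNodes() instead of being overwritten.
        conf.set("dfs.datanode.address", "0.0.0.0:0");
        conf.set("dfs.datanode.http.address", "0.0.0.0:0");
        conf.set("dfs.datanode.ipc.address", "0.0.0.0:0");

        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        // The trailing 'true' is the new checkDataNodeAddrConfig parameter.
        cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
                               null, null, null, false, true);
        cluster.shutdown();
      }
    }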
Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSAddressConfig.java?rev=1133181&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSAddressConfig.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSAddressConfig.java Tue Jun  7 22:26:52 2011
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Test the MiniDFSCluster functionality that allows "dfs.datanode.address",
+ * "dfs.datanode.http.address", and "dfs.datanode.ipc.address" to be
+ * configurable. The MiniDFSCluster.startDataNodes() API now has a parameter
+ * that will check these properties if told to do so.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import junit.framework.Assert;
+import junit.framework.TestCase;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+
+
+public class TestDFSAddressConfig extends TestCase {
+
+  public void testDFSAddressConfig() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+
+    /*-------------------------------------------------------------------------
+     * By default, the DataNode socket address should be localhost (127.0.0.1).
+     *------------------------------------------------------------------------*/
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+
+    ArrayList<DataNode> dns = cluster.getDataNodes();
+    DataNode dn = dns.get(0);
+
+    String selfSocketAddr = dn.getSelfAddr().toString();
+    System.out.println("DN Self Socket Addr == " + selfSocketAddr);
+    assertTrue(selfSocketAddr.startsWith("/127.0.0.1:"));
+
+    /*-------------------------------------------------------------------------
+     * Shut down the datanodes, reconfigure, and bring them back up.
+     * Even if told to use the configuration properties for dfs.datanode,
+     * MiniDFSCluster.startDataNodes() should use localhost as the default if
+     * the dfs.datanode properties are not set.
+     *------------------------------------------------------------------------*/
+    for (int i = 0; i < dns.size(); i++) {
+      DataNodeProperties dnp = cluster.stopDataNode(i);
+      assertNotNull("Should have been able to stop simulated datanode", dnp);
+    }
+
+    conf.unset("dfs.datanode.address");
+    conf.unset("dfs.datanode.http.address");
+    conf.unset("dfs.datanode.ipc.address");
+
+    cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
+                           null, null, null, false, true);
+
+    dns = cluster.getDataNodes();
+    dn = dns.get(0);
+
+    selfSocketAddr = dn.getSelfAddr().toString();
+    System.out.println("DN Self Socket Addr == " + selfSocketAddr);
+    // assert that default self socket address is 127.0.0.1
+    assertTrue(selfSocketAddr.startsWith("/127.0.0.1:"));
+
+    /*-------------------------------------------------------------------------
+     * Shut down the datanodes, reconfigure, and bring them back up.
+     * This time, modify the dfs.datanode properties and make sure that they
+     * are used to configure sockets by MiniDFSCluster.startDataNodes().
+     *------------------------------------------------------------------------*/
+    for (int i = 0; i < dns.size(); i++) {
+      DataNodeProperties dnp = cluster.stopDataNode(i);
+      assertNotNull("Should have been able to stop simulated datanode", dnp);
+    }
+
+    conf.set("dfs.datanode.address","0.0.0.0:0");
+    conf.set("dfs.datanode.http.address","0.0.0.0:0");
+    conf.set("dfs.datanode.ipc.address","0.0.0.0:0");
+
+    cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR,
+                           null, null, null, false, true);
+
+    dns = cluster.getDataNodes();
+    dn = dns.get(0);
+
+    selfSocketAddr = dn.getSelfAddr().toString();
+    System.out.println("DN Self Socket Addr == " + selfSocketAddr);
+    // assert that default self socket address is 0.0.0.0
+    assertTrue(selfSocketAddr.startsWith("/0.0.0.0:"));
+
+    cluster.shutdown();
+  }
+}


