hadoop-common-commits mailing list archives

From omal...@apache.org
Subject svn commit: r1077543 - in /hadoop/common/branches/branch-0.20-security-patches/src: hdfs/org/apache/hadoop/hdfs/ hdfs/org/apache/hadoop/hdfs/server/balancer/ hdfs/org/apache/hadoop/hdfs/server/datanode/ hdfs/org/apache/hadoop/hdfs/server/namenode/ test...
Date Fri, 04 Mar 2011 04:27:24 GMT
Author: omalley
Date: Fri Mar  4 04:27:24 2011
New Revision: 1077543

URL: http://svn.apache.org/viewvc?rev=1077543&view=rev
Log:
commit f605ec6278d8309a242d2af21ccab09ac2fce225
Author: Suresh Srinivas <sureshms@yahoo-inc.com>
Date:   Tue Jul 13 16:02:09 2010 -0700

    HDFS-599 from https://issues.apache.org/jira/secure/attachment/12449119/HDFS-599.y20.patch
    
    +++ b/YAHOO-CHANGES.txt
    +
    +    HDFS-599. Allow NameNode to have a separate port for service requests from
    +    client requests. (Dmytro Molkov via hairong)
    +
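
For illustration, a minimal sketch of how a deployment opts in to the
separate service port once this patch is applied. The key and method names
are the ones added below; the host and port are hypothetical:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    public class ServiceRpcExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Direct datanodes, the balancer and the secondary namenode to a
        // dedicated RPC endpoint, separate from the client-facing port.
        conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
                 "namenode.example.com:8021");  // hypothetical host:port
        // Services resolve the endpoint with fallback enabled, so the call
        // degrades to the default (client) address when the key is unset.
        InetSocketAddress serviceAddr = NameNode.getServiceAddress(conf, true);
        System.out.println("Service RPC endpoint: " + serviceAddr);
      }
    }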

Modified:
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
    hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
    hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1077543&r1=1077542&r2=1077543&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java
(original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java
Fri Mar  4 04:27:24 2011
@@ -43,10 +43,12 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT = "localhost:50100";
   public static final String  DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY = "dfs.namenode.backup.http-address";
   public static final String  DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50105";
+  public static final String  DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.backup.dnrpc-address";
   public static final String  DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY = "dfs.datanode.balance.bandwidthPerSec";
   public static final long    DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
   public static final String  DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
   public static final String  DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50070";
+  public static final String  DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address";
   public static final String  DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects";
   public static final long    DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0;
   public static final String  DFS_NAMENODE_SAFEMODE_EXTENSION_KEY = "dfs.namenode.safemode.extension";
@@ -159,6 +161,8 @@ public class DFSConfigKeys extends Commo
   public static final int     DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT = 5;
   public static final String  DFS_NAMENODE_HANDLER_COUNT_KEY = "dfs.namenode.handler.count";
   public static final int     DFS_NAMENODE_HANDLER_COUNT_DEFAULT = 10;
+  public static final String  DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY = "dfs.namenode.service.handler.count";
+  public static final int     DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_SUPPORT_APPEND_KEY = "dfs.support.append";
   public static final boolean DFS_SUPPORT_APPEND_DEFAULT = false;
   public static final String  DFS_HTTPS_ENABLE_KEY = "dfs.https.enable";
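
The two new handler-count constants mirror the existing
dfs.namenode.handler.count pair. A short sketch of how they are read,
mirroring the NameNode.initialize() hunk further down (conf is assumed to
be a populated Configuration):

    // Thread count for the dedicated service RPC server; falls back to the
    // default of 10 when dfs.namenode.service.handler.count is not set.
    int serviceHandlerCount =
        conf.getInt(DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
                    DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);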

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1077543&r1=1077542&r2=1077543&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
(original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/balancer/Balancer.java
Fri Mar  4 04:27:24 2011
@@ -908,7 +908,7 @@ public class Balancer implements Tool {
    * set up the retry policy */ 
   private static NamenodeProtocol createNamenode(Configuration conf)
     throws IOException {
-    InetSocketAddress nameNodeAddr = NameNode.getAddress(conf);
+    InetSocketAddress nameNodeAddr = NameNode.getServiceAddress(conf, true);
     RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(
         5, 200, TimeUnit.MILLISECONDS);
     Map<Class<? extends Exception>,RetryPolicy> exceptionToPolicyMap =
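
The second argument to getServiceAddress() controls fallback. A hedged
illustration of the contract implemented in the NameNode.java hunk below
(imports as in the earlier sketch, plus org.apache.hadoop.fs.FileSystem;
the URI is hypothetical):

    Configuration conf = new Configuration();
    FileSystem.setDefaultUri(conf, "hdfs://nn.example.com:8020");
    // With no dfs.namenode.servicerpc-address configured:
    InetSocketAddress a = NameNode.getServiceAddress(conf, true);   // == NameNode.getAddress(conf)
    InetSocketAddress b = NameNode.getServiceAddress(conf, false);  // null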

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1077543&r1=1077542&r2=1077543&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
(original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
Fri Mar  4 04:27:24 2011
@@ -290,7 +290,7 @@ public class DataNode extends Configured
                                      conf.get("dfs.datanode.dns.interface","default"),
                                      conf.get("dfs.datanode.dns.nameserver","default"));
     }
-    InetSocketAddress nameNodeAddr = NameNode.getAddress(conf);
+    InetSocketAddress nameNodeAddr = NameNode.getServiceAddress(conf, true);
     
     this.socketTimeout =  conf.getInt("dfs.socket.timeout",
                                       HdfsConstants.READ_TIMEOUT);

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1077543&r1=1077542&r2=1077543&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
(original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
Fri Mar  4 04:27:24 2011
@@ -111,7 +111,7 @@ public class NameNode implements ClientP
   }
   
   public long getProtocolVersion(String protocol, 
-                                 long clientVersion) throws IOException { 
+                                 long clientVersion) throws IOException {
     if (protocol.equals(ClientProtocol.class.getName())) {
       return ClientProtocol.versionID; 
     } else if (protocol.equals(DatanodeProtocol.class.getName())){
@@ -134,8 +134,17 @@ public class NameNode implements ClientP
   public FSNamesystem namesystem; // TODO: This should be private. Use getNamesystem() instead.

   /** RPC server */
   private Server server;
+  /** RPC server for HDFS services communication.
+      The BackupNode, datanodes, and all other services
+      should connect to this server if it is configured.
+      Clients should only go to NameNode#server.
+  */
+  private Server serviceRpcServer;
+
   /** RPC server address */
   private InetSocketAddress serverAddress = null;
+  /** Service RPC server address (used by datanodes and other services) */
+  protected InetSocketAddress serviceRPCAddress = null;
   /** httpServer */
   private HttpServer httpServer;
   /** HTTP server address */
@@ -166,6 +175,32 @@ public class NameNode implements ClientP
     return NetUtils.createSocketAddr(address, DEFAULT_PORT);
   }
 
+  /**
+   * Set the configuration property for the service RPC address
+   * to the given address.
+   */
+  public static void setServiceAddress(Configuration conf,
+                                           String address) {
+    LOG.info("Setting ADDRESS " + address);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address);
+  }
+  
+  /**
+   * Fetches the address services should use when connecting to the namenode.
+   * If the service-specific address is not configured, the value of fallback
+   * decides the result: null when fallback is false, or the default namenode
+   * address (shared by both clients and services) when fallback is true.
+   * Services here are datanodes, the backup node, and any non-client connection.
+   */
+  public static InetSocketAddress getServiceAddress(Configuration conf,
+                                                    boolean fallback) {
+    String addr = conf.get(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    if (addr == null || addr.isEmpty()) {
+      return fallback ? getAddress(conf) : null;
+    }
+    return getAddress(addr);
+  }
+
   public static InetSocketAddress getAddress(Configuration conf) {
     return getAddress(FileSystem.getDefaultUri(conf).getAuthority());
   }
@@ -177,6 +212,25 @@ public class NameNode implements ClientP
   }
 
   /**
+   * Given a configuration, get the address of the service RPC server.
+   * Returns null if the service RPC address is not configured.
+   */
+  protected InetSocketAddress getServiceRpcServerAddress(Configuration conf)
+    throws IOException {
+    return NameNode.getServiceAddress(conf, false);
+  }
+
+ 
+  /**
+   * Modifies the passed-in configuration to contain the service RPC address setting.
+   */
+  protected void setRpcServiceServerAddress(Configuration conf) {
+    String address = serviceRPCAddress.getHostName() + ":"
+        + serviceRPCAddress.getPort();
+    setServiceAddress(conf, address);
+  }
+
+  /**
    * Initialize name-node.
    * 
    * @param conf the configuration
@@ -202,7 +256,18 @@ public class NameNode implements ClientP
       namesystem.activateSecretManager();
     }
 
-    // create rpc server 
+    // create rpc server
+    InetSocketAddress dnSocketAddr = getServiceRpcServerAddress(conf);
+    if (dnSocketAddr != null) {
+      int serviceHandlerCount =
+        conf.getInt(DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
+                    DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
+      this.serviceRpcServer = RPC.getServer(this, dnSocketAddr.getHostName(), 
+          dnSocketAddr.getPort(), serviceHandlerCount,
+          false, conf, namesystem.getDelegationTokenSecretManager());
+      this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress();
+      setRpcServiceServerAddress(conf);
+    }
     this.server = RPC.getServer(this, socAddr.getHostName(),
         socAddr.getPort(), handlerCount, false, conf, namesystem
         .getDelegationTokenSecretManager());
@@ -216,6 +281,9 @@ public class NameNode implements ClientP
 
     startHttpServer(conf);
     this.server.start();  //start RPC server   
+    if (serviceRpcServer != null) {
+      serviceRpcServer.start();      
+    }
     startTrashEmptier(conf);
   }
 
@@ -395,6 +463,7 @@ public class NameNode implements ClientP
     if(namesystem != null) namesystem.close();
     if(emptier != null) emptier.interrupt();
     if(server != null) server.stop();
+    if(serviceRpcServer != null) serviceRpcServer.stop();
     if (myMetrics != null) {
       myMetrics.shutdown();
     }
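
Condensed, the dual-port setup that the test changes below exercise looks
like this sketch (ephemeral ports via "localhost:0", as in the tests):

    Configuration conf = new Configuration();
    // Bind the service RPC server to an ephemeral port.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "localhost:0");
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    try {
      FileSystem fs = cluster.getFileSystem();  // clients use the client port
      // Datanodes inside the cluster register over the service port instead.
    } finally {
      cluster.shutdown();
    }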

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1077543&r1=1077542&r2=1077543&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
(original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
Fri Mar  4 04:27:24 2011
@@ -158,7 +158,7 @@ public class SecondaryNameNode implement
     
     // Create connection to the namenode.
     shouldRun = true;
-    nameNodeAddr = NameNode.getAddress(conf);
+    nameNodeAddr = NameNode.getServiceAddress(conf, true);
 
     this.conf = conf;
     this.namenode =

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1077543&r1=1077542&r2=1077543&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
(original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
Fri Mar  4 04:27:24 2011
@@ -34,15 +34,26 @@ import org.apache.log4j.Level;
 public class TestDistributedFileSystem extends junit.framework.TestCase {
   private static final Random RAN = new Random();
 
-  public void testFileSystemCloseAll() throws Exception {
+  private boolean dualPortTesting = false;
+  
+  private Configuration getTestConfiguration() {
     Configuration conf = new Configuration();
+    if (dualPortTesting) {
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+              "localhost:0");
+    }
+    return conf;
+  }
+
+  public void testFileSystemCloseAll() throws Exception {
+    Configuration conf = getTestConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 0, true, null);
     URI address = FileSystem.getDefaultUri(conf);
 
     try {
       FileSystem.closeAll();
 
-      conf = new Configuration();
+      conf = getTestConfiguration();
       FileSystem.setDefaultUri(conf, address);
       FileSystem.get(conf);
       FileSystem.get(conf);
@@ -58,7 +69,7 @@ public class TestDistributedFileSystem e
    * multiple files are open.
    */
   public void testDFSClose() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = getTestConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     FileSystem fileSys = cluster.getFileSystem();
 
@@ -75,7 +86,7 @@ public class TestDistributedFileSystem e
   }
 
   public void testDFSClient() throws Exception {
-    Configuration conf = new Configuration();
+    Configuration conf = getTestConfiguration();
     MiniDFSCluster cluster = null;
 
     try {
@@ -125,7 +136,7 @@ public class TestDistributedFileSystem e
     System.out.println("seed=" + seed);
     RAN.setSeed(seed);
 
-    final Configuration conf = new Configuration();
+    final Configuration conf = getTestConfiguration();
     conf.set("slave.host.name", "localhost");
 
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
@@ -188,5 +199,15 @@ public class TestDistributedFileSystem e
         assertEquals(qfoocs, barcs);
       }
     }
+    cluster.shutdown();
+  }
+  
+  public void testAllWithDualPort() throws Exception {
+    dualPortTesting = true;
+
+    testFileSystemCloseAll();
+    testDFSClose();
+    testDFSClient();
+    testFileChecksum();
   }
 }

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=1077543&r1=1077542&r2=1077543&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
(original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
Fri Mar  4 04:27:24 2011
@@ -46,10 +46,14 @@ public class TestHDFSServerPorts extends
   Configuration config;
   File hdfsDir;
 
+  public NameNode startNameNode() throws IOException {
+    return startNameNode(false);
+  }
+
   /**
    * Start the name-node.
    */
-  public NameNode startNameNode() throws IOException {
+  public NameNode startNameNode(boolean withService) throws IOException {
     String dataDir = System.getProperty("test.build.data");
     hdfsDir = new File(dataDir, "dfs");
     if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
@@ -58,6 +62,9 @@ public class TestHDFSServerPorts extends
     config = new Configuration();
     config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
     FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
+    if (withService) {
+      NameNode.setServiceAddress(config, NAME_NODE_HOST + "0");      
+    }
     config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
     NameNode.format(config);
 
@@ -147,13 +154,18 @@ public class TestHDFSServerPorts extends
     return true;
   }
 
+  public void testNameNodePorts() throws Exception {
+    runTestNameNodePorts(false);
+    runTestNameNodePorts(true);
+  }
+
   /**
    * Verify name-node port usage.
    */
-  public void testNameNodePorts() throws Exception {
+  public void runTestNameNodePorts(boolean withService) throws Exception {
     NameNode nn = null;
     try {
-      nn = startNameNode();
+      nn = startNameNode(withService);
 
       // start another namenode on the same port
       Configuration conf2 = new Configuration(config);
@@ -172,7 +184,18 @@ public class TestHDFSServerPorts extends
       // different http port
       conf2.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
       started = canStartNameNode(conf2);
-      assertTrue(started); // should start now
+
+      if (withService) {
+        assertFalse("Should've failed on service port", started);
+
+        // reset conf2 since NameNode modifies it
+        FileSystem.setDefaultUri(conf2, "hdfs://"+NAME_NODE_HOST + "0");
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, NAME_NODE_HTTP_HOST + "0");
+        // Set Service address      
+        conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NAME_NODE_HOST + "0");
+        started = canStartNameNode(conf2);        
+      }
+      assertTrue(started);
     } finally {
       stopNameNode(nn);
     }

Modified: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java?rev=1077543&r1=1077542&r2=1077543&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java
(original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/TestRestartDFS.java
Fri Mar  4 04:27:24 2011
@@ -29,9 +29,7 @@ import org.apache.hadoop.fs.Path;
  * A JUnit test for checking if restarting DFS preserves integrity.
  */
 public class TestRestartDFS extends TestCase {
-  /** check if DFS remains in proper condition after a restart */
-  public void testRestartDFS() throws Exception {
-    final Configuration conf = new Configuration();
+  public void runTests(Configuration conf, boolean serviceTest) throws Exception {
     MiniDFSCluster cluster = null;
     DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
 
@@ -44,6 +42,10 @@ public class TestRestartDFS extends Test
     FileStatus dirstatus;
 
     try {
+      if (serviceTest) {
+        conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                 "localhost:0");
+      }
       cluster = new MiniDFSCluster(conf, 4, true, null);
       FileSystem fs = cluster.getFileSystem();
       files.createFiles(fs, dir);
@@ -58,8 +60,12 @@ public class TestRestartDFS extends Test
       if (cluster != null) { cluster.shutdown(); }
     }
     try {
+      if (serviceTest) {
+        conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+                 "localhost:0");
+      }
       // Here we restart the MiniDFScluster without formatting namenode
-      cluster = new MiniDFSCluster(conf, 4, false, null);
+      cluster = new MiniDFSCluster(conf, 4, false, null); 
       FileSystem fs = cluster.getFileSystem();
       assertTrue("Filesystem corrupted after restart.",
                  files.checkFiles(fs, dir));
@@ -78,4 +84,17 @@ public class TestRestartDFS extends Test
       if (cluster != null) { cluster.shutdown(); }
     }
   }
+  /** check if DFS remains in proper condition after a restart */
+  public void testRestartDFS() throws Exception {
+    final Configuration conf = new Configuration();
+    runTests(conf, false);
+  }
+  
+  /** Check if DFS remains in proper condition after a restart.
+   * This rerun has two RPC ports enabled in the namenode.
+   */
+   public void testRestartDualPortDFS() throws Exception {
+     final Configuration conf = new Configuration();
+     runTests(conf, true);
+   }
 }


