hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From s..@apache.org
Subject svn commit: r777761 - in /hadoop/core/trunk: ./ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/
Date Sat, 23 May 2009 01:38:14 GMT
Author: shv
Date: Sat May 23 01:38:14 2009
New Revision: 777761

URL: http://svn.apache.org/viewvc?rev=777761&view=rev
Log:
HADOOP-5877. Fix javac warnings in TestHDFSServerPorts, TestCheckpoint, TestNameEditsConfigs,
TestStartup and TestStorageRestore. Contributed by Jakob Homan.

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
    hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
    hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
    hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
    hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=777761&r1=777760&r2=777761&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Sat May 23 01:38:14 2009
@@ -329,7 +329,6 @@
     HADOOP-5080. Add new test cases to TestMRCLI and TestHDFSCLI
     (V.Karthikeyan via nigel)
 
-
     HADOOP-5135. Splits the tests into different directories based on the 
     package. Four new test targets have been defined - run-test-core, 
     run-test-mapred, run-test-hdfs and run-test-hdfs-with-mr.
@@ -383,6 +382,10 @@
     HADOOP-5839. Fix EC2 scripts to allow remote job submission.
     (Joydeep Sen Sarma via tomwhite)
 
+    HADOOP-5877. Fix javac warnings in TestHDFSServerPorts, TestCheckpoint, 
+    TestNameEditsConfig, TestStartup and TestStorageRestore.
+    (Jakob Homan via shv)
+
   OPTIMIZATIONS
 
     HADOOP-5595. NameNode does not need to run a replicator to choose a

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=777761&r1=777760&r2=777761&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Sat May 23 01:38:14 2009
@@ -51,7 +51,7 @@
  * namespace image to local disk(s).</li>
  * </ol>
  */
-class BackupNode extends NameNode {
+public class BackupNode extends NameNode {
   private static final String BN_ADDRESS_NAME_KEY = "dfs.backup.address";
   private static final String BN_ADDRESS_DEFAULT = "localhost:50100";
   private static final String BN_HTTP_ADDRESS_NAME_KEY = "dfs.backup.http.address";
@@ -90,6 +90,9 @@
 
   @Override // NameNode
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
+    // It is necessary to resolve the hostname at this point in order
+    // to ensure that the server address that is sent to the namenode
+    // is correct.
     assert rpcAddress != null : "rpcAddress should be calculated first";
     String addr = conf.get(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
     int port = NetUtils.createSocketAddr(addr).getPort();

Modified: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=777761&r1=777760&r2=777761&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Sat May 23 01:38:14 2009
@@ -19,19 +19,24 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.net.UnknownHostException;
 
 import junit.framework.TestCase;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.BackupNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
+import org.apache.hadoop.net.DNS;
 
 /**
  * This test checks correctness of port usage by hdfs components:
- * NameNode, DataNode, and SecondaryNamenode.
+ * NameNode, DataNode, SecondaryNamenode and BackupNode.
  * 
  * The correct behavior is:<br> 
  * - when a specific port is provided the server must either start on that port 
@@ -40,17 +45,47 @@
  * a free port and start on it.
  */
 public class TestHDFSServerPorts extends TestCase {
+  public static final Log LOG = LogFactory.getLog(TestHDFSServerPorts.class);
+  
   public static final String NAME_NODE_HOST = "localhost:";
-  public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
+  public static final String NAME_NODE_HTTP_HOST = getFullHostName() + ":";
 
   Configuration config;
   File hdfsDir;
 
   /**
-   * Start the name-node.
+   * Attempt to determine the fully qualified domain name for this host 
+   * to compare during testing.
+   * 
+   * This is necessary because in order for the BackupNode test to correctly 
+   * work, the namenode must have its http server started with the fully 
+   * qualified address, as this is the one the backupnode will attempt to start
+   * on as well.
+   * 
+   * @return Fully qualified hostname, or 127.0.0.1 if can't determine
+   */
+  private static String getFullHostName() {
+    try {
+      return DNS.getDefaultHost("default");
+    } catch (UnknownHostException e) {
+      LOG.warn("Unable to determine hostname.  May interfere with obtaining " +
+          "valid test results.");
+      return "127.0.0.1";
+    }
+  }
+  
+  /**
+   * Get base directory these tests should run in.
+   */
+  private String getTestingDir() {
+    return System.getProperty("test.build.data", "build/test/data");
+  }
+  
+  /**
+   * Start the namenode.
    */
   public NameNode startNameNode() throws IOException {
-    String dataDir = System.getProperty("test.build.data");
+    String dataDir = getTestingDir();
     hdfsDir = new File(dataDir, "dfs");
     if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
       throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
@@ -67,11 +102,39 @@
   }
 
   /**
-   * Start the data-node.
+   * Start the BackupNode
+   */
+  public BackupNode startBackupNode(Configuration conf) throws IOException {
+    String dataDir = getTestingDir();
+    // Set up testing environment directories
+    hdfsDir = new File(dataDir, "backupNode");
+    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+      throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
+    }
+    File currDir = new File(hdfsDir, "name2");
+    File currDir2 = new File(currDir, "current");
+    File currDir3 = new File(currDir, "image");
+    
+    assertTrue(currDir.mkdirs());
+    assertTrue(currDir2.mkdirs());
+    assertTrue(currDir3.mkdirs());
+    
+    conf.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
+    conf.set("dfs.name.edits.dir", "${dfs.name.dir}");
+    
+    // Start BackupNode
+    String[] args = new String [] { StartupOption.BACKUP.getName() };
+    BackupNode bu = (BackupNode)NameNode.createNameNode(args, conf);
+
+    return bu;
+  }
+  
+  /**
+   * Start the datanode.
    */
   public DataNode startDataNode(int index, Configuration config) 
   throws IOException {
-    String dataDir = System.getProperty("test.build.data");
+    String dataDir = getTestingDir();
     File dataNodeDir = new File(dataDir, "data-" + index);
     config.set("dfs.data.dir", dataNodeDir.getPath());
 
@@ -100,7 +163,7 @@
   }
 
   /**
-   * Check whether the name-node can be started.
+   * Check whether the namenode can be started.
    */
   private boolean canStartNameNode(Configuration conf) throws IOException {
     NameNode nn2 = null;
@@ -110,13 +173,14 @@
       if (e instanceof java.net.BindException)
         return false;
       throw e;
+    } finally {
+      stopNameNode(nn2);
     }
-    stopNameNode(nn2);
     return true;
   }
 
   /**
-   * Check whether the data-node can be started.
+   * Check whether the datanode can be started.
    */
   private boolean canStartDataNode(Configuration conf) throws IOException {
     DataNode dn = null;
@@ -126,29 +190,53 @@
       if (e instanceof java.net.BindException)
         return false;
       throw e;
+    } finally {
+      if(dn != null) dn.shutdown();
     }
-    dn.shutdown();
     return true;
   }
 
   /**
    * Check whether the secondary name-node can be started.
    */
+  @SuppressWarnings("deprecation")
   private boolean canStartSecondaryNode(Configuration conf) throws IOException {
-    SecondaryNameNode sn = null;
+    // Using full name allows us not to have to add deprecation tag to
+    // entire source file.
+    org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode sn = null;
+    try {
+      sn = new org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode(conf);
+    } catch(IOException e) {
+      if (e instanceof java.net.BindException)
+        return false;
+      throw e;
+    } finally {
+      if(sn != null) sn.shutdown();
+    }
+    return true;
+  }
+  
+  /**
+   * Check whether the BackupNode can be started.
+   */
+  private boolean canStartBackupNode(Configuration conf) throws IOException {
+    BackupNode bn = null;
+
     try {
-      sn = new SecondaryNameNode(conf);
+      bn = startBackupNode(conf);
     } catch(IOException e) {
       if (e instanceof java.net.BindException)
         return false;
       throw e;
+    } finally {
+      if(bn != null) bn.stop();
     }
-    sn.shutdown();
+
     return true;
   }
 
   /**
-   * Verify name-node port usage.
+   * Verify namenode port usage.
    */
   public void testNameNodePorts() throws Exception {
     NameNode nn = null;
@@ -179,7 +267,7 @@
   }
 
   /**
-   * Verify data-node port usage.
+   * Verify datanode port usage.
    */
   public void testDataNodePorts() throws Exception {
     NameNode nn = null;
@@ -214,7 +302,7 @@
   }
 
   /**
-   * Verify secondary name-node port usage.
+   * Verify secondary namenode port usage.
    */
   public void testSecondaryNodePorts() throws Exception {
     NameNode nn = null;
@@ -225,14 +313,14 @@
       Configuration conf2 = new Configuration(config);
       conf2.set("dfs.secondary.http.address", 
                 config.get("dfs.http.address"));
-      SecondaryNameNode.LOG.info("= Starting 1 on: " + 
+      LOG.info("= Starting 1 on: " + 
                                  conf2.get("dfs.secondary.http.address"));
       boolean started = canStartSecondaryNode(conf2);
       assertFalse(started); // should fail
 
       // bind http server to a different port
       conf2.set("dfs.secondary.http.address", NAME_NODE_HTTP_HOST + "0");
-      SecondaryNameNode.LOG.info("= Starting 2 on: " + 
+      LOG.info("= Starting 2 on: " + 
                                  conf2.get("dfs.secondary.http.address"));
       started = canStartSecondaryNode(conf2);
       assertTrue(started); // should start now
@@ -240,4 +328,34 @@
       stopNameNode(nn);
     }
   }
+    
+    /**
+     * Verify BackupNode port usage.
+     */
+    public void testBackupNodePorts() throws Exception {
+      NameNode nn = null;
+      try {
+        nn = startNameNode();
+
+        // bind http server to the same port as name-node
+        Configuration backup_config = new Configuration(config);
+        backup_config.set("dfs.backup.http.address", 
+                                        backup_config.get("dfs.http.address"));
+
+        LOG.info("= Starting 1 on: " + 
+                                  backup_config.get("dfs.backup.http.address"));
+
+        assertFalse("Backup started on same port as Namenode", 
+                           canStartBackupNode(backup_config)); // should fail
+
+        // bind http server to a different port
+        backup_config.set("dfs.backup.http.address", NAME_NODE_HTTP_HOST + "0");
+        LOG.info("= Starting 2 on: " + 
+                                  backup_config.get("dfs.backup.http.address"));
+
+        assertTrue(canStartBackupNode(backup_config)); // should start now
+      } finally {
+        stopNameNode(nn);
+      }
+  }
 }

Modified: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=777761&r1=777760&r2=777761&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Sat May 23 01:38:14 2009
@@ -134,6 +134,7 @@
   /*
    * Simulate namenode crashing after rolling edit log.
    */
+  @SuppressWarnings("deprecation")
   private void testSecondaryNamenodeError1(Configuration conf)
     throws IOException {
     System.out.println("Starting testSecondaryNamenodeError 1");
@@ -210,6 +211,7 @@
   /*
    * Simulate a namenode crash after uploading new image
    */
+  @SuppressWarnings("deprecation")
   private void testSecondaryNamenodeError2(Configuration conf)
     throws IOException {
     System.out.println("Starting testSecondaryNamenodeError 21");
@@ -268,6 +270,7 @@
   /*
    * Simulate a secondary namenode crash after rolling the edit log.
    */
+  @SuppressWarnings("deprecation")
   private void testSecondaryNamenodeError3(Configuration conf)
     throws IOException {
     System.out.println("Starting testSecondaryNamenodeError 31");
@@ -336,6 +339,7 @@
    * back to the name-node.
    * Used to truncate primary fsimage file.
    */
+  @SuppressWarnings("deprecation")
   void testSecondaryFailsToReturnImage(Configuration conf)
     throws IOException {
     System.out.println("Starting testSecondaryFailsToReturnImage");
@@ -399,6 +403,7 @@
    * <li> Complete failed checkpoint for secondary node.
    * </ol>
    */
+  @SuppressWarnings("deprecation")
   void testStartup(Configuration conf) throws IOException {
     System.out.println("Startup of the name-node in the checkpoint directory.");
     String primaryDirs = conf.get("dfs.name.dir");
@@ -553,6 +558,9 @@
     return nn;
   }
 
+  // This deprecation suppress warning does not work due to known Java bug:
+  // http://bugs.sun.com/view_bug.do?bug_id=6460147
+  @SuppressWarnings("deprecation")
   SecondaryNameNode startSecondaryNameNode(Configuration conf
                                           ) throws IOException {
     conf.set("dfs.secondary.http.address", "0.0.0.0:0");
@@ -562,6 +570,7 @@
   /**
    * Tests checkpoint in HDFS.
    */
+  @SuppressWarnings("deprecation")
   public void testCheckpoint() throws IOException {
     Path file1 = new Path("checkpoint.dat");
     Path file2 = new Path("checkpoint2.dat");

Modified: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java?rev=777761&r1=777760&r2=777761&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java (original)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java Sat May 23 01:38:14 2009
@@ -88,6 +88,9 @@
     assertTrue(!fileSys.exists(name));
   }
 
+  // This deprecation suppress warning does not work due to known Java bug:
+  // http://bugs.sun.com/view_bug.do?bug_id=6460147
+  @SuppressWarnings("deprecation")
   SecondaryNameNode startSecondaryNameNode(Configuration conf
                                           ) throws IOException {
     conf.set("dfs.secondary.http.address", "0.0.0.0:0");
@@ -106,6 +109,7 @@
    * All along the test, we create and delete files at reach restart to make
    * sure we are reading proper edits and image.
    */
+  @SuppressWarnings("deprecation")
   public void testNameEditsConfigs() throws IOException {
     Path file1 = new Path("TestNameEditsConfigs1");
     Path file2 = new Path("TestNameEditsConfigs2");

Modified: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=777761&r1=777760&r2=777761&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Sat May 23 01:38:14 2009
@@ -83,6 +83,7 @@
    * start MiniDFScluster, create a file (to create edits) and do a checkpoint  
    * @throws IOException
    */
+  @SuppressWarnings("deprecation")
   public void createCheckPoint() throws IOException {
     LOG.info("--starting mini cluster");
     // manage dirs parameter set to false 
@@ -248,6 +249,7 @@
    * secondary node copies fsimage and edits into correct separate directories.
    * @throws IOException
    */
+  @SuppressWarnings("deprecation")
   public void testSNNStartup() throws IOException{
     //setUpConfig();
     LOG.info("--starting SecondNN startup test");

Modified: hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=777761&r1=777760&r2=777761&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/core/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Sat May 23 01:38:14 2009
@@ -282,6 +282,7 @@
    * 7. run doCheckpoint
    * 8. verify that all the image and edits files are the same.
    */
+  @SuppressWarnings("deprecation")
   public void testStorageRestore() throws Exception {
     int numDatanodes = 2;
    cluster = new MiniDFSCluster(0, config, numDatanodes, true, false, true,  null, null, null, null);



Mime
View raw message