hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ste...@apache.org
Subject svn commit: r752949 [3/3] - in /hadoop/core: branches/HADOOP-3628/src/test/org/apache/hadoop/cli/ branches/HADOOP-3628/src/test/org/apache/hadoop/hdfs/ trunk/conf/ trunk/ivy/ trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/ trunk/src/c...
Date Thu, 12 Mar 2009 17:50:05 GMT
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu Mar 12 17:50:03 2009
@@ -25,12 +25,14 @@
 import java.nio.channels.FileChannel;
 import java.util.Random;
 import java.io.RandomAccessFile;
+import java.io.Closeable;
 
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.net.*;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -43,13 +45,18 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.security.*;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.util.Service;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 /**
  * This class creates a single-process DFS cluster for junit testing.
  * The data directories for non-simulated DFS are under the testing directory.
  * For simulated data nodes, no underlying fs storage is used.
  */
-public class MiniDFSCluster {
+public class MiniDFSCluster implements Closeable {
+  private static final int WAIT_SLEEP_TIME_MILLIS = 500;
+  private static final int STARTUP_TIMEOUT_MILLIS = 30000;
 
   public class DataNodeProperties {
     DataNode datanode;
@@ -63,6 +70,7 @@
     }
   }
 
+  private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);
   private Configuration conf;
   private NameNode nameNode;
   private int numDataNodes;
@@ -281,17 +289,18 @@
   }
 
   /**
-   * wait for the cluster to get out of 
+   * wait for the cluster to get out of
    * safemode.
    */
   public void waitClusterUp() {
     if (numDataNodes > 0) {
-      while (!isClusterUp()) {
-        try {
-          System.err.println("Waiting for the Mini HDFS Cluster to start...");
-          Thread.sleep(1000);
-        } catch (InterruptedException e) {
+      try {
+        while (!isClusterUp()) {
+          LOG.warn("Waiting for the Mini HDFS Cluster to start...");
+          Thread.sleep(WAIT_SLEEP_TIME_MILLIS);
         }
+      } catch (InterruptedException e) {
+        LOG.warn("Interrupted during startup", e);
       }
     }
   }
@@ -323,7 +332,6 @@
                              boolean manageDfsDirs, StartupOption operation, 
                              String[] racks, String[] hosts,
                              long[] simulatedCapacities) throws IOException {
-
     int curDatanodesNum = dataNodes.size();
     // for mincluster's the default initialDelay for BRs is 0
     if (conf.get("dfs.blockreport.initialDelay") == null) {
@@ -350,7 +358,7 @@
     }
     //Generate some hostnames if required
     if (racks != null && hosts == null) {
-      System.out.println("Generating host names for datanodes");
+      LOG.info("Generating host names for datanodes");
       hosts = new String[numDataNodes];
       for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
         hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
@@ -393,16 +401,16 @@
         dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
             simulatedCapacities[i-curDatanodesNum]);
       }
-      System.out.println("Starting DataNode " + i + " with dfs.data.dir: " 
+      LOG.info("Starting DataNode " + i + " with dfs.data.dir: "
                          + dnConf.get("dfs.data.dir"));
       if (hosts != null) {
         dnConf.set("slave.host.name", hosts[i - curDatanodesNum]);
-        System.out.println("Starting DataNode " + i + " with hostname set to: " 
+        LOG.info("Starting DataNode " + i + " with hostname set to: "
                            + dnConf.get("slave.host.name"));
       }
       if (racks != null) {
         String name = hosts[i - curDatanodesNum];
-        System.out.println("Adding node with hostname : " + name + " to rack "+
+        LOG.info("Adding node with hostname : " + name + " to rack "+
                             racks[i-curDatanodesNum]);
         StaticMapping.addNodeToRack(name,
                                     racks[i-curDatanodesNum]);
@@ -417,7 +425,7 @@
       String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
       if (racks != null) {
         int port = dn.getSelfAddr().getPort();
-        System.out.println("Adding node with IP:port : " + ipAddr + ":" + port+
+        LOG.info("Adding node with IP:port : " + ipAddr + ":" + port+
                             " to rack " + racks[i-curDatanodesNum]);
         StaticMapping.addNodeToRack(ipAddr + ":" + port,
                                   racks[i-curDatanodesNum]);
@@ -548,8 +556,8 @@
   /**
    * Shut down the servers that are up.
    */
-  public void shutdown() {
-    System.out.println("Shutting down the Mini HDFS Cluster");
+  public synchronized void shutdown() {
+    LOG.info("Shutting down the Mini HDFS Cluster");
     shutdownDataNodes();
     if (nameNode != null) {
       nameNode.stop();
@@ -557,20 +565,57 @@
       nameNode = null;
     }
   }
-  
+
+  /**
+   * Shuts down the cluster.  
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  public void close() throws IOException {
+    shutdown();
+  }
+
+  /**
+   * Static operation to shut down a cluster;
+   * harmless if the cluster argument is null
+   *
+   * @param cluster cluster to shut down, or null for no cluster
+   */
+  public static void close(Closeable cluster) {
+    Service.close(cluster);
+  }
+
   /**
    * Shutdown all DataNodes started by this class.  The NameNode
    * is left running so that new DataNodes may be started.
    */
   public void shutdownDataNodes() {
     for (int i = dataNodes.size()-1; i >= 0; i--) {
-      System.out.println("Shutting down DataNode " + i);
+      LOG.info("Shutting down DataNode " + i);
       DataNode dn = dataNodes.remove(i).datanode;
       dn.shutdown();
       numDataNodes--;
     }
   }
 
+  /**
+   * Returns a string representation of the cluster.
+   *
+   * @return a string representation of the cluster
+   */
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder();
+    builder.append("Cluster up:").append(isClusterUp());
+    builder.append("\nName Node:").append(getNameNode());
+    builder.append("\nData node count:").append(dataNodes.size());
+    for (DataNodeProperties dnp : dataNodes) {
+      builder.append("\n Datanode: ").append(dnp.datanode);
+      builder.append("\n  state: ").append(dnp.datanode.getServiceState());
+    }
+    return builder.toString();
+  }
+
   /*
    * Corrupt a block on all datanode
    */
@@ -595,12 +640,15 @@
       if (blockFile.exists()) {
         // Corrupt replica by writing random bytes into replica
         RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
-        FileChannel channel = raFile.getChannel();
-        String badString = "BADBAD";
-        int rand = random.nextInt((int)channel.size()/2);
-        raFile.seek(rand);
-        raFile.write(badString.getBytes());
-        raFile.close();
+        try {
+          FileChannel channel = raFile.getChannel();
+          String badString = "BADBAD";
+          int rand = random.nextInt((int) channel.size() / 2);
+          raFile.seek(rand);
+          raFile.write(badString.getBytes());
+        } finally {
+          raFile.close();
+        }
       }
       corrupted = true;
     }
@@ -616,7 +664,7 @@
     }
     DataNodeProperties dnprop = dataNodes.remove(i);
     DataNode dn = dnprop.datanode;
-    System.out.println("MiniDFSCluster Stopping DataNode " + 
+    LOG.info("MiniDFSCluster Stopping DataNode " +
                        dn.dnRegistration.getName() +
                        " from a total of " + (dataNodes.size() + 1) + 
                        " datanodes.");
@@ -655,8 +703,10 @@
     }
   }
 
-  /*
+  /**
    * Shutdown a datanode by name.
+   * @param name datanode name
+   * @return the properties of the stopped datanode, or null if no datanode matched the name
    */
   public synchronized DataNodeProperties stopDataNode(String name) {
     int i;
@@ -670,7 +720,7 @@
   }
   
   /**
-   * Returns true if the NameNode is running and is out of Safe Mode.
+   * @return true if the NameNode is running and is out of Safe Mode.
    */
   public boolean isClusterUp() {
     if (nameNode == null) {
@@ -685,7 +735,7 @@
   }
   
   /**
-   * Returns true if there is at least one DataNode running.
+   * @return true if there is at least one DataNode running.
    */
   public boolean isDataNodeUp() {
     if (dataNodes == null || dataNodes.size() == 0) {
@@ -696,13 +746,15 @@
   
   /**
    * Get a client handle to the DFS cluster.
+   * @return a new filesystem, which must be closed when no longer needed.
+   * @throws IOException if the filesystem cannot be created
    */
   public FileSystem getFileSystem() throws IOException {
     return FileSystem.get(conf);
   }
 
   /**
-   * Get the directories where the namenode stores its image.
+   * @return the directories where the namenode stores its state.
    */
   public Collection<File> getNameDirs() {
     return FSNamesystem.getNamespaceDirs(conf);
@@ -725,17 +777,27 @@
     InetSocketAddress addr = new InetSocketAddress("localhost",
                                                    getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
+    try {
+      DatanodeInfo[] dnInfos;
 
-    // make sure all datanodes are alive
-    while(client.datanodeReport(DatanodeReportType.LIVE).length
-        != numDataNodes) {
-      try {
-        Thread.sleep(500);
-      } catch (Exception e) {
+      // make sure all datanodes are alive
+      long timeout=System.currentTimeMillis() + STARTUP_TIMEOUT_MILLIS;
+      while((dnInfos = client.datanodeReport(DatanodeReportType.LIVE)).length
+          != numDataNodes) {
+        try {
+          Thread.sleep(WAIT_SLEEP_TIME_MILLIS);
+          if(System.currentTimeMillis() > timeout) {
+            throw new IOException("Timeout waiting for the datanodes. "+
+                    "Expected " + numDataNodes + " but got " + dnInfos.length);
+          }
+        } catch (InterruptedException e) {
+          throw new IOException("Interrupted while waiting for the datanodes",e);
+        }
       }
+    } finally {
+      client.close();
     }
 
-    client.close();
   }
   
   public void formatDataNodeDirs() throws IOException {
@@ -824,7 +886,7 @@
   }
 
   /**
-   * Returns the current set of datanodes
+   * @return the current set of datanodes
    */
   DataNode[] listDataNodes() {
     DataNode[] list = new DataNode[dataNodes.size()];

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java Thu Mar 12 17:50:03 2009
@@ -58,7 +58,7 @@
     config = new Configuration();
     config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
     FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
-    config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
+    config.set("dfs.http.address", "0.0.0.0:0");
     NameNode.format(config);
 
     String[] args = new String[] {};

Added: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestMiniDFSCluster.java?rev=752949&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestMiniDFSCluster.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestMiniDFSCluster.java Thu Mar 12 17:50:03 2009
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.net.Socket;
+import java.net.SocketAddress;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+
+import junit.framework.TestCase;
+import junit.framework.AssertionFailedError;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * Tests that the dfs cluster can be started and stopped and that it appears to
+ * work
+ */
+public class TestMiniDFSCluster extends TestCase {
+
+  private Log log = LogFactory.getLog(getClass());
+  private static final int CONNECT_TIMEOUT = 1000;
+  private static final int PORT_OPEN_TIMEOUT = 20000;
+  private MiniDFSCluster cluster;
+
+
+  /**
+   * Tears down the fixture (for example, closing a network connection). This
+   * method is called after each test is executed.
+   */
+  @Override
+  protected void tearDown() throws Exception {
+    MiniDFSCluster.close(cluster);
+    super.tearDown();
+  }
+
+  private MiniDFSCluster createCluster() throws IOException {
+    MiniDFSCluster miniCluster;
+    Configuration conf = new Configuration();
+    log.info("Starting the cluster");
+    miniCluster = new MiniDFSCluster(conf, 1, true, null);
+    log.info("cluster is created");
+    log.info(miniCluster);
+    return miniCluster;
+  }
+
+  public void testClusterStartStop() throws Throwable {
+    long start = System.currentTimeMillis();
+    cluster = createCluster();
+    long live = System.currentTimeMillis();
+    log.info("cluster is live -time: "
+            + (live - start) / 1000.0 + " seconds");
+    log.info("terminating cluster");
+    cluster.shutdown();
+    long terminated = System.currentTimeMillis();
+    log.info("cluster is terminated -time: "
+            + (terminated - live) / 1000.0 + " seconds");
+    assertClusterIsNotLive(cluster);
+    assertFalse("Datanodes are still up after cluster Termination",
+            cluster.isDataNodeUp());
+  }
+
+  private void assertClusterIsNotLive(MiniDFSCluster cluster) {
+    assertFalse("Cluster still thinks it is live", cluster.isClusterUp());
+  }
+
+
+  /**
+   * Check that shutting down twice is OK
+   *
+   * @throws Throwable if something went wrong
+   */
+  public void testClusterDoubleShutdown() throws Throwable {
+    cluster = createCluster();
+    log.info("terminating cluster");
+    MiniDFSCluster.close(cluster);
+    assertClusterIsNotLive(cluster);
+    log.info("terminating cluster again");
+    cluster.shutdown();
+    log.info("cluster is terminated");
+    assertClusterIsNotLive(cluster);
+    //and finish by terminating again
+    cluster = null;
+  }
+
+
+  /**
+   * Check that a cluster is not live until asked
+   *
+   * @throws Throwable if something went wrong
+   */
+  public void testClusterGoesLive() throws Throwable {
+    cluster = createCluster();
+    assertTrue("Cluster thinks it is not live", cluster.isClusterUp());
+    assertTrue("No datanodes are up", cluster.isDataNodeUp());
+    log.info("terminating cluster");
+    MiniDFSCluster.close(cluster);
+    cluster = null;
+  }
+
+  /**
+   * Check that the cluster's network services are reachable while it is live,
+   * and are closed after shutdown
+   *
+   * @throws Throwable if something went wrong
+   */
+  public void testClusterNetworkVisible() throws Throwable {
+    cluster = createCluster();
+    NameNode namenode = cluster.getNameNode();
+    int infoServerPort = namenode.getHttpAddress().getPort();
+
+    int nameNodePort = cluster.getNameNodePort();
+    assertPortOpen("Name Node", nameNodePort);
+    assertPortOpen("Name Node Info Server", infoServerPort);
+    ArrayList<DataNode> datanodes = cluster.getDataNodes();
+    ArrayList<Integer> ports = new ArrayList<Integer>(datanodes.size());
+    for (DataNode node : datanodes) {
+      int port = node.getSelfAddr().getPort();
+      ports.add(port);
+      assertPortOpen("Data Node " + node, port);
+    }
+    log.info("terminating cluster");
+    MiniDFSCluster.close(cluster);
+    assertClusterIsNotLive(cluster);
+    assertPortClosed("Name Node", nameNodePort);
+    assertPortClosed("Name Node Info Server", infoServerPort);
+    assertPortsClosed("Data Node", ports);
+  }
+
+  private void assertPortsClosed(String name, ArrayList<Integer> ports)
+          throws Throwable {
+    for (int port : ports) {
+      assertPortClosed(name, port);
+    }
+  }
+
+  void assertPortOpen(String name, int port) throws Throwable {
+    assertPortOpen(name, port, PORT_OPEN_TIMEOUT);
+  }
+  
+  void assertPortOpen(String name, int port, int timeout) throws Throwable {
+    Socket socket = null;
+    SocketAddress sa = new InetSocketAddress("localhost", port);
+    IOException failure = null;
+    long endtime = System.currentTimeMillis() + timeout;
+    boolean connected = false;
+    do {
+      try {
+        socket = new Socket();
+        socket.connect(sa, CONNECT_TIMEOUT);
+        connected = true;
+      } catch (IOException e) {
+        failure = e;
+      } finally {
+        socket.close();
+      }
+    } while (!connected && System.currentTimeMillis() < endtime);
+
+    if (!connected) {
+      AssertionFailedError afe =
+              new AssertionFailedError("Failed to connect to the service "
+                      + name + " on port " + port + " : " + failure);
+      afe.initCause(failure);
+      throw afe;
+    }
+  }
+
+  void assertPortClosed(String name, int port) throws Throwable {
+    Socket socket = new Socket();
+    try {
+      SocketAddress sa = new InetSocketAddress("localhost", port);
+      socket.connect(sa, CONNECT_TIMEOUT);
+      fail("Expected the service " + name
+              + " on port " + port + " to be offline, but it is open");
+    } catch (IOException expected) {
+      log.debug("IOE checking port " + port + ": " + expected,
+              expected);
+    } finally {
+      socket.close();
+    }
+  }
+}

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestReplication.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestReplication.java Thu Mar 12 17:50:03 2009
@@ -19,7 +19,6 @@
 
 import junit.framework.TestCase;
 import java.io.*;
-import java.util.Iterator;
 import java.util.Random;
 import java.net.*;
 
@@ -71,7 +70,7 @@
     Configuration conf = fileSys.getConf();
     ClientProtocol namenode = DFSClient.createNamenode(conf);
       
-    waitForBlockReplication(name.toString(), namenode, 
+    waitForBlockReplication(name.toString(), namenode,
                             Math.min(numDatanodes, repl), -1);
     
     LocatedBlocks locations = namenode.getBlockLocations(name.toString(),0,
@@ -169,9 +168,9 @@
     // Now get block details and check if the block is corrupt
     blocks = dfsClient.namenode.
               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+    LOG.info("Waiting until block is marked as corrupt...");
     while (blocks.get(0).isCorrupt() != true) {
       try {
-        LOG.info("Waiting until block is marked as corrupt...");
         Thread.sleep(1000);
       } catch (InterruptedException ie) {
       }
@@ -249,16 +248,14 @@
       boolean replOk = true;
       LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, 
                                                         Long.MAX_VALUE);
-      
-      for (Iterator<LocatedBlock> iter = blocks.getLocatedBlocks().iterator();
-           iter.hasNext();) {
-        LocatedBlock block = iter.next();
+
+      for (LocatedBlock block : blocks.getLocatedBlocks()) {
         int actual = block.getLocations().length;
-        if ( actual < expected ) {
+        if (actual < expected) {
           if (true || iters > 0) {
             LOG.info("Not enough replicas for " + block.getBlock() +
-                               " yet. Expecting " + expected + ", got " + 
-                               actual + ".");
+                    " yet. Expecting " + expected + ", got " +
+                    actual + ".");
           }
           replOk = false;
           break;
@@ -385,10 +382,8 @@
       waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
       
     } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }  
+      MiniDFSCluster.close(cluster);
+    }
   }
   
   /**

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Thu Mar 12 17:50:03 2009
@@ -137,6 +137,11 @@
     
     editLog.close();
 
+    //check that the namesystem is still healthy
+    assertNotNull("FSNamesystem.getFSNamesystem()  is null",
+            FSNamesystem.getFSNamesystem());
+    assertNotNull("FSNamesystem.getFSNamesystem().dir is null",
+            FSNamesystem.getFSNamesystem().dir);
     // Verify that we can read in all the transactions that we have written.
     // If there were any corruptions, it is likely that the reading in
     // of these transactions will throw an exception.

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/ClusterMapReduceTestCase.java Thu Mar 12 17:50:03 2009
@@ -93,7 +93,7 @@
       config = props;
     }
 
-    public ConfigurableMiniMRCluster(int numTaskTrackers, String namenode,
+    private ConfigurableMiniMRCluster(int numTaskTrackers, String namenode,
                                      int numDir) throws Exception {
       super(numTaskTrackers, namenode, numDir);
     }
@@ -121,14 +121,10 @@
    * @throws Exception if the cluster could not be stopped
    */
   protected void stopCluster() throws Exception {
-    if (mrCluster != null) {
-      mrCluster.shutdown();
-      mrCluster = null;
-    }
-    if (dfsCluster != null) {
-      dfsCluster.shutdown();
-      dfsCluster = null;
-    }
+    MiniMRCluster.close(mrCluster);
+    mrCluster = null;
+    MiniDFSCluster.close(dfsCluster);
+    dfsCluster = null;
   }
 
   /**

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/HadoopTestCase.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/HadoopTestCase.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/HadoopTestCase.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/HadoopTestCase.java Thu Mar 12 17:50:03 2009
@@ -163,22 +163,8 @@
    * @throws Exception
    */
   protected void tearDown() throws Exception {
-    try {
-      if (mrCluster != null) {
-        mrCluster.shutdown();
-      }
-    }
-    catch (Exception ex) {
-      System.out.println(ex);
-    }
-    try {
-      if (dfsCluster != null) {
-        dfsCluster.shutdown();
-      }
-    }
-    catch (Exception ex) {
-      System.out.println(ex);
-    }
+    MiniMRCluster.close(mrCluster);
+    MiniDFSCluster.close(dfsCluster);
     super.tearDown();
   }
 

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java Thu Mar 12 17:50:03 2009
@@ -19,6 +19,7 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.io.Closeable;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
@@ -31,12 +32,13 @@
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.util.Service;
 
 /**
  * This class creates a single-process Map-Reduce cluster for junit testing.
  * One thread is created for each server.
  */
-public class MiniMRCluster {
+public class MiniMRCluster implements Closeable {
   private static final Log LOG = LogFactory.getLog(MiniMRCluster.class);
     
   private Thread jobTrackerThread;
@@ -52,9 +54,13 @@
     
   private String namenode;
   private UnixUserGroupInformation ugi = null;
+  private static final int TRACKER_STABILIZATION_TIMEOUT = 30000;
+
   private JobConf conf;
     
   private JobConf job;
+  /** time for a tracker to shut down : {@value} */
+  private static final long TRACKER_SHUTDOWN_TIMEOUT = 30000;
   
   /**
    * An inner class that runs a job tracker.
@@ -101,7 +107,7 @@
         tracker = JobTracker.startTracker(jc);
         tracker.offerService();
       } catch (Throwable e) {
-        LOG.error("Job tracker crashed", e);
+        LOG.error("Job tracker crashed: " + e, e);
         isActive = false;
       }
     }
@@ -110,13 +116,12 @@
      * Shutdown the job tracker and wait for it to finish.
      */
     public void shutdown() {
-      try {
-        if (tracker != null) {
-          tracker.stopTracker();
-        }
-      } catch (Throwable e) {
-        LOG.error("Problem shutting down job tracker", e);
+      JobTracker jobTracker;
+      synchronized (this) {
+        jobTracker = tracker;
+        tracker = null;
       }
+      Service.close(jobTracker);
       isActive = false;
     }
   }
@@ -177,7 +182,7 @@
       } catch (Throwable e) {
         isDead = true;
         tt = null;
-        LOG.error("task tracker " + trackerId + " crashed", e);
+        LOG.error("task tracker " + trackerId + " crashed : "+e, e);
       }
     }
         
@@ -420,7 +425,7 @@
      
      //Generate rack names if required
      if (racks == null) {
-       System.out.println("Generating rack names for tasktrackers");
+       LOG.info("Generating rack names for tasktrackers");
        racks = new String[numTaskTrackers];
        for (int i=0; i < racks.length; ++i) {
          racks[i] = NetworkTopology.DEFAULT_RACK;
@@ -429,7 +434,7 @@
      
     //Generate some hostnames if required
     if (hosts == null) {
-      System.out.println("Generating host names for tasktrackers");
+      LOG.info("Generating host names for tasktrackers");
       hosts = new String[numTaskTrackers];
       for (int i = 0; i < numTaskTrackers; i++) {
         hosts[i] = "host" + i + ".foo.com";
@@ -470,6 +475,24 @@
     }
     
     this.job = createJobConf(conf);
+    // Wait till the MR cluster stabilizes
+    long timeout = System.currentTimeMillis() +
+            TRACKER_STABILIZATION_TIMEOUT;
+    while(jobTracker.tracker.getNumResolvedTaskTrackers() != numTaskTrackers) {
+      try {
+        Thread.sleep(50);
+      } catch (InterruptedException ie) {
+        throw new IOException("Interrupted during startup");
+      }
+      if(System.currentTimeMillis() > timeout) {
+        String message = "Time out waiting for the task trackers to stabilize. "
+                + "Expected tracker count: " + numTaskTrackers
+                + "  -actual count: "
+                + jobTracker.tracker.getNumResolvedTaskTrackers();
+        LOG.error(message);
+        throw new IOException(message);
+      }
+    }
     waitUntilIdle();
   }
     
@@ -583,12 +606,11 @@
    * Kill the jobtracker.
    */
   public void stopJobTracker() {
-    //jobTracker.exit(-1);
     jobTracker.shutdown();
 
     jobTrackerThread.interrupt();
     try {
-      jobTrackerThread.join();
+      jobTrackerThread.join(TRACKER_SHUTDOWN_TIMEOUT);
     } catch (InterruptedException ex) {
       LOG.error("Problem waiting for job tracker to finish", ex);
     }
@@ -649,6 +671,25 @@
     }
   }
     
+  /**
+   * Static operation to shut down a cluster; harmless if the cluster argument
+   * is null
+   *
+   * @param cluster cluster to shut down, or null for no cluster
+   */
+  public static void close(Closeable cluster) {
+    Service.close(cluster);
+  }
+
+  /**
+   * Shuts down the cluster.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  public void close() throws IOException {
+    shutdown();
+  }
+    
   public static void main(String[] args) throws IOException {
     LOG.info("Bringing up Jobtracker and tasktrackers.");
     MiniMRCluster mr = new MiniMRCluster(4, "file:///", 1);

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMRServerPorts.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMRServerPorts.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMRServerPorts.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMRServerPorts.java Thu Mar 12 17:50:03 2009
@@ -35,6 +35,9 @@
  */
 public class TestMRServerPorts extends TestCase {
   TestHDFSServerPorts hdfs = new TestHDFSServerPorts();
+  private static final String STARTED_UNEXPECTEDLY
+          = "the tracker should not have started";
+  private static final String FAILED_TO_START = "The Job tracker did not start";
 
   // Runs the JT in a separate thread
   private static class JTRunner extends Thread {
@@ -85,7 +88,6 @@
         return false;
       throw e;
     }
-    jt.fs.close();
     jt.stopTracker();
     return true;
   }
@@ -122,21 +124,21 @@
       conf2.set("mapred.job.tracker.http.address",
         TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
       boolean started = canStartJobTracker(conf2);
-      assertFalse(started); // should fail
+      assertFalse(STARTED_UNEXPECTEDLY, started); // should fail
 
       // bind http server to the same port as name-node
       conf2.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
       conf2.set("mapred.job.tracker.http.address",
         hdfs.getConfig().get("dfs.http.address"));
       started = canStartJobTracker(conf2);
-      assertFalse(started); // should fail again
+      assertFalse(STARTED_UNEXPECTEDLY, started); // should fail again
 
       // both ports are different from the name-node ones
       conf2.set("mapred.job.tracker", TestHDFSServerPorts.NAME_NODE_HOST + 0);
       conf2.set("mapred.job.tracker.http.address",
         TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
       started = canStartJobTracker(conf2);
-      assertTrue(started); // should start now
+      assertTrue(FAILED_TO_START, started); // should start now
 
     } finally {
       hdfs.stopNameNode(nn);
@@ -163,7 +165,7 @@
       conf2.set("mapred.task.tracker.http.address",
         TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
       boolean started = canStartTaskTracker(conf2);
-      assertFalse(started); // should fail
+      assertFalse(STARTED_UNEXPECTEDLY, started); // should fail
 
       // bind http server to the same port as name-node
       conf2.set("mapred.task.tracker.report.address",
@@ -171,7 +173,7 @@
       conf2.set("mapred.task.tracker.http.address",
         hdfs.getConfig().get("dfs.http.address"));
       started = canStartTaskTracker(conf2);
-      assertFalse(started); // should fail again
+      assertFalse(STARTED_UNEXPECTEDLY, started); // should fail again
 
       // both ports are different from the name-node ones
       conf2.set("mapred.task.tracker.report.address",
@@ -179,7 +181,7 @@
       conf2.set("mapred.task.tracker.http.address",
         TestHDFSServerPorts.NAME_NODE_HTTP_HOST + 0);
       started = canStartTaskTracker(conf2);
-      assertTrue(started); // should start now
+      assertTrue(FAILED_TO_START, started); // should start now
     } finally {
       if (jt != null) {
         jt.fs.close();

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRMapRedDebugScript.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRMapRedDebugScript.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRMapRedDebugScript.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRMapRedDebugScript.java Thu Mar 12 17:50:03 2009
@@ -46,7 +46,12 @@
   private MiniMRCluster mr;
   private MiniDFSCluster dfs;
   private FileSystem fileSys;
-  
+  private static final String BAILING_OUT = "Bailing out";
+  private static final String TEST_SCRIPT_BAILING_OUT
+          = "Test Script\n"+ BAILING_OUT;
+  private static final int SCRIPT_SLEEP_TIMEOUT = 60000;
+  private static final int SCRIPT_SLEEP_INTERVAL = 1000;
+
   /**
    * Fail map class 
    */
@@ -55,7 +60,7 @@
      public void map (LongWritable key, Text value, 
                      OutputCollector<Text, IntWritable> output, 
                      Reporter reporter) throws IOException {
-       System.err.println("Bailing out");
+       System.err.println(BAILING_OUT);
        throw new IOException();
      }
   }
@@ -165,7 +170,17 @@
     // construct the task id of first map task of failmap
     TaskAttemptID taskId = new TaskAttemptID(new TaskID(jobId,true, 0), 0);
     // wait for the job to finish.
-    while (!job.isComplete()) ;
+    long timeout = System.currentTimeMillis() + SCRIPT_SLEEP_TIMEOUT;
+    while (!job.isComplete()) {
+      try {
+          Thread.sleep(SCRIPT_SLEEP_INTERVAL);
+        } catch (InterruptedException e) {
+          fail("Interrupted");
+        }
+        if(System.currentTimeMillis() > timeout) {
+          fail("Timeout waiting for the job to complete ");
+      }
+    }
     
     // return the output of debugout log.
     return readTaskLog(TaskLog.LogName.DEBUGOUT,taskId);
@@ -204,7 +219,9 @@
                                outDir,debugDir, debugScript, input);
       
       // Assert the output of debug script.
-      assertEquals("Test Script\nBailing out", result);
+      if(!result.contains(TEST_SCRIPT_BAILING_OUT)) {
+        fail("Did not find " + TEST_SCRIPT_BAILING_OUT + " in \n" + result);
+      }
 
     } finally {  
       // close file system and shut down dfs and mapred cluster

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMultipleLevelCaching.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMultipleLevelCaching.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMultipleLevelCaching.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMultipleLevelCaching.java Thu Mar 12 17:50:03 2009
@@ -112,13 +112,11 @@
        */
       TestRackAwareTaskPlacement.launchJobAndTestCounters(
     		  testName, mr, fileSys, inDir, outputPath, 1, 1, 0, 0);
-      mr.shutdown();
     } finally {
       fileSys.delete(inDir, true);
       fileSys.delete(outputPath, true);
-      if (dfs != null) { 
-        dfs.shutdown(); 
-      }
+      MiniMRCluster.close(mr);
+      MiniDFSCluster.close(dfs);
     }
   }
 }

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java Thu Mar 12 17:50:03 2009
@@ -151,14 +151,11 @@
       launchJobAndTestCounters(testName, mr, fileSys, inDir, outputPath, 3, 0,
                                0, 3);
       mr.shutdown();
+      mr=null;
       
     } finally {
-      if (dfs != null) { 
-        dfs.shutdown(); 
-      }
-      if (mr != null) { 
-        mr.shutdown();
-      }
+      MiniDFSCluster.close(dfs);
+      MiniMRCluster.close(mr);
     }
   }
   static RunningJob launchJob(JobConf jobConf, Path inDir, Path outputPath, 

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java Thu Mar 12 17:50:03 2009
@@ -247,8 +247,13 @@
       // they were running on a lost tracker
       testSetupAndCleanupKill(mr, dfs, false);
     } finally {
-      if (dfs != null) { dfs.shutdown(); }
-      if (mr != null) { mr.shutdown();
+      try {
+        if (dfs != null) { dfs.shutdown(); }
+        if (mr != null) { mr.shutdown();
+        }
+      } catch (OutOfMemoryError e) {
+        //ignore this as it means something went very wrong in the test logging
+        //any attempt to log it may make things worse
       }
     }
   }

Added: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestTaskTrackerLifecycle.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestTaskTrackerLifecycle.java?rev=752949&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestTaskTrackerLifecycle.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestTaskTrackerLifecycle.java Thu Mar 12 17:50:03 2009
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.util.Service;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * Test that the task tracker follows the service lifecycle
+ */
+
+public class TestTaskTrackerLifecycle extends TestCase {
+  private TaskTracker tracker;
+  private static final Log LOG = LogFactory.getLog(TestTaskTrackerLifecycle.class);
+
+  /**
+   * Tears down the fixture, for example, close a network connection. This method
+   * is called after a test is executed.
+   */
+  protected void tearDown() throws Exception {
+    super.tearDown();
+    Service.close(tracker);
+  }
+
+  /**
+   * Create a job conf suitable for testing
+   * @return a new job conf instance
+   */
+  private JobConf createJobConf() {
+    JobConf config = new JobConf();
+    String dataDir = System.getProperty("test.build.data");
+    File hdfsDir = new File(dataDir, "dfs");
+    config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
+    FileSystem.setDefaultUri(config, "hdfs://localhost:0");
+    config.set("dfs.http.address", "hdfs://localhost:0");
+    config.set("mapred.job.tracker", "localhost:8012");
+    config.set("ipc.client.connect.max.retries", "1");
+    return config;
+  }
+
+  private void assertConnectionRefused(IOException e) throws Throwable {
+    if(!e.getMessage().contains("Connection refused")) {
+      LOG.error("Wrong exception",e);
+      throw e;
+    }
+  }
+
+  /**
+   * Test that if a tracker isn't started, we can still terminate it cleanly
+   * @throws Throwable on a failure
+   */
+  public void testTerminateUnstartedTracker() throws Throwable {
+    tracker = new TaskTracker(createJobConf(), false);
+    tracker.ping();
+    tracker.close();
+  }
+
+  public void testOrphanTrackerFailure() throws Throwable {
+    try {
+      tracker = new TaskTracker(createJobConf());
+      fail("Expected a failure");
+    } catch (IOException e) {
+      assertConnectionRefused(e);
+    }
+  }
+
+  public void testFailingTracker() throws Throwable {
+    tracker = new TaskTracker(createJobConf(), false);
+    try {
+      tracker.start();
+      fail("Expected a failure");
+    } catch (IOException e) {
+      assertConnectionRefused(e);
+      assertEquals(Service.ServiceState.FAILED, tracker.getServiceState());
+    }
+  }
+
+  public void testStartedTracker() throws Throwable {
+    tracker = new TaskTracker(createJobConf(), false);
+    try {
+      Service.deploy(tracker);
+      fail("Expected a failure");
+    } catch (IOException e) {
+      assertConnectionRefused(e);
+      assertEquals(Service.ServiceState.CLOSED, tracker.getServiceState());
+    }
+    tracker.ping();
+    tracker.close();
+    tracker.ping();
+  }
+
+}

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/pipes/TestPipes.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/pipes/TestPipes.java?rev=752949&r1=752948&r2=752949&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/pipes/TestPipes.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/pipes/TestPipes.java Thu Mar 12 17:50:03 2009
@@ -81,8 +81,8 @@
       runNonPipedProgram(mr, dfs, new Path(cppExamples,"bin/wordcount-nopipe"));
       mr.waitUntilIdle();
     } finally {
-      mr.shutdown();
-      dfs.shutdown();
+      MiniMRCluster.close(mr);
+      MiniDFSCluster.close(dfs);
     }
   }
 

Added: hadoop/core/trunk/src/test/org/apache/hadoop/net/TestDNS.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/net/TestDNS.java?rev=752949&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/net/TestDNS.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/net/TestDNS.java Thu Mar 12 17:50:03 2009
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.net;
+
+import junit.framework.TestCase;
+
+import javax.naming.NamingException;
+import java.net.UnknownHostException;
+import java.net.InetAddress;
+import java.net.Inet6Address;
+import java.net.Inet4Address;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.util.Enumeration;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * These tests print things out. This is not normally approved of in testing,
+ * where it is the job of the test cases to determine success/failure. The
+ * printing of values is to help diagnose root causes of any failures.
+ */
+public class TestDNS extends TestCase {
+
+  private static final Log LOG = LogFactory.getLog(TestDNS.class);
+
+  private static final String DEFAULT = "default";
+
+  /**
+   * Constructs a test case with the given name.
+   *
+   * @param name test name
+   */
+  public TestDNS(String name) {
+    super(name);
+  }
+
+  public void testGetLocalHost() throws Exception {
+    String hostname = DNS.getDefaultHost(DEFAULT);
+    assertNotNull(hostname);
+  }
+
+  public void testGetLocalHostIsFast() throws Exception {
+    long t0 = System.currentTimeMillis();
+    String hostname = DNS.getDefaultHost(DEFAULT);
+    assertNotNull(hostname);
+    String hostname2 = DNS.getDefaultHost(DEFAULT);
+    long t2 = System.currentTimeMillis();
+    String hostname3 = DNS.getDefaultHost(DEFAULT);
+    long t3 = System.currentTimeMillis();
+    assertEquals(hostname3, hostname2);
+    assertEquals(hostname2, hostname);
+    long interval2 = t3 - t2;
+    assertTrue(
+            "It is taking too long to determine the local host - caching is not working",
+            interval2 < 20000);
+  }
+
+  public void testLocalHostHasAnAddress() throws Exception {
+    InetAddress addr = getLocalIPAddr();
+    assertNotNull(addr);
+    LOG.info("Default address: " + addr.getHostAddress());
+    if(addr.isLoopbackAddress()) {
+      LOG.warn("This is a loopback address, and not accessible remotely");
+    }
+    if (addr.isLinkLocalAddress()) {
+      LOG.warn("This is a link local address, "
+              + "and not accessible beyond the local network");
+    }
+
+    String canonical = addr.getCanonicalHostName();
+    LOG.info("Canonical Hostname of default address: " + canonical);
+    if(canonical.indexOf('.')<0) {
+      LOG.warn("The canonical hostname is not fully qualified: "+canonical);
+    }
+  }
+
+  private InetAddress getLocalIPAddr() throws UnknownHostException {
+    String hostname = DNS.getDefaultHost(DEFAULT);
+    InetAddress localhost = InetAddress.getByName(hostname);
+    return localhost;
+  }
+
+  public void testLocalHostHasAName() throws Throwable {
+    String hostname = DNS.getDefaultHost(DEFAULT);
+    assertNotNull(hostname);
+    LOG.info("Default hostname: " + hostname);
+  }
+
+  public void testLocalHostHasAtLeastOneName() throws Throwable {
+    String hostname = DNS.getDefaultHost(DEFAULT);
+    InetAddress[] inetAddresses = InetAddress.getAllByName(hostname);
+    int count = 0;
+    for (InetAddress address : inetAddresses) {
+      LOG.info("Address " + count + ": " + address);
+      count++;
+    }
+    assertTrue("No addresses enumerated", count > 0);
+
+  }
+
+  public void testNullInterface() throws Exception {
+    try {
+      String host = DNS.getDefaultHost(null);
+      fail("Expected a NullPointerException, got " + host);
+    } catch (NullPointerException e) {
+      //this is expected
+    }
+  }
+
+  public void testIPsOfUnknownInterface() throws Exception {
+    String[] ips = DNS.getIPs("name-of-an-unknown-interface");
+    assertNotNull(ips);
+    assertTrue(ips.length > 0);
+  }
+
+  public void testRDNS() throws Exception {
+    InetAddress localhost = getLocalIPAddr();
+    try {
+      String s = DNS.reverseDns(localhost, null);
+      LOG.info("Local RDNS hostname is " + s);
+    } catch (NamingException e) {
+      LOG.info("Unable to determine hostname of " + localhost
+              + " through Reverse DNS", e);
+    }
+  }
+
+  /**
+   * Here for diagnostics; if this is seen to fail, resolution of "localhost" is broken on this machine
+   *
+   * @throws Exception for any problems
+   */
+  public void testLocalhostResolves() throws Exception {
+    InetAddress localhost = InetAddress.getByName("localhost");
+    assertNotNull("localhost is null", localhost);
+    LOG.info("Localhost IPAddr is " + localhost.toString());
+  }
+
+  public NetworkInterface getFirstInterface() throws SocketException {
+    Enumeration<NetworkInterface> interfaces = NetworkInterface
+            .getNetworkInterfaces();
+    if (interfaces.hasMoreElements()) {
+      return interfaces.nextElement();
+    } else {
+      return null;
+    }
+  }
+
+  public void testGetDefaultHostFromFirstInterface() throws Throwable {
+    String hostname = DNS.getDefaultHost(getFirstInterface().toString(), DEFAULT);
+    assertNotNull("hostname is null", hostname);
+  }
+
+  public void testGetDefaultHostFromAllInterfaces() throws Throwable {
+    Enumeration<NetworkInterface> interfaces = NetworkInterface
+            .getNetworkInterfaces();
+    while(interfaces.hasMoreElements()) {
+      NetworkInterface nic = interfaces.nextElement();
+      String hostname = DNS
+              .getDefaultHost(nic.toString(), DEFAULT);
+      assertNotNull("hostname is null", hostname);
+    }
+  }
+
+  public void testGetDefaultHostFromEth0() throws Throwable {
+    String hostname = DNS
+            .getDefaultHost(getFirstInterface().toString(), DEFAULT);
+    assertNotNull("hostname is null", hostname);
+  }
+
+  public void testReverseDNSIPv4() throws Throwable {
+    InetAddress ipAddr = InetAddress.getByName("140.211.11.9");
+    assertTrue(ipAddr instanceof Inet4Address);
+    String hostname = DNS.reverseDns(ipAddr, null);
+    assertNotNull(hostname);
+  }
+
+  public void testReverseDNSIPv6() throws Throwable {
+    InetAddress ipAddr = InetAddress.getByName("fe80::250:56ff:fec0:8");
+    assertTrue(ipAddr instanceof Inet6Address);
+    try {
+      String hostname = DNS.reverseDns(ipAddr, null);
+      fail("Expected an exception, got "+hostname);
+    } catch (IllegalArgumentException e) {
+      //we expect this
+    }
+
+  }
+
+
+}

Added: hadoop/core/trunk/src/test/org/apache/hadoop/util/TestServiceLifecycle.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/util/TestServiceLifecycle.java?rev=752949&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/util/TestServiceLifecycle.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/util/TestServiceLifecycle.java Thu Mar 12 17:50:03 2009
@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import junit.framework.TestCase;
+
+import java.io.IOException;
+import java.util.List;
+
+
+/**
+ * Test service transitions in a mock service
+ */
+
+public class TestServiceLifecycle extends TestCase {
+  private MockService service;
+
+  public TestServiceLifecycle(String name) {
+    super(name);
+  }
+
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    service = new MockService();
+  }
+
+  @Override
+  protected void tearDown() throws Exception {
+    Service.close(service);
+    super.tearDown();
+  }
+
+  private void ping() throws IOException {
+    service.ping();
+  }
+
+  private void start() throws IOException {
+    service.start();
+  }
+
+  private void close() throws IOException {
+    service.close();
+    assertInTerminatedState();
+  }
+
+  protected void assertInState(Service.ServiceState state)
+          throws Service.ServiceStateException {
+    service.verifyServiceState(state);
+  }
+
+  private void assertInLiveState() throws Service.ServiceStateException {
+    assertInState(Service.ServiceState.LIVE);
+  }
+
+  private void assertInCreatedState() throws Service.ServiceStateException {
+    assertInState(Service.ServiceState.CREATED);
+  }
+
+  private void assertInFailedState() throws Service.ServiceStateException {
+    assertInState(Service.ServiceState.FAILED);
+  }
+
+  private void assertInTerminatedState() throws Service.ServiceStateException {
+    assertInState(Service.ServiceState.CLOSED);
+  }
+
+  private void assertRunning() {
+    assertTrue("Service is not running: " + service, service.isRunning());
+  }
+
+  private void assertNotRunning() {
+    assertFalse("Service is running: " + service, service.isRunning());
+  }
+
+  private void enterState(Service.ServiceState state)
+          throws Service.ServiceStateException {
+    service.changeState(state);
+    assertInState(state);
+  }
+
+
+  private void enterFailedState() throws Service.ServiceStateException {
+    enterState(Service.ServiceState.FAILED);
+  }
+
+  private void enterTerminatedState() throws Service.ServiceStateException {
+    enterState(Service.ServiceState.CLOSED);
+  }
+
+  private void assertStateChangeCount(int expected) {
+    assertEquals("Wrong state change count for " + service,
+            expected,
+            service.getStateChangeCount());
+  }
+
+  private void assertPingCount(int expected) {
+    assertEquals("Wrong ping count for " + service,
+            expected,
+            service.getPingCount());
+  }
+
+  private void assertNoStartFromState(Service.ServiceState serviceState)
+          throws IOException {
+    enterState(serviceState);
+    try {
+      service.start();
+      failShouldNotGetHere();
+    } catch (Service.ServiceStateException expected) {
+      //expected
+    }
+  }
+
+  private void failShouldNotGetHere() {
+    fail("expected failure, but service is in " + service.getServiceState());
+  }
+
+  /**
+   * Test that the ping operation returns a mock exception
+   * @return the service status
+   * @throws IOException IO problems
+   */
+  private Service.ServiceStatus assertPingContainsMockException()
+          throws IOException {
+    Service.ServiceStatus serviceStatus = service.ping();
+    List<Throwable> thrown = serviceStatus.getThrowables();
+    assertFalse("No nested exceptions in service status", thrown.isEmpty());
+    Throwable throwable = thrown.get(0);
+    assertTrue(
+            "Nested exception is not a MockServiceException : "+throwable,
+            throwable instanceof MockService.MockServiceException);
+    return serviceStatus;
+  }
+
+  /**
+   * Walk through the lifecycle and check it changes visible state
+   */
+  public void testBasicLifecycle() throws Throwable {
+    assertInCreatedState();
+    assertNotRunning();
+    assertNotRunning();
+    start();
+    assertInLiveState();
+    assertRunning();
+    ping();
+    ping();
+    assertPingCount(2);
+    close();
+    assertStateChangeCount(3);
+    assertNotRunning();
+  }
+
+  /**
+   * Assert that a state changing operation is idempotent
+   * @throws Throwable if something went wrong
+   */
+  public void testStartIdempotent() throws Throwable {
+    start();
+    int count = service.getStateChangeCount();
+    //declare that we want to fail in our start operation
+    service.setFailOnStart(true);
+    //then start. If the innerStart() method is called: failure
+    start();
+    //check that the state count has not changed either.
+    assertStateChangeCount(count);
+    assertInLiveState();
+  }
+
+  public void testTerminateIdempotent() throws Throwable {
+    close();
+    int count = service.getStateChangeCount();
+    close();
+    assertStateChangeCount(count);
+  }
+
+  public void testCloseFromCreated() throws Throwable {
+    close();
+  }
+
+  public void testStaticCloseHandlesNull() throws Throwable {
+    Service.close(null);
+  }
+
+
+  public void testStaticCloseOperation() throws Throwable {
+    Service.close(service);
+    assertInTerminatedState();
+    Service.close(service);
+  }
+
+  public void testFailInStart() throws Throwable {
+    service.setFailOnStart(true);
+    try {
+      start();
+      failShouldNotGetHere();
+    } catch (MockService.MockServiceException e) {
+      assertInFailedState();
+    }
+  }
+
+  public void testPingInFailedReturnsException() throws Throwable {
+    service.setFailOnStart(true);
+    try {
+      start();
+      failShouldNotGetHere();
+    } catch (MockService.MockServiceException e) {
+      assertInFailedState();
+      //and test that the ping works out
+      Service.ServiceStatus serviceStatus = assertPingContainsMockException();
+      assertEquals(Service.ServiceState.FAILED, serviceStatus.getState());
+    }
+  }
+
+  public void testTerminateFromFailure() throws Throwable {
+    enterFailedState();
+    //test that we can get from failed to terminated
+    close();
+  }
+
+  public void testFailInPing() throws Throwable {
+    service.setFailOnPing(true);
+    start();
+    Service.ServiceStatus serviceStatus = service.ping();
+    assertEquals(Service.ServiceState.FAILED, serviceStatus.getState());
+    assertPingCount(1);
+    List<Throwable> thrown = serviceStatus.getThrowables();
+    assertEquals(1, thrown.size());
+    Throwable throwable = thrown.get(0);
+    assertTrue(throwable instanceof MockService.MockServiceException);
+  }
+
+  public void testPingInCreated() throws Throwable {
+    service.setFailOnPing(true);
+    ping();
+    assertPingCount(0);
+  }
+
+
+  /**
+   * Test that when in a failed state, you can't ping the service
+   *
+   * @throws Throwable if needed
+   */
+  public void testPingInFailedStateIsNoop() throws Throwable {
+    enterFailedState();
+    assertInFailedState();
+    Service.ServiceStatus serviceStatus = service.ping();
+    assertEquals(Service.ServiceState.FAILED, serviceStatus.getState());
+    assertPingCount(0);
+  }
+
+  /**
+   * Test that when in a terminated state, you can't ping the service
+   *
+   * @throws Throwable if needed
+   */
+  public void testPingInTerminatedStateIsNoop() throws Throwable {
+    enterTerminatedState();
+    assertInTerminatedState();
+    Service.ServiceStatus serviceStatus = service.ping();
+    assertEquals(Service.ServiceState.CLOSED, serviceStatus.getState());
+    assertPingCount(0);
+  }
+
+  public void testDeploy() throws Throwable {
+    Service.deploy(service);
+    assertInLiveState();
+  }
+
+  public void testDeployFailingStart() throws Throwable {
+    service.setFailOnStart(true);
+    try {
+      Service.deploy(service);
+    } catch (MockService.MockServiceException e) {
+      assertInTerminatedState();
+    }
+  }
+
+  public void testNoStartFromTerminated() throws Throwable {
+    assertNoStartFromState(Service.ServiceState.CLOSED);
+  }
+
+  public void testNoStartFromFailed() throws Throwable {
+    assertNoStartFromState(Service.ServiceState.FAILED);
+  }
+
+  public void testStartFromLiveIdempotent() throws Throwable {
+    enterState(Service.ServiceState.LIVE);
+    int count = service.getStateChangeCount();
+    start();
+    assertStateChangeCount(count);
+  }
+
+  public void testFailOnClose() throws Throwable {
+    service.setFailOnClose(true);
+    try {
+      service.close();
+      fail("Should have thrown an exception");
+    } catch (IOException e) {
+      assertInTerminatedState();
+      assertTrue(service.isClosed());
+    }
+    //the second call should be a no-op; no exceptions get thrown
+    service.close();
+  }
+
+  public void testFailIdempotent() throws Throwable {
+    Exception cause = new Exception("test");
+    service.enterFailedState(null);
+    int count = service.getStateChangeCount();
+    service.enterFailedState(cause);
+    assertStateChangeCount(count);
+    assertEquals(cause, service.getFailureCause());
+  }
+
+  public void testFailFromTerminatedDoesNotChangeState() throws Throwable {
+    Service.deploy(service);
+    service.close();
+    assertInTerminatedState();
+    Exception cause = new Exception("test");
+    service.enterFailedState(cause);
+    assertInTerminatedState();
+    assertEquals(cause,service.getFailureCause());
+  }
+
+  public void testFailFromFailedDoesNotChangeCause() throws Throwable {
+    Exception cause = new Exception("test");
+    service.enterFailedState(cause);
+    assertInFailedState();
+    service.enterFailedState(new Exception("test2"));
+    assertInFailedState();
+    assertEquals(cause, service.getFailureCause());
+  }
+
+}



Mime
View raw message