hadoop-common-commits mailing list archives

From cutt...@apache.org
Subject svn commit: r505512 - in /lucene/hadoop/trunk: ./ src/c++/libhdfs/ src/c++/libhdfs/tests/ src/contrib/streaming/src/test/org/apache/hadoop/streaming/ src/test/org/apache/hadoop/mapred/
Date Fri, 09 Feb 2007 21:43:31 GMT
Author: cutting
Date: Fri Feb  9 13:43:30 2007
New Revision: 505512

URL: http://svn.apache.org/viewvc?view=rev&rev=505512
Log:
HADOOP-761.  Change unit tests to not use /tmp.  Contributed by Nigel.
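The change is mechanical but worth spelling out: every test that previously hard-coded /tmp now resolves its scratch directory from the test.build.data system property (typically set by the Ant build) and falls back to /tmp only when it is absent. A minimal sketch of that idiom, with a hypothetical class name for illustration only:

    import java.io.File;

    // Hypothetical sketch of the idiom applied throughout this commit:
    // prefer the build-supplied scratch directory, fall back to /tmp.
    public class TestRootDirExample {
      public static void main(String[] args) {
        String testRootDir =
          new File(System.getProperty("test.build.data", "/tmp")).toString();
        System.out.println("test data root: " + testRootDir);
      }
    }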

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/c++/libhdfs/hdfs_test.c
    lucene/hadoop/trunk/src/c++/libhdfs/tests/test-libhdfs.sh
    lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java
    lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java
    lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRCaching.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/PiEstimator.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=505512&r1=505511&r2=505512
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Fri Feb  9 13:43:30 2007
@@ -30,6 +30,8 @@
     and stderr respectively, with each line tagged by the task's name.
     (Arun C Murthy via cutting)
 
+ 9. HADOOP-761.  Change unit tests to not use /tmp.  (Nigel Daley via cutting)
+
 
 Branch 0.11 - unreleased
 

Modified: lucene/hadoop/trunk/src/c++/libhdfs/hdfs_test.c
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/c%2B%2B/libhdfs/hdfs_test.c?view=diff&rev=505512&r1=505511&r2=505512
==============================================================================
--- lucene/hadoop/trunk/src/c++/libhdfs/hdfs_test.c (original)
+++ lucene/hadoop/trunk/src/c++/libhdfs/hdfs_test.c Fri Feb  9 13:43:30 2007
@@ -114,31 +114,47 @@
         hdfsCloseFile(fs, readFile);
     }
 
- 
+    int totalResult = 0;
+    int result = 0;
     {
         //Generic file-system operations
 
         const char* srcPath = "/tmp/testfile.txt";
+        const char* localSrcPath = "testfile.txt";
         const char* dstPath = "/tmp/testfile2.txt";
+        const char* localDstPath = "testfile2.txt";
 
-        fprintf(stderr, "hdfsCopy(remote-local): %s\n", (hdfsCopy(fs, srcPath, lfs, srcPath)
? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", (hdfsCopy(fs, srcPath, fs, dstPath)
? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsMove(local-local): %s\n", (hdfsMove(lfs, srcPath, lfs, dstPath)
? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsMove(remote-local): %s\n", (hdfsMove(fs, srcPath, lfs, srcPath)
? "Failed!" : "Success!"));
-
-        fprintf(stderr, "hdfsRename: %s\n", (hdfsRename(fs, dstPath, srcPath) ? "Failed!"
: "Success!"));
-
-        fprintf(stderr, "hdfsLock: %s\n", (hdfsLock(fs, srcPath, 1) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsReleaseLock: %s\n", (hdfsReleaseLock(fs, srcPath) ? "Failed!"
: "Success!"));
+        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath,
lfs, srcPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath,
fs, dstPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath,
lfs, dstPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath,
lfs, srcPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath))
? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath,
fs, dstPath)) ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsLock: %s\n", ((result = hdfsLock(fs, srcPath, 1)) ? "Failed!"
: "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsReleaseLock: %s\n", ((result = hdfsReleaseLock(fs, srcPath))
? "Failed!" : "Success!"));
+        totalResult += result;
 
         const char* slashTmp = "/tmp";
         const char* newDirectory = "/tmp/newdir";
-        fprintf(stderr, "hdfsCreateDirectory: %s\n", (hdfsCreateDirectory(fs, newDirectory)
? "Failed!" : "Success!"));
+        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory))
? "Failed!" : "Success!"));
+        totalResult += result;
 
         char buffer[256];
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", (hdfsGetWorkingDirectory(fs, buffer,
sizeof(buffer)) ? buffer : "Failed!"));
-        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", (hdfsSetWorkingDirectory(fs, slashTmp)
? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", (hdfsGetWorkingDirectory(fs, buffer,
sizeof(buffer)) ? buffer : "Failed!"));
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((result = hdfsGetWorkingDirectory(fs,
buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        totalResult += (result ? 0 : 1);
+        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs,
slashTmp)) ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((result = hdfsGetWorkingDirectory(fs,
buffer, sizeof(buffer))) ? buffer : "Failed!"));
+        totalResult += (result ? 0 : 1);
 
         fprintf(stderr, "hdfsGetDefaultBlockSize: %Ld\n", hdfsGetDefaultBlockSize(fs));
         fprintf(stderr, "hdfsGetCapacity: %Ld\n", hdfsGetCapacity(fs));
@@ -152,6 +168,7 @@
             fprintf(stderr, "Size: %ld\n", fileInfo->mSize);
             hdfsFreeFileInfo(fileInfo, 1);
         } else {
+            totalResult++;
             fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
         }
 
@@ -167,6 +184,7 @@
             hdfsFreeFileInfo(fileList, numEntries);
         } else {
             if (errno) {
+                totalResult++;
                 fprintf(stderr, "waah! hdfsListDirectory - FAILED!\n");
             } else {
                 fprintf(stderr, "Empty directory!\n");
@@ -187,18 +205,28 @@
                 ++i;
             }
         } else {
+            totalResult++;
             fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
         }
         
         // Clean up
-        fprintf(stderr, "hdfsDelete: %s\n", (hdfsDelete(fs, newDirectory) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsDelete: %s\n", (hdfsDelete(fs, srcPath) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsDelete: %s\n", (hdfsDelete(lfs, srcPath) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsDelete: %s\n", (hdfsDelete(lfs, dstPath) ? "Failed!" : "Success!"));
-        fprintf(stderr, "hdfsExists: %s\n", (hdfsExists(fs, newDirectory) ? "Success!" :
"Failed!"));
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory)) ? "Failed!"
: "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath)) ? "Failed!"
: "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath)) ? "Failed!"
: "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath)) ? "Failed!"
: "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!"
: "Failed!"));
+        totalResult += (result ? 0 : 1);
     }
 
-    return 0;
+    if (totalResult != 0) {
+        return -1;
+    } else {
+        return 0;
+    }
 }
 
 /**
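Beyond redirecting paths, the hdfs_test.c change above also turns the test into a real pass/fail program: each API call's return code is folded into totalResult, and main() exits non-zero if anything failed, so a test harness can detect failures without scanning stderr. A hedged Java analogue of that accumulate-and-fail idiom (names are illustrative, not from the source):

    // Illustrative Java analogue of the accumulate-and-fail idiom used in
    // hdfs_test.c; doCheck() is a hypothetical stand-in for each API call.
    public class AccumulateFailures {
      public static void main(String[] args) {
        int totalResult = 0;
        int result;

        result = doCheck("first check");   // 0 on success, non-zero on failure
        totalResult += result;
        result = doCheck("second check");
        totalResult += result;

        // Exit non-zero if any step failed, so harnesses notice.
        System.exit(totalResult != 0 ? -1 : 0);
      }

      static int doCheck(String name) {
        System.err.println(name + ": Success!");
        return 0; // placeholder status
      }
    }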

Modified: lucene/hadoop/trunk/src/c++/libhdfs/tests/test-libhdfs.sh
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/c%2B%2B/libhdfs/tests/test-libhdfs.sh?view=diff&rev=505512&r1=505511&r2=505512
==============================================================================
--- lucene/hadoop/trunk/src/c++/libhdfs/tests/test-libhdfs.sh (original)
+++ lucene/hadoop/trunk/src/c++/libhdfs/tests/test-libhdfs.sh Fri Feb  9 13:43:30 2007
@@ -28,10 +28,13 @@
 HADOOP_LIB_DIR=$HADOOP_HOME/lib
 HADOOP_BIN_DIR=$HADOOP_HOME/bin
 
-## Manipulate HADOOP_CONF_DIR so as to include 
-## HADOOP_HOME/conf/hadoop-default.xml too
+# Manipulate HADOOP_CONF_DIR so as to include 
+# HADOOP_HOME/conf/hadoop-default.xml too
 # which is necessary to circumvent bin/hadoop
 HADOOP_CONF_DIR=$HADOOP_CONF_DIR:$HADOOP_HOME/conf
+
+# set pid file dir so they are not written to /tmp
+export HADOOP_PID_DIR=$HADOOP_LOG_DIR
 
 # CLASSPATH initially contains $HADOOP_CONF_DIR
 CLASSPATH="${HADOOP_CONF_DIR}"

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java?view=diff&rev=505512&r1=505511&r2=505512
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java Fri Feb  9 13:43:30 2007
@@ -139,6 +139,7 @@
         "-dfs", conf_.get("fs.default.name"), 
         "-jt", "local",
         "-jobconf", "stream.sideoutput.localfs=true", 
+        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
     };
     ArrayList argList = new ArrayList();
     argList.addAll(Arrays.asList(testargs));

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java?view=diff&rev=505512&r1=505511&r2=505512
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java Fri Feb  9 13:43:30 2007
@@ -69,7 +69,8 @@
         "-reducer", reduce,
         //"-verbose",
         //"-jobconf", "stream.debug=set"
-        "-jobconf", "keep.failed.task.files=true"
+        "-jobconf", "keep.failed.task.files=true",
+        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
         };
   }
   

Modified: lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java?view=diff&rev=505512&r1=505511&r2=505512
==============================================================================
--- lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java (original)
+++ lucene/hadoop/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java Fri Feb  9 13:43:30 2007
@@ -75,6 +75,7 @@
             //"-jobconf", "stream.debug=set"
             "-jobconf", strNamenode,
             "-jobconf", strJobtracker,
+            "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
             "-cacheFile", "dfs://"+fileSys.getName()+CACHE_FILE + "#testlink"
         };
 

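All three streaming tests above gain the same extra argument: a -jobconf entry pointing stream.tmpdir at the build's scratch directory rather than /tmp. A minimal sketch of how such an argument list is assembled (the mapper and reducer values here are placeholders):

    // Sketch: streaming test args with the temporary directory redirected
    // away from /tmp whenever the build supplies test.build.data.
    public class StreamingArgsExample {
      public static void main(String[] args) {
        String[] testargs = {
          "-mapper", "cat",    // placeholder mapper
          "-reducer", "cat",   // placeholder reducer
          "-jobconf", "stream.tmpdir=" +
              System.getProperty("test.build.data", "/tmp")
        };
        for (String arg : testargs) {
          System.out.println(arg);
        }
      }
    }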
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRCaching.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRCaching.java?view=diff&rev=505512&r1=505511&r2=505512
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRCaching.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRCaching.java Fri Feb  9 13:43:30 2007
@@ -62,8 +62,9 @@
         Path[] localFiles = DistributedCache.getLocalCacheFiles(conf);
         FileSystem fs = FileSystem.get(conf);
         // read the cached files (unzipped, unjarred and text)
-        // and put it into a single file /tmp/test.txt
-        Path file = new Path("/tmp");
+        // and put it into a single file TEST_ROOT_DIR/test.txt
+        String TEST_ROOT_DIR = jconf.get("test.build.data","/tmp");
+        Path file = new Path(TEST_ROOT_DIR);
         if (!fs.mkdirs(file)) {
           throw new IOException("Mkdirs failed to create " + file.toString());
         }
@@ -127,6 +128,9 @@
   public static boolean launchMRCache(String indir,
       String outdir, JobConf conf, String input)
       throws IOException {
+    String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data","/tmp"))
+      .toString().replace(' ', '+');
+    conf.set("test.build.data",TEST_ROOT_DIR);
     final Path inDir = new Path(indir);
     final Path outDir = new Path(outdir);
     FileSystem fs = FileSystem.get(conf);
@@ -158,7 +162,7 @@
     Path txtPath = new Path(localPath, new Path("test.txt"));
     Path jarPath = new Path(localPath, new Path("test.jar"));
     Path zipPath = new Path(localPath, new Path("test.zip"));
-    Path cacheTest = new Path("/tmp/cachedir");
+    Path cacheTest = new Path(TEST_ROOT_DIR + "/cachedir");
     fs.delete(cacheTest);
     if (!fs.mkdirs(cacheTest)) {
       throw new IOException("Mkdirs failed to create " + cacheTest.toString());
@@ -168,9 +172,9 @@
     fs.copyFromLocalFile(zipPath, cacheTest);
     // setting the cached archives to zip, jar and simple text files
     String fileSys = fs.getName();
-    String archive1 = "dfs://" + fileSys + "/tmp/cachedir/test.jar";
-    String archive2 = "dfs://" + fileSys + "/tmp/cachedir/test.zip"; 
-    String file1 = "dfs://" + fileSys + "/tmp/cachedir/test.txt";
+    String archive1 = "dfs://" + fileSys + TEST_ROOT_DIR + "/cachedir/test.jar";
+    String archive2 = "dfs://" + fileSys + TEST_ROOT_DIR + "/cachedir/test.zip";
+    String file1 = "dfs://" + fileSys + TEST_ROOT_DIR + "/cachedir/test.txt";
     URI uri1 = null;
     URI uri2 = null;
     URI uri3 = null;
@@ -187,7 +191,7 @@
     int count = 0;
     // after the job ran check to see if the the input from the localized cache
     // match the real string. check if there are 3 instances or not.
-    Path result = new Path("/tmp/test.txt");
+    Path result = new Path(TEST_ROOT_DIR + "/test.txt");
     {
       BufferedReader file = new BufferedReader(new InputStreamReader(fs
           .open(result)));

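MRCaching derives TEST_ROOT_DIR the same way, with one extra wrinkle: spaces in the path are replaced with '+' so the value can be embedded in the dfs:// cache URIs built later in the same method. A hedged sketch of that derivation (the fileSys value is a placeholder for fs.getName()):

    import java.io.File;

    // Illustrative: derive a scratch root whose spaces won't break the
    // dfs:// URIs that the cached archives are addressed by.
    public class CacheRootExample {
      public static void main(String[] args) {
        String testRootDir =
          new File(System.getProperty("test.build.data", "/tmp"))
            .toString().replace(' ', '+');
        String fileSys = "localhost:8020"; // placeholder for fs.getName()
        System.out.println("dfs://" + fileSys + testRootDir + "/cachedir/test.jar");
      }
    }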
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/PiEstimator.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/PiEstimator.java?view=diff&rev=505512&r1=505511&r2=505512
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/PiEstimator.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/PiEstimator.java Fri Feb  9 13:43:30 2007
@@ -31,7 +31,7 @@
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 
 /**
- * A Map-reduce program to estimaate the valu eof Pi using monte-carlo
+ * A Map-reduce program to estimate the value of Pi using monte-carlo
  * method.
  *
  * @author Milind Bhandarkar

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java?view=diff&rev=505512&r1=505511&r2=505512
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java Fri Feb  9 13:43:30 2007
@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapred;
 
 import java.io.IOException;
+import java.io.File;
 import junit.framework.TestCase;
 
 /**
@@ -28,8 +29,11 @@
  */
 public class TestMiniMRLocalFS extends TestCase {
   
-    static final int NUM_MAPS = 10;
-    static final int NUM_SAMPLES = 100000;
+  static final int NUM_MAPS = 10;
+  static final int NUM_SAMPLES = 100000;
+  private static String TEST_ROOT_DIR =
+    new File(System.getProperty("test.build.data","/tmp"))
+    .toString().replace(' ', '+');
     
   public void testWithLocal() throws IOException {
       MiniMRCluster mr = null;
@@ -41,8 +45,8 @@
           assertTrue("Error in PI estimation "+error+" exceeds 0.01", (error < 0.01));
           // run the wordcount example with caching
           JobConf job = mr.createJobConf();
-          boolean ret = MRCaching.launchMRCache("/tmp/wc/input",
-                                                "/tmp/wc/output", 
+          boolean ret = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input",
+                                                TEST_ROOT_DIR + "/wc/output", 
                                                 job,
                                                 "The quick brown fox\nhas many silly\n"
                                                     + "red fox sox\n");


