hadoop-common-commits mailing list archives

From: ste...@apache.org
Subject: svn commit: r783055 [6/6] - in /hadoop/core/branches/HADOOP-3628-2: ./ .eclipse.templates/ ivy/ lib/ lib/jsp-2.1/ src/contrib/ src/contrib/capacity-scheduler/ src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/ src/contrib/capacity-schedu...
Date: Tue, 09 Jun 2009 16:11:23 GMT
Modified: hadoop/core/branches/HADOOP-3628-2/src/test/findbugsExcludeFile.xml
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/findbugsExcludeFile.xml?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/findbugsExcludeFile.xml (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/findbugsExcludeFile.xml Tue Jun  9 16:11:19 2009
@@ -220,9 +220,17 @@
        <Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
      </Match>
 
+     <!--
+       CreateBlockWriteStreams and getTmpInputStreams are pretty much like a stream constructor.
+       The newly created streams are not supposed to be closed in the constructor. So ignore
+       the OBL warning.
+     -->
      <Match>
-       <Class name="org.apache.hadoop.examples.ContextFactory" />
-       <Method name="setAttributes" />
+       <Class name="org.apache.hadoop.hdfs.server.datanode.FSDataset" />
+       <Or>
+         <Method name="createBlockWriteStreams" />
+         <Method name="getTmpInputStreams" />
+       </Or>
        <Bug pattern="OBL_UNSATISFIED_OBLIGATION" />
      </Match>
 

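For readers unfamiliar with the OBL_UNSATISFIED_OBLIGATION pattern being excluded above: FindBugs raises it when a method opens a stream and never closes it. For factory-style methods such as createBlockWriteStreams and getTmpInputStreams the open stream is the return value and the caller is responsible for closing it, so the report is a false positive. A minimal, hypothetical illustration of that pattern (the class and method below are invented for illustration, not Hadoop code):

    import java.io.File;
    import java.io.FileNotFoundException;
    import java.io.FileOutputStream;
    import java.io.OutputStream;

    class StreamFactoryExample {
      // FindBugs flags the unclosed FileOutputStream here, but closing it would
      // defeat the purpose of the method: the caller owns and closes the stream.
      static OutputStream openBlockFile(File blockFile) throws FileNotFoundException {
        return new FileOutputStream(blockFile);
      }
    }
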
Modified: hadoop/core/branches/HADOOP-3628-2/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestCopyFiles.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestCopyFiles.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestCopyFiles.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestCopyFiles.java Tue Jun  9 16:11:19 2009
@@ -295,6 +295,37 @@
       if (cluster != null) { cluster.shutdown(); }
     }
   }
+
+  /** copy empty directory on dfs file system */
+  public void testEmptyDir() throws Exception {
+    String namenode = null;
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster(conf, 2, true, null);
+      final FileSystem hdfs = cluster.getFileSystem();
+      namenode = FileSystem.getDefaultUri(conf).toString();
+      if (namenode.startsWith("hdfs://")) {
+        
+        FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
+        fs.mkdirs(new Path("/empty"));
+
+        ToolRunner.run(new DistCp(conf), new String[] {
+                                         "-log",
+                                         namenode+"/logs",
+                                         namenode+"/empty",
+                                         namenode+"/dest"});
+        fs = FileSystem.get(URI.create(namenode+"/destdat"), conf);
+        assertTrue("Destination directory does not exist.",
+                   fs.exists(new Path(namenode+"/dest")));
+        deldir(hdfs, "/dest");
+        deldir(hdfs, "/empty");
+        deldir(hdfs, "/logs");
+      }
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
   
   /** copy files from local file system to dfs file system */
   public void testCopyFromLocalToDfs() throws Exception {
@@ -380,7 +411,7 @@
         deldir(hdfs, "/logs");
 
         ToolRunner.run(new DistCp(conf), new String[] {
-                                         "-p",
+                                         "-prbugp", // no t to avoid preserving mod. times
                                          "-update",
                                          "-log",
                                          namenode+"/logs",
@@ -393,7 +424,7 @@
 
         deldir(hdfs, "/logs");
         ToolRunner.run(new DistCp(conf), new String[] {
-                                         "-p",
+                                         "-prbugp", // no t to avoid preserving mod. times
                                          "-overwrite",
                                          "-log",
                                          namenode+"/logs",
@@ -551,6 +582,32 @@
         deldir(fs, "/destdat");
         deldir(fs, "/srcdat");
       }
+
+      {//test preserving times
+        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
+        fs.mkdirs(new Path("/srcdat/tmpf1"));
+        fs.mkdirs(new Path("/srcdat/tmpf2"));
+        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
+        FsPermission[] permissions = new FsPermission[srcstat.length];
+        for(int i = 0; i < srcstat.length; i++) {
+          fs.setTimes(srcstat[i].getPath(), 40, 50);
+        }
+
+        ToolRunner.run(new DistCp(conf),
+            new String[]{"-pt", nnUri+"/srcdat", nnUri+"/destdat"});
+
+        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
+        for(int i = 0; i < dststat.length; i++) {
+          assertEquals("Modif. Time i=" + i, 40, dststat[i].getModificationTime());
+          assertEquals("Access Time i=" + i+ srcstat[i].getPath() + "-" + dststat[i].getPath(),
50, dststat[i].getAccessTime());
+        }
+        
+        assertTrue("Source and destination directories do not match.",
+                   checkFiles(fs, "/destdat", files));
+  
+        deldir(fs, "/destdat");
+        deldir(fs, "/srcdat");
+      }
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }

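The "test preserving times" block added above exercises the new time-preservation attribute that this commit adds to DistCp's -p flag (see the DistCp.java hunks further down, where -p gains a t option and a TIMES file attribute). As a rough standalone sketch of the same invocation outside the MiniDFSCluster harness (the namenode URI and paths here are placeholders, not values from this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.util.ToolRunner;

    public class PreserveTimesCopy {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // -pt preserves only modification and access times; plain -p now
        // expands to -prbugpt, and -prbugp is -p without the times.
        String src = "hdfs://namenode:8020/srcdat";   // placeholder source
        String dst = "hdfs://namenode:8020/destdat";  // placeholder destination
        int rc = ToolRunner.run(new DistCp(conf), new String[] {"-pt", src, dst});
        System.exit(rc);
      }
    }
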
Modified: hadoop/core/branches/HADOOP-3628-2/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBench.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBench.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBench.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBench.java Tue Jun  9 16:11:19 2009
@@ -150,8 +150,6 @@
         writer = SequenceFile.createWriter(tempFS, config, filePath, Text.class, 
                 LongWritable.class, CompressionType.NONE);
         writer.append(new Text(strFileName), new LongWritable(0l));
-      } catch(Exception e) {
-        throw new IOException(e.getLocalizedMessage());
       } finally {
         if (writer != null) {
           writer.close();

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java Tue Jun  9 16:11:19 2009
@@ -29,7 +29,6 @@
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.ToolRunner;
 
 public class TestDFSShellGenericOptions extends TestCase {
@@ -101,8 +100,8 @@
     FileSystem fs=null;
     try {
       ToolRunner.run(shell, args);
-      fs = new DistributedFileSystem(NameNode.getAddress(namenode), 
-                                     shell.getConf());
+      fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
+          shell.getConf());
       assertTrue("Directory does not get created", 
                  fs.isDirectory(new Path("/data")));
       fs.delete(new Path("/data"), true);

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java Tue Jun  9 16:11:19 2009
@@ -17,22 +17,21 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
+
 import java.io.File;
+
 import junit.framework.TestCase;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
-
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
-import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
-
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 
 /**
  * This test ensures the appropriate response (successful or failure) from 
@@ -42,8 +41,6 @@
   
   private static final Log LOG = LogFactory.getLog(
                                                    "org.apache.hadoop.hdfs.TestDFSStartupVersions");
-  private static Path TEST_ROOT_DIR = new Path(
-                                               System.getProperty("test.build.data","/tmp").toString().replace(' ', '+'));
   private MiniDFSCluster cluster = null;
   
   /**

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Tue Jun  9 16:11:19 2009
@@ -34,7 +34,6 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java Tue Jun  9 16:11:19 2009
@@ -35,10 +35,6 @@
   static final int blockSize = 8192;
   static final int fileSize = 16384;
 
-  private static String TEST_ROOT_DIR =
-    new Path(System.getProperty("test.build.data","/tmp"))
-    .toString().replace(' ', '+');
-  
   private void writeFile(FileSystem fileSys, Path name, int repl,
                          int fileSize, int blockSize)
     throws IOException {
@@ -99,7 +95,6 @@
 
       // create an empty directory
       //
-      Path parentDir = new Path("/test");
       Path dir = new Path("/test/mkdirs");
       assertTrue(fs.mkdirs(dir));
       assertTrue(fs.exists(dir));

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java Tue Jun  9 16:11:19 2009
@@ -49,7 +49,7 @@
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      this.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
                         expected[from+idx]+" actual "+actual[idx],
                         actual[idx], expected[from+idx]);
       actual[idx] = 0;

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java Tue Jun  9 16:11:19 2009
@@ -52,7 +52,7 @@
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      this.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
                         expected[from+idx]+" actual "+actual[idx],
                         actual[idx], expected[from+idx]);
       actual[idx] = 0;

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java Tue Jun  9 16:11:19 2009
@@ -210,9 +210,9 @@
           try {
             URI inputURI = new URI(job.get("map.input.file"));
             String inputFile = inputURI.getPath();
-            partition = Integer.valueOf(
-                                        inputFile.substring(inputFile.lastIndexOf("part")+5)
-                                        ).intValue();
+            // part file is of the form part-r-xxxxx
+            partition = Integer.valueOf(inputFile.substring(
+              inputFile.lastIndexOf("part") + 7)).intValue();
             noSortReducers = job.getInt("sortvalidate.sort.reduce.tasks", -1);
           } catch (Exception e) {
             System.err.println("Caught: " + e);

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java Tue Jun  9 16:11:19 2009
@@ -26,6 +26,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -47,6 +48,26 @@
 
   MiniMRCluster mr = null;
 
+  /** Committer with cleanup waiting on a signal
+   */
+  static class CommitterWithDelayCleanup extends FileOutputCommitter {
+    @Override
+    public void cleanupJob(JobContext context) throws IOException {
+      Configuration conf = context.getConfiguration();
+      Path share = new Path(conf.get("share"));
+      FileSystem fs = FileSystem.get(conf);
+
+      
+      while (true) {
+        if (fs.exists(share)) {
+          break;
+        }
+        UtilsForTests.waitFor(100);
+      }
+      super.cleanupJob(context);
+    }
+  }
+
   /**
    * Simple method running a MapReduce job with no input data. Used to test that
    * such a job is successful.
@@ -62,8 +83,13 @@
     // create an empty input dir
     final Path inDir = new Path(TEST_ROOT_DIR, "testing/empty/input");
     final Path outDir = new Path(TEST_ROOT_DIR, "testing/empty/output");
+    final Path inDir2 = new Path(TEST_ROOT_DIR, "testing/dummy/input");
+    final Path outDir2 = new Path(TEST_ROOT_DIR, "testing/dummy/output");
+    final Path share = new Path(TEST_ROOT_DIR, "share");
+
     JobConf conf = mr.createJobConf();
     FileSystem fs = FileSystem.get(fileSys, conf);
+    fs.delete(new Path(TEST_ROOT_DIR), true);
     fs.delete(outDir, true);
     if (!fs.mkdirs(inDir)) {
       LOG.warn("Can't create " + inDir);
@@ -75,6 +101,7 @@
     conf.setJobName("empty");
     // use an InputFormat which returns no split
     conf.setInputFormat(EmptyInputFormat.class);
+    conf.setOutputCommitter(CommitterWithDelayCleanup.class);
     conf.setOutputKeyClass(Text.class);
     conf.setOutputValueClass(IntWritable.class);
     conf.setMapperClass(IdentityMapper.class);
@@ -83,11 +110,53 @@
     FileOutputFormat.setOutputPath(conf, outDir);
     conf.setNumMapTasks(numMaps);
     conf.setNumReduceTasks(numReduces);
+    conf.set("share", share.toString());
 
     // run job and wait for completion
     JobClient jc = new JobClient(conf);
     RunningJob runningJob = jc.submitJob(conf);
+    JobInProgress job = mr.getJobTrackerRunner().getJobTracker().getJob(runningJob.getID());
+    
+    while (true) {
+      if (job.isCleanupLaunched()) {
+        LOG.info("Waiting for cleanup to be launched for job " 
+                 + runningJob.getID());
+        break;
+      }
+      UtilsForTests.waitFor(100);
+    }
+    
+    // submit another job so that the map load increases and scheduling happens
+    LOG.info("Launching dummy job ");
+    RunningJob dJob = null;
+    try {
+      JobConf dConf = new JobConf(conf);
+      dConf.setOutputCommitter(FileOutputCommitter.class);
+      dJob = UtilsForTests.runJob(dConf, inDir2, outDir2, 2, 0);
+    } catch (Exception e) {
+      LOG.info("Exception ", e);
+      throw new IOException(e);
+    }
+    
+    while (true) {
+      LOG.info("Waiting for job " + dJob.getID() + " to complete");
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException e) {
+      }
+      if (dJob.isComplete()) {
+        break;
+      }
+    }
+    
+    // check if the second job is successful
+    assertTrue(dJob.isSuccessful());
+
+    // signal the cleanup
+    fs.create(share).close();
+    
     while (true) {
+      LOG.info("Waiting for job " + runningJob.getID() + " to complete");
       try {
         Thread.sleep(100);
       } catch (InterruptedException e) {
@@ -148,7 +217,7 @@
       throws IOException {
     FileSystem fileSys = null;
     try {
-      final int taskTrackers = 1;
+      final int taskTrackers = 2;
       JobConf conf = new JobConf();
       fileSys = FileSystem.get(conf);
 

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputFormat.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputFormat.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputFormat.java Tue Jun  9 16:11:19 2009
@@ -32,7 +32,7 @@
 public class TestFileOutputFormat extends HadoopTestCase {
 
   public TestFileOutputFormat() throws IOException {
-    super(HadoopTestCase.CLUSTER_MR, HadoopTestCase.LOCAL_FS, 1, 1);
+    super(HadoopTestCase.LOCAL_MR, HadoopTestCase.LOCAL_FS, 1, 1);
   }
 
   public void testCustomFile() throws Exception {

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestJavaSerialization.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestJavaSerialization.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestJavaSerialization.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestJavaSerialization.java Tue Jun  9 16:11:19 2009
@@ -18,6 +18,7 @@
 package org.apache.hadoop.mapred;
 
 import java.io.BufferedReader;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
@@ -27,14 +28,25 @@
 import java.util.Iterator;
 import java.util.StringTokenizer;
 
+import junit.framework.TestCase;
+
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.serializer.JavaSerializationComparator;
 
-public class TestJavaSerialization extends ClusterMapReduceTestCase {
-  
+public class TestJavaSerialization extends TestCase {
+
+  private static String TEST_ROOT_DIR =
+    new File(System.getProperty("test.build.data", "/tmp")).toURI()
+    .toString().replace(' ', '+');
+
+  private final Path INPUT_DIR = new Path(TEST_ROOT_DIR + "/input");
+  private final Path OUTPUT_DIR = new Path(TEST_ROOT_DIR + "/out");
+  private final Path INPUT_FILE = new Path(INPUT_DIR , "inp");
+
   static class WordCountMapper extends MapReduceBase implements
       Mapper<LongWritable, Text, String, Long> {
 
@@ -64,17 +76,26 @@
     }
     
   }
-  
-  public void testMapReduceJob() throws Exception {
-    OutputStream os = getFileSystem().create(new Path(getInputDir(),
-        "text.txt"));
+
+  private void cleanAndCreateInput(FileSystem fs) throws IOException {
+    fs.delete(INPUT_FILE, true);
+    fs.delete(OUTPUT_DIR, true);
+
+    OutputStream os = fs.create(INPUT_FILE);
+
     Writer wr = new OutputStreamWriter(os);
     wr.write("b a\n");
     wr.close();
+  }
+  
+  public void testMapReduceJob() throws Exception {
 
-    JobConf conf = createJobConf();
+    JobConf conf = new JobConf(TestJavaSerialization.class);
     conf.setJobName("JavaSerialization");
     
+    FileSystem fs = FileSystem.get(conf);
+    cleanAndCreateInput(fs);
+
     conf.set("io.serializations",
     "org.apache.hadoop.io.serializer.JavaSerialization," +
     "org.apache.hadoop.io.serializer.WritableSerialization");
@@ -88,17 +109,16 @@
     conf.setMapperClass(WordCountMapper.class);
     conf.setReducerClass(SumReducer.class);
 
-    FileInputFormat.setInputPaths(conf, getInputDir());
+    FileInputFormat.setInputPaths(conf, INPUT_DIR);
 
-    FileOutputFormat.setOutputPath(conf, getOutputDir());
+    FileOutputFormat.setOutputPath(conf, OUTPUT_DIR);
 
     JobClient.runJob(conf);
 
     Path[] outputFiles = FileUtil.stat2Paths(
-                           getFileSystem().listStatus(getOutputDir(),
-                           new OutputLogFilter()));
+        fs.listStatus(OUTPUT_DIR, new OutputLogFilter()));
     assertEquals(1, outputFiles.length);
-    InputStream is = getFileSystem().open(outputFiles[0]);
+    InputStream is = fs.open(outputFiles[0]);
     BufferedReader reader = new BufferedReader(new InputStreamReader(is));
     assertEquals("a\t1", reader.readLine());
     assertEquals("b\t1", reader.readLine());
@@ -108,27 +128,25 @@
 
   /**
    * HADOOP-4466:
-   * This test verifies the JavSerialization impl can write to SequenceFiles. by virtue other
-   * SequenceFileOutputFormat is not coupled to Writable types, if so, the job will fail.
+   * This test verifies the JavSerialization impl can write to
+   * SequenceFiles. by virtue other SequenceFileOutputFormat is not 
+   * coupled to Writable types, if so, the job will fail.
    *
    */
   public void testWriteToSequencefile() throws Exception {
-    OutputStream os = getFileSystem().create(new Path(getInputDir(),
-        "text.txt"));
-    Writer wr = new OutputStreamWriter(os);
-    wr.write("b a\n");
-    wr.close();
-
-    JobConf conf = createJobConf();
+    JobConf conf = new JobConf(TestJavaSerialization.class);
     conf.setJobName("JavaSerialization");
 
+    FileSystem fs = FileSystem.get(conf);
+    cleanAndCreateInput(fs);
+
     conf.set("io.serializations",
     "org.apache.hadoop.io.serializer.JavaSerialization," +
     "org.apache.hadoop.io.serializer.WritableSerialization");
 
     conf.setInputFormat(TextInputFormat.class);
-    conf.setOutputFormat(SequenceFileOutputFormat.class); // test we can write to sequence files
-
+    // test we can write to sequence files
+    conf.setOutputFormat(SequenceFileOutputFormat.class); 
     conf.setOutputKeyClass(String.class);
     conf.setOutputValueClass(Long.class);
     conf.setOutputKeyComparatorClass(JavaSerializationComparator.class);
@@ -136,16 +154,15 @@
     conf.setMapperClass(WordCountMapper.class);
     conf.setReducerClass(SumReducer.class);
 
-    FileInputFormat.setInputPaths(conf, getInputDir());
+    FileInputFormat.setInputPaths(conf, INPUT_DIR);
 
-    FileOutputFormat.setOutputPath(conf, getOutputDir());
+    FileOutputFormat.setOutputPath(conf, OUTPUT_DIR);
 
     JobClient.runJob(conf);
 
     Path[] outputFiles = FileUtil.stat2Paths(
-                           getFileSystem().listStatus(getOutputDir(),
-                           new OutputLogFilter()));
+        fs.listStatus(OUTPUT_DIR, new OutputLogFilter()));
     assertEquals(1, outputFiles.length);
-}
+  }
 
 }

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerRestart.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerRestart.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerRestart.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerRestart.java Tue Jun  9 16:11:19 2009
@@ -34,11 +34,11 @@
  * recover previosuly submitted jobs.
  */
 public class TestJobTrackerRestart extends TestCase {
-  final Path testDir = 
+  static final Path testDir = 
     new Path(System.getProperty("test.build.data","/tmp"), 
              "jt-restart-testing");
   final Path inDir = new Path(testDir, "input");
-  final Path shareDir = new Path(testDir, "share");
+  static final Path shareDir = new Path(testDir, "share");
   final Path outputDir = new Path(testDir, "output");
   private static int numJobsSubmitted = 0;
   
@@ -400,6 +400,115 @@
            && status.getReduceTasks() == 0;
   }
   
+  /** Committer with setup waiting
+   */
+  static class CommitterWithDelaySetup extends FileOutputCommitter {
+    @Override
+    public void setupJob(JobContext context) throws IOException {
+      FileSystem fs = FileSystem.get(context.getConfiguration());
+      while (true) {
+        if (fs.exists(shareDir)) {
+          break;
+        }
+        UtilsForTests.waitFor(100);
+      }
+      super.cleanupJob(context);
+    }
+  }
+
+  /** Tests a job on jobtracker with restart-recovery turned on and empty 
+   *  jobhistory file.
+   * Preparation :
+   *    - Configure a job with
+   *       - num-maps : 0 (long waiting setup)
+   *       - num-reducers : 0
+   *    
+   * Check if the job succeedes after restart.
+   * 
+   * Assumption that map slots are given first for setup.
+   */
+  public void testJobRecoveryWithEmptyHistory(MiniDFSCluster dfs, 
+                                              MiniMRCluster mr) 
+  throws IOException {
+    mr.startTaskTracker(null, null, 1, 1);
+    FileSystem fileSys = dfs.getFileSystem();
+    
+    cleanUp(fileSys, shareDir);
+    cleanUp(fileSys, inDir);
+    cleanUp(fileSys, outputDir);
+    
+    JobConf conf = mr.createJobConf();
+    conf.setNumReduceTasks(0);
+    conf.setOutputCommitter(TestEmptyJob.CommitterWithDelayCleanup.class);
+    fileSys.delete(outputDir, false);
+    RunningJob job1 = 
+      UtilsForTests.runJob(conf, inDir, outputDir, 30, 0);
+    
+    conf.setNumReduceTasks(0);
+    conf.setOutputCommitter(CommitterWithDelaySetup.class);
+    Path inDir2 = new Path(testDir, "input2");
+    fileSys.mkdirs(inDir2);
+    Path outDir2 = new Path(testDir, "output2");
+    fileSys.delete(outDir2, false);
+    JobConf newConf = getJobs(mr.createJobConf(),
+                              new JobPriority[] {JobPriority.NORMAL},
+                              new int[] {10}, new int[] {0},
+                              outDir2, inDir2,
+                              getMapSignalFile(shareDir),
+                              getReduceSignalFile(shareDir))[0];
+
+    JobClient jobClient = new JobClient(newConf);
+    RunningJob job2 = jobClient.submitJob(newConf);
+    JobID id = job2.getID();
+
+    /*RunningJob job2 = 
+      UtilsForTests.runJob(mr.createJobConf(), inDir2, outDir2, 0);
+    
+    JobID id = job2.getID();*/
+    JobInProgress jip = mr.getJobTrackerRunner().getJobTracker().getJob(id);
+    
+    jip.initTasks();
+    
+    // find out the history filename
+    String history = 
+      JobHistory.JobInfo.getJobHistoryFileName(jip.getJobConf(), id);
+    Path historyPath = JobHistory.JobInfo.getJobHistoryLogLocation(history);
+    
+    //  make sure that setup is launched
+    while (jip.runningMaps() == 0) {
+      UtilsForTests.waitFor(100);
+    }
+    
+    id = job1.getID();
+    jip = mr.getJobTrackerRunner().getJobTracker().getJob(id);
+    
+    jip.initTasks();
+    
+    //  make sure that cleanup is launched and is waiting
+    while (!jip.isCleanupLaunched()) {
+      UtilsForTests.waitFor(100);
+    }
+    
+    mr.stopJobTracker();
+    
+    // delete the history file .. just to be safe.
+    FileSystem historyFS = historyPath.getFileSystem(conf);
+    historyFS.delete(historyPath, false);
+    historyFS.create(historyPath).close(); // create an empty file
+    
+    
+    UtilsForTests.signalTasks(dfs, fileSys, getMapSignalFile(shareDir), getReduceSignalFile(shareDir), (short)1);
+
+    // Turn on the recovery
+    mr.getJobTrackerConf().setBoolean("mapred.jobtracker.restart.recover", 
+                                      true);
+    
+    mr.startJobTracker();
+    
+    job1.waitForCompletion();
+    job2.waitForCompletion();
+  }
+  
   public void testJobTrackerRestart() throws IOException {
     String namenode = null;
     MiniDFSCluster dfs = null;
@@ -450,6 +559,9 @@
       
       // Test jobtracker with restart-recovery turned off
       testRestartWithoutRecovery(dfs, mr);
+      
+      // test recovery with empty file
+      testJobRecoveryWithEmptyHistory(dfs, mr);
     } finally {
       if (mr != null) {
         try {

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java Tue Jun  9 16:11:19 2009
@@ -96,7 +96,7 @@
     // Run Sort
     Sort sort = new Sort();
     assertEquals(ToolRunner.run(job, sort, sortArgs), 0);
-    Counters counters = sort.getResult().getCounters();
+    org.apache.hadoop.mapreduce.Counters counters = sort.getResult().getCounters();
     long mapInput = counters.findCounter(
       org.apache.hadoop.mapreduce.lib.input.FileInputFormat.COUNTER_GROUP,
       org.apache.hadoop.mapreduce.lib.input.FileInputFormat.BYTES_READ).
@@ -106,7 +106,7 @@
     // the hdfs read should be between 100% and 110% of the map input bytes
     assertTrue("map input = " + mapInput + ", hdfs read = " + hdfsRead,
                (hdfsRead < (mapInput * 1.1)) &&
-               (hdfsRead > mapInput));  
+               (hdfsRead >= mapInput));  
   }
   
   private static void runSortValidator(JobConf job, 

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java Tue Jun  9 16:11:19 2009
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapred;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -28,9 +29,12 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.examples.SleepJob;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.util.ProcfsBasedProcessTree;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.TestProcfsBasedProcessTree;
 import org.apache.hadoop.util.ToolRunner;
 
 import junit.framework.TestCase;
@@ -42,6 +46,9 @@
 
   private static final Log LOG =
       LogFactory.getLog(TestTaskTrackerMemoryManager.class);
+  private static String TEST_ROOT_DIR = new Path(System.getProperty(
+		    "test.build.data", "/tmp")).toString().replace(' ', '+');
+
   private MiniMRCluster miniMRCluster;
 
   private String taskOverLimitPatternString =
@@ -345,4 +352,90 @@
     // Test succeeded, kill the job.
     job.killJob();
   }
+  
+  /**
+   * Test to verify the check for whether a process tree is over limit or not.
+   * @throws IOException if there was a problem setting up the
+   *                      fake procfs directories or files.
+   */
+  public void testProcessTreeLimits() throws IOException {
+    
+    // set up a dummy proc file system
+    File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
+    String[] pids = { "100", "200", "300", "400", "500", "600", "700" };
+    try {
+      TestProcfsBasedProcessTree.setupProcfsRootDir(procfsRootDir);
+      
+      // create pid dirs.
+      TestProcfsBasedProcessTree.setupPidDirs(procfsRootDir, pids);
+      
+      // create process infos.
+      TestProcfsBasedProcessTree.ProcessStatInfo[] procs =
+          new TestProcfsBasedProcessTree.ProcessStatInfo[7];
+
+      // assume pids 100, 500 are in 1 tree 
+      // 200,300,400 are in another
+      // 600,700 are in a third
+      procs[0] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+          new String[] {"100", "proc1", "1", "100", "100", "100000"});
+      procs[1] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+          new String[] {"200", "proc2", "1", "200", "200", "200000"});
+      procs[2] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+          new String[] {"300", "proc3", "200", "200", "200", "300000"});
+      procs[3] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+          new String[] {"400", "proc4", "200", "200", "200", "400000"});
+      procs[4] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+          new String[] {"500", "proc5", "100", "100", "100", "1500000"});
+      procs[5] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+          new String[] {"600", "proc6", "1", "600", "600", "100000"});
+      procs[6] = new TestProcfsBasedProcessTree.ProcessStatInfo(
+          new String[] {"700", "proc7", "600", "600", "600", "100000"});
+      // write stat files.
+      TestProcfsBasedProcessTree.writeStatFiles(procfsRootDir, pids, procs);
+
+      // vmem limit
+      long limit = 700000;
+      
+      // Create TaskMemoryMonitorThread
+      TaskMemoryManagerThread test = new TaskMemoryManagerThread(1000000L,
+                                                                5000L);
+      // create process trees
+      // tree rooted at 100 is over limit immediately, as it is
+      // twice over the mem limit.
+      ProcfsBasedProcessTree pTree = new ProcfsBasedProcessTree(
+                                          "100", true, 100L, 
+                                          procfsRootDir.getAbsolutePath());
+      pTree.getProcessTree();
+      assertTrue("tree rooted at 100 should be over limit " +
+                    "after first iteration.",
+                  test.isProcessTreeOverLimit(pTree, "dummyId", limit));
+      
+      // the tree rooted at 200 is initially below limit.
+      pTree = new ProcfsBasedProcessTree("200", true, 100L,
+                                          procfsRootDir.getAbsolutePath());
+      pTree.getProcessTree();
+      assertFalse("tree rooted at 200 shouldn't be over limit " +
+                    "after one iteration.",
+                  test.isProcessTreeOverLimit(pTree, "dummyId", limit));
+      // second iteration - now the tree has been over limit twice,
+      // hence it should be declared over limit.
+      pTree.getProcessTree();
+      assertTrue("tree rooted at 200 should be over limit after 2 iterations",
+                  test.isProcessTreeOverLimit(pTree, "dummyId", limit));
+      
+      // the tree rooted at 600 is never over limit.
+      pTree = new ProcfsBasedProcessTree("600", true, 100L,
+                                            procfsRootDir.getAbsolutePath());
+      pTree.getProcessTree();
+      assertFalse("tree rooted at 600 should never be over limit.",
+                    test.isProcessTreeOverLimit(pTree, "dummyId", limit));
+      
+      // another iteration does not make any difference.
+      pTree.getProcessTree();
+      assertFalse("tree rooted at 600 should never be over limit.",
+                    test.isProcessTreeOverLimit(pTree, "dummyId", limit));
+    } finally {
+      FileUtil.fullyDelete(procfsRootDir);
+    }
+  }
 }

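To make the expected assertions above concrete, using the vmem figures written to the fake procfs (the last field of each ProcessStatInfo) against the 700000 limit: the tree rooted at pid 100 totals 100000 + 1500000 = 1600000, more than twice the limit, so it is declared over limit on the first check; the tree rooted at 200 totals 200000 + 300000 + 400000 = 900000, over the limit but not twice over, so, as the in-line comments note, it is declared over limit only on the second check; and the tree rooted at 600 totals 100000 + 100000 = 200000, which never exceeds the limit.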
Modified: hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java Tue Jun  9 16:11:19 2009
@@ -548,6 +548,12 @@
   // Start a job and return its RunningJob object
   static RunningJob runJob(JobConf conf, Path inDir, Path outDir)
                     throws IOException {
+    return runJob(conf, inDir, outDir, conf.getNumMapTasks(), conf.getNumReduceTasks());
+  }
+
+  // Start a job and return its RunningJob object
+  static RunningJob runJob(JobConf conf, Path inDir, Path outDir, int numMaps, 
+                           int numReds) throws IOException {
 
     FileSystem fs = FileSystem.get(conf);
     if (fs.exists(outDir)) {
@@ -558,9 +564,11 @@
     }
     String input = "The quick brown fox\n" + "has many silly\n"
         + "red fox sox\n";
-    DataOutputStream file = fs.create(new Path(inDir, "part-0"));
-    file.writeBytes(input);
-    file.close();
+    for (int i = 0; i < numMaps; ++i) {
+      DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
+      file.writeBytes(input);
+      file.close();
+    }    
 
     conf.setInputFormat(TextInputFormat.class);
     conf.setOutputKeyClass(LongWritable.class);
@@ -568,8 +576,8 @@
 
     FileInputFormat.setInputPaths(conf, inDir);
     FileOutputFormat.setOutputPath(conf, outDir);
-    conf.setNumMapTasks(conf.getNumMapTasks());
-    conf.setNumReduceTasks(conf.getNumReduceTasks());
+    conf.setNumMapTasks(numMaps);
+    conf.setNumReduceTasks(numReds);
 
     JobClient jobClient = new JobClient(conf);
     RunningJob job = jobClient.submitJob(conf);

Modified: hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java Tue Jun  9 16:11:19 2009
@@ -27,6 +27,7 @@
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.examples.MultiFileWordCount;
 import org.apache.hadoop.examples.SecondarySort;
 import org.apache.hadoop.examples.WordCount;
 import org.apache.hadoop.examples.SecondarySort.FirstGroupingComparator;
@@ -41,6 +42,7 @@
 import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.util.ToolRunner;
 
 /**
  * A JUnit test to test min map-reduce cluster with local file system.
@@ -88,6 +90,7 @@
       Configuration conf = mr.createJobConf();
       runWordCount(conf);
       runSecondarySort(conf);
+      runMultiFileWordCount(conf);
     } finally {
       if (mr != null) { mr.shutdown(); }
     }
@@ -172,5 +175,21 @@
                  "------------------------------------------------\n" +
                  "10\t20\n10\t25\n10\t30\n", out);
   }
-  
+ 
+  public void runMultiFileWordCount(Configuration  conf) throws Exception  {
+    localFs.delete(new Path(TEST_ROOT_DIR + "/in"), true);
+    localFs.delete(new Path(TEST_ROOT_DIR + "/out"), true);    
+    writeFile("in/part1", "this is a test\nof " +
+              "multi file word count test\ntest\n");
+    writeFile("in/part2", "more test");
+
+    int ret = ToolRunner.run(conf, new MultiFileWordCount(), 
+                new String[] {TEST_ROOT_DIR + "/in", TEST_ROOT_DIR + "/out"});
+    assertTrue("MultiFileWordCount failed", ret == 0);
+    String out = readFile("out/part-r-00000");
+    System.out.println(out);
+    assertEquals("a\t1\ncount\t1\nfile\t1\nis\t1\n" +
+      "more\t1\nmulti\t1\nof\t1\ntest\t4\nthis\t1\nword\t1\n", out);
+  }
+
 }

Modified: hadoop/core/branches/HADOOP-3628-2/src/tools/org/apache/hadoop/tools/DistCp.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/tools/org/apache/hadoop/tools/DistCp.java?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/tools/org/apache/hadoop/tools/DistCp.java (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/tools/org/apache/hadoop/tools/DistCp.java Tue Jun  9 16:11:19 2009
@@ -86,7 +86,8 @@
     "\n                       u: user" + 
     "\n                       g: group" +
     "\n                       p: permission" +
-    "\n                       -p alone is equivalent to -prbugp" +
+    "\n                       t: modification and access times" +
+    "\n                       -p alone is equivalent to -prbugpt" +
     "\n-i                     Ignore failures" +
     "\n-log <logdir>          Write logs to <logdir>" +
     "\n-m <num_maps>          Maximum number of simultaneous copies" +
@@ -146,7 +147,7 @@
     }
   }
   static enum FileAttribute {
-    BLOCK_SIZE, REPLICATION, USER, GROUP, PERMISSION;
+    BLOCK_SIZE, REPLICATION, USER, GROUP, PERMISSION, TIMES;
 
     final char symbol;
 
@@ -460,7 +461,7 @@
               + ") but expected " + bytesString(srcstat.getLen()) 
               + " from " + srcstat.getPath());        
         } 
-        updatePermissions(srcstat, dststat);
+        updateDestStatus(srcstat, dststat);
       }
 
       // report at least once for each file
@@ -486,10 +487,10 @@
       }
     }
 
-    private void updatePermissions(FileStatus src, FileStatus dst
+    private void updateDestStatus(FileStatus src, FileStatus dst
         ) throws IOException {
       if (preserve_status) {
-        DistCp.updatePermissions(src, dst, preseved, destFileSys);
+        DistCp.updateDestStatus(src, dst, preseved, destFileSys);
       }
     }
 
@@ -669,7 +670,7 @@
     }
   }
 
-  private static void updatePermissions(FileStatus src, FileStatus dst,
+  private static void updateDestStatus(FileStatus src, FileStatus dst,
       EnumSet<FileAttribute> preseved, FileSystem destFileSys
       ) throws IOException {
     String owner = null;
@@ -689,6 +690,9 @@
         && !src.getPermission().equals(dst.getPermission())) {
       destFileSys.setPermission(dst.getPath(), src.getPermission());
     }
+    if (preseved.contains(FileAttribute.TIMES)) {
+      destFileSys.setTimes(dst.getPath(), src.getModificationTime(), src.getAccessTime());
+    }
   }
 
   static private void finalize(Configuration conf, JobConf jobconf,
@@ -713,7 +717,7 @@
       FilePair pair = new FilePair(); 
       for(; in.next(dsttext, pair); ) {
         Path absdst = new Path(destPath, pair.output);
-        updatePermissions(pair.input, dstfs.getFileStatus(absdst),
+        updateDestStatus(pair.input, dstfs.getFileStatus(absdst),
             preseved, dstfs);
       }
     } finally {
@@ -1050,7 +1054,7 @@
     final boolean special =
       (args.srcs.size() == 1 && !dstExists) || update || overwrite;
     int srcCount = 0, cnsyncf = 0, dirsyn = 0;
-    long fileCount = 0L, byteCount = 0L, cbsyncs = 0L;
+    long fileCount = 0L, dirCount = 0L, byteCount = 0L, cbsyncs = 0L;
     try {
       for(Iterator<Path> srcItr = args.srcs.iterator(); srcItr.hasNext(); ) {
         final Path src = srcItr.next();
@@ -1059,6 +1063,10 @@
         Path root = special && srcfilestat.isDir()? src: src.getParent();
         if (srcfilestat.isDir()) {
           ++srcCount;
+          ++dirCount;
+          final String dst = makeRelative(root,src);
+          src_writer.append(new LongWritable(0), new FilePair(srcfilestat, dst));
+          dst_writer.append(new Text(dst), new Text(src.toString()));
         }
 
         Stack<FileStatus> pathstack = new Stack<FileStatus>();
@@ -1073,6 +1081,7 @@
 
             if (child.isDir()) {
               pathstack.push(child);
+              ++dirCount;
             }
             else {
               //skip file if the src and the dst files are the same.
@@ -1157,7 +1166,7 @@
     jobConf.setInt(SRC_COUNT_LABEL, srcCount);
     jobConf.setLong(TOTAL_SIZE_LABEL, byteCount);
     setMapCount(byteCount, jobConf);
-    return fileCount > 0;
+    return (fileCount + dirCount) > 0;
   }
 
   /**

Modified: hadoop/core/branches/HADOOP-3628-2/src/webapps/job/jobdetails.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-3628-2/src/webapps/job/jobdetails.jsp?rev=783055&r1=783054&r2=783055&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-3628-2/src/webapps/job/jobdetails.jsp (original)
+++ hadoop/core/branches/HADOOP-3628-2/src/webapps/job/jobdetails.jsp Tue Jun  9 16:11:19 2009
@@ -267,6 +267,10 @@
           "<a href=\"jobblacklistedtrackers.jsp?jobid=" + jobId + "\">" +
           flakyTaskTrackers + "</a><br>\n");
     }
+    if (job.getSchedulingInfo() != null) {
+      out.print("<b>Job Scheduling information: </b>" +
+          job.getSchedulingInfo().toString() +"\n");
+    }
     out.print("<hr>\n");
     out.print("<table border=2 cellpadding=\"5\" cellspacing=\"2\">");
     out.print("<tr><th>Kind</th><th>% Complete</th><th>Num
Tasks</th>" +


