hadoop-mapreduce-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1158072 [3/3] - in /hadoop/common/branches/HDFS-1623/mapreduce: ./ conf/ src/c++/ src/contrib/ src/contrib/block_forensics/ src/contrib/capacity-scheduler/ src/contrib/data_join/ src/contrib/dynamic-scheduler/ src/contrib/eclipse-plugin/ s...
Date: Tue, 16 Aug 2011 00:37:31 GMT
Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java Tue Aug 16 00:37:15 2011
@@ -89,6 +89,9 @@ public interface JTConfig extends MRConf
     "mapreduce.jobtracker.jobhistory.completed.location";
   public static final String JT_JOBHISTORY_LOCATION = 
     "mapreduce.jobtracker.jobhistory.location";
+  // number of partial task progress reports we retain in job history
+  public static final String JT_JOBHISTORY_TASKPROGRESS_NUMBER_SPLITS =
+    "mapreduce.jobtracker.jobhistory.task.numberprogresssplits";
   public static final String JT_AVG_BLACKLIST_THRESHOLD = 
     "mapreduce.jobtracker.blacklist.average.threshold";
   public static final String JT_SYSTEM_DIR = "mapreduce.jobtracker.system.dir";

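The new key parallels the existing JT_JOBHISTORY_* constants above it. A
minimal sketch of reading it, assuming a consumer with a Configuration in
hand; the default of 12 is an illustrative assumption, not taken from this
patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;

    public class ProgressSplitsConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // How many partial task progress reports job history retains.
        int numSplits = conf.getInt(
            JTConfig.JT_JOBHISTORY_TASKPROGRESS_NUMBER_SPLITS, 12);
        System.out.println("progress splits retained: " + numSplits);
      }
    }
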
Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java Tue Aug 16 00:37:15 2011
@@ -133,7 +133,7 @@ public class MergeManager<K, V> {
                       Counters.Counter reduceCombineInputCounter,
                       Counters.Counter mergedMapOutputsCounter,
                       ExceptionReporter exceptionReporter,
-                      Progress mergePhase) {
+                      Progress mergePhase, MapOutputFile mapOutputFile) {
     this.reduceId = reduceId;
     this.jobConf = jobConf;
     this.localDirAllocator = localDirAllocator;
@@ -146,7 +146,7 @@ public class MergeManager<K, V> {
     this.reduceCombineInputCounter = reduceCombineInputCounter;
     this.spilledRecordsCounter = spilledRecordsCounter;
     this.mergedMapOutputsCounter = mergedMapOutputsCounter;
-    this.mapOutputFile = new MapOutputFile();
+    this.mapOutputFile = mapOutputFile;
     this.mapOutputFile.setConf(jobConf);
     
     this.localFS = localFS;

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java Tue Aug 16 00:37:15 2011
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.LocalDirAllo
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.mapred.Counters;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.MapOutputFile;
 import org.apache.hadoop.mapred.RawKeyValueIterator;
 import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
@@ -75,7 +76,8 @@ public class Shuffle<K, V> implements Ex
                  TaskStatus status,
                  Progress copyPhase,
                  Progress mergePhase,
-                 Task reduceTask) {
+                 Task reduceTask,
+                 MapOutputFile mapOutputFile) {
     this.reduceId = reduceId;
     this.jobConf = jobConf;
     this.umbilical = umbilical;
@@ -95,7 +97,7 @@ public class Shuffle<K, V> implements Ex
                                     spilledRecordsCounter, 
                                     reduceCombineInputCounter, 
                                     mergedMapOutputsCounter, 
-                                    this, mergePhase);
+                                    this, mergePhase, mapOutputFile);
   }
 
   @SuppressWarnings("unchecked")

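Taken together, the MergeManager and Shuffle hunks replace a hard-coded
new MapOutputFile() with a constructor parameter, so the caller decides
which implementation is used. A minimal sketch of the resulting wiring,
assuming MROutputFiles as the concrete default (as the test configuration
below also does); the test-stub idea is hypothetical:

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.MROutputFiles;
    import org.apache.hadoop.mapred.MapOutputFile;

    public class MapOutputFileWiring {
      // Production-style default: a concrete MROutputFiles, configured
      // the same way MergeManager now configures whatever it is handed.
      static MapOutputFile defaultMapOutputFile(JobConf conf) {
        MapOutputFile files = new MROutputFiles();
        files.setConf(conf);
        return files;
      }
      // A test could instead pass a MapOutputFile subclass that redirects
      // its paths to a scratch directory (hypothetical, for illustration).
    }
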
Propchange: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/mapreduce/src/test/mapred:1152502-1153927
+/hadoop/common/trunk/mapreduce/src/test/mapred:1152502-1158071
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred:713112
 /hadoop/core/trunk/src/test/mapred:776175-785643

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred-site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred-site.xml?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred-site.xml (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred-site.xml Tue Aug 16 00:37:15 2011
@@ -48,4 +48,8 @@
   <name>mapreduce.jobtracker.persist.jobstatus.active</name>
   <value>false</value>
 </property>
+<property>
+  <name>mapreduce.task.local.output.class</name>
+  <value>org.apache.hadoop.mapred.MROutputFiles</value>
+</property>
 </configuration>

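The new test property names the MapOutputFile implementation for tasks.
This patch does not show the lookup site, but Hadoop conventionally
resolves such *.class keys reflectively; a sketch under that assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.MROutputFiles;
    import org.apache.hadoop.mapred.MapOutputFile;
    import org.apache.hadoop.util.ReflectionUtils;

    public class LocalOutputClassLookup {
      static MapOutputFile resolve(Configuration conf) {
        // Typical Hadoop pattern: a class-valued key with a typed default.
        Class<? extends MapOutputFile> clazz = conf.getClass(
            "mapreduce.task.local.output.class",
            MROutputFiles.class, MapOutputFile.class);
        return ReflectionUtils.newInstance(clazz, conf);
      }
    }
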
Propchange: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/fs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/fs:1152502-1153927
+/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/fs:1152502-1158071
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/fs:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/fs:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/fs:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/hdfs:1152502-1153927
+/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/hdfs:1152502-1158071
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/hdfs:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/hdfs:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/hdfs:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/io/FileBench.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/io/FileBench.java:1152502-1153927
+/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/io/FileBench.java:1152502-1158071
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/io/FileBench.java:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/io/FileBench.java:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/io/FileBench.java:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:1152502-1153927
+/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:1152502-1158071
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/io/TestSequenceFileMergeProgress.java:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/ipc/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/ipc:1152502-1153927
+/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/ipc:1152502-1158071
 /hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/ipc:713112
 /hadoop/core/trunk/src/test/hdfs-with-mr/org/apache/hadoop/ipc:776175-784663
 /hadoop/hdfs/branches/HDFS-265/src/test/hdfs-with-mr/org/apache/hadoop/ipc:796829-820463

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIndexCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIndexCache.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIndexCache.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIndexCache.java Tue Aug 16 00:37:15 2011
@@ -193,6 +193,60 @@ public class TestIndexCache extends Test
     }
   }
 
+  public void testRemoveMap() throws Exception {
+    // This test case uses two threads to call getIndexInformation and
+    // removeMap concurrently, in order to construct a race condition.
+    // This test case may not be deterministic, but on my MacBook it
+    // fails with probability of 100% on code before MAPREDUCE-2541,
+    // so it is repeatable in practice.
+    JobConf conf = new JobConf();
+    FileSystem fs = FileSystem.getLocal(conf).getRaw();
+    Path p = new Path(System.getProperty("test.build.data", "/tmp"),
+                      "cache").makeQualified(fs);
+    fs.delete(p, true);
+    conf.setInt(TTConfig.TT_INDEX_CACHE, 10);
+    // Make a big file so removeMapThread almost surely runs faster than 
+    // getInfoThread 
+    final int partsPerMap = 100000;
+    final int bytesPerFile = partsPerMap * 24;
+    final IndexCache cache = new IndexCache(conf);
+
+    final Path big = new Path(p, "bigIndex");
+    final String user = 
+      UserGroupInformation.getCurrentUser().getShortUserName();
+    writeFile(fs, big, bytesPerFile, partsPerMap);
+    
+    // run multiple times
+    for (int i = 0; i < 20; ++i) {
+      Thread getInfoThread = new Thread() {
+        @Override
+        public void run() {
+          try {
+            cache.getIndexInformation("bigIndex", partsPerMap, big, user);
+          } catch (Exception e) {
+            // should not be here
+          }
+        }
+      };
+      Thread removeMapThread = new Thread() {
+        @Override
+        public void run() {
+          cache.removeMap("bigIndex");
+        }
+      };
+      if (i%2==0) {
+        getInfoThread.start();
+        removeMapThread.start();        
+      } else {
+        removeMapThread.start();        
+        getInfoThread.start();
+      }
+      getInfoThread.join();
+      removeMapThread.join();
+      assertEquals(true, cache.checkTotalMemoryUsed());
+    }      
+  }
+  
   private static void checkRecord(IndexRecord rec, long fill) {
     assertEquals(fill, rec.startOffset);
     assertEquals(fill, rec.rawLength);

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java Tue Aug 16 00:37:15 2011
@@ -324,7 +324,7 @@ public class TestJobInProgress extends T
 
     verify(jspy).getStatus();
     verify(jspy).getProfile();
-    verify(jspy).getJobCounters();
+    verify(jspy, atLeastOnce()).getJobCounters();
     verify(jspy, atLeastOnce()).getJobID();
     verify(jspy).getStartTime();
     verify(jspy).getFirstTaskLaunchTimes();

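Background on the one-word change: verify(jspy) is shorthand for
verify(jspy, times(1)), which fails as soon as getJobCounters() is invoked
a second time; atLeastOnce() tolerates extra calls. A self-contained
sketch (the Reporter interface is illustrative, not from this patch):

    import static org.mockito.Mockito.*;

    public class VerificationModes {
      interface Reporter { void report(); }

      public static void main(String[] args) {
        Reporter r = mock(Reporter.class);
        r.report();
        r.report();
        verify(r, atLeastOnce()).report();  // passes: called twice
        verify(r, times(1)).report();       // fails: wanted 1, got 2
      }
    }
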
Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapRed.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapRed.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapRed.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapRed.java Tue Aug 16 00:37:15 2011
@@ -293,7 +293,7 @@ public class TestMapRed extends Configur
                        ) throws IOException {
       if (first) {
         first = false;
-        MapOutputFile mapOutputFile = new MapOutputFile();
+        MapOutputFile mapOutputFile = new MROutputFiles();
         mapOutputFile.setConf(conf);
         Path input = mapOutputFile.getInputFile(0);
         FileSystem fs = FileSystem.get(conf);

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java Tue Aug 16 00:37:15 2011
@@ -34,6 +34,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.lib.IdentityMapper;
 import org.apache.hadoop.mapred.lib.IdentityReducer;
 import org.apache.hadoop.mapred.lib.NullOutputFormat;
+import org.apache.hadoop.mapreduce.FileSystemCounter;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -104,8 +105,8 @@ public class TestMiniMRDFSSort extends T
     org.apache.hadoop.mapreduce.Counters counters = sort.getResult().getCounters();
     long mapInput = counters.findCounter(FileInputFormatCounter.BYTES_READ)
         .getValue();
-    long hdfsRead = counters.findCounter(Task.FILESYSTEM_COUNTER_GROUP,
-                                         "HDFS_BYTES_READ").getValue();
+    long hdfsRead = counters.findCounter("hdfs", FileSystemCounter.BYTES_READ)
+        .getValue();
     // the hdfs read should be between 100% and 110% of the map input bytes
     assertTrue("map input = " + mapInput + ", hdfs read = " + hdfsRead,
                (hdfsRead < (mapInput * 1.1)) &&

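The string pair (group, name) gives way to the typed FileSystemCounter
enum, keyed by filesystem scheme; the next file makes the same move for
BYTES_WRITTEN. A minimal sketch of the new lookup as these tests use it:

    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.FileSystemCounter;

    public class CounterLookupMigration {
      static long hdfsBytesRead(Counters counters) {
        // Replaces the old string lookup of
        // (Task.FILESYSTEM_COUNTER_GROUP, "HDFS_BYTES_READ").
        return counters.findCounter("hdfs", FileSystemCounter.BYTES_READ)
            .getValue();
      }
    }
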
Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java Tue Aug 16 00:37:15 2011
@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.FileSystemCounter;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MapReduceTestUtil;
 import org.apache.hadoop.mapreduce.TaskCounter;
@@ -244,12 +245,10 @@ public class TestMiniMRWithDFS extends T
     result = launchWordCount(jobConf, inDir, outDir, input, 0, 1);
     assertEquals("is\t1\noom\t1\nowen\t1\n", result.output);
     Counters counters = result.job.getCounters();
-    long hdfsRead = 
-      counters.findCounter(Task.FILESYSTEM_COUNTER_GROUP, 
-          Task.getFileSystemCounterNames("hdfs")[0]).getCounter();
-    long hdfsWrite = 
-      counters.findCounter(Task.FILESYSTEM_COUNTER_GROUP, 
-          Task.getFileSystemCounterNames("hdfs")[1]).getCounter();
+    long hdfsRead = counters.findCounter("HDFS",
+        FileSystemCounter.BYTES_READ).getValue();
+    long hdfsWrite = counters.findCounter("HDFS",
+        FileSystemCounter.BYTES_WRITTEN).getValue();
     long rawSplitBytesRead = 
       counters.findCounter(TaskCounter.SPLIT_RAW_BYTES).getCounter();
     assertEquals(result.output.length(), hdfsWrite);

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java Tue Aug 16 00:37:15 2011
@@ -279,7 +279,7 @@ public class TestSeveral extends TestCas
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     int exitCode = TestJobClient.runTool(conf, new JobClient(),
         new String[] { "-counter", jobId.toString(),
-      "org.apache.hadoop.mapred.Task$Counter", "MAP_INPUT_RECORDS" },
+      "org.apache.hadoop.mapreduce.TaskCounter", "MAP_INPUT_RECORDS" },
       out);
     assertEquals(0, exitCode);
     assertEquals(numReduces, Integer.parseInt(out.toString().trim()));

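The group argument to -counter is a counter-group name; for enum-backed
groups that is the enum class's name, so the change tracks the move from
the inner enum Task$Counter to the public TaskCounter enum. A tiny sketch:

    public class CounterGroupName {
      public static void main(String[] args) {
        // Enum-backed counter groups are keyed by the enum's class name.
        String group = org.apache.hadoop.mapreduce.TaskCounter.class.getName();
        System.out.println(group);  // org.apache.hadoop.mapreduce.TaskCounter
        // The old group was the inner enum Task$Counter, whose binary
        // name carries the '$'.
      }
    }
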
Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java Tue Aug 16 00:37:15 2011
@@ -97,6 +97,9 @@ public class SleepJob extends Configured
 
         public boolean nextKeyValue()
             throws IOException {
+          if (count == 0) {
+            return false;
+          }
           key = new IntWritable();
           key.set(emitCount);
           int emit = emitPerMapTask / count;
@@ -112,7 +115,7 @@ public class SleepJob extends Configured
         public IntWritable getCurrentValue() { return value; }
         public void close() throws IOException { }
         public float getProgress() throws IOException {
-          return records / ((float)count);
+          return count == 0 ? 100 : records / ((float)count);
         }
       };
     }
@@ -129,7 +132,7 @@ public class SleepJob extends Configured
       Configuration conf = context.getConfiguration();
       this.mapSleepCount =
         conf.getInt(MAP_SLEEP_COUNT, mapSleepCount);
-      this.mapSleepDuration =
+      this.mapSleepDuration = mapSleepCount == 0 ? 0 :
         conf.getLong(MAP_SLEEP_TIME , 100) / mapSleepCount;
     }
 
@@ -166,7 +169,7 @@ public class SleepJob extends Configured
       Configuration conf = context.getConfiguration();
       this.reduceSleepCount =
         conf.getInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
-      this.reduceSleepDuration =
+      this.reduceSleepDuration = reduceSleepCount == 0 ? 0 : 
         conf.getLong(REDUCE_SLEEP_TIME , 100) / reduceSleepCount;
     }
 

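All three SleepJob guards target the same failure mode: with the map or
reduce sleep count configured to 0, the unguarded integer divisions would
throw. A small illustration of the hazard and the adopted pattern:

    public class DivideByZeroGuard {
      public static void main(String[] args) {
        int count = 0;
        // Unguarded, 100L / count throws ArithmeticException (/ by zero).
        long guarded = count == 0 ? 0 : 100L / count;  // pattern used above
        System.out.println(guarded);  // 0
      }
    }
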
Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestCounters.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestCounters.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestCounters.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/TestCounters.java Tue Aug 16 00:37:15 2011
@@ -17,17 +17,23 @@
  */
 package org.apache.hadoop.mapreduce;
 
-import java.io.IOException;
 import java.util.Random;
 
 import org.junit.Test;
 import static org.junit.Assert.*;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapreduce.counters.LimitExceededException;
+import org.apache.hadoop.mapreduce.counters.Limits;
+
 /**
  * TestCounters checks the sanity and recoverability of {@code Counters}
  */
 public class TestCounters {
 
+  static final Log LOG = LogFactory.getLog(TestCounters.class);
+
   /**
    * Verify counter value works
    */
@@ -39,7 +45,8 @@ public class TestCounters {
     for (int i = 0; i < NUMBER_TESTS; i++) {
       long initValue = rand.nextInt();
       long expectedValue = initValue;
-      Counter counter = new Counter("foo", "bar", expectedValue);
+      Counter counter = new Counters().findCounter("test", "foo");
+      counter.setValue(initValue);
       assertEquals("Counter value is not initialized correctly",
           expectedValue, counter.getValue());
       for (int j = 0; j < NUMBER_INC; j++) {
@@ -56,4 +63,69 @@ public class TestCounters {
     }
   }
 
+  @Test public void testLimits() {
+    for (int i = 0; i < 3; ++i) {
+      // make sure limits apply to separate containers
+      testMaxCounters(new Counters());
+      testMaxGroups(new Counters());
+    }
+  }
+
+  static final Enum<?> FRAMEWORK_COUNTER = TaskCounter.CPU_MILLISECONDS;
+  static final long FRAMEWORK_COUNTER_VALUE = 8;
+  static final String FS_SCHEME = "HDFS";
+  static final FileSystemCounter FS_COUNTER = FileSystemCounter.BYTES_READ;
+  static final long FS_COUNTER_VALUE = 10;
+
+  private void testMaxCounters(final Counters counters) {
+    LOG.info("counters max="+ Limits.COUNTERS_MAX);
+    for (int i = 0; i < Limits.COUNTERS_MAX; ++i) {
+      counters.findCounter("test", "test"+ i);
+    }
+    setExpected(counters);
+    shouldThrow(LimitExceededException.class, new Runnable() {
+      public void run() {
+        counters.findCounter("test", "bad");
+      }
+    });
+    checkExpected(counters);
+  }
+
+  private void testMaxGroups(final Counters counters) {
+    LOG.info("counter groups max="+ Limits.GROUPS_MAX);
+    for (int i = 0; i < Limits.GROUPS_MAX; ++i) {
+      // assuming COUNTERS_MAX > GROUPS_MAX
+      counters.findCounter("test"+ i, "test");
+    }
+    setExpected(counters);
+    shouldThrow(LimitExceededException.class, new Runnable() {
+      public void run() {
+        counters.findCounter("bad", "test");
+      }
+    });
+    checkExpected(counters);
+  }
+
+  private void setExpected(Counters counters) {
+    counters.findCounter(FRAMEWORK_COUNTER).setValue(FRAMEWORK_COUNTER_VALUE);
+    counters.findCounter(FS_SCHEME, FS_COUNTER).setValue(FS_COUNTER_VALUE);
+  }
+
+  private void checkExpected(Counters counters) {
+    assertEquals(FRAMEWORK_COUNTER_VALUE,
+                 counters.findCounter(FRAMEWORK_COUNTER).getValue());
+    assertEquals(FS_COUNTER_VALUE,
+                 counters.findCounter(FS_SCHEME, FS_COUNTER).getValue());
+  }
+
+  private void shouldThrow(Class<? extends Exception> ecls, Runnable runnable) {
+    try {
+      runnable.run();
+    } catch (Exception e) {
+      assertSame(ecls, e.getClass());
+      LOG.info("got expected: "+ e);
+      return;
+    }
+    assertTrue("Should've thrown "+ ecls.getSimpleName(), false);
+  }
 }

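A note on the shouldThrow helper: JUnit 4's built-in annotation form,
@Test(expected = ...), ends the test at the throw, so the follow-up
checkExpected(counters) assertions could never run. A hypothetical sketch
of the limitation being avoided:

    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.counters.LimitExceededException;
    import org.junit.Test;

    public class AnnotationFormSketch {
      private final Counters counters = new Counters();

      @Test(expected = LimitExceededException.class)
      public void overflowEndsTestHere() {
        for (int i = 0; i < 1000000; ++i) {   // drive past COUNTERS_MAX
          counters.findCounter("test", "test" + i);
        }
        // Unreachable once the limit throws: checkExpected-style asserts
        // would be skipped, which is why the patch uses shouldThrow.
      }
    }
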
Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java Tue Aug 16 00:37:15 2011
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.mapreduce.jobhistory;
 
+import java.util.List;
+import java.util.ArrayList;
+
 import org.apache.hadoop.mapred.TaskStatus;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
@@ -28,6 +31,15 @@ import junit.framework.TestCase;
  * Test various jobhistory events
  */
 public class TestJobHistoryEvents extends TestCase {
+  static final int[][] NULL_SPLITS_ARRAY
+    = new int[org.apache.hadoop.tools.rumen.LoggedTaskAttempt.SplitVectorKind.values().length][];
+
+  static {
+    for (int i = 0; i < NULL_SPLITS_ARRAY.length; ++i) {
+      NULL_SPLITS_ARRAY[i] = new int[0];
+    }
+  }
+ 
   /**
    * Test {@link TaskAttemptStartedEvent} for various task types.
    */
@@ -73,7 +85,8 @@ public class TestJobHistoryEvents extend
                                                      String state) {
     for (TaskType t : types) {
       TaskAttemptUnsuccessfulCompletionEvent tauce = 
-        new TaskAttemptUnsuccessfulCompletionEvent(id, t, state, 0L, "", "");
+        new TaskAttemptUnsuccessfulCompletionEvent
+           (id, t, state, 0L, "", "", NULL_SPLITS_ARRAY);
       assertEquals(expected, tauce.getEventType());
     }
   }

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestBinaryTokenFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestBinaryTokenFile.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestBinaryTokenFile.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestBinaryTokenFile.java Tue Aug 16 00:37:15 2011
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
@@ -147,7 +148,7 @@ public class TestBinaryTokenFile {
         dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, 
         jConf);
 
-    dfsCluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
     FileSystem fs = dfsCluster.getFileSystem();
     
     p1 = new Path("file1");
@@ -177,7 +178,7 @@ public class TestBinaryTokenFile {
     jConf = mrCluster.createJobConf();
     
     // provide namenodes names for the job to get the delegation tokens for
-    String nnUri = dfsCluster.getURI().toString();
+    String nnUri = dfsCluster.getURI(0).toString();
     jConf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
     // job tracker principal id..
     jConf.set(JTConfig.JT_USER_NAME, "jt_id");

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java Tue Aug 16 00:37:15 2011
@@ -30,7 +30,9 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.NoSuchAlgorithmException;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
 import javax.crypto.KeyGenerator;
@@ -38,13 +40,16 @@ import javax.crypto.spec.SecretKeySpec;
 
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
 import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
@@ -148,7 +153,9 @@ public class TestTokenCache {
   
   @BeforeClass
   public static void setUp() throws Exception {
+    
     Configuration conf = new Configuration();
+    conf.set("hadoop.security.auth_to_local", "RULE:[2:$1]");
     dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
     jConf = new JobConf(conf);
     mrCluster = new MiniMRCluster(0, 0, numSlaves, 
@@ -157,7 +164,7 @@ public class TestTokenCache {
     
     createTokenFileJson();
     verifySecretKeysInJSONFile();
-    dfsCluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
     FileSystem fs = dfsCluster.getFileSystem();
     
     p1 = new Path("file1");
@@ -223,10 +230,10 @@ public class TestTokenCache {
     jConf = mrCluster.createJobConf();
     
     // provide namenodes names for the job to get the delegation tokens for
-    String nnUri = dfsCluster.getURI().toString();
+    String nnUri = dfsCluster.getURI(0).toString();
     jConf.set(MRJobConfig.JOB_NAMENODES, nnUri + "," + nnUri);
     // job tracker principal id..
-    jConf.set(JTConfig.JT_USER_NAME, "jt_id");
+    jConf.set(JTConfig.JT_USER_NAME, "jt_id/foo@BAR");
     
     // using argument to pass the file name
     String[] args = {
@@ -303,7 +310,7 @@ public class TestTokenCache {
     HftpFileSystem hfs = mock(HftpFileSystem.class);
 
     DelegationTokenSecretManager dtSecretManager = 
-      dfsCluster.getNamesystem().getDelegationTokenSecretManager();
+        NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem());
     String renewer = "renewer";
     jConf.set(JTConfig.JT_USER_NAME,renewer);
     DelegationTokenIdentifier dtId = 
@@ -332,6 +339,14 @@ public class TestTokenCache {
         return t;
       }}).when(hfs).getDelegationToken(renewer);
     
+    //when(hfs.getDelegationTokens()).thenReturn((Token<? extends TokenIdentifier>) t);
+    Mockito.doAnswer(new Answer<List<Token<DelegationTokenIdentifier>>>(){
+      @Override
+      public List<Token<DelegationTokenIdentifier>>  answer(InvocationOnMock invocation)
+      throws Throwable {
+        return Collections.singletonList(t);
+      }}).when(hfs).getDelegationTokens(renewer);
+    
     //when(hfs.getCanonicalServiceName).thenReturn(fs_addr);
     Mockito.doAnswer(new Answer<String>(){
       @Override
@@ -360,4 +375,56 @@ public class TestTokenCache {
     }
   }
 
+  /** 
+   * verify _HOST substitution
+   * @throws IOException
+   */
+  @Test
+  public void testGetJTPrincipal() throws IOException {
+    String serviceName = "jt/";
+    String hostName = "foo";
+    String domainName = "@BAR";
+    Configuration conf = new Configuration();
+    conf.set(JTConfig.JT_IPC_ADDRESS, hostName + ":8888");
+    conf.set(JTConfig.JT_USER_NAME, serviceName + SecurityUtil.HOSTNAME_PATTERN
+        + domainName);
+    assertEquals("Failed to substitute HOSTNAME_PATTERN with hostName",
+        serviceName + hostName + domainName, TokenCache.getJTPrincipal(conf));
+  }
+
+  @Test
+  public void testGetTokensForViewFS() throws IOException, URISyntaxException {
+    Configuration conf = new Configuration(jConf);
+    FileSystem dfs = dfsCluster.getFileSystem();
+    String serviceName = dfs.getCanonicalServiceName();
+
+    Path p1 = new Path("/mount1");
+    Path p2 = new Path("/mount2");
+    p1 = dfs.makeQualified(p1);
+    p2 = dfs.makeQualified(p2);
+
+    conf.set("fs.viewfs.mounttable.default.link./dir1", p1.toString());
+    conf.set("fs.viewfs.mounttable.default.link./dir2", p2.toString());
+    Credentials credentials = new Credentials();
+    Path lp1 = new Path("viewfs:///dir1");
+    Path lp2 = new Path("viewfs:///dir2");
+    Path[] paths = new Path[2];
+    paths[0] = lp1;
+    paths[1] = lp2;
+    TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);
+
+    Collection<Token<? extends TokenIdentifier>> tns =
+        credentials.getAllTokens();
+    assertEquals("number of tokens is not 1", 1, tns.size());
+
+    boolean found = false;
+    for (Token<? extends TokenIdentifier> tt : tns) {
+      System.out.println("token=" + tt);
+      if (tt.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)
+          && tt.getService().equals(new Text(serviceName))) {
+        found = true;
+      }
+      assertTrue("didn't find token for [" + lp1 + ", " + lp2 + "]", found);
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java Tue Aug 16 00:37:15 2011
@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
@@ -185,8 +186,7 @@ public class TestTokenCacheOldApi {
     
     createTokenFileJson();
     verifySecretKeysInJSONFile();
-    dfsCluster.getNamesystem()
-				.getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
     FileSystem fs = dfsCluster.getFileSystem();
     
     p1 = new Path("file1");

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java Tue Aug 16 00:37:15 2011
@@ -108,7 +108,7 @@ public class TestMapredGroupMappingServi
     cluster = new MiniDFSCluster(0, config, 1, true, true, true,  null, null, 
         null, null);
     cluster.waitActive();
-    URI uri = cluster.getURI();
+    URI uri = cluster.getURI(0);
     
     MiniMRCluster miniMRCluster = new MiniMRCluster(0, uri.toString() , 
       3, null, null, config);

Propchange: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java:1152502-1153927
+/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java:1152502-1158071
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java:817878-835934

Propchange: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,4 +1,4 @@
-/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java:1152502-1153927
+/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java:1152502-1158071
 /hadoop/core/branches/branch-0.19/mapred/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java:713112
 /hadoop/core/trunk/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java:776175-785643
 /hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/test/MapredTestDriver.java:817878-835934

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/test/mapred/org/apache/hadoop/tools/rumen/TestRumenJobTraces.java Tue Aug 16 00:37:15 2011
@@ -852,6 +852,30 @@ public class TestRumenJobTraces {
   public void testTopologyBuilder() throws Exception {
     final TopologyBuilder subject = new TopologyBuilder();
 
+    // This 4 comes from
+    //   TaskInProgress.ProgressibleSplitsBlock.burst().size, which
+    //   is not visible here.
+
+    int[][] splits = new int[4][];
+
+    splits[0] = new int[12];
+    splits[1] = new int[12];
+    splits[2] = new int[12];
+    splits[3] = new int[12];
+
+    for (int j = 0; j < 4; ++j) {
+      for (int i = 0; i < 12; ++i) {
+        splits[j][i] = -1;
+      }
+    }
+
+    for (int i = 0; i < 6; ++i) {
+      splits[0][i] = 500000 * i;
+      splits[1][i] = 300000 * i;
+      splits[2][i] = 500000;
+      splits[3][i] = 700000;
+    }
+
     // currently we extract no host names from the Properties
     subject.process(new Properties());
 
@@ -860,16 +884,16 @@ public class TestRumenJobTraces {
         .valueOf("MAP"), "STATUS", 1234567890L,
         "/194\\.6\\.134\\.64/cluster50261\\.secondleveldomain\\.com",
         "SUCCESS", null));
-    subject.process(new TaskAttemptUnsuccessfulCompletionEvent(TaskAttemptID
-        .forName("attempt_200904211745_0003_m_000004_1"), TaskType
-        .valueOf("MAP"), "STATUS", 1234567890L,
-        "/194\\.6\\.134\\.80/cluster50262\\.secondleveldomain\\.com",
-        "MACHINE_EXPLODED"));
-    subject.process(new TaskAttemptUnsuccessfulCompletionEvent(TaskAttemptID
-        .forName("attempt_200904211745_0003_m_000004_2"), TaskType
-        .valueOf("MAP"), "STATUS", 1234567890L,
-        "/194\\.6\\.134\\.80/cluster50263\\.secondleveldomain\\.com",
-        "MACHINE_EXPLODED"));
+    subject.process(new TaskAttemptUnsuccessfulCompletionEvent
+                    (TaskAttemptID.forName("attempt_200904211745_0003_m_000004_1"),
+                     TaskType.valueOf("MAP"), "STATUS", 1234567890L,
+                     "/194\\.6\\.134\\.80/cluster50262\\.secondleveldomain\\.com",
+                     "MACHINE_EXPLODED", splits));
+    subject.process(new TaskAttemptUnsuccessfulCompletionEvent
+                    (TaskAttemptID.forName("attempt_200904211745_0003_m_000004_2"),
+                     TaskType.valueOf("MAP"), "STATUS", 1234567890L,
+                     "/194\\.6\\.134\\.80/cluster50263\\.secondleveldomain\\.com",
+                     "MACHINE_EXPLODED", splits));
     subject.process(new TaskStartedEvent(TaskID
         .forName("task_200904211745_0003_m_000004"), 1234567890L, TaskType
         .valueOf("MAP"),

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobBuilder.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobBuilder.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobBuilder.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/JobBuilder.java Tue Aug 16 00:37:15 2011
@@ -476,6 +476,11 @@ public class JobBuilder {
     }
 
     attempt.setFinishTime(event.getFinishTime());
+
+    attempt.arraySetClockSplits(event.getClockSplits());
+    attempt.arraySetCpuUsages(event.getCpuUsages());
+    attempt.arraySetVMemKbytes(event.getVMemKbytes());
+    attempt.arraySetPhysMemKbytes(event.getPhysMemKbytes());
   }
 
   private void processTaskAttemptStartedEvent(TaskAttemptStartedEvent event) {
@@ -521,6 +526,10 @@ public class JobBuilder {
     attempt.setSortFinished(event.getSortFinishTime());
     attempt
         .incorporateCounters(((ReduceAttemptFinished) event.getDatum()).counters);
+    attempt.arraySetClockSplits(event.getClockSplits());
+    attempt.arraySetCpuUsages(event.getCpuUsages());
+    attempt.arraySetVMemKbytes(event.getVMemKbytes());
+    attempt.arraySetPhysMemKbytes(event.getPhysMemKbytes());
   }
 
   private void processMapAttemptFinishedEvent(MapAttemptFinishedEvent event) {
@@ -537,7 +546,11 @@ public class JobBuilder {
     // is redundant, but making this will add future-proofing.
     attempt.setFinishTime(event.getFinishTime());
     attempt
-        .incorporateCounters(((MapAttemptFinished) event.getDatum()).counters);
+      .incorporateCounters(((MapAttemptFinished) event.getDatum()).counters);
+    attempt.arraySetClockSplits(event.getClockSplits());
+    attempt.arraySetCpuUsages(event.getCpuUsages());
+    attempt.arraySetVMemKbytes(event.getVMemKbytes());
+    attempt.arraySetPhysMemKbytes(event.getPhysMemKbytes());
   }
 
   private void processJobUnsuccessfulCompletionEvent(

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/LoggedTaskAttempt.java Tue Aug 16 00:37:15 2011
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.tools.rumen;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -71,10 +73,118 @@ public class LoggedTaskAttempt implement
   // Initialize to default object for backward compatibility
   ResourceUsageMetrics metrics = new ResourceUsageMetrics();
   
+  List<Integer> clockSplits = new ArrayList<Integer>();
+  List<Integer> cpuUsages = new ArrayList<Integer>();
+  List<Integer> vMemKbytes = new ArrayList<Integer>();
+  List<Integer> physMemKbytes = new ArrayList<Integer>();
+
   LoggedTaskAttempt() {
     super();
   }
 
+  // Carries the kinds of splits vectors a LoggedTaskAttempt holds.
+  //
+  // Each enum constant has the following methods:
+  //   get(LoggedTaskAttempt attempt)
+  //    returns a List<Integer> with the corresponding value field
+  //   set(LoggedTaskAttempt attempt, List<Integer> newValue)
+  //    sets the value
+  // There is also a pair of methods, get(List<List<Integer>>) and
+  //  set(List<List<Integer>>, List<Integer>), which correspondingly
+  //  deliver or set the appropriate element of the
+  //  List<List<Integer>>.
+  // This makes it easier to add another kind in the future.
+  public enum SplitVectorKind {
+
+    WALLCLOCK_TIME {
+      @Override
+      public List<Integer> get(LoggedTaskAttempt attempt) {
+        return attempt.getClockSplits();
+      }
+      @Override
+      public void set(LoggedTaskAttempt attempt, List<Integer> newValue) {
+        attempt.setClockSplits(newValue);
+      }
+    },
+
+    CPU_USAGE {
+      @Override
+      public List<Integer> get(LoggedTaskAttempt attempt) {
+        return attempt.getCpuUsages();
+      }
+      @Override
+      public void set(LoggedTaskAttempt attempt, List<Integer> newValue) {
+        attempt.setCpuUsages(newValue);
+      }
+    },
+
+    VIRTUAL_MEMORY_KBYTES {
+      @Override
+      public List<Integer> get(LoggedTaskAttempt attempt) {
+        return attempt.getVMemKbytes();
+      }
+      @Override
+      public void set(LoggedTaskAttempt attempt, List<Integer> newValue) {
+        attempt.setVMemKbytes(newValue);
+      }
+    },
+
+    PHYSICAL_MEMORY_KBYTES {
+      @Override
+      public List<Integer> get(LoggedTaskAttempt attempt) {
+        return attempt.getPhysMemKbytes();
+      }
+      @Override
+      public void set(LoggedTaskAttempt attempt, List<Integer> newValue) {
+        attempt.setPhysMemKbytes(newValue);
+      }
+    };
+
+    static private final List<List<Integer>> NULL_SPLITS_VECTOR
+      = new ArrayList<List<Integer>>();
+
+    static {
+      for (SplitVectorKind kind : SplitVectorKind.values() ) {
+        NULL_SPLITS_VECTOR.add(new ArrayList<Integer>());
+      }
+    }
+
+    abstract public List<Integer> get(LoggedTaskAttempt attempt);
+
+    abstract public void set(LoggedTaskAttempt attempt, List<Integer> newValue);
+
+    public List<Integer> get(List<List<Integer>> listSplits) {
+      return listSplits.get(this.ordinal());
+    }
+
+    public void set(List<List<Integer>> listSplits, List<Integer> newValue) {
+      listSplits.set(this.ordinal(), newValue);
+    }
+
+    static public List<List<Integer>> getNullSplitsVector() {
+      return NULL_SPLITS_VECTOR;
+    }
+  }
+
+  /**
+   *
+   * @return a list of all splits vectors, ordered by ordinal
+   *           within {@link SplitVectorKind}.  Do NOT index into the
+   *           returned list with hard-coded indices to get individual
+   *           values; use
+   *           {@code SplitVectorKind.get(LoggedTaskAttempt)} instead.
+   */
+  public List<List<Integer>> allSplitVectors() {
+    List<List<Integer>> result
+      = new ArrayList<List<Integer>>(SplitVectorKind.values().length);
+
+    for (SplitVectorKind kind : SplitVectorKind.values() ) {
+      result.add(kind.get(this));
+    }
+
+    return result;
+  }
+
   static private Set<String> alreadySeenAnySetterAttributes =
       new TreeSet<String>();
 
@@ -89,6 +199,78 @@ public class LoggedTaskAttempt implement
     }
   }
 
+  public List<Integer> getClockSplits() {
+    return clockSplits;
+  }
+
+  void setClockSplits(List<Integer> clockSplits) {
+    this.clockSplits = clockSplits;
+  }
+
+  void arraySetClockSplits(int[] clockSplits) {
+    List<Integer> result = new ArrayList<Integer>();
+
+    for (int i = 0; i < clockSplits.length; ++i) {
+      result.add(clockSplits[i]);
+    }
+                 
+    this.clockSplits = result;
+  }
+
+  public List<Integer> getCpuUsages() {
+    return cpuUsages;
+  }
+
+  void setCpuUsages(List<Integer> cpuUsages) {
+    this.cpuUsages = cpuUsages;
+  }
+
+  void arraySetCpuUsages(int[] cpuUsages) {
+    List<Integer> result = new ArrayList<Integer>();
+
+    for (int i = 0; i < cpuUsages.length; ++i) {
+      result.add(cpuUsages[i]);
+    }
+                 
+    this.cpuUsages = result;
+  }
+
+  public List<Integer> getVMemKbytes() {
+    return vMemKbytes;
+  }
+
+  void setVMemKbytes(List<Integer> vMemKbytes) {
+    this.vMemKbytes = vMemKbytes;
+  }
+
+  void arraySetVMemKbytes(int[] vMemKbytes) {
+    List<Integer> result = new ArrayList<Integer>();
+
+    for (int i = 0; i < vMemKbytes.length; ++i) {
+      result.add(vMemKbytes[i]);
+    }
+                 
+    this.vMemKbytes = result;
+  }
+
+  public List<Integer> getPhysMemKbytes() {
+    return physMemKbytes;
+  }
+
+  void setPhysMemKbytes(List<Integer> physMemKbytes) {
+    this.physMemKbytes = physMemKbytes;
+  }
+
+  void arraySetPhysMemKbytes(int[] physMemKbytes) {
+    List<Integer> result = new ArrayList<Integer>();
+
+    for (int i = 0; i < physMemKbytes.length; ++i) {
+      result.add(physMemKbytes[i]);
+    }
+                 
+    this.physMemKbytes = result;
+  }
+
   void adjustTimes(long adjustment) {
     startTime += adjustment;
     finishTime += adjustment;
@@ -480,6 +662,26 @@ public class LoggedTaskAttempt implement
     c1.deepCompare(c2, recurse);
   }
 
+  private void compare1(List<Integer> c1, List<Integer> c2, TreePath loc,
+                        String eltname)
+        throws DeepInequalityException {
+    if (c1 == null && c2 == null) {
+      return;
+    }
+
+    if (c1 == null || c2 == null || c1.size() != c2.size()) {
+      throw new DeepInequalityException
+              (eltname + " miscompared", new TreePath(loc, eltname));
+    }
+
+    for (int i = 0; i < c1.size(); ++i) {
+      if (!c1.get(i).equals(c2.get(i))) {
+        throw new DeepInequalityException("" + c1.get(i) + " != " + c2.get(i),
+                                          new TreePath(loc, eltname, i));
+      }
+    }
+  }    
+
   public void deepCompare(DeepCompare comparand, TreePath loc)
       throws DeepInequalityException {
     if (!(comparand instanceof LoggedTaskAttempt)) {
@@ -518,5 +720,10 @@ public class LoggedTaskAttempt implement
     compare1(sortFinished, other.sortFinished, loc, "sortFinished");
 
     compare1(location, other.location, loc, "location");
+
+    compare1(clockSplits, other.clockSplits, loc, "clockSplits");
+    compare1(cpuUsages, other.cpuUsages, loc, "cpuUsages");
+    compare1(vMemKbytes, other.vMemKbytes, loc, "vMemKbytes");
+    compare1(physMemKbytes, other.physMemKbytes, loc, "physMemKbytes");
   }
 }

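Per the javadoc above, callers should address individual splits vectors
through the enum rather than hard-coded indices. A minimal read-only
sketch (getNullSplitsVector() returns a shared instance, so it must not
be mutated):

    import java.util.List;
    import org.apache.hadoop.tools.rumen.LoggedTaskAttempt.SplitVectorKind;

    public class SplitVectorKindExample {
      public static void main(String[] args) {
        // The all-empty splits vector: one empty list per kind.
        List<List<Integer>> vectors = SplitVectorKind.getNullSplitsVector();
        // Address the CPU slot by kind, not by vectors.get(1).
        List<Integer> cpu = SplitVectorKind.CPU_USAGE.get(vectors);
        System.out.println("cpu samples: " + cpu.size());  // 0
      }
    }
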
Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapAttempt20LineHistoryEventEmitter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapAttempt20LineHistoryEventEmitter.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapAttempt20LineHistoryEventEmitter.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapAttempt20LineHistoryEventEmitter.java Tue Aug 16 00:37:15 2011
@@ -68,10 +68,13 @@ public class MapAttempt20LineHistoryEven
             (MapAttempt20LineHistoryEventEmitter) thatg;
 
         if (finishTime != null && "success".equalsIgnoreCase(status)) {
-          return new MapAttemptFinishedEvent(taskAttemptID,
-              that.originalTaskType, status, Long.parseLong(finishTime), Long
-                  .parseLong(finishTime), hostName, state,
-              maybeParseCounters(counters));
+          return new MapAttemptFinishedEvent
+            (taskAttemptID,
+              that.originalTaskType, status,
+             Long.parseLong(finishTime),
+             Long.parseLong(finishTime),
+             hostName, state, maybeParseCounters(counters),
+             null);
         }
       }
 
@@ -88,5 +91,4 @@ public class MapAttempt20LineHistoryEven
   List<SingleEventEmitter> nonFinalSEEs() {
     return nonFinals;
   }
-
 }

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapTaskAttemptInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapTaskAttemptInfo.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapTaskAttemptInfo.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/MapTaskAttemptInfo.java Tue Aug 16 00:37:15 2011
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.tools.rumen;
 
+import java.util.List;
+
 import org.apache.hadoop.mapred.TaskStatus.State;
 
 /**
@@ -26,11 +28,33 @@ import org.apache.hadoop.mapred.TaskStat
 public class MapTaskAttemptInfo extends TaskAttemptInfo {
   private long runtime;
 
-  public MapTaskAttemptInfo(State state, TaskInfo taskInfo, long runtime) {
-    super(state, taskInfo);
+  public MapTaskAttemptInfo(State state, TaskInfo taskInfo,
+                            long runtime, List<List<Integer>> allSplits) {
+    super(state, taskInfo,
+          allSplits == null
+            ? LoggedTaskAttempt.SplitVectorKind.getNullSplitsVector()
+           : allSplits);
     this.runtime = runtime;
   }
 
+  /**
+   *
+   * @deprecated please use the constructor with 
+   *               {@code (state, taskInfo, runtime,
+   *                  List<List<Integer>> allSplits)}
+   *             instead.  
+   *
+   * See {@link LoggedTaskAttempt} for an explanation of
+   *        {@code allSplits}.
+   *
+   * If there are no known splits, use {@code null}.
+   */
+  @Deprecated
+  public MapTaskAttemptInfo(State state, TaskInfo taskInfo,
+                            long runtime) {
+    this(state, taskInfo, runtime, null);
+  }
+
   @Override
   public long getRuntime() {
     return getMapRuntime();

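For orientation, a minimal caller-side sketch of the two MapTaskAttemptInfo constructors above. The sketch class, the supplied TaskInfo, and the split values are illustrative assumptions; only the constructor signatures come from this patch.

    package org.apache.hadoop.tools.rumen;

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.mapred.TaskStatus.State;

    class MapAttemptInfoSketch {
      // Builds an attempt record carrying one illustrative split vector.
      static MapTaskAttemptInfo withSplits(TaskInfo taskInfo) {
        List<List<Integer>> allSplits =
            Arrays.asList(Arrays.asList(1000, 1000, 1000, 1000)); // made-up values
        return new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, 4000L, allSplits);
      }

      // Passing null is also legal: the new constructor substitutes
      // LoggedTaskAttempt.SplitVectorKind.getNullSplitsVector(), which is
      // exactly what the deprecated three-argument constructor now does.
      static MapTaskAttemptInfo withoutSplits(TaskInfo taskInfo) {
        return new MapTaskAttemptInfo(State.SUCCEEDED, taskInfo, 4000L, null);
      }
    }
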
Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceAttempt20LineHistoryEventEmitter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceAttempt20LineHistoryEventEmitter.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceAttempt20LineHistoryEventEmitter.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceAttempt20LineHistoryEventEmitter.java Tue Aug 16 00:37:15 2011
@@ -28,8 +28,8 @@ import org.apache.hadoop.mapreduce.TaskA
 import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinishedEvent;
 
-public class ReduceAttempt20LineHistoryEventEmitter extends
-    TaskAttempt20LineEventEmitter {
+public class ReduceAttempt20LineHistoryEventEmitter
+     extends TaskAttempt20LineEventEmitter {
 
   static List<SingleEventEmitter> nonFinals =
       new LinkedList<SingleEventEmitter>();
@@ -71,10 +71,15 @@ public class ReduceAttempt20LineHistoryE
           ReduceAttempt20LineHistoryEventEmitter that =
               (ReduceAttempt20LineHistoryEventEmitter) thatg;
 
-          return new ReduceAttemptFinishedEvent(taskAttemptID,
-              that.originalTaskType, status, Long.parseLong(shuffleFinish),
-              Long.parseLong(sortFinish), Long.parseLong(finishTime), hostName,
-              state, maybeParseCounters(counters));
+          return new ReduceAttemptFinishedEvent
+            (taskAttemptID,
+             that.originalTaskType, status,
+             Long.parseLong(shuffleFinish),
+             Long.parseLong(sortFinish),
+             Long.parseLong(finishTime),
+             hostName,
+             state, maybeParseCounters(counters),
+             null);
         }
       }
 

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceTaskAttemptInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceTaskAttemptInfo.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceTaskAttemptInfo.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ReduceTaskAttemptInfo.java Tue Aug 16 00:37:15 2011
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.tools.rumen;
 
+import java.util.List;
+
 import org.apache.hadoop.mapred.TaskStatus.State;
 
 /**
@@ -29,13 +31,35 @@ public class ReduceTaskAttemptInfo exten
   private long reduceTime;
 
   public ReduceTaskAttemptInfo(State state, TaskInfo taskInfo, long shuffleTime,
-      long mergeTime, long reduceTime) {
-    super(state, taskInfo);
+      long mergeTime, long reduceTime, List<List<Integer>> allSplits) {
+    super(state, taskInfo,
+          allSplits == null
+            ? LoggedTaskAttempt.SplitVectorKind.getNullSplitsVector()
+           : allSplits);
     this.shuffleTime = shuffleTime;
     this.mergeTime = mergeTime;
     this.reduceTime = reduceTime;
   }
 
+
+  /**
+   *
+   * @deprecated please use the constructor with 
+   *               {@code (state, taskInfo, shuffleTime, mergeTime, reduceTime,
+   *                  List<List<Integer>> allSplits)}
+   *             instead.  
+   *
+   * See {@link LoggedTaskAttempt} for an explanation of
+   *        {@code allSplits}.
+   *
+   * If there are no known splits, use {@code null}.
+   */
+  @Deprecated
+  public ReduceTaskAttemptInfo(State state, TaskInfo taskInfo, long shuffleTime,
+      long mergeTime, long reduceTime) {
+    this(state, taskInfo, shuffleTime, mergeTime, reduceTime, null);
+  }
+
   /**
    * Get the runtime for the <b>reduce</b> phase of the reduce task-attempt.
    * 
@@ -67,5 +91,4 @@ public class ReduceTaskAttemptInfo exten
   public long getRuntime() {
     return (getShuffleRuntime() + getMergeRuntime() + getReduceRuntime());
   }
-
 }

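As the unchanged getRuntime() override above shows, a reduce attempt's total runtime is the sum of its three phase times. A short sketch with made-up durations; the sketch class and the supplied TaskInfo are assumptions:

    package org.apache.hadoop.tools.rumen;

    import org.apache.hadoop.mapred.TaskStatus.State;

    class ReduceAttemptInfoSketch {
      static long totalRuntime(TaskInfo taskInfo) {
        // Hypothetical phase durations in msec; null means no known splits.
        ReduceTaskAttemptInfo info = new ReduceTaskAttemptInfo(
            State.SUCCEEDED, taskInfo, 3000L, 1000L, 2000L, null);
        return info.getRuntime(); // 3000 + 1000 + 2000 == 6000
      }
    }
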
Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttempt20LineEventEmitter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttempt20LineEventEmitter.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttempt20LineEventEmitter.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttempt20LineEventEmitter.java Tue Aug 16 00:37:15 2011
@@ -138,9 +138,10 @@ public abstract class TaskAttempt20LineE
         TaskAttempt20LineEventEmitter that =
             (TaskAttempt20LineEventEmitter) thatg;
 
-        return new TaskAttemptUnsuccessfulCompletionEvent(taskAttemptID,
-            that.originalTaskType, status, Long.parseLong(finishTime),
-            hostName, error);
+        return new TaskAttemptUnsuccessfulCompletionEvent
+          (taskAttemptID,
+           that.originalTaskType, status, Long.parseLong(finishTime),
+           hostName, error, null);
       }
 
       return null;

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttemptInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttemptInfo.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttemptInfo.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/TaskAttemptInfo.java Tue Aug 16 00:37:15 2011
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.tools.rumen;
 
+import java.util.List;
+
 import org.apache.hadoop.mapred.TaskStatus.State;
 
 /**
@@ -27,13 +29,22 @@ public abstract class TaskAttemptInfo {
   protected final State state;
   protected final TaskInfo taskInfo;
 
-  protected TaskAttemptInfo(State state, TaskInfo taskInfo) {
+  protected final List<List<Integer>> allSplits;
+
+  protected TaskAttemptInfo
+       (State state, TaskInfo taskInfo, List<List<Integer>> allSplits) {
     if (state == State.SUCCEEDED || state == State.FAILED) {
       this.state = state;
     } else {
       throw new IllegalArgumentException("status cannot be " + state);
     }
     this.taskInfo = taskInfo;
+    this.allSplits = allSplits;
+  }
+
+  protected TaskAttemptInfo
+       (State state, TaskInfo taskInfo) {
+    this(state, taskInfo, LoggedTaskAttempt.SplitVectorKind.getNullSplitsVector());
   }
 
   /**
@@ -60,4 +71,8 @@ public abstract class TaskAttemptInfo {
   public TaskInfo getTaskInfo() {
     return taskInfo;
   }
+
+  public List<Integer> getSplitVector(LoggedTaskAttempt.SplitVectorKind kind) {
+    return kind.get(allSplits);
+  }
 }

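The new getSplitVector(kind) accessor is a kind-indexed read of the stored matrix via SplitVectorKind.get. A sketch of reading one vector back out; WALLCLOCK_TIME is an assumed constant name that does not appear in this patch, so substitute whichever kinds LoggedTaskAttempt.SplitVectorKind actually defines:

    package org.apache.hadoop.tools.rumen;

    import java.util.List;

    class SplitVectorReadSketch {
      // Pulls one kind-indexed vector out of an attempt record.
      static List<Integer> wallclockSplits(TaskAttemptInfo attemptInfo) {
        // WALLCLOCK_TIME is an assumption for illustration only.
        return attemptInfo.getSplitVector(
            LoggedTaskAttempt.SplitVectorKind.WALLCLOCK_TIME);
      }
    }
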
Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJob.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJob.java?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJob.java (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/tools/org/apache/hadoop/tools/rumen/ZombieJob.java Tue Aug 16 00:37:15 2011
@@ -537,7 +537,8 @@ public class ZombieJob implements JobSto
       }
       taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
       taskTime *= scaleFactor;
-      return new MapTaskAttemptInfo(state, taskInfo, taskTime);
+      return new MapTaskAttemptInfo
+        (state, taskInfo, taskTime, loggedAttempt.allSplitVectors());
     } else {
       throw new IllegalArgumentException("taskType can only be MAP: "
           + loggedTask.getTaskType());
@@ -584,6 +585,9 @@ public class ZombieJob implements JobSto
   private TaskAttemptInfo getTaskAttemptInfo(LoggedTask loggedTask,
       LoggedTaskAttempt loggedAttempt) {
     TaskInfo taskInfo = getTaskInfo(loggedTask);
+    
+    List<List<Integer>> allSplitVectors = loggedAttempt.allSplitVectors();
+
     State state = convertState(loggedAttempt.getResult());
     if (loggedTask.getTaskType() == Values.MAP) {
       long taskTime;
@@ -594,7 +598,7 @@ public class ZombieJob implements JobSto
         taskTime = loggedAttempt.getFinishTime() - loggedAttempt.getStartTime();
       }
       taskTime = sanitizeTaskRuntime(taskTime, loggedAttempt.getAttemptID());
-      return new MapTaskAttemptInfo(state, taskInfo, taskTime);
+      return new MapTaskAttemptInfo(state, taskInfo, taskTime, allSplitVectors);
     } else if (loggedTask.getTaskType() == Values.REDUCE) {
       long startTime = loggedAttempt.getStartTime();
       long mergeDone = loggedAttempt.getSortFinished();
@@ -605,7 +609,8 @@ public class ZombieJob implements JobSto
         // haven't seen reduce task with startTime=0 ever. But if this happens,
         // make up a reduceTime with no shuffle/merge.
         long reduceTime = makeUpReduceRuntime(state);
-        return new ReduceTaskAttemptInfo(state, taskInfo, 0, 0, reduceTime);
+        return new ReduceTaskAttemptInfo
+          (state, taskInfo, 0, 0, reduceTime, allSplitVectors);
       } else {
         if (shuffleDone <= 0) {
           shuffleDone = startTime;
@@ -619,7 +624,7 @@ public class ZombieJob implements JobSto
         reduceTime = sanitizeTaskRuntime(reduceTime, loggedAttempt.getAttemptID());
         
         return new ReduceTaskAttemptInfo(state, taskInfo, shuffleTime,
-            mergeTime, reduceTime);
+            mergeTime, reduceTime, allSplitVectors);
       }
     } else {
       throw new IllegalArgumentException("taskType for "
@@ -700,7 +705,8 @@ public class ZombieJob implements JobSto
       runtime = makeUpMapRuntime(state, locality);
       runtime = sanitizeTaskRuntime(runtime, makeTaskAttemptID(taskType,
           taskNumber, taskAttemptNumber).toString());
-      TaskAttemptInfo tai = new MapTaskAttemptInfo(state, taskInfo, runtime);
+      TaskAttemptInfo tai
+        = new MapTaskAttemptInfo(state, taskInfo, runtime, null);
       return tai;
     } else if (taskType == TaskType.REDUCE) {
       State state = State.SUCCEEDED;
@@ -711,8 +717,8 @@ public class ZombieJob implements JobSto
       // TODO make up state
       // state = makeUpState(taskAttemptNumber, job.getReducerTriesToSucceed());
       reduceTime = makeUpReduceRuntime(state);
-      TaskAttemptInfo tai = new ReduceTaskAttemptInfo(state, taskInfo,
-          shuffleTime, sortTime, reduceTime);
+      TaskAttemptInfo tai = new ReduceTaskAttemptInfo
+        (state, taskInfo, shuffleTime, sortTime, reduceTime, null);
       return tai;
     }
 

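The ZombieJob changes above follow one convention throughout: attempts replayed from a real LoggedTaskAttempt forward loggedAttempt.allSplitVectors(), while synthesized attempts (the makeUp* paths) pass null and so end up with the null splits vector. A condensed sketch of that convention; everything except the constructor and allSplitVectors() is an assumption:

    package org.apache.hadoop.tools.rumen;

    import java.util.List;

    import org.apache.hadoop.mapred.TaskStatus.State;

    class SplitForwardingSketch {
      static MapTaskAttemptInfo mapAttempt(State state, TaskInfo taskInfo,
                                           long runtime,
                                           LoggedTaskAttempt loggedAttempt) {
        // Replayed attempt: forward its measured split vectors.
        // Synthesized attempt (no log record): null, i.e. no measurements.
        List<List<Integer>> splits =
            (loggedAttempt == null) ? null : loggedAttempt.allSplitVectors();
        return new MapTaskAttemptInfo(state, taskInfo, runtime, splits);
      }
    }
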
Propchange: hadoop/common/branches/HDFS-1623/mapreduce/src/webapps/job/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 16 00:37:15 2011
@@ -1,3 +1,3 @@
-/hadoop/common/trunk/mapreduce/src/webapps/job:1152502-1153927
+/hadoop/common/trunk/mapreduce/src/webapps/job:1152502-1158071
 /hadoop/core/branches/branch-0.19/mapred/src/webapps/job:713112
 /hadoop/core/trunk/src/webapps/job:776175-785643

Modified: hadoop/common/branches/HDFS-1623/mapreduce/src/webapps/job/jobdetailshistory.jsp
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-1623/mapreduce/src/webapps/job/jobdetailshistory.jsp?rev=1158072&r1=1158071&r2=1158072&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-1623/mapreduce/src/webapps/job/jobdetailshistory.jsp (original)
+++ hadoop/common/branches/HDFS-1623/mapreduce/src/webapps/job/jobdetailshistory.jsp Tue Aug 16 00:37:15 2011
@@ -45,6 +45,7 @@
 <%! static SimpleDateFormat dateFormat = new SimpleDateFormat("d-MMM-yyyy HH:mm:ss") ; %>
 <%
     String logFile = request.getParameter("logFile");
+    String reasonForFailure = " ";
     final Path jobFile = new Path(logFile);
     String jobid = JobHistory.getJobIDFromHistoryFilePath(jobFile).toString();
 
@@ -55,6 +56,8 @@
     if (job == null) {
       return;
     }
+    if ("FAILED".equals(job.getJobStatus()))
+      reasonForFailure = job.getErrorInfo();
 %>
 
 <html>
@@ -78,6 +81,7 @@
 <b>Launched At: </b> <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getLaunchTime(), job.getSubmitTime()) %><br/>
 <b>Finished At: </b>  <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getFinishTime(), job.getLaunchTime()) %><br/>
 <b>Status: </b> <%= ((job.getJobStatus()) == null ? "Incomplete" :job.getJobStatus()) %><br/> 
+<b>Reason For Failure: </b> <%=reasonForFailure %><br/>
 <%
     HistoryViewer.SummarizedJob sj = new HistoryViewer.SummarizedJob(job);
 %>


