hadoop-mapreduce-commits mailing list archives

From: e..@apache.org
Subject: svn commit: r1145413 - in /hadoop/common/trunk/mapreduce: ./ src/docs/src/documentation/content/xdocs/ src/java/org/apache/hadoop/mapred/ src/test/mapred/org/apache/hadoop/mapred/
Date: Tue, 12 Jul 2011 00:54:49 GMT
Author: eli
Date: Tue Jul 12 00:54:48 2011
New Revision: 1145413

URL: http://svn.apache.org/viewvc?rev=1145413&view=rev
Log:
MAPREDUCE-2606. Remove IsolationRunner. Contributed by Alejandro Abdelnur

Removed:
    hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/IsolationRunner.java
    hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestIsolationRunner.java
Modified:
    hadoop/common/trunk/mapreduce/CHANGES.txt
    hadoop/common/trunk/mapreduce/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
    hadoop/common/trunk/mapreduce/src/docs/src/documentation/content/xdocs/site.xml
    hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java
    hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/Task.java
    hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java

Modified: hadoop/common/trunk/mapreduce/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/mapreduce/CHANGES.txt?rev=1145413&r1=1145412&r2=1145413&view=diff
==============================================================================
--- hadoop/common/trunk/mapreduce/CHANGES.txt (original)
+++ hadoop/common/trunk/mapreduce/CHANGES.txt Tue Jul 12 00:54:48 2011
@@ -9,6 +9,8 @@ Trunk (unreleased changes)
 
     MAPREDUCE-2430. Remove mrunit contrib. (nigel via eli)
 
+    MAPREDUCE-2606. Remove IsolationRunner. (Alejandro Abdelnur via eli)
+
   NEW FEATURES
 
     MAPREDUCE-2107. [Gridmix] Total heap usage emulation in Gridmix.

Modified: hadoop/common/trunk/mapreduce/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/mapreduce/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml?rev=1145413&r1=1145412&r2=1145413&view=diff
==============================================================================
--- hadoop/common/trunk/mapreduce/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml (original)
+++ hadoop/common/trunk/mapreduce/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml Tue Jul 12 00:54:48 2011
@@ -552,8 +552,7 @@
       and others.</p>
       
       <p>Finally, we will wrap up by discussing some useful features of the
-      framework such as the <code>DistributedCache</code>, 
-      <code>IsolationRunner</code> etc.</p>
+      framework such as the <code>DistributedCache</code>.
 
       <section>
         <title>Payload</title>
@@ -2308,31 +2307,6 @@
         </section>
         
         <section>
-          <title>IsolationRunner</title>
-          
-          <p><a href="ext:api/org/apache/hadoop/mapred/isolationrunner">
-          IsolationRunner</a> is a utility to help debug MapReduce programs.</p>
-          
-          <p>To use the <code>IsolationRunner</code>, first set 
-          <code>keep.failed.tasks.files</code> to <code>true</code>
-          (also see <code>keep.tasks.files.pattern</code>).</p>
-          
-          <p>
-            Next, go to the node on which the failed task ran and go to the 
-            <code>TaskTracker</code>'s local directory and run the 
-            <code>IsolationRunner</code>:<br/>
-            <code>$ cd &lt;local path&gt;
-            /taskTracker/$user/jobcache/$jobid/${taskid}/work</code><br/>
-            <code>
-              $ bin/hadoop org.apache.hadoop.mapred.IsolationRunner ../job.xml
-            </code>
-          </p>
-          
-          <p><code>IsolationRunner</code> will run the failed task in a single
-          jvm, which can be in the debugger, over precisely the same input.</p>
-        </section>
-
-        <section>
           <title>Profiling</title>
           <p>Profiling is a utility to get a representative (2 or 3) sample
           of built-in java profiler for a sample of maps and reduces. </p>

Modified: hadoop/common/trunk/mapreduce/src/docs/src/documentation/content/xdocs/site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/mapreduce/src/docs/src/documentation/content/xdocs/site.xml?rev=1145413&r1=1145412&r2=1145413&view=diff
==============================================================================
--- hadoop/common/trunk/mapreduce/src/docs/src/documentation/content/xdocs/site.xml (original)
+++ hadoop/common/trunk/mapreduce/src/docs/src/documentation/content/xdocs/site.xml Tue Jul 12 00:54:48 2011
@@ -187,7 +187,6 @@ See http://forrest.apache.org/docs/linki
               <filesplit href="FileSplit.html" />
               <inputformat href="InputFormat.html" />
               <inputsplit href="InputSplit.html" />
-              <isolationrunner href="IsolationRunner.html" />
               <jobclient href="JobClient.html">
                 <runjob href="#runJob(org.apache.hadoop.mapred.JobConf)" />
                 <submitjob href="#submitJob(org.apache.hadoop.mapred.JobConf)" />

Modified: hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java?rev=1145413&r1=1145412&r2=1145413&view=diff
==============================================================================
--- hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java (original)
+++ hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/MapTask.java Tue Jul 12 00:54:48 2011
@@ -106,23 +106,6 @@ class MapTask extends Task {
   public void localizeConfiguration(JobConf conf)
       throws IOException {
     super.localizeConfiguration(conf);
-    // split.dta/split.info files are used only by IsolationRunner.
-    // Write the split file to the local disk if it is a normal map task (not a
-    // job-setup or a job-cleanup task) and if the user wishes to run
-    // IsolationRunner either by setting keep.failed.tasks.files to true or by
-    // using keep.tasks.files.pattern
-    if (supportIsolationRunner(conf) && isMapOrReduce()) {
-      // localize the split meta-information 
-      Path localSplitMeta =
-          new LocalDirAllocator(MRConfig.LOCAL_DIR).getLocalPathForWrite(
-              TaskTracker.getLocalSplitMetaFile(conf.getUser(), 
-                getJobID().toString(), getTaskID()
-                  .toString()), conf);
-      LOG.debug("Writing local split to " + localSplitMeta);
-      DataOutputStream out = FileSystem.getLocal(conf).create(localSplitMeta);
-      splitMetaInfo.write(out);
-      out.close();
-    }
   }
   
   

Modified: hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/Task.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/Task.java?rev=1145413&r1=1145412&r2=1145413&view=diff
==============================================================================
--- hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/Task.java (original)
+++ hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/Task.java Tue Jul 12 00:54:48 2011
@@ -1097,7 +1097,7 @@ abstract public class Task implements Wr
     
     // delete the staging area for the job
     JobConf conf = new JobConf(jobContext.getConfiguration());
-    if (!supportIsolationRunner(conf)) {
+    if (!keepTaskFiles(conf)) {
       String jobTempDir = conf.get("mapreduce.job.dir");
       Path jobTempDirPath = new Path(jobTempDir);
       FileSystem fs = jobTempDirPath.getFileSystem(conf);
@@ -1106,7 +1106,7 @@ abstract public class Task implements Wr
     done(umbilical, reporter);
   }
   
-  protected boolean supportIsolationRunner(JobConf conf) {
+  protected boolean keepTaskFiles(JobConf conf) {
     return (conf.getKeepTaskFilesPattern() != null || conf
         .getKeepFailedTaskFiles());
   }
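
The renamed helper keeps the existing behavior: task files are retained whenever either of the two JobConf keep-files settings is present. A minimal sketch of a driver opting in (hypothetical example code, not part of this commit):

    // Hypothetical driver snippet (not part of this commit) showing the two
    // JobConf settings that the renamed keepTaskFiles(conf) helper checks.
    import org.apache.hadoop.mapred.JobConf;

    public class KeepTaskFilesExample {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // Retain the local files of failed task attempts.
        conf.setKeepFailedTaskFiles(true);
        // Or retain files for task attempts whose id matches a pattern.
        conf.setKeepTaskFilesPattern(".*_m_000123_0");
        // keepTaskFiles(conf) returns true when either setting is set.
      }
    }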

Modified: hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java?rev=1145413&r1=1145412&r2=1145413&view=diff
==============================================================================
--- hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java (original)
+++ hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestMapProgress.java Tue Jul 12 00:54:48 2011
@@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
@@ -53,7 +54,6 @@ import org.apache.hadoop.util.Reflection
  *  TestTaskReporter instead of TaskReporter and call mapTask.run().
  *  Similar to LocalJobRunner, we set up splits and call mapTask.run()
  *  directly. No job is run, only map task is run.
- *  We use IsolationRunner.FakeUmbilical.
  *  As the reporter's setProgress() validates progress after
  *  every record is read, we are done with the validation of map phase progress
  *  once mapTask.run() is finished. Sort phase progress in map task is not
@@ -63,12 +63,90 @@ public class TestMapProgress extends Tes
   public static final Log LOG = LogFactory.getLog(TestMapProgress.class);
   private static String TEST_ROOT_DIR = new File(System.getProperty(
            "test.build.data", "/tmp")).getAbsolutePath() + "/mapPahseprogress";
+
+  static class FakeUmbilical implements TaskUmbilicalProtocol {
+
+    public long getProtocolVersion(String protocol, long clientVersion) {
+      return TaskUmbilicalProtocol.versionID;
+    }
+    
+    @Override
+    public ProtocolSignature getProtocolSignature(String protocol,
+        long clientVersion, int clientMethodsHash) throws IOException {
+      return ProtocolSignature.getProtocolSignature(
+          this, protocol, clientVersion, clientMethodsHash);
+    }
+
+    public void done(TaskAttemptID taskid) throws IOException {
+      LOG.info("Task " + taskid + " reporting done.");
+    }
+
+    public void fsError(TaskAttemptID taskId, String message) throws IOException {
+      LOG.info("Task " + taskId + " reporting file system error: " + message);
+    }
+
+    public void shuffleError(TaskAttemptID taskId, String message) throws IOException {
+      LOG.info("Task " + taskId + " reporting shuffle error: " + message);
+    }
+
+    public void fatalError(TaskAttemptID taskId, String msg) throws IOException {
+      LOG.info("Task " + taskId + " reporting fatal error: " + msg);
+    }
+
+    public JvmTask getTask(JvmContext context) throws IOException {
+      return null;
+    }
+
+    public boolean ping(TaskAttemptID taskid) throws IOException {
+      return true;
+    }
+
+    public void commitPending(TaskAttemptID taskId, TaskStatus taskStatus) 
+    throws IOException, InterruptedException {
+      statusUpdate(taskId, taskStatus);
+    }
+    
+    public boolean canCommit(TaskAttemptID taskid) throws IOException {
+      return true;
+    }
+    
+    public boolean statusUpdate(TaskAttemptID taskId, TaskStatus taskStatus) 
+    throws IOException, InterruptedException {
+      StringBuffer buf = new StringBuffer("Task ");
+      buf.append(taskId);
+      buf.append(" making progress to ");
+      buf.append(taskStatus.getProgress());
+      String state = taskStatus.getStateString();
+      if (state != null) {
+        buf.append(" and state of ");
+        buf.append(state);
+      }
+      LOG.info(buf.toString());
+      // ignore phase
+      // ignore counters
+      return true;
+    }
+
+    public void reportDiagnosticInfo(TaskAttemptID taskid, String trace) throws IOException {
+      LOG.info("Task " + taskid + " has problem " + trace);
+    }
+    
+    public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId, 
+        int fromEventId, int maxLocs, TaskAttemptID id) throws IOException {
+      return new MapTaskCompletionEventsUpdate(TaskCompletionEvent.EMPTY_ARRAY, 
+                                               false);
+    }
+
+    public void reportNextRecordRange(TaskAttemptID taskid, 
+        SortedRanges.Range range) throws IOException {
+      LOG.info("Task " + taskid + " reportedNextRecordRange " + range);
+    }
+  }
   
   private FileSystem fs = null;
   private TestMapTask map = null;
   private JobID jobId = null;
-  private IsolationRunner.FakeUmbilical fakeUmbilical =
-                                        new IsolationRunner.FakeUmbilical();
+  private FakeUmbilical fakeUmbilical = new FakeUmbilical();
 
   /**
    *  Task Reporter that validates map phase progress after each record is


