hadoop-mapreduce-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From d...@apache.org
Subject svn commit: r794541 - in /hadoop/mapreduce/trunk: CHANGES.txt src/java/org/apache/hadoop/mapred/JobTracker.java src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java
Date Thu, 16 Jul 2009 05:47:31 GMT
Author: ddas
Date: Thu Jul 16 05:47:31 2009
New Revision: 794541

URL: http://svn.apache.org/viewvc?rev=794541&view=rev
Log:
MAPREDUCE-630. Improves execution time of TestKillCompletedJob. Contributed by Jothi Padmanabhan.

Modified:
    hadoop/mapreduce/trunk/CHANGES.txt
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
    hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java

Modified: hadoop/mapreduce/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/CHANGES.txt?rev=794541&r1=794540&r2=794541&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/CHANGES.txt (original)
+++ hadoop/mapreduce/trunk/CHANGES.txt Thu Jul 16 05:47:31 2009
@@ -105,6 +105,9 @@
     MAPREDUCE-627. Improves execution time of TestTrackerBlacklistAcrossJobs.
     (Jothi Padmanabhan via ddas)
 
+    MAPREDUCE-630. Improves execution time of TestKillCompletedJob.
+    (Jothi Padmanabhan via ddas)
+
   BUG FIXES
     MAPREDUCE-703. Sqoop requires dependency on hsqldb in ivy.
     (Aaron Kimball via matei)

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java?rev=794541&r1=794540&r2=794541&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java Thu Jul 16 05:47:31 2009
@@ -3362,7 +3362,7 @@
    * adding a job. This is the core job submission logic
    * @param jobId The id for the job submitted which needs to be added
    */
-  private synchronized JobStatus addJob(JobID jobId, JobInProgress job) {
+  synchronized JobStatus addJob(JobID jobId, JobInProgress job) {
     totalSubmissions++;
 
     synchronized (jobs) {

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java?rev=794541&r1=794540&r2=794541&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java Thu Jul 16 05:47:31 2009
@@ -15,110 +15,51 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.mapred;
 
-import java.io.*;
-import java.net.*;
-import junit.framework.TestCase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.Text;
+import java.io.IOException;
 
+import junit.framework.TestCase;
 
+import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTracker;
+import org.apache.hadoop.mapred.TestRackAwareTaskPlacement.MyFakeJobInProgress;
+import org.apache.hadoop.mapred.UtilsForTests.FakeClock;
 
 /**
  * A JUnit test to test that killing completed jobs does not move them
  * to the failed state - See JIRA HADOOP-2132
  */
 public class TestKillCompletedJob extends TestCase {
+
+  MyFakeJobInProgress job;
+  static FakeJobTracker jobTracker;
+ 
+  static FakeClock clock;
   
-  
-  static Boolean launchWordCount(String fileSys,
-                                String jobTracker,
-                                JobConf conf,
-                                String input,
-                                int numMaps,
-                                int numReduces) throws IOException {
-    final Path inDir = new Path("/testing/wc/input");
-    final Path outDir = new Path("/testing/wc/output");
-    FileSystem fs = FileSystem.get(URI.create(fileSys), conf);
-    fs.delete(outDir, true);
-    if (!fs.mkdirs(inDir)) {
-      throw new IOException("Mkdirs failed to create " + inDir.toString());
-    }
-    {
-      DataOutputStream file = fs.create(new Path(inDir, "part-0"));
-      file.writeBytes(input);
-      file.close();
-    }
-
-    FileSystem.setDefaultUri(conf, fileSys);
-    conf.set("mapred.job.tracker", jobTracker);
-    conf.setJobName("wordcount");
-    conf.setInputFormat(TextInputFormat.class);
-    
-    // the keys are words (strings)
-    conf.setOutputKeyClass(Text.class);
-    // the values are counts (ints)
-    conf.setOutputValueClass(IntWritable.class);
-    
-    conf.setMapperClass(WordCount.MapClass.class);
-    conf.setCombinerClass(WordCount.Reduce.class);
-    conf.setReducerClass(WordCount.Reduce.class);
-    
-    FileInputFormat.setInputPaths(conf, inDir);
-    FileOutputFormat.setOutputPath(conf, outDir);
-    conf.setNumMapTasks(numMaps);
-    conf.setNumReduceTasks(numReduces);
-
-    RunningJob rj = JobClient.runJob(conf);
-    JobID jobId = rj.getID();
-    
-    // Kill the job after it is successful
-    if (rj.isSuccessful())
-    {
-      System.out.println("Job Id:" + jobId + 
-        " completed successfully. Killing it now");
-      rj.killJob();
-    }
-    
-       
-    return rj.isSuccessful();
-      
-  }
+  static String trackers[] = new String[] {"tracker_tracker1:1000"};
 
-     
-  public void testKillCompJob() throws IOException {
-    String namenode = null;
-    MiniDFSCluster dfs = null;
-    MiniMRCluster mr = null;
-    FileSystem fileSys = null;
-    try {
-      final int taskTrackers = 1;
-
-      Configuration conf = new Configuration();
-      dfs = new MiniDFSCluster(conf, 1, true, null);
-      fileSys = dfs.getFileSystem();
-      namenode = fileSys.getUri().toString();
-      mr = new MiniMRCluster(taskTrackers, namenode, 3);
-      JobConf jobConf = new JobConf();
-    
-      Boolean result;
-      final String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
-      result = launchWordCount(namenode, jobTrackerName, jobConf, 
-                               "Small text\n",
-                               1, 0);
-      assertTrue(result);
-          
-    } finally {
-      if (dfs != null) { dfs.shutdown(); }
-      if (mr != null) { mr.shutdown();
-      }
-    }
+  @Override
+  protected void setUp() throws Exception {
+    JobConf conf = new JobConf();
+    conf.set("mapred.job.tracker", "localhost:0");
+    conf.set("mapred.job.tracker.http.address", "0.0.0.0:0");
+    conf.setLong("mapred.tasktracker.expiry.interval", 1000);
+    jobTracker = new FakeJobTracker(conf, (clock = new FakeClock()), trackers);
   }
+
   
+  @SuppressWarnings("deprecation")
+  public void testKillCompletedJob() throws IOException, InterruptedException {
+    job = new MyFakeJobInProgress(new JobConf(), jobTracker);
+    jobTracker.addJob(job.getJobID(), job);
+    job.status.setRunState(JobStatus.SUCCEEDED);
+
+    jobTracker.killJob(job.getJobID());
+
+    assertTrue("Run state changed when killing completed job" ,
+        job.status.getRunState() == JobStatus.SUCCEEDED);
+
+  }
+
 }
+ 



Mime
View raw message