hadoop-mapreduce-commits mailing list archives

From: acmur...@apache.org
Subject: svn commit: r816496 [2/4] - in /hadoop/mapreduce/trunk: ./ src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ src/contrib/fairscheduler/src/test/org/apache/hadoop/mapred/ src/contrib/gridmix/src/java/org/apache/hadoop/mapred/gridmix/ src...
Date: Fri, 18 Sep 2009 07:04:45 GMT
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueInfo.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueInfo.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueInfo.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueInfo.java Fri Sep 18 07:04:42 2009
@@ -17,42 +17,29 @@
  */
 package org.apache.hadoop.mapred;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.List;
 import java.util.ArrayList;
+import java.util.List;
 import java.util.Properties;
 
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.QueueInfo;
+import org.apache.hadoop.mapreduce.QueueState;
 
 /**
  * Class that contains the information regarding the Job Queues which are 
  * maintained by the Hadoop Map/Reduce framework.
- * 
+ * @deprecated Use {@link QueueInfo} instead
  */
-
-public class JobQueueInfo implements Writable {
-
-  private String queueName = "";
-  //The scheduling Information object is read back as String.
-  //Once the scheduling information is set there is no way to recover it.
-  private String schedulingInfo; 
-  
-  private String queueState;
-
-  private List<JobQueueInfo> children;
-
-  private Properties props;
+@Deprecated
+public class JobQueueInfo extends QueueInfo {
 
   /**
    * Default constructor for Job Queue Info.
    * 
    */
   public JobQueueInfo() {
-    children = new ArrayList<JobQueueInfo>();
+    super();  
   }
+
   /**
    * Construct a new JobQueueInfo object using the queue name and the
    * scheduling information passed.
@@ -62,30 +49,24 @@
    * queue
    */
   public JobQueueInfo(String queueName, String schedulingInfo) {
-    this.queueName = queueName;
-    this.schedulingInfo = schedulingInfo;
-    // make it running by default.
-    this.queueState = Queue.QueueState.RUNNING.getStateName();
-    children = new ArrayList<JobQueueInfo>();
+    super(queueName, schedulingInfo);
   }
   
+  JobQueueInfo(QueueInfo queue) {
+    this(queue.getQueueName(), queue.getSchedulingInfo());
+    setQueueState(queue.getState().name());
+    setQueueChildren(queue.getQueueChildren());
+    setProperties(queue.getProperties());
+    setJobStatuses(queue.getJobStatuses());
+  }
   
   /**
    * Set the queue name of the JobQueueInfo
    * 
    * @param queueName Name of the job queue.
    */
-  public void setQueueName(String queueName) {
-    this.queueName = queueName;
-  }
-
-  /**
-   * Get the queue name from JobQueueInfo
-   * 
-   * @return queue name
-   */
-  public String getQueueName() {
-    return queueName;
+  protected void setQueueName(String queueName) {
+    super.setQueueName(queueName);
   }
 
   /**
@@ -93,82 +74,44 @@
    * 
    * @param schedulingInfo
    */
-  public void setSchedulingInfo(String schedulingInfo) {
-    this.schedulingInfo = schedulingInfo;
+  protected void setSchedulingInfo(String schedulingInfo) {
+    super.setSchedulingInfo(schedulingInfo);
   }
 
   /**
-   * Gets the scheduling information associated to particular job queue.
-   * If nothing is set would return <b>"N/A"</b>
-   * 
-   * @return Scheduling information associated to particular Job Queue
-   */
-  public String getSchedulingInfo() {
-    if(schedulingInfo != null) {
-      return schedulingInfo;
-    }else {
-      return "N/A";
-    }
-  }
-  
-  /**
    * Set the state of the queue
    * @param state state of the queue.
    */
-  public void setQueueState(String state) {
-    queueState = state;
+  protected void setQueueState(String state) {
+    super.setState(QueueState.valueOf(state));
   }
   
-  /**
-   * Return the queue state
-   * @return the queue state.
-   */
-  public String getQueueState() {
-    return queueState;
+  String getQueueState() {
+    return super.getState().toString();
   }
-
-  public List<JobQueueInfo> getChildren() {
-    return children;
-  }
-
-  public void setChildren(List<JobQueueInfo> children) {
-    this.children =  children; 
+  
+  protected void setChildren(List<JobQueueInfo> children) {
+    List<QueueInfo> list = new ArrayList<QueueInfo>();
+    for (JobQueueInfo q : children) {
+      list.add(q);
+    }
+    super.setQueueChildren(list);
   }
 
-  Properties getProperties() {
-    return props;
+  public List<JobQueueInfo> getChildren() {
+    List<JobQueueInfo> list = new ArrayList<JobQueueInfo>();
+    for (QueueInfo q : super.getQueueChildren()) {
+      list.add(new JobQueueInfo(q));
+    }
+    return list;
   }
 
-  void setProperties(Properties props) {
-    this.props = props;
+  protected void setProperties(Properties props) {
+    super.setProperties(props);
   }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    queueName = Text.readString(in);
-    queueState = Text.readString(in);
-    schedulingInfo = Text.readString(in);
-    int count = in.readInt();
-    children.clear();
-    for (int i = 0; i < count; i++) {
-      JobQueueInfo childQueueInfo = new JobQueueInfo();
-      childQueueInfo.readFields(in);
-      children.add(childQueueInfo);
-    }
+  
+  protected void setJobStatuses(org.apache.hadoop.mapreduce.JobStatus[] stats) {
+    super.setJobStatuses(stats);
   }
 
-  @Override
-  public void write(DataOutput out) throws IOException {
-    Text.writeString(out, queueName);
-    Text.writeString(out, queueState);
-    if (schedulingInfo != null) {
-      Text.writeString(out, schedulingInfo);
-    } else {
-      Text.writeString(out, "N/A");
-    }
-    out.writeInt(children.size());
-    for(JobQueueInfo childQueueInfo : children) {
-      childQueueInfo.write(out);
-    }
-  }
 }

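For reference, a minimal sketch (not part of this patch) of what the change above means for callers: JobQueueInfo is now a deprecated wrapper over the new org.apache.hadoop.mapreduce.QueueInfo, and its getChildren() copies each child into a fresh JobQueueInfo on every call, so new code should walk the QueueInfo tree directly. All names used below appear in the patch itself.

import org.apache.hadoop.mapred.JobQueueInfo;   // old API, now @Deprecated
import org.apache.hadoop.mapreduce.QueueInfo;   // replacement

public class QueueTreeWalk {
  // Old style: recurse over JobQueueInfo. Each getChildren() call rebuilds
  // the child list as JobQueueInfo copies of the underlying QueueInfo objects.
  static void printOld(JobQueueInfo queue, String indent) {
    System.out.println(indent + queue.getQueueName());
    for (JobQueueInfo child : queue.getChildren()) {
      printOld(child, indent + "  ");
    }
  }

  // New style: recurse over QueueInfo directly, with no per-call copying.
  static void printNew(QueueInfo queue, String indent) {
    System.out.println(indent + queue.getQueueName());
    for (QueueInfo child : queue.getQueueChildren()) {
      printNew(child, indent + "  ");
    }
  }
}

Because getChildren() now returns copies, mutating a returned child no longer affects the tree held by the parent queue; that is a behavioral difference from the old field-backed implementation.
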
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobStatus.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobStatus.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobStatus.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobStatus.java Fri Sep 18 07:04:42 2009
@@ -17,41 +17,32 @@
  */
 package org.apache.hadoop.mapred;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableUtils;
-
 /**************************************************
  * Describes the current status of a job.  This is
  * not intended to be a comprehensive piece of data.
  * For that, look at JobProfile.
- **************************************************/
-public class JobStatus implements Writable, Cloneable {
-
-  static {                                      // register a ctor
-    WritableFactories.setFactory
-      (JobStatus.class,
-       new WritableFactory() {
-         public Writable newInstance() { return new JobStatus(); }
-       });
-  }
-
-  public static final int RUNNING = 1;
-  public static final int SUCCEEDED = 2;
-  public static final int FAILED = 3;
-  public static final int PREP = 4;
-  public static final int KILLED = 5;
+ *************************************************
+ *@deprecated Use {@link org.apache.hadoop.mapreduce.JobStatus} instead
+ **/
+@Deprecated
+public class JobStatus extends org.apache.hadoop.mapreduce.JobStatus {
+
+  public static final int RUNNING = 
+    org.apache.hadoop.mapreduce.JobStatus.State.RUNNING.getValue();
+  public static final int SUCCEEDED = 
+    org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED.getValue();
+  public static final int FAILED = 
+    org.apache.hadoop.mapreduce.JobStatus.State.FAILED.getValue();
+  public static final int PREP = 
+    org.apache.hadoop.mapreduce.JobStatus.State.PREP.getValue();
+  public static final int KILLED = 
+    org.apache.hadoop.mapreduce.JobStatus.State.KILLED.getValue();
 
   private static final String UNKNOWN = "UNKNOWN";
-  private static final String[] runStates =
-      {UNKNOWN, "RUNNING", "SUCCEEDED", "FAILED", "PREP", "KILLED"};
   
+  private static final String[] runStates =
+    {UNKNOWN, "RUNNING", "SUCCEEDED", "FAILED", "PREP", "KILLED"};
+
   /**
    * Helper method to get human-readable state of the job.
    * @param state job state
@@ -64,25 +55,17 @@
     return runStates[state];
   }
   
-  private JobID jobid;
-  private float mapProgress;
-  private float reduceProgress;
-  private float cleanupProgress;
-  private float setupProgress;
-  private int runState;
-  private long startTime;
-  private String user;
-  private JobPriority priority;
-  private String schedulingInfo="NA";
-
-  private String jobName;
-  private String jobFile;
-  private long finishTime;
-  private boolean isRetired;
-  private String historyFile = "";
-  private String trackingUrl ="";
-
-    
+  static org.apache.hadoop.mapreduce.JobStatus.State getEnum(int state) {
+    switch (state) {
+      case 1: return org.apache.hadoop.mapreduce.JobStatus.State.RUNNING;
+      case 2: return org.apache.hadoop.mapreduce.JobStatus.State.SUCCEEDED;
+      case 3: return org.apache.hadoop.mapreduce.JobStatus.State.FAILED;
+      case 4: return org.apache.hadoop.mapreduce.JobStatus.State.PREP;
+      case 5: return org.apache.hadoop.mapreduce.JobStatus.State.KILLED;
+    }
+    return null;
+  }
+  
   /**
    */
   public JobStatus() {
@@ -164,295 +147,174 @@
                     float reduceProgress, float cleanupProgress, 
                     int runState, JobPriority jp, String user, String jobName, 
                     String jobFile, String trackingUrl) {
-     this.jobid = jobid;
-     this.setupProgress = setupProgress;
-     this.mapProgress = mapProgress;
-     this.reduceProgress = reduceProgress;
-     this.cleanupProgress = cleanupProgress;
-     this.runState = runState;
-     this.user = user;
-     if (jp == null) {
-       throw new IllegalArgumentException("Job Priority cannot be null.");
-     }
-     priority = jp;
-     this.jobName = jobName;
-     this.jobFile = jobFile;
-     this.trackingUrl = trackingUrl;
+     super(jobid, setupProgress, mapProgress, reduceProgress, cleanupProgress,
+       getEnum(runState), org.apache.hadoop.mapreduce.JobPriority.valueOf(jp.name()),
+       user, jobName, jobFile, trackingUrl);
    }
    
+  public static JobStatus downgrade(org.apache.hadoop.mapreduce.JobStatus stat){
+    JobStatus old = new JobStatus(JobID.downgrade(stat.getJobID()),
+      stat.getSetupProgress(), stat.getMapProgress(), stat.getReduceProgress(),
+      stat.getCleanupProgress(), stat.getState().getValue(), 
+      JobPriority.valueOf(stat.getPriority().name()),
+      stat.getUsername(), stat.getJobName(), stat.getJobFile(),
+      stat.getTrackingUrl());
+    old.setStartTime(stat.getStartTime());
+    old.setFinishTime(stat.getFinishTime());
+    old.setSchedulingInfo(stat.getSchedulingInfo());
+    old.setHistoryFile(stat.getHistoryFile());
+    return old;
+  }
   /**
    * @deprecated use getJobID instead
    */
   @Deprecated
-  public String getJobId() { return jobid.toString(); }
+  public String getJobId() { return getJobID().toString(); }
   
   /**
    * @return The jobid of the Job
    */
-  public JobID getJobID() { return jobid; }
-    
-  /**
-   * @return Percentage of progress in maps 
-   */
-  public synchronized float mapProgress() { return mapProgress; }
-    
-  /**
-   * Sets the map progress of this job
-   * @param p The value of map progress to set to
-   */
-  synchronized void setMapProgress(float p) { 
-    this.mapProgress = (float) Math.min(1.0, Math.max(0.0, p)); 
-  }
-
-  /**
-   * @return Percentage of progress in cleanup 
-   */
-  public synchronized float cleanupProgress() { return cleanupProgress; }
-    
-  /**
-   * Sets the cleanup progress of this job
-   * @param p The value of cleanup progress to set to
-   */
-  synchronized void setCleanupProgress(float p) { 
-    this.cleanupProgress = (float) Math.min(1.0, Math.max(0.0, p)); 
-  }
-
-  /**
-   * @return Percentage of progress in setup 
-   */
-  public synchronized float setupProgress() { return setupProgress; }
-    
-  /**
-   * Sets the setup progress of this job
-   * @param p The value of setup progress to set to
-   */
-  synchronized void setSetupProgress(float p) { 
-    this.setupProgress = (float) Math.min(1.0, Math.max(0.0, p)); 
-  }
-
-  /**
-   * @return Percentage of progress in reduce 
-   */
-  public synchronized float reduceProgress() { return reduceProgress; }
-    
-  /**
-   * Sets the reduce progress of this Job
-   * @param p The value of reduce progress to set to
-   */
-  synchronized void setReduceProgress(float p) { 
-    this.reduceProgress = (float) Math.min(1.0, Math.max(0.0, p)); 
-  }
-    
-  /**
-   * @return running state of the job
-   */
-  public synchronized int getRunState() { return runState; }
-    
-  /**
-   * Change the current run state of the job.
-   */
-  public synchronized void setRunState(int state) {
-    this.runState = state;
-  }
-
-  /** 
-   * Set the start time of the job
-   * @param startTime The startTime of the job
-   */
-  synchronized void setStartTime(long startTime) { this.startTime = startTime;}
-    
-  /**
-   * @return start time of the job
-   */
-  synchronized public long getStartTime() { return startTime;}
-
-  @Override
-  public Object clone() {
-    try {
-      return super.clone();
-    } catch (CloneNotSupportedException cnse) {
-      // Shouldn't happen since we do implement Cloneable
-      throw new InternalError(cnse.toString());
-    }
-  }
-  
-  /**
-   * @param user The username of the job
-   */
-  synchronized void setUsername(String userName) { this.user = userName;}
-
-  /**
-   * @return the username of the job
-   */
-  public synchronized String getUsername() { return this.user;}
-  
-  /**
-   * Gets the Scheduling information associated to a particular Job.
-   * @return the scheduling information of the job
-   */
-  public synchronized String getSchedulingInfo() {
-   return schedulingInfo;
-  }
-
-  /**
-   * Used to set the scheduling information associated to a particular Job.
-   * 
-   * @param schedulingInfo Scheduling information of the job
-   */
-  public synchronized void setSchedulingInfo(String schedulingInfo) {
-    this.schedulingInfo = schedulingInfo;
-  }
+  public JobID getJobID() { return JobID.downgrade(super.getJobID()); }
   
   /**
    * Return the priority of the job
    * @return job priority
    */
-   public synchronized JobPriority getJobPriority() { return priority; }
-  
-  /**
-   * Set the priority of the job, defaulting to NORMAL.
-   * @param jp new job priority
-   */
-   public synchronized void setJobPriority(JobPriority jp) {
-     if (jp == null) {
-       throw new IllegalArgumentException("Job priority cannot be null.");
-     }
-     priority = jp;
+   public synchronized JobPriority getJobPriority() { 
+     return JobPriority.valueOf(super.getPriority().name());
    }
-  
+
    /**
-    * Returns true if the status is for a completed job.
+    * Sets the map progress of this job
+    * @param p The value of map progress to set to
     */
-   public synchronized boolean isJobComplete() {
-     return (runState == JobStatus.SUCCEEDED || runState == JobStatus.FAILED 
-             || runState == JobStatus.KILLED);
-   }
-
-  ///////////////////////////////////////
-  // Writable
-  ///////////////////////////////////////
-  public synchronized void write(DataOutput out) throws IOException {
-    jobid.write(out);
-    out.writeFloat(setupProgress);
-    out.writeFloat(mapProgress);
-    out.writeFloat(reduceProgress);
-    out.writeFloat(cleanupProgress);
-    out.writeInt(runState);
-    out.writeLong(startTime);
-    Text.writeString(out, user);
-    WritableUtils.writeEnum(out, priority);
-    Text.writeString(out, schedulingInfo);
-    out.writeLong(finishTime);
-    out.writeBoolean(isRetired);
-    Text.writeString(out, historyFile);
-    Text.writeString(out, jobName);
-    Text.writeString(out, trackingUrl);
-    Text.writeString(out, jobFile);
-  }
-
-  public synchronized void readFields(DataInput in) throws IOException {
-    this.jobid = JobID.read(in);
-    this.setupProgress = in.readFloat();
-    this.mapProgress = in.readFloat();
-    this.reduceProgress = in.readFloat();
-    this.cleanupProgress = in.readFloat();
-    this.runState = in.readInt();
-    this.startTime = in.readLong();
-    this.user = Text.readString(in);
-    this.priority = WritableUtils.readEnum(in, JobPriority.class);
-    this.schedulingInfo = Text.readString(in);
-    this.finishTime = in.readLong();
-    this.isRetired = in.readBoolean();
-    this.historyFile = Text.readString(in);
-    this.jobName = Text.readString(in);
-    this.trackingUrl = Text.readString(in);
-    this.jobFile = Text.readString(in);
-  }
+   protected synchronized void setMapProgress(float p) { 
+     super.setMapProgress(p); 
+   }
 
-  /**
-   * Get the user-specified job name.
-   */
-  public String getJobName() {
-    return jobName;
-  }
+   /**
+    * Sets the cleanup progress of this job
+    * @param p The value of cleanup progress to set to
+    */
+   protected synchronized void setCleanupProgress(float p) { 
+     super.setCleanupProgress(p); 
+   }
 
-  /**
-   * Get the configuration file for the job.
-   */
-  public String getJobFile() {
-    return jobFile;
-  }
+   /**
+    * Sets the setup progress of this job
+    * @param p The value of setup progress to set to
+    */
+   protected synchronized void setSetupProgress(float p) { 
+     super.setSetupProgress(p); 
+   }
 
-  /**
-   * Get the link to the web-ui for details of the job.
-   */
-  public synchronized String getTrackingUrl() {
-    return trackingUrl;
-  }
+   /**
+    * Sets the reduce progress of this Job
+    * @param p The value of reduce progress to set to
+    */
+   protected synchronized void setReduceProgress(float p) { 
+     super.setReduceProgress(p); 
+   }
+     
+   /** 
+    * Set the finish time of the job
+    * @param finishTime The finishTime of the job
+    */
+   protected synchronized void setFinishTime(long finishTime) {
+     super.setFinishTime(finishTime);
+   }
 
-  /**
-   * Get the finish time of the job.
-   */
-  public synchronized long getFinishTime() { 
-    return finishTime;
-  }
+   /**
+    * Set the job history file url for a completed job
+    */
+   protected synchronized void setHistoryFile(String historyFile) {
+     super.setHistoryFile(historyFile);
+   }
 
-  /**
-   * Check whether the job has retired.
-   */
-  public synchronized boolean isRetired() {
-    return isRetired;
-  }
+   /**
+    * Set the link to the web-ui for details of the job.
+    */
+   protected synchronized void setTrackingUrl(String trackingUrl) {
+     super.setTrackingUrl(trackingUrl);
+   }
 
-  /**
-   * @return the job history file name for a completed job. If job is not 
-   * completed or history file not available then return null.
-   */
-  public synchronized String getHistoryFile() {
-    return historyFile;
-  }
+   /**
+    * Set the job retire flag to true.
+    */
+   protected synchronized void setRetired() {
+     super.setRetired();
+   }
 
- /** 
-   * Set the finish time of the job
-   * @param finishTime The finishTime of the job
-   */
-  synchronized void setFinishTime(long finishTime) {
-    this.finishTime = finishTime;
-  }
+   /**
+    * Change the current run state of the job.
+    */
+   protected synchronized void setRunState(int state) {
+     super.setState(getEnum(state));
+   }
 
-  /**
-   * Set the job history file url for a completed job
-   */
-  synchronized void setHistoryFile(String historyFile) {
-    this.historyFile = historyFile;
-  }
+   /**
+    * @return running state of the job
+    */
+   public synchronized int getRunState() { return super.getState().getValue(); }
+     
 
-  /**
-   * Set the link to the web-ui for details of the job.
-   */
-  synchronized void setTrackingUrl(String trackingUrl) {
-    this.trackingUrl = trackingUrl;
-  }
+   /** 
+    * Set the start time of the job
+    * @param startTime The startTime of the job
+    */
+   protected synchronized void setStartTime(long startTime) { 
+     super.setStartTime(startTime);
+   }
+     
+   /**
+    * @param userName The username of the job
+    */
+   protected synchronized void setUsername(String userName) { 
+     super.setUsername(userName);
+   }
 
+   /**
+    * Used to set the scheduling information associated to a particular Job.
+    * 
+    * @param schedulingInfo Scheduling information of the job
+    */
+   protected synchronized void setSchedulingInfo(String schedulingInfo) {
+     super.setSchedulingInfo(schedulingInfo);
+   }
+   
   /**
-   * Set the job retire flag to true.
+   * Set the priority of the job, defaulting to NORMAL.
+   * @param jp new job priority
    */
-  synchronized void setRetired() {
-    this.isRetired = true;
-  }
+   public synchronized void setJobPriority(JobPriority jp) {
+     super.setPriority(
+       org.apache.hadoop.mapreduce.JobPriority.valueOf(jp.name()));
+   }
+  
+   /**
+    * @return Percentage of progress in maps 
+    */
+   public synchronized float mapProgress() { return super.getMapProgress(); }
+     
+   /**
+    * @return Percentage of progress in cleanup 
+    */
+   public synchronized float cleanupProgress() { 
+     return super.getCleanupProgress(); 
+   }
+     
+   /**
+    * @return Percentage of progress in setup 
+    */
+   public synchronized float setupProgress() { 
+     return super.getSetupProgress(); 
+   }
+     
+   /**
+    * @return Percentage of progress in reduce 
+    */
+   public synchronized float reduceProgress() { 
+     return super.getReduceProgress(); 
+   }
 
-  public String toString() {
-    StringBuffer buffer = new StringBuffer();
-    buffer.append("job-id : " + jobid);
-    buffer.append("map-progress : " + mapProgress);
-    buffer.append("reduce-progress : " + reduceProgress);
-    buffer.append("cleanup-progress : " + cleanupProgress);
-    buffer.append("setup-progress : " + setupProgress);
-    buffer.append("runstate : " + runState);
-    buffer.append("start-time : " + startTime);
-    buffer.append("user-name : " + user);
-    buffer.append("priority : " + priority);
-    buffer.append("scheduling-info : " + schedulingInfo);
-    return buffer.toString();
-  }
-}
+}
\ No newline at end of file

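For reference, a minimal sketch (not part of this patch) of the state mapping this change introduces: the public int constants survive for compatibility but are now derived from the new JobStatus.State enum (RUNNING=1 through KILLED=5, as getEnum() shows), and the new static downgrade() copies a new-API status into the old type. Only names that appear in the patch are used.

import org.apache.hadoop.mapred.JobStatus;           // old API, now @Deprecated
import org.apache.hadoop.mapreduce.JobStatus.State;  // new state enum

public class JobStateMapping {
  public static void main(String[] args) {
    // Old code keeps compiling: the int constants are now backed by the enum,
    // e.g. JobStatus.RUNNING == State.RUNNING.getValue().
    int runState = JobStatus.RUNNING;
    boolean complete = runState == JobStatus.SUCCEEDED
        || runState == JobStatus.FAILED
        || runState == JobStatus.KILLED;
    System.out.println("complete: " + complete);

    // New code works with the enum directly.
    for (State s : State.values()) {
      System.out.println(s.name() + " = " + s.getValue());
    }
    // Crossing back from the new API into old-API code:
    //   JobStatus old = JobStatus.downgrade(newApiStatus);
  }
}
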
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobSubmissionProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobSubmissionProtocol.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobSubmissionProtocol.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobSubmissionProtocol.java Fri Sep 18 07:04:42 2009
@@ -1,250 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-import java.io.IOException;
-
-import org.apache.hadoop.ipc.VersionedProtocol;
-
-/** 
- * Protocol that a JobClient and the central JobTracker use to communicate.  The
- * JobClient can use these methods to submit a Job for execution, and learn about
- * the current system status.
- */ 
-interface JobSubmissionProtocol extends VersionedProtocol {
-  /* 
-   *Changing the versionID to 2L since the getTaskCompletionEvents method has
-   *changed.
-   *Changed to 4 since killTask(String,boolean) is added
-   *Version 4: added jobtracker state to ClusterStatus
-   *Version 5: max_tasks in ClusterStatus is replaced by
-   * max_map_tasks and max_reduce_tasks for HADOOP-1274
-   * Version 6: change the counters representation for HADOOP-2248
-   * Version 7: added getAllJobs for HADOOP-2487
-   * Version 8: change {job|task}id's to use corresponding objects rather than strings.
-   * Version 9: change the counter representation for HADOOP-1915
-   * Version 10: added getSystemDir for HADOOP-3135
-   * Version 11: changed JobProfile to include the queue name for HADOOP-3698
-   * Version 12: Added getCleanupTaskReports and 
-   *             cleanupProgress to JobStatus as part of HADOOP-3150
-   * Version 13: Added getJobQueueInfos and getJobQueueInfo(queue name)
-   *             and getAllJobs(queue) as a part of HADOOP-3930
-   * Version 14: Added setPriority for HADOOP-4124
-   * Version 15: Added KILLED status to JobStatus as part of HADOOP-3924            
-   * Version 16: Added getSetupTaskReports and 
-   *             setupProgress to JobStatus as part of HADOOP-4261           
-   * Version 17: getClusterStatus returns the amount of memory used by 
-   *             the server. HADOOP-4435
-   * Version 18: Added blacklisted trackers to the ClusterStatus 
-   *             for HADOOP-4305
-   * Version 19: Modified TaskReport to have TIP status and modified the
-   *             method getClusterStatus() to take a boolean argument
-   *             for HADOOP-4807
-   * Version 20: Modified ClusterStatus to have the tasktracker expiry
-   *             interval for HADOOP-4939
-   * Version 21: Modified TaskID to be aware of the new TaskTypes                                 
-   * Version 22: Added method getQueueAclsForCurrentUser to get queue acls info
-   *             for a user
-   * Version 23: Modified the JobQueueInfo class to include queue state.
-   *             Part of HADOOP-5913.  
-   * Version 24: Modified ClusterStatus to include BlackListInfo class which 
-   *             encapsulates reasons and report for blacklisted node.          
-   * Version 25: Added fields to JobStatus for HADOOP-817.
-   *
-   * Version 26: Added properties to JobQueueInfo as part of MAPREDUCE-861.
-   *             added new api's getRootQueues and
-   *             getChildQueues(String queueName)
-   */
-  public static final long versionID = 26L;
-
-  /**
-   * Allocate a name for the job.
-   * @return a unique job name for submitting jobs.
-   * @throws IOException
-   */
-  public JobID getNewJobId() throws IOException;
-
-  /**
-   * Submit a Job for execution.  Returns the latest profile for
-   * that job.
-   * The job files should be submitted in <b>system-dir</b>/<b>jobName</b>.
-   */
-  public JobStatus submitJob(JobID jobName) throws IOException;
-
-  /**
-   * Get the current status of the cluster
-   * @param detailed if true then report tracker names as well
-   * @return summary of the state of the cluster
-   */
-  public ClusterStatus getClusterStatus(boolean detailed) throws IOException;
-  
-    
-  /**
-   * Kill the indicated job
-   */
-  public void killJob(JobID jobid) throws IOException;
-
-  /**
-   * Set the priority of the specified job
-   * @param jobid ID of the job
-   * @param priority Priority to be set for the job
-   */
-  public void setJobPriority(JobID jobid, String priority) 
-                                                      throws IOException;
-  /**
-   * Kill indicated task attempt.
-   * @param taskId the id of the task to kill.
-   * @param shouldFail if true the task is failed and added to failed tasks list, otherwise
-   * it is just killed, w/o affecting job failure status.  
-   */ 
-  public boolean killTask(TaskAttemptID taskId, boolean shouldFail) throws IOException;
-  
-  /**
-   * Grab a handle to a job that is already known to the JobTracker.
-   * @return Profile of the job, or null if not found. 
-   */
-  public JobProfile getJobProfile(JobID jobid) throws IOException;
-
-  /**
-   * Grab a handle to a job that is already known to the JobTracker.
-   * @return Status of the job, or null if not found.
-   */
-  public JobStatus getJobStatus(JobID jobid) throws IOException;
-
-  /**
-   * Grab the current job counters
-   */
-  public Counters getJobCounters(JobID jobid) throws IOException;
-    
-  /**
-   * Grab a bunch of info on the map tasks that make up the job
-   */
-  public TaskReport[] getMapTaskReports(JobID jobid) throws IOException;
-
-  /**
-   * Grab a bunch of info on the reduce tasks that make up the job
-   */
-  public TaskReport[] getReduceTaskReports(JobID jobid) throws IOException;
-
-  /**
-   * Grab a bunch of info on the cleanup tasks that make up the job
-   */
-  public TaskReport[] getCleanupTaskReports(JobID jobid) throws IOException;
-
-  /**
-   * Grab a bunch of info on the setup tasks that make up the job
-   */
-  public TaskReport[] getSetupTaskReports(JobID jobid) throws IOException;
-
-  /**
-   * A MapReduce system always operates on a single filesystem.  This 
-   * function returns the fs name.  ('local' if the localfs; 'addr:port' 
-   * if dfs).  The client can then copy files into the right locations 
-   * prior to submitting the job.
-   */
-  public String getFilesystemName() throws IOException;
-
-  /** 
-   * Get the jobs that are not completed and not failed
-   * @return array of JobStatus for the running/to-be-run
-   * jobs.
-   */
-  public JobStatus[] jobsToComplete() throws IOException;
-    
-  /** 
-   * Get all the jobs submitted. 
-   * @return array of JobStatus for the submitted jobs
-   */
-  public JobStatus[] getAllJobs() throws IOException;
-  
-  /**
-   * Get task completion events for the jobid, starting from fromEventId. 
-   * Returns empty array if no events are available. 
-   * @param jobid job id 
-   * @param fromEventId event id to start from.
-   * @param maxEvents the max number of events we want to look at 
-   * @return array of task completion events. 
-   * @throws IOException
-   */
-  public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid
-      , int fromEventId, int maxEvents) throws IOException;
-    
-  /**
-   * Get the diagnostics for a given task in a given job
-   * @param taskId the id of the task
-   * @return an array of the diagnostic messages
-   */
-  public String[] getTaskDiagnostics(TaskAttemptID taskId) 
-  throws IOException;
-
-  /**
-   * Grab the jobtracker system directory path where job-specific files are to be placed.
-   * 
-   * @return the system directory where job-specific files are to be placed.
-   */
-  public String getSystemDir();  
-  
-  /**
-   * Gets set of Job Queues associated with the Job Tracker
-   * 
-   * @return Array of the Job Queue Information Object
-   * @throws IOException 
-   */
-  public JobQueueInfo[] getQueues() throws IOException;
-  
-  /**
-   * Gets scheduling information associated with the particular Job queue
-   * 
-   * @param queue Queue Name
-   * @return Scheduling Information of the Queue
-   * @throws IOException 
-   */
-  public JobQueueInfo getQueueInfo(String queue) throws IOException;
-  
-  /**
-   * Gets all the jobs submitted to the particular Queue
-   * @param queue Queue name
-   * @return array of JobStatus for the submitted jobs
-   * @throws IOException
-   */
-  public JobStatus[] getJobsFromQueue(String queue) throws IOException;
-  
-  /**
-   * Gets the Queue ACLs for current user
-   * @return array of QueueAclsInfo object for current user.
-   * @throws IOException
-   */
-  public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException;
-
-  /**
-   * Gets the root level queues.
-   * @return array of JobQueueInfo object.
-   * @throws IOException
-   */
-  public JobQueueInfo[] getRootQueues() throws IOException;
-  
-
-  /**
-   * Returns immediate children of queueName.
-   * @param queueName
-   * @return array of JobQueueInfo which are children of queueName
-   * @throws IOException
-   */
-  public JobQueueInfo[] getChildQueues(String queueName) throws IOException;
-}

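JobSubmissionProtocol is deleted outright rather than deprecated; its role (and its versionID handshake) moves to org.apache.hadoop.mapreduce.protocol.ClientProtocol, as the JobTracker and LocalJobRunner changes below show. For reference, a hypothetical sketch of the handshake pattern such VersionedProtocol interfaces rely on; everything except the getProtocolVersion name is illustrative, not actual Hadoop RPC code.

import java.io.IOException;

interface VersionedProtocolSketch {
  long getProtocolVersion(String protocol, long clientVersion) throws IOException;
}

class RpcVersionCheck {
  // Before issuing any calls, a client confirms the server implements the
  // protocol at the version it was compiled against (e.g. versionID = 26L).
  static void check(VersionedProtocolSketch server, String protocolName,
                    long expectedVersion) throws IOException {
    long serverVersion = server.getProtocolVersion(protocolName, expectedVersion);
    if (serverVersion != expectedVersion) {
      throw new IOException("Version mismatch for " + protocolName
          + ": client=" + expectedVersion + ", server=" + serverVersion);
    }
  }
}
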
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java Fri Sep 18 07:04:42 2009
@@ -73,6 +73,12 @@
 import org.apache.hadoop.mapred.JobStatusChangeEvent.EventType;
 import org.apache.hadoop.mapred.JobTrackerStatistics.TaskTrackerStat;
 import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus;
+import org.apache.hadoop.mapreduce.ClusterMetrics;
+import org.apache.hadoop.mapreduce.QueueInfo;
+import org.apache.hadoop.mapreduce.QueueState;
+import org.apache.hadoop.mapreduce.TaskTrackerInfo;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistory;
 import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
 import org.apache.hadoop.net.DNSToSwitchMapping;
@@ -102,7 +108,7 @@
  *
  *******************************************************/
 public class JobTracker implements MRConstants, InterTrackerProtocol,
-    JobSubmissionProtocol, TaskTrackerManager,
+    ClientProtocol, TaskTrackerManager,
     RefreshAuthorizationPolicyProtocol, AdminOperationsProtocol {
 
   static{
@@ -243,8 +249,8 @@
                                  long clientVersion) throws IOException {
     if (protocol.equals(InterTrackerProtocol.class.getName())) {
       return InterTrackerProtocol.versionID;
-    } else if (protocol.equals(JobSubmissionProtocol.class.getName())){
-      return JobSubmissionProtocol.versionID;
+    } else if (protocol.equals(ClientProtocol.class.getName())){
+      return ClientProtocol.versionID;
     } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
       return RefreshAuthorizationPolicyProtocol.versionID;
     } else if (protocol.equals(AdminOperationsProtocol.class.getName())){
@@ -2736,19 +2742,46 @@
 
   /**
    * Allocates a new JobId string.
+   * @deprecated use {@link #getNewJobID()} instead
    */
+  @Deprecated
   public synchronized JobID getNewJobId() throws IOException {
-    return new JobID(getTrackerIdentifier(), nextJobId++);
+    return JobID.downgrade(getNewJobID());
   }
 
   /**
+   * Allocates a new JobId string.
+   */
+  public synchronized org.apache.hadoop.mapreduce.JobID getNewJobID()
+      throws IOException {
+    return new org.apache.hadoop.mapreduce.JobID(
+      getTrackerIdentifier(), nextJobId++);
+  }
+
+  /**
+   * JobTracker.submitJob() kicks off a new job.  
+   *
+   * Create a 'JobInProgress' object, which contains both JobProfile
+   * and JobStatus.  Those two sub-objects are sometimes shipped outside
+   * of the JobTracker.  But JobInProgress adds info that's useful for
+   * the JobTracker alone.
+   */
+  public synchronized org.apache.hadoop.mapreduce.JobStatus submitJob(
+      org.apache.hadoop.mapreduce.JobID jobId) throws IOException {
+    return submitJob(JobID.downgrade(jobId));
+  }
+  
+  /**
    * JobTracker.submitJob() kicks off a new job.  
    *
    * Create a 'JobInProgress' object, which contains both JobProfile
    * and JobStatus.  Those two sub-objects are sometimes shipped outside
    * of the JobTracker.  But JobInProgress adds info that's useful for
    * the JobTracker alone.
+   * @deprecated Use 
+   * {@link #submitJob(org.apache.hadoop.mapreduce.JobID)} instead
    */
+  @Deprecated
   public synchronized JobStatus submitJob(JobID jobId) throws IOException {
     return submitJob(jobId, 0);
   }
@@ -2884,7 +2917,63 @@
       }
     }
   }
-    
+  
+  public synchronized ClusterMetrics getClusterMetrics() {
+    return new ClusterMetrics(totalMaps, totalReduces, totalMapTaskCapacity,
+      totalReduceTaskCapacity, taskTrackers.size() - 
+      getBlacklistedTrackerCount(), 
+      getBlacklistedTrackerCount(), getExcludedNodes().size()) ;
+  }
+
+  public org.apache.hadoop.mapreduce.server.jobtracker.State 
+      getJobTrackerState() {
+    return org.apache.hadoop.mapreduce.server.jobtracker.
+      State.valueOf(state.name());
+  }
+  
+  public long getTaskTrackerExpiryInterval() {
+    return tasktrackerExpiryInterval;
+  }
+  
+  /** 
+   * Get all active trackers in cluster. 
+   * @return array of TaskTrackerInfo
+   */
+  public TaskTrackerInfo[] getActiveTrackers() 
+  throws IOException, InterruptedException {
+    List<String> activeTrackers = taskTrackerNames().get(0);
+    TaskTrackerInfo[] info = new TaskTrackerInfo[activeTrackers.size()];
+    for (int i = 0; i < activeTrackers.size(); i++) {
+      info[i] = new TaskTrackerInfo(activeTrackers.get(i));
+    }
+    return info;
+  }
+
+  /** 
+   * Get all blacklisted trackers in cluster. 
+   * @return array of TaskTrackerInfo
+   */
+  public TaskTrackerInfo[] getBlacklistedTrackers() 
+  throws IOException, InterruptedException {
+    Collection<BlackListInfo> blackListed = getBlackListedTrackers();
+    TaskTrackerInfo[] info = new TaskTrackerInfo[blackListed.size()];
+    int i = 0;
+    for (BlackListInfo binfo : blackListed) {
+      info[i++] = new TaskTrackerInfo(binfo.getTrackerName(),
+        binfo.getReasonForBlackListing(), binfo.getBlackListReport());
+    }
+    return info;
+  }
+
+  public synchronized void killJob(org.apache.hadoop.mapreduce.JobID jobid) 
+      throws IOException {
+    killJob(JobID.downgrade(jobid));
+  }
+  
+  /**
+   * @deprecated Use {@link #killJob(org.apache.hadoop.mapreduce.JobID)} instead 
+   */
+  @Deprecated
   public synchronized void killJob(JobID jobid) throws IOException {
     if (null == jobid) {
       LOG.info("Null jobid object sent to JobTracker.killJob()");
@@ -3012,6 +3101,18 @@
    * @param jobid id of the job
    * @param priority new priority of the job
    */
+  public synchronized void setJobPriority(org.apache.hadoop.mapreduce.JobID 
+      jobid, String priority) throws IOException {
+    setJobPriority(JobID.downgrade(jobid), priority);
+  }
+  /**
+   * Set the priority of a job
+   * @param jobid id of the job
+   * @param priority new priority of the job
+   * @deprecated Use 
+   * {@link #setJobPriority(org.apache.hadoop.mapreduce.JobID, String)} instead
+   */
+  @Deprecated
   public synchronized void setJobPriority(JobID jobid, 
                                               String priority)
                                                 throws IOException {
@@ -3031,6 +3132,15 @@
     completedJobStatusStore.store(job);
   }
 
+  public JobProfile getJobProfile(org.apache.hadoop.mapreduce.JobID jobid) {
+    return getJobProfile(JobID.downgrade(jobid));
+  }
+  
+  /**
+   * @deprecated Use {@link #getJobProfile(org.apache.hadoop.mapreduce.JobID)} 
+   * instead
+   */
+  @Deprecated
   public JobProfile getJobProfile(JobID jobid) {
     synchronized (this) {
       JobInProgress job = jobs.get(jobid);
@@ -3040,6 +3150,16 @@
     }
     return completedJobStatusStore.readJobProfile(jobid);
   }
+  
+  public JobStatus getJobStatus(org.apache.hadoop.mapreduce.JobID jobid) {
+    return getJobStatus(JobID.downgrade(jobid));
+  }
+
+  /**
+   * @deprecated Use 
+   * {@link #getJobStatus(org.apache.hadoop.mapreduce.JobID)} instead
+   */
+  @Deprecated
   public JobStatus getJobStatus(JobID jobid) {
     if (null == jobid) {
       LOG.warn("JobTracker.getJobStatus() cannot get status for null jobid");
@@ -3059,6 +3179,21 @@
     }
     return completedJobStatusStore.readJobStatus(jobid);
   }
+  
+  public org.apache.hadoop.mapreduce.Counters getJobCounters(
+      org.apache.hadoop.mapreduce.JobID jobid) {
+    Counters counters = getJobCounters(JobID.downgrade(jobid));
+    if (counters != null) {
+      return new org.apache.hadoop.mapreduce.Counters(counters);
+    }
+    return null;
+  }
+  
+  /**
+   * @deprecated Use 
+   * {@link #getJobCounters(org.apache.hadoop.mapreduce.JobID)} instead
+   */
+  @Deprecated
   public Counters getJobCounters(JobID jobid) {
     synchronized (this) {
       JobInProgress job = jobs.get(jobid);
@@ -3068,6 +3203,15 @@
     }
     return completedJobStatusStore.readCounters(jobid);
   }
+  
+  /**
+   * @param jobid
+   * @return array of TaskReport
+   * @deprecated Use 
+   * {@link #getTaskReports(org.apache.hadoop.mapreduce.JobID, TaskType)} 
+   * instead
+   */
+  @Deprecated
   public synchronized TaskReport[] getMapTaskReports(JobID jobid) {
     JobInProgress job = jobs.get(jobid);
     if (job == null) {
@@ -3090,6 +3234,14 @@
     }
   }
 
+  /**
+   * @param jobid
+   * @return array of TaskReport
+   * @deprecated Use 
+   * {@link #getTaskReports(org.apache.hadoop.mapreduce.JobID, TaskType)} 
+   * instead
+   */
+  @Deprecated
   public synchronized TaskReport[] getReduceTaskReports(JobID jobid) {
     JobInProgress job = jobs.get(jobid);
     if (job == null) {
@@ -3110,6 +3262,14 @@
     }
   }
 
+  /**
+   * @param jobid
+   * @return array of TaskReport
+   * @deprecated Use 
+   * {@link #getTaskReports(org.apache.hadoop.mapreduce.JobID, TaskType)} 
+   * instead
+   */
+  @Deprecated
   public synchronized TaskReport[] getCleanupTaskReports(JobID jobid) {
     JobInProgress job = jobs.get(jobid);
     if (job == null) {
@@ -3132,7 +3292,15 @@
     }
   
   }
-  
+
+  /**
+   * @param jobid
+   * @return array of TaskReport
+   * @deprecated Use 
+   * {@link #getTaskReports(org.apache.hadoop.mapreduce.JobID, TaskType)} 
+   * instead
+   */
+  @Deprecated
   public synchronized TaskReport[] getSetupTaskReports(JobID jobid) {
     JobInProgress job = jobs.get(jobid);
     if (job == null) {
@@ -3154,7 +3322,22 @@
       return reports.toArray(new TaskReport[reports.size()]);
     }
   }
-  
+
+  public synchronized TaskReport[] getTaskReports(
+      org.apache.hadoop.mapreduce.JobID jobid, TaskType type) {
+    switch (type) {
+      case MAP :
+        return getMapTaskReports(JobID.downgrade(jobid));
+      case REDUCE :
+        return getReduceTaskReports(JobID.downgrade(jobid));
+      case JOB_SETUP:
+        return getSetupTaskReports(JobID.downgrade(jobid));
+      case JOB_CLEANUP:
+        return getCleanupTaskReports(JobID.downgrade(jobid));
+    }
+    return new TaskReport[0];
+  }
+
   TaskCompletionEvent[] EMPTY_EVENTS = new TaskCompletionEvent[0];
 
   static final String MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY =
@@ -3166,12 +3349,24 @@
       "mapred.cluster.max.map.memory.mb";
   static final String MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY =
       "mapred.cluster.max.reduce.memory.mb";
+
+  /* 
+   * Returns a list of TaskCompletionEvent for the given job, 
+   * starting from fromEventId.
+   */
+  public synchronized TaskCompletionEvent[] getTaskCompletionEvents(
+      org.apache.hadoop.mapreduce.JobID jobid, int fromEventId, int maxEvents)
+      throws IOException {
+    return getTaskCompletionEvents(JobID.downgrade(jobid),
+      fromEventId, maxEvents);
+  }
   
   /* 
    * Returns a list of TaskCompletionEvent for the given job, 
    * starting from fromEventId.
    * @see org.apache.hadoop.mapred.JobSubmissionProtocol#getTaskCompletionEvents(java.lang.String, int, int)
    */
+  @Deprecated
   public synchronized TaskCompletionEvent[] getTaskCompletionEvents(
       JobID jobid, int fromEventId, int maxEvents) throws IOException{
     synchronized (this) {
@@ -3192,6 +3387,17 @@
    * @param taskId the id of the task
    * @return an array of the diagnostic messages
    */
+  public synchronized String[] getTaskDiagnostics(
+      org.apache.hadoop.mapreduce.TaskAttemptID taskId)  
+      throws IOException {
+    return getTaskDiagnostics(TaskAttemptID.downgrade(taskId));
+  }
+  /**
+   * Get the diagnostics for a given task
+   * @param taskId the id of the task
+   * @return an array of the diagnostic messages
+   */
+  @Deprecated
   public synchronized String[] getTaskDiagnostics(TaskAttemptID taskId)  
     throws IOException {
     List<String> taskDiagnosticInfo = null;
@@ -3247,8 +3453,15 @@
     JobInProgress job = jobs.get(tipid.getJobID());
     return (job == null ? null : job.getTaskInProgress(tipid));
   }
-    
+
+  public synchronized boolean killTask(
+      org.apache.hadoop.mapreduce.TaskAttemptID taskid,
+      boolean shouldFail) throws IOException {
+    return killTask(TaskAttemptID.downgrade(taskid), shouldFail);
+  }
+  
   /** Mark a Task to be killed */
+  @Deprecated
   public synchronized boolean killTask(TaskAttemptID taskid, boolean shouldFail) throws IOException{
     TaskInProgress tip = taskidToTIPMap.get(taskid);
     if(tip != null) {
@@ -3274,7 +3487,7 @@
     return getJobStatus(jobs.values(), true);
   } 
   
-  public JobStatus[] getAllJobs() {
+  public org.apache.hadoop.mapreduce.JobStatus[] getAllJobs() {
     List<JobStatus> list = new ArrayList<JobStatus>();
     list.addAll(Arrays.asList(getJobStatus(jobs.values(),false)));
     list.addAll(retireJobs.getAll());
@@ -3282,7 +3495,7 @@
   }
     
   /**
-   * @see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir()
+   * @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getSystemDir()
    */
   public String getSystemDir() {
     Path sysDir = new Path(conf.get("mapred.system.dir", "/tmp/hadoop/mapred/system"));  
@@ -3602,45 +3815,76 @@
   /**
    * Gets the root level queues.
    *
-   * @return array of JobQueueInfo object.
+   * @return array of QueueInfo object.
    * @throws java.io.IOException
    */
    @Override
-  public JobQueueInfo[] getRootQueues() throws IOException {
-    return queueManager.getRootQueues();
+  public QueueInfo[] getRootQueues() throws IOException {
+    return getQueueInfoArray(queueManager.getRootQueues());
   }
  
   /**
    * Returns immediate children of queueName.
    *
    * @param queueName
-   * @return array of JobQueueInfo which are children of queueName
+   * @return array of QueueInfo which are children of queueName
    * @throws java.io.IOException
    */
   @Override
-  public JobQueueInfo[] getChildQueues(String queueName) throws IOException {
-     return queueManager.getChildQueues(queueName);
+  public QueueInfo[] getChildQueues(String queueName) throws IOException {
+    return getQueueInfoArray(queueManager.getChildQueues(queueName));
   }
 
-  @Override
-  public JobQueueInfo[] getQueues() throws IOException {
-    return queueManager.getJobQueueInfos();
+  /**
+   * Gets the root level queues.
+   *
+   * @return array of JobQueueInfo object.
+   * @throws java.io.IOException
+   */
+   @Deprecated
+  public JobQueueInfo[] getRootJobQueues() throws IOException {
+    return queueManager.getRootQueues();
   }
 
+  @Deprecated 
+  public JobQueueInfo[] getJobQueues() throws IOException {
+    return queueManager.getJobQueueInfos();
+  }
 
-  @Override
+  @Deprecated 
   public JobQueueInfo getQueueInfo(String queue) throws IOException {
     return queueManager.getJobQueueInfo(queue);
   }
 
+  private QueueInfo[] getQueueInfoArray(JobQueueInfo[] queues) 
+      throws IOException {
+    for (JobQueueInfo queue : queues) {
+      queue.setJobStatuses(getJobsFromQueue(queue.getQueueName()));
+    }
+    return queues;
+  }
+  
+  @Override
+  public QueueInfo[] getQueues() throws IOException {
+    return getQueueInfoArray(queueManager.getJobQueueInfos());
+  }
+
   @Override
-  public JobStatus[] getJobsFromQueue(String queue) throws IOException {
+  public QueueInfo getQueue(String queue) throws IOException {
+    JobQueueInfo jqueue = queueManager.getJobQueueInfo(queue);
+    jqueue.setJobStatuses(getJobsFromQueue(jqueue.getQueueName()));
+    return jqueue;
+  }
+
+  public org.apache.hadoop.mapreduce.JobStatus[] getJobsFromQueue(String queue) 
+      throws IOException {
     Collection<JobInProgress> jips = taskScheduler.getJobs(queue);
     return getJobStatus(jips,false);
   }
   
   @Override
-  public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException{
+  public org.apache.hadoop.mapreduce.QueueAclsInfo[] 
+      getQueueAclsForCurrentUser() throws IOException{
     return queueManager.getQueueAcls(
             UserGroupInformation.getCurrentUGI());
   }

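For reference, a minimal sketch (not part of this patch) of the report-API consolidation above: the four per-type getters on JobTracker are deprecated in favor of a single getTaskReports(JobID, TaskType) dispatcher. How the JobTracker reference is obtained is outside the patch; the parameter below is just a placeholder.

import org.apache.hadoop.mapred.TaskReport;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskReportMigration {
  static void printReportCounts(org.apache.hadoop.mapred.JobTracker tracker,
                                JobID jobId) {
    // One call per task type replaces getMapTaskReports(), getReduceTaskReports(),
    // getSetupTaskReports() and getCleanupTaskReports().
    for (TaskType type : new TaskType[] {TaskType.MAP, TaskType.REDUCE,
                                         TaskType.JOB_SETUP, TaskType.JOB_CLEANUP}) {
      TaskReport[] reports = tracker.getTaskReports(jobId, type);
      System.out.println(type + ": " + reports.length + " task report(s)");
    }
  }
}
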
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java Fri Sep 18 07:04:42 2009
@@ -28,6 +28,8 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.Job.RawSplit;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalDirAllocator;
@@ -35,14 +37,19 @@
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.serializer.SerializationFactory;
 import org.apache.hadoop.io.serializer.Serializer;
-import org.apache.hadoop.mapred.JobClient.RawSplit;
+import org.apache.hadoop.mapreduce.ClusterMetrics;
+import org.apache.hadoop.mapreduce.QueueInfo;
+import org.apache.hadoop.mapreduce.TaskCompletionEvent;
+import org.apache.hadoop.mapreduce.TaskTrackerInfo;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.filecache.TaskDistributedCacheManager;
 import org.apache.hadoop.mapreduce.filecache.TrackerDistributedCacheManager;
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
+import org.apache.hadoop.mapreduce.server.jobtracker.State;
 import org.apache.hadoop.util.ReflectionUtils;
 
 /** Implements MapReduce locally, in-process, for debugging. */ 
-class LocalJobRunner implements JobSubmissionProtocol {
+public class LocalJobRunner implements ClientProtocol {
   public static final Log LOG =
     LogFactory.getLog(LocalJobRunner.class);
 
@@ -57,7 +64,7 @@
   private static final String jobDir =  "localRunner/";
   
   public long getProtocolVersion(String protocol, long clientVersion) {
-    return JobSubmissionProtocol.versionID;
+    return ClientProtocol.versionID;
   }
   
   static RawSplit[] getRawSplits(JobContext jContext, JobConf job)
@@ -120,10 +127,11 @@
       return TaskUmbilicalProtocol.versionID;
     }
     
-    public Job(JobID jobid, JobConf conf) throws IOException {
+    public Job(JobID jobid) throws IOException {
       this.systemJobDir = new Path(getSystemDir(), jobid.toString());
       this.systemJobFile = new Path(systemJobDir, "job.xml");
       this.id = jobid;
+      JobConf conf = new JobConf(systemJobFile);
       this.localFs = FileSystem.getLocal(conf);
       this.localJobDir = localFs.makeQualified(conf.getLocalPath(jobDir));
       this.localJobFile = new Path(this.localJobDir, id + ".xml");
@@ -188,7 +196,7 @@
     @Override
     public void run() {
       JobID jobId = profile.getJobID();
-      JobContext jContext = new JobContext(conf, jobId);
+      JobContext jContext = new JobContext(job, jobId);
       OutputCommitter outputCommitter = job.getOutputCommitter();
       try {
         // split input into minimum number of splits
@@ -422,14 +430,19 @@
     
     public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId, 
         int fromEventId, int maxLocs, TaskAttemptID id) throws IOException {
-      return new MapTaskCompletionEventsUpdate(TaskCompletionEvent.EMPTY_ARRAY,
-                                               false);
+      return new MapTaskCompletionEventsUpdate(
+        org.apache.hadoop.mapred.TaskCompletionEvent.EMPTY_ARRAY, false);
     }
     
   }
 
+  public LocalJobRunner(Configuration conf) throws IOException {
+    this(new JobConf(conf));
+  }
+
+  @Deprecated
   public LocalJobRunner(JobConf conf) throws IOException {
-    this.fs = FileSystem.get(conf);
+    this.fs = FileSystem.getLocal(conf);
     this.conf = conf;
     myMetrics = new JobTrackerMetricsInst(null, new JobConf(conf));
   }
@@ -437,94 +450,108 @@
   // JobSubmissionProtocol methods
 
   private static int jobid = 0;
-  public synchronized JobID getNewJobId() {
-    return new JobID("local", ++jobid);
+  public synchronized org.apache.hadoop.mapreduce.JobID getNewJobID() {
+    return new org.apache.hadoop.mapreduce.JobID("local", ++jobid);
   }
 
-  public JobStatus submitJob(JobID jobid) throws IOException {
-    return new Job(jobid, this.conf).status;
+  public org.apache.hadoop.mapreduce.JobStatus submitJob(
+      org.apache.hadoop.mapreduce.JobID jobid) 
+      throws IOException {
+    return new Job(JobID.downgrade(jobid)).status;
   }
 
-  public void killJob(JobID id) {
+  public void killJob(org.apache.hadoop.mapreduce.JobID id) {
     jobs.get(id).killed = true;
     jobs.get(id).interrupt();
   }
 
-  public void setJobPriority(JobID id, String jp) throws IOException {
+  public void setJobPriority(org.apache.hadoop.mapreduce.JobID id,
+      String jp) throws IOException {
     throw new UnsupportedOperationException("Changing job priority " +
                       "in LocalJobRunner is not supported.");
   }
   
   /** Throws {@link UnsupportedOperationException} */
-  public boolean killTask(TaskAttemptID taskId, boolean shouldFail) throws IOException {
+  public boolean killTask(org.apache.hadoop.mapreduce.TaskAttemptID taskId,
+      boolean shouldFail) throws IOException {
     throw new UnsupportedOperationException("Killing tasks in " +
     "LocalJobRunner is not supported");
   }
 
-  public JobProfile getJobProfile(JobID id) {
-    Job job = jobs.get(id);
-    if(job != null)
-      return job.getProfile();
-    else 
-      return null;
+  public org.apache.hadoop.mapreduce.TaskReport[] getTaskReports(
+      org.apache.hadoop.mapreduce.JobID id, TaskType type) {
+    return new org.apache.hadoop.mapreduce.TaskReport[0];
   }
 
-  public TaskReport[] getMapTaskReports(JobID id) {
-    return new TaskReport[0];
-  }
-  public TaskReport[] getReduceTaskReports(JobID id) {
-    return new TaskReport[0];
-  }
-  public TaskReport[] getCleanupTaskReports(JobID id) {
-    return new TaskReport[0];
-  }
-  public TaskReport[] getSetupTaskReports(JobID id) {
-    return new TaskReport[0];
-  }
-
-  public JobStatus getJobStatus(JobID id) {
-    Job job = jobs.get(id);
+  public org.apache.hadoop.mapreduce.JobStatus getJobStatus(
+      org.apache.hadoop.mapreduce.JobID id) {
+    Job job = jobs.get(JobID.downgrade(id));
     if(job != null)
       return job.status;
     else 
       return null;
   }
   
-  public Counters getJobCounters(JobID id) {
-    Job job = jobs.get(id);
-    return job.currentCounters;
+  public org.apache.hadoop.mapreduce.Counters getJobCounters(
+      org.apache.hadoop.mapreduce.JobID id) {
+    Job job = jobs.get(JobID.downgrade(id));
+    return new org.apache.hadoop.mapreduce.Counters(job.currentCounters);
   }
 
   public String getFilesystemName() throws IOException {
     return fs.getUri().toString();
   }
   
-  public ClusterStatus getClusterStatus(boolean detailed) {
-    return new ClusterStatus(1, 0, 0, map_tasks, reduce_tasks, 1, 1, 
-                             JobTracker.State.RUNNING);
+  public ClusterMetrics getClusterMetrics() {
+    return new ClusterMetrics(map_tasks, reduce_tasks, 1, 1, 1, 0, 0);
   }
 
-  public JobStatus[] jobsToComplete() {return null;}
+  public State getJobTrackerState() throws IOException, InterruptedException {
+    return State.RUNNING;
+  }
+
+  public long getTaskTrackerExpiryInterval() throws IOException, InterruptedException {
+    return 0;
+  }
 
-  public TaskCompletionEvent[] getTaskCompletionEvents(JobID jobid
+  /** 
+   * Get all active trackers in the cluster.
+   * @return array of TaskTrackerInfo
+   */
+  public TaskTrackerInfo[] getActiveTrackers() 
+      throws IOException, InterruptedException {
+    return null;
+  }
+
+  /** 
+   * Get all blacklisted trackers in the cluster.
+   * @return array of TaskTrackerInfo
+   */
+  public TaskTrackerInfo[] getBlacklistedTrackers() 
+      throws IOException, InterruptedException {
+    return null;
+  }
+
+  public TaskCompletionEvent[] getTaskCompletionEvents(
+      org.apache.hadoop.mapreduce.JobID jobid
       , int fromEventId, int maxEvents) throws IOException {
     return TaskCompletionEvent.EMPTY_ARRAY;
   }
   
-  public JobStatus[] getAllJobs() {return null;}
+  public org.apache.hadoop.mapreduce.JobStatus[] getAllJobs() {return null;}
 
   
   /**
    * Returns the diagnostic information for a particular task in the given job.
    * To be implemented
    */
-  public String[] getTaskDiagnostics(TaskAttemptID taskid)
-  		throws IOException{
+  public String[] getTaskDiagnostics(
+      org.apache.hadoop.mapreduce.TaskAttemptID taskid) throws IOException {
 	  return new String [0];
   }
 
   /**
-   * @see org.apache.hadoop.mapred.JobSubmissionProtocol#getSystemDir()
+   * @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getSystemDir()
    */
   public String getSystemDir() {
     Path sysDir = new Path(conf.get("mapred.system.dir", "/tmp/hadoop/mapred/system"));  
@@ -533,33 +560,29 @@
 
 
   @Override
-  public JobQueueInfo[] getChildQueues(String queueName) throws IOException {
-    return null;
-  }
-
-  @Override
-  public JobQueueInfo[] getRootQueues() throws IOException {
+  public QueueInfo[] getChildQueues(String queueName) throws IOException {
     return null;
   }
 
   @Override
-  public JobStatus[] getJobsFromQueue(String queue) throws IOException {
+  public QueueInfo[] getRootQueues() throws IOException {
     return null;
   }
 
   @Override
-  public JobQueueInfo[] getQueues() throws IOException {
+  public QueueInfo[] getQueues() throws IOException {
     return null;
   }
 
 
   @Override
-  public JobQueueInfo getQueueInfo(String queue) throws IOException {
+  public QueueInfo getQueue(String queue) throws IOException {
     return null;
   }
 
   @Override
-  public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException{
+  public org.apache.hadoop.mapreduce.QueueAclsInfo[] 
+      getQueueAclsForCurrentUser() throws IOException {
     return null;
   }
 }

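A note on the pattern above: LocalJobRunner's public methods now traffic in the new org.apache.hadoop.mapreduce types while its internal jobs map stays keyed by old org.apache.hadoop.mapred ids, so lookups such as getJobStatus() and killJob() narrow the incoming id with JobID.downgrade(...) first. A self-contained sketch of why that narrowing matters when equals() is class-sensitive — NewID and OldID below are illustrative stand-ins, not Hadoop's id classes:

import java.util.HashMap;
import java.util.Map;

public class DowngradeLookupDemo {
  static class NewID {
    final int id;
    NewID(int id) { this.id = id; }
    @Override public boolean equals(Object o) {
      // Class-sensitive on purpose: a NewID never equals an OldID,
      // mirroring id classes whose equals() compares getClass().
      return o != null && o.getClass() == getClass() && ((NewID) o).id == id;
    }
    @Override public int hashCode() { return id; }
  }

  static class OldID extends NewID {
    OldID(int id) { super(id); }
    static OldID downgrade(NewID n) {
      // Reuse the instance when it is already the old type, else copy.
      return (n instanceof OldID) ? (OldID) n : new OldID(n.id);
    }
  }

  public static void main(String[] args) {
    Map<NewID, String> jobs = new HashMap<NewID, String>();
    jobs.put(new OldID(1), "job_local_0001");   // map keyed by old-API ids
    NewID fromClient = new NewID(1);            // id arriving via the new protocol
    System.out.println(jobs.get(fromClient));                  // null: class mismatch
    System.out.println(jobs.get(OldID.downgrade(fromClient))); // job_local_0001
  }
}

The same reuse-or-copy shape recurs in every downgrade() helper this commit adds.
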
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapReducePolicyProvider.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapReducePolicyProvider.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapReducePolicyProvider.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapReducePolicyProvider.java Fri Sep 18 07:04:42 2009
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.mapred;
 
+import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.authorize.Service;
@@ -30,7 +31,7 @@
       new Service("security.inter.tracker.protocol.acl", 
                   InterTrackerProtocol.class),
       new Service("security.job.submission.protocol.acl",
-                  JobSubmissionProtocol.class),
+                  ClientProtocol.class),
       new Service("security.task.umbilical.protocol.acl", 
                   TaskUmbilicalProtocol.class),
       new Service("security.refresh.policy.protocol.acl", 

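The policy-provider hunk is small but load-bearing: each Service entry binds one ACL configuration key to exactly one protocol interface, so retiring JobSubmissionProtocol only means re-pointing the class behind the existing security.job.submission.protocol.acl key, and deployed ACL configurations keep working unchanged. A stand-in sketch of that key-to-interface table (the interfaces below are placeholders, not Hadoop's security classes):

import java.util.LinkedHashMap;
import java.util.Map;

public class PolicyTableSketch {
  interface ClientProtocol {}        // stand-in for the new submission protocol
  interface TaskUmbilicalProtocol {} // stand-in

  // Each ACL key names exactly one protocol interface, so swapping the
  // interface behind a key is a one-line change.
  static final Map<String, Class<?>> POLICY = new LinkedHashMap<String, Class<?>>();
  static {
    POLICY.put("security.job.submission.protocol.acl", ClientProtocol.class);
    POLICY.put("security.task.umbilical.protocol.acl", TaskUmbilicalProtocol.class);
  }

  public static void main(String[] args) {
    for (Map.Entry<String, Class<?>> e : POLICY.entrySet()) {
      System.out.println(e.getKey() + " -> " + e.getValue().getSimpleName());
    }
  }
}
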
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Queue.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Queue.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Queue.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Queue.java Fri Sep 18 07:04:42 2009
@@ -330,7 +330,7 @@
   JobQueueInfo getJobQueueInfo() {
     JobQueueInfo queueInfo = new JobQueueInfo();
     queueInfo.setQueueName(name);
-    queueInfo.setQueueState(state.getStateName());
+    queueInfo.setQueueState(state.name());
     if (schedulingInfo != null) {
       queueInfo.setSchedulingInfo(schedulingInfo.toString());
     }

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueAclsInfo.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueAclsInfo.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueAclsInfo.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/QueueAclsInfo.java Fri Sep 18 07:04:42 2009
@@ -17,28 +17,20 @@
  */
 package org.apache.hadoop.mapred;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-
 /**
  *  Class to encapsulate Queue ACLs for a particular
  *  user.
- * 
+ * @deprecated Use {@link org.apache.hadoop.mapreduce.QueueAclsInfo} instead
  */
-class QueueAclsInfo implements Writable {
+@Deprecated
+class QueueAclsInfo extends org.apache.hadoop.mapreduce.QueueAclsInfo {
 
-  private String queueName;
-  private String[] operations;
   /**
    * Default constructor for QueueAclsInfo.
    * 
    */
   QueueAclsInfo() {
-    
+    super();
   }
 
   /**
@@ -50,31 +42,11 @@
    * 
    */
   QueueAclsInfo(String queueName, String[] operations) {
-    this.queueName = queueName;
-    this.operations = operations;    
-  }
-
-  String getQueueName() {
-    return queueName;
-  }
-
-  void setQueueName(String queueName) {
-    this.queueName = queueName;
+    super(queueName, operations);
   }
-
-  String[] getOperations() {
-    return operations;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    queueName = Text.readString(in);
-    operations = WritableUtils.readStringArray(in);
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    Text.writeString(out, queueName);
-    WritableUtils.writeStringArray(out, operations);
+  
+  public static QueueAclsInfo downgrade(
+      org.apache.hadoop.mapreduce.QueueAclsInfo acl) {
+    return new QueueAclsInfo(acl.getQueueName(), acl.getOperations());
   }
 }

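QueueAclsInfo is the cleanest instance of the deprecation idiom this commit applies throughout: the old class becomes a thin subclass of its org.apache.hadoop.mapreduce replacement, its constructors delegate via super(...), and a static downgrade(...) rebuilds an old-API view from the new type's public accessors. A minimal sketch of the idiom under stand-in names (not the real Hadoop classes):

// Old type extends new type, delegates construction, and offers a static
// downgrade() built only from public accessors.
class NewQueueAcls {
  private final String queueName;
  private final String[] operations;
  NewQueueAcls(String queueName, String[] operations) {
    this.queueName = queueName;
    this.operations = operations;
  }
  String getQueueName() { return queueName; }
  String[] getOperations() { return operations; }
}

@Deprecated
class OldQueueAcls extends NewQueueAcls {
  OldQueueAcls(String queueName, String[] operations) {
    super(queueName, operations);
  }
  // Rebuild an old-API view from a new-API instance; only public accessors
  // are needed, so the new type's state can stay private.
  static OldQueueAcls downgrade(NewQueueAcls acl) {
    return new OldQueueAcls(acl.getQueueName(), acl.getOperations());
  }
}
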
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/RunningJob.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/RunningJob.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/RunningJob.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/RunningJob.java Fri Sep 18 07:04:42 2009
@@ -30,7 +30,9 @@
  * progress etc.</p> 
  * 
  * @see JobClient
+ * @deprecated Use {@link org.apache.hadoop.mapreduce.Job} instead
  */
+@Deprecated
 public interface RunningJob {
   /**
    * Get the job identifier.

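For client code, deprecating RunningJob means submission and monitoring both move to org.apache.hadoop.mapreduce.Job. A hedged migration sketch against the new API as it stands on this branch — the job name, input/output paths, and the implied identity mapper/reducer are placeholders, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class NewApiSubmit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = new Job(conf, "example");  // replaces JobClient + RunningJob
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    // waitForCompletion() both submits and polls, subsuming
    // RunningJob.waitForCompletion()/isComplete().
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
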
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java Fri Sep 18 07:04:42 2009
@@ -18,35 +18,25 @@
 
 package org.apache.hadoop.mapred;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-
 /**
  * This is used to track task completion events on 
  * job tracker. 
+ * @deprecated Use 
+ * {@link org.apache.hadoop.mapreduce.TaskCompletionEvent} instead
  */
-public class TaskCompletionEvent implements Writable{
+@Deprecated
+public class TaskCompletionEvent 
+    extends org.apache.hadoop.mapreduce.TaskCompletionEvent {
   static public enum Status {FAILED, KILLED, SUCCEEDED, OBSOLETE, TIPFAILED};
-    
-  private int eventId; 
-  private String taskTrackerHttp;
-  private int taskRunTime; // using int since runtime is the time difference
-  private TaskAttemptID taskId;
-  Status status; 
-  boolean isMap = false;
-  private int idWithinJob;
+  
   public static final TaskCompletionEvent[] EMPTY_ARRAY = 
-    new TaskCompletionEvent[0];
+    new TaskCompletionEvent[0];
   /**
    * Default constructor for Writable.
    *
    */
-  public TaskCompletionEvent(){
-    taskId = new TaskAttemptID();
+  public TaskCompletionEvent() {
+    super();
   }
 
   /**
@@ -64,20 +54,16 @@
                              boolean isMap,
                              Status status, 
                              String taskTrackerHttp){
-      
-    this.taskId = taskId;
-    this.idWithinJob = idWithinJob;
-    this.isMap = isMap;
-    this.eventId = eventId; 
-    this.status =status; 
-    this.taskTrackerHttp = taskTrackerHttp;
+    super(eventId, taskId, idWithinJob, isMap, org.apache.hadoop.mapreduce.
+          TaskCompletionEvent.Status.valueOf(status.name()), taskTrackerHttp);
   }
-  /**
-   * Returns event Id. 
-   * @return event id
-   */
-  public int getEventId() {
-    return eventId;
+  
+  static TaskCompletionEvent downgrade(
+    org.apache.hadoop.mapreduce.TaskCompletionEvent event) {
+    return new TaskCompletionEvent(event.getEventId(),
+      TaskAttemptID.downgrade(event.getTaskAttemptId()), event.idWithinJob(),
+      event.isMapTask(), Status.valueOf(event.getStatus().name()),
+      event.getTaskTrackerHttp());
   }
   /**
    * Returns task id. 
@@ -86,7 +72,7 @@
    */
   @Deprecated
   public String getTaskId() {
-    return taskId.toString();
+    return getTaskAttemptId().toString();
   }
   
   /**
@@ -94,7 +80,7 @@
    * @return task id
    */
   public TaskAttemptID getTaskAttemptId() {
-    return taskId;
+    return TaskAttemptID.downgrade(super.getTaskAttemptId());
   }
   
   /**
@@ -102,133 +88,57 @@
    * @return task tracker status
    */
   public Status getTaskStatus() {
-    return status;
-  }
-  /**
-   * http location of the tasktracker where this task ran. 
-   * @return http location of tasktracker user logs
-   */
-  public String getTaskTrackerHttp() {
-    return taskTrackerHttp;
-  }
-
-  /**
-   * Returns time (in millisec) the task took to complete. 
-   */
-  public int getTaskRunTime() {
-    return taskRunTime;
-  }
-
-  /**
-   * Set the task completion time
-   * @param taskCompletionTime time (in millisec) the task took to complete
-   */
-  public void setTaskRunTime(int taskCompletionTime) {
-    this.taskRunTime = taskCompletionTime;
-  }
-
-  /**
-   * set event Id. should be assigned incrementally starting from 0. 
-   * @param eventId
-   */
-  public void setEventId(
-                         int eventId) {
-    this.eventId = eventId;
+    return Status.valueOf(super.getStatus().name());
   }
+  
   /**
    * Sets task id. 
    * @param taskId
-   * @deprecated use {@link #setTaskID(TaskAttemptID)} instead.
+   * @deprecated use {@link #setTaskAttemptId(TaskAttemptID)} instead.
    */
   @Deprecated
   public void setTaskId(String taskId) {
-    this.taskId = TaskAttemptID.forName(taskId);
+    this.setTaskAttemptId(TaskAttemptID.forName(taskId));
   }
   
   /**
    * Sets task id. 
    * @param taskId
    */
-  public void setTaskID(TaskAttemptID taskId) {
-    this.taskId = taskId;
+  protected void setTaskAttemptId(TaskAttemptID taskId) {
+    super.setTaskAttemptId(taskId);
   }
   
   /**
    * Set task status. 
    * @param status
    */
-  public void setTaskStatus(
-                            Status status) {
-    this.status = status;
+  protected void setTaskStatus(Status status) {
+    super.setTaskStatus(org.apache.hadoop.mapreduce.
+      TaskCompletionEvent.Status.valueOf(status.name()));
   }
+  
   /**
-   * Set task tracker http location. 
-   * @param taskTrackerHttp
+   * Set the task completion time.
+   * @param taskCompletionTime time (in milliseconds) the task took to complete
    */
-  public void setTaskTrackerHttp(
-                                 String taskTrackerHttp) {
-    this.taskTrackerHttp = taskTrackerHttp;
-  }
-    
-  @Override
-  public String toString(){
-    StringBuffer buf = new StringBuffer(); 
-    buf.append("Task Id : "); 
-    buf.append(taskId); 
-    buf.append(", Status : ");  
-    buf.append(status.name());
-    return buf.toString();
-  }
-    
-  @Override
-  public boolean equals(Object o) {
-    if(o == null)
-      return false;
-    if(o.getClass().equals(this.getClass())) {
-      TaskCompletionEvent event = (TaskCompletionEvent) o;
-      return this.isMap == event.isMapTask() 
-             && this.eventId == event.getEventId()
-             && this.idWithinJob == event.idWithinJob() 
-             && this.status.equals(event.getTaskStatus())
-             && this.taskId.equals(event.getTaskAttemptId()) 
-             && this.taskRunTime == event.getTaskRunTime()
-             && this.taskTrackerHttp.equals(event.getTaskTrackerHttp());
-    }
-    return false;
+  protected void setTaskRunTime(int taskCompletionTime) {
+    super.setTaskRunTime(taskCompletionTime);
   }
 
-  @Override
-  public int hashCode() {
-    return toString().hashCode(); 
+  /**
+   * Set the event id. Event ids should be assigned incrementally, starting from 0.
+   * @param eventId
+   */
+  protected void setEventId(int eventId) {
+    super.setEventId(eventId);
   }
 
-  public boolean isMapTask() {
-    return isMap;
-  }
-    
-  public int idWithinJob() {
-    return idWithinJob;
-  }
-  //////////////////////////////////////////////
-  // Writable
-  //////////////////////////////////////////////
-  public void write(DataOutput out) throws IOException {
-    taskId.write(out); 
-    WritableUtils.writeVInt(out, idWithinJob);
-    out.writeBoolean(isMap);
-    WritableUtils.writeEnum(out, status); 
-    WritableUtils.writeString(out, taskTrackerHttp);
-    WritableUtils.writeVInt(out, taskRunTime);
-    WritableUtils.writeVInt(out, eventId);
-  }
-  
-  public void readFields(DataInput in) throws IOException {
-    taskId.readFields(in); 
-    idWithinJob = WritableUtils.readVInt(in);
-    isMap = in.readBoolean();
-    status = WritableUtils.readEnum(in, Status.class);
-    taskTrackerHttp = WritableUtils.readString(in);
-    taskRunTime = WritableUtils.readVInt(in);
-    eventId = WritableUtils.readVInt(in);
+  /**
+   * Set the task tracker HTTP location.
+   * @param taskTrackerHttp
+   */
+  protected void setTaskTrackerHttp(String taskTrackerHttp) {
+    super.setTaskTrackerHttp(taskTrackerHttp);
   }
 }

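The old and new TaskCompletionEvent deliberately keep two identically-named Status enums, converting between them with Enum.valueOf(other.name()) in both directions, as the constructor and getTaskStatus() above do. A self-contained sketch of that enum-mirroring trick with stand-in enums:

public class EnumBridgeDemo {
  enum OldStatus { FAILED, KILLED, SUCCEEDED, OBSOLETE, TIPFAILED }
  enum NewStatus { FAILED, KILLED, SUCCEEDED, OBSOLETE, TIPFAILED }

  // Two enums with identical constant names convert by round-tripping
  // through the constant name.
  static NewStatus upgrade(OldStatus s) { return NewStatus.valueOf(s.name()); }
  static OldStatus downgrade(NewStatus s) { return OldStatus.valueOf(s.name()); }

  public static void main(String[] args) {
    System.out.println(upgrade(OldStatus.SUCCEEDED));   // SUCCEEDED
    System.out.println(downgrade(NewStatus.TIPFAILED)); // TIPFAILED
    // Note: valueOf throws IllegalArgumentException the moment the two
    // constant sets drift apart, so they must stay in lock step.
  }
}
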
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java Fri Sep 18 07:04:42 2009
@@ -32,9 +32,9 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.mapred.JobClient.RawSplit;
 import org.apache.hadoop.mapred.JobInProgress.DataStatistics;
 import org.apache.hadoop.mapred.SortedRanges.Range;
+import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistory;
 import org.apache.hadoop.mapreduce.jobhistory.TaskUpdatedEvent;
@@ -65,7 +65,7 @@
 
   // Defines the TIP
   private String jobFile = null;
-  private RawSplit rawSplit;
+  private Job.RawSplit rawSplit;
   private int numMaps;
   private int partition;
   private JobTracker jobtracker;
@@ -140,7 +140,7 @@
    * Constructor for MapTask
    */
   public TaskInProgress(JobID jobid, String jobFile, 
-                        RawSplit rawSplit, 
+                        Job.RawSplit rawSplit, 
                         JobTracker jobtracker, JobConf conf, 
                         JobInProgress job, int partition,
                         int numSlotsRequired) {

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskReport.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskReport.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskReport.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskReport.java Fri Sep 18 07:04:42 2009
@@ -17,33 +17,18 @@
  */
 package org.apache.hadoop.mapred;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
+import java.util.List;
 
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-
-/** A report on the state of a task. */
-public class TaskReport implements Writable {
-  private TaskID taskid;
-  private float progress;
-  private String state;
-  private String[] diagnostics;
-  private long startTime; 
-  private long finishTime; 
-  private Counters counters;
-  private TIPStatus currentStatus;
-  
-  private Collection<TaskAttemptID> runningAttempts = 
-    new ArrayList<TaskAttemptID>();
-  private TaskAttemptID successfulAttempt = new TaskAttemptID();
+/** A report on the state of a task. 
+ * @deprecated Use {@link org.apache.hadoop.mapreduce.TaskReport} instead
+ */
+@Deprecated
+public class TaskReport extends org.apache.hadoop.mapreduce.TaskReport {
+  
   public TaskReport() {
-    taskid = new TaskID();
+    super();
   }
   
   /**
@@ -80,160 +65,83 @@
              String[] diagnostics, TIPStatus currentStatus, 
              long startTime, long finishTime,
              Counters counters) {
-    this.taskid = taskid;
-    this.progress = progress;
-    this.state = state;
-    this.diagnostics = diagnostics;
-    this.currentStatus = currentStatus;
-    this.startTime = startTime; 
-    this.finishTime = finishTime;
-    this.counters = counters;
-  }
-    
-  /** @deprecated use {@link #getTaskID()} instead */
-  @Deprecated
-  public String getTaskId() { return taskid.toString(); }
-  /** The id of the task. */
-  public TaskID getTaskID() { return taskid; }
-  /** The amount completed, between zero and one. */
-  public float getProgress() { return progress; }
-  /** The most recent state, reported by a {@link Reporter}. */
-  public String getState() { return state; }
-  /** A list of error messages. */
-  public String[] getDiagnostics() { return diagnostics; }
-  /** A table of counters. */
-  public Counters getCounters() { return counters; }
-  /** The current status */
-  public TIPStatus getCurrentStatus() {
-    return currentStatus;
+    super(taskid, progress, state, diagnostics, currentStatus, startTime,
+      finishTime, new org.apache.hadoop.mapreduce.Counters(counters));
   }
   
-  /**
-   * Get finish time of task. 
-   * @return 0, if finish time was not set else returns finish time.
-   */
-  public long getFinishTime() {
-    return finishTime;
-  }
-
-  /** 
-   * set finish time of task. 
-   * @param finishTime finish time of task. 
-   */
-  void setFinishTime(long finishTime) {
-    this.finishTime = finishTime;
+  static TaskReport downgrade(
+      org.apache.hadoop.mapreduce.TaskReport report) {
+    return new TaskReport(TaskID.downgrade(report.getTaskId()),
+      report.getProgress(), report.getState(), report.getDiagnostics(),
+      report.getCurrentStatus(), report.getStartTime(), report.getFinishTime(),
+      Counters.downgrade(report.getTaskCounters()));
   }
-
-  /**
-   * Get start time of task. 
-   * @return 0 if start time was not set, else start time. 
-   */
-  public long getStartTime() {
-    return startTime;
+  
+  static TaskReport[] downgradeArray(
+      org.apache.hadoop.mapreduce.TaskReport[] reports) {
+    List<TaskReport> ret = new ArrayList<TaskReport>();
+    for (org.apache.hadoop.mapreduce.TaskReport report : reports) {
+      ret.add(downgrade(report));
+    }
+    return ret.toArray(new TaskReport[0]);
   }
-
-  /** 
-   * set start time of the task. 
-   */ 
-  void setStartTime(long startTime) {
-    this.startTime = startTime;
+  
+  /** The id of the task. */
+  public TaskID getTaskID() { return TaskID.downgrade(super.getTaskId()); }
+  
+  public Counters getCounters() { 
+    return Counters.downgrade(super.getTaskCounters()); 
   }
-
+  
   /** 
    * set successful attempt ID of the task. 
    */ 
   public void setSuccessfulAttempt(TaskAttemptID t) {
-    successfulAttempt = t;
+    super.setSuccessfulAttemptId(t);
   }
   /**
    * Get the attempt ID that took this task to completion
    */
   public TaskAttemptID getSuccessfulTaskAttempt() {
-    return successfulAttempt;
+    return TaskAttemptID.downgrade(super.getSuccessfulTaskAttemptId());
   }
   /** 
    * set running attempt(s) of the task. 
    */ 
   public void setRunningTaskAttempts(
       Collection<TaskAttemptID> runningAttempts) {
-    this.runningAttempts = runningAttempts;
+    Collection<org.apache.hadoop.mapreduce.TaskAttemptID> attempts = 
+      new ArrayList<org.apache.hadoop.mapreduce.TaskAttemptID>();
+    for (TaskAttemptID id : runningAttempts) {
+      attempts.add(id);
+    }
+    super.setRunningTaskAttemptIds(attempts);
   }
   /**
    * Get the running task attempt IDs for this task
    */
   public Collection<TaskAttemptID> getRunningTaskAttempts() {
-    return runningAttempts;
-  }
-
-
-  @Override
-  public boolean equals(Object o) {
-    if(o == null)
-      return false;
-    if(o.getClass().equals(this.getClass())) {
-      TaskReport report = (TaskReport) o;
-      return counters.equals(report.getCounters())
-             && Arrays.toString(this.diagnostics)
-                      .equals(Arrays.toString(report.getDiagnostics()))
-             && this.finishTime == report.getFinishTime()
-             && this.progress == report.getProgress()
-             && this.startTime == report.getStartTime()
-             && this.state.equals(report.getState())
-             && this.taskid.equals(report.getTaskID());
+    Collection<TaskAttemptID> attempts = new ArrayList<TaskAttemptID>();
+    for (org.apache.hadoop.mapreduce.TaskAttemptID id : 
+         super.getRunningTaskAttemptIds()) {
+      attempts.add(TaskAttemptID.downgrade(id));
     }
-    return false; 
+    return attempts;
   }
-
-  @Override
-  public int hashCode() {
-    return (counters.toString() + Arrays.toString(this.diagnostics) 
-            + this.finishTime + this.progress + this.startTime + this.state 
-            + this.taskid.toString()).hashCode();
-  }
-  //////////////////////////////////////////////
-  // Writable
-  //////////////////////////////////////////////
-  public void write(DataOutput out) throws IOException {
-    taskid.write(out);
-    out.writeFloat(progress);
-    Text.writeString(out, state);
-    out.writeLong(startTime);
-    out.writeLong(finishTime);
-    WritableUtils.writeStringArray(out, diagnostics);
-    counters.write(out);
-    WritableUtils.writeEnum(out, currentStatus);
-    if (currentStatus == TIPStatus.RUNNING) {
-      WritableUtils.writeVInt(out, runningAttempts.size());
-      TaskAttemptID t[] = new TaskAttemptID[0];
-      t = runningAttempts.toArray(t);
-      for (int i = 0; i < t.length; i++) {
-        t[i].write(out);
-      }
-    } else if (currentStatus == TIPStatus.COMPLETE) {
-      successfulAttempt.write(out);
-    }
+  
+  /** 
+   * set finish time of task. 
+   * @param finishTime finish time of task. 
+   */
+  protected void setFinishTime(long finishTime) {
+    super.setFinishTime(finishTime);
   }
 
-  public void readFields(DataInput in) throws IOException {
-    this.taskid.readFields(in);
-    this.progress = in.readFloat();
-    this.state = Text.readString(in);
-    this.startTime = in.readLong(); 
-    this.finishTime = in.readLong();
-    
-    diagnostics = WritableUtils.readStringArray(in);
-    counters = new Counters();
-    counters.readFields(in);
-    currentStatus = WritableUtils.readEnum(in, TIPStatus.class);
-    if (currentStatus == TIPStatus.RUNNING) {
-      int num = WritableUtils.readVInt(in);    
-      for (int i = 0; i < num; i++) {
-        TaskAttemptID t = new TaskAttemptID();
-        t.readFields(in);
-        runningAttempts.add(t);
-      }
-    } else if (currentStatus == TIPStatus.COMPLETE) {
-      successfulAttempt.readFields(in);
-    }
+  /** 
+   * set start time of the task. 
+   */ 
+  protected void setStartTime(long startTime) {
+    super.setStartTime(startTime);
   }
+
 }

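downgradeArray() above captures the one conversion that costs anything: an old-API TaskReport already is a mapreduce TaskReport (it extends it), but a new-API array must be rebuilt element by element, since its entries need not be old-API instances. A sketch of that asymmetry with stand-in report classes (not Hadoop's):

import java.util.ArrayList;
import java.util.List;

public class DowngradeArrayDemo {
  static class NewReport {
    final float progress;
    NewReport(float progress) { this.progress = progress; }
  }
  static class OldReport extends NewReport {
    OldReport(float progress) { super(progress); }
    static OldReport downgrade(NewReport r) {
      return (r instanceof OldReport) ? (OldReport) r : new OldReport(r.progress);
    }
  }

  // new -> old requires a per-element copy; old -> new is a free upcast
  // because OldReport extends NewReport.
  static OldReport[] downgradeArray(NewReport[] reports) {
    List<OldReport> ret = new ArrayList<OldReport>();
    for (NewReport r : reports) {
      ret.add(OldReport.downgrade(r));
    }
    return ret.toArray(new OldReport[0]);
  }

  public static void main(String[] args) {
    NewReport[] in = { new NewReport(0.5f), new OldReport(1.0f) };
    OldReport[] out = downgradeArray(in);
    System.out.println(out.length + " reports downgraded"); // 2 reports downgraded
  }
}
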
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java?rev=816496&r1=816495&r2=816496&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java Fri Sep 18 07:04:42 2009
@@ -1152,7 +1152,7 @@
     List <TaskCompletionEvent> recentMapEvents = 
       new ArrayList<TaskCompletionEvent>();
     for (int i = 0; i < t.length; i++) {
-      if (t[i].isMap) {
+      if (t[i].isMapTask()) {
         recentMapEvents.add(t[i]);
       }
     }

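The TaskTracker hunk is the call-site half of the TaskCompletionEvent change: with isMap now private state on the mapreduce base class, consumers go through the isMapTask() accessor. A stand-in sketch of the same filtering loop (the Event class below is illustrative, not Hadoop's):

import java.util.ArrayList;
import java.util.List;

public class EventFilterDemo {
  static class Event {
    private final boolean isMap;
    Event(boolean isMap) { this.isMap = isMap; }
    boolean isMapTask() { return isMap; }  // accessor replaces the old field read
  }

  public static void main(String[] args) {
    Event[] t = { new Event(true), new Event(false), new Event(true) };
    List<Event> recentMapEvents = new ArrayList<Event>();
    for (int i = 0; i < t.length; i++) {
      if (t[i].isMapTask()) {
        recentMapEvents.add(t[i]);
      }
    }
    System.out.println(recentMapEvents.size() + " map events"); // 2 map events
  }
}
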

