hadoop-mapreduce-commits mailing list archives

From: omal...@apache.org
Subject: svn commit: r939849 [2/3] - in /hadoop/mapreduce/trunk: ./ src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/ src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/jo...
Date: Fri, 30 Apr 2010 22:26:21 GMT
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java Fri Apr 30 22:26:19 2010
@@ -34,7 +34,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.TaskController;
 import org.apache.hadoop.mapred.TaskController.DistributedCacheFileContext;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 import org.apache.hadoop.mapreduce.util.MRAsyncDiskService;
@@ -477,7 +477,7 @@ public class TrackerDistributedCacheMana
     } else {
       // invoke taskcontroller to set permissions
       DistributedCacheFileContext context = new DistributedCacheFileContext(
-          conf.get(JobContext.USER_NAME), new File(cacheStatus.localizedBaseDir
+          conf.get(MRJobConfig.USER_NAME), new File(cacheStatus.localizedBaseDir
               .toString()), cacheStatus.localizedBaseDir,
           cacheStatus.uniqueString);
       taskController.initializeDistributedCacheFile(context);
@@ -656,7 +656,7 @@ public class TrackerDistributedCacheMana
         archiveTimestamps.append(",");
         archiveTimestamps.append(String.valueOf(status.getModificationTime()));
       }
-      job.set(JobContext.CACHE_ARCHIVES_SIZES, archiveFileSizes.toString());
+      job.set(MRJobConfig.CACHE_ARCHIVES_SIZES, archiveFileSizes.toString());
       setArchiveTimestamps(job, archiveTimestamps.toString());
     }
   
@@ -674,7 +674,7 @@ public class TrackerDistributedCacheMana
         fileTimestamps.append(",");
         fileTimestamps.append(String.valueOf(status.getModificationTime()));
       }
-      job.set(JobContext.CACHE_FILES_SIZES, fileSizes.toString());
+      job.set(MRJobConfig.CACHE_FILES_SIZES, fileSizes.toString());
       setFileTimestamps(job, fileTimestamps.toString());
     }
   }
@@ -747,7 +747,7 @@ public class TrackerDistributedCacheMana
    * @throws IOException
    */
   static String[] getFileVisibilities(Configuration conf) {
-    return conf.getStrings(JobContext.CACHE_FILE_VISIBILITIES);
+    return conf.getStrings(MRJobConfig.CACHE_FILE_VISIBILITIES);
   }
 
   /**
@@ -757,7 +757,7 @@ public class TrackerDistributedCacheMana
    * @return a string array of booleans 
    */
   static String[] getArchiveVisibilities(Configuration conf) {
-    return conf.getStrings(JobContext.CACHE_ARCHIVES_VISIBILITIES);
+    return conf.getStrings(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES);
   }
 
   /**
@@ -814,7 +814,7 @@ public class TrackerDistributedCacheMana
    * The order should be the same as the order in which the archives are added.
    */
   static void setArchiveVisibilities(Configuration conf, String booleans) {
-    conf.set(JobContext.CACHE_ARCHIVES_VISIBILITIES, booleans);
+    conf.set(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES, booleans);
   }
 
   /**
@@ -825,7 +825,7 @@ public class TrackerDistributedCacheMana
    * The order should be the same as the order in which the files are added.
    */
   static void setFileVisibilities(Configuration conf, String booleans) {
-    conf.set(JobContext.CACHE_FILE_VISIBILITIES, booleans);
+    conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, booleans);
   }
 
   /**
@@ -836,7 +836,7 @@ public class TrackerDistributedCacheMana
    * The order should be the same as the order in which the archives are added.
    */
   static void setArchiveTimestamps(Configuration conf, String timestamps) {
-    conf.set(JobContext.CACHE_ARCHIVES_TIMESTAMPS, timestamps);
+    conf.set(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS, timestamps);
   }
 
   /**
@@ -847,7 +847,7 @@ public class TrackerDistributedCacheMana
    * The order should be the same as the order in which the files are added.
    */
   static void setFileTimestamps(Configuration conf, String timestamps) {
-    conf.set(JobContext.CACHE_FILE_TIMESTAMPS, timestamps);
+    conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, timestamps);
   }
   
   /**
@@ -857,7 +857,7 @@ public class TrackerDistributedCacheMana
    * @param str a comma separated list of local archives
    */
   static void setLocalArchives(Configuration conf, String str) {
-    conf.set(JobContext.CACHE_LOCALARCHIVES, str);
+    conf.set(MRJobConfig.CACHE_LOCALARCHIVES, str);
   }
 
   /**
@@ -867,7 +867,7 @@ public class TrackerDistributedCacheMana
    * @param str a comma separated list of local files
    */
   static void setLocalFiles(Configuration conf, String str) {
-    conf.set(JobContext.CACHE_LOCALFILES, str);
+    conf.set(MRJobConfig.CACHE_LOCALFILES, str);
   }
   
   /**

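Every hunk in this commit applies the same mechanical migration: job-configuration key constants move from the org.apache.hadoop.mapreduce.JobContext interface to the new MRJobConfig interface, and call sites are updated to reference the new home. A minimal sketch of what calling code looks like after the change (the class name and the printed message are illustrative, not part of the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class MigrationSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Before r939849 this read conf.get(JobContext.USER_NAME); the
        // constant and its property name are unchanged, only the declaring
        // interface moved.
        String user = conf.get(MRJobConfig.USER_NAME);
        System.out.println("submitting user = " + user);
      }
    }
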
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java Fri Apr 30 22:26:19 2010
@@ -23,7 +23,7 @@ import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 
 /** 
  * This class implements the common functionalities of 
@@ -157,6 +157,6 @@ public class ValueAggregatorBaseDescript
    * @param conf a configuration object
    */
   public void configure(Configuration conf) {
-    this.inputFile = conf.get(JobContext.MAP_INPUT_FILE);
+    this.inputFile = conf.get(MRJobConfig.MAP_INPUT_FILE);
   }
 }

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java Fri Apr 30 22:26:19 2010
@@ -26,7 +26,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
@@ -157,7 +157,7 @@ public class ValueAggregatorJob {
     }
     String userJarFile = conf.get(ValueAggregatorJobBase.USER_JAR);
     if (userJarFile != null) {
-      conf.set(JobContext.JAR, userJarFile);
+      conf.set(MRJobConfig.JAR, userJarFile);
     }
 
     Job theJob = new Job(conf);

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/BigDecimalSplitter.java Fri Apr 30 22:26:19 2010
@@ -29,7 +29,7 @@ import org.apache.commons.logging.LogFac
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 
 /**
  * Implement DBSplitter over BigDecimal values.
@@ -46,7 +46,7 @@ public class BigDecimalSplitter implemen
     String lowClausePrefix = colName + " >= ";
     String highClausePrefix = colName + " < ";
 
-    BigDecimal numSplits = new BigDecimal(conf.getInt(JobContext.NUM_MAPS, 1));
+    BigDecimal numSplits = new BigDecimal(conf.getInt(MRJobConfig.NUM_MAPS, 1));
 
     if (minVal == null && maxVal == null) {
       // Range is null to null. Return a null split accordingly.

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java Fri Apr 30 22:26:19 2010
@@ -36,6 +36,7 @@ import org.apache.hadoop.mapreduce.Input
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -238,7 +239,7 @@ public class DBInputFormat<T extends DBW
       results.next();
 
       long count = results.getLong(1);
-      int chunks = job.getConfiguration().getInt(JobContext.NUM_MAPS, 1);
+      int chunks = job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
       long chunkSize = (count / chunks);
 
       results.close();

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBInputFormat.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBInputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DataDrivenDBInputFormat.java Fri Apr 30 22:26:19 2010
@@ -41,6 +41,7 @@ import org.apache.hadoop.mapreduce.Input
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -160,7 +161,7 @@ public class DataDrivenDBInputFormat<T e
   /** {@inheritDoc} */
   public List<InputSplit> getSplits(JobContext job) throws IOException {
 
-    int targetNumTasks = job.getConfiguration().getInt(JobContext.NUM_MAPS, 1);
+    int targetNumTasks = job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
     if (1 == targetNumTasks) {
       // There's no need to run a bounding vals query; just return a split
       // that separates nothing. This can be considerably more optimal for a

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DateSplitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DateSplitter.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DateSplitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DateSplitter.java Fri Apr 30 22:26:19 2010
@@ -32,7 +32,7 @@ import org.apache.commons.logging.LogFac
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 
 /**
  * Implement DBSplitter over date/time values.
@@ -56,7 +56,7 @@ public class DateSplitter extends Intege
     String lowClausePrefix = colName + " >= ";
     String highClausePrefix = colName + " < ";
 
-    int numSplits = conf.getInt(JobContext.NUM_MAPS, 1);
+    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
     if (numSplits < 1) {
       numSplits = 1;
     }

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/FloatSplitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/FloatSplitter.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/FloatSplitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/FloatSplitter.java Fri Apr 30 22:26:19 2010
@@ -28,7 +28,7 @@ import org.apache.commons.logging.LogFac
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 
 /**
  * Implement DBSplitter over floating-point values.
@@ -61,7 +61,7 @@ public class FloatSplitter implements DB
 
     // Use this as a hint. May need an extra task if the size doesn't
     // divide cleanly.
-    int numSplits = conf.getInt(JobContext.NUM_MAPS, 1);
+    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
     double splitSize = (maxVal - minVal) / (double) numSplits;
 
     if (splitSize < MIN_INCREMENT) {

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/IntegerSplitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/IntegerSplitter.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/IntegerSplitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/IntegerSplitter.java Fri Apr 30 22:26:19 2010
@@ -25,7 +25,7 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 
 /**
  * Implement DBSplitter over integer values.
@@ -40,7 +40,7 @@ public class IntegerSplitter implements 
     String lowClausePrefix = colName + " >= ";
     String highClausePrefix = colName + " < ";
 
-    int numSplits = conf.getInt(JobContext.NUM_MAPS, 1);
+    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
     if (numSplits < 1) {
       numSplits = 1;
     }

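A note on what these db splitters do with the constant: MRJobConfig.NUM_MAPS is only a hint for how many WHERE-clause ranges to generate over [minVal, maxVal]. A standalone sketch of the integer case (simplified; the real IntegerSplitter also distributes the remainder so ranges differ in size by at most one):

    import java.util.ArrayList;
    import java.util.List;

    public class SplitSketch {
      // Divide [minVal, maxVal) into roughly numSplits contiguous ranges,
      // mirroring the "colName >= lo AND colName < hi" clauses built above.
      static List<long[]> split(long minVal, long maxVal, int numSplits) {
        if (numSplits < 1) {
          numSplits = 1;  // same guard the splitters apply
        }
        long splitSize = Math.max(1, (maxVal - minVal) / numSplits);
        List<long[]> ranges = new ArrayList<long[]>();
        for (long lo = minVal; lo < maxVal; lo += splitSize) {
          ranges.add(new long[] { lo, Math.min(lo + splitSize, maxVal) });
        }
        return ranges;
      }

      public static void main(String[] args) {
        for (long[] r : split(0, 100, 4)) {
          System.out.println("id >= " + r[0] + " AND id < " + r[1]);
        }
      }
    }
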
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/TextSplitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/TextSplitter.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/TextSplitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/TextSplitter.java Fri Apr 30 22:26:19 2010
@@ -30,7 +30,7 @@ import org.apache.commons.logging.LogFac
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 
 /**
  * Implement DBSplitter over text strings.
@@ -87,7 +87,7 @@ public class TextSplitter extends BigDec
 
     // Use this as a hint. May need an extra task if the size doesn't
     // divide cleanly.
-    int numSplits = conf.getInt(JobContext.NUM_MAPS, 1);
+    int numSplits = conf.getInt(MRJobConfig.NUM_MAPS, 1);
 
     String lowClausePrefix = colName + " >= '";
     String highClausePrefix = colName + " < '";

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java Fri Apr 30 22:26:19 2010
@@ -145,9 +145,9 @@ public class CombineFileRecordReader<K, 
     try {
       Configuration conf = context.getConfiguration();
       // setup some helper config variables.
-      conf.set(JobContext.MAP_INPUT_FILE, split.getPath(idx).toString());
-      conf.setLong(JobContext.MAP_INPUT_START, split.getOffset(idx));
-      conf.setLong(JobContext.MAP_INPUT_PATH, split.getLength(idx));
+      conf.set(MRJobConfig.MAP_INPUT_FILE, split.getPath(idx).toString());
+      conf.setLong(MRJobConfig.MAP_INPUT_START, split.getOffset(idx));
+      conf.setLong(MRJobConfig.MAP_INPUT_PATH, split.getLength(idx));
 
       curReader =  rrConstructor.newInstance(new Object [] 
                             {split, context, Integer.valueOf(idx)});

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/Parser.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/Parser.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/Parser.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/Parser.java Fri Apr 30 22:26:19 2010
@@ -38,6 +38,7 @@ import org.apache.hadoop.mapreduce.Input
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
@@ -327,7 +328,7 @@ public abstract static class Node extend
         TaskAttemptContext context = 
           new TaskAttemptContextImpl(conf, 
                                      TaskAttemptID.forName(
-                                         conf.get(JobContext.TASK_ATTEMPT_ID)));
+                                         conf.get(MRJobConfig.TASK_ATTEMPT_ID)));
         return rrCstrMap.get(ident).newInstance(id,
             inf.createRecordReader(split, context), cmpcl);
       } catch (IllegalAccessException e) {

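For context on the Parser hunk: MRJobConfig.TASK_ATTEMPT_ID carries the string form of the current attempt id, which TaskAttemptID.forName parses back into an object. A quick illustration (the id below is a made-up string in the standard attempt_<jt>_<job>_<m|r>_<task>_<attempt> format):

    import org.apache.hadoop.mapreduce.TaskAttemptID;

    public class AttemptIdSketch {
      public static void main(String[] args) {
        TaskAttemptID id =
            TaskAttemptID.forName("attempt_200704121733_0003_m_000005_0");
        // forName splits the string into job id, task id, and attempt number.
        System.out.println(id.getJobID() + " / " + id.getTaskID()
            + " / attempt " + id.getId());
      }
    }
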
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java Fri Apr 30 22:26:19 2010
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
@@ -92,7 +93,7 @@ public class FileOutputCommitter extends
   }
   
   // Create a _success file in the job's output dir
-  private void markOutputDirSuccessful(JobContext context) throws IOException {
+  private void markOutputDirSuccessful(MRJobConfig context) throws IOException {
     if (outputPath != null) {
       // create a file in the output folder to mark the job completion
       Path filePath = new Path(outputPath, SUCCEEDED_FILE_NAME);

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java Fri Apr 30 22:26:19 2010
@@ -27,6 +27,7 @@ import org.apache.hadoop.io.WritableUtil
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.lib.partition.KeyFieldHelper.KeyDescription;
 
 
@@ -58,7 +59,7 @@ public class KeyFieldBasedComparator<K, 
   public void setConf(Configuration conf) {
     this.conf = conf;
     String option = conf.get(COMPARATOR_OPTIONS);
-    String keyFieldSeparator = conf.get(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR,"\t");
+    String keyFieldSeparator = conf.get(MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPERATOR,"\t");
     keyFieldHelper.setKeyFieldSeparator(keyFieldSeparator);
     keyFieldHelper.parseOption(option);
   }

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java Fri Apr 30 22:26:19 2010
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configurab
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.Partitioner;
 import org.apache.hadoop.mapreduce.lib.partition.KeyFieldHelper.KeyDescription;
 
@@ -59,7 +60,7 @@ public class KeyFieldBasedPartitioner<K2
   public void setConf(Configuration conf) {
     this.conf = conf;
     String keyFieldSeparator = 
-      conf.get(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, "\t");
+      conf.get(MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPERATOR, "\t");
     keyFieldHelper.setKeyFieldSeparator(keyFieldSeparator);
     if (conf.get("num.key.fields.for.partition") != null) {
       LOG.warn("Using deprecated num.key.fields.for.partition. " +

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java Fri Apr 30 22:26:19 2010
@@ -26,9 +26,9 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.mapreduce.ClusterMetrics;
 import org.apache.hadoop.mapreduce.Counters;
-import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.QueueAclsInfo;
 import org.apache.hadoop.mapreduce.QueueInfo;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
@@ -48,7 +48,7 @@ import org.apache.hadoop.security.token.
  * JobClient can use these methods to submit a Job for execution, and learn about
  * the current system status.
  */ 
-@KerberosInfo(JobContext.JOB_JOBTRACKER_ID)
+@KerberosInfo(MRJobConfig.JOB_JOBTRACKER_ID)
 @TokenInfo(DelegationTokenSelector.class)
 public interface ClientProtocol extends VersionedProtocol {
   /* 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/security/TokenCache.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/security/TokenCache.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/security/TokenCache.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/security/TokenCache.java Fri Apr 30 22:26:19 2010
@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.security.t
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.TokenStorage;
@@ -110,7 +110,7 @@ public class TokenCache {
   static void obtainTokensForNamenodesInternal(Path [] ps, Configuration conf)
   throws IOException {
     // get jobtracker principal id (for the renewer)
-    Text jtCreds = new Text(conf.get(JobContext.JOB_JOBTRACKER_ID, ""));
+    Text jtCreds = new Text(conf.get(MRJobConfig.JOB_JOBTRACKER_ID, ""));
     
     for(Path p: ps) {
       FileSystem fs = FileSystem.get(p.toUri(), conf);

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java Fri Apr 30 22:26:19 2010
@@ -41,7 +41,7 @@ import org.apache.hadoop.mapred.Counters
 import org.apache.hadoop.mapred.IFileInputStream;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
 import org.apache.hadoop.mapreduce.task.reduce.MapOutput.Type;
@@ -124,10 +124,10 @@ class Fetcher<K,V> extends Thread {
     }
 
     this.connectionTimeout = 
-      job.getInt(JobContext.SHUFFLE_CONNECT_TIMEOUT,
+      job.getInt(MRJobConfig.SHUFFLE_CONNECT_TIMEOUT,
                  DEFAULT_STALLED_COPY_TIMEOUT);
     this.readTimeout = 
-      job.getInt(JobContext.SHUFFLE_READ_TIMEOUT, DEFAULT_READ_TIMEOUT);
+      job.getInt(MRJobConfig.SHUFFLE_READ_TIMEOUT, DEFAULT_READ_TIMEOUT);
     
     setName("fetcher#" + id);
     setDaemon(true);

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java Fri Apr 30 22:26:19 2010
@@ -50,7 +50,7 @@ import org.apache.hadoop.mapred.IFile.Wr
 import org.apache.hadoop.mapred.Merger.Segment;
 import org.apache.hadoop.mapred.Task.CombineOutputCollector;
 import org.apache.hadoop.mapred.Task.CombineValuesIterator;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.task.reduce.MapOutput.MapOutputComparator;
@@ -150,27 +150,27 @@ public class MergeManager<K, V> {
     this.rfs = ((LocalFileSystem)localFS).getRaw();
     
     final float maxInMemCopyUse =
-      jobConf.getFloat(JobContext.SHUFFLE_INPUT_BUFFER_PERCENT, 0.90f);
+      jobConf.getFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT, 0.90f);
     if (maxInMemCopyUse > 1.0 || maxInMemCopyUse < 0.0) {
       throw new IllegalArgumentException("Invalid value for " +
-          JobContext.SHUFFLE_INPUT_BUFFER_PERCENT + ": " +
+          MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT + ": " +
           maxInMemCopyUse);
     }
 
     // Allow unit tests to fix Runtime memory
     this.memoryLimit = 
-      (long)(jobConf.getLong(JobContext.REDUCE_MEMORY_TOTAL_BYTES,
+      (long)(jobConf.getLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,
           Math.min(Runtime.getRuntime().maxMemory(), Integer.MAX_VALUE))
         * maxInMemCopyUse);
  
-    this.ioSortFactor = jobConf.getInt(JobContext.IO_SORT_FACTOR, 100);
+    this.ioSortFactor = jobConf.getInt(MRJobConfig.IO_SORT_FACTOR, 100);
 
     this.maxSingleShuffleLimit = 
       (long)(memoryLimit * MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION);
     this.memToMemMergeOutputsThreshold = 
-            jobConf.getInt(JobContext.REDUCE_MEMTOMEM_THRESHOLD, ioSortFactor);
+            jobConf.getInt(MRJobConfig.REDUCE_MEMTOMEM_THRESHOLD, ioSortFactor);
     this.mergeThreshold = (long)(this.memoryLimit * 
-                          jobConf.getFloat(JobContext.SHUFFLE_MERGE_EPRCENT, 
+                          jobConf.getFloat(MRJobConfig.SHUFFLE_MERGE_EPRCENT, 
                                            0.90f));
     LOG.info("MergerManager: memoryLimit=" + memoryLimit + ", " +
              "maxSingleShuffleLimit=" + maxSingleShuffleLimit + ", " +
@@ -179,7 +179,7 @@ public class MergeManager<K, V> {
              "memToMemMergeOutputsThreshold=" + memToMemMergeOutputsThreshold);
 
     boolean allowMemToMemMerge = 
-      jobConf.getBoolean(JobContext.REDUCE_MEMTOMEM_ENABLED, false);
+      jobConf.getBoolean(MRJobConfig.REDUCE_MEMTOMEM_ENABLED, false);
     if (allowMemToMemMerge) {
       this.memToMemMerger = 
         new IntermediateMemoryToMemoryMerger(this,
@@ -628,9 +628,9 @@ public class MergeManager<K, V> {
              onDiskMapOutputs.size() + " on-disk map-outputs");
     
     final float maxRedPer =
-      job.getFloat(JobContext.REDUCE_INPUT_BUFFER_PERCENT, 0f);
+      job.getFloat(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT, 0f);
     if (maxRedPer > 1.0 || maxRedPer < 0.0) {
-      throw new IOException(JobContext.REDUCE_INPUT_BUFFER_PERCENT +
+      throw new IOException(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT +
                             maxRedPer);
     }
     int maxInMemReduce = (int)Math.min(

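To make the MergeManager arithmetic concrete: the in-memory shuffle budget is REDUCE_MEMORY_TOTAL_BYTES (defaulting to min(JVM max heap, Integer.MAX_VALUE)) scaled by SHUFFLE_INPUT_BUFFER_PERCENT, and the merge trigger is that budget scaled again by the merge percent. A worked sketch under assumed values, a 1 GiB total and the 0.90 defaults shown in the hunk:

    public class MergeBudgetSketch {
      public static void main(String[] args) {
        // Assumed inputs standing in for the config reads in the hunk above.
        long totalBytes = 1024L * 1024 * 1024; // REDUCE_MEMORY_TOTAL_BYTES fallback
        float maxInMemCopyUse = 0.90f;         // SHUFFLE_INPUT_BUFFER_PERCENT default
        float mergePercent = 0.90f;            // SHUFFLE_MERGE_EPRCENT default
                                               // (the misspelled name is the real constant)

        long memoryLimit = (long) (totalBytes * maxInMemCopyUse);
        long mergeThreshold = (long) (memoryLimit * mergePercent);
        System.out.println("memoryLimit    = " + memoryLimit);   // ~90% of the total
        System.out.println("mergeThreshold = " + mergeThreshold); // merge starts past this
      }
    }
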
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java Fri Apr 30 22:26:19 2010
@@ -33,7 +33,7 @@ import org.apache.hadoop.mapred.Task;
 import org.apache.hadoop.mapred.TaskStatus;
 import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
 import org.apache.hadoop.mapred.Task.CombineOutputCollector;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.util.Progress;
 
@@ -102,7 +102,7 @@ public class Shuffle<K, V> implements Ex
     eventFetcher.start();
     
     // Start the map-output fetcher threads
-    final int numFetchers = jobConf.getInt(JobContext.SHUFFLE_PARALLEL_COPIES, 5);
+    final int numFetchers = jobConf.getInt(MRJobConfig.SHUFFLE_PARALLEL_COPIES, 5);
     Fetcher<K,V>[] fetchers = new Fetcher[numFetchers];
     for (int i=0; i < numFetchers; ++i) {
       fetchers[i] = new Fetcher<K,V>(jobConf, reduceId, scheduler, merger, 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java Fri Apr 30 22:26:19 2010
@@ -18,7 +18,7 @@
 package org.apache.hadoop.mapreduce.task.reduce;
 
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsRecord;
@@ -35,7 +35,7 @@ class ShuffleClientMetrics implements Up
   private final int numCopiers;
   
   ShuffleClientMetrics(TaskAttemptID reduceId, JobConf jobConf) {
-    this.numCopiers = jobConf.getInt(JobContext.SHUFFLE_PARALLEL_COPIES, 5);
+    this.numCopiers = jobConf.getInt(MRJobConfig.SHUFFLE_PARALLEL_COPIES, 5);
 
     MetricsContext metricsContext = MetricsUtil.getContext("mapred");
     this.shuffleMetrics = 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleScheduler.java Fri Apr 30 22:26:19 2010
@@ -37,7 +37,7 @@ import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.mapred.Counters;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.TaskStatus;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.task.reduce.MapHost.State;
@@ -112,9 +112,9 @@ class ShuffleScheduler<K,V> {
     this.maxFailedUniqueFetches = Math.min(totalMaps,
         this.maxFailedUniqueFetches);
     this.maxFetchFailuresBeforeReporting = job.getInt(
-        JobContext.SHUFFLE_FETCH_FAILURES, REPORT_FAILURE_LIMIT);
+        MRJobConfig.SHUFFLE_FETCH_FAILURES, REPORT_FAILURE_LIMIT);
     this.reportReadErrorImmediately = job.getBoolean(
-        JobContext.SHUFFLE_NOTIFY_READERROR, true);
+        MRJobConfig.SHUFFLE_NOTIFY_READERROR, true);
   }
 
   public synchronized void copySucceeded(TaskAttemptID mapId, 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java Fri Apr 30 22:26:19 2010
@@ -19,8 +19,8 @@ package org.apache.hadoop.mapreduce.util
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 
@@ -168,215 +168,215 @@ public class ConfigUtil {
     Configuration.addDeprecation("tasktracker.contention.tracking", 
       new String[] {TTConfig.TT_CONTENTION_TRACKING});
     Configuration.addDeprecation("hadoop.job.history.user.location", 
-      new String[] {JobContext.HISTORY_LOCATION});
+      new String[] {MRJobConfig.HISTORY_LOCATION});
     Configuration.addDeprecation("job.end.notification.url", 
-      new String[] {JobContext.END_NOTIFICATION_URL});
+      new String[] {MRJobConfig.END_NOTIFICATION_URL});
     Configuration.addDeprecation("job.end.retry.attempts", 
-      new String[] {JobContext.END_NOTIFICATION_RETRIES});
+      new String[] {MRJobConfig.END_NOTIFICATION_RETRIES});
     Configuration.addDeprecation("job.end.retry.interval", 
-      new String[] {JobContext.END_NOTIFICATION_RETRIE_INTERVAL});
+      new String[] {MRJobConfig.END_NOTIFICATION_RETRIE_INTERVAL});
     Configuration.addDeprecation("mapred.committer.job.setup.cleanup.needed", 
-      new String[] {JobContext.SETUP_CLEANUP_NEEDED});
+      new String[] {MRJobConfig.SETUP_CLEANUP_NEEDED});
     Configuration.addDeprecation("mapred.jar", 
-      new String[] {JobContext.JAR});
+      new String[] {MRJobConfig.JAR});
     Configuration.addDeprecation("mapred.job.id", 
-      new String[] {JobContext.ID});
+      new String[] {MRJobConfig.ID});
     Configuration.addDeprecation("mapred.job.name", 
-      new String[] {JobContext.JOB_NAME});
+      new String[] {MRJobConfig.JOB_NAME});
     Configuration.addDeprecation("mapred.job.priority", 
-      new String[] {JobContext.PRIORITY});
+      new String[] {MRJobConfig.PRIORITY});
     Configuration.addDeprecation("mapred.job.queue.name", 
-      new String[] {JobContext.QUEUE_NAME});
+      new String[] {MRJobConfig.QUEUE_NAME});
     Configuration.addDeprecation("mapred.job.reuse.jvm.num.tasks", 
-      new String[] {JobContext.JVM_NUMTASKS_TORUN});
+      new String[] {MRJobConfig.JVM_NUMTASKS_TORUN});
     Configuration.addDeprecation("mapred.map.tasks", 
-      new String[] {JobContext.NUM_MAPS});
+      new String[] {MRJobConfig.NUM_MAPS});
     Configuration.addDeprecation("mapred.max.tracker.failures", 
-      new String[] {JobContext.MAX_TASK_FAILURES_PER_TRACKER});
+      new String[] {MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER});
     Configuration.addDeprecation("mapred.reduce.slowstart.completed.maps", 
-      new String[] {JobContext.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART});
+      new String[] {MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART});
     Configuration.addDeprecation("mapred.reduce.tasks", 
-      new String[] {JobContext.NUM_REDUCES});
+      new String[] {MRJobConfig.NUM_REDUCES});
     Configuration.addDeprecation("mapred.skip.on", 
-      new String[] {JobContext.SKIP_RECORDS});
+      new String[] {MRJobConfig.SKIP_RECORDS});
     Configuration.addDeprecation("mapred.skip.out.dir", 
-      new String[] {JobContext.SKIP_OUTDIR});
+      new String[] {MRJobConfig.SKIP_OUTDIR});
     Configuration.addDeprecation(
       "mapred.speculative.execution.slowNodeThreshold", 
-      new String[] {JobContext.SPECULATIVE_SLOWNODE_THRESHOLD});
+      new String[] {MRJobConfig.SPECULATIVE_SLOWNODE_THRESHOLD});
     Configuration.addDeprecation(
       "mapred.speculative.execution.slowTaskThreshold", 
-      new String[] {JobContext.SPECULATIVE_SLOWTASK_THRESHOLD});
+      new String[] {MRJobConfig.SPECULATIVE_SLOWTASK_THRESHOLD});
     Configuration.addDeprecation("mapred.speculative.execution.speculativeCap", 
-      new String[] {JobContext.SPECULATIVECAP});
+      new String[] {MRJobConfig.SPECULATIVECAP});
     Configuration.addDeprecation("job.local.dir", 
-      new String[] {JobContext.JOB_LOCAL_DIR});
+      new String[] {MRJobConfig.JOB_LOCAL_DIR});
     Configuration.addDeprecation("mapreduce.inputformat.class", 
-      new String[] {JobContext.INPUT_FORMAT_CLASS_ATTR});
+      new String[] {MRJobConfig.INPUT_FORMAT_CLASS_ATTR});
     Configuration.addDeprecation("mapreduce.map.class", 
-      new String[] {JobContext.MAP_CLASS_ATTR});
+      new String[] {MRJobConfig.MAP_CLASS_ATTR});
     Configuration.addDeprecation("mapreduce.combine.class", 
-      new String[] {JobContext.COMBINE_CLASS_ATTR});
+      new String[] {MRJobConfig.COMBINE_CLASS_ATTR});
     Configuration.addDeprecation("mapreduce.reduce.class", 
-      new String[] {JobContext.REDUCE_CLASS_ATTR});
+      new String[] {MRJobConfig.REDUCE_CLASS_ATTR});
     Configuration.addDeprecation("mapreduce.outputformat.class", 
-      new String[] {JobContext.OUTPUT_FORMAT_CLASS_ATTR});
+      new String[] {MRJobConfig.OUTPUT_FORMAT_CLASS_ATTR});
     Configuration.addDeprecation("mapreduce.partitioner.class", 
-      new String[] {JobContext.PARTITIONER_CLASS_ATTR});
+      new String[] {MRJobConfig.PARTITIONER_CLASS_ATTR});
     Configuration.addDeprecation("mapred.job.classpath.archives", 
-      new String[] {JobContext.CLASSPATH_ARCHIVES});
+      new String[] {MRJobConfig.CLASSPATH_ARCHIVES});
     Configuration.addDeprecation("mapred.job.classpath.files", 
-      new String[] {JobContext.CLASSPATH_FILES});
+      new String[] {MRJobConfig.CLASSPATH_FILES});
     Configuration.addDeprecation("mapred.cache.files", 
-      new String[] {JobContext.CACHE_FILES});
+      new String[] {MRJobConfig.CACHE_FILES});
     Configuration.addDeprecation("mapred.cache.archives", 
-      new String[] {JobContext.CACHE_ARCHIVES});
+      new String[] {MRJobConfig.CACHE_ARCHIVES});
     Configuration.addDeprecation("mapred.cache.localFiles", 
-      new String[] {JobContext.CACHE_LOCALFILES});
+      new String[] {MRJobConfig.CACHE_LOCALFILES});
     Configuration.addDeprecation("mapred.cache.localArchives", 
-      new String[] {JobContext.CACHE_LOCALARCHIVES});
+      new String[] {MRJobConfig.CACHE_LOCALARCHIVES});
     Configuration.addDeprecation("mapred.cache.files.timestamps", 
-      new String[] {JobContext.CACHE_FILE_TIMESTAMPS});
+      new String[] {MRJobConfig.CACHE_FILE_TIMESTAMPS});
     Configuration.addDeprecation("mapred.cache.archives.timestamps", 
-      new String[] {JobContext.CACHE_ARCHIVES_TIMESTAMPS});
+      new String[] {MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS});
     Configuration.addDeprecation("mapred.create.symlink", 
-      new String[] {JobContext.CACHE_SYMLINK});
+      new String[] {MRJobConfig.CACHE_SYMLINK});
     Configuration.addDeprecation("mapred.working.dir", 
-      new String[] {JobContext.WORKING_DIR});
+      new String[] {MRJobConfig.WORKING_DIR});
     Configuration.addDeprecation("hadoop.job.history.user.location", 
-      new String[] {JobContext.HISTORY_LOCATION});
+      new String[] {MRJobConfig.HISTORY_LOCATION});
     Configuration.addDeprecation("user.name", 
-      new String[] {JobContext.USER_NAME});
+      new String[] {MRJobConfig.USER_NAME});
     Configuration.addDeprecation("mapred.output.key.class", 
-      new String[] {JobContext.OUTPUT_KEY_CLASS});
+      new String[] {MRJobConfig.OUTPUT_KEY_CLASS});
     Configuration.addDeprecation("mapred.output.value.class", 
-      new String[] {JobContext.OUTPUT_VALUE_CLASS});
+      new String[] {MRJobConfig.OUTPUT_VALUE_CLASS});
     Configuration.addDeprecation("mapred.output.value.groupfn.class", 
-      new String[] {JobContext.GROUP_COMPARATOR_CLASS});
+      new String[] {MRJobConfig.GROUP_COMPARATOR_CLASS});
     Configuration.addDeprecation("mapred.output.key.comparator.class", 
-      new String[] {JobContext.KEY_COMPARATOR});
+      new String[] {MRJobConfig.KEY_COMPARATOR});
     Configuration.addDeprecation("io.sort.factor", 
-      new String[] {JobContext.IO_SORT_FACTOR});
+      new String[] {MRJobConfig.IO_SORT_FACTOR});
     Configuration.addDeprecation("io.sort.mb", 
-      new String[] {JobContext.IO_SORT_MB});
+      new String[] {MRJobConfig.IO_SORT_MB});
     Configuration.addDeprecation("keep.failed.task.files", 
-      new String[] {JobContext.PRESERVE_FAILED_TASK_FILES});
+      new String[] {MRJobConfig.PRESERVE_FAILED_TASK_FILES});
     Configuration.addDeprecation("keep.task.files.pattern", 
-      new String[] {JobContext.PRESERVE_FILES_PATTERN});
+      new String[] {MRJobConfig.PRESERVE_FILES_PATTERN});
     Configuration.addDeprecation("mapred.child.tmp", 
-      new String[] {JobContext.TASK_TEMP_DIR});
+      new String[] {MRJobConfig.TASK_TEMP_DIR});
     Configuration.addDeprecation("mapred.debug.out.lines", 
-      new String[] {JobContext.TASK_DEBUGOUT_LINES});
+      new String[] {MRJobConfig.TASK_DEBUGOUT_LINES});
     Configuration.addDeprecation("mapred.merge.recordsBeforeProgress", 
-      new String[] {JobContext.RECORDS_BEFORE_PROGRESS});
+      new String[] {MRJobConfig.RECORDS_BEFORE_PROGRESS});
     Configuration.addDeprecation("mapred.skip.attempts.to.start.skipping", 
-      new String[] {JobContext.SKIP_START_ATTEMPTS});
+      new String[] {MRJobConfig.SKIP_START_ATTEMPTS});
     Configuration.addDeprecation("mapred.task.id", 
-      new String[] {JobContext.TASK_ATTEMPT_ID});
+      new String[] {MRJobConfig.TASK_ATTEMPT_ID});
     Configuration.addDeprecation("mapred.task.is.map", 
-      new String[] {JobContext.TASK_ISMAP});
+      new String[] {MRJobConfig.TASK_ISMAP});
     Configuration.addDeprecation("mapred.task.partition", 
-      new String[] {JobContext.TASK_PARTITION});
+      new String[] {MRJobConfig.TASK_PARTITION});
     Configuration.addDeprecation("mapred.task.profile", 
-      new String[] {JobContext.TASK_PROFILE});
+      new String[] {MRJobConfig.TASK_PROFILE});
     Configuration.addDeprecation("mapred.task.profile.maps", 
-      new String[] {JobContext.NUM_MAP_PROFILES});
+      new String[] {MRJobConfig.NUM_MAP_PROFILES});
     Configuration.addDeprecation("mapred.task.profile.reduces", 
-      new String[] {JobContext.NUM_REDUCE_PROFILES});
+      new String[] {MRJobConfig.NUM_REDUCE_PROFILES});
     Configuration.addDeprecation("mapred.task.timeout", 
-      new String[] {JobContext.TASK_TIMEOUT});
+      new String[] {MRJobConfig.TASK_TIMEOUT});
     Configuration.addDeprecation("mapred.tip.id", 
-      new String[] {JobContext.TASK_ID});
+      new String[] {MRJobConfig.TASK_ID});
     Configuration.addDeprecation("mapred.work.output.dir", 
-      new String[] {JobContext.TASK_OUTPUT_DIR});
+      new String[] {MRJobConfig.TASK_OUTPUT_DIR});
     Configuration.addDeprecation("mapred.userlog.limit.kb", 
-      new String[] {JobContext.TASK_USERLOG_LIMIT});
+      new String[] {MRJobConfig.TASK_USERLOG_LIMIT});
     Configuration.addDeprecation("mapred.userlog.retain.hours", 
-      new String[] {JobContext.USER_LOG_RETAIN_HOURS});
+      new String[] {MRJobConfig.USER_LOG_RETAIN_HOURS});
     Configuration.addDeprecation("mapred.task.profile.params", 
-      new String[] {JobContext.TASK_PROFILE_PARAMS});
+      new String[] {MRJobConfig.TASK_PROFILE_PARAMS});
     Configuration.addDeprecation("io.sort.spill.percent", 
-      new String[] {JobContext.MAP_SORT_SPILL_PERCENT});
+      new String[] {MRJobConfig.MAP_SORT_SPILL_PERCENT});
     Configuration.addDeprecation("map.input.file", 
-      new String[] {JobContext.MAP_INPUT_FILE});
+      new String[] {MRJobConfig.MAP_INPUT_FILE});
     Configuration.addDeprecation("map.input.length", 
-      new String[] {JobContext.MAP_INPUT_PATH});
+      new String[] {MRJobConfig.MAP_INPUT_PATH});
     Configuration.addDeprecation("map.input.start", 
-      new String[] {JobContext.MAP_INPUT_START});
+      new String[] {MRJobConfig.MAP_INPUT_START});
     Configuration.addDeprecation("mapred.job.map.memory.mb", 
-      new String[] {JobContext.MAP_MEMORY_MB});
+      new String[] {MRJobConfig.MAP_MEMORY_MB});
     Configuration.addDeprecation("mapred.map.child.env", 
-      new String[] {JobContext.MAP_ENV});
+      new String[] {MRJobConfig.MAP_ENV});
     Configuration.addDeprecation("mapred.map.child.java.opts", 
-      new String[] {JobContext.MAP_JAVA_OPTS});
+      new String[] {MRJobConfig.MAP_JAVA_OPTS});
     Configuration.addDeprecation("mapred.map.child.ulimit", 
-      new String[] {JobContext.MAP_ULIMIT});
+      new String[] {MRJobConfig.MAP_ULIMIT});
     Configuration.addDeprecation("mapred.map.max.attempts", 
-      new String[] {JobContext.MAP_MAX_ATTEMPTS});
+      new String[] {MRJobConfig.MAP_MAX_ATTEMPTS});
     Configuration.addDeprecation("mapred.map.task.debug.script", 
-      new String[] {JobContext.MAP_DEBUG_SCRIPT});
+      new String[] {MRJobConfig.MAP_DEBUG_SCRIPT});
     Configuration.addDeprecation("mapred.map.tasks.speculative.execution", 
-      new String[] {JobContext.MAP_SPECULATIVE});
+      new String[] {MRJobConfig.MAP_SPECULATIVE});
     Configuration.addDeprecation("mapred.max.map.failures.percent", 
-      new String[] {JobContext.MAP_FAILURES_MAX_PERCENT});
+      new String[] {MRJobConfig.MAP_FAILURES_MAX_PERCENT});
     Configuration.addDeprecation("mapred.skip.map.auto.incr.proc.count", 
-      new String[] {JobContext.MAP_SKIP_INCR_PROC_COUNT});
+      new String[] {MRJobConfig.MAP_SKIP_INCR_PROC_COUNT});
     Configuration.addDeprecation("mapred.skip.map.max.skip.records", 
-      new String[] {JobContext.MAP_SKIP_MAX_RECORDS});
+      new String[] {MRJobConfig.MAP_SKIP_MAX_RECORDS});
     Configuration.addDeprecation("min.num.spills.for.combine", 
-      new String[] {JobContext.MAP_COMBINE_MIN_SPILLS});
+      new String[] {MRJobConfig.MAP_COMBINE_MIN_SPILLS});
     Configuration.addDeprecation("mapred.compress.map.output", 
-      new String[] {JobContext.MAP_OUTPUT_COMPRESS});
+      new String[] {MRJobConfig.MAP_OUTPUT_COMPRESS});
     Configuration.addDeprecation("mapred.map.output.compression.codec", 
-      new String[] {JobContext.MAP_OUTPUT_COMPRESS_CODEC});
+      new String[] {MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC});
     Configuration.addDeprecation("mapred.mapoutput.key.class", 
-      new String[] {JobContext.MAP_OUTPUT_KEY_CLASS});
+      new String[] {MRJobConfig.MAP_OUTPUT_KEY_CLASS});
     Configuration.addDeprecation("mapred.mapoutput.value.class", 
-      new String[] {JobContext.MAP_OUTPUT_VALUE_CLASS});
+      new String[] {MRJobConfig.MAP_OUTPUT_VALUE_CLASS});
     Configuration.addDeprecation("map.output.key.field.separator", 
-      new String[] {JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR});
+      new String[] {MRJobConfig.MAP_OUTPUT_KEY_FIELD_SEPERATOR});
     Configuration.addDeprecation("mapred.map.child.log.level", 
-      new String[] {JobContext.MAP_LOG_LEVEL});
+      new String[] {MRJobConfig.MAP_LOG_LEVEL});
     Configuration.addDeprecation("mapred.inmem.merge.threshold", 
-      new String[] {JobContext.REDUCE_MERGE_INMEM_THRESHOLD});
+      new String[] {MRJobConfig.REDUCE_MERGE_INMEM_THRESHOLD});
     Configuration.addDeprecation("mapred.job.reduce.input.buffer.percent", 
-      new String[] {JobContext.REDUCE_INPUT_BUFFER_PERCENT});
+      new String[] {MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT});
     Configuration.addDeprecation("mapred.job.reduce.markreset.buffer.percent", 
-      new String[] {JobContext.REDUCE_MARKRESET_BUFFER_PERCENT});
+      new String[] {MRJobConfig.REDUCE_MARKRESET_BUFFER_PERCENT});
     Configuration.addDeprecation("mapred.job.reduce.memory.mb", 
-      new String[] {JobContext.REDUCE_MEMORY_MB});
+      new String[] {MRJobConfig.REDUCE_MEMORY_MB});
     Configuration.addDeprecation("mapred.job.reduce.total.mem.bytes", 
-      new String[] {JobContext.REDUCE_MEMORY_TOTAL_BYTES});
+      new String[] {MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES});
     Configuration.addDeprecation("mapred.job.shuffle.input.buffer.percent", 
-      new String[] {JobContext.SHUFFLE_INPUT_BUFFER_PERCENT});
+      new String[] {MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT});
     Configuration.addDeprecation("mapred.job.shuffle.merge.percent", 
-      new String[] {JobContext.SHUFFLE_MERGE_EPRCENT});
+      new String[] {MRJobConfig.SHUFFLE_MERGE_EPRCENT});
     Configuration.addDeprecation("mapred.max.reduce.failures.percent", 
-      new String[] {JobContext.REDUCE_FAILURES_MAXPERCENT});
+      new String[] {MRJobConfig.REDUCE_FAILURES_MAXPERCENT});
     Configuration.addDeprecation("mapred.reduce.child.env", 
-      new String[] {JobContext.REDUCE_ENV});
+      new String[] {MRJobConfig.REDUCE_ENV});
     Configuration.addDeprecation("mapred.reduce.child.java.opts", 
-      new String[] {JobContext.REDUCE_JAVA_OPTS});
+      new String[] {MRJobConfig.REDUCE_JAVA_OPTS});
     Configuration.addDeprecation("mapred.reduce.child.ulimit", 
-      new String[] {JobContext.REDUCE_ULIMIT});
+      new String[] {MRJobConfig.REDUCE_ULIMIT});
     Configuration.addDeprecation("mapred.reduce.max.attempts", 
-      new String[] {JobContext.REDUCE_MAX_ATTEMPTS});
+      new String[] {MRJobConfig.REDUCE_MAX_ATTEMPTS});
     Configuration.addDeprecation("mapred.reduce.parallel.copies", 
-      new String[] {JobContext.SHUFFLE_PARALLEL_COPIES});
+      new String[] {MRJobConfig.SHUFFLE_PARALLEL_COPIES});
     Configuration.addDeprecation("mapred.reduce.task.debug.script", 
-      new String[] {JobContext.REDUCE_DEBUG_SCRIPT});
+      new String[] {MRJobConfig.REDUCE_DEBUG_SCRIPT});
     Configuration.addDeprecation("mapred.reduce.tasks.speculative.execution", 
-      new String[] {JobContext.REDUCE_SPECULATIVE});
+      new String[] {MRJobConfig.REDUCE_SPECULATIVE});
     Configuration.addDeprecation("mapred.shuffle.connect.timeout", 
-      new String[] {JobContext.SHUFFLE_CONNECT_TIMEOUT});
+      new String[] {MRJobConfig.SHUFFLE_CONNECT_TIMEOUT});
     Configuration.addDeprecation("mapred.shuffle.read.timeout", 
-      new String[] {JobContext.SHUFFLE_READ_TIMEOUT});
+      new String[] {MRJobConfig.SHUFFLE_READ_TIMEOUT});
     Configuration.addDeprecation("mapred.skip.reduce.auto.incr.proc.count", 
-      new String[] {JobContext.REDUCE_SKIP_INCR_PROC_COUNT});
+      new String[] {MRJobConfig.REDUCE_SKIP_INCR_PROC_COUNT});
     Configuration.addDeprecation("mapred.skip.reduce.max.skip.groups", 
-      new String[] {JobContext.REDUCE_SKIP_MAXGROUPS});
+      new String[] {MRJobConfig.REDUCE_SKIP_MAXGROUPS});
     Configuration.addDeprecation("mapred.reduce.child.log.level", 
-      new String[] {JobContext.REDUCE_LOG_LEVEL});
+      new String[] {MRJobConfig.REDUCE_LOG_LEVEL});
     Configuration.addDeprecation("jobclient.completion.poll.interval", 
       new String[] {Job.COMPLETION_POLL_INTERVAL_KEY});
     Configuration.addDeprecation("jobclient.progress.monitor.poll.interval", 

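The long ConfigUtil block above is what keeps legacy job configurations working: Configuration.addDeprecation registers a mapping from an old key to its MRJobConfig replacement, so a value set under the old name remains readable under the new one (with a deprecation warning logged on use). A minimal sketch of the effect, using one mapping from the hunk (the sample value 4 is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class DeprecationSketch {
      public static void main(String[] args) {
        // The same registration ConfigUtil performs for "mapred.map.tasks".
        Configuration.addDeprecation("mapred.map.tasks",
            new String[] {MRJobConfig.NUM_MAPS});

        Configuration conf = new Configuration();
        conf.set("mapred.map.tasks", "4");  // a job still using the old key

        // The mapping makes the value visible under the new key.
        System.out.println(conf.getInt(MRJobConfig.NUM_MAPS, 1));  // prints 4
      }
    }
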
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java Fri Apr 30 22:26:19 2010
@@ -21,7 +21,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 
 public class TestJobConf {
 
@@ -29,7 +29,7 @@ public class TestJobConf {
   public void testProfileParamsDefaults() {
     JobConf configuration = new JobConf();
 
-    Assert.assertNull(configuration.get(JobContext.TASK_PROFILE_PARAMS));
+    Assert.assertNull(configuration.get(MRJobConfig.TASK_PROFILE_PARAMS));
 
     String result = configuration.getProfileParams();
 
@@ -43,14 +43,14 @@ public class TestJobConf {
     JobConf configuration = new JobConf();
 
     configuration.setProfileParams("test");
-    Assert.assertEquals("test", configuration.get(JobContext.TASK_PROFILE_PARAMS));
+    Assert.assertEquals("test", configuration.get(MRJobConfig.TASK_PROFILE_PARAMS));
   }
 
   @Test
   public void testProfileParamsGetter() {
     JobConf configuration = new JobConf();
 
-    configuration.set(JobContext.TASK_PROFILE_PARAMS, "test");
+    configuration.set(MRJobConfig.TASK_PROFILE_PARAMS, "test");
     Assert.assertEquals("test", configuration.getProfileParams());
   }
 
@@ -61,42 +61,42 @@ public class TestJobConf {
   @Test
   public void testMemoryConfigForMapOrReduceTask(){
     JobConf configuration = new JobConf();
-    configuration.set(JobContext.MAP_MEMORY_MB,String.valueOf(300));
-    configuration.set(JobContext.REDUCE_MEMORY_MB,String.valueOf(300));
+    configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(300));
     Assert.assertEquals(configuration.getMemoryForMapTask(),300);
     Assert.assertEquals(configuration.getMemoryForReduceTask(),300);
 
     configuration.set("mapred.task.maxvmem" , String.valueOf(2*1024 * 1024));
-    configuration.set(JobContext.MAP_MEMORY_MB,String.valueOf(300));
-    configuration.set(JobContext.REDUCE_MEMORY_MB,String.valueOf(300));
+    configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(300));
     Assert.assertEquals(configuration.getMemoryForMapTask(),2);
     Assert.assertEquals(configuration.getMemoryForReduceTask(),2);
 
     configuration = new JobConf();
     configuration.set("mapred.task.maxvmem" , "-1");
-    configuration.set(JobContext.MAP_MEMORY_MB,String.valueOf(300));
-    configuration.set(JobContext.REDUCE_MEMORY_MB,String.valueOf(400));
+    configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(400));
     Assert.assertEquals(configuration.getMemoryForMapTask(), 300);
     Assert.assertEquals(configuration.getMemoryForReduceTask(), 400);
 
     configuration = new JobConf();
     configuration.set("mapred.task.maxvmem" , String.valueOf(2*1024 * 1024));
-    configuration.set(JobContext.MAP_MEMORY_MB,"-1");
-    configuration.set(JobContext.REDUCE_MEMORY_MB,"-1");
+    configuration.set(MRJobConfig.MAP_MEMORY_MB,"-1");
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB,"-1");
     Assert.assertEquals(configuration.getMemoryForMapTask(),2);
     Assert.assertEquals(configuration.getMemoryForReduceTask(),2);
 
     configuration = new JobConf();
     configuration.set("mapred.task.maxvmem" , String.valueOf(-1));
-    configuration.set(JobContext.MAP_MEMORY_MB,"-1");
-    configuration.set(JobContext.REDUCE_MEMORY_MB,"-1");
+    configuration.set(MRJobConfig.MAP_MEMORY_MB,"-1");
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB,"-1");
     Assert.assertEquals(configuration.getMemoryForMapTask(),-1);
     Assert.assertEquals(configuration.getMemoryForReduceTask(),-1);    
 
     configuration = new JobConf();
     configuration.set("mapred.task.maxvmem" , String.valueOf(2*1024 * 1024));
-    configuration.set(JobContext.MAP_MEMORY_MB, "3");
-    configuration.set(JobContext.REDUCE_MEMORY_MB, "3");
+    configuration.set(MRJobConfig.MAP_MEMORY_MB, "3");
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "3");
     Assert.assertEquals(configuration.getMemoryForMapTask(),2);
     Assert.assertEquals(configuration.getMemoryForReduceTask(),2);
     
@@ -111,8 +111,8 @@ public class TestJobConf {
     JobConf configuration = new JobConf();
     
     configuration.set(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY, "-3");
-    configuration.set(JobContext.MAP_MEMORY_MB, "4");
-    configuration.set(JobContext.REDUCE_MEMORY_MB, "5");
+    configuration.set(MRJobConfig.MAP_MEMORY_MB, "4");
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "5");
     Assert.assertEquals(4, configuration.getMemoryForMapTask());
     Assert.assertEquals(5, configuration.getMemoryForReduceTask());
     
@@ -127,8 +127,8 @@ public class TestJobConf {
     JobConf configuration = new JobConf();
     
     configuration.set(JobConf.MAPRED_TASK_MAXVMEM_PROPERTY, "-4");
-    configuration.set(JobContext.MAP_MEMORY_MB, "-5");
-    configuration.set(JobContext.REDUCE_MEMORY_MB, "-6");
+    configuration.set(MRJobConfig.MAP_MEMORY_MB, "-5");
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB, "-6");
     
     Assert.assertEquals(JobConf.DISABLED_MEMORY_LIMIT,
                         configuration.getMemoryForMapTask());
@@ -146,20 +146,20 @@ public class TestJobConf {
     JobConf configuration = new JobConf();
 
     //get test case
-    configuration.set(JobContext.MAP_MEMORY_MB, String.valueOf(300));
-    configuration.set(JobContext.REDUCE_MEMORY_MB, String.valueOf(-1));
+    configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(-1));
     Assert.assertEquals(
       configuration.getMaxVirtualMemoryForTask(), 300 * 1024 * 1024);
 
     configuration = new JobConf();
-    configuration.set(JobContext.MAP_MEMORY_MB, String.valueOf(-1));
-    configuration.set(JobContext.REDUCE_MEMORY_MB, String.valueOf(200));
+    configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(-1));
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(200));
     Assert.assertEquals(
       configuration.getMaxVirtualMemoryForTask(), 200 * 1024 * 1024);
 
     configuration = new JobConf();
-    configuration.set(JobContext.MAP_MEMORY_MB, String.valueOf(-1));
-    configuration.set(JobContext.REDUCE_MEMORY_MB, String.valueOf(-1));
+    configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(-1));
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(-1));
     configuration.set("mapred.task.maxvmem", String.valueOf(1 * 1024 * 1024));
     Assert.assertEquals(
       configuration.getMaxVirtualMemoryForTask(), 1 * 1024 * 1024);
@@ -177,8 +177,8 @@ public class TestJobConf {
     Assert.assertEquals(configuration.getMemoryForReduceTask(), 2);
 
     configuration = new JobConf();   
-    configuration.set(JobContext.MAP_MEMORY_MB, String.valueOf(300));
-    configuration.set(JobContext.REDUCE_MEMORY_MB, String.valueOf(400));
+    configuration.set(MRJobConfig.MAP_MEMORY_MB, String.valueOf(300));
+    configuration.set(MRJobConfig.REDUCE_MEMORY_MB, String.valueOf(400));
     configuration.setMaxVirtualMemoryForTask(2 * 1024 * 1024);
     Assert.assertEquals(configuration.getMemoryForMapTask(), 2);
     Assert.assertEquals(configuration.getMemoryForReduceTask(), 2);

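The assertions above pin down a compatibility rule worth stating once: an explicit legacy mapred.task.maxvmem (in bytes) overrides the newer MB-valued keys, and the result is reported back in MB. A minimal sketch of that precedence (class name and values are illustrative only):

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class MemoryPrecedence {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.set(MRJobConfig.MAP_MEMORY_MB, "300");          // new key, in MB
        conf.set("mapred.task.maxvmem",
                 String.valueOf(2L * 1024 * 1024));          // old key, in bytes
        System.out.println(conf.getMemoryForMapTask());      // 2, not 300
      }
    }
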
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/MRCaching.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/MRCaching.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/MRCaching.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/MRCaching.java Fri Apr 30 22:26:19 2010
@@ -35,7 +35,7 @@ import org.apache.hadoop.mapred.Reporter
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.mapred.MapReduceBase;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 
 import java.net.URI;
 
@@ -311,9 +311,9 @@ public class MRCaching {
     // Note, the underlying job clones the original conf before determining
     // various stats (timestamps etc.), so we have to call getConfiguration here.
     validateCacheFileSizes(job.getConfiguration(), fileSizes,
-                           JobContext.CACHE_FILES_SIZES);
+                           MRJobConfig.CACHE_FILES_SIZES);
     validateCacheFileSizes(job.getConfiguration(), archiveSizes,
-                           JobContext.CACHE_ARCHIVES_SIZES);
+                           MRJobConfig.CACHE_ARCHIVES_SIZES);
 
     return new TestResult(job, true);
 

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLostTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLostTracker.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLostTracker.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestLostTracker.java Fri Apr 30 22:26:19 2010
@@ -24,7 +24,7 @@ import junit.framework.TestCase;
 import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobInProgress;
 import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTracker;
 import org.apache.hadoop.mapred.UtilsForTests.FakeClock;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 
 /**
@@ -103,8 +103,8 @@ public class TestLostTracker extends Tes
     JobConf conf = new JobConf();
     conf.setNumMapTasks(1);
     conf.setNumReduceTasks(1);
-    conf.set(JobContext.MAX_TASK_FAILURES_PER_TRACKER, "1");
-    conf.set(JobContext.SETUP_CLEANUP_NEEDED, "false");
+    conf.set(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, "1");
+    conf.set(MRJobConfig.SETUP_CLEANUP_NEEDED, "false");
     FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker);
     job.initTasks();
     job.setClusterSize(4);
@@ -170,8 +170,8 @@ public class TestLostTracker extends Tes
     JobConf conf = new JobConf();
     conf.setNumMapTasks(1);
     conf.setNumReduceTasks(0);
-    conf.set(JobContext.MAX_TASK_FAILURES_PER_TRACKER, "1");
-    conf.set(JobContext.SETUP_CLEANUP_NEEDED, "false");
+    conf.set(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, "1");
+    conf.set(MRJobConfig.SETUP_CLEANUP_NEEDED, "false");
     FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker);
     job.initTasks();
     job.setClusterSize(4);

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java Fri Apr 30 22:26:19 2010
@@ -41,8 +41,8 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapred.lib.IdentityReducer;
-import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -423,7 +423,7 @@ public class TestNodeRefresh extends Tes
 
     // run a failing job to blacklist the tracker
     JobConf jConf = mr.createJobConf();
-    jConf.set(JobContext.MAX_TASK_FAILURES_PER_TRACKER, "1");
+    jConf.set(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, "1");
     jConf.setJobName("test-job-fail-once");
     jConf.setMapperClass(FailOnceMapper.class);
     jConf.setReducerClass(IdentityReducer.class);

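As background for the MAX_TASK_FAILURES_PER_TRACKER setting used here and in TestLostTracker above: lowering it to 1 means a single task failure blacklists the tracker for that job, which is exactly what the failing job is for. A minimal sketch (hypothetical driver class):

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class BlacklistThreshold {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // One failed task on a tracker blacklists that tracker for this job.
        conf.set(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, "1");
      }
    }
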
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSetupWorkDir.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSetupWorkDir.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSetupWorkDir.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSetupWorkDir.java Fri Apr 30 22:26:19 2010
@@ -26,7 +26,7 @@ import junit.framework.TestCase;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 
 /**
  * Verifies if TaskRunner.SetupWorkDir() is cleaning up files/dirs pointed
@@ -92,7 +92,7 @@ public class TestSetupWorkDir extends Te
         (fs.listStatus(myTargetDir).length == 2));
 
     // let us disable creation of symlinks in setupWorkDir()
-    jConf.set(JobContext.CACHE_SYMLINK, "no");
+    jConf.set(MRJobConfig.CACHE_SYMLINK, "no");
 
     // Deletion of myWorkDir should not affect contents of myTargetDir.
     // myTargetDir is like $user/jobcache/distcache

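For context on the CACHE_SYMLINK flag the test flips off: DistributedCache.createSymlink(conf) is the usual way to turn it on, and the key takes the string values "yes"/"no". A minimal sketch (hypothetical class name):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;
    import org.apache.hadoop.mapreduce.filecache.DistributedCache;

    public class SymlinkFlag {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        DistributedCache.createSymlink(conf);                     // sets the key to "yes"
        System.out.println(conf.get(MRJobConfig.CACHE_SYMLINK));  // yes
        conf.set(MRJobConfig.CACHE_SYMLINK, "no");                // the test disables it this way
      }
    }
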
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java Fri Apr 30 22:26:19 2010
@@ -34,9 +34,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
@@ -133,9 +133,9 @@ public class TestTaskTrackerLocalization
     // Set job view ACLs in conf so that validation of contents of jobACLsFile
     // can be done against this value. Have both users and groups
     String jobViewACLs = "user1,user2, group1,group2";
-    jobConf.set(JobContext.JOB_ACL_VIEW_JOB, jobViewACLs);
+    jobConf.set(MRJobConfig.JOB_ACL_VIEW_JOB, jobViewACLs);
 
-    jobConf.setInt(JobContext.USER_LOG_RETAIN_HOURS, 0);
+    jobConf.setInt(MRJobConfig.USER_LOG_RETAIN_HOURS, 0);
     jobConf.setUser(getJobOwner().getShortUserName());
 
     Job job = new Job(jobConf);
@@ -657,10 +657,10 @@ public class TestTaskTrackerLocalization
     // Validate the contents of jobACLsFile(both user name and job-view-acls)
     Configuration jobACLsConf =
         TaskLogServlet.getConfFromJobACLsFile(task.getTaskID().toString());
-    assertTrue(jobACLsConf.get(JobContext.USER_NAME).equals(
+    assertTrue(jobACLsConf.get(MRJobConfig.USER_NAME).equals(
         localizedJobConf.getUser()));
-    assertTrue(jobACLsConf.get(JobContext.JOB_ACL_VIEW_JOB).
-        equals(localizedJobConf.get(JobContext.JOB_ACL_VIEW_JOB)));
+    assertTrue(jobACLsConf.get(MRJobConfig.JOB_ACL_VIEW_JOB).
+        equals(localizedJobConf.get(MRJobConfig.JOB_ACL_VIEW_JOB)));
   }
 
   /**

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java Fri Apr 30 22:26:19 2010
@@ -32,13 +32,13 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 import org.apache.hadoop.mapreduce.util.LinuxResourceCalculatorPlugin;
 import org.apache.hadoop.mapreduce.util.ProcfsBasedProcessTree;
 import org.apache.hadoop.mapreduce.SleepJob;
 import org.apache.hadoop.mapreduce.util.TestProcfsBasedProcessTree;
-import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -185,8 +185,8 @@ public class TestTaskTrackerMemoryManage
     conf.setMemoryForMapTask(PER_TASK_LIMIT);
     conf.setMemoryForReduceTask(PER_TASK_LIMIT);
     // Set task physical memory limits
-    conf.setLong(JobContext.MAP_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT);
-    conf.setLong(JobContext.REDUCE_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT);
+    conf.setLong(MRJobConfig.MAP_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT);
+    conf.setLong(MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT);
     runAndCheckSuccessfulJob(conf);
   }
 
@@ -287,8 +287,8 @@ public class TestTaskTrackerMemoryManage
     // Set up job.
     JobConf conf = new JobConf(miniMRCluster.createJobConf());
     if (doPhysicalMemory) {
-      conf.setLong(JobContext.MAP_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT);
-      conf.setLong(JobContext.REDUCE_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT);
+      conf.setLong(MRJobConfig.MAP_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT);
+      conf.setLong(MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT);
     } else {
       conf.setMemoryForMapTask(PER_TASK_LIMIT);
       conf.setMemoryForReduceTask(PER_TASK_LIMIT);
@@ -554,8 +554,8 @@ public class TestTaskTrackerMemoryManage
     // Set up job.
     JobConf conf = new JobConf(miniMRCluster.createJobConf());
     // Set per task physical memory limits to be a higher value
-    conf.setLong(JobContext.MAP_MEMORY_PHYSICAL_MB, 2 * 1024L);
-    conf.setLong(JobContext.REDUCE_MEMORY_PHYSICAL_MB, 2 * 1024L);
+    conf.setLong(MRJobConfig.MAP_MEMORY_PHYSICAL_MB, 2 * 1024L);
+    conf.setLong(MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB, 2 * 1024L);
     JobClient jClient = new JobClient(conf);
     SleepJob sleepJob = new SleepJob();
     sleepJob.setConf(conf);

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java Fri Apr 30 22:26:19 2010
@@ -24,8 +24,8 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.mapred.UtilsForTests.FakeClock;
-import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
 import org.apache.hadoop.mapreduce.util.MRAsyncDiskService;
 
@@ -75,7 +75,7 @@ public class TestUserLogCleanup {
 
   private void jobFinished(JobID jobid, int logRetainHours) {
     Configuration jobconf = new Configuration();
-    jobconf.setInt(JobContext.USER_LOG_RETAIN_HOURS, logRetainHours);
+    jobconf.setInt(MRJobConfig.USER_LOG_RETAIN_HOURS, logRetainHours);
     taskLogCleanupThread.markJobLogsForDeletion(myClock.getTime(), jobconf,
         jobid);
   }
@@ -147,7 +147,7 @@ public class TestUserLogCleanup {
     // job directories will be added with 3 hours as retain hours. They will be
     // deleted at time 4.
     Configuration conf = new Configuration();
-    conf.setInt(JobContext.USER_LOG_RETAIN_HOURS, 3);
+    conf.setInt(MRJobConfig.USER_LOG_RETAIN_HOURS, 3);
     taskLogCleanupThread.clearOldUserLogs(conf);
     assertFalse(foo.exists());
     assertFalse(bar.exists());
@@ -229,7 +229,7 @@ public class TestUserLogCleanup {
     // clear userlog directory
     // job directories will be added with 3 hours as retain hours. 
     Configuration conf = new Configuration();
-    conf.setInt(JobContext.USER_LOG_RETAIN_HOURS, 3);
+    conf.setInt(MRJobConfig.USER_LOG_RETAIN_HOURS, 3);
     taskLogCleanupThread = new UserLogCleaner(conf);
     myClock = new FakeClock(); // clock is reset.
     taskLogCleanupThread.setClock(myClock);

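Note that USER_LOG_RETAIN_HOURS is a per-job value: each job's configuration carries its own retention, which the cleanup thread reads when the job finishes. A minimal sketch (hypothetical class name, illustrative values):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class RetainHours {
      public static void main(String[] args) {
        Configuration jobconf = new Configuration();
        jobconf.setInt(MRJobConfig.USER_LOG_RETAIN_HOURS, 3); // keep this job's task logs 3 hours
        System.out.println(jobconf.getInt(MRJobConfig.USER_LOG_RETAIN_HOURS, 24)); // 3
      }
    }
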
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestWebUIAuthorization.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestWebUIAuthorization.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestWebUIAuthorization.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/TestWebUIAuthorization.java Fri Apr 30 22:26:19 2010
@@ -30,9 +30,9 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.http.TestHttpServer.DummyFilterInitializer;
-import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.SleepJob;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.TaskID;
@@ -264,11 +264,11 @@ public class TestWebUIAuthorization exte
     int infoPort = cluster.getJobTrackerRunner().getJobTrackerInfoPort();
 
     conf = new JobConf(cluster.createJobConf());
-    conf.set(JobContext.JOB_ACL_VIEW_JOB, viewColleague + " group3");
+    conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, viewColleague + " group3");
 
     // Let us add group1 and group3 to modify-job-acl. So modifyColleague and
     // viewAndModifyColleague will be able to modify the job
-    conf.set(JobContext.JOB_ACL_MODIFY_JOB, " group1,group3");
+    conf.set(MRJobConfig.JOB_ACL_MODIFY_JOB, " group1,group3");
 
     final SleepJob sleepJob = new SleepJob();
     sleepJob.setConf(conf);
@@ -398,11 +398,11 @@ public class TestWebUIAuthorization exte
       JobConf clusterConf, String jtURL) throws Exception {
 
     JobConf conf = new JobConf(cluster.createJobConf());
-    conf.set(JobContext.JOB_ACL_VIEW_JOB, viewColleague + " group3");
+    conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, viewColleague + " group3");
 
     // Let us add group1 and group3 to modify-job-acl. So modifyColleague and
     // viewAndModifyColleague will be able to modify the job
-    conf.set(JobContext.JOB_ACL_MODIFY_JOB, " group1,group3");
+    conf.set(MRJobConfig.JOB_ACL_MODIFY_JOB, " group1,group3");
 
     String jobTrackerJSP =  jtURL + "/jobtracker.jsp?a=b";
     Job job = startSleepJobAsUser(jobSubmitter, conf);
@@ -485,7 +485,7 @@ public class TestWebUIAuthorization exte
     // jobTrackerJSP killJob url
     String url = jobTrackerJSP + "&killJobs=true";
     // view-job-acl doesn't matter for killJob from jobtracker jsp page
-    conf.set(JobContext.JOB_ACL_VIEW_JOB, "");
+    conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "");
     
     // Let us start jobs as 4 different users (none of these 4 users is
     // mrOwner and none of these users is a member of superGroup). So only
@@ -494,7 +494,7 @@ public class TestWebUIAuthorization exte
 
     // start 1st job.
     // Out of these 4 users, only jobSubmitter can do killJob on 1st job
-    conf.set(JobContext.JOB_ACL_MODIFY_JOB, "");
+    conf.set(MRJobConfig.JOB_ACL_MODIFY_JOB, "");
     Job job1 = startSleepJobAsUser(jobSubmitter, conf);
     org.apache.hadoop.mapreduce.JobID jobid = job1.getID();
     getTIPId(cluster, jobid);// wait till the map task is started
@@ -514,7 +514,7 @@ public class TestWebUIAuthorization exte
     // start 4th job.
     // Out of these 4 users, viewColleague and viewAndModifyColleague
     // can do killJob on 4th job
-    conf.set(JobContext.JOB_ACL_MODIFY_JOB, viewColleague);
+    conf.set(MRJobConfig.JOB_ACL_MODIFY_JOB, viewColleague);
     Job job4 = startSleepJobAsUser(viewAndModifyColleague, conf);
     jobid = job4.getID();
     getTIPId(cluster, jobid);// wait till the map task is started
@@ -581,11 +581,11 @@ public class TestWebUIAuthorization exte
 
     JobConf clusterConf = cluster.createJobConf();
     conf = new JobConf(clusterConf);
-    conf.set(JobContext.JOB_ACL_VIEW_JOB, viewColleague + " group3");
+    conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, viewColleague + " group3");
 
     // Let us add group1 and group3 to modify-job-acl. So modifyColleague and
     // viewAndModifyColleague will be able to modify the job
-    conf.set(JobContext.JOB_ACL_MODIFY_JOB, " group1,group3");
+    conf.set(MRJobConfig.JOB_ACL_MODIFY_JOB, " group1,group3");
     
     Job job = startSleepJobAsUser(jobSubmitter, conf);
 

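A detail the ACL strings above depend on: the value is a comma-separated user list, a single space, then a comma-separated group list, so a string with a leading space grants access to groups only. A minimal sketch (hypothetical class name; user/group names are the test's placeholders):

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class AclFormat {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "user1,user2 group1");  // users, then groups
        conf.set(MRJobConfig.JOB_ACL_MODIFY_JOB, " group1,group3");    // groups only
      }
    }
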
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/GenericMRLoadGenerator.java Fri Apr 30 22:26:19 2010
@@ -207,7 +207,7 @@ public class GenericMRLoadGenerator exte
   static class RandomInputFormat extends InputFormat<Text, Text> {
 
     public List<InputSplit> getSplits(JobContext job) {
-      int numSplits = job.getConfiguration().getInt(JobContext.NUM_MAPS, 1);
+      int numSplits = job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1);
       List<InputSplit> splits = new ArrayList<InputSplit>();
       for (int i = 0; i < numSplits; ++i) {
         splits.add(new IndirectInputFormat.IndirectSplit(
@@ -324,7 +324,7 @@ public class GenericMRLoadGenerator exte
       numMaps = 1;
       conf.setLong(RandomTextWriter.BYTES_PER_MAP, totalBytesToWrite);
     }
-    conf.setInt(JobContext.NUM_MAPS, numMaps);
+    conf.setInt(MRJobConfig.NUM_MAPS, numMaps);
   }
 
 

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java Fri Apr 30 22:26:19 2010
@@ -133,7 +133,7 @@ public class MapReduceTestUtil {
    */
   public static Job createCopyJob(Configuration conf, Path outdir, 
       Path... indirs) throws Exception {
-    conf.setInt(JobContext.NUM_MAPS, 3);
+    conf.setInt(MRJobConfig.NUM_MAPS, 3);
     Job theJob = new Job(conf);
     theJob.setJobName("DataMoveJob");
 
@@ -162,7 +162,7 @@ public class MapReduceTestUtil {
     if (fs.exists(outdir)) {
       fs.delete(outdir, true);
     }
-    conf.setInt(JobContext.MAP_MAX_ATTEMPTS, 2);
+    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 2);
     Job theJob = new Job(conf);
     theJob.setJobName("Fail-Job");
 
@@ -374,7 +374,7 @@ public class MapReduceTestUtil {
   public static TaskAttemptContext createDummyMapTaskAttemptContext(
       Configuration conf) {
     TaskAttemptID tid = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
-    conf.set(JobContext.TASK_ATTEMPT_ID, tid.toString());
+    conf.set(MRJobConfig.TASK_ATTEMPT_ID, tid.toString());
     return new TaskAttemptContextImpl(conf, tid);    
   }
 

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/SleepJob.java Fri Apr 30 22:26:19 2010
@@ -69,7 +69,7 @@ public class SleepJob extends Configured
     public List<InputSplit> getSplits(JobContext jobContext) {
       List<InputSplit> ret = new ArrayList<InputSplit>();
       int numSplits = jobContext.getConfiguration().
-                        getInt(JobContext.NUM_MAPS, 1);
+                        getInt(MRJobConfig.NUM_MAPS, 1);
       for (int i = 0; i < numSplits; ++i) {
         ret.add(new EmptySplit());
       }
@@ -201,7 +201,7 @@ public class SleepJob extends Configured
     conf.setLong(REDUCE_SLEEP_TIME, reduceSleepTime);
     conf.setInt(MAP_SLEEP_COUNT, mapSleepCount);
     conf.setInt(REDUCE_SLEEP_COUNT, reduceSleepCount);
-    conf.setInt(JobContext.NUM_MAPS, numMapper);
+    conf.setInt(MRJobConfig.NUM_MAPS, numMapper);
     Job job = new Job(conf, "sleep");
     job.setNumReduceTasks(numReducer);
     job.setJarByClass(SleepJob.class);

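The SleepJob hunks show the whole patch in miniature: JobContext stays as the runtime context type passed to getSplits(), while the key constant it used to host now lives in MRJobConfig. A minimal client-side sketch (hypothetical class name):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class NumMapsDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setInt(MRJobConfig.NUM_MAPS, 4);  // read back by getSplits() above
        Job job = new Job(conf, "sleep");
        System.out.println(job.getConfiguration().getInt(MRJobConfig.NUM_MAPS, 1)); // 4
      }
    }
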
Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/TestJobACLs.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/TestJobACLs.java?rev=939849&r1=939848&r2=939849&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/TestJobACLs.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapreduce/TestJobACLs.java Fri Apr 30 22:26:19 2010
@@ -122,7 +122,7 @@ public class TestJobACLs {
 
     // Set the job up.
     final Configuration myConf = mr.createJobConf();
-    myConf.set(JobContext.JOB_ACL_VIEW_JOB, "user1,user3");
+    myConf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "user1,user3");
 
     // Submit the job as user1
     Job job = submitJobAsUser(myConf, "user1");
@@ -256,7 +256,7 @@ public class TestJobACLs {
 
     // Set the job up.
     final Configuration myConf = mr.createJobConf();
-    myConf.set(JobContext.JOB_ACL_MODIFY_JOB, "user1,user3");
+    myConf.set(MRJobConfig.JOB_ACL_MODIFY_JOB, "user1,user3");
 
     // Submit the job as user1
     Job job = submitJobAsUser(myConf, "user1");
@@ -364,7 +364,7 @@ public class TestJobACLs {
 
     // Set the job up.
     final Configuration myConf = mr.createJobConf();
-    myConf.set(JobContext.JOB_ACL_VIEW_JOB, "user1,user2");
+    myConf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "user1,user2");
 
     // Submit the job as user1
     Job job = submitJobAsUser(myConf, "user1");


