hadoop-mapreduce-commits mailing list archives

From: sha...@apache.org
Subject: svn commit: r816664 [7/9] - in /hadoop/mapreduce/trunk: ./ conf/ src/benchmarks/gridmix/ src/benchmarks/gridmix/pipesort/ src/benchmarks/gridmix2/ src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/ src/c++/pipes/impl/ src/c++/task-controller...
Date: Fri, 18 Sep 2009 15:10:02 GMT
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -49,6 +49,11 @@
     NUMBER_FORMAT.setGroupingUsed(false);
   }
   private FileOutputCommitter committer = null;
+  public static final String COMPRESS = "mapreduce.output.fileoutputformat.compress";
+  public static final String COMPRESS_CODEC = 
+    "mapreduce.output.fileoutputformat.compress.codec";
+  public static final String COMPRESS_TYPE = "mapreduce.output.fileoutputformat.compress.type";
+  public static final String OUTDIR = "mapreduce.output.fileoutputformat.outputdir";
 
   /**
    * Set whether the output of the job is compressed.
@@ -56,7 +61,7 @@
    * @param compress should the output of the job be compressed?
    */
   public static void setCompressOutput(Job job, boolean compress) {
-    job.getConfiguration().setBoolean("mapred.output.compress", compress);
+    job.getConfiguration().setBoolean(FileOutputFormat.COMPRESS, compress);
   }
   
   /**
@@ -66,7 +71,8 @@
    *         <code>false</code> otherwise
    */
   public static boolean getCompressOutput(JobContext job) {
-    return job.getConfiguration().getBoolean("mapred.output.compress", false);
+    return job.getConfiguration().getBoolean(
+      FileOutputFormat.COMPRESS, false);
   }
   
   /**
@@ -79,7 +85,7 @@
   setOutputCompressorClass(Job job, 
                            Class<? extends CompressionCodec> codecClass) {
     setCompressOutput(job, true);
-    job.getConfiguration().setClass("mapred.output.compression.codec", 
+    job.getConfiguration().setClass(FileOutputFormat.COMPRESS_CODEC, 
                                     codecClass, 
                                     CompressionCodec.class);
   }
@@ -97,7 +103,7 @@
 		                       Class<? extends CompressionCodec> defaultValue) {
     Class<? extends CompressionCodec> codecClass = defaultValue;
     Configuration conf = job.getConfiguration();
-    String name = conf.get("mapred.output.compression.codec");
+    String name = conf.get(FileOutputFormat.COMPRESS_CODEC);
     if (name != null) {
       try {
         codecClass = 
@@ -135,7 +141,7 @@
    * the map-reduce job.
    */
   public static void setOutputPath(Job job, Path outputDir) {
-    job.getConfiguration().set("mapred.output.dir", outputDir.toString());
+    job.getConfiguration().set(FileOutputFormat.OUTDIR, outputDir.toString());
   }
 
   /**
@@ -145,7 +151,7 @@
    * @see FileOutputFormat#getWorkOutputPath(TaskInputOutputContext)
    */
   public static Path getOutputPath(JobContext job) {
-    String name = job.getConfiguration().get("mapred.output.dir");
+    String name = job.getConfiguration().get(FileOutputFormat.OUTDIR);
     return name == null ? null: new Path(name);
   }
   
@@ -166,11 +172,11 @@
    * 
    * <p>To get around this the Map-Reduce framework helps the application-writer 
    * out by maintaining a special 
-   * <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> 
+   * <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> 
    * sub-directory for each task-attempt on HDFS where the output of the 
    * task-attempt goes. On successful completion of the task-attempt the files 
-   * in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only) 
-   * are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the 
+   * in the <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> (only) 
+   * are <i>promoted</i> to <tt>${mapreduce.output.fileoutputformat.outputdir}</tt>. Of course, the 
    * framework discards the sub-directory of unsuccessful task-attempts. This 
    * is completely transparent to the application.</p>
    * 

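For client code nothing changes: the static setters shown above now write the new keys. A minimal sketch of a driver using them (the output path and codec choice are illustrative, and the Job(Configuration, String) constructor of this era is assumed):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.compress.GzipCodec;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

    Job job = new Job(new Configuration(), "compressed-output");
    FileOutputFormat.setOutputPath(job, new Path("/tmp/example-out")); // writes OUTDIR
    FileOutputFormat.setCompressOutput(job, true);                     // writes COMPRESS
    FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);   // writes COMPRESS_CODEC
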
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/LazyOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -33,6 +33,8 @@
  * A Convenience class that creates output lazily.  
  */
 public class LazyOutputFormat <K,V> extends FilterOutputFormat<K, V> {
+  public static String OUTPUT_FORMAT = 
+    "mapreduce.output.lazyoutputformat.outputformat";
   /**
    * Set the underlying output format for LazyOutputFormat.
    * @param job the {@link Job} to modify
@@ -42,7 +44,7 @@
   public static void  setOutputFormatClass(Job job, 
                                      Class<? extends OutputFormat> theClass) {
       job.setOutputFormatClass(LazyOutputFormat.class);
-      job.getConfiguration().setClass("mapred.lazy.output.format", 
+      job.getConfiguration().setClass(OUTPUT_FORMAT, 
           theClass, OutputFormat.class);
   }
 
@@ -50,7 +52,7 @@
   private void getBaseOutputFormat(Configuration conf) 
   throws IOException {
     baseOut =  ((OutputFormat<K, V>) ReflectionUtils.newInstance(
-        conf.getClass("mapred.lazy.output.format", null), conf));
+      conf.getClass(OUTPUT_FORMAT, null), conf));
     if (baseOut == null) {
       throw new IOException("Output Format not set for LazyOutputFormat");
     }

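The renamed key is still managed entirely by the setter, so existing callers keep working. A sketch of the usual wiring (TextOutputFormat as the wrapped format is an arbitrary example):

    // Makes LazyOutputFormat the job's output format and records the
    // real format under mapreduce.output.lazyoutputformat.outputformat.
    LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);
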
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileAsBinaryOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileAsBinaryOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileAsBinaryOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileAsBinaryOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -38,6 +38,8 @@
  */
 public class SequenceFileAsBinaryOutputFormat 
     extends SequenceFileOutputFormat <BytesWritable,BytesWritable> {
+  public static String KEY_CLASS = "mapreduce.output.seqbinaryoutputformat.key.class"; 
+  public static String VALUE_CLASS = "mapreduce.output.seqbinaryoutputformat.value.class"; 
 
   /** 
    * Inner class used for appendRaw
@@ -83,7 +85,7 @@
    */
   static public void setSequenceFileOutputKeyClass(Job job, 
       Class<?> theClass) {
-    job.getConfiguration().setClass("mapred.seqbinary.output.key.class",
+    job.getConfiguration().setClass(KEY_CLASS,
       theClass, Object.class);
   }
 
@@ -97,7 +99,7 @@
    */
   static public void setSequenceFileOutputValueClass(Job job, 
       Class<?> theClass) {
-    job.getConfiguration().setClass("mapred.seqbinary.output.value.class", 
+    job.getConfiguration().setClass(VALUE_CLASS, 
                   theClass, Object.class);
   }
 
@@ -108,7 +110,7 @@
    */
   static public Class<? extends WritableComparable> 
       getSequenceFileOutputKeyClass(JobContext job) { 
-    return job.getConfiguration().getClass("mapred.seqbinary.output.key.class",
+    return job.getConfiguration().getClass(KEY_CLASS,
       job.getOutputKeyClass().asSubclass(WritableComparable.class), 
       WritableComparable.class);
   }
@@ -120,8 +122,7 @@
    */
   static public Class<? extends Writable> getSequenceFileOutputValueClass(
       JobContext job) { 
-    return job.getConfiguration().getClass(
-      "mapred.seqbinary.output.value.class", 
+    return job.getConfiguration().getClass(VALUE_CLASS, 
       job.getOutputValueClass().asSubclass(Writable.class), Writable.class);
   }
   

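As before, the new KEY_CLASS/VALUE_CLASS keys are only touched through the setters. A sketch (Text and IntWritable are illustrative choices; absent these calls, the getters above fall back to the job's output key/value classes):

    // Declare the classes stored in the underlying SequenceFile,
    // overriding job.getOutputKeyClass()/getOutputValueClass().
    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputKeyClass(job, Text.class);
    SequenceFileAsBinaryOutputFormat.setSequenceFileOutputValueClass(job, IntWritable.class);
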
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/SequenceFileOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -92,7 +92,7 @@
    *         defaulting to {@link CompressionType#RECORD}
    */
   public static CompressionType getOutputCompressionType(JobContext job) {
-    String val = job.getConfiguration().get("mapred.output.compression.type", 
+    String val = job.getConfiguration().get(FileOutputFormat.COMPRESS_TYPE, 
                                             CompressionType.RECORD.toString());
     return CompressionType.valueOf(val);
   }
@@ -106,7 +106,7 @@
   public static void setOutputCompressionType(Job job, 
 		                                          CompressionType style) {
     setCompressOutput(job, true);
-    job.getConfiguration().set("mapred.output.compression.type", 
+    job.getConfiguration().set(FileOutputFormat.COMPRESS_TYPE, 
                                style.toString());
   }
 

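A sketch of the corresponding client call (BLOCK compression is an illustrative choice; CompressionType here is org.apache.hadoop.io.SequenceFile.CompressionType):

    // Writes mapreduce.output.fileoutputformat.compress.type and also
    // flips FileOutputFormat.COMPRESS on via setCompressOutput(job, true).
    SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
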
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/TextOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -38,6 +38,7 @@
 
 /** An {@link OutputFormat} that writes plain text files. */
 public class TextOutputFormat<K, V> extends FileOutputFormat<K, V> {
+  public static String SEPERATOR = "mapreduce.output.textoutputformat.separator";
   protected static class LineRecordWriter<K, V>
     extends RecordWriter<K, V> {
     private static final String utf8 = "UTF-8";
@@ -112,8 +113,7 @@
                          ) throws IOException, InterruptedException {
     Configuration conf = job.getConfiguration();
     boolean isCompressed = getCompressOutput(job);
-    String keyValueSeparator= conf.get("mapred.textoutputformat.separator",
-                                       "\t");
+    String keyValueSeparator= conf.get(SEPERATOR, "\t");
     CompressionCodec codec = null;
     String extension = "";
     if (isCompressed) {

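TextOutputFormat has no setter for the separator, so jobs set the key directly; the public SEPERATOR constant now replaces the literal. A sketch:

    // Emit comma-separated key/value pairs instead of the default tab.
    job.getConfiguration().set(TextOutputFormat.SEPERATOR, ",");
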
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/BinaryPartitioner.java Fri Sep 18 15:09:48 2009
@@ -32,11 +32,11 @@
  * of the following properties:
  * <ul>
  *   <li>
- *     <i>mapred.binary.partitioner.left.offset</i>:
+ *     <i>mapreduce.partition.binarypartitioner.left.offset</i>:
  *     left offset in array (0 by default)
  *   </li>
  *   <li>
- *     <i>mapred.binary.partitioner.right.offset</i>: 
+ *     <i>mapreduce.partition.binarypartitioner.right.offset</i>: 
  *     right offset in array (-1 by default)
  *   </li>
  * </ul>
@@ -67,10 +67,10 @@
 public class BinaryPartitioner<V> extends Partitioner<BinaryComparable, V> 
   implements Configurable {
 
-  private static final String LEFT_OFFSET_PROPERTY_NAME = 
-    "mapred.binary.partitioner.left.offset";
-  private static final String RIGHT_OFFSET_PROPERTY_NAME = 
-    "mapred.binary.partitioner.right.offset";
+  public static final String LEFT_OFFSET_PROPERTY_NAME = 
+    "mapreduce.partition.binarypartitioner.left.offset";
+  public static final String RIGHT_OFFSET_PROPERTY_NAME = 
+    "mapreduce.partition.binarypartitioner.right.offset";
   
   /**
    * Set the subarray to be used for partitioning to 

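With the offset properties now public, a job can configure the partitioner without hard-coding the strings. A sketch, assuming BinaryComparable keys (the byte range is illustrative):

    // Partition on bytes 0..3 of each key.
    Configuration conf = job.getConfiguration();
    conf.setInt(BinaryPartitioner.LEFT_OFFSET_PROPERTY_NAME, 0);
    conf.setInt(BinaryPartitioner.RIGHT_OFFSET_PROPERTY_NAME, 3);
    job.setPartitionerClass(BinaryPartitioner.class);
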
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedComparator.java Fri Sep 18 15:09:48 2009
@@ -25,6 +25,8 @@
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.lib.partition.KeyFieldHelper.KeyDescription;
 
 
@@ -41,12 +43,13 @@
  *  of the field); if omitted from pos2, it defaults to 0 (the end of the
  *  field). opts are ordering options (any of 'nr' as described above). 
  * We assume that the fields in the key are separated by 
- * map.output.key.field.separator.
+ * {@link JobContext#MAP_OUTPUT_KEY_FIELD_SEPERATOR}.
  */
 
 public class KeyFieldBasedComparator<K, V> extends WritableComparator 
     implements Configurable {
   private KeyFieldHelper keyFieldHelper = new KeyFieldHelper();
+  public static String COMPARATOR_OPTIONS = "mapreduce.partition.keycomparator.options";
   private static final byte NEGATIVE = (byte)'-';
   private static final byte ZERO = (byte)'0';
   private static final byte DECIMAL = (byte)'.';
@@ -54,8 +57,8 @@
 
   public void setConf(Configuration conf) {
     this.conf = conf;
-    String option = conf.get("mapred.text.key.comparator.options");
-    String keyFieldSeparator = conf.get("map.output.key.field.separator","\t");
+    String option = conf.get(COMPARATOR_OPTIONS);
+    String keyFieldSeparator = conf.get(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR,"\t");
     keyFieldHelper.setKeyFieldSeparator(keyFieldSeparator);
     keyFieldHelper.parseOption(option);
   }
@@ -338,4 +341,31 @@
     }
     return true;
   }
+  /**
+   * Set the {@link KeyFieldBasedComparator} options used to compare keys.
+   * 
+   * @param keySpec the key specification of the form -k pos1[,pos2], where,
+   *  pos is of the form f[.c][opts], where f is the number
+   *  of the key field to use, and c is the number of the first character from
+   *  the beginning of the field. Fields and character posns are numbered 
+   *  starting with 1; a character position of zero in pos2 indicates the
+   *  field's last character. If '.c' is omitted from pos1, it defaults to 1
+   *  (the beginning of the field); if omitted from pos2, it defaults to 0 
+   *  (the end of the field). opts are ordering options. The supported options
+   *  are:
+   *    -n, (Sort numerically)
+   *    -r, (Reverse the result of comparison)                 
+   */
+  public static void setKeyFieldComparatorOptions(Job job, String keySpec) {
+    job.getConfiguration().set(COMPARATOR_OPTIONS, keySpec);
+  }
+  
+  /**
+   * Get the {@link KeyFieldBasedComparator} options
+   */
+  public static String getKeyFieldComparatorOption(JobContext job) {
+    return job.getConfiguration().get(COMPARATOR_OPTIONS);
+  }
+
+
 }

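A sketch of the new setter in use (the key spec is illustrative; setSortComparatorClass is the standard Job hook for installing a custom comparator):

    // Sort on the second key field, numerically and in reverse.
    job.setSortComparatorClass(KeyFieldBasedComparator.class);
    KeyFieldBasedComparator.setKeyFieldComparatorOptions(job, "-k2,2nr");
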
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java Fri Sep 18 15:09:48 2009
@@ -25,6 +25,8 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.Partitioner;
 import org.apache.hadoop.mapreduce.lib.partition.KeyFieldHelper.KeyDescription;
 
@@ -46,6 +48,8 @@
 
   private static final Log LOG = LogFactory.getLog(
                                    KeyFieldBasedPartitioner.class.getName());
+  public static String PARTITIONER_OPTIONS = 
+    "mapreduce.partition.keypartitioner.options";
   private int numOfPartitionFields;
   
   private KeyFieldHelper keyFieldHelper = new KeyFieldHelper();
@@ -55,15 +59,15 @@
   public void setConf(Configuration conf) {
     this.conf = conf;
     String keyFieldSeparator = 
-      conf.get("map.output.key.field.separator", "\t");
+      conf.get(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, "\t");
     keyFieldHelper.setKeyFieldSeparator(keyFieldSeparator);
     if (conf.get("num.key.fields.for.partition") != null) {
       LOG.warn("Using deprecated num.key.fields.for.partition. " +
-      		"Use mapred.text.key.partitioner.options instead");
+      		"Use mapreduce.partition.keypartitioner.options instead");
       this.numOfPartitionFields = conf.getInt("num.key.fields.for.partition",0);
       keyFieldHelper.setKeyFieldSpec(1,numOfPartitionFields);
     } else {
-      String option = conf.get("mapred.text.key.partitioner.options");
+      String option = conf.get(PARTITIONER_OPTIONS);
       keyFieldHelper.parseOption(option);
     }
   }
@@ -119,4 +123,30 @@
   protected int getPartition(int hash, int numReduceTasks) {
     return (hash & Integer.MAX_VALUE) % numReduceTasks;
   }
+  
+  /**
+   * Set the {@link KeyFieldBasedPartitioner} options used for 
+   * {@link Partitioner}
+   * 
+   * @param keySpec the key specification of the form -k pos1[,pos2], where,
+   *  pos is of the form f[.c][opts], where f is the number
+   *  of the key field to use, and c is the number of the first character from
+   *  the beginning of the field. Fields and character posns are numbered 
+   *  starting with 1; a character position of zero in pos2 indicates the
+   *  field's last character. If '.c' is omitted from pos1, it defaults to 1
+   *  (the beginning of the field); if omitted from pos2, it defaults to 0 
+   *  (the end of the field).
+   */
+  public void setKeyFieldPartitionerOptions(Job job, String keySpec) {
+    job.getConfiguration().set(PARTITIONER_OPTIONS, keySpec);
+  }
+  
+  /**
+   * Get the {@link KeyFieldBasedPartitioner} options
+   */
+  public String getKeyFieldPartitionerOption(JobContext job) {
+    return job.getConfiguration().get(PARTITIONER_OPTIONS);
+  }
+
+
 }

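Unlike the comparator's static setter, setKeyFieldPartitionerOptions is added here as an instance method, so a driver can just as well write the now-public key itself. A sketch (the key spec is illustrative):

    // Partition on the first key field.
    job.setPartitionerClass(KeyFieldBasedPartitioner.class);
    job.getConfiguration().set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, "-k1,1");
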
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/partition/TotalOrderPartitioner.java Fri Sep 18 15:09:48 2009
@@ -45,6 +45,12 @@
 
   private Node partitions;
   public static final String DEFAULT_PATH = "_partition.lst";
+  public static final String PARTITIONER_PATH = 
+    "mapreduce.totalorderpartitioner.path";
+  public static final String MAX_TRIE_DEPTH = 
+    "mapreduce.totalorderpartitioner.trie.maxdepth"; 
+  public static final String NATURAL_ORDER = 
+    "mapreduce.totalorderpartitioner.naturalorder";
   Configuration conf;
 
   public TotalOrderPartitioner() { }
@@ -83,7 +89,7 @@
         }
       }
       boolean natOrder =
-        conf.getBoolean("total.order.partitioner.natural.order", true);
+        conf.getBoolean(NATURAL_ORDER, true);
       if (natOrder && BinaryComparable.class.isAssignableFrom(keyClass)) {
         partitions = buildTrie((BinaryComparable[])splitPoints, 0,
             splitPoints.length, new byte[0],
@@ -94,7 +100,7 @@
             // case where the split points are long and mostly look like bytes 
             // iii...iixii...iii   .  Therefore, we make the default depth
             // limit large but not huge.
-            conf.getInt("total.order.partitioner.max.trie.depth", 200));
+            conf.getInt(MAX_TRIE_DEPTH, 200));
       } else {
         partitions = new BinarySearchNode(splitPoints, comparator);
       }
@@ -119,7 +125,7 @@
    * keys in the SequenceFile.
    */
   public static void setPartitionFile(Configuration conf, Path p) {
-    conf.set("total.order.partitioner.path", p.toString());
+    conf.set(PARTITIONER_PATH, p.toString());
   }
 
   /**
@@ -127,7 +133,7 @@
    * @see #setPartitionFile(Configuration, Path)
    */
   public static String getPartitionFile(Configuration conf) {
-    return conf.get("total.order.partitioner.path", DEFAULT_PATH);
+    return conf.get(PARTITIONER_PATH, DEFAULT_PATH);
   }
 
   /**

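A sketch of the client side (the split-points SequenceFile would normally be produced by a sampler; the path is illustrative):

    // Record the split-points file under mapreduce.totalorderpartitioner.path.
    TotalOrderPartitioner.setPartitionFile(job.getConfiguration(),
                                           new Path("/tmp/_partition.lst"));
    job.setPartitionerClass(TotalOrderPartitioner.class);
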
Added: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java?rev=816664&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java (added)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/jobtracker/JTConfig.java Fri Sep 18 15:09:48 2009
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.server.jobtracker;
+
+import org.apache.hadoop.mapreduce.MRConfig;
+
+/**
+ * Place holder for JobTracker server-level configuration.
+ * 
+ * The keys should have "mapreduce.jobtracker." as the prefix
+ */
+public interface JTConfig extends MRConfig {
+  // JobTracker configuration parameters
+  public static final String JT_IPC_ADDRESS  = "mapreduce.jobtracker.address";
+  public static final String JT_HTTP_ADDRESS = 
+    "mapreduce.jobtracker.http.address";
+  public static final String JT_IPC_HANDLER_COUNT = 
+    "mapreduce.jobtracker.handler.count";
+  public static final String JT_RESTART_ENABLED = 
+    "mapreduce.jobtracker.restart.recover";
+  public static final String JT_TASK_SCHEDULER = 
+    "mapreduce.jobtracker.taskscheduler";
+  public static final String JT_INSTRUMENTATION = 
+    "mapreduce.jobtracker.instrumentation";
+  public static final String JT_TASKS_PER_JOB = 
+    "mapreduce.jobtracker.maxtasks.perjob";
+  public static final String JT_HEARTBEATS_IN_SECOND = 
+    "mapreduce.jobtracker.heartbeats.in.second";
+  public static final String JT_PERSIST_JOBSTATUS = 
+    "mapreduce.jobtracker.persist.jobstatus.active";
+  public static final String JT_PERSIST_JOBSTATUS_HOURS = 
+    "mapreduce.jobtracker.persist.jobstatus.hours";
+  public static final String JT_PERSIST_JOBSTATUS_DIR = 
+    "mapreduce.jobtracker.persist.jobstatus.dir";
+  public static final String JT_SUPERGROUP = 
+    "mapreduce.jobtracker.permissions.supergroup";
+  public static final String JT_RETIREJOBS = 
+    "mapreduce.jobtracker.retirejobs";
+  public static final String JT_RETIREJOB_CACHE_SIZE = 
+    "mapreduce.jobtracker.retiredjobs.cache.size";
+  public static final String JT_TASKCACHE_LEVELS = 
+    "mapreduce.jobtracker.taskcache.levels";
+  public static final String JT_TASK_ALLOC_PAD_FRACTION = 
+    "mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad";
+  public static final String JT_JOBINIT_THREADS = 
+    "mapreduce.jobtracker.jobinit.threads";
+  public static final String JT_TRACKER_EXPIRY_INTERVAL = 
+    "mapreduce.jobtracker.expire.trackers.interval";
+  public static final String JT_RUNNINGTASKS_PER_JOB = 
+    "mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob";
+  public static final String JT_HOSTS_FILENAME = 
+    "mapreduce.jobtracker.hosts.filename";
+  public static final String JT_HOSTS_EXCLUDE_FILENAME = 
+    "mapreduce.jobtracker.hosts.exclude.filename";
+  public static final String JT_JOBHISTORY_CACHE_SIZE =
+    "mapreduce.jobtracker.jobhistory.lru.cache.size";
+  public static final String JT_JOBHISTORY_BLOCK_SIZE = 
+    "mapreduce.jobtracker.jobhistory.block.size";
+  public static final String JT_JOBHISTORY_COMPLETED_LOCATION = 
+    "mapreduce.jobtracker.jobhistory.completed.location";
+  public static final String JT_JOBHISTORY_LOCATION = 
+    "mapreduce.jobtracker.jobhistory.location";
+  public static final String JT_AVG_BLACKLIST_THRESHOLD = 
+    "mapreduce.jobtracker.blacklist.average.threshold";
+  public static final String JT_SYSTEM_DIR = "mapreduce.jobtracker.system.dir";
+  public static final String JT_MAX_TRACKER_BLACKLISTS = 
+    "mapreduce.jobtracker.tasktracker.maxblacklists";
+  public static final String JT_JOBHISTORY_MAXAGE = 
+    "mapreduce.jobtracker.jobhistory.maxage";
+  public static final String JT_MAX_MAPMEMORY_MB = 
+    "mapreduce.jobtracker.maxmapmemory.mb";
+  public static final String JT_MAX_REDUCEMEMORY_MB = 
+    "mapreduce.jobtracker.maxreducememory.mb";
+}

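Server-side code and tests can now name JobTracker settings through the interface instead of literals. A sketch of typical reads (the fallback values are illustrative, not necessarily the shipped defaults):

    // Where to reach the JobTracker RPC endpoint, and its handler pool size.
    String jtAddress = conf.get(JTConfig.JT_IPC_ADDRESS, "local");
    int handlers = conf.getInt(JTConfig.JT_IPC_HANDLER_COUNT, 10);
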
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/Localizer.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/Localizer.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/Localizer.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/Localizer.java Fri Sep 18 15:09:48 2009
@@ -165,9 +165,9 @@
    * Initialize the local directories for a particular user on this TT. This
    * involves creation and setting permissions of the following directories
    * <ul>
-   * <li>$mapred.local.dir/taskTracker/$user</li>
-   * <li>$mapred.local.dir/taskTracker/$user/jobcache</li>
-   * <li>$mapred.local.dir/taskTracker/$user/distcache</li>
+   * <li>$mapreduce.cluster.local.dir/taskTracker/$user</li>
+   * <li>$mapreduce.cluster.local.dir/taskTracker/$user/jobcache</li>
+   * <li>$mapreduce.cluster.local.dir/taskTracker/$user/distcache</li>
    * </ul>
    * 
    * @param user

Added: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java?rev=816664&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java (added)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/server/tasktracker/TTConfig.java Fri Sep 18 15:09:48 2009
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.server.tasktracker;
+
+import org.apache.hadoop.mapreduce.MRConfig;
+
+/**
+ * Place holder for TaskTracker server-level configuration.
+ * 
+ * The keys should have "mapreduce.tasktracker." as the prefix
+ */
+
+public interface TTConfig extends MRConfig {
+
+  // Task-tracker configuration properties
+  public static final String TT_HEALTH_CHECKER_INTERVAL = 
+    "mapreduce.tasktracker.healthchecker.interval";
+  public static final String TT_HEALTH_CHECKER_SCRIPT_ARGS =
+    "mapreduce.tasktracker.healthchecker.script.args";
+  public static final String TT_HEALTH_CHECKER_SCRIPT_PATH =
+    "mapreduce.tasktracker.healthchecker.script.path";
+  public static final String TT_HEALTH_CHECKER_SCRIPT_TIMEOUT =
+    "mapreduce.tasktracker.healthchecker.script.timeout";
+  public static final String TT_LOCAL_DIR_MINSPACE_KILL = 
+    "mapreduce.tasktracker.local.dir.minspacekill";
+  public static final String TT_LOCAL_DIR_MINSPACE_START = 
+    "mapreduce.tasktracker.local.dir.minspacestart";
+  public static final String TT_HTTP_ADDRESS = 
+    "mapreduce.tasktracker.http.address";
+  public static final String TT_REPORT_ADDRESS = 
+    "mapreduce.tasktracker.report.address";
+  public static final String TT_TASK_CONTROLLER = 
+    "mapreduce.tasktracker.taskcontroller";
+  public static final String TT_CONTENTION_TRACKING = 
+    "mapreduce.tasktracker.contention.tracking";
+  public static final String TT_STATIC_RESOLUTIONS = 
+    "mapreduce.tasktracker.net.static.resolutions";
+  public static final String TT_HTTP_THREADS = 
+    "mapreduce.tasktracker.http.threads";
+  public static final String TT_HOST_NAME = "mapreduce.tasktracker.host.name";
+  public static final String TT_SLEEP_TIME_BEFORE_SIG_KILL =
+    "mapreduce.tasktracker.tasks.sleeptimebeforesigkill";
+  public static final String TT_DNS_INTERFACE = 
+    "mapreduce.tasktracker.dns.interface";
+  public static final String TT_DNS_NAMESERVER = 
+    "mapreduce.tasktracker.dns.nameserver";
+  public static final String TT_MAX_TASK_COMPLETION_EVENTS_TO_POLL  = 
+    "mapreduce.tasktracker.events.batchsize";
+  public static final String TT_INDEX_CACHE = 
+    "mapreduce.tasktracker.indexcache.mb";
+  public static final String TT_INSTRUMENTATION = 
+    "mapreduce.tasktracker.instrumentation";
+  public static final String TT_MAP_SLOTS = 
+    "mapreduce.tasktracker.map.tasks.maximum";
+  public static final String TT_MEMORY_CALCULATOR_PLUGIN = 
+    "mapreduce.tasktracker.memorycalculatorplugin";
+  public static final String TT_REDUCE_SLOTS = 
+    "mapreduce.tasktracker.reduce.tasks.maximum";
+  public static final String TT_MEMORY_MANAGER_MONITORING_INTERVAL = 
+    "mapreduce.tasktracker.taskmemorymanager.monitoringinterval";
+  public static final String TT_LOCAL_CACHE_SIZE = 
+    "mapreduce.tasktracker.cache.local.size";
+
+}

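Likewise for the TaskTracker side. A sketch of typical reads (again, the fallbacks are illustrative rather than authoritative defaults):

    // Task slots and embedded HTTP server threads for this tracker.
    int mapSlots = conf.getInt(TTConfig.TT_MAP_SLOTS, 2);
    int reduceSlots = conf.getInt(TTConfig.TT_REDUCE_SLOTS, 2);
    int httpThreads = conf.getInt(TTConfig.TT_HTTP_THREADS, 40);
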
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java Fri Sep 18 15:09:48 2009
@@ -41,6 +41,7 @@
 import org.apache.hadoop.mapred.IFileInputStream;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.task.reduce.MapOutput.Type;
 import org.apache.hadoop.util.Progressable;
@@ -112,10 +113,10 @@
     }
 
     this.connectionTimeout = 
-      job.getInt("mapred.shuffle.connect.timeout",
+      job.getInt(JobContext.SHUFFLE_CONNECT_TIMEOUT,
                  DEFAULT_STALLED_COPY_TIMEOUT);
     this.readTimeout = 
-      job.getInt("mapred.shuffle.read.timeout", DEFAULT_READ_TIMEOUT);
+      job.getInt(JobContext.SHUFFLE_READ_TIMEOUT, DEFAULT_READ_TIMEOUT);
     
     setName("fetcher#" + id);
     setDaemon(true);

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java Fri Sep 18 15:09:48 2009
@@ -50,6 +50,7 @@
 import org.apache.hadoop.mapred.Merger.Segment;
 import org.apache.hadoop.mapred.Task.CombineOutputCollector;
 import org.apache.hadoop.mapred.Task.CombineValuesIterator;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.task.reduce.MapOutput.MapOutputComparator;
@@ -149,27 +150,27 @@
     this.rfs = ((LocalFileSystem)localFS).getRaw();
     
     final float maxInMemCopyUse =
-      jobConf.getFloat("mapred.job.shuffle.input.buffer.percent", 0.90f);
+      jobConf.getFloat(JobContext.SHUFFLE_INPUT_BUFFER_PERCENT, 0.90f);
     if (maxInMemCopyUse > 1.0 || maxInMemCopyUse < 0.0) {
       throw new IllegalArgumentException("Invalid value for " +
-          "mapred.job.shuffle.input.buffer.percent: " +
+          JobContext.SHUFFLE_INPUT_BUFFER_PERCENT + ": " +
           maxInMemCopyUse);
     }
 
     // Allow unit tests to fix Runtime memory
     this.memoryLimit = 
-      (int)(jobConf.getInt("mapred.job.reduce.total.mem.bytes",
+      (int)(jobConf.getInt(JobContext.REDUCE_MEMORY_TOTAL_BYTES,
           (int)Math.min(Runtime.getRuntime().maxMemory(), Integer.MAX_VALUE))
         * maxInMemCopyUse);
  
-    this.ioSortFactor = jobConf.getInt("io.sort.factor", 100);
+    this.ioSortFactor = jobConf.getInt(JobContext.IO_SORT_FACTOR, 100);
 
     this.maxSingleShuffleLimit = 
       (int)(memoryLimit * MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION);
     this.memToMemMergeOutputsThreshold = 
-            jobConf.getInt("mapred.memtomem.merge.threshold", ioSortFactor);
+            jobConf.getInt(JobContext.REDUCE_MEMTOMEM_THRESHOLD, ioSortFactor);
     this.mergeThreshold = (int)(this.memoryLimit * 
-                          jobConf.getFloat("mapred.job.shuffle.merge.percent", 
+                          jobConf.getFloat(JobContext.SHUFFLE_MERGE_EPRCENT, 
                                            0.90f));
     LOG.info("MergerManager: memoryLimit=" + memoryLimit + ", " +
              "maxSingleShuffleLimit=" + maxSingleShuffleLimit + ", " +
@@ -178,7 +179,7 @@
              "memToMemMergeOutputsThreshold=" + memToMemMergeOutputsThreshold);
 
     boolean allowMemToMemMerge = 
-      jobConf.getBoolean("mapred.job.shuffle.allow.memtomem.merge", false);
+      jobConf.getBoolean(JobContext.REDUCE_MEMTOMEM_ENABLED, false);
     if (allowMemToMemMerge) {
       this.memToMemMerger = 
         new IntermediateMemoryToMemoryMerger(this,
@@ -627,9 +628,9 @@
              onDiskMapOutputs.size() + " on-disk map-outputs");
     
     final float maxRedPer =
-      job.getFloat("mapred.job.reduce.input.buffer.percent", 0f);
+      job.getFloat(JobContext.REDUCE_INPUT_BUFFER_PERCENT, 0f);
     if (maxRedPer > 1.0 || maxRedPer < 0.0) {
-      throw new IOException("mapred.job.reduce.input.buffer.percent" +
+      throw new IOException(JobContext.REDUCE_INPUT_BUFFER_PERCENT +
                             maxRedPer);
     }
     int maxInMemReduce = (int)Math.min(

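Jobs that tune the shuffle can use the same JobContext constants on the client side. A sketch (the fractions are illustrative values within the [0,1] ranges validated above):

    Configuration conf = job.getConfiguration();
    // Fraction of reducer heap used to hold map outputs during the copy phase.
    conf.setFloat(JobContext.SHUFFLE_INPUT_BUFFER_PERCENT, 0.70f);
    // Fraction of heap that may retain map outputs during the reduce itself.
    conf.setFloat(JobContext.REDUCE_INPUT_BUFFER_PERCENT, 0.20f);
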
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java Fri Sep 18 15:09:48 2009
@@ -33,6 +33,7 @@
 import org.apache.hadoop.mapred.TaskStatus;
 import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
 import org.apache.hadoop.mapred.Task.CombineOutputCollector;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.util.Progress;
 
@@ -101,7 +102,7 @@
     eventFetcher.start();
     
     // Start the map-output fetcher threads
-    final int numFetchers = jobConf.getInt("mapred.reduce.parallel.copies", 5);
+    final int numFetchers = jobConf.getInt(JobContext.SHUFFLE_PARALLEL_COPIES, 5);
     Fetcher<K,V>[] fetchers = new Fetcher[numFetchers];
     for (int i=0; i < numFetchers; ++i) {
       fetchers[i] = new Fetcher<K,V>(jobConf, reduceId, scheduler, merger, 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/task/reduce/ShuffleClientMetrics.java Fri Sep 18 15:09:48 2009
@@ -18,6 +18,7 @@
 package org.apache.hadoop.mapreduce.task.reduce;
 
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsRecord;
@@ -34,7 +35,7 @@
   private final int numCopiers;
   
   ShuffleClientMetrics(TaskAttemptID reduceId, JobConf jobConf) {
-    this.numCopiers = jobConf.getInt("mapred.reduce.parallel.copies", 5);
+    this.numCopiers = jobConf.getInt(JobContext.SHUFFLE_PARALLEL_COPIES, 5);
 
     MetricsContext metricsContext = MetricsUtil.getContext("mapred");
     this.shuffleMetrics = 

Added: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java?rev=816664&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java (added)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java Fri Sep 18 15:09:48 2009
@@ -0,0 +1,511 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.util;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
+
+/**
+ * Place holder for deprecated keys in the framework 
+ */
+public class ConfigUtil {
+
+  /**
+   * Adds all the deprecated keys. Loads mapred-default.xml and mapred-site.xml
+   */
+  public static void loadResources() {
+    addDeprecatedKeys();
+    Configuration.addDefaultResource("mapred-default.xml");
+    Configuration.addDefaultResource("mapred-site.xml");
+  }
+  
+  /**
+   * Adds deprecated keys and the corresponding new keys to the Configuration
+   */
+  private static void addDeprecatedKeys()  {
+    Configuration.addDeprecation("mapred.temp.dir", 
+      new String[] {MRConfig.TEMP_DIR});
+    Configuration.addDeprecation("mapred.local.dir", 
+      new String[] {MRConfig.LOCAL_DIR});
+    Configuration.addDeprecation("mapred.cluster.map.memory.mb", 
+      new String[] {MRConfig.MAPMEMORY_MB});
+    Configuration.addDeprecation("mapred.cluster.reduce.memory.mb", 
+      new String[] {MRConfig.REDUCEMEMORY_MB});
+    Configuration.addDeprecation("mapred.cluster.max.map.memory.mb", 
+      new String[] {JTConfig.JT_MAX_MAPMEMORY_MB});
+    Configuration.addDeprecation("mapred.cluster.max.reduce.memory.mb", 
+      new String[] {JTConfig.JT_MAX_REDUCEMEMORY_MB});
+
+    Configuration.addDeprecation("mapred.cluster.average.blacklist.threshold", 
+      new String[] {JTConfig.JT_AVG_BLACKLIST_THRESHOLD});
+    Configuration.addDeprecation("hadoop.job.history.location", 
+      new String[] {JTConfig.JT_JOBHISTORY_LOCATION});
+    Configuration.addDeprecation(
+      "mapred.job.tracker.history.completed.location", 
+      new String[] {JTConfig.JT_JOBHISTORY_COMPLETED_LOCATION});
+    Configuration.addDeprecation("mapred.jobtracker.job.history.block.size", 
+      new String[] {JTConfig.JT_JOBHISTORY_BLOCK_SIZE});
+    Configuration.addDeprecation("mapred.job.tracker.jobhistory.lru.cache.size", 
+      new String[] {JTConfig.JT_JOBHISTORY_CACHE_SIZE});
+    Configuration.addDeprecation("mapred.hosts", 
+      new String[] {JTConfig.JT_HOSTS_FILENAME});
+    Configuration.addDeprecation("mapred.hosts.exclude", 
+      new String[] {JTConfig.JT_HOSTS_EXCLUDE_FILENAME});
+    Configuration.addDeprecation("mapred.system.dir", 
+      new String[] {JTConfig.JT_SYSTEM_DIR});
+    Configuration.addDeprecation("mapred.max.tracker.blacklists", 
+      new String[] {JTConfig.JT_MAX_TRACKER_BLACKLISTS});
+    Configuration.addDeprecation("mapred.job.tracker", 
+      new String[] {JTConfig.JT_IPC_ADDRESS});
+    Configuration.addDeprecation("mapred.job.tracker.http.address", 
+      new String[] {JTConfig.JT_HTTP_ADDRESS});
+    Configuration.addDeprecation("mapred.job.tracker.handler.count", 
+      new String[] {JTConfig.JT_IPC_HANDLER_COUNT});
+    Configuration.addDeprecation("mapred.jobtracker.restart.recover", 
+      new String[] {JTConfig.JT_RESTART_ENABLED});
+    Configuration.addDeprecation("mapred.jobtracker.taskScheduler", 
+      new String[] {JTConfig.JT_TASK_SCHEDULER});
+    Configuration.addDeprecation(
+      "mapred.jobtracker.taskScheduler.maxRunningTasksPerJob", 
+      new String[] {JTConfig.JT_RUNNINGTASKS_PER_JOB});
+    Configuration.addDeprecation("mapred.jobtracker.instrumentation", 
+      new String[] {JTConfig.JT_INSTRUMENTATION});
+    Configuration.addDeprecation("mapred.jobtracker.maxtasks.per.job", 
+      new String[] {JTConfig.JT_TASKS_PER_JOB});
+    Configuration.addDeprecation("mapred.heartbeats.in.second", 
+      new String[] {JTConfig.JT_HEARTBEATS_IN_SECOND});
+    Configuration.addDeprecation("mapred.job.tracker.persist.jobstatus.active", 
+      new String[] {JTConfig.JT_PERSIST_JOBSTATUS});
+    Configuration.addDeprecation("mapred.job.tracker.persist.jobstatus.hours", 
+      new String[] {JTConfig.JT_PERSIST_JOBSTATUS_HOURS});
+    Configuration.addDeprecation("mapred.job.tracker.persist.jobstatus.dir", 
+      new String[] {JTConfig.JT_PERSIST_JOBSTATUS_DIR});
+    Configuration.addDeprecation("mapred.permissions.supergroup", 
+      new String[] {JTConfig.JT_SUPERGROUP});
+    Configuration.addDeprecation("mapred.task.cache.levels", 
+      new String[] {JTConfig.JT_TASKCACHE_LEVELS});
+    Configuration.addDeprecation("mapred.jobtracker.taskalloc.capacitypad", 
+      new String[] {JTConfig.JT_TASK_ALLOC_PAD_FRACTION});
+    Configuration.addDeprecation("mapred.jobinit.threads", 
+      new String[] {JTConfig.JT_JOBINIT_THREADS});
+    Configuration.addDeprecation("mapred.tasktracker.expiry.interval", 
+      new String[] {JTConfig.JT_TRACKER_EXPIRY_INTERVAL});
+    Configuration.addDeprecation("mapred.job.tracker.retiredjobs.cache.size", 
+      new String[] {JTConfig.JT_RETIREJOB_CACHE_SIZE});
+    Configuration.addDeprecation("mapred.job.tracker.retire.jobs", 
+      new String[] {JTConfig.JT_RETIREJOBS});
+    Configuration.addDeprecation("mapred.healthChecker.interval", 
+      new String[] {TTConfig.TT_HEALTH_CHECKER_INTERVAL});
+    Configuration.addDeprecation("mapred.healthChecker.script.args", 
+      new String[] {TTConfig.TT_HEALTH_CHECKER_SCRIPT_ARGS});
+    Configuration.addDeprecation("mapred.healthChecker.script.path", 
+      new String[] {TTConfig.TT_HEALTH_CHECKER_SCRIPT_PATH});
+    Configuration.addDeprecation("mapred.healthChecker.script.timeout", 
+      new String[] {TTConfig.TT_HEALTH_CHECKER_SCRIPT_TIMEOUT});
+    Configuration.addDeprecation("mapred.local.dir.minspacekill", 
+      new String[] {TTConfig.TT_LOCAL_DIR_MINSPACE_KILL});
+    Configuration.addDeprecation("mapred.local.dir.minspacestart", 
+      new String[] {TTConfig.TT_LOCAL_DIR_MINSPACE_START});
+    Configuration.addDeprecation("mapred.task.tracker.http.address", 
+      new String[] {TTConfig.TT_HTTP_ADDRESS});
+    Configuration.addDeprecation("mapred.task.tracker.report.address", 
+      new String[] {TTConfig.TT_REPORT_ADDRESS});
+    Configuration.addDeprecation("mapred.task.tracker.task-controller", 
+      new String[] {TTConfig.TT_TASK_CONTROLLER});
+    Configuration.addDeprecation("mapred.tasktracker.dns.interface", 
+      new String[] {TTConfig.TT_DNS_INTERFACE});
+    Configuration.addDeprecation("mapred.tasktracker.dns.nameserver", 
+      new String[] {TTConfig.TT_DNS_NAMESERVER});
+    Configuration.addDeprecation("mapred.tasktracker.events.batchsize", 
+      new String[] {TTConfig.TT_MAX_TASK_COMPLETION_EVENTS_TO_POLL});
+    Configuration.addDeprecation("mapred.tasktracker.indexcache.mb", 
+      new String[] {TTConfig.TT_INDEX_CACHE});
+    Configuration.addDeprecation("mapred.tasktracker.instrumentation", 
+      new String[] {TTConfig.TT_INSTRUMENTATION});
+    Configuration.addDeprecation("mapred.tasktracker.map.tasks.maximum", 
+      new String[] {TTConfig.TT_MAP_SLOTS});
+    Configuration.addDeprecation("mapred.tasktracker.memory_calculator_plugin", 
+      new String[] {TTConfig.TT_MEMORY_CALCULATOR_PLUGIN});
+    Configuration.addDeprecation("mapred.tasktracker.reduce.tasks.maximum", 
+      new String[] {TTConfig.TT_REDUCE_SLOTS});
+    Configuration.addDeprecation(
+      "mapred.tasktracker.taskmemorymanager.monitoring-interval", 
+      new String[] {TTConfig.TT_MEMORY_MANAGER_MONITORING_INTERVAL});
+    Configuration.addDeprecation(
+      "mapred.tasktracker.tasks.sleeptime-before-sigkill", 
+      new String[] {TTConfig.TT_SLEEP_TIME_BEFORE_SIG_KILL});
+    Configuration.addDeprecation("slave.host.name", 
+      new String[] {TTConfig.TT_HOST_NAME});
+    Configuration.addDeprecation("tasktracker.http.threads", 
+      new String[] {TTConfig.TT_HTTP_THREADS});
+    Configuration.addDeprecation("hadoop.net.static.resolutions", 
+      new String[] {TTConfig.TT_STATIC_RESOLUTIONS});
+    Configuration.addDeprecation("local.cache.size", 
+      new String[] {TTConfig.TT_LOCAL_CACHE_SIZE});
+    Configuration.addDeprecation("tasktracker.contention.tracking", 
+      new String[] {TTConfig.TT_CONTENTION_TRACKING});
+    Configuration.addDeprecation("hadoop.job.history.user.location", 
+      new String[] {JobContext.HISTORY_LOCATION});
+    Configuration.addDeprecation("job.end.notification.url", 
+      new String[] {JobContext.END_NOTIFICATION_URL});
+    Configuration.addDeprecation("job.end.retry.attempts", 
+      new String[] {JobContext.END_NOTIFICATION_RETRIES});
+    Configuration.addDeprecation("job.end.retry.interval", 
+      new String[] {JobContext.END_NOTIFICATION_RETRIE_INTERVAL});
+    Configuration.addDeprecation("mapred.committer.job.setup.cleanup.needed", 
+      new String[] {JobContext.SETUP_CLEANUP_NEEDED});
+    Configuration.addDeprecation("mapred.jar", 
+      new String[] {JobContext.JAR});
+    Configuration.addDeprecation("mapred.job.id", 
+      new String[] {JobContext.ID});
+    Configuration.addDeprecation("mapred.job.name", 
+      new String[] {JobContext.JOB_NAME});
+    Configuration.addDeprecation("mapred.job.priority", 
+      new String[] {JobContext.PRIORITY});
+    Configuration.addDeprecation("mapred.job.queue.name", 
+      new String[] {JobContext.QUEUE_NAME});
+    Configuration.addDeprecation("mapred.job.reuse.jvm.num.tasks", 
+      new String[] {JobContext.JVM_NUMTASKS_TORUN});
+    Configuration.addDeprecation("mapred.job.split.file", 
+      new String[] {JobContext.SPLIT_FILE});
+    Configuration.addDeprecation("mapred.map.tasks", 
+      new String[] {JobContext.NUM_MAPS});
+    Configuration.addDeprecation("mapred.max.tracker.failures", 
+      new String[] {JobContext.MAX_TASK_FAILURES_PER_TRACKER});
+    Configuration.addDeprecation("mapred.reduce.slowstart.completed.maps", 
+      new String[] {JobContext.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART});
+    Configuration.addDeprecation("mapred.reduce.tasks", 
+      new String[] {JobContext.NUM_REDUCES});
+    Configuration.addDeprecation("mapred.skip.on", 
+      new String[] {JobContext.SKIP_RECORDS});
+    Configuration.addDeprecation("mapred.skip.out.dir", 
+      new String[] {JobContext.SKIP_OUTDIR});
+    Configuration.addDeprecation(
+      "mapred.speculative.execution.slowNodeThreshold", 
+      new String[] {JobContext.SPECULATIVE_SLOWNODE_THRESHOLD});
+    Configuration.addDeprecation(
+      "mapred.speculative.execution.slowTaskThreshold", 
+      new String[] {JobContext.SPECULATIVE_SLOWTASK_THRESHOLD});
+    Configuration.addDeprecation("mapred.speculative.execution.speculativeCap", 
+      new String[] {JobContext.SPECULATIVECAP});
+    Configuration.addDeprecation("job.local.dir", 
+      new String[] {JobContext.JOB_LOCAL_DIR});
+    Configuration.addDeprecation("mapreduce.inputformat.class", 
+      new String[] {JobContext.INPUT_FORMAT_CLASS_ATTR});
+    Configuration.addDeprecation("mapreduce.map.class", 
+      new String[] {JobContext.MAP_CLASS_ATTR});
+    Configuration.addDeprecation("mapreduce.combine.class", 
+      new String[] {JobContext.COMBINE_CLASS_ATTR});
+    Configuration.addDeprecation("mapreduce.reduce.class", 
+      new String[] {JobContext.REDUCE_CLASS_ATTR});
+    Configuration.addDeprecation("mapreduce.outputformat.class", 
+      new String[] {JobContext.OUTPUT_FORMAT_CLASS_ATTR});
+    Configuration.addDeprecation("mapreduce.partitioner.class", 
+      new String[] {JobContext.PARTITIONER_CLASS_ATTR});
+    Configuration.addDeprecation("mapred.job.classpath.archives", 
+      new String[] {JobContext.CLASSPATH_ARCHIVES});
+    Configuration.addDeprecation("mapred.job.classpath.files", 
+      new String[] {JobContext.CLASSPATH_FILES});
+    Configuration.addDeprecation("mapred.cache.files", 
+      new String[] {JobContext.CACHE_FILES});
+    Configuration.addDeprecation("mapred.cache.archives", 
+      new String[] {JobContext.CACHE_ARCHIVES});
+    Configuration.addDeprecation("mapred.cache.localFiles", 
+      new String[] {JobContext.CACHE_LOCALFILES});
+    Configuration.addDeprecation("mapred.cache.localArchives", 
+      new String[] {JobContext.CACHE_LOCALARCHIVES});
+    Configuration.addDeprecation("mapred.cache.files.timestamps", 
+      new String[] {JobContext.CACHE_FILE_TIMESTAMPS});
+    Configuration.addDeprecation("mapred.cache.archives.timestamps", 
+      new String[] {JobContext.CACHE_ARCHIVES_TIMESTAMPS});
+    Configuration.addDeprecation("mapred.create.symlink", 
+      new String[] {JobContext.CACHE_SYMLINK});
+    Configuration.addDeprecation("mapred.working.dir", 
+      new String[] {JobContext.WORKING_DIR});
+    Configuration.addDeprecation("hadoop.job.history.user.location", 
+      new String[] {JobContext.HISTORY_LOCATION});
+    Configuration.addDeprecation("user.name", 
+      new String[] {JobContext.USER_NAME});
+    Configuration.addDeprecation("mapred.output.key.class", 
+      new String[] {JobContext.OUTPUT_KEY_CLASS});
+    Configuration.addDeprecation("mapred.output.value.class", 
+      new String[] {JobContext.OUTPUT_VALUE_CLASS});
+    Configuration.addDeprecation("mapred.output.value.groupfn.class", 
+      new String[] {JobContext.GROUP_COMPARATOR_CLASS});
+    Configuration.addDeprecation("mapred.output.key.comparator.class", 
+      new String[] {JobContext.KEY_COMPARATOR});
+    Configuration.addDeprecation("io.sort.factor", 
+      new String[] {JobContext.IO_SORT_FACTOR});
+    Configuration.addDeprecation("io.sort.mb", 
+      new String[] {JobContext.IO_SORT_MB});
+    Configuration.addDeprecation("keep.failed.task.files", 
+      new String[] {JobContext.PRESERVE_FAILED_TASK_FILES});
+    Configuration.addDeprecation("keep.task.files.pattern", 
+      new String[] {JobContext.PRESERVE_FILES_PATTERN});
+    Configuration.addDeprecation("mapred.child.tmp", 
+      new String[] {JobContext.TASK_TEMP_DIR});
+    Configuration.addDeprecation("mapred.debug.out.lines", 
+      new String[] {JobContext.TASK_DEBUGOUT_LINES});
+    Configuration.addDeprecation("mapred.merge.recordsBeforeProgress", 
+      new String[] {JobContext.RECORDS_BEFORE_PROGRESS});
+    Configuration.addDeprecation("mapred.skip.attempts.to.start.skipping", 
+      new String[] {JobContext.SKIP_START_ATTEMPTS});
+    Configuration.addDeprecation("mapred.task.id", 
+      new String[] {JobContext.TASK_ATTEMPT_ID});
+    Configuration.addDeprecation("mapred.task.is.map", 
+      new String[] {JobContext.TASK_ISMAP});
+    Configuration.addDeprecation("mapred.task.partition", 
+      new String[] {JobContext.TASK_PARTITION});
+    Configuration.addDeprecation("mapred.task.profile", 
+      new String[] {JobContext.TASK_PROFILE});
+    Configuration.addDeprecation("mapred.task.profile.maps", 
+      new String[] {JobContext.NUM_MAP_PROFILES});
+    Configuration.addDeprecation("mapred.task.profile.reduces", 
+      new String[] {JobContext.NUM_REDUCE_PROFILES});
+    Configuration.addDeprecation("mapred.task.timeout", 
+      new String[] {JobContext.TASK_TIMEOUT});
+    Configuration.addDeprecation("mapred.tip.id", 
+      new String[] {JobContext.TASK_ID});
+    Configuration.addDeprecation("mapred.work.output.dir", 
+      new String[] {JobContext.TASK_OUTPUT_DIR});
+    Configuration.addDeprecation("mapred.userlog.limit.kb", 
+      new String[] {JobContext.TASK_USERLOG_LIMIT});
+    Configuration.addDeprecation("mapred.userlog.retain.hours", 
+      new String[] {JobContext.TASK_LOG_RETAIN_HOURS});
+    Configuration.addDeprecation("mapred.task.profile.params", 
+      new String[] {JobContext.TASK_PROFILE_PARAMS});
+    Configuration.addDeprecation("io.sort.record.percent", 
+      new String[] {JobContext.MAP_SORT_RECORD_PERCENT});
+    Configuration.addDeprecation("io.sort.spill.percent", 
+      new String[] {JobContext.MAP_SORT_SPILL_PERCENT});
+    Configuration.addDeprecation("map.input.file", 
+      new String[] {JobContext.MAP_INPUT_FILE});
+    Configuration.addDeprecation("map.input.length", 
+      new String[] {JobContext.MAP_INPUT_PATH});
+    Configuration.addDeprecation("map.input.start", 
+      new String[] {JobContext.MAP_INPUT_START});
+    Configuration.addDeprecation("mapred.job.map.memory.mb", 
+      new String[] {JobContext.MAP_MEMORY_MB});
+    Configuration.addDeprecation("mapred.map.child.env", 
+      new String[] {JobContext.MAP_ENV});
+    Configuration.addDeprecation("mapred.map.child.java.opts", 
+      new String[] {JobContext.MAP_JAVA_OPTS});
+    Configuration.addDeprecation("mapred.map.child.ulimit", 
+      new String[] {JobContext.MAP_ULIMIT});
+    Configuration.addDeprecation("mapred.map.max.attempts", 
+      new String[] {JobContext.MAP_MAX_ATTEMPTS});
+    Configuration.addDeprecation("mapred.map.task.debug.script", 
+      new String[] {JobContext.MAP_DEBUG_SCRIPT});
+    Configuration.addDeprecation("mapred.map.tasks.speculative.execution", 
+      new String[] {JobContext.MAP_SPECULATIVE});
+    Configuration.addDeprecation("mapred.max.map.failures.percent", 
+      new String[] {JobContext.MAP_FAILURES_MAX_PERCENT});
+    Configuration.addDeprecation("mapred.skip.map.auto.incr.proc.count", 
+      new String[] {JobContext.MAP_SKIP_INCR_PROC_COUNT});
+    Configuration.addDeprecation("mapred.skip.map.max.skip.records", 
+      new String[] {JobContext.MAP_SKIP_MAX_RECORDS});
+    Configuration.addDeprecation("min.num.spills.for.combine", 
+      new String[] {JobContext.MAP_COMBINE_MIN_SPILLS});
+    Configuration.addDeprecation("mapred.compress.map.output", 
+      new String[] {JobContext.MAP_OUTPUT_COMPRESS});
+    Configuration.addDeprecation("mapred.map.output.compression.codec", 
+      new String[] {JobContext.MAP_OUTPUT_COMPRESS_CODEC});
+    Configuration.addDeprecation("mapred.mapoutput.key.class", 
+      new String[] {JobContext.MAP_OUTPUT_KEY_CLASS});
+    Configuration.addDeprecation("mapred.mapoutput.value.class", 
+      new String[] {JobContext.MAP_OUTPUT_VALUE_CLASS});
+    Configuration.addDeprecation("map.output.key.field.separator", 
+      new String[] {JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR});
+    Configuration.addDeprecation("mapred.map.child.log.level", 
+      new String[] {JobContext.MAP_LOG_LEVEL});
+    Configuration.addDeprecation("mapred.inmem.merge.threshold", 
+      new String[] {JobContext.REDUCE_MERGE_INMEM_THRESHOLD});
+    Configuration.addDeprecation("mapred.job.reduce.input.buffer.percent", 
+      new String[] {JobContext.REDUCE_INPUT_BUFFER_PERCENT});
+    Configuration.addDeprecation("mapred.job.reduce.markreset.buffer.percent", 
+      new String[] {JobContext.REDUCE_MARKRESET_BUFFER_PERCENT});
+    Configuration.addDeprecation("mapred.job.reduce.memory.mb", 
+      new String[] {JobContext.REDUCE_MEMORY_MB});
+    Configuration.addDeprecation("mapred.job.reduce.total.mem.bytes", 
+      new String[] {JobContext.REDUCE_MEMORY_TOTAL_BYTES});
+    Configuration.addDeprecation("mapred.job.shuffle.input.buffer.percent", 
+      new String[] {JobContext.SHUFFLE_INPUT_BUFFER_PERCENT});
+    Configuration.addDeprecation("mapred.job.shuffle.merge.percent", 
+      new String[] {JobContext.SHUFFLE_MERGE_PERCENT});
+    Configuration.addDeprecation("mapred.max.reduce.failures.percent", 
+      new String[] {JobContext.REDUCE_FAILURES_MAXPERCENT});
+    Configuration.addDeprecation("mapred.reduce.child.env", 
+      new String[] {JobContext.REDUCE_ENV});
+    Configuration.addDeprecation("mapred.reduce.child.java.opts", 
+      new String[] {JobContext.REDUCE_JAVA_OPTS});
+    Configuration.addDeprecation("mapred.reduce.child.ulimit", 
+      new String[] {JobContext.REDUCE_ULIMIT});
+    Configuration.addDeprecation("mapred.reduce.max.attempts", 
+      new String[] {JobContext.REDUCE_MAX_ATTEMPTS});
+    Configuration.addDeprecation("mapred.reduce.parallel.copies", 
+      new String[] {JobContext.SHUFFLE_PARALLEL_COPIES});
+    Configuration.addDeprecation("mapred.reduce.task.debug.script", 
+      new String[] {JobContext.REDUCE_DEBUG_SCRIPT});
+    Configuration.addDeprecation("mapred.reduce.tasks.speculative.execution", 
+      new String[] {JobContext.REDUCE_SPECULATIVE});
+    Configuration.addDeprecation("mapred.shuffle.connect.timeout", 
+      new String[] {JobContext.SHUFFLE_CONNECT_TIMEOUT});
+    Configuration.addDeprecation("mapred.shuffle.read.timeout", 
+      new String[] {JobContext.SHUFFLE_READ_TIMEOUT});
+    Configuration.addDeprecation("mapred.skip.reduce.auto.incr.proc.count", 
+      new String[] {JobContext.REDUCE_SKIP_INCR_PROC_COUNT});
+    Configuration.addDeprecation("mapred.skip.reduce.max.skip.groups", 
+      new String[] {JobContext.REDUCE_SKIP_MAXGROUPS});
+    Configuration.addDeprecation("mapred.reduce.child.log.level", 
+      new String[] {JobContext.REDUCE_LOG_LEVEL});
+    Configuration.addDeprecation("jobclient.completion.poll.interval", 
+      new String[] {Job.COMPLETION_POLL_INTERVAL_KEY});
+    Configuration.addDeprecation("jobclient.progress.monitor.poll.interval", 
+      new String[] {Job.PROGRESS_MONITOR_POLL_INTERVAL_KEY});
+    Configuration.addDeprecation("jobclient.output.filter", 
+      new String[] {Job.OUTPUT_FILTER});
+    Configuration.addDeprecation("mapred.submit.replication", 
+      new String[] {Job.SUBMIT_REPLICATION});
+    Configuration.addDeprecation("mapred.used.genericoptionsparser", 
+      new String[] {Job.USED_GENERIC_PARSER});
+    Configuration.addDeprecation("mapred.input.dir", 
+      new String[] {
+        org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR});
+    Configuration.addDeprecation("mapred.input.pathFilter.class", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+        FileInputFormat.PATHFILTER_CLASS});
+    Configuration.addDeprecation("mapred.max.split.size", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+        FileInputFormat.SPLIT_MAXSIZE});
+    Configuration.addDeprecation("mapred.min.split.size", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+        FileInputFormat.SPLIT_MINSIZE});
+    Configuration.addDeprecation("mapred.output.compress", 
+      new String[] {org.apache.hadoop.mapreduce.lib.output.
+        FileOutputFormat.COMPRESS});
+    Configuration.addDeprecation("mapred.output.compression.codec", 
+      new String[] {org.apache.hadoop.mapreduce.lib.output.
+        FileOutputFormat.COMPRESS_CODEC});
+    Configuration.addDeprecation("mapred.output.compression.type", 
+      new String[] {org.apache.hadoop.mapreduce.lib.output.
+        FileOutputFormat.COMPRESS_TYPE});
+    Configuration.addDeprecation("mapred.output.dir", 
+      new String[] {org.apache.hadoop.mapreduce.lib.output.
+        FileOutputFormat.OUTDIR});
+    Configuration.addDeprecation("mapred.seqbinary.output.key.class", 
+      new String[] {org.apache.hadoop.mapreduce.lib.output.
+        SequenceFileAsBinaryOutputFormat.KEY_CLASS});
+    Configuration.addDeprecation("mapred.seqbinary.output.value.class", 
+      new String[] {org.apache.hadoop.mapreduce.lib.output.
+        SequenceFileAsBinaryOutputFormat.VALUE_CLASS});
+    Configuration.addDeprecation("sequencefile.filter.class", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+        SequenceFileInputFilter.FILTER_CLASS});
+    Configuration.addDeprecation("sequencefile.filter.regex", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+        SequenceFileInputFilter.FILTER_REGEX});
+    Configuration.addDeprecation("sequencefile.filter.frequency", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+        SequenceFileInputFilter.FILTER_FREQUENCY});
+    Configuration.addDeprecation("mapred.input.dir.mappers", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+        MultipleInputs.DIR_MAPPERS});
+    Configuration.addDeprecation("mapred.input.dir.formats", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+        MultipleInputs.DIR_FORMATS});
+    Configuration.addDeprecation("mapred.line.input.format.linespermap", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+        NLineInputFormat.LINES_PER_MAP});
+    Configuration.addDeprecation("mapred.binary.partitioner.left.offset", 
+      new String[] {org.apache.hadoop.mapreduce.lib.partition.
+        BinaryPartitioner.LEFT_OFFSET_PROPERTY_NAME});
+    Configuration.addDeprecation("mapred.binary.partitioner.right.offset", 
+      new String[] {org.apache.hadoop.mapreduce.lib.partition.
+        BinaryPartitioner.RIGHT_OFFSET_PROPERTY_NAME});
+    Configuration.addDeprecation("mapred.text.key.comparator.options", 
+      new String[] {org.apache.hadoop.mapreduce.lib.partition.
+        KeyFieldBasedComparator.COMPARATOR_OPTIONS});
+    Configuration.addDeprecation("mapred.text.key.partitioner.options", 
+      new String[] {org.apache.hadoop.mapreduce.lib.partition.
+        KeyFieldBasedPartitioner.PARTITIONER_OPTIONS});
+    Configuration.addDeprecation("mapred.mapper.regex.group", 
+      new String[] {org.apache.hadoop.mapreduce.lib.map.RegexMapper.GROUP});
+    Configuration.addDeprecation("mapred.mapper.regex", 
+      new String[] {org.apache.hadoop.mapreduce.lib.map.RegexMapper.PATTERN});
+    Configuration.addDeprecation("create.empty.dir.if.nonexist", 
+      new String[] {org.apache.hadoop.mapreduce.lib.jobcontrol.
+                    ControlledJob.CREATE_DIR});
+    Configuration.addDeprecation("mapred.data.field.separator", 
+      new String[] {org.apache.hadoop.mapreduce.lib.fieldsel.
+                    FieldSelectionHelper.DATA_FIELD_SEPERATOR});
+    Configuration.addDeprecation("map.output.key.value.fields.spec", 
+      new String[] {org.apache.hadoop.mapreduce.lib.fieldsel.
+                    FieldSelectionHelper.MAP_OUTPUT_KEY_VALUE_SPEC});
+    Configuration.addDeprecation("reduce.output.key.value.fields.spec", 
+      new String[] {org.apache.hadoop.mapreduce.lib.fieldsel.
+                    FieldSelectionHelper.REDUCE_OUTPUT_KEY_VALUE_SPEC});
+    Configuration.addDeprecation("mapred.min.split.size.per.node", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+                    CombineFileInputFormat.SPLIT_MINSIZE_PERNODE});
+    Configuration.addDeprecation("mapred.min.split.size.per.rack", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+                    CombineFileInputFormat.SPLIT_MINSIZE_PERRACK});
+    Configuration.addDeprecation("key.value.separator.in.input.line", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+                    KeyValueLineRecordReader.KEY_VALUE_SEPERATOR});
+    Configuration.addDeprecation("mapred.linerecordreader.maxlength", 
+      new String[] {org.apache.hadoop.mapreduce.lib.input.
+                    LineRecordReader.MAX_LINE_LENGTH});
+    Configuration.addDeprecation("mapred.lazy.output.format", 
+      new String[] {org.apache.hadoop.mapreduce.lib.output.
+                    LazyOutputFormat.OUTPUT_FORMAT});
+    Configuration.addDeprecation("mapred.textoutputformat.separator", 
+      new String[] {org.apache.hadoop.mapreduce.lib.output.
+                    TextOutputFormat.SEPERATOR});
+    Configuration.addDeprecation("mapred.join.expr", 
+      new String[] {org.apache.hadoop.mapreduce.lib.join.
+                    CompositeInputFormat.JOIN_EXPR});
+    Configuration.addDeprecation("mapred.join.keycomparator", 
+      new String[] {org.apache.hadoop.mapreduce.lib.join.
+                    CompositeInputFormat.JOIN_COMPARATOR});
+    Configuration.addDeprecation("hadoop.pipes.command-file.keep", 
+      new String[] {org.apache.hadoop.mapred.pipes.
+                    Submitter.PRESERVE_COMMANDFILE});
+    Configuration.addDeprecation("hadoop.pipes.executable", 
+      new String[] {org.apache.hadoop.mapred.pipes.Submitter.EXECUTABLE});
+    Configuration.addDeprecation("hadoop.pipes.executable.interpretor", 
+      new String[] {org.apache.hadoop.mapred.pipes.Submitter.INTERPRETOR});
+    Configuration.addDeprecation("hadoop.pipes.java.mapper", 
+      new String[] {org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_MAP});
+    Configuration.addDeprecation("hadoop.pipes.java.recordreader", 
+      new String[] {org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_RR});
+    Configuration.addDeprecation("hadoop.pipes.java.recordwriter", 
+      new String[] {org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_RW});
+    Configuration.addDeprecation("hadoop.pipes.java.reducer", 
+      new String[] {org.apache.hadoop.mapred.pipes.Submitter.IS_JAVA_REDUCE});
+    Configuration.addDeprecation("hadoop.pipes.partitioner", 
+      new String[] {org.apache.hadoop.mapred.pipes.Submitter.PARTITIONER});
+    Configuration.addDeprecation("mapred.pipes.user.inputformat", 
+      new String[] {org.apache.hadoop.mapred.pipes.Submitter.INPUT_FORMAT});
+  }
+}
+
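
For reference, the net effect of this table — a minimal sketch, not part of
the patch, assuming the class above has been loaded so its static initializer
has registered the mappings, and that JobContext.TASK_TIMEOUT resolves to
"mapreduce.task.timeout":

  import org.apache.hadoop.conf.Configuration;

  public class DeprecationSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Set a value under the deprecated name ...
      conf.set("mapred.task.timeout", "600000");
      // ... and it is visible under the new name as well; touching the
      // old key also logs a deprecation warning.
      System.out.println(conf.get("mapreduce.task.timeout")); // 600000
      System.out.println(conf.get("mapred.task.timeout"));    // 600000
    }
  }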

Modified: hadoop/mapreduce/trunk/src/java/overview.html
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/overview.html?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/overview.html (original)
+++ hadoop/mapreduce/trunk/src/java/overview.html Fri Sep 18 15:09:48 2009
@@ -156,7 +156,7 @@
 <li>The {@link org.apache.hadoop.mapred.JobTracker} (MapReduce master)
 host and port.  This is specified with the configuration property
 <tt><a
-href="../mapred-default.html#mapred.job.tracker">mapred.job.tracker</a></tt>.
+href="../mapred-default.html#mapreduce.jobtracker.address">mapreduce.jobtracker.address</a></tt>.
 </li>
 
 <li>A <em>slaves</em> file that lists the names of all the hosts in
@@ -194,7 +194,7 @@
 <xmp><configuration>
 
   <property>
-    <name>mapred.job.tracker</name>
+    <name>mapreduce.jobtracker.address</name>
     <value>localhost:9001</value>
   </property>
 
@@ -255,7 +255,7 @@
   as <tt><em>hdfs://master.example.com/</em></tt> in <tt>conf/core-site.xml</tt>.</li>
 
<li>The host and port of your master server in the value
-of <tt><a href="../mapred-default.html#mapred.job.tracker">mapred.job.tracker</a></tt>
+of <tt><a href="../mapred-default.html#mapreduce.jobtracker.address">mapreduce.jobtracker.address</a></tt>
 as <tt><em>master.example.com</em>:<em>port</em></tt> in <tt>conf/mapred-site.xml</tt>.</li>
 
 <li>Directories for <tt><a
@@ -268,18 +268,18 @@
 list of directory names, so that data may be stored on multiple local
 devices.</li>
 
-<li><tt><a href="../mapred-default.html#mapred.local.dir">mapred.local.dir</a></tt>
+<li><tt><a href="../mapred-default.html#mapreduce.cluster.local.dir">mapreduce.cluster.local.dir</a></tt>
   in <tt>conf/mapred-site.xml</tt>, the local directory where temporary 
   MapReduce data is stored.  It also may be a list of directories.</li>
 
 <li><tt><a
-href="../mapred-default.html#mapred.map.tasks">mapred.map.tasks</a></tt>
+href="../mapred-default.html#mapreduce.job.maps">mapreduce.job.maps</a></tt>
 and <tt><a
-href="../mapred-default.html#mapred.reduce.tasks">mapred.reduce.tasks</a></tt> 
+href="../mapred-default.html#mapreduce.job.reduces">mapreduce.job.reduces</a></tt> 
 in <tt>conf/mapred-site.xml</tt>.
 As a rule of thumb, use 10x the
-number of slave processors for <tt>mapred.map.tasks</tt>, and 2x the
-number of slave processors for <tt>mapred.reduce.tasks</tt>.</li>
+number of slave processors for <tt>mapreduce.job.maps</tt>, and 2x the
+number of slave processors for <tt>mapreduce.job.reduces</tt>.</li>
 
 </ol>
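
A worked instance of that rule — hypothetical numbers, assuming a cluster of
10 slaves with 8 cores each (80 slave processors):

  import org.apache.hadoop.mapred.JobConf;

  public class TaskCountSketch {
    public static void main(String[] args) {
      int slaveProcessors = 10 * 8;                 // 10 slaves x 8 cores
      JobConf conf = new JobConf();
      conf.setNumMapTasks(slaveProcessors * 10);    // ~800 maps (a hint, not a hard limit)
      conf.setNumReduceTasks(slaveProcessors * 2);  // ~160 reduces
    }
  }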
 

Modified: hadoop/mapreduce/trunk/src/test/mapred-site.xml
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred-site.xml?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred-site.xml (original)
+++ hadoop/mapreduce/trunk/src/test/mapred-site.xml Fri Sep 18 15:09:48 2009
@@ -6,16 +6,16 @@
 <configuration>
 
 <property>
-  <name>io.sort.mb</name>
+  <name>mapreduce.task.io.sort.mb</name>
   <value>10</value>
 </property>
 <property>
-  <name>mapred.hosts.exclude</name>
+  <name>mapreduce.jobtracker.hosts.exclude.filename</name>
   <value>hosts.exclude</value>
   <description></description>
 </property>
 <property>
-  <name>mapred.job.tracker.retire.jobs</name>
+  <name>mapreduce.jobtracker.retirejobs</name>
   <value>false</value>
   <description></description>
 </property>

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/cli/TestMRCLI.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/cli/TestMRCLI.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/cli/TestMRCLI.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/cli/TestMRCLI.java Fri Sep 18 15:09:48 2009
@@ -25,6 +25,7 @@
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.hadoop.mapred.tools.MRAdmin;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.security.authorize.HadoopPolicyProvider;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.util.ToolRunner;
@@ -43,7 +44,7 @@
     JobConf mrConf = new JobConf(conf);
     mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1, 
                            null, null, mrConf);
-    jobtracker = mrCluster.createJobConf().get("mapred.job.tracker", "local");
+    jobtracker = mrCluster.createJobConf().get(JTConfig.JT_IPC_ADDRESS, "local");
     cmdExecutor = new MRCmdExecutor(jobtracker);
     archiveCmdExecutor = new ArchiveCmdExecutor(namenode, mrConf);
   }
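
The "local" default above does double duty — a side-note sketch, assuming
JTConfig.JT_IPC_ADDRESS is the jobtracker address key: when no address is
configured the framework falls back to the local job runner, so the same
lookup can serve as a local-mode probe:

  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;

  public class LocalModeProbe {
    public static void main(String[] args) {
      JobConf conf = new JobConf();
      String jt = conf.get(JTConfig.JT_IPC_ADDRESS, "local");
      // "local" selects the LocalJobRunner instead of a real JobTracker.
      System.out.println("local mode: " + "local".equals(jt));
    }
  }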

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestJobConf.java Fri Sep 18 15:09:48 2009
@@ -21,13 +21,14 @@
 import junit.framework.TestCase;
 
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.JobContext;
 
 public class TestJobConf extends TestCase {
 
   public void testProfileParamsDefaults() {
     JobConf configuration = new JobConf();
 
-    Assert.assertNull(configuration.get("mapred.task.profile.params"));
+    Assert.assertNull(configuration.get(JobContext.TASK_PROFILE_PARAMS));
 
     String result = configuration.getProfileParams();
 
@@ -40,13 +41,13 @@
     JobConf configuration = new JobConf();
 
     configuration.setProfileParams("test");
-    Assert.assertEquals("test", configuration.get("mapred.task.profile.params"));
+    Assert.assertEquals("test", configuration.get(JobContext.TASK_PROFILE_PARAMS));
   }
 
   public void testProfileParamsGetter() {
     JobConf configuration = new JobConf();
 
-    configuration.set("mapred.task.profile.params", "test");
+    configuration.set(JobContext.TASK_PROFILE_PARAMS, "test");
     Assert.assertEquals("test", configuration.getProfileParams());
   }
 
@@ -56,35 +57,35 @@
    */
   public void testMemoryConfigForMapOrReduceTask(){
     JobConf configuration = new JobConf();
-    configuration.set("mapred.job.map.memory.mb",String.valueOf(300));
-    configuration.set("mapred.job.reduce.memory.mb",String.valueOf(300));
+    configuration.set(JobContext.MAP_MEMORY_MB,String.valueOf(300));
+    configuration.set(JobContext.REDUCE_MEMORY_MB,String.valueOf(300));
     Assert.assertEquals(configuration.getMemoryForMapTask(),300);
     Assert.assertEquals(configuration.getMemoryForReduceTask(),300);
 
     configuration.set("mapred.task.maxvmem" , String.valueOf(2*1024 * 1024));
-    configuration.set("mapred.job.map.memory.mb",String.valueOf(300));
-    configuration.set("mapred.job.reduce.memory.mb",String.valueOf(300));
+    configuration.set(JobContext.MAP_MEMORY_MB,String.valueOf(300));
+    configuration.set(JobContext.REDUCE_MEMORY_MB,String.valueOf(300));
     Assert.assertEquals(configuration.getMemoryForMapTask(),2);
     Assert.assertEquals(configuration.getMemoryForReduceTask(),2);
 
     configuration = new JobConf();
     configuration.set("mapred.task.maxvmem" , "-1");
-    configuration.set("mapred.job.map.memory.mb",String.valueOf(300));
-    configuration.set("mapred.job.reduce.memory.mb",String.valueOf(300));
+    configuration.set(JobContext.MAP_MEMORY_MB,String.valueOf(300));
+    configuration.set(JobContext.REDUCE_MEMORY_MB,String.valueOf(300));
     Assert.assertEquals(configuration.getMemoryForMapTask(),-1);
     Assert.assertEquals(configuration.getMemoryForReduceTask(),-1);
 
     configuration = new JobConf();
     configuration.set("mapred.task.maxvmem" , String.valueOf(2*1024 * 1024));
-    configuration.set("mapred.job.map.memory.mb","-1");
-    configuration.set("mapred.job.reduce.memory.mb","-1");
+    configuration.set(JobContext.MAP_MEMORY_MB,"-1");
+    configuration.set(JobContext.REDUCE_MEMORY_MB,"-1");
     Assert.assertEquals(configuration.getMemoryForMapTask(),2);
     Assert.assertEquals(configuration.getMemoryForReduceTask(),2);
 
     configuration = new JobConf();
     configuration.set("mapred.task.maxvmem" , String.valueOf(-1));
-    configuration.set("mapred.job.map.memory.mb","-1");
-    configuration.set("mapred.job.reduce.memory.mb","-1");
+    configuration.set(JobContext.MAP_MEMORY_MB,"-1");
+    configuration.set(JobContext.REDUCE_MEMORY_MB,"-1");
     Assert.assertEquals(configuration.getMemoryForMapTask(),-1);
     Assert.assertEquals(configuration.getMemoryForReduceTask(),-1);    
 
@@ -101,20 +102,20 @@
     JobConf configuration = new JobConf();
 
     //get test case
-    configuration.set("mapred.job.map.memory.mb", String.valueOf(300));
-    configuration.set("mapred.job.reduce.memory.mb", String.valueOf(-1));
+    configuration.set(JobContext.MAP_MEMORY_MB, String.valueOf(300));
+    configuration.set(JobContext.REDUCE_MEMORY_MB, String.valueOf(-1));
     Assert.assertEquals(
       configuration.getMaxVirtualMemoryForTask(), 300 * 1024 * 1024);
 
     configuration = new JobConf();
-    configuration.set("mapred.job.map.memory.mb", String.valueOf(-1));
-    configuration.set("mapred.job.reduce.memory.mb", String.valueOf(200));
+    configuration.set(JobContext.MAP_MEMORY_MB, String.valueOf(-1));
+    configuration.set(JobContext.REDUCE_MEMORY_MB, String.valueOf(200));
     Assert.assertEquals(
       configuration.getMaxVirtualMemoryForTask(), 200 * 1024 * 1024);
 
     configuration = new JobConf();
-    configuration.set("mapred.job.map.memory.mb", String.valueOf(-1));
-    configuration.set("mapred.job.reduce.memory.mb", String.valueOf(-1));
+    configuration.set(JobContext.MAP_MEMORY_MB, String.valueOf(-1));
+    configuration.set(JobContext.REDUCE_MEMORY_MB, String.valueOf(-1));
     configuration.set("mapred.task.maxvmem", String.valueOf(1 * 1024 * 1024));
     Assert.assertEquals(
       configuration.getMaxVirtualMemoryForTask(), 1 * 1024 * 1024);
@@ -132,8 +133,8 @@
     Assert.assertEquals(configuration.getMemoryForReduceTask(), 2);
 
     configuration = new JobConf();   
-    configuration.set("mapred.job.map.memory.mb", String.valueOf(300));
-    configuration.set("mapred.job.reduce.memory.mb", String.valueOf(400));
+    configuration.set(JobContext.MAP_MEMORY_MB, String.valueOf(300));
+    configuration.set(JobContext.REDUCE_MEMORY_MB, String.valueOf(400));
     configuration.setMaxVirtualMemoryForTask(2 * 1024 * 1024);
     Assert.assertEquals(configuration.getMemoryForMapTask(), 2);
     Assert.assertEquals(configuration.getMemoryForReduceTask(), 2);
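
Why the maxvmem cases above expect 2: the deprecated mapred.task.maxvmem is
expressed in bytes while the per-task keys are in megabytes, so
2*1024*1024 bytes / (1024*1024) = 2 MB whenever MAP_MEMORY_MB /
REDUCE_MEMORY_MB are -1 or unset. A minimal sketch under that assumption:

  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapreduce.JobContext;

  public class MemoryFallbackSketch {
    public static void main(String[] args) {
      JobConf conf = new JobConf();
      conf.set("mapred.task.maxvmem",
               String.valueOf(2L * 1024 * 1024));     // bytes
      conf.set(JobContext.MAP_MEMORY_MB, "-1");       // defer to maxvmem
      System.out.println(conf.getMemoryForMapTask()); // 2 (megabytes)
    }
  }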

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestNoDefaultsJobConf.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestNoDefaultsJobConf.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestNoDefaultsJobConf.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/conf/TestNoDefaultsJobConf.java Fri Sep 18 15:09:48 2009
@@ -20,6 +20,7 @@
 import junit.framework.Assert;
 
 import org.apache.hadoop.mapred.*;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.io.LongWritable;
@@ -58,7 +59,7 @@
     JobConf conf = new JobConf(false);
 
     //seeding JT and NN info into non-defaults (empty jobconf)
-    conf.set("mapred.job.tracker", createJobConf().get("mapred.job.tracker"));
+    conf.set(JTConfig.JT_IPC_ADDRESS, createJobConf().get(JTConfig.JT_IPC_ADDRESS));
     conf.set("fs.default.name", createJobConf().get("fs.default.name"));
 
     conf.setJobName("mr");

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/BigMapOutput.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/BigMapOutput.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/BigMapOutput.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/BigMapOutput.java Fri Sep 18 15:09:48 2009
@@ -42,7 +42,11 @@
   public static final Log LOG =
     LogFactory.getLog(BigMapOutput.class.getName());
   private static Random random = new Random();
-  
+  public static final String MIN_KEY = "mapreduce.bmo.minkey";
+  public static final String MIN_VALUE = "mapreduce.bmo.minvalue";
+  public static final String MAX_KEY = "mapreduce.bmo.maxkey";
+  public static final String MAX_VALUE = "mapreduce.bmo.maxvalue";
+
   private static void randomizeBytes(byte[] data, int offset, int length) {
     for(int i=offset + length - 1; i >= offset; --i) {
       data[i] = (byte) random.nextInt(256);
@@ -66,12 +70,12 @@
                                 BytesWritable.class, BytesWritable.class,
                                 CompressionType.NONE);
     long numBytesToWrite = fileSizeInMB * 1024 * 1024;
-    int minKeySize = conf.getInt("test.bmo.min_key", 10);;
+    int minKeySize = conf.getInt(MIN_KEY, 10);
     int keySizeRange = 
-      conf.getInt("test.bmo.max_key", 1000) - minKeySize;
-    int minValueSize = conf.getInt("test.bmo.min_value", 0);
+      conf.getInt(MAX_KEY, 1000) - minKeySize;
+    int minValueSize = conf.getInt(MIN_VALUE, 0);
     int valueSizeRange = 
-      conf.getInt("test.bmo.max_value", 20000) - minValueSize;
+      conf.getInt(MAX_VALUE, 20000) - minValueSize;
     BytesWritable randomKey = new BytesWritable();
     BytesWritable randomValue = new BytesWritable();
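
With the keys promoted to public constants, a driver can override the
defaults without hard-coding strings — a minimal sketch (sizes are arbitrary
examples):

  import org.apache.hadoop.mapred.BigMapOutput;
  import org.apache.hadoop.mapred.JobConf;

  public class BigMapOutputSizing {
    public static void main(String[] args) {
      JobConf conf = new JobConf();
      conf.setInt(BigMapOutput.MIN_KEY, 20);      // default: 10
      conf.setInt(BigMapOutput.MAX_KEY, 500);     // default: 1000
      conf.setInt(BigMapOutput.MAX_VALUE, 4096);  // default: 20000
    }
  }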
 

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java Fri Sep 18 15:09:48 2009
@@ -29,6 +29,8 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -96,7 +98,7 @@
       throws IOException {
     JobConf conf = new JobConf();
     dfsCluster = new MiniDFSCluster(conf, NUMBER_OF_NODES, true, null);
-    conf.set("mapred.task.tracker.task-controller",
+    conf.set(TTConfig.TT_TASK_CONTROLLER,
         MyLinuxTaskController.class.getName());
     mrCluster =
         new MiniMRCluster(NUMBER_OF_NODES, dfsCluster.getFileSystem().getUri()
@@ -163,7 +165,7 @@
         sb.append(",");
       }
     }
-    writer.println(String.format("mapred.local.dir=%s", sb.toString()));
+    writer.println(String.format(MRConfig.LOCAL_DIR + "=%s", sb.toString()));
 
     writer
         .println(String.format("hadoop.log.dir=%s", TaskLog.getBaseLogDir()));

Modified: hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java (original)
+++ hadoop/mapreduce/trunk/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java Fri Sep 18 15:09:48 2009
@@ -263,7 +263,7 @@
       signalFileDir = new Path(conf.get("signal.dir.path"));
       numReducers = conf.getNumReduceTasks();
       fs = FileSystem.get(conf);
-      String taskAttemptId = conf.get("mapred.task.id");
+      String taskAttemptId = conf.get(JobContext.TASK_ATTEMPT_ID);
       if (taskAttemptId != null) {
         TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskAttemptId);
         taskNumber = taskAttemptID.getTaskID().getId();
@@ -421,7 +421,7 @@
 
     // Set the following for reduce tasks to be able to be started running
     // immediately along with maps.
-    conf.set("mapred.reduce.slowstart.completed.maps", String.valueOf(0));
+    conf.set(JobContext.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, String.valueOf(0));
 
     return conf;
   }
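
The attempt-id lookup above generalizes to any task-side code — a sketch,
assuming the framework has populated the attempt id for the running task:

  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapred.TaskAttemptID;
  import org.apache.hadoop.mapreduce.JobContext;

  public class TaskIdentitySketch {
    static int taskNumber(JobConf conf) {
      String attempt = conf.get(JobContext.TASK_ATTEMPT_ID);
      if (attempt == null) {
        return -1;                    // not running inside a task
      }
      TaskAttemptID id = TaskAttemptID.forName(attempt);
      return id.getTaskID().getId();  // this task's index within the job
    }
  }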


