hadoop-mapreduce-commits mailing list archives

From sha...@apache.org
Subject svn commit: r816664 [6/9] - in /hadoop/mapreduce/trunk: ./ conf/ src/benchmarks/gridmix/ src/benchmarks/gridmix/pipesort/ src/benchmarks/gridmix2/ src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/ src/c++/pipes/impl/ src/c++/task-controller...
Date Fri, 18 Sep 2009 15:10:02 GMT
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Application.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Application.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Application.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Application.java Fri Sep 18 15:09:48 2009
@@ -29,6 +29,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.io.FloatWritable;
@@ -80,10 +81,10 @@
     Map<String, String> env = new HashMap<String,String>();
     // add TMPDIR environment variable with the value of java.io.tmpdir
     env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
-    env.put("hadoop.pipes.command.port", 
+    env.put(Submitter.PORT, 
             Integer.toString(serverSocket.getLocalPort()));
     List<String> cmd = new ArrayList<String>();
-    String interpretor = conf.get("hadoop.pipes.executable.interpretor");
+    String interpretor = conf.get(Submitter.INTERPRETOR);
     if (interpretor != null) {
       cmd.add(interpretor);
     }
@@ -96,7 +97,8 @@
     }
     cmd.add(executable);
     // wrap the command in a stdout/stderr capture
-    TaskAttemptID taskid = TaskAttemptID.forName(conf.get("mapred.task.id"));
+    TaskAttemptID taskid = 
+      TaskAttemptID.forName(conf.get(JobContext.TASK_ATTEMPT_ID));
     File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
     File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
     long logLength = TaskLog.getTaskLogLength(conf);

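The hunk above replaces the "mapred.task.id" literal with the new JobContext.TASK_ATTEMPT_ID constant when Application.java recovers the attempt id for the stdout/stderr log files. A minimal sketch of that lookup, not part of the commit (the class name and the hard-coded attempt string are invented; in a real task JVM the framework populates the key):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapred.TaskAttemptID;
  import org.apache.hadoop.mapreduce.JobContext;

  public class TaskAttemptIdLookup {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Normally set by the framework; faked here for the sketch.
      conf.set(JobContext.TASK_ATTEMPT_ID,
               "attempt_200909181510_0001_m_000000_0");
      TaskAttemptID taskid =
          TaskAttemptID.forName(conf.get(JobContext.TASK_ATTEMPT_ID));
      System.out.println(taskid.getJobID());  // job_200909181510_0001
    }
  }
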
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java Fri Sep 18 15:09:48 2009
@@ -30,6 +30,7 @@
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.SkipBadRecords;
+import org.apache.hadoop.mapreduce.JobContext;
 
 /**
  * An adaptor to run a C++ mapper.
@@ -76,7 +77,7 @@
     boolean isJavaInput = Submitter.getIsJavaRecordReader(job);
     downlink.runMap(reporter.getInputSplit(), 
                     job.getNumReduceTasks(), isJavaInput);
-    boolean skipping = job.getBoolean("mapred.skip.on", false);
+    boolean skipping = job.getBoolean(JobContext.SKIP_RECORDS, false);
     try {
       if (isJavaInput) {
         // allocate key & value instances that are re-used for all entries

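PipesMapRunner here (and PipesReducer further down) now reads the skipping-mode flag through JobContext.SKIP_RECORDS, i.e. "mapreduce.job.skiprecords", rather than the old "mapred.skip.on" literal. A small sketch of the flag round-trip, with an illustrative class name:

  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapreduce.JobContext;

  public class SkipFlagDemo {
    public static void main(String[] args) {
      JobConf job = new JobConf();
      // Defaults to false while the key is unset.
      System.out.println(job.getBoolean(JobContext.SKIP_RECORDS, false));
      job.setBoolean(JobContext.SKIP_RECORDS, true);  // enable skipping mode
      System.out.println(job.getBoolean(JobContext.SKIP_RECORDS, false));
    }
  }
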
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java Fri Sep 18 15:09:48 2009
@@ -37,7 +37,7 @@
  * The only useful thing this does is set up the Map-Reduce job to get the
  * {@link PipesDummyRecordReader}, everything else left for the 'actual'
  * InputFormat specified by the user which is given by 
- * <i>mapred.pipes.user.inputformat</i>.
+ * <i>mapreduce.pipes.inputformat</i>.
  */
 class PipesNonJavaInputFormat 
 implements InputFormat<FloatWritable, NullWritable> {
@@ -51,7 +51,7 @@
   public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     // Delegate the generation of input splits to the 'original' InputFormat
     return ReflectionUtils.newInstance(
-        job.getClass("mapred.pipes.user.inputformat", 
+        job.getClass(Submitter.INPUT_FORMAT, 
                      TextInputFormat.class, 
                      InputFormat.class), job).getSplits(job, numSplits);
   }

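The getSplits() change keeps the existing delegation pattern: the user's real InputFormat class is stored under Submitter.INPUT_FORMAT ("mapreduce.pipes.inputformat" after this commit) and instantiated reflectively. A hedged stand-alone sketch of that pattern (the demo class is invented; TextInputFormat is the default shown in the hunk):

  import org.apache.hadoop.mapred.InputFormat;
  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapred.TextInputFormat;
  import org.apache.hadoop.mapred.pipes.Submitter;
  import org.apache.hadoop.util.ReflectionUtils;

  public class DelegateDemo {
    public static void main(String[] args) {
      JobConf job = new JobConf();
      // Falls back to TextInputFormat when no user class was recorded.
      InputFormat<?, ?> real = ReflectionUtils.newInstance(
          job.getClass(Submitter.INPUT_FORMAT,
                       TextInputFormat.class, InputFormat.class), job);
      System.out.println(real.getClass().getName());
    }
  }
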
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java Fri Sep 18 15:09:48 2009
@@ -27,6 +27,7 @@
 import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.SkipBadRecords;
+import org.apache.hadoop.mapreduce.JobContext;
 
 import java.io.IOException;
 import java.util.Iterator;
@@ -49,7 +50,7 @@
     //disable the auto increment of the counter. For pipes, no of processed 
     //records could be different(equal or less) than the no of records input.
     SkipBadRecords.setAutoIncrReducerProcCount(job, false);
-    skipping = job.getBoolean("mapred.skip.on", false);
+    skipping = job.getBoolean(JobContext.SKIP_RECORDS, false);
   }
 
   /**

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Submitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Submitter.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Submitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Submitter.java Fri Sep 18 15:09:48 2009
@@ -32,8 +32,6 @@
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.OptionBuilder;
-import org.apache.commons.cli.OptionGroup;
-import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.Parser;
@@ -41,6 +39,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -68,6 +67,18 @@
 public class Submitter extends Configured implements Tool {
 
   protected static final Log LOG = LogFactory.getLog(Submitter.class);
+  public static final String PRESERVE_COMMANDFILE = 
+    "mapreduce.pipes.commandfile.preserve";
+  public static final String EXECUTABLE = "mapreduce.pipes.executable";
+  public static final String INTERPRETOR = 
+    "mapreduce.pipes.executable.interpretor";
+  public static final String IS_JAVA_MAP = "mapreduce.pipes.isjavamapper";
+  public static final String IS_JAVA_RR = "mapreduce.pipes.isjavarecordreader";
+  public static final String IS_JAVA_RW = "mapreduce.pipes.isjavarecordwriter";
+  public static final String IS_JAVA_REDUCE = "mapreduce.pipes.isjavareducer";
+  public static final String PARTITIONER = "mapreduce.pipes.partitioner";
+  public static final String INPUT_FORMAT = "mapreduce.pipes.inputformat";
+  public static final String PORT = "mapreduce.pipes.command.port";
   
   public Submitter() {
     this(new Configuration());
@@ -83,9 +94,9 @@
    * @return the URI where the application's executable is located
    */
   public static String getExecutable(JobConf conf) {
-    return conf.get("hadoop.pipes.executable");
+    return conf.get(Submitter.EXECUTABLE);
   }
-  
+
   /**
    * Set the URI for the application's executable. Normally this is a hdfs: 
    * location.
@@ -93,7 +104,7 @@
    * @param executable The URI of the application's executable.
    */
   public static void setExecutable(JobConf conf, String executable) {
-    conf.set("hadoop.pipes.executable", executable);
+    conf.set(Submitter.EXECUTABLE, executable);
   }
 
   /**
@@ -102,7 +113,7 @@
    * @param value the new value
    */
   public static void setIsJavaRecordReader(JobConf conf, boolean value) {
-    conf.setBoolean("hadoop.pipes.java.recordreader", value);
+    conf.setBoolean(Submitter.IS_JAVA_RR, value);
   }
 
   /**
@@ -111,7 +122,7 @@
    * @return is it a Java RecordReader?
    */
   public static boolean getIsJavaRecordReader(JobConf conf) {
-    return conf.getBoolean("hadoop.pipes.java.recordreader", false);
+    return conf.getBoolean(Submitter.IS_JAVA_RR, false);
   }
 
   /**
@@ -120,7 +131,7 @@
    * @param value the new value
    */
   public static void setIsJavaMapper(JobConf conf, boolean value) {
-    conf.setBoolean("hadoop.pipes.java.mapper", value);
+    conf.setBoolean(Submitter.IS_JAVA_MAP, value);
   }
 
   /**
@@ -129,7 +140,7 @@
    * @return is it a Java Mapper?
    */
   public static boolean getIsJavaMapper(JobConf conf) {
-    return conf.getBoolean("hadoop.pipes.java.mapper", false);
+    return conf.getBoolean(Submitter.IS_JAVA_MAP, false);
   }
 
   /**
@@ -138,7 +149,7 @@
    * @param value the new value
    */
   public static void setIsJavaReducer(JobConf conf, boolean value) {
-    conf.setBoolean("hadoop.pipes.java.reducer", value);
+    conf.setBoolean(Submitter.IS_JAVA_REDUCE, value);
   }
 
   /**
@@ -147,7 +158,7 @@
    * @return is it a Java Reducer?
    */
   public static boolean getIsJavaReducer(JobConf conf) {
-    return conf.getBoolean("hadoop.pipes.java.reducer", false);
+    return conf.getBoolean(Submitter.IS_JAVA_REDUCE, false);
   }
 
   /**
@@ -156,7 +167,7 @@
    * @param value the new value to set
    */
   public static void setIsJavaRecordWriter(JobConf conf, boolean value) {
-    conf.setBoolean("hadoop.pipes.java.recordwriter", value);
+    conf.setBoolean(Submitter.IS_JAVA_RW, value);
   }
 
   /**
@@ -165,7 +176,7 @@
    * @return true, if the output of the job will be written by Java
    */
   public static boolean getIsJavaRecordWriter(JobConf conf) {
-    return conf.getBoolean("hadoop.pipes.java.recordwriter", false);
+    return conf.getBoolean(Submitter.IS_JAVA_RW, false);
   }
 
   /**
@@ -187,7 +198,7 @@
    * @param cls the user's partitioner class
    */
   static void setJavaPartitioner(JobConf conf, Class cls) {
-    conf.set("hadoop.pipes.partitioner", cls.getName());
+    conf.set(Submitter.PARTITIONER, cls.getName());
   }
   
   /**
@@ -196,7 +207,7 @@
    * @return the class that the user submitted
    */
   static Class<? extends Partitioner> getJavaPartitioner(JobConf conf) {
-    return conf.getClass("hadoop.pipes.partitioner", 
+    return conf.getClass(Submitter.PARTITIONER, 
                          HashPartitioner.class,
                          Partitioner.class);
   }
@@ -209,12 +220,12 @@
    * JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
    * being deleted.
    * To run using the data file, set the environment variable 
-   * "hadoop.pipes.command.file" to point to the file.
+   * "mapreduce.pipes.commandfile" to point to the file.
    * @param conf the configuration to check
    * @return will the framework save the command file?
    */
   public static boolean getKeepCommandFile(JobConf conf) {
-    return conf.getBoolean("hadoop.pipes.command-file.keep", false);
+    return conf.getBoolean(Submitter.PRESERVE_COMMANDFILE, false);
   }
 
   /**
@@ -223,7 +234,7 @@
    * @param keep the new value
    */
   public static void setKeepCommandFile(JobConf conf, boolean keep) {
-    conf.setBoolean("hadoop.pipes.command-file.keep", keep);
+    conf.setBoolean(Submitter.PRESERVE_COMMANDFILE, keep);
   }
 
   /**
@@ -279,15 +290,15 @@
       }
     }
     String textClassname = Text.class.getName();
-    setIfUnset(conf, "mapred.mapoutput.key.class", textClassname);
-    setIfUnset(conf, "mapred.mapoutput.value.class", textClassname);
-    setIfUnset(conf, "mapred.output.key.class", textClassname);
-    setIfUnset(conf, "mapred.output.value.class", textClassname);
+    setIfUnset(conf, JobContext.MAP_OUTPUT_KEY_CLASS, textClassname);
+    setIfUnset(conf, JobContext.MAP_OUTPUT_VALUE_CLASS, textClassname);
+    setIfUnset(conf, JobContext.OUTPUT_KEY_CLASS, textClassname);
+    setIfUnset(conf, JobContext.OUTPUT_VALUE_CLASS, textClassname);
     
     // Use PipesNonJavaInputFormat if necessary to handle progress reporting
     // from C++ RecordReaders ...
     if (!getIsJavaRecordReader(conf) && !getIsJavaMapper(conf)) {
-      conf.setClass("mapred.pipes.user.inputformat", 
+      conf.setClass(Submitter.INPUT_FORMAT, 
                     conf.getInputFormat().getClass(), InputFormat.class);
       conf.setInputFormat(PipesNonJavaInputFormat.class);
     }
@@ -302,8 +313,8 @@
       DistributedCache.createSymlink(conf);
       // set default gdb commands for map and reduce task 
       String defScript = "$HADOOP_HOME/src/c++/pipes/debug/pipes-default-script";
-      setIfUnset(conf,"mapred.map.task.debug.script",defScript);
-      setIfUnset(conf,"mapred.reduce.task.debug.script",defScript);
+      setIfUnset(conf, JobContext.MAP_DEBUG_SCRIPT,defScript);
+      setIfUnset(conf, JobContext.REDUCE_DEBUG_SCRIPT,defScript);
     }
     URI[] fileCache = DistributedCache.getCacheFiles(conf);
     if (fileCache == null) {

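Several of the Submitter hunks call a setIfUnset() helper so that hard defaults never clobber user-supplied values; the helper's body is not shown in this excerpt, so the sketch below is an assumed equivalent rather than the committed code:

  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapreduce.JobContext;

  public class SetIfUnsetDemo {
    // Assumed equivalent of Submitter's private setIfUnset() helper.
    static void setIfUnset(JobConf conf, String key, String value) {
      if (conf.get(key) == null) {
        conf.set(key, value);
      }
    }
    public static void main(String[] args) {
      JobConf conf = new JobConf();
      conf.set(JobContext.OUTPUT_KEY_CLASS, "org.apache.hadoop.io.LongWritable");
      setIfUnset(conf, JobContext.OUTPUT_KEY_CLASS, "org.apache.hadoop.io.Text");
      // The earlier, user-chosen value survives.
      System.out.println(conf.get(JobContext.OUTPUT_KEY_CLASS));
    }
  }
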
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java Fri Sep 18 15:09:48 2009
@@ -33,6 +33,7 @@
 import org.apache.hadoop.mapreduce.jobhistory.JobHistory;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.mapreduce.server.jobtracker.State;
+import org.apache.hadoop.mapreduce.util.ConfigUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 
@@ -48,8 +49,7 @@
   private Path jobHistoryDir = null;
 
   static {
-    Configuration.addDefaultResource("mapred-default.xml");
-    Configuration.addDefaultResource("mapred-site.xml");
+    ConfigUtil.loadResources();
   }
   
   public Cluster(Configuration conf) throws IOException {

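Cluster now defers to ConfigUtil.loadResources() instead of registering the two default resources itself; Job below gets the same treatment. ConfigUtil is not shown in this excerpt, so the following is only a minimal sketch of what the call replaces (the real ConfigUtil presumably also maps the deprecated key names onto the new ones):

  import org.apache.hadoop.conf.Configuration;

  public class LoadResourcesSketch {
    // Assumed minimal equivalent of ConfigUtil.loadResources().
    public static void loadResources() {
      Configuration.addDefaultResource("mapred-default.xml");
      Configuration.addDefaultResource("mapred-site.xml");
    }
  }
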
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/InputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/InputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/InputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/InputFormat.java Fri Sep 18 15:09:48 2009
@@ -50,8 +50,8 @@
  * bytes, of the input files. However, the {@link FileSystem} blocksize of  
  * the input files is treated as an upper bound for input splits. A lower bound 
  * on the split size can be set via 
- * <a href="{@docRoot}/../mapred-default.html#mapred.min.split.size">
- * mapred.min.split.size</a>.</p>
+ * <a href="{@docRoot}/../mapred-default.html#mapreduce.input.fileinputformat.split.minsize">
+ * mapreduce.input.fileinputformat.split.minsize</a>.</p>
  * 
 * <p>Clearly, logical splits based on input-size are insufficient for many 
 * applications since record boundaries are to be respected. In such cases, the

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java Fri Sep 18 15:09:48 2009
@@ -47,6 +47,7 @@
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
+import org.apache.hadoop.mapreduce.util.ConfigUtil;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
@@ -87,24 +88,28 @@
   private static final Log LOG = LogFactory.getLog(Job.class);
   public static enum JobState {DEFINE, RUNNING};
   private static final long MAX_JOBSTATUS_AGE = 1000 * 2;
-  public static final String OUTPUT_FILTER = "jobclient.output.filter";
+  public static final String OUTPUT_FILTER = "mapreduce.client.output.filter";
   /** Key in mapred-*.xml that sets completionPollInvervalMillis */
   public static final String COMPLETION_POLL_INTERVAL_KEY = 
-    "jobclient.completion.poll.interval";
+    "mapreduce.client.completion.pollinterval";
   
   /** Default completionPollIntervalMillis is 5000 ms. */
   static final int DEFAULT_COMPLETION_POLL_INTERVAL = 5000;
   /** Key in mapred-*.xml that sets progMonitorPollIntervalMillis */
   public static final String PROGRESS_MONITOR_POLL_INTERVAL_KEY =
-      "jobclient.progress.monitor.poll.interval";
+    "mapreduce.client.progressmonitor.pollinterval";
   /** Default progMonitorPollIntervalMillis is 1000 ms. */
   static final int DEFAULT_MONITOR_POLL_INTERVAL = 1000;
 
+  public static final String USED_GENERIC_PARSER = 
+    "mapreduce.client.genericoptionsparser.used";
+  public static final String SUBMIT_REPLICATION = 
+    "mapreduce.client.submit.file.replication";
+
   public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
 
   static {
-    Configuration.addDefaultResource("mapred-default.xml");
-    Configuration.addDefaultResource("mapred-site.xml");
+    ConfigUtil.loadResources();
   }
 
   private JobState state = JobState.DEFINE;
@@ -757,7 +762,7 @@
    */
   public void setJobSetupCleanupNeeded(boolean needed) {
     ensureState(JobState.DEFINE);
-    conf.setBoolean("mapred.committer.job.setup.cleanup.needed", needed);
+    conf.setBoolean(SETUP_CLEANUP_NEEDED, needed);
   }
 
   /**

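Job's client-side keys move under the mapreduce.client.* namespace, with the defaults kept at 5000 ms and 1000 ms. A hedged sketch of how a client would read them (illustrative class name; both key constants are public per the hunk above):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Job;

  public class PollIntervalDemo {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      int completionMs = conf.getInt(Job.COMPLETION_POLL_INTERVAL_KEY, 5000);
      int progressMs = conf.getInt(Job.PROGRESS_MONITOR_POLL_INTERVAL_KEY, 1000);
      System.out.println(completionMs + " ms / " + progressMs + " ms");
    }
  }
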
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobContext.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobContext.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobContext.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobContext.java Fri Sep 18 15:09:48 2009
@@ -38,15 +38,188 @@
 public class JobContext {
   // Put all of the attribute names in here so that Job and JobContext are
   // consistent.
-  protected static final String INPUT_FORMAT_CLASS_ATTR = 
-    "mapreduce.inputformat.class";
-  protected static final String MAP_CLASS_ATTR = "mapreduce.map.class";
-  protected static final String COMBINE_CLASS_ATTR = "mapreduce.combine.class";
-  protected static final String REDUCE_CLASS_ATTR = "mapreduce.reduce.class";
-  protected static final String OUTPUT_FORMAT_CLASS_ATTR = 
-    "mapreduce.outputformat.class";
-  protected static final String PARTITIONER_CLASS_ATTR = 
-    "mapreduce.partitioner.class";
+  public static final String INPUT_FORMAT_CLASS_ATTR = 
+    "mapreduce.job.inputformat.class";
+  public static final String MAP_CLASS_ATTR = "mapreduce.job.map.class";
+  public static final String COMBINE_CLASS_ATTR = 
+    "mapreduce.job.combine.class";
+  public static final String REDUCE_CLASS_ATTR = 
+    "mapreduce.job.reduce.class";
+  public static final String OUTPUT_FORMAT_CLASS_ATTR = 
+    "mapreduce.job.outputformat.class";
+  public static final String PARTITIONER_CLASS_ATTR = 
+    "mapreduce.job.partitioner.class";
+
+  public static final String SETUP_CLEANUP_NEEDED = 
+    "mapreduce.job.committer.setup.cleanup.needed";
+  public static final String JAR = "mapreduce.job.jar";
+  public static final String ID = "mapreduce.job.id";
+  public static final String JOB_NAME = "mapreduce.job.name";
+  public static final String USER_NAME = "mapreduce.job.user.name";
+  public static final String PRIORITY = "mapreduce.job.priority";
+  public static final String QUEUE_NAME = "mapreduce.job.queuename";
+  public static final String JVM_NUMTASKS_TORUN = 
+    "mapreduce.job.jvm.numtasks";
+  public static final String SPLIT_FILE = "mapreduce.job.splitfile";
+  public static final String NUM_MAPS = "mapreduce.job.maps";
+  public static final String MAX_TASK_FAILURES_PER_TRACKER = 
+    "mapreduce.job.maxtaskfailures.per.tracker";
+  public static final String COMPLETED_MAPS_FOR_REDUCE_SLOWSTART =
+    "mapreduce.job.reduce.slowstart.completedmaps";
+  public static final String NUM_REDUCES = "mapreduce.job.reduces";
+  public static final String SKIP_RECORDS = "mapreduce.job.skiprecords";
+  public static final String SKIP_OUTDIR = "mapreduce.job.skip.outdir";
+  public static final String SPECULATIVE_SLOWNODE_THRESHOLD =
+    "mapreduce.job.speculative.slownodethreshold";
+  public static final String SPECULATIVE_SLOWTASK_THRESHOLD = 
+    "mapreduce.job.speculative.slowtaskthreshold";
+  public static final String SPECULATIVECAP = 
+    "mapreduce.job.speculative.speculativecap";
+  public static final String JOB_LOCAL_DIR = "mapreduce.job.local.dir";
+  public static final String OUTPUT_KEY_CLASS = 
+    "mapreduce.job.output.key.class";
+  public static final String OUTPUT_VALUE_CLASS = 
+    "mapreduce.job.output.value.class";
+  public static final String KEY_COMPARATOR = 
+    "mapreduce.job.output.key.comparator.class";
+  public static final String GROUP_COMPARATOR_CLASS = 
+    "mapreduce.job.output.group.comparator.class";
+  public static final String WORKING_DIR = "mapreduce.job.working.dir";
+  public static final String HISTORY_LOCATION = 
+    "mapreduce.job.userhistorylocation"; 
+  public static final String END_NOTIFICATION_URL = 
+    "mapreduce.job.end-notification.url";
+  public static final String END_NOTIFICATION_RETRIES = 
+    "mapreduce.job.end-notification.retry.attempts";
+  public static final String END_NOTIFICATION_RETRIE_INTERVAL = 
+    "mapreduce.job.end-notification.retry.interval";
+  public static final String CLASSPATH_ARCHIVES = 
+    "mapreduce.job.classpath.archives";
+  public static final String CLASSPATH_FILES = "mapreduce.job.classpath.files";
+  public static final String CACHE_FILES = "mapreduce.job.cache.files";
+  public static final String CACHE_ARCHIVES = "mapreduce.job.cache.archives";
+  public static final String CACHE_LOCALFILES = 
+    "mapreduce.job.cache.local.files";
+  public static final String CACHE_LOCALARCHIVES = 
+    "mapreduce.job.cache.local.archives";
+  public static final String CACHE_FILE_TIMESTAMPS = 
+    "mapreduce.job.cache.files.timestamps";
+  public static final String CACHE_ARCHIVES_TIMESTAMPS = 
+    "mapreduce.job.cache.archives.timestamps";
+  public static final String CACHE_SYMLINK = 
+    "mapreduce.job.cache.symlink.create";
+  
+  public static final String IO_SORT_FACTOR = 
+    "mapreduce.task.io.sort.factor"; 
+  public static final String IO_SORT_MB = "mapreduce.task.io.sort.mb";
+  public static final String PRESERVE_FAILED_TASK_FILES = 
+    "mapreduce.task.files.preserve.failedtasks";
+  public static final String PRESERVE_FILES_PATTERN = 
+    "mapreduce.task.files.preserve.filepattern";
+  public static final String TASK_TEMP_DIR = "mapreduce.task.tmp.dir";
+  public static final String TASK_DEBUGOUT_LINES = 
+    "mapreduce.task.debugout.lines";
+  public static final String RECORDS_BEFORE_PROGRESS = 
+    "mapreduce.task.merge.progress.records";
+  public static final String SKIP_START_ATTEMPTS = 
+    "mapreduce.task.skip.start.attempts";
+  public static final String TASK_ATTEMPT_ID = "mapreduce.task.attempt.id";
+  public static final String TASK_ISMAP = "mapreduce.task.ismap";
+  public static final String TASK_PARTITION = "mapreduce.task.partition";
+  public static final String TASK_PROFILE = "mapreduce.task.profile";
+  public static final String TASK_PROFILE_PARAMS = 
+    "mapreduce.task.profile.params";
+  public static final String NUM_MAP_PROFILES = 
+    "mapreduce.task.profile.maps";
+  public static final String NUM_REDUCE_PROFILES = 
+    "mapreduce.task.profile.reduces";
+  public static final String TASK_TIMEOUT = "mapreduce.task.timeout";
+  public static final String TASK_ID = "mapreduce.task.id";
+  public static final String TASK_OUTPUT_DIR = "mapreduce.task.output.dir";
+  public static final String TASK_USERLOG_LIMIT = 
+    "mapreduce.task.userlog.limit.kb";
+  public static final String TASK_LOG_RETAIN_HOURS = 
+    "mapred.task.userlog.retain.hours";
+  
+  public static final String MAP_SORT_RECORD_PERCENT = 
+    "mapreduce.map.sort.record.percent";
+  public static final String MAP_SORT_SPILL_PERCENT =
+    "mapreduce.map.sort.spill.percent";
+  public static final String MAP_INPUT_FILE = "mapreduce.map.input.file";
+  public static final String MAP_INPUT_PATH = "mapreduce.map.input.length";
+  public static final String MAP_INPUT_START = "mapreduce.map.input.start";
+  public static final String MAP_MEMORY_MB = "mapreduce.map.memory.mb";
+  public static final String MAP_ENV = "mapreduce.map.env";
+  public static final String MAP_JAVA_OPTS = "mapreduce.map.java.opts";
+  public static final String MAP_ULIMIT = "mapreduce.map.ulimit"; 
+  public static final String MAP_MAX_ATTEMPTS = "mapreduce.map.maxattempts";
+  public static final String MAP_DEBUG_SCRIPT = 
+    "mapreduce.map.debug.script";
+  public static final String MAP_SPECULATIVE = "mapreduce.map.speculative";
+  public static final String MAP_FAILURES_MAX_PERCENT = 
+    "mapreduce.map.failures.maxpercent";
+  public static final String MAP_SKIP_INCR_PROC_COUNT = 
+    "mapreduce.map.skip.proc-count.auto-incr";
+  public static final String MAP_SKIP_MAX_RECORDS = 
+    "mapreduce.map.skip.maxrecords";
+  public static final String MAP_COMBINE_MIN_SPISS = 
+    "mapreduce.map.combine.minspills";
+  public static final String MAP_OUTPUT_COMPRESS = 
+    "mapreduce.map.output.compress";
+  public static final String MAP_OUTPUT_COMPRESS_CODEC = 
+    "mapreduce.map.output.compress.codec";
+  public static final String MAP_OUTPUT_KEY_CLASS = 
+    "mapreduce.map.output.key.class";
+  public static final String MAP_OUTPUT_VALUE_CLASS = 
+    "mapreduce.map.output.value.class";
+  public static final String MAP_OUTPUT_KEY_FIELD_SEPERATOR = 
+    "mapreduce.map.output.key.field.separator";
+  public static final String MAP_LOG_LEVEL = "mapreduce.map.log.level";
+ 
+  public static final String REDUCE_LOG_LEVEL = 
+    "mapreduce.reduce.log.level";
+  public static final String REDUCE_MERGE_INMEM_THRESHOLD = 
+    "mapreduce.reduce.merge.inmem.threshold";
+  public static final String REDUCE_INPUT_BUFFER_PERCENT = 
+    "mapreduce.reduce.input.buffer.percent";
+  public static final String REDUCE_MARKRESET_BUFFER_PERCENT = 
+    "mapreduce.reduce.markreset.buffer.percent";
+  public static final String REDUCE_MARKRESET_BUFFER_SIZE = 
+    "mapreduce.reduce.markreset.buffer.size";
+  public static final String REDUCE_MEMORY_MB = 
+    "mapreduce.reduce.memory.mb";
+  public static final String REDUCE_MEMORY_TOTAL_BYTES = 
+    "mapreduce.reduce.memory.totalbytes";
+  public static final String SHUFFLE_INPUT_BUFFER_PERCENT = 
+    "mapreduce.reduce.shuffle.input.buffer.percent";
+  public static final String SHUFFLE_MERGE_EPRCENT = 
+    "mapreduce.reduce.shuffle.merge.percent";
+  public static final String REDUCE_FAILURES_MAXPERCENT = 
+   "mapreduce.reduce.failures.maxpercent";
+  public static final String REDUCE_ENV = "mapreduce.reduce.env";
+  public static final String REDUCE_JAVA_OPTS = 
+    "mapreduce.reduce.java.opts";
+  public static final String REDUCE_ULIMIT = "mapreduce.reduce.ulimit"; 
+  public static final String REDUCE_MAX_ATTEMPTS = 
+    "mapreduce.reduce.maxattempts";
+  public static final String SHUFFLE_PARALLEL_COPIES = 
+    "mapreduce.reduce.shuffle.parallelcopies";
+  public static final String REDUCE_DEBUG_SCRIPT = 
+    "mapreduce.reduce.debug.script";
+  public static final String REDUCE_SPECULATIVE = 
+    "mapreduce.reduce.speculative";
+  public static final String SHUFFLE_CONNECT_TIMEOUT = 
+    "mapreduce.reduce.shuffle.connect.timeout";
+  public static final String SHUFFLE_READ_TIMEOUT = 
+    "mapreduce.reduce.shuffle.read.timeout";
+  public static final String REDUCE_SKIP_INCR_PROC_COUNT = 
+    "mapreduce.reduce.skip.proc-count.auto-incr";
+  public static final String REDUCE_SKIP_MAXGROUPS = 
+    "mapreduce.reduce.skip.maxgroups";
+  public static final String REDUCE_MEMTOMEM_THRESHOLD = 
+    "mapreduce.reduce.merge.memtomem.threshold";
+  public static final String REDUCE_MEMTOMEM_ENABLED = 
+    "mapreduce.reduce.merge.memtomem.enabled";
 
   protected final org.apache.hadoop.mapred.JobConf conf;
   private final JobID jobId;
@@ -243,7 +416,7 @@
    * @return boolean 
    */
   public boolean getJobSetupCleanupNeeded() {
-    return conf.getBoolean("mapred.committer.job.setup.cleanup.needed", true);
+    return conf.getBoolean(SETUP_CLEANUP_NEEDED, true);
   }
 
   /**

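The JobContext hunk is the heart of the commit: every renamed property now has a public constant, grouped by prefix (mapreduce.job.*, mapreduce.task.*, mapreduce.map.*, mapreduce.reduce.*). A small sketch of client code addressing the new names through the constants instead of string literals (class name invented):

  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapreduce.JobContext;

  public class JobContextKeysDemo {
    public static void main(String[] args) {
      JobConf conf = new JobConf();
      conf.setInt(JobContext.NUM_REDUCES, 4);        // mapreduce.job.reduces
      conf.setBoolean(JobContext.MAP_SPECULATIVE, false);
      conf.setInt(JobContext.IO_SORT_MB, 200);       // mapreduce.task.io.sort.mb
      System.out.println(conf.get("mapreduce.job.reduces"));  // prints 4
    }
  }
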
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java Fri Sep 18 15:09:48 2009
@@ -119,7 +119,7 @@
   private void copyAndConfigureFiles(Job job, Path submitJobDir,
       short replication) throws IOException {
     Configuration conf = job.getConfiguration();
-    if (!(conf.getBoolean("mapred.used.genericoptionsparser", false))) {
+    if (!(conf.getBoolean(Job.USED_GENERIC_PARSER, false))) {
       LOG.warn("Use GenericOptionsParser for parsing the arguments. " +
                "Applications should implement Tool for the same.");
     }
@@ -222,7 +222,7 @@
   private void configureCommandLineOptions(Job job, Path submitJobDir,
       Path submitJarFile) throws IOException {
     Configuration conf = job.getConfiguration();
-    short replication = (short)conf.getInt("mapred.submit.replication", 10);
+    short replication = (short)conf.getInt(Job.SUBMIT_REPLICATION, 10);
     copyAndConfigureFiles(job, submitJobDir, replication);
     
     /* set this user's id in job configuration, so later job files can be

Added: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/MRConfig.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/MRConfig.java?rev=816664&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/MRConfig.java (added)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/MRConfig.java Fri Sep 18 15:09:48 2009
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import org.apache.hadoop.mapred.JobTracker;
+import org.apache.hadoop.mapred.TaskTracker;
+
+/**
+ * Place holder for cluster level configuration keys.
+ * 
+ * These keys are used by both {@link JobTracker} and {@link TaskTracker}. The 
+ * keys should have "mapreduce.cluster." as the prefix. 
+ *
+ */
+public interface MRConfig {
+
+  // Cluster-level configuration parameters
+  public static final String TEMP_DIR = "mapreduce.cluster.temp.dir";
+  public static final String LOCAL_DIR = "mapreduce.cluster.local.dir";
+  public static final String MAPMEMORY_MB = "mapreduce.cluster.mapmemory.mb";
+  public static final String REDUCEMEMORY_MB = 
+    "mapreduce.cluster.reducememory.mb";
+}

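MRConfig complements JobContext by holding the cluster-scoped keys shared by the JobTracker and TaskTracker. A hypothetical daemon-side read, just to show the intended usage (the fallback path is invented):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.MRConfig;

  public class ClusterKeysDemo {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Invented fallback; real deployments set mapreduce.cluster.local.dir.
      String localDirs = conf.get(MRConfig.LOCAL_DIR, "/tmp/mapred/local");
      System.out.println("local dirs: " + localDirs);
    }
  }
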
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java Fri Sep 18 15:09:48 2009
@@ -310,7 +310,7 @@
   @Deprecated
   public static void setCacheArchives(URI[] archives, Configuration conf) {
     String sarchives = StringUtils.uriToString(archives);
-    conf.set("mapred.cache.archives", sarchives);
+    conf.set(JobContext.CACHE_ARCHIVES, sarchives);
   }
 
   /**
@@ -323,7 +323,7 @@
   @Deprecated
   public static void setCacheFiles(URI[] files, Configuration conf) {
     String sfiles = StringUtils.uriToString(files);
-    conf.set("mapred.cache.files", sfiles);
+    conf.set(JobContext.CACHE_FILES, sfiles);
   }
 
   /**
@@ -336,7 +336,7 @@
    */
   @Deprecated
   public static URI[] getCacheArchives(Configuration conf) throws IOException {
-    return StringUtils.stringToURI(conf.getStrings("mapred.cache.archives"));
+    return StringUtils.stringToURI(conf.getStrings(JobContext.CACHE_ARCHIVES));
   }
 
   /**
@@ -349,7 +349,7 @@
    */
   @Deprecated
   public static URI[] getCacheFiles(Configuration conf) throws IOException {
-    return StringUtils.stringToURI(conf.getStrings("mapred.cache.files"));
+    return StringUtils.stringToURI(conf.getStrings(JobContext.CACHE_FILES));
   }
 
   /**
@@ -364,7 +364,7 @@
   public static Path[] getLocalCacheArchives(Configuration conf)
     throws IOException {
     return StringUtils.stringToPath(conf
-                                    .getStrings("mapred.cache.localArchives"));
+                                    .getStrings(JobContext.CACHE_LOCALARCHIVES));
   }
 
   /**
@@ -378,7 +378,7 @@
   @Deprecated
   public static Path[] getLocalCacheFiles(Configuration conf)
     throws IOException {
-    return StringUtils.stringToPath(conf.getStrings("mapred.cache.localFiles"));
+    return StringUtils.stringToPath(conf.getStrings(JobContext.CACHE_LOCALFILES));
   }
 
   /**
@@ -391,7 +391,7 @@
    */
   @Deprecated
   public static String[] getArchiveTimestamps(Configuration conf) {
-    return conf.getStrings("mapred.cache.archives.timestamps");
+    return conf.getStrings(JobContext.CACHE_ARCHIVES_TIMESTAMPS);
   }
 
 
@@ -405,7 +405,7 @@
    */
   @Deprecated
   public static String[] getFileTimestamps(Configuration conf) {
-    return conf.getStrings("mapred.cache.files.timestamps");
+    return conf.getStrings(JobContext.CACHE_FILE_TIMESTAMPS);
   }
 
   /**
@@ -475,8 +475,8 @@
    */
   @Deprecated
   public static void addCacheArchive(URI uri, Configuration conf) {
-    String archives = conf.get("mapred.cache.archives");
-    conf.set("mapred.cache.archives", archives == null ? uri.toString()
+    String archives = conf.get(JobContext.CACHE_ARCHIVES);
+    conf.set(JobContext.CACHE_ARCHIVES, archives == null ? uri.toString()
              : archives + "," + uri.toString());
   }
   
@@ -489,8 +489,8 @@
    */
   @Deprecated
   public static void addCacheFile(URI uri, Configuration conf) {
-    String files = conf.get("mapred.cache.files");
-    conf.set("mapred.cache.files", files == null ? uri.toString() : files + ","
+    String files = conf.get(JobContext.CACHE_FILES);
+    conf.set(JobContext.CACHE_FILES, files == null ? uri.toString() : files + ","
              + uri.toString());
   }
 
@@ -505,8 +505,8 @@
   @Deprecated
   public static void addFileToClassPath(Path file, Configuration conf)
     throws IOException {
-    String classpath = conf.get("mapred.job.classpath.files");
-    conf.set("mapred.job.classpath.files", classpath == null ? file.toString()
+    String classpath = conf.get(JobContext.CLASSPATH_FILES);
+    conf.set(JobContext.CLASSPATH_FILES, classpath == null ? file.toString()
              : classpath + "," + file.toString());
     FileSystem fs = FileSystem.get(conf);
     URI uri = fs.makeQualified(file).toUri();
@@ -524,7 +524,7 @@
   @Deprecated
   public static Path[] getFileClassPaths(Configuration conf) {
     ArrayList<String> list = (ArrayList<String>)conf.getStringCollection(
-                                "mapred.job.classpath.files");
+                                JobContext.CLASSPATH_FILES);
     if (list.size() == 0) { 
       return null; 
     }
@@ -546,8 +546,8 @@
   @Deprecated
   public static void addArchiveToClassPath(Path archive, Configuration conf)
     throws IOException {
-    String classpath = conf.get("mapred.job.classpath.archives");
-    conf.set("mapred.job.classpath.archives", classpath == null ? archive
+    String classpath = conf.get(JobContext.CLASSPATH_ARCHIVES);
+    conf.set(JobContext.CLASSPATH_ARCHIVES, classpath == null ? archive
              .toString() : classpath + "," + archive.toString());
     FileSystem fs = FileSystem.get(conf);
     URI uri = fs.makeQualified(archive).toUri();
@@ -565,7 +565,7 @@
   @Deprecated
   public static Path[] getArchiveClassPaths(Configuration conf) {
     ArrayList<String> list = (ArrayList<String>)conf.getStringCollection(
-                                "mapred.job.classpath.archives");
+                                JobContext.CLASSPATH_ARCHIVES);
     if (list.size() == 0) { 
       return null; 
     }
@@ -585,7 +585,7 @@
    */
   @Deprecated
   public static void createSymlink(Configuration conf){
-    conf.set("mapred.create.symlink", "yes");
+    conf.set(JobContext.CACHE_SYMLINK, "yes");
   }
   
   /**
@@ -598,7 +598,7 @@
    */
   @Deprecated
   public static boolean getSymlink(Configuration conf){
-    String result = conf.get("mapred.create.symlink");
+    String result = conf.get(JobContext.CACHE_SYMLINK);
     if ("yes".equals(result)){
       return true;
     }

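The deprecated DistributedCache setters keep their append semantics; only the key literals change. A sketch showing how two addCacheFile() calls accumulate under JobContext.CACHE_FILES as a comma-separated list (the paths are invented):

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.JobContext;
  import org.apache.hadoop.mapreduce.filecache.DistributedCache;

  public class CacheFilesDemo {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      DistributedCache.addCacheFile(new URI("hdfs:///dicts/en.txt"), conf);
      DistributedCache.addCacheFile(new URI("hdfs:///dicts/fr.txt"), conf);
      // Prints: hdfs:///dicts/en.txt,hdfs:///dicts/fr.txt
      System.out.println(conf.get(JobContext.CACHE_FILES));
    }
  }
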
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java Fri Sep 18 15:09:48 2009
@@ -27,6 +27,8 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -124,7 +126,7 @@
       }
     }
     // setting the cache size to a default of 10GB
-    long allowedSize = conf.getLong("local.cache.size", DEFAULT_CACHE_SIZE);
+    long allowedSize = conf.getLong(TTConfig.TT_LOCAL_CACHE_SIZE, DEFAULT_CACHE_SIZE);
     if (allowedSize < size) {
       // try some cache deletions
       deleteCache(conf);
@@ -578,7 +580,7 @@
    * The order should be the same as the order in which the archives are added.
    */
   static void setArchiveTimestamps(Configuration conf, String timestamps) {
-    conf.set("mapred.cache.archives.timestamps", timestamps);
+    conf.set(JobContext.CACHE_ARCHIVES_TIMESTAMPS, timestamps);
   }
 
   /**
@@ -589,7 +591,7 @@
    * The order should be the same as the order in which the files are added.
    */
   static void setFileTimestamps(Configuration conf, String timestamps) {
-    conf.set("mapred.cache.files.timestamps", timestamps);
+    conf.set(JobContext.CACHE_FILE_TIMESTAMPS, timestamps);
   }
   
   /**
@@ -599,7 +601,7 @@
    * @param str a comma separated list of local archives
    */
   static void setLocalArchives(Configuration conf, String str) {
-    conf.set("mapred.cache.localArchives", str);
+    conf.set(JobContext.CACHE_LOCALARCHIVES, str);
   }
 
   /**
@@ -609,6 +611,6 @@
    * @param str a comma separated list of local files
    */
   static void setLocalFiles(Configuration conf, String str) {
-    conf.set("mapred.cache.localFiles", str);
+    conf.set(JobContext.CACHE_LOCALFILES, str);
   }
 }

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java Fri Sep 18 15:09:48 2009
@@ -43,6 +43,7 @@
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobTracker;
 import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -92,7 +93,7 @@
       long jobTrackerStartTime) throws IOException {
 
     // Get and create the log folder
-    String logDirLoc = conf.get("hadoop.job.history.location" ,
+    String logDirLoc = conf.get(JTConfig.JT_JOBHISTORY_LOCATION ,
         "file:///" +
         new File(System.getProperty("hadoop.log.dir")).getAbsolutePath()
         + File.separator + "history");
@@ -109,10 +110,10 @@
             logDir.toString());
       }
     }
-    conf.set("hadoop.job.history.location", logDirLoc);
+    conf.set(JTConfig.JT_JOBHISTORY_LOCATION, logDirLoc);
 
     jobHistoryBlockSize = 
-      conf.getLong("mapred.jobtracker.job.history.block.size", 
+      conf.getLong(JTConfig.JT_JOBHISTORY_BLOCK_SIZE, 
           3 * 1024 * 1024);
     
     jobTracker = jt;
@@ -122,7 +123,7 @@
   public void initDone(JobConf conf, FileSystem fs) throws IOException {
     //if completed job history location is set, use that
     String doneLocation =
-      conf.get("mapred.job.tracker.history.completed.location");
+      conf.get(JTConfig.JT_JOBHISTORY_COMPLETED_LOCATION);
     if (doneLocation != null) {
       done = fs.makeQualified(new Path(doneLocation));
       doneDirFs = fs;
@@ -147,7 +148,7 @@
 
     // Start the History Cleaner Thread
     long maxAgeOfHistoryFiles = conf.getLong(
-        "mapreduce.cluster.jobhistory.maxage", DEFAULT_HISTORY_MAX_AGE);
+        JTConfig.JT_JOBHISTORY_MAXAGE, DEFAULT_HISTORY_MAX_AGE);
     historyCleanerThread = new HistoryCleaner(maxAgeOfHistoryFiles);
     historyCleanerThread.start();
   }

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java Fri Sep 18 15:09:48 2009
@@ -28,6 +28,8 @@
  * 
  */
 public class UniqValueCount implements ValueAggregator<Object> {
+  public static final String MAX_NUM_UNIQUE_VALUES = 
+    "mapreduce.aggregate.max.num.unique.values";
 
   private TreeMap<Object, Object> uniqItems = null;
 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorBaseDescriptor.java Fri Sep 18 15:09:48 2009
@@ -23,6 +23,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.JobContext;
 
 /** 
  * This class implements the common functionalities of 
@@ -156,6 +157,6 @@
    * @param conf a configuration object
    */
   public void configure(Configuration conf) {
-    this.inputFile = conf.get("map.input.file");
+    this.inputFile = conf.get(JobContext.MAP_INPUT_FILE);
   }
 }

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorCombiner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorCombiner.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorCombiner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorCombiner.java Fri Sep 18 15:09:48 2009
@@ -45,7 +45,7 @@
     int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
     String type = keyStr.substring(0, pos);
     long uniqCount = context.getConfiguration().
-      getLong("aggregate.max.num.unique.values", Long.MAX_VALUE);
+      getLong(UniqValueCount.MAX_NUM_UNIQUE_VALUES, Long.MAX_VALUE);
     ValueAggregator aggregator = ValueAggregatorBaseDescriptor
       .generateValueAggregator(type, uniqCount);
     for (Text val : values) {

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJob.java Fri Sep 18 15:09:48 2009
@@ -26,6 +26,7 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
@@ -154,9 +155,9 @@
     if (specFile != null) {
       conf.addResource(specFile);
     }
-    String userJarFile = conf.get("user.jar.file");
+    String userJarFile = conf.get(ValueAggregatorJobBase.USER_JAR);
     if (userJarFile != null) {
-      conf.set("mapred.jar", userJarFile);
+      conf.set(JobContext.JAR, userJarFile);
     }
 
     Job theJob = new Job(conf);
@@ -192,10 +193,10 @@
   public static Configuration setAggregatorDescriptors(
       Class<? extends ValueAggregatorDescriptor>[] descriptors) {
     Configuration conf = new Configuration();
-    conf.setInt("aggregator.descriptor.num", descriptors.length);
+    conf.setInt(ValueAggregatorJobBase.DESCRIPTOR_NUM, descriptors.length);
     //specify the aggregator descriptors
     for(int i=0; i< descriptors.length; i++) {
-      conf.set("aggregator.descriptor." + i, 
+      conf.set(ValueAggregatorJobBase.DESCRIPTOR + "." + i, 
                "UserDefined," + descriptors[i].getName());
     }
     return conf;

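setAggregatorDescriptors() uses an indexed-key scheme: one count key plus one numbered key per descriptor (note the "." separator, which the getter in ValueAggregatorJobBase below expects). A sketch with an invented descriptor spec:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorJobBase;

  public class DescriptorKeysDemo {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      String[] specs = {"UserDefined,org.example.MyDescriptor"};  // invented
      conf.setInt(ValueAggregatorJobBase.DESCRIPTOR_NUM, specs.length);
      for (int i = 0; i < specs.length; i++) {
        conf.set(ValueAggregatorJobBase.DESCRIPTOR + "." + i, specs[i]);
      }
      System.out.println(conf.get("mapreduce.aggregate.descriptor.0"));
    }
  }
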
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJobBase.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJobBase.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJobBase.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorJobBase.java Fri Sep 18 15:09:48 2009
@@ -31,7 +31,11 @@
 public class ValueAggregatorJobBase<K1 extends WritableComparable<?>,
                                              V1 extends Writable>
 {
-
+  public static final String DESCRIPTOR = "mapreduce.aggregate.descriptor";
+  public static final String DESCRIPTOR_NUM = 
+    "mapreduce.aggregate.descriptor.num";
+  public static final String USER_JAR = "mapreduce.aggregate.user.jar.file";
+  
   protected static ArrayList<ValueAggregatorDescriptor> aggregatorDescriptorList = null;
 
   public static void setup(Configuration job) {
@@ -54,12 +58,11 @@
 
   protected static ArrayList<ValueAggregatorDescriptor> getAggregatorDescriptors(
       Configuration conf) {
-    String advn = "aggregator.descriptor";
-    int num = conf.getInt(advn + ".num", 0);
+    int num = conf.getInt(DESCRIPTOR_NUM, 0);
     ArrayList<ValueAggregatorDescriptor> retv = 
       new ArrayList<ValueAggregatorDescriptor>(num);
     for (int i = 0; i < num; i++) {
-      String spec = conf.get(advn + "." + i);
+      String spec = conf.get(DESCRIPTOR + "." + i);
       ValueAggregatorDescriptor ad = getValueAggregatorDescriptor(spec, conf);
       if (ad != null) {
         retv.add(ad);

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorReducer.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorReducer.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorReducer.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/ValueAggregatorReducer.java Fri Sep 18 15:09:48 2009
@@ -55,7 +55,7 @@
     keyStr = keyStr.substring(pos + 
                ValueAggregatorDescriptor.TYPE_SEPARATOR.length());
     long uniqCount = context.getConfiguration().
-      getLong("aggregate.max.num.unique.values", Long.MAX_VALUE);
+      getLong(UniqValueCount.MAX_NUM_UNIQUE_VALUES, Long.MAX_VALUE);
     ValueAggregator aggregator = ValueAggregatorBaseDescriptor
       .generateValueAggregator(type, uniqCount);
     for (Text value : values) {

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java Fri Sep 18 15:09:48 2009
@@ -43,58 +43,59 @@
 
   /** The JDBC Driver class name */
   public static final String DRIVER_CLASS_PROPERTY = 
-      "mapred.jdbc.driver.class";
+    "mapreduce.jdbc.driver.class";
   
   /** JDBC Database access URL */
-  public static final String URL_PROPERTY = "mapred.jdbc.url";
+  public static final String URL_PROPERTY = "mapreduce.jdbc.url";
 
   /** User name to access the database */
-  public static final String USERNAME_PROPERTY = "mapred.jdbc.username";
+  public static final String USERNAME_PROPERTY = "mapreduce.jdbc.username";
   
   /** Password to access the database */
-  public static final String PASSWORD_PROPERTY = "mapred.jdbc.password";
+  public static final String PASSWORD_PROPERTY = "mapreduce.jdbc.password";
 
   /** Input table name */
   public static final String INPUT_TABLE_NAME_PROPERTY = 
-      "mapred.jdbc.input.table.name";
+    "mapreduce.jdbc.input.table.name";
 
   /** Field names in the Input table */
   public static final String INPUT_FIELD_NAMES_PROPERTY = 
-      "mapred.jdbc.input.field.names";
+    "mapreduce.jdbc.input.field.names";
 
   /** WHERE clause in the input SELECT statement */
   public static final String INPUT_CONDITIONS_PROPERTY = 
-      "mapred.jdbc.input.conditions";
+    "mapreduce.jdbc.input.conditions";
   
   /** ORDER BY clause in the input SELECT statement */
   public static final String INPUT_ORDER_BY_PROPERTY = 
-      "mapred.jdbc.input.orderby";
+    "mapreduce.jdbc.input.orderby";
   
  /** Whole input query, excluding LIMIT...OFFSET */
-  public static final String INPUT_QUERY = "mapred.jdbc.input.query";
+  public static final String INPUT_QUERY = "mapreduce.jdbc.input.query";
   
   /** Input query to get the count of records */
   public static final String INPUT_COUNT_QUERY = 
-      "mapred.jdbc.input.count.query";
+    "mapreduce.jdbc.input.count.query";
   
   /** Input query to get the max and min values of the jdbc.input.query */
   public static final String INPUT_BOUNDING_QUERY =
       "mapred.jdbc.input.bounding.query";
   
   /** Class name implementing DBWritable which will hold input tuples */
-  public static final String INPUT_CLASS_PROPERTY = "mapred.jdbc.input.class";
+  public static final String INPUT_CLASS_PROPERTY = 
+    "mapreduce.jdbc.input.class";
 
   /** Output table name */
   public static final String OUTPUT_TABLE_NAME_PROPERTY = 
-      "mapred.jdbc.output.table.name";
+    "mapreduce.jdbc.output.table.name";
 
   /** Field names in the Output table */
   public static final String OUTPUT_FIELD_NAMES_PROPERTY = 
-      "mapred.jdbc.output.field.names";  
+    "mapreduce.jdbc.output.field.names";  
 
   /** Number of fields in the Output table */
   public static final String OUTPUT_FIELD_COUNT_PROPERTY = 
-      "mapred.jdbc.output.field.count";  
+    "mapreduce.jdbc.output.field.count";  
   
   /**
    * Sets the DB access related fields in the {@link Configuration}.  

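All the JDBC keys move from mapred.jdbc.* to mapreduce.jdbc.*. The javadoc that closes the hunk introduces the configureDB() helper, which fills these fields in one call; a hedged usage sketch with invented connection details, assuming the configureDB(conf, driver, url, user, password) overload that javadoc describes:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;

  public class JdbcKeysDemo {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      DBConfiguration.configureDB(conf, "com.mysql.jdbc.Driver",
          "jdbc:mysql://db.example.com/grades", "user", "secret");
      // Prints: jdbc:mysql://db.example.com/grades
      System.out.println(conf.get(DBConfiguration.URL_PROPERTY));
    }
  }
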
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java Fri Sep 18 15:09:48 2009
@@ -226,7 +226,7 @@
       results.next();
 
       long count = results.getLong(1);
-      int chunks = job.getConfiguration().getInt("mapred.map.tasks", 1);
+      int chunks = job.getConfiguration().getInt(JobContext.NUM_MAPS, 1);
       long chunkSize = (count / chunks);
 
       results.close();

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionHelper.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionHelper.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionHelper.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionHelper.java Fri Sep 18 15:09:48 2009
@@ -32,9 +32,9 @@
  * fields are from the value only. Otherwise, the fields are the union of those
  * from the key and those from the value.
  * 
- * The field separator is under attribute "mapred.data.field.separator"
+ * The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
  * 
- * The map output field list spec is under attribute "map.output.key.value.fields.spec".
+ * The map output field list spec is under attribute "mapreduce.fieldsel.map.output.key.value.fields.spec".
  * The value is expected to be like "keyFieldsSpec:valueFieldsSpec"
  * key/valueFieldsSpec are comma (,) separated field spec: fieldSpec,fieldSpec,fieldSpec ...
  * Each field spec can be a simple number (e.g. 5) specifying a specific field, or a range
@@ -45,7 +45,7 @@
  * Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields 4,3,0 and 1 for keys,
  * and use fields 6,5,1,2,3,7 and above for values.
  * 
- * The reduce output field list spec is under attribute "reduce.output.key.value.fields.spec".
+ * The reduce output field list spec is under attribute "mapreduce.fieldsel.reduce.output.key.value.fields.spec".
  * 
  * The reducer extracts output key/value pairs in a similar manner, except that
  * the key is never ignored.
@@ -54,6 +54,13 @@
 public class FieldSelectionHelper {
 
   public static Text emptyText = new Text("");
+  public static final String DATA_FIELD_SEPERATOR = 
+    "mapreduce.fieldsel.data.field.separator";
+  public static final String MAP_OUTPUT_KEY_VALUE_SPEC = 
+    "mapreduce.fieldsel.map.output.key.value.fields.spec";
+  public static final String REDUCE_OUTPUT_KEY_VALUE_SPEC = 
+    "mapreduce.fieldsel.reduce.output.key.value.fields.spec";
+
 
   /**
    * Extract the actual field numbers from the given field specs.

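The field-spec syntax documented above can be driven entirely from job configuration; a sketch using the new constants, with the spec string taken from the javadoc example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionHelper;

    public class FieldSelExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Split fields on "-" instead of the default tab.
        conf.set(FieldSelectionHelper.DATA_FIELD_SEPERATOR, "-");
        // Keys from fields 4,3,0,1; values from 6,5,1-3 and 7 onwards.
        conf.set(FieldSelectionHelper.MAP_OUTPUT_KEY_VALUE_SPEC,
                 "4,3,0,1:6,5,1-3,7-");
        // Reducer keeps all fields as values ("0-:" is the default).
        conf.set(FieldSelectionHelper.REDUCE_OUTPUT_KEY_VALUE_SPEC, "0-:");
      }
    }
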
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionMapper.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionMapper.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionMapper.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionMapper.java Fri Sep 18 15:09:48 2009
@@ -39,10 +39,10 @@
  * fields are from the value only. Otherwise, the fields are the union of those
  * from the key and those from the value.
  * 
- * The field separator is under attribute "mapred.data.field.separator"
+ * The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
  * 
  * The map output field list spec is under attribute 
- * "map.output.key.value.fields.spec". The value is expected to be like
+ * "mapreduce.fieldsel.mapreduce.fieldsel.map.output.key.value.fields.spec". The value is expected to be like
  * "keyFieldsSpec:valueFieldsSpec" key/valueFieldsSpec are comma (,) separated
  * field spec: fieldSpec,fieldSpec,fieldSpec ... Each field spec can be a 
  * simple number (e.g. 5) specifying a specific field, or a range (like 2-5)
@@ -73,9 +73,10 @@
   public void setup(Context context) 
       throws IOException, InterruptedException {
     Configuration conf = context.getConfiguration();
-    this.fieldSeparator = conf.get("mapred.data.field.separator", "\t");
+    this.fieldSeparator = 
+      conf.get(FieldSelectionHelper.DATA_FIELD_SEPERATOR, "\t");
     this.mapOutputKeyValueSpec = 
-      conf.get("map.output.key.value.fields.spec", "0-:");
+      conf.get(FieldSelectionHelper.MAP_OUTPUT_KEY_VALUE_SPEC, "0-:");
     try {
       this.ignoreInputKey = TextInputFormat.class.getCanonicalName().equals(
         context.getInputFormatClass().getCanonicalName());

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionReducer.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionReducer.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionReducer.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/fieldsel/FieldSelectionReducer.java Fri Sep 18 15:09:48 2009
@@ -38,10 +38,10 @@
  * the reduce output values. The fields are the union of those from the key
  * and those from the value.
  * 
- * The field separator is under attribute "mapred.data.field.separator"
+ * The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
  * 
  * The reduce output field list spec is under attribute 
- * "reduce.output.key.value.fields.spec". The value is expected to be like
+ * "mapreduce.fieldsel.mapreduce.fieldsel.reduce.output.key.value.fields.spec". The value is expected to be like
  * "keyFieldsSpec:valueFieldsSpec" key/valueFieldsSpec are comma (,) 
  * separated field spec: fieldSpec,fieldSpec,fieldSpec ... Each field spec
  * can be a simple number (e.g. 5) specifying a specific field, or a range
@@ -71,10 +71,11 @@
       throws IOException, InterruptedException {
     Configuration conf = context.getConfiguration();
     
-    this.fieldSeparator = conf.get("mapred.data.field.separator", "\t");
+    this.fieldSeparator = 
+      conf.get(FieldSelectionHelper.DATA_FIELD_SEPERATOR, "\t");
     
     this.reduceOutputKeyValueSpec = 
-      conf.get("reduce.output.key.value.fields.spec", "0-:");
+      conf.get(FieldSelectionHelper.REDUCE_OUTPUT_KEY_VALUE_SPEC, "0-:");
     
     allReduceValueFieldsFrom = FieldSelectionHelper.parseOutputKeyValueSpec(
       reduceOutputKeyValueSpec, reduceOutputKeyFieldList,

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java Fri Sep 18 15:09:48 2009
@@ -68,6 +68,10 @@
 public abstract class CombineFileInputFormat<K, V>
   extends FileInputFormat<K, V> {
 
+  public static final String SPLIT_MINSIZE_PERNODE = 
+    "mapreduce.input.fileinputformat.split.minsize.per.node";
+  public static final String SPLIT_MINSIZE_PERRACK = 
+    "mapreduce.input.fileinputformat.split.minsize.per.rack";
   // ability to limit the size of a single split
   private long maxSplitSize = 0;
   private long minSplitSizeNode = 0;
@@ -151,17 +155,17 @@
     if (minSplitSizeNode != 0) {
       minSizeNode = minSplitSizeNode;
     } else {
-      minSizeNode = conf.getLong("mapred.min.split.size.per.node", 0);
+      minSizeNode = conf.getLong(SPLIT_MINSIZE_PERNODE, 0);
     }
     if (minSplitSizeRack != 0) {
       minSizeRack = minSplitSizeRack;
     } else {
-      minSizeRack = conf.getLong("mapred.min.split.size.per.rack", 0);
+      minSizeRack = conf.getLong(SPLIT_MINSIZE_PERRACK, 0);
     }
     if (maxSplitSize != 0) {
       maxSize = maxSplitSize;
     } else {
-      maxSize = conf.getLong("mapred.max.split.size", 0);
+      maxSize = conf.getLong("mapreduce.input.fileinputformat.split.maxsize", 0);
     }
     if (minSizeNode != 0 && maxSize != 0 && minSizeNode > maxSize) {
       throw new IOException("Minimum split size pernode " + minSizeNode +

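A sketch of setting the three split-size knobs from job setup with the constants introduced here; the byte values are arbitrary:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

    public class CombineSplitSizes {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Per-node and per-rack minimums use the CombineFileInputFormat keys...
        conf.setLong(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE,
                     32 * 1024 * 1024L);
        conf.setLong(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK,
                     64 * 1024 * 1024L);
        // ...while the overall maximum is the FileInputFormat key.
        conf.setLong(FileInputFormat.SPLIT_MAXSIZE, 128 * 1024 * 1024L);
      }
    }
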
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java Fri Sep 18 15:09:48 2009
@@ -140,9 +140,9 @@
 
       Configuration conf = context.getConfiguration();
       // setup some helper config variables.
-      conf.set("map.input.file", split.getPath(idx).toString());
-      conf.setLong("map.input.start", split.getOffset(idx));
-      conf.setLong("map.input.length", split.getLength(idx));
+      conf.set(JobContext.MAP_INPUT_FILE, split.getPath(idx).toString());
+      conf.setLong(JobContext.MAP_INPUT_START, split.getOffset(idx));
+      conf.setLong(JobContext.MAP_INPUT_PATH, split.getLength(idx));
     } catch (Exception e) {
       throw new RuntimeException (e);
     }

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java Fri Sep 18 15:09:48 2009
@@ -52,6 +52,14 @@
   public static final String COUNTER_GROUP = 
                                 "FileInputFormatCounters";
   public static final String BYTES_READ = "BYTES_READ";
+  public static final String INPUT_DIR = 
+    "mapreduce.input.fileinputformat.inputdir";
+  public static final String SPLIT_MAXSIZE = 
+    "mapreduce.input.fileinputformat.split.maxsize";
+  public static final String SPLIT_MINSIZE = 
+    "mapreduce.input.fileinputformat.split.minsize";
+  public static final String PATHFILTER_CLASS = 
+    "mapreduce.input.pathFilter.class";
 
   private static final Log LOG = LogFactory.getLog(FileInputFormat.class);
 
@@ -117,7 +125,7 @@
    */
   public static void setInputPathFilter(Job job,
                                         Class<? extends PathFilter> filter) {
-    job.getConfiguration().setClass("mapred.input.pathFilter.class", filter, 
+    job.getConfiguration().setClass(PATHFILTER_CLASS, filter, 
                                     PathFilter.class);
   }
 
@@ -128,7 +136,7 @@
    */
   public static void setMinInputSplitSize(Job job,
                                           long size) {
-    job.getConfiguration().setLong("mapred.min.split.size", size);
+    job.getConfiguration().setLong(SPLIT_MINSIZE, size);
   }
 
   /**
@@ -137,7 +145,7 @@
    * @return the minimum number of bytes that can be in a split
    */
   public static long getMinSplitSize(JobContext job) {
-    return job.getConfiguration().getLong("mapred.min.split.size", 1L);
+    return job.getConfiguration().getLong(SPLIT_MINSIZE, 1L);
   }
 
   /**
@@ -147,7 +155,7 @@
    */
   public static void setMaxInputSplitSize(Job job,
                                           long size) {
-    job.getConfiguration().setLong("mapred.max.split.size", size);
+    job.getConfiguration().setLong(SPLIT_MAXSIZE, size);
   }
 
   /**
@@ -156,7 +164,7 @@
    * @return the maximum number of bytes a split can include
    */
   public static long getMaxSplitSize(JobContext context) {
-    return context.getConfiguration().getLong("mapred.max.split.size", 
+    return context.getConfiguration().getLong(SPLIT_MAXSIZE, 
                                               Long.MAX_VALUE);
   }
 
@@ -167,7 +175,7 @@
    */
   public static PathFilter getInputPathFilter(JobContext context) {
     Configuration conf = context.getConfiguration();
-    Class<?> filterClass = conf.getClass("mapred.input.pathFilter.class", null,
+    Class<?> filterClass = conf.getClass(PATHFILTER_CLASS, null,
         PathFilter.class);
     return (filterClass != null) ?
         (PathFilter) ReflectionUtils.newInstance(filterClass, conf) : null;
@@ -344,7 +352,7 @@
       path = inputPaths[i].makeQualified(fs);
       str.append(StringUtils.escapeString(path.toString()));
     }
-    conf.set("mapred.input.dir", str.toString());
+    conf.set(INPUT_DIR, str.toString());
   }
 
   /**
@@ -360,8 +368,8 @@
     FileSystem fs = FileSystem.get(conf);
     path = path.makeQualified(fs);
     String dirStr = StringUtils.escapeString(path.toString());
-    String dirs = conf.get("mapred.input.dir");
-    conf.set("mapred.input.dir", dirs == null ? dirStr : dirs + "," + dirStr);
+    String dirs = conf.get(INPUT_DIR);
+    conf.set(INPUT_DIR, dirs == null ? dirStr : dirs + "," + dirStr);
   }
   
   // This method escapes commas in the glob pattern of the given paths.
@@ -410,7 +418,7 @@
    * @return the list of input {@link Path}s for the map-reduce job.
    */
   public static Path[] getInputPaths(JobContext context) {
-    String dirs = context.getConfiguration().get("mapred.input.dir", "");
+    String dirs = context.getConfiguration().get(INPUT_DIR, "");
     String [] list = StringUtils.split(dirs);
     Path[] result = new Path[list.length];
     for (int i = 0; i < list.length; i++) {

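Typical input wiring through the static helpers in this hunk might look like the following sketch; the path and split sizes are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

    public class InputSetup {
      public static void main(String[] args) throws Exception {
        Job job = new Job(new Configuration());
        // Appends to INPUT_DIR, comma-separated.
        FileInputFormat.addInputPath(job, new Path("/data/in"));
        FileInputFormat.setMinInputSplitSize(job, 1L);                 // SPLIT_MINSIZE
        FileInputFormat.setMaxInputSplitSize(job, 256 * 1024 * 1024L); // SPLIT_MAXSIZE
      }
    }
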
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/KeyValueLineRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/KeyValueLineRecordReader.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/KeyValueLineRecordReader.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/KeyValueLineRecordReader.java Fri Sep 18 15:09:48 2009
@@ -29,10 +29,12 @@
 /**
  * This class treats a line in the input as a key/value pair separated by a 
  * separator character. The separator can be specified in config file 
- * under the attribute name key.value.separator.in.input.line. The default
+ * under the attribute name mapreduce.input.keyvaluelinerecordreader.key.value.separator. The default
  * separator is the tab character ('\t').
  */
 public class KeyValueLineRecordReader extends RecordReader<Text, Text> {
+  public static final String KEY_VALUE_SEPERATOR = 
+    "mapreduce.input.keyvaluelinerecordreader.key.value.separator";
   
   private final LineRecordReader lineRecordReader;
 
@@ -48,7 +50,7 @@
     throws IOException {
     
     lineRecordReader = new LineRecordReader();
-    String sepStr = conf.get("key.value.separator.in.input.line", "\t");
+    String sepStr = conf.get(KEY_VALUE_SEPERATOR, "\t");
     this.separator = (byte) sepStr.charAt(0);
   }
 

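Overriding the separator now goes through the public constant; a sketch assuming comma-separated input:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.lib.input.KeyValueLineRecordReader;

    public class SeparatorSetup {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Only the first character of the value is used as the separator.
        conf.set(KeyValueLineRecordReader.KEY_VALUE_SEPERATOR, ",");
      }
    }
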
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java Fri Sep 18 15:09:48 2009
@@ -48,6 +48,8 @@
  */
 public class LineRecordReader extends RecordReader<LongWritable, Text> {
   private static final Log LOG = LogFactory.getLog(LineRecordReader.class);
+  public static final String MAX_LINE_LENGTH = 
+    "mapreduce.input.linerecordreader.line.maxlength";
 
   private CompressionCodecFactory compressionCodecs = null;
   private long start;
@@ -69,8 +71,7 @@
     inputByteCounter = ((MapContext)context).getCounter(
       FileInputFormat.COUNTER_GROUP, FileInputFormat.BYTES_READ);
     Configuration job = context.getConfiguration();
-    this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength",
-                                    Integer.MAX_VALUE);
+    this.maxLineLength = job.getInt(MAX_LINE_LENGTH, Integer.MAX_VALUE);
     start = split.getStart();
     end = start + split.getLength();
     final Path file = split.getPath();

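A sketch of capping record length through the new constant; the 1 MB limit is arbitrary:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;

    public class LineLengthCap {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Lines longer than this many bytes are skipped by the reader.
        conf.setInt(LineRecordReader.MAX_LINE_LENGTH, 1024 * 1024);
      }
    }
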
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/MultipleInputs.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/MultipleInputs.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/MultipleInputs.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/MultipleInputs.java Fri Sep 18 15:09:48 2009
@@ -34,6 +34,11 @@
  * a different {@link InputFormat} and {@link Mapper} for each path 
  */
 public class MultipleInputs {
+  public static final String DIR_FORMATS = 
+    "mapreduce.input.multipleinputs.dir.formats";
+  public static final String DIR_MAPPERS = 
+    "mapreduce.input.multipleinputs.dir.mappers";
+  
   /**
    * Add a {@link Path} with a custom {@link InputFormat} to the list of
    * inputs for the map-reduce job.
@@ -48,8 +53,8 @@
     String inputFormatMapping = path.toString() + ";"
        + inputFormatClass.getName();
     Configuration conf = job.getConfiguration();
-    String inputFormats = conf.get("mapred.input.dir.formats");
-    conf.set("mapred.input.dir.formats",
+    String inputFormats = conf.get(DIR_FORMATS);
+    conf.set(DIR_FORMATS,
        inputFormats == null ? inputFormatMapping : inputFormats + ","
            + inputFormatMapping);
 
@@ -73,8 +78,8 @@
     addInputPath(job, path, inputFormatClass);
     Configuration conf = job.getConfiguration();
     String mapperMapping = path.toString() + ";" + mapperClass.getName();
-    String mappers = conf.get("mapred.input.dir.mappers");
-    conf.set("mapred.input.dir.mappers", mappers == null ? mapperMapping
+    String mappers = conf.get(DIR_MAPPERS);
+    conf.set(DIR_MAPPERS, mappers == null ? mapperMapping
        : mappers + "," + mapperMapping);
 
     job.setMapperClass(DelegatingMapper.class);
@@ -92,7 +97,7 @@
   static Map<Path, InputFormat> getInputFormatMap(JobContext job) {
     Map<Path, InputFormat> m = new HashMap<Path, InputFormat>();
     Configuration conf = job.getConfiguration();
-    String[] pathMappings = conf.get("mapred.input.dir.formats").split(",");
+    String[] pathMappings = conf.get(DIR_FORMATS).split(",");
     for (String pathMapping : pathMappings) {
       String[] split = pathMapping.split(";");
       InputFormat inputFormat;
@@ -119,12 +124,12 @@
   static Map<Path, Class<? extends Mapper>> 
       getMapperTypeMap(JobContext job) {
     Configuration conf = job.getConfiguration();
-    if (conf.get("mapred.input.dir.mappers") == null) {
+    if (conf.get(DIR_MAPPERS) == null) {
       return Collections.emptyMap();
     }
     Map<Path, Class<? extends Mapper>> m = 
       new HashMap<Path, Class<? extends Mapper>>();
-    String[] pathMappings = conf.get("mapred.input.dir.mappers").split(",");
+    String[] pathMappings = conf.get(DIR_MAPPERS).split(",");
     for (String pathMapping : pathMappings) {
       String[] split = pathMapping.split(";");
       Class<? extends Mapper> mapClass;

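A sketch of feeding two directories with different formats through MultipleInputs; the paths are placeholders and the identity Mapper stands in for real mapper classes:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.lib.input.MultipleInputs;
    import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
    import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

    public class MultiInputSetup {
      public static void main(String[] args) throws Exception {
        Job job = new Job(new Configuration());
        // Each call appends a "path;class" mapping to DIR_FORMATS/DIR_MAPPERS.
        MultipleInputs.addInputPath(job, new Path("/data/text"),
            TextInputFormat.class, Mapper.class);
        MultipleInputs.addInputPath(job, new Path("/data/seq"),
            SequenceFileInputFormat.class, Mapper.class);
      }
    }
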
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/NLineInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/NLineInputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/NLineInputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/NLineInputFormat.java Fri Sep 18 15:09:48 2009
@@ -56,6 +56,8 @@
  */
 
 public class NLineInputFormat extends FileInputFormat<LongWritable, Text> { 
+  public static final String LINES_PER_MAP = 
+    "mapreduce.input.lineinputformat.linespermap";
 
   public RecordReader<LongWritable, Text> createRecordReader(
       InputSplit genericSplit, TaskAttemptContext context) 
@@ -136,8 +138,7 @@
    * @param numLines the number of lines per split
    */
   public static void setNumLinesPerSplit(Job job, int numLines) {
-    job.getConfiguration().setInt(
-      "mapred.line.input.format.linespermap", numLines);
+    job.getConfiguration().setInt(LINES_PER_MAP, numLines);
   }
 
   /**
@@ -146,7 +147,6 @@
    * @return the number of lines per split
    */
   public static int getNumLinesPerSplit(JobContext job) {
-    return job.getConfiguration().getInt(
-      "mapred.line.input.format.linespermap", 1);
+    return job.getConfiguration().getInt(LINES_PER_MAP, 1);
   }
 }

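A sketch of the renamed lines-per-map knob in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;

    public class NLineSetup {
      public static void main(String[] args) throws Exception {
        Job job = new Job(new Configuration());
        job.setInputFormatClass(NLineInputFormat.class);
        // Each map task receives 100 input lines (LINES_PER_MAP).
        NLineInputFormat.setNumLinesPerSplit(job, 100);
      }
    }
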
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFilter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFilter.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFilter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileInputFilter.java Fri Sep 18 15:09:48 2009
@@ -46,10 +46,12 @@
     extends SequenceFileInputFormat<K, V> {
   public static final Log LOG = LogFactory.getLog(FileInputFormat.class);
   
-  final public static String FILTER_CLASS = "sequencefile.filter.class";
-  final private static String FILTER_FREQUENCY
-    = "sequencefile.filter.frequency";
-  final private static String FILTER_REGEX = "sequencefile.filter.regex";
+  public static final String FILTER_CLASS = 
+    "mapreduce.input.sequencefileinputfilter.class";
+  public static final String FILTER_FREQUENCY = 
+    "mapreduce.input.sequencefileinputfilter.frequency";
+  public static final String FILTER_REGEX = 
+    "mapreduce.input.sequencefileinputfilter.regex";
     
   public SequenceFileInputFilter() {
   }
@@ -166,7 +168,7 @@
      * @param conf configuration
      */
     public void setConf(Configuration conf) {
-      this.frequency = conf.getInt("sequencefile.filter.frequency", 10);
+      this.frequency = conf.getInt(FILTER_FREQUENCY, 10);
       if (this.frequency <= 0) {
         throw new RuntimeException(
           "Negative "+FILTER_FREQUENCY + ": " + this.frequency);

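A sketch of selecting a filter via the now-public constants; it assumes the Filter interface and PercentFilter implementation nested in this class, as in the old mapred version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter;

    public class FilterSetup {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setClass(SequenceFileInputFilter.FILTER_CLASS,
            SequenceFileInputFilter.PercentFilter.class,
            SequenceFileInputFilter.Filter.class);
        // Accept one record in ten.
        conf.setInt(SequenceFileInputFilter.FILTER_FREQUENCY, 10);
      }
    }
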
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java Fri Sep 18 15:09:48 2009
@@ -48,7 +48,7 @@
   // A job will be in one of the following states
   public static enum State {SUCCESS, WAITING, RUNNING, READY, FAILED,
                             DEPENDENT_FAILED}; 
-	
+  public static final String CREATE_DIR = 
+    "mapreduce.jobcontrol.createdir.ifnotexist";
   private State state;
   private String controlID;     // assigned and used by JobControl class
   private Job job;               // mapreduce job to be executed.
@@ -303,7 +303,7 @@
   protected synchronized void submit() {
     try {
       Configuration conf = job.getConfiguration();
-      if (conf.getBoolean("create.empty.dir.if.nonexist", false)) {
+      if (conf.getBoolean(CREATE_DIR, false)) {
         FileSystem fs = FileSystem.get(conf);
         Path inputPaths[] = FileInputFormat.getInputPaths(job);
         for (int i = 0; i < inputPaths.length; i++) {

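A sketch of the renamed flag in use; with it set, submit() creates any missing input directories before submission:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob;

    public class CreateDirFlag {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Checked by ControlledJob.submit() before submitting the job.
        conf.setBoolean(ControlledJob.CREATE_DIR, true);
      }
    }
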
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/CompositeInputFormat.java Fri Sep 18 15:09:48 2009
@@ -121,7 +121,7 @@
   public List<InputSplit> getSplits(JobContext job) 
       throws IOException, InterruptedException {
     setFormat(job.getConfiguration());
-    job.getConfiguration().setLong("mapred.min.split.size", Long.MAX_VALUE);
+    job.getConfiguration().setLong("mapreduce.input.fileinputformat.split.minsize", Long.MAX_VALUE);
     return root.getSplits(job);
   }
 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/Parser.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/Parser.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/Parser.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/join/Parser.java Fri Sep 18 15:09:48 2009
@@ -322,7 +322,7 @@
         }
         Configuration conf = getConf(taskContext.getConfiguration());
         TaskAttemptContext context = new TaskAttemptContext(conf, 
-          TaskAttemptID.forName(conf.get("mapred.task.id")));
+          TaskAttemptID.forName(conf.get(JobContext.TASK_ATTEMPT_ID)));
         return rrCstrMap.get(ident).newInstance(id,
             inf.createRecordReader(split, context), cmpcl);
       } catch (IllegalAccessException e) {

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/map/MultithreadedMapper.java Fri Sep 18 15:09:48 2009
@@ -56,6 +56,9 @@
   extends Mapper<K1, V1, K2, V2> {
 
   private static final Log LOG = LogFactory.getLog(MultithreadedMapper.class);
+  public static final String NUM_THREADS = 
+    "mapreduce.mapper.multithreadedmapper.threads";
+  public static final String MAP_CLASS = 
+    "mapreduce.mapper.multithreadedmapper.mapclass";
+  
   private Class<? extends Mapper<K1,V1,K2,V2>> mapClass;
   private Context outer;
   private List<MapRunner> runners;
@@ -66,8 +69,7 @@
    * @return the number of threads
    */
   public static int getNumberOfThreads(JobContext job) {
-    return job.getConfiguration().
-            getInt("mapred.map.multithreadedrunner.threads", 10);
+    return job.getConfiguration().getInt(NUM_THREADS, 10);
   }
 
   /**
@@ -76,8 +78,7 @@
    * @param threads the new number of threads
    */
   public static void setNumberOfThreads(Job job, int threads) {
-    job.getConfiguration().setInt("mapred.map.multithreadedrunner.threads", 
-                                  threads);
+    job.getConfiguration().setInt(NUM_THREADS, threads);
   }
 
   /**
@@ -93,8 +94,7 @@
   public static <K1,V1,K2,V2>
   Class<Mapper<K1,V1,K2,V2>> getMapperClass(JobContext job) {
     return (Class<Mapper<K1,V1,K2,V2>>) 
-         job.getConfiguration().getClass("mapred.map.multithreadedrunner.class",
-                                         Mapper.class);
+      job.getConfiguration().getClass(MAP_CLASS, Mapper.class);
   }
   
   /**
@@ -113,8 +113,7 @@
       throw new IllegalArgumentException("Can't have recursive " + 
                                          "MultithreadedMapper instances.");
     }
-    job.getConfiguration().setClass("mapred.map.multithreadedrunner.class",
-                                    cls, Mapper.class);
+    job.getConfiguration().setClass(MAP_CLASS, cls, Mapper.class);
   }
 
   /**

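A sketch of wrapping a mapper with MultithreadedMapper under the new keys; InnerMapper is a hypothetical placeholder:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.Mapper;
    import org.apache.hadoop.mapreduce.lib.map.MultithreadedMapper;

    public class ThreadedSetup {
      // Hypothetical single-threaded mapper to be wrapped.
      public static class InnerMapper
          extends Mapper<LongWritable, Text, Text, LongWritable> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
          context.write(value, key);  // emit line text -> file offset
        }
      }

      public static void main(String[] args) throws Exception {
        Job job = new Job(new Configuration());
        job.setMapperClass(MultithreadedMapper.class);
        // Stored under MAP_CLASS and NUM_THREADS respectively.
        MultithreadedMapper.setMapperClass(job, InnerMapper.class);
        MultithreadedMapper.setNumberOfThreads(job, 8);
      }
    }
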
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/map/RegexMapper.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/map/RegexMapper.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/map/RegexMapper.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/map/RegexMapper.java Fri Sep 18 15:09:48 2009
@@ -31,13 +31,15 @@
 /** A {@link Mapper} that extracts text matching a regular expression. */
 public class RegexMapper<K> extends Mapper<K, Text, Text, LongWritable> {
 
+  public static final String PATTERN = "mapreduce.mapper.regex";
+  public static final String GROUP = "mapreduce.mapper.regexmapper.group";
   private Pattern pattern;
   private int group;
 
   public void setup(Context context) {
     Configuration conf = context.getConfiguration();
-    pattern = Pattern.compile(conf.get("mapred.mapper.regex"));
-    group = conf.getInt("mapred.mapper.regex.group", 0);
+    pattern = Pattern.compile(conf.get(PATTERN));
+    group = conf.getInt(GROUP, 0);
   }
 
   public void map(K key, Text value,

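A sketch of driving RegexMapper through the renamed keys; the pattern is an arbitrary example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.map.RegexMapper;

    public class RegexSetup {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Count matches of "error" or "warn"; group 0 is the whole match.
        conf.set(RegexMapper.PATTERN, "(error|warn)");
        conf.setInt(RegexMapper.GROUP, 0);
        Job job = new Job(conf);
        job.setMapperClass(RegexMapper.class);
      }
    }
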
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java Fri Sep 18 15:09:48 2009
@@ -33,7 +33,7 @@
 import org.apache.hadoop.util.StringUtils;
 
 /** An {@link OutputCommitter} that commits files specified 
- * in job output directory i.e. ${mapred.output.dir}. 
+ * in job output directory i.e. ${mapreduce.output.fileoutputformat.outputdir}. 
  **/
 public class FileOutputCommitter extends OutputCommitter {
 


