hadoop-mapreduce-commits mailing list archives

From sha...@apache.org
Subject svn commit: r816664 [5/9] - in /hadoop/mapreduce/trunk: ./ conf/ src/benchmarks/gridmix/ src/benchmarks/gridmix/pipesort/ src/benchmarks/gridmix2/ src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/ src/c++/pipes/impl/ src/c++/task-controller...
Date Fri, 18 Sep 2009 15:10:02 GMT
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java Fri Sep 18 15:09:48 2009
@@ -326,13 +326,13 @@
     (numMapTasks + numReduceTasks + 10);
     
     this.slowTaskThreshold = Math.max(0.0f,
-        conf.getFloat("mapred.speculative.execution.slowTaskThreshold",1.0f));
+        conf.getFloat(JobContext.SPECULATIVE_SLOWTASK_THRESHOLD,1.0f));
     this.speculativeCap = conf.getFloat(
-        "mapred.speculative.execution.speculativeCap",0.1f);
+        JobContext.SPECULATIVECAP,0.1f);
     this.slowNodeThreshold = conf.getFloat(
-        "mapred.speculative.execution.slowNodeThreshold",1.0f);
+        JobContext.SPECULATIVE_SLOWNODE_THRESHOLD,1.0f);
     this.jobSetupCleanupNeeded = conf.getBoolean(
-        "mapred.committer.job.setup.cleanup.needed", true);
+        JobContext.SETUP_CLEANUP_NEEDED, true);
     if (tracker != null) { // Some mock tests have null tracker
       this.jobHistory = tracker.getJobHistory();
     }
@@ -413,11 +413,11 @@
     this.nonRunningReduces = new LinkedList<TaskInProgress>();    
     this.runningReduces = new LinkedHashSet<TaskInProgress>();
     this.slowTaskThreshold = Math.max(0.0f,
-        conf.getFloat("mapred.speculative.execution.slowTaskThreshold",1.0f));
+        conf.getFloat(JobContext.SPECULATIVE_SLOWTASK_THRESHOLD,1.0f));
     this.speculativeCap = conf.getFloat(
-        "mapred.speculative.execution.speculativeCap",0.1f);
+        JobContext.SPECULATIVECAP,0.1f);
     this.slowNodeThreshold = conf.getFloat(
-        "mapred.speculative.execution.slowNodeThreshold",1.0f);
+        JobContext.SPECULATIVE_SLOWNODE_THRESHOLD,1.0f);
 
   }
 
@@ -595,7 +595,7 @@
     // we should start scheduling reduces
     completedMapsForReduceSlowstart = 
       (int)Math.ceil(
-          (conf.getFloat("mapred.reduce.slowstart.completed.maps", 
+          (conf.getFloat(JobContext.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 
                          DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART) * 
            numMapTasks));
     
@@ -655,7 +655,7 @@
 
   Job.RawSplit[] createSplits() throws IOException {
     DataInputStream splitFile =
-      fs.open(new Path(conf.get("mapred.job.split.file")));
+      fs.open(new Path(conf.get(JobContext.SPLIT_FILE)));
     Job.RawSplit[] splits;
     try {
       splits = Job.readSplitFile(splitFile);

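The hunk above is representative of the whole commit: raw configuration strings are replaced by named constants, with the defaults left unchanged. A minimal sketch of the resulting read pattern, assuming the org.apache.hadoop.mapreduce.JobContext class that defines these constants:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.JobContext;

    public class SpeculativeConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Same defaults as JobInProgress; the threshold is clamped to >= 0.
        float slowTaskThreshold = Math.max(0.0f,
            conf.getFloat(JobContext.SPECULATIVE_SLOWTASK_THRESHOLD, 1.0f));
        float speculativeCap = conf.getFloat(JobContext.SPECULATIVECAP, 0.1f);
        float slowNodeThreshold =
            conf.getFloat(JobContext.SPECULATIVE_SLOWNODE_THRESHOLD, 1.0f);
        System.out.println(slowTaskThreshold + " " + speculativeCap + " "
            + slowNodeThreshold);
      }
    }
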
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobQueueTaskScheduler.java Fri Sep 18 15:09:48 2009
@@ -25,6 +25,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
 
 /**
@@ -71,7 +72,7 @@
   @Override
   public synchronized void setConf(Configuration conf) {
     super.setConf(conf);
-    padFraction = conf.getFloat("mapred.jobtracker.taskalloc.capacitypad", 
+    padFraction = conf.getFloat(JTConfig.JT_TASK_ALLOC_PAD_FRACTION, 
                                  0.01f);
     this.eagerTaskInitializationListener =
       new EagerTaskInitializationListener(conf);

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java Fri Sep 18 15:09:48 2009
@@ -101,6 +101,8 @@
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.mapreduce.util.ConfigUtil;
 
 /*******************************************************
  * JobTracker is the central location for submitting and 
@@ -109,11 +111,10 @@
  *******************************************************/
 public class JobTracker implements MRConstants, InterTrackerProtocol,
     ClientProtocol, TaskTrackerManager,
-    RefreshAuthorizationPolicyProtocol, AdminOperationsProtocol {
+    RefreshAuthorizationPolicyProtocol, AdminOperationsProtocol, JTConfig {
 
   static{
-    Configuration.addDefaultResource("mapred-default.xml");
-    Configuration.addDefaultResource("mapred-site.xml");
+    ConfigUtil.loadResources();
   }
 
   private final long tasktrackerExpiryInterval;
@@ -460,7 +461,7 @@
         //test cases can set this to false to validate job data structures on 
         //job completion
         boolean retireJob = 
-          conf.getBoolean("mapred.job.tracker.retire.jobs", true);
+          conf.getBoolean(JT_RETIREJOBS, true);
 
         if (retireJob) {
           //purge the job from memory
@@ -1284,7 +1285,7 @@
     // find the owner of the process
     clock = newClock;
     mrOwner = UnixUserGroupInformation.login(conf);
-    supergroup = conf.get("mapred.permissions.supergroup", "supergroup");
+    supergroup = conf.get(JT_SUPERGROUP, "supergroup");
     LOG.info("Starting jobtracker with owner as " + mrOwner.getUserName() 
              + " and supergroup as " + supergroup);
 
@@ -1292,19 +1293,15 @@
     // Grab some static constants
     //
     tasktrackerExpiryInterval = 
-      conf.getLong("mapred.tasktracker.expiry.interval", 10 * 60 * 1000);
-    retiredJobsCacheSize = 
-      conf.getInt("mapred.job.tracker.retiredjobs.cache.size", 1000);
-    MAX_BLACKLISTS_PER_TRACKER = 
-        conf.getInt("mapred.max.tracker.blacklists", 4);
-    NUM_HEARTBEATS_IN_SECOND = 
-        conf.getInt("mapred.heartbeats.in.second", 100);
+      conf.getLong(JT_TRACKER_EXPIRY_INTERVAL, 10 * 60 * 1000);
+    retiredJobsCacheSize = conf.getInt(JT_RETIREJOB_CACHE_SIZE, 1000);
+    MAX_BLACKLISTS_PER_TRACKER = conf.getInt(JTConfig.JT_MAX_TRACKER_BLACKLISTS, 4);
+    NUM_HEARTBEATS_IN_SECOND = conf.getInt(JT_HEARTBEATS_IN_SECOND, 100);
 
     //This configuration is there solely for tuning purposes and 
     //once this feature has been tested in real clusters and an appropriate
     //value for the threshold has been found, this config might be taken out.
-    AVERAGE_BLACKLIST_THRESHOLD = 
-      conf.getFloat("mapred.cluster.average.blacklist.threshold", 0.5f); 
+    AVERAGE_BLACKLIST_THRESHOLD = conf.getFloat(JTConfig.JT_AVG_BLACKLIST_THRESHOLD, 0.5f); 
 
     // This is a directory of temporary submission files.  We delete it
     // on startup, and can delete any files that we're done with
@@ -1314,15 +1311,15 @@
     initializeTaskMemoryRelatedConfig();
 
     // Read the hosts/exclude files to restrict access to the jobtracker.
-    this.hostsReader = new HostsFileReader(conf.get("mapred.hosts", ""),
-                                           conf.get("mapred.hosts.exclude", ""));
+    this.hostsReader = new HostsFileReader(conf.get(JTConfig.JT_HOSTS_FILENAME, ""),
+                                           conf.get(JTConfig.JT_HOSTS_EXCLUDE_FILENAME, ""));
 
     Configuration queuesConf = new Configuration(this.conf);
     queueManager = new QueueManager(queuesConf);
     
     // Create the scheduler
     Class<? extends TaskScheduler> schedulerClass
-      = conf.getClass("mapred.jobtracker.taskScheduler",
+      = conf.getClass(JT_TASK_SCHEDULER,
           JobQueueTaskScheduler.class, TaskScheduler.class);
     taskScheduler = (TaskScheduler) ReflectionUtils.newInstance(schedulerClass, conf);
                                            
@@ -1342,7 +1339,7 @@
       SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
     }
     
-    int handlerCount = conf.getInt("mapred.job.tracker.handler.count", 10);
+    int handlerCount = conf.getInt(JT_IPC_HANDLER_COUNT, 10);
     this.interTrackerServer = RPC.getServer(this, addr.getHostName(), addr.getPort(), handlerCount, false, conf);
     if (LOG.isDebugEnabled()) {
       Properties p = System.getProperties();
@@ -1354,7 +1351,7 @@
     }
 
     InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(
-        conf.get("mapred.job.tracker.http.address", "0.0.0.0:50030"));
+        conf.get(JT_HTTP_ADDRESS, "0.0.0.0:50030"));
     String infoBindAddress = infoSocAddr.getHostName();
     int tmpInfoPort = infoSocAddr.getPort();
     this.startTime = clock.getTime();
@@ -1389,10 +1386,10 @@
     // The rpc/web-server ports can be ephemeral ports... 
     // ... ensure we have the correct info
     this.port = interTrackerServer.getListenerAddress().getPort();
-    this.conf.set("mapred.job.tracker", (this.localMachine + ":" + this.port));
+    this.conf.set(JT_IPC_ADDRESS, (this.localMachine + ":" + this.port));
     LOG.info("JobTracker up at: " + this.port);
     this.infoPort = this.infoServer.getPort();
-    this.conf.set("mapred.job.tracker.http.address", 
+    this.conf.set(JT_HTTP_ADDRESS, 
         infoBindAddress + ":" + this.infoPort); 
     LOG.info("JobTracker webserver: " + this.infoServer.getPort());
     
@@ -1420,7 +1417,7 @@
         
         // Check if the history is enabled .. as we can't have persistence with 
         // history disabled
-        if (conf.getBoolean("mapred.jobtracker.restart.recover", false) 
+        if (conf.getBoolean(JT_RESTART_ENABLED, false) 
             && systemDirData != null) {
           for (FileStatus status : systemDirData) {
             try {
@@ -1444,9 +1441,9 @@
         }
         LOG.error("Mkdirs failed to create " + systemDir);
       } catch (AccessControlException ace) {
-        LOG.warn("Failed to operate on mapred.system.dir (" + systemDir 
+        LOG.warn("Failed to operate on " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir 
                  + ") because of permissions.");
-        LOG.warn("Manually delete the mapred.system.dir (" + systemDir 
+        LOG.warn("Manually delete the " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir 
                  + ") and then start the JobTracker.");
         LOG.warn("Bailing out ... ");
         throw ace;
@@ -1474,7 +1471,7 @@
     this.dnsToSwitchMapping = ReflectionUtils.newInstance(
         conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
             DNSToSwitchMapping.class), conf);
-    this.numTaskCacheLevels = conf.getInt("mapred.task.cache.levels", 
+    this.numTaskCacheLevels = conf.getInt(JT_TASKCACHE_LEVELS, 
         NetworkTopology.DEFAULT_HOST_LEVEL);
 
     //initializes the job status store
@@ -1522,7 +1519,7 @@
   }
 
   /**
-   * Get JobTracker's FileSystem. This is the filesystem for mapred.system.dir.
+   * Get JobTracker's FileSystem. This is the filesystem for mapreduce.system.dir.
    */
   FileSystem getFileSystem() {
     return fs;
@@ -1530,7 +1527,7 @@
 
   /**
    * Get the FileSystem for the given path. This can be used to resolve
-   * filesystem for job history, local job files or mapred.system.dir path.
+   * filesystem for job history, local job files or mapreduce.system.dir path.
    */
   FileSystem getFileSystem(Path path) throws IOException {
     return path.getFileSystem(conf);
@@ -1549,12 +1546,12 @@
   }
 
   public static Class<? extends JobTrackerInstrumentation> getInstrumentationClass(Configuration conf) {
-    return conf.getClass("mapred.jobtracker.instrumentation",
+    return conf.getClass(JT_INSTRUMENTATION,
         JobTrackerMetricsInst.class, JobTrackerInstrumentation.class);
   }
   
   public static void setInstrumentationClass(Configuration conf, Class<? extends JobTrackerInstrumentation> t) {
-    conf.setClass("mapred.jobtracker.instrumentation",
+    conf.setClass(JT_INSTRUMENTATION,
         t, JobTrackerInstrumentation.class);
   }
 
@@ -1564,7 +1561,7 @@
 
   public static InetSocketAddress getAddress(Configuration conf) {
     String jobTrackerStr =
-      conf.get("mapred.job.tracker", "localhost:8012");
+      conf.get(JT_IPC_ADDRESS, "localhost:8012");
     return NetUtils.createSocketAddr(jobTrackerStr);
   }
 
@@ -3340,16 +3337,6 @@
 
   TaskCompletionEvent[] EMPTY_EVENTS = new TaskCompletionEvent[0];
 
-  static final String MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY =
-      "mapred.cluster.map.memory.mb";
-  static final String MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY =
-      "mapred.cluster.reduce.memory.mb";
-
-  static final String MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY =
-      "mapred.cluster.max.map.memory.mb";
-  static final String MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY =
-      "mapred.cluster.max.reduce.memory.mb";
-
   /* 
    * Returns a list of TaskCompletionEvent for the given job, 
    * starting from fromEventId.
@@ -3498,7 +3485,7 @@
    * @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getSystemDir()
    */
   public String getSystemDir() {
-    Path sysDir = new Path(conf.get("mapred.system.dir", "/tmp/hadoop/mapred/system"));  
+    Path sysDir = new Path(conf.get(JTConfig.JT_SYSTEM_DIR, "/tmp/hadoop/mapred/system"));
     return fs.makeQualified(sysDir).toString();
   }
   
@@ -3723,13 +3710,13 @@
   }
   
   private synchronized void refreshHosts() throws IOException {
-    // Reread the config to get mapred.hosts and mapred.hosts.exclude filenames.
+    // Reread the config to get JT_HOSTS_FILENAME and JT_HOSTS_EXCLUDE_FILENAME values.
     // Update the file names and refresh internal includes and excludes list
     LOG.info("Refreshing hosts information");
     Configuration conf = new Configuration();
 
-    hostsReader.updateFileNames(conf.get("mapred.hosts",""), 
-                                conf.get("mapred.hosts.exclude", ""));
+    hostsReader.updateFileNames(conf.get(JTConfig.JT_HOSTS_FILENAME,""), 
+                                conf.get(JTConfig.JT_HOSTS_EXCLUDE_FILENAME, ""));
     hostsReader.refresh();
     
     Set<String> excludeSet = new HashSet<String>();
@@ -3923,7 +3910,7 @@
   * Returns the configured maximum number of tasks for a single job
    */
   int getMaxTasksPerJob() {
-    return conf.getInt("mapred.jobtracker.maxtasks.per.job", -1);
+    return conf.getInt(JT_TASKS_PER_JOB, -1);
   }
   
   @Override
@@ -3945,19 +3932,19 @@
   private void initializeTaskMemoryRelatedConfig() {
     memSizeForMapSlotOnJT =
         JobConf.normalizeMemoryConfigValue(conf.getLong(
-            JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY,
+            MAPMEMORY_MB,
             JobConf.DISABLED_MEMORY_LIMIT));
     memSizeForReduceSlotOnJT =
         JobConf.normalizeMemoryConfigValue(conf.getLong(
-            JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY,
+            REDUCEMEMORY_MB,
             JobConf.DISABLED_MEMORY_LIMIT));
 
     if (conf.get(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY) != null) {
       LOG.warn(
         JobConf.deprecatedString(
           JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY)+
-          " instead use "+JobTracker.MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY+
-          " and " + JobTracker.MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY
+          " instead use "+JTConfig.JT_MAX_MAPMEMORY_MB+
+          " and " + JTConfig.JT_MAX_REDUCEMEMORY_MB
       );
 
       limitMaxMemForMapTasks = limitMaxMemForReduceTasks =
@@ -3975,12 +3962,12 @@
       limitMaxMemForMapTasks =
         JobConf.normalizeMemoryConfigValue(
           conf.getLong(
-            JobTracker.MAPRED_CLUSTER_MAX_MAP_MEMORY_MB_PROPERTY,
+            JTConfig.JT_MAX_MAPMEMORY_MB,
             JobConf.DISABLED_MEMORY_LIMIT));
       limitMaxMemForReduceTasks =
         JobConf.normalizeMemoryConfigValue(
           conf.getLong(
-            JobTracker.MAPRED_CLUSTER_MAX_REDUCE_MEMORY_MB_PROPERTY,
+            JTConfig.JT_MAX_REDUCEMEMORY_MB,
             JobConf.DISABLED_MEMORY_LIMIT));
     }
 

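The static-initializer hunk above replaces two addDefaultResource calls with a single ConfigUtil.loadResources(). A hedged sketch of what such a helper can look like; the deprecation mapping shown here is an illustrative assumption, not the committed body of ConfigUtil:

    import org.apache.hadoop.conf.Configuration;

    final class ConfigUtilSketch {
      private ConfigUtilSketch() {}

      static void loadResources() {
        // Assumed: map a legacy key to its new name so old configs keep
        // working, then register the same default resources as before.
        Configuration.addDeprecation("mapred.job.tracker",
            new String[] { "mapreduce.jobtracker.address" });
        Configuration.addDefaultResource("mapred-default.xml");
        Configuration.addDefaultResource("mapred-site.xml");
      }
    }

Centralizing this in one helper keeps resource registration and key deprecation in a single place instead of per-daemon static blocks.
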
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JvmManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JvmManager.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JvmManager.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JvmManager.java Fri Sep 18 15:09:48 2009
@@ -33,6 +33,7 @@
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.mapred.TaskController.TaskControllerContext;
 import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.mapreduce.util.ProcessTree;
 
@@ -195,7 +196,7 @@
         context.task = task;
         // If we are returning the same task as which the JVM was launched
         // we don't initialize task once again.
-        if (!jvmRunner.env.conf.get("mapred.task.id").equals(
+        if (!jvmRunner.env.conf.get(JobContext.TASK_ATTEMPT_ID).equals(
             task.getTaskID().toString())) {
           try {
             tracker.getTaskController().initializeTask(context);
@@ -446,7 +447,7 @@
           if (initalContext != null && initalContext.env != null) {
             initalContext.pid = jvmIdToPid.get(jvmId);
             initalContext.sleeptimeBeforeSigkill = tracker.getJobConf()
-                .getLong("mapred.tasktracker.tasks.sleeptime-before-sigkill",
+                .getLong(TTConfig.TT_SLEEP_TIME_BEFORE_SIG_KILL,
                     ProcessTree.DEFAULT_SLEEPTIME_BEFORE_SIGKILL);
 
             // Destroy the task jvm

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/KeyValueLineRecordReader.java Fri Sep 18 15:09:48 2009
@@ -27,7 +27,7 @@
 /**
  * This class treats a line in the input as a key/value pair separated by a 
  * separator character. The separator can be specified in config file 
- * under the attribute name key.value.separator.in.input.line. The default
+ * under the attribute name mapreduce.input.keyvaluelinerecordreader.key.value.separator. The default
  * separator is the tab character ('\t').
  * 
  * @deprecated Use 
@@ -61,7 +61,7 @@
     lineRecordReader = new LineRecordReader(job, split);
     dummyKey = lineRecordReader.createKey();
     innerValue = lineRecordReader.createValue();
-    String sepStr = job.get("key.value.separator.in.input.line", "\t");
+    String sepStr = job.get("mapreduce.input.keyvaluelinerecordreader.key.value.separator", "\t");
     this.separator = (byte) sepStr.charAt(0);
   }
 

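A usage sketch for the renamed separator key, taken verbatim from the hunk above; it swaps the default tab for a pipe character:

    import org.apache.hadoop.mapred.JobConf;

    public class SeparatorExample {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        job.set(
            "mapreduce.input.keyvaluelinerecordreader.key.value.separator",
            "|");
        // Mirrors the reader: only the first character of the value is used.
        byte separator = (byte) job.get(
            "mapreduce.input.keyvaluelinerecordreader.key.value.separator",
            "\t").charAt(0);
        System.out.println((char) separator);  // prints '|'
      }
    }
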
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LimitTasksPerJobTaskScheduler.java Fri Sep 18 15:09:48 2009
@@ -20,28 +20,24 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
 
 /**
  * A {@link TaskScheduler} that limits the maximum number of tasks
  * running for a job. The limit is set by means of the
- * <code>mapred.jobtracker.scheduler.maxRunningTasksPerJob</code>
- * property.
+ * {@link JTConfig#JT_RUNNINGTASKS_PER_JOB} property.
  */
 class LimitTasksPerJobTaskScheduler extends JobQueueTaskScheduler {
   
   private static final Log LOG = LogFactory.getLog(
     "org.apache.hadoop.mapred.TaskLimitedJobQueueTaskScheduler");
   
-  public static final String MAX_TASKS_PER_JOB_PROPERTY = 
-    "mapred.jobtracker.taskScheduler.maxRunningTasksPerJob";
-  
   private long maxTasksPerJob;
   
   public LimitTasksPerJobTaskScheduler() {
@@ -60,9 +56,10 @@
   @Override
   public synchronized void setConf(Configuration conf) {
     super.setConf(conf);
-    maxTasksPerJob = conf.getLong(MAX_TASKS_PER_JOB_PROPERTY ,Long.MAX_VALUE);
+    maxTasksPerJob = 
+      conf.getLong(JTConfig.JT_RUNNINGTASKS_PER_JOB, Long.MAX_VALUE);
     if (maxTasksPerJob <= 0) {
-      String msg = MAX_TASKS_PER_JOB_PROPERTY +
+      String msg = JTConfig.JT_RUNNINGTASKS_PER_JOB +
         " is set to zero or a negative value. Aborting.";
       LOG.fatal(msg);
       throw new RuntimeException (msg);

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LineRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LineRecordReader.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LineRecordReader.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LineRecordReader.java Fri Sep 18 15:09:48 2009
@@ -77,8 +77,8 @@
 
   public LineRecordReader(Configuration job, 
                           FileSplit split) throws IOException {
-    this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength",
-                                    Integer.MAX_VALUE);
+    this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
+      LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
     start = split.getStart();
     end = start + split.getLength();
     final Path file = split.getPath();
@@ -130,8 +130,8 @@
   public LineRecordReader(InputStream in, long offset, long endOffset, 
                           Configuration job) 
     throws IOException{
-    this.maxLineLength = job.getInt("mapred.linerecordreader.maxlength",
-                                    Integer.MAX_VALUE);
+    this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
+      LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
     this.in = new LineReader(in, job);
     this.start = offset;
     this.pos = offset;

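A minimal sketch of capping record length through the constant referenced above; lines longer than the cap are skipped by the reader rather than returned as records (assuming the usual LineRecordReader behaviour):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;

    public class MaxLineLengthExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Cap records at 1 MB instead of the Integer.MAX_VALUE default.
        conf.setInt(LineRecordReader.MAX_LINE_LENGTH, 1024 * 1024);
        System.out.println(
            conf.getInt(LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE));
      }
    }
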
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LinuxTaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LinuxTaskController.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LinuxTaskController.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LinuxTaskController.java Fri Sep 18 15:09:48 2009
@@ -42,8 +42,8 @@
  * JVM and killing it when needed, and also initializing and
  * finalizing the task environment. 
  * <p> The setuid executable is launched using the command line:</p>
- * <p>task-controller user-name command command-args, where</p>
- * <p>user-name is the name of the owner who submits the job</p>
+ * <p>task-controller mapreduce.job.user.name command command-args, where</p>
+ * <p>mapreduce.job.user.name is the name of the owner who submits the job</p>
  * <p>command is one of the cardinal value of the 
  * {@link LinuxTaskController.TaskCommands} enumeration</p>
  * <p>command-args depends on the command being launched.</p>
@@ -252,7 +252,7 @@
   }
 
   // Get the directory from the list of directories configured
-  // in mapred.local.dir chosen for storing data pertaining to
+  // in MRConfig.LOCAL_DIR chosen for storing data pertaining to
   // this task.
   private String getDirectoryChosenForTask(File directory,
       TaskControllerContext context) {
@@ -280,13 +280,13 @@
    * <br/>
    * For launching following is command line argument:
    * <br/>
-   * {@code user-name command tt-root job_id task_id} 
+   * {@code mapreduce.job.user.name command tt-root job_id task_id} 
    * <br/>
    * For terminating/killing task jvm.
-   * {@code user-name command tt-root task-pid}
+   * {@code mapreduce.job.user.name command tt-root task-pid}
    * 
    * @param command command to be executed.
-   * @param userName user name
+   * @param userName user name (the value of mapreduce.job.user.name)
    * @param cmdArgs list of extra arguments
    * @param workDir working directory for the task-controller
    * @param env JVM environment variables.

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/LocalJobRunner.java Fri Sep 18 15:09:48 2009
@@ -41,9 +41,11 @@
 import org.apache.hadoop.mapreduce.QueueInfo;
 import org.apache.hadoop.mapreduce.TaskCompletionEvent;
 import org.apache.hadoop.mapreduce.TaskTrackerInfo;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.filecache.TaskDistributedCacheManager;
 import org.apache.hadoop.mapreduce.filecache.TrackerDistributedCacheManager;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.mapreduce.server.jobtracker.State;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -143,7 +145,7 @@
       this.taskDistributedCacheManager = 
           trackerDistributerdCacheManager.newTaskDistributedCacheManager(conf);
       taskDistributedCacheManager.setup(
-          new LocalDirAllocator("mapred.local.dir"), 
+          new LocalDirAllocator(MRConfig.LOCAL_DIR), 
           new File(systemJobDir.toString()),
           "archive");
       
@@ -554,7 +556,8 @@
    * @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getSystemDir()
    */
   public String getSystemDir() {
-    Path sysDir = new Path(conf.get("mapred.system.dir", "/tmp/hadoop/mapred/system"));  
+    Path sysDir = new Path(
+      conf.get(JTConfig.JT_SYSTEM_DIR, "/tmp/hadoop/mapred/system"));  
     return fs.makeQualified(sysDir).toString();
   }
 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapOutputFile.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapOutputFile.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapOutputFile.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapOutputFile.java Fri Sep 18 15:09:48 2009
@@ -23,12 +23,13 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRConfig;
 
 /**
  * Manipulate the working area for the transient store for maps and reduces.
  * This class is used by map and reduce tasks to identify the directories that
  * they need to write to/read from for intermediate files. The callers of 
- * these methods are from child space and see mapred.local.dir as 
+ * these methods are from child space and see mapreduce.cluster.local.dir as 
  * taskTracker/jobCache/jobId/attemptId
  * 
  * <FRAMEWORK-USE-ONLY>
@@ -45,7 +46,7 @@
   }
 
   private LocalDirAllocator lDirAlloc = 
-                            new LocalDirAllocator("mapred.local.dir");
+    new LocalDirAllocator(MRConfig.LOCAL_DIR);
   
   /**
    * Return the path to local map output file created earlier

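The LocalDirAllocator pattern above recurs throughout this commit; a minimal sketch, assuming a single local directory (the path is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.LocalDirAllocator;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.MRConfig;

    public class LocalDirExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(MRConfig.LOCAL_DIR, "/tmp/mapred/local");  // assumed path
        LocalDirAllocator alloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
        // Chooses a directory with sufficient space from the configured list.
        Path out = alloc.getLocalPathForWrite("intermediate/file.out", conf);
        System.out.println(out);
      }
    }
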
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapTask.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapTask.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapTask.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/MapTask.java Fri Sep 18 15:09:48 2009
@@ -53,6 +53,7 @@
 import org.apache.hadoop.mapred.Merger.Segment;
 import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.util.IndexedSortable;
@@ -114,7 +115,7 @@
         && (conf.getKeepTaskFilesPattern() != null || conf
             .getKeepFailedTaskFiles())) {
       Path localSplit =
-          new LocalDirAllocator("mapred.local.dir").getLocalPathForWrite(
+          new LocalDirAllocator(MRConfig.LOCAL_DIR).getLocalPathForWrite(
               TaskTracker.getLocalSplitFile(conf.getUser(), getJobID()
                   .toString(), getTaskID().toString()), conf);
       LOG.debug("Writing local split to " + localSplit);
@@ -355,7 +356,7 @@
     RecordReader<INKEY,INVALUE> in = isSkipping() ? 
         new SkippingRecordReader<INKEY,INVALUE>(rawIn, umbilical, reporter) :
         new TrackedRecordReader<INKEY,INVALUE>(rawIn, reporter);
-    job.setBoolean("mapred.skip.on", isSkipping());
+    job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
 
 
     int numReduceTasks = conf.getNumReduceTasks();
@@ -390,9 +391,9 @@
   private void updateJobWithSplit(final JobConf job, InputSplit inputSplit) {
     if (inputSplit instanceof FileSplit) {
       FileSplit fileSplit = (FileSplit) inputSplit;
-      job.set("map.input.file", fileSplit.getPath().toString());
-      job.setLong("map.input.start", fileSplit.getStart());
-      job.setLong("map.input.length", fileSplit.getLength());
+      job.set(JobContext.MAP_INPUT_FILE, fileSplit.getPath().toString());
+      job.setLong(JobContext.MAP_INPUT_START, fileSplit.getStart());
+      job.setLong(JobContext.MAP_INPUT_PATH, fileSplit.getLength());
     }
   }
 
@@ -570,7 +571,7 @@
       new NewTrackingRecordReader<INKEY,INVALUE>
           (inputFormat.createRecordReader(split, taskContext), reporter);
     
-    job.setBoolean("mapred.skip.on", isSkipping());
+    job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
     org.apache.hadoop.mapreduce.RecordWriter output = null;
     org.apache.hadoop.mapreduce.Mapper<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context 
          mapperContext = null;
@@ -744,21 +745,21 @@
       indexCacheList = new ArrayList<SpillRecord>();
       
       //sanity checks
-      final float spillper = job.getFloat("io.sort.spill.percent",(float)0.8);
-      final float recper = job.getFloat("io.sort.record.percent",(float)0.05);
-      final int sortmb = job.getInt("io.sort.mb", 100);
+      final float spillper = job.getFloat(JobContext.MAP_SORT_SPILL_PERCENT,(float)0.8);
+      final float recper = job.getFloat(JobContext.MAP_SORT_RECORD_PERCENT,(float)0.05);
+      final int sortmb = job.getInt(JobContext.IO_SORT_MB, 100);
       if (spillper > (float)1.0 || spillper < (float)0.0) {
-        throw new IOException("Invalid \"io.sort.spill.percent\": " + spillper);
+        throw new IOException("Invalid \"mapreduce.map.sort.spill.percent\": " + spillper);
       }
       if (recper > (float)1.0 || recper < (float)0.01) {
-        throw new IOException("Invalid \"io.sort.record.percent\": " + recper);
+        throw new IOException("Invalid \"mapreduce.map.sort.record.percent\": " + recper);
       }
       if ((sortmb & 0x7FF) != sortmb) {
-        throw new IOException("Invalid \"io.sort.mb\": " + sortmb);
+        throw new IOException("Invalid \"mapreduce.task.mapreduce.task.io.sort.mb\": " + sortmb);
       }
       sorter = ReflectionUtils.newInstance(job.getClass("map.sort.class",
             QuickSort.class, IndexedSorter.class), job);
-      LOG.info("io.sort.mb = " + sortmb);
+      LOG.info("mapreduce.task.mapreduce.task.io.sort.mb = " + sortmb);
       // buffers and accounting
       int maxMemUsage = sortmb << 20;
       int recordCapacity = (int)(maxMemUsage * recper);
@@ -804,7 +805,7 @@
       } else {
         combineCollector = null;
       }
-      minSpillsForCombine = job.getInt("min.num.spills.for.combine", 3);
+      minSpillsForCombine = job.getInt(JobContext.MAP_COMBINE_MIN_SPILLS, 3);
       spillThread.setDaemon(true);
       spillThread.setName("SpillThread");
       spillLock.lock();
@@ -1539,7 +1540,7 @@
             }
           }
 
-          int mergeFactor = job.getInt("io.sort.factor", 100);
+          int mergeFactor = job.getInt(JobContext.IO_SORT_FACTOR, 100);
           // sort the segments only if there are intermediate merges
           boolean sortSegments = segmentList.size() > mergeFactor;
           //merge

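One sanity check above deserves a note: (sortmb & 0x7FF) != sortmb rejects any value outside 0..2047, because maxMemUsage = sortmb << 20 must remain a positive Java int (2047 << 20 is just under Integer.MAX_VALUE). A standalone illustration:

    public class SortMbCheck {
      public static void main(String[] args) {
        for (int sortmb : new int[] { 100, 2047, 2048, -1 }) {
          // Valid iff sortmb fits in the low 11 bits (0..2047).
          boolean valid = (sortmb & 0x7FF) == sortmb;
          System.out.println(sortmb + " -> valid=" + valid
              + (valid ? ", bytes=" + (sortmb << 20) : ""));
        }
      }
    }
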
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Mapper.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Mapper.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Mapper.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Mapper.java Fri Sep 18 15:09:48 2009
@@ -81,8 +81,8 @@
  *       private int noRecords = 0;
  *       
  *       public void configure(JobConf job) {
- *         mapTaskId = job.get("mapred.task.id");
- *         inputFile = job.get("map.input.file");
+ *         mapTaskId = job.get(JobContext.TASK_ATTEMPT_ID);
+ *         inputFile = job.get(JobContext.MAP_INPUT_FILE);
  *       }
  *       
  *       public void map(K key, V val,
@@ -145,8 +145,8 @@
    * takes an insignificant amount of time to process individual key/value 
    * pairs, this is crucial since the framework might assume that the task has 
    * timed-out and kill that task. The other way of avoiding this is to set 
-   * <a href="{@docRoot}/../mapred-default.html#mapred.task.timeout">
-   * mapred.task.timeout</a> to a high-enough value (or even zero for no 
+   * <a href="{@docRoot}/../mapred-default.html#mapreduce.task.timeout">
+   * mapreduce.task.timeout</a> to a high-enough value (or even zero for no 
    * time-outs).</p>
    * 
    * @param key the input key.

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Merger.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Merger.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Merger.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Merger.java Fri Sep 18 15:09:48 2009
@@ -36,6 +36,7 @@
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.mapred.IFile.Reader;
 import org.apache.hadoop.mapred.IFile.Writer;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.util.PriorityQueue;
 import org.apache.hadoop.util.Progress;
 import org.apache.hadoop.util.Progressable;
@@ -49,7 +50,7 @@
 
   // Local directories
   private static LocalDirAllocator lDirAlloc = 
-    new LocalDirAllocator("mapred.local.dir");
+    new LocalDirAllocator(MRConfig.LOCAL_DIR);
 
   public static <K extends Object, V extends Object>
   RawKeyValueIterator merge(Configuration conf, FileSystem fs,
@@ -188,7 +189,7 @@
   void writeFile(RawKeyValueIterator records, Writer<K, V> writer, 
                  Progressable progressable, Configuration conf) 
   throws IOException {
-    long progressBar = conf.getLong("mapred.merge.recordsBeforeProgress",
+    long progressBar = conf.getLong(JobContext.RECORDS_BEFORE_PROGRESS,
         10000);
     long recordCtr = 0;
     while(records.next()) {
@@ -746,7 +747,7 @@
      * calculating mergeProgress. This simulates the above merge() method and
      * tries to obtain the number of bytes that are going to be merged in all
      * merges(assuming that there is no combiner called while merging).
-     * @param factor io.sort.factor
+     * @param factor mapreduce.task.io.sort.factor
      * @param inMem  number of segments in memory to be merged
      */
     long computeBytesInMerges(int factor, int inMem) {

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/NodeHealthCheckerService.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/NodeHealthCheckerService.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/NodeHealthCheckerService.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/NodeHealthCheckerService.java Fri Sep 18 15:09:48 2009
@@ -29,6 +29,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Shell.ExitCodeException;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
@@ -62,13 +63,18 @@
   static private final String ERROR_PATTERN = "ERROR";
 
   /* Configuration keys */
-  static final String HEALTH_CHECK_SCRIPT_PROPERTY = "mapred.healthChecker.script.path";
+  static final String HEALTH_CHECK_SCRIPT_PROPERTY = 
+    TTConfig.TT_HEALTH_CHECKER_SCRIPT_PATH;
 
-  static final String HEALTH_CHECK_INTERVAL_PROPERTY = "mapred.healthChecker.interval";
+  static final String HEALTH_CHECK_INTERVAL_PROPERTY = 
+    TTConfig.TT_HEALTH_CHECKER_INTERVAL;
 
-  static final String HEALTH_CHECK_FAILURE_INTERVAL_PROPERTY = "mapred.healthChecker.script.timeout";
+  static final String HEALTH_CHECK_FAILURE_INTERVAL_PROPERTY = 
+    TTConfig.TT_HEALTH_CHECKER_SCRIPT_TIMEOUT;
 
-  static final String HEALTH_CHECK_SCRIPT_ARGUMENTS_PROPERTY = "mapred.healthChecker.script.args";
+  static final String HEALTH_CHECK_SCRIPT_ARGUMENTS_PROPERTY = 
+    TTConfig.TT_HEALTH_CHECKER_SCRIPT_ARGS;
+  
   /* end of configuration keys */
   /** Time out error message */
   static final String NODE_HEALTH_SCRIPT_TIMED_OUT_MSG = "Node health script timed out";
@@ -211,12 +217,14 @@
    * Method which initializes the values for the script path and interval time.
    */
   private void initialize(Configuration conf) {
-    this.nodeHealthScript = conf.get(HEALTH_CHECK_SCRIPT_PROPERTY);
-    this.intervalTime = conf.getLong(HEALTH_CHECK_INTERVAL_PROPERTY,
+    this.nodeHealthScript = 
+        conf.get(TTConfig.TT_HEALTH_CHECKER_SCRIPT_PATH);
+    this.intervalTime = conf.getLong(TTConfig.TT_HEALTH_CHECKER_INTERVAL,
         DEFAULT_HEALTH_CHECK_INTERVAL);
-    this.scriptTimeout = conf.getLong(HEALTH_CHECK_FAILURE_INTERVAL_PROPERTY,
+    this.scriptTimeout = conf.getLong(
+        TTConfig.TT_HEALTH_CHECKER_SCRIPT_TIMEOUT,
         DEFAULT_HEALTH_SCRIPT_FAILURE_INTERVAL);
-    String[] args = conf.getStrings(HEALTH_CHECK_SCRIPT_ARGUMENTS_PROPERTY,
+    String[] args = conf.getStrings(TTConfig.TT_HEALTH_CHECKER_SCRIPT_ARGS,
         new String[] {});
     timer = new NodeHealthMonitorExecutor(args);
   }
@@ -323,7 +331,8 @@
    * @return true if node health monitoring service can be started.
    */
   static boolean shouldRun(Configuration conf) {
-    String nodeHealthScript = conf.get(HEALTH_CHECK_SCRIPT_PROPERTY);
+    String nodeHealthScript = 
+      conf.get(TTConfig.TT_HEALTH_CHECKER_SCRIPT_PATH);
     if (nodeHealthScript == null || nodeHealthScript.trim().isEmpty()) {
       return false;
     }

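A configuration sketch for the health checker using the TTConfig keys adopted above; the script path and intervals are illustrative values, and shouldRun() only returns true once a script path is set:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;

    public class HealthCheckerConfExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(TTConfig.TT_HEALTH_CHECKER_SCRIPT_PATH,
            "/usr/local/bin/node_health.sh");  // assumed script location
        conf.setLong(TTConfig.TT_HEALTH_CHECKER_INTERVAL, 60 * 1000L);
        conf.setLong(TTConfig.TT_HEALTH_CHECKER_SCRIPT_TIMEOUT, 30 * 1000L);
        conf.setStrings(TTConfig.TT_HEALTH_CHECKER_SCRIPT_ARGS, "-v");
      }
    }
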
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/ReduceTask.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/ReduceTask.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/ReduceTask.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/ReduceTask.java Fri Sep 18 15:09:48 2009
@@ -45,6 +45,7 @@
 import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator;
 import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.mapreduce.task.reduce.Shuffle;
 import org.apache.hadoop.util.Progress;
 import org.apache.hadoop.util.Progressable;
@@ -307,7 +308,7 @@
   @SuppressWarnings("unchecked")
   public void run(JobConf job, final TaskUmbilicalProtocol umbilical)
     throws IOException, InterruptedException, ClassNotFoundException {
-    job.setBoolean("mapred.skip.on", isSkipping());
+    job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
 
     if (isMapOrReduce()) {
       copyPhase = getProgress().addPhase("copy");
@@ -337,7 +338,7 @@
     // Initialize the codec
     codec = initCodec();
     RawKeyValueIterator rIter = null;
-    boolean isLocal = "local".equals(job.get("mapred.job.tracker", "local"));
+    boolean isLocal = "local".equals(job.get(JTConfig.JT_IPC_ADDRESS, "local"));
     if (!isLocal) {
       Class combinerClass = conf.getCombinerClass();
       CombineOutputCollector combineCollector = 
@@ -360,7 +361,7 @@
                            job.getMapOutputValueClass(), codec, 
                            getMapFiles(rfs, true),
                            !conf.getKeepFailedTaskFiles(), 
-                           job.getInt("io.sort.factor", 100),
+                           job.getInt(JobContext.IO_SORT_FACTOR, 100),
                            new Path(getTaskID().toString()), 
                            job.getOutputKeyComparator(),
                            reporter, spilledRecordsCounter, null, null);
@@ -526,6 +527,6 @@
     org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> trackedRW = 
       new NewTrackingRecordWriter<OUTKEY, OUTVALUE>(output, reduceOutputCounter);
     job.setBoolean("mapred.skip.on", isSkipping());
+    job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
     org.apache.hadoop.mapreduce.Reducer.Context 
          reducerContext = createReduceContext(reducer, job, getTaskID(),
                                                rIter, reduceInputKeyCounter, 

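The isLocal test in the hunk above is how this codebase detects LocalJobRunner mode: JT_IPC_ADDRESS left at its default of "local" means no real JobTracker address was configured. A minimal sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;

    public class LocalModeCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Unset (or explicitly "local") means LocalJobRunner; a host:port
        // value means a real cluster.
        boolean isLocal =
            "local".equals(conf.get(JTConfig.JT_IPC_ADDRESS, "local"));
        System.out.println("local mode: " + isLocal);
      }
    }
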
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Reducer.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Reducer.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Reducer.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Reducer.java Fri Sep 18 15:09:48 2009
@@ -109,7 +109,7 @@
  *       private int noKeys = 0;
  *       
  *       public void configure(JobConf job) {
- *         reduceTaskId = job.get("mapred.task.id");
+ *         reduceTaskId = job.get(JobContext.TASK_ATTEMPT_ID);
  *       }
  *       
  *       public void reduce(K key, Iterator&lt;V&gt; values,
@@ -185,8 +185,8 @@
    * takes an insignificant amount of time to process individual key/value 
    * pairs, this is crucial since the framework might assume that the task has 
    * timed-out and kill that task. The other way of avoiding this is to set 
-   * <a href="{@docRoot}/../mapred-default.html#mapred.task.timeout">
-   * mapred.task.timeout</a> to a high-enough value (or even zero for no 
+   * <a href="{@docRoot}/../mapred-default.html#mapreduce.task.timeout">
+   * mapreduce.task.timeout</a> to a high-enough value (or even zero for no 
    * time-outs).</p>
    * 
    * @param key the key.

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SequenceFileAsBinaryOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -61,7 +61,8 @@
    */
   static public void setSequenceFileOutputKeyClass(JobConf conf, 
                                                    Class<?> theClass) {
-    conf.setClass("mapred.seqbinary.output.key.class", theClass, Object.class);
+    conf.setClass(org.apache.hadoop.mapreduce.lib.output.
+      SequenceFileAsBinaryOutputFormat.KEY_CLASS, theClass, Object.class);
   }
 
   /**
@@ -74,8 +75,8 @@
    */
   static public void setSequenceFileOutputValueClass(JobConf conf, 
                                                      Class<?> theClass) {
-    conf.setClass("mapred.seqbinary.output.value.class", 
-                  theClass, Object.class);
+    conf.setClass(org.apache.hadoop.mapreduce.lib.output.
+      SequenceFileAsBinaryOutputFormat.VALUE_CLASS, theClass, Object.class);
   }
 
   /**
@@ -84,9 +85,10 @@
    * @return the key class of the {@link SequenceFile}
    */
   static public Class<? extends WritableComparable> getSequenceFileOutputKeyClass(JobConf conf) { 
-    return conf.getClass("mapred.seqbinary.output.key.class", 
-                         conf.getOutputKeyClass().asSubclass(WritableComparable.class),
-                         WritableComparable.class);
+    return conf.getClass(org.apache.hadoop.mapreduce.lib.output.
+      SequenceFileAsBinaryOutputFormat.KEY_CLASS, 
+      conf.getOutputKeyClass().asSubclass(WritableComparable.class),
+      WritableComparable.class);
   }
 
   /**
@@ -95,9 +97,9 @@
    * @return the value class of the {@link SequenceFile}
    */
   static public Class<? extends Writable> getSequenceFileOutputValueClass(JobConf conf) { 
-    return conf.getClass("mapred.seqbinary.output.value.class", 
-                         conf.getOutputValueClass().asSubclass(Writable.class),
-                         Writable.class);
+    return conf.getClass(org.apache.hadoop.mapreduce.lib.output.
+      SequenceFileAsBinaryOutputFormat.VALUE_CLASS, 
+      conf.getOutputValueClass().asSubclass(Writable.class), Writable.class);
   }
   
   @Override 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SequenceFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SequenceFileOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SequenceFileOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SequenceFileOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -102,8 +102,8 @@
    *         defaulting to {@link CompressionType#RECORD}
    */
   public static CompressionType getOutputCompressionType(JobConf conf) {
-    String val = conf.get("mapred.output.compression.type", 
-                          CompressionType.RECORD.toString());
+    String val = conf.get(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.COMPRESS_TYPE, CompressionType.RECORD.toString());
     return CompressionType.valueOf(val);
   }
   
@@ -116,7 +116,8 @@
   public static void setOutputCompressionType(JobConf conf, 
 		                                          CompressionType style) {
     setCompressOutput(conf, true);
-    conf.set("mapred.output.compression.type", style.toString());
+    conf.set(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.COMPRESS_TYPE, style.toString());
   }
 
 }

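A usage sketch for the pair of methods above; note that setOutputCompressionType also calls setCompressOutput(conf, true), so selecting a type enables compression:

    import org.apache.hadoop.io.SequenceFile.CompressionType;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.SequenceFileOutputFormat;

    public class CompressionTypeExample {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        SequenceFileOutputFormat.setOutputCompressionType(conf,
            CompressionType.BLOCK);
        // Reads back BLOCK; the default would be RECORD.
        System.out.println(
            SequenceFileOutputFormat.getOutputCompressionType(conf));
      }
    }
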
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SkipBadRecords.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SkipBadRecords.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SkipBadRecords.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/SkipBadRecords.java Fri Sep 18 15:09:48 2009
@@ -72,16 +72,16 @@
     "ReduceProcessedGroups";
   
   private static final String ATTEMPTS_TO_START_SKIPPING = 
-    "mapred.skip.attempts.to.start.skipping";
+    JobContext.SKIP_START_ATTEMPTS;
   private static final String AUTO_INCR_MAP_PROC_COUNT = 
-    "mapred.skip.map.auto.incr.proc.count";
+    JobContext.MAP_SKIP_INCR_PROC_COUNT;
   private static final String AUTO_INCR_REDUCE_PROC_COUNT = 
-    "mapred.skip.reduce.auto.incr.proc.count";
-  private static final String OUT_PATH = "mapred.skip.out.dir";
+    JobContext.REDUCE_SKIP_INCR_PROC_COUNT;
+  private static final String OUT_PATH = JobContext.SKIP_OUTDIR;
   private static final String MAPPER_MAX_SKIP_RECORDS = 
-    "mapred.skip.map.max.skip.records";
+    JobContext.MAP_SKIP_MAX_RECORDS;
   private static final String REDUCER_MAX_SKIP_GROUPS = 
-    "mapred.skip.reduce.max.skip.groups";
+    JobContext.REDUCE_SKIP_MAXGROUPS;
   
   /**
    * Get the number of Task attempts AFTER which skip mode 

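Because SkipBadRecords now forwards its private names to the JobContext keys, client code keeps using the same public setters. A usage sketch:

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.SkipBadRecords;

    public class SkipModeExample {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // Start skipping after two failed attempts, and narrow the skipped
        // range down to a single bad record.
        SkipBadRecords.setAttemptsToStartSkipping(conf, 2);
        SkipBadRecords.setMapperMaxSkipRecords(conf, 1);
        System.out.println(SkipBadRecords.getMapperMaxSkipRecords(conf));
      }
    }
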
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java Fri Sep 18 15:09:48 2009
@@ -45,7 +45,9 @@
 import org.apache.hadoop.io.serializer.Deserializer;
 import org.apache.hadoop.io.serializer.SerializationFactory;
 import org.apache.hadoop.mapred.IFile.Writer;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.TaskCounter;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Progress;
 import org.apache.hadoop.util.Progressable;
@@ -391,11 +393,11 @@
    * Localize the given JobConf to be specific for this task.
    */
   public void localizeConfiguration(JobConf conf) throws IOException {
-    conf.set("mapred.tip.id", taskId.getTaskID().toString()); 
-    conf.set("mapred.task.id", taskId.toString());
-    conf.setBoolean("mapred.task.is.map", isMapTask());
-    conf.setInt("mapred.task.partition", partition);
-    conf.set("mapred.job.id", taskId.getJobID().toString());
+    conf.set(JobContext.TASK_ID, taskId.getTaskID().toString()); 
+    conf.set(JobContext.TASK_ATTEMPT_ID, taskId.toString());
+    conf.setBoolean(JobContext.TASK_ISMAP, isMapTask());
+    conf.setInt(JobContext.TASK_PARTITION, partition);
+    conf.set(JobContext.ID, taskId.getJobID().toString());
   }
   
   /** Run this task as a part of the named job.  This method is executed in the
@@ -888,11 +890,11 @@
       this.conf = new JobConf(conf);
     }
     this.mapOutputFile.setConf(this.conf);
-    this.lDirAlloc = new LocalDirAllocator("mapred.local.dir");
+    this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
     // add the static resolutions (this is required for the junit to
     // work on testcases that simulate multiple nodes on a single physical
     // node.
-    String hostToResolved[] = conf.getStrings("hadoop.net.static.resolutions");
+    String hostToResolved[] = conf.getStrings(TTConfig.TT_STATIC_RESOLUTIONS);
     if (hostToResolved != null) {
       for (String str : hostToResolved) {
         String name = str.substring(0, str.indexOf('='));

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskController.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskController.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskController.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskController.java Fri Sep 18 15:09:48 2009
@@ -26,6 +26,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JvmManager.JvmEnv;
 import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 
@@ -52,21 +53,21 @@
     return conf;
   }
 
-  // The list of directory paths specified in the variable mapred.local.dir.
+  // The list of directory paths specified in the variable MRConfig.LOCAL_DIR.
   // This is used to determine which among the list of directories is picked up
   // for storing data for a particular task.
   protected String[] mapredLocalDirs;
 
   public void setConf(Configuration conf) {
     this.conf = conf;
-    mapredLocalDirs = conf.getStrings("mapred.local.dir");
+    mapredLocalDirs = conf.getStrings(MRConfig.LOCAL_DIR);
   }
 
   /**
    * Sets up the permissions of the following directories on all the configured
    * disks:
    * <ul>
-   * <li>mapred-local directories</li>
+   * <li>mapreduce.cluster.local.dir directories</li>
    * <li>Job cache directories</li>
    * <li>Archive directories</li>
    * <li>Hadoop log directories</li>
@@ -74,10 +75,10 @@
    */
   void setup() {
     for (String localDir : this.mapredLocalDirs) {
-      // Set up the mapred-local directories.
+      // Set up the mapreduce.cluster.local.dir directories.
       File mapredlocalDir = new File(localDir);
       if (!mapredlocalDir.exists() && !mapredlocalDir.mkdirs()) {
-        LOG.warn("Unable to create mapred-local directory : "
+        LOG.warn("Unable to create mapreduce.cluster.local.directory : "
             + mapredlocalDir.getPath());
       } else {
         Localizer.PermissionsHandler.setPermissions(mapredlocalDir,

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskLog.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskLog.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskLog.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskLog.java Fri Sep 18 15:09:48 2009
@@ -395,7 +395,7 @@
    * @return the number of bytes to cap the log files at
    */
   public static long getTaskLogLength(JobConf conf) {
-    return conf.getLong("mapred.userlog.limit.kb", 0) * 1024;
+    return conf.getLong(JobContext.TASK_USERLOG_LIMIT, 0) * 1024;
   }
 
   /**

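The cap above is configured in kilobytes and converted to bytes, with 0 (the default) meaning no explicit cap. A small sketch, assuming the JobContext constant from the hunk:

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.JobContext;

    public class UserLogLimitExample {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.setLong(JobContext.TASK_USERLOG_LIMIT, 1024);  // 1024 KB
        long bytes = conf.getLong(JobContext.TASK_USERLOG_LIMIT, 0) * 1024;
        System.out.println(bytes);  // 1048576 bytes, i.e. 1 MB
      }
    }
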
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskMemoryManagerThread.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskMemoryManagerThread.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskMemoryManagerThread.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskMemoryManagerThread.java Fri Sep 18 15:09:48 2009
@@ -29,6 +29,7 @@
 
 import org.apache.hadoop.mapred.TaskTracker;
 import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 import org.apache.hadoop.mapreduce.util.ProcfsBasedProcessTree;
 import org.apache.hadoop.mapreduce.util.ProcessTree;
 import org.apache.hadoop.util.StringUtils;
@@ -52,9 +53,8 @@
 
   public TaskMemoryManagerThread(TaskTracker taskTracker) {
     this(taskTracker.getTotalMemoryAllottedForTasksOnTT() * 1024 * 1024L,
-            taskTracker.getJobConf().getLong(
-                "mapred.tasktracker.taskmemorymanager.monitoring-interval", 
-                5000L));         
+      taskTracker.getJobConf().getLong(
+        TTConfig.TT_MEMORY_MANAGER_MONITORING_INTERVAL, 5000L));         
     this.taskTracker = taskTracker;
   }
 
@@ -179,7 +179,7 @@
                   taskTracker
                       .getJobConf()
                       .getLong(
-                          "mapred.tasktracker.tasks.sleeptime-before-sigkill",
+                          TTConfig.TT_SLEEP_TIME_BEFORE_SIG_KILL,
                           ProcessTree.DEFAULT_SLEEPTIME_BEFORE_SIGKILL);
 
               // create process tree object

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskRunner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskRunner.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskRunner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskRunner.java Fri Sep 18 15:09:48 2009
@@ -32,6 +32,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.mapreduce.filecache.TaskDistributedCacheManager;
 import org.apache.hadoop.mapreduce.filecache.TrackerDistributedCacheManager;
@@ -161,7 +162,7 @@
       //before preparing the job localize 
       //all the archives
       TaskAttemptID taskid = t.getTaskID();
-      LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
+      LocalDirAllocator lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
       File workDir = formWorkDir(lDirAlloc, taskid, t.isTaskCleanupTask(), conf);
 
       // We don't create any symlinks yet, so presence/absence of workDir
@@ -439,8 +440,8 @@
       JobConf conf)
       throws IOException {
 
-    // add java.io.tmpdir given by mapred.child.tmp
-    String tmp = conf.get("mapred.child.tmp", "./tmp");
+    // add java.io.tmpdir given by mapreduce.task.tmp.dir
+    String tmp = conf.get(JobContext.TASK_TEMP_DIR, "./tmp");
     Path tmpDir = new Path(tmp);
 
     // if temp directory path is not absolute, prepend it with workDir.
@@ -556,13 +557,13 @@
   }
 
   /**
-   * Prepare the mapred.local.dir for the child. The child is sand-boxed now.
+   * Prepare the MRConfig.LOCAL_DIR for the child. The child is sand-boxed now.
    * Whenever it uses LocalDirAllocator from now on inside the child, it will
    * only see files inside the attempt-directory. This is done in the Child's
    * process space.
    */
   static void setupChildMapredLocalDirs(Task t, JobConf conf) {
-    String[] localDirs = conf.getStrings(JobConf.MAPRED_LOCAL_DIR_PROPERTY);
+    String[] localDirs = conf.getStrings(MRConfig.LOCAL_DIR);
     String jobId = t.getJobID().toString();
     String taskId = t.getTaskID().toString();
     boolean isCleanup = t.isTaskCleanupTask();
@@ -574,8 +575,8 @@
       childMapredLocalDir.append("," + localDirs[i] + Path.SEPARATOR
           + TaskTracker.getLocalTaskDir(user, jobId, taskId, isCleanup));
     }
-    LOG.debug("mapred.local.dir for child : " + childMapredLocalDir);
-    conf.set("mapred.local.dir", childMapredLocalDir.toString());
+    LOG.debug(MRConfig.LOCAL_DIR + " for child : " + childMapredLocalDir);
+    conf.set(MRConfig.LOCAL_DIR, childMapredLocalDir.toString());
   }
 
   /** Creates the working directory pathname for a task attempt. */ 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskTracker.java Fri Sep 18 15:09:48 2009
@@ -68,8 +68,11 @@
 import org.apache.hadoop.mapred.TaskController.JobInitializationContext;
 import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus;
 import org.apache.hadoop.mapred.pipes.Submitter;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.filecache.TrackerDistributedCacheManager;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
 import org.apache.hadoop.mapreduce.task.reduce.ShuffleHeader;
 import org.apache.hadoop.metrics.MetricsContext;
@@ -84,6 +87,7 @@
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.mapreduce.util.ConfigUtil;
 import org.apache.hadoop.mapreduce.util.MemoryCalculatorPlugin;
 import org.apache.hadoop.mapreduce.util.ProcfsBasedProcessTree;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -100,7 +104,7 @@
  *
  *******************************************************/
 public class TaskTracker 
-             implements MRConstants, TaskUmbilicalProtocol, Runnable {
+    implements MRConstants, TaskUmbilicalProtocol, Runnable, TTConfig {
   /**
    * @deprecated
    */
@@ -120,8 +124,7 @@
   static enum State {NORMAL, STALE, INTERRUPTED, DENIED}
 
   static{
-    Configuration.addDefaultResource("mapred-default.xml");
-    Configuration.addDefaultResource("mapred-site.xml");
+    ConfigUtil.loadResources();
   }
 
   public static final Log LOG =
@@ -212,7 +215,7 @@
   static final String LOCAL_SPLIT_FILE = "split.dta";
   static final String JOBFILE = "job.xml";
 
-  static final String JOB_LOCAL_DIR = "job.local.dir";
+  static final String JOB_LOCAL_DIR = JobContext.JOB_LOCAL_DIR;
 
   private JobConf fConf;
   FileSystem localFs;
@@ -236,7 +239,7 @@
   private long totalMemoryAllottedForTasks = JobConf.DISABLED_MEMORY_LIMIT;
 
   static final String MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY =
-      "mapred.tasktracker.memory_calculator_plugin";
+      TT_MEMORY_CALCULATOR_PLUGIN;
 
   /**
    * the minimum interval between jobtracker polls
@@ -509,14 +512,14 @@
   synchronized void initialize() throws IOException {
     localFs = FileSystem.getLocal(fConf);
     // use configured nameserver & interface to get local hostname
-    if (fConf.get("slave.host.name") != null) {
-      this.localHostname = fConf.get("slave.host.name");
+    if (fConf.get(TT_HOST_NAME) != null) {
+      this.localHostname = fConf.get(TT_HOST_NAME);
     }
     if (localHostname == null) {
       this.localHostname =
       DNS.getDefaultHost
-      (fConf.get("mapred.tasktracker.dns.interface","default"),
-       fConf.get("mapred.tasktracker.dns.nameserver","default"));
+      (fConf.get(TT_DNS_INTERFACE,"default"),
+       fConf.get(TT_DNS_NAMESERVER,"default"));
     }
  
     //check local disk
@@ -532,10 +535,11 @@
     this.acceptNewTasks = true;
     this.status = null;
 
-    this.minSpaceStart = this.fConf.getLong("mapred.local.dir.minspacestart", 0L);
-    this.minSpaceKill = this.fConf.getLong("mapred.local.dir.minspacekill", 0L);
+    this.minSpaceStart = this.fConf.getLong(TT_LOCAL_DIR_MINSPACE_START, 0L);
+    this.minSpaceKill = this.fConf.getLong(TT_LOCAL_DIR_MINSPACE_KILL, 0L);
     //tweak the probe sample size (make it a function of numCopiers)
-    probe_sample_size = this.fConf.getInt("mapred.tasktracker.events.batchsize", 500);
+    probe_sample_size = 
+      this.fConf.getInt(TT_MAX_TASK_COMPLETION_EVENTS_TO_POLL, 500);
     
     Class<? extends TaskTrackerInstrumentation> metricsInst = getInstrumentationClass(fConf);
     try {
@@ -551,7 +555,7 @@
     
     // bind address
     InetSocketAddress socAddr = NetUtils.createSocketAddr(
-        fConf.get("mapred.task.tracker.report.address", "127.0.0.1:0"));
+        fConf.get(TT_REPORT_ADDRESS, "127.0.0.1:0"));
     String bindAddress = socAddr.getHostName();
     int tmpPort = socAddr.getPort();
     
@@ -579,7 +583,7 @@
 
     // get the assigned address
     this.taskReportAddress = taskReportServer.getListenerAddress();
-    this.fConf.set("mapred.task.tracker.report.address",
+    this.fConf.set(TT_REPORT_ADDRESS,
         taskReportAddress.getHostName() + ":" + taskReportAddress.getPort());
     LOG.info("TaskTracker up at: " + this.taskReportAddress);
 
@@ -616,7 +620,7 @@
     mapLauncher.start();
     reduceLauncher.start();
     Class<? extends TaskController> taskControllerClass 
-                          = fConf.getClass("mapred.task.tracker.task-controller",
+                          = fConf.getClass(TT_TASK_CONTROLLER,
                                             DefaultTaskController.class, 
                                             TaskController.class); 
     taskController = (TaskController)ReflectionUtils.newInstance(
@@ -636,13 +640,13 @@
 
   public static Class<? extends TaskTrackerInstrumentation> getInstrumentationClass(
     Configuration conf) {
-    return conf.getClass("mapred.tasktracker.instrumentation",
+    return conf.getClass(TT_INSTRUMENTATION,
         TaskTrackerMetricsInst.class, TaskTrackerInstrumentation.class);
   }
 
   public static void setInstrumentationClass(
     Configuration conf, Class<? extends TaskTrackerInstrumentation> t) {
-    conf.setClass("mapred.tasktracker.instrumentation",
+    conf.setClass(TT_INSTRUMENTATION,
         t, TaskTrackerInstrumentation.class);
   }
   
@@ -828,7 +832,7 @@
   }
 
   private static LocalDirAllocator lDirAlloc = 
-                              new LocalDirAllocator("mapred.local.dir");
+                              new LocalDirAllocator(MRConfig.LOCAL_DIR);
 
  // initialize the job directory
   private void localizeJob(TaskInProgress tip) throws IOException {
@@ -1073,22 +1077,22 @@
    */
   public TaskTracker(JobConf conf) throws IOException {
     fConf = conf;
-    maxMapSlots = conf.getInt("mapred.tasktracker.map.tasks.maximum", 2);
-    maxReduceSlots = conf.getInt("mapred.tasktracker.reduce.tasks.maximum", 2);
+    maxMapSlots = conf.getInt(TT_MAP_SLOTS, 2);
+    maxReduceSlots = conf.getInt(TT_REDUCE_SLOTS, 2);
     this.jobTrackAddr = JobTracker.getAddress(conf);
     InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(
-        conf.get("mapred.task.tracker.http.address", "0.0.0.0:50060"));
+        conf.get(TT_HTTP_ADDRESS, "0.0.0.0:50060"));
     String httpBindAddress = infoSocAddr.getHostName();
     int httpPort = infoSocAddr.getPort();
     this.server = new HttpServer("task", httpBindAddress, httpPort,
         httpPort == 0, conf);
-    workerThreads = conf.getInt("tasktracker.http.threads", 40);
+    workerThreads = conf.getInt(TT_HTTP_THREADS, 40);
     this.shuffleServerMetrics = new ShuffleServerMetrics(conf);
     server.setThreads(1, workerThreads);
     // let the jsp pages get to the task tracker, config, and other relevant
     // objects
     FileSystem local = FileSystem.getLocal(conf);
-    this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
+    this.localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
     server.setAttribute("task.tracker", this);
     server.setAttribute("local.file.system", local);
     server.setAttribute("conf", conf);
@@ -1981,12 +1985,11 @@
                     + cwd.toString());
       }
 
-      localJobConf.set("mapred.local.dir",
-                       fConf.get("mapred.local.dir"));
+      localJobConf.set(LOCAL_DIR,
+                       fConf.get(LOCAL_DIR));
 
-      if (fConf.get("slave.host.name") != null) {
-        localJobConf.set("slave.host.name",
-                         fConf.get("slave.host.name"));
+      if (fConf.get(TT_HOST_NAME) != null) {
+        localJobConf.set(TT_HOST_NAME, fConf.get(TT_HOST_NAME));
       }
             
       keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles();
@@ -2005,7 +2008,7 @@
             str.append(',');
           }
         }
-        localJobConf.set("hadoop.net.static.resolutions", str.toString());
+        localJobConf.set(TT_STATIC_RESOLUTIONS, str.toString());
       }
       if (task.isMapTask()) {
         debugCommand = localJobConf.getMapDebugScript();
@@ -2044,7 +2047,7 @@
     public synchronized void setJobConf(JobConf lconf){
       this.localJobConf = lconf;
       keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles();
-      taskTimeout = localJobConf.getLong("mapred.task.timeout", 
+      taskTimeout = localJobConf.getLong(JobContext.TASK_TIMEOUT, 
                                          10 * 60 * 1000);
     }
         
@@ -2325,7 +2328,7 @@
 
               // add all lines of debug out to diagnostics
               try {
-                int num = localJobConf.getInt("mapred.debug.out.lines", -1);
+                int num = localJobConf.getInt(JobContext.TASK_DEBUGOUT_LINES, -1);
                 addDiagnostics(FileUtil.makeShellPath(stdout),num,"DEBUG OUT");
               } catch(IOException ioe) {
                 LOG.warn("Exception in add diagnostics!");
@@ -2990,7 +2993,7 @@
       JobConf conf=new JobConf();
       // enable the server to track time spent waiting on locks
       ReflectionUtils.setContentionTracing
-        (conf.getBoolean("tasktracker.contention.tracking", false));
+        (conf.getBoolean(TT_CONTENTION_TRACKING, false));
       new TaskTracker(conf).run();
     } catch (Throwable e) {
       LOG.error("Can not start task tracker because "+
@@ -3279,7 +3282,7 @@
     }
 
     Class<? extends MemoryCalculatorPlugin> clazz =
-        fConf.getClass(MAPRED_TASKTRACKER_MEMORY_CALCULATOR_PLUGIN_PROPERTY,
+        fConf.getClass(TT_MEMORY_CALCULATOR_PLUGIN,
             null, MemoryCalculatorPlugin.class);
     MemoryCalculatorPlugin memoryCalculatorPlugin =
         MemoryCalculatorPlugin
@@ -3303,11 +3306,11 @@
 
     mapSlotMemorySizeOnTT =
         fConf.getLong(
-            JobTracker.MAPRED_CLUSTER_MAP_MEMORY_MB_PROPERTY,
+            MAPMEMORY_MB,
             JobConf.DISABLED_MEMORY_LIMIT);
     reduceSlotSizeMemoryOnTT =
         fConf.getLong(
-            JobTracker.MAPRED_CLUSTER_REDUCE_MEMORY_MB_PROPERTY,
+            REDUCEMEMORY_MB,
             JobConf.DISABLED_MEMORY_LIMIT);
     totalMemoryAllottedForTasks =
         maxMapSlots * mapSlotMemorySizeOnTT + maxReduceSlots
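
As a worked example (numbers invented): with 2 map slots at 512 MB each and 2 reduce slots at 1024 MB each, totalMemoryAllottedForTasks comes to 2 * 512 + 2 * 1024 = 3072 MB, which the memory manager thread above later converts to bytes (* 1024 * 1024).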

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TextOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TextOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TextOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TextOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -113,7 +113,7 @@
                                                   Progressable progress)
     throws IOException {
     boolean isCompressed = getCompressOutput(job);
-    String keyValueSeparator = job.get("mapred.textoutputformat.separator", 
+    String keyValueSeparator = job.get("mapreduce.output.textoutputformat.separator", 
                                        "\t");
     if (!isCompressed) {
       Path file = FileOutputFormat.getTaskOutputPath(job, name);
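
Under the new key, overriding the default tab separator looks like this (an illustrative snippet, not from the patch):

    JobConf job = new JobConf();
    // emit key,value rather than the default key<TAB>value
    job.set("mapreduce.output.textoutputformat.separator", ",");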

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java Fri Sep 18 15:09:48 2009
@@ -145,9 +145,9 @@
                             {split, jc, reporter, Integer.valueOf(idx)});
 
       // setup some helper config variables.
-      jc.set("map.input.file", split.getPath(idx).toString());
-      jc.setLong("map.input.start", split.getOffset(idx));
-      jc.setLong("map.input.length", split.getLength(idx));
+      jc.set(JobContext.MAP_INPUT_FILE, split.getPath(idx).toString());
+      jc.setLong(JobContext.MAP_INPUT_START, split.getOffset(idx));
+      jc.setLong(JobContext.MAP_INPUT_PATH, split.getLength(idx));
     } catch (Exception e) {
       throw new RuntimeException (e);
     }
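
For orientation, code running in the map task can read these per-chunk helper values back from the configuration; a hypothetical old-API mapper fragment (not from the patch, assuming the JobContext constants used above):

    public void configure(JobConf job) {
      String path  = job.get(JobContext.MAP_INPUT_FILE);          // current chunk's file
      long   start = job.getLong(JobContext.MAP_INPUT_START, 0L); // offset within it
      System.out.println("processing " + path + " from offset " + start);
    }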

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/FieldSelectionMapReduce.java Fri Sep 18 15:09:48 2009
@@ -44,9 +44,9 @@
  * fields are from the value only. Otherwise, the fields are the union of those
  * from the key and those from the value.
  * 
- * The field separator is under attribute "mapred.data.field.separator"
+ * The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
  * 
- * The map output field list spec is under attribute "map.output.key.value.fields.spec".
+ * The map output field list spec is under attribute "mapreduce.fieldsel.map.output.key.value.fields.spec".
  * The value is expected to be like "keyFieldsSpec:valueFieldsSpec"
  * key/valueFieldsSpec are comma (,) separated field spec: fieldSpec,fieldSpec,fieldSpec ...
  * Each field spec can be a simple number (e.g. 5) specifying a specific field, or a range
@@ -57,7 +57,7 @@
  * Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields 4,3,0 and 1 for keys,
  * and use fields 6,5,1,2,3,7 and above for values.
  * 
- * The reduce output field list spec is under attribute "reduce.output.key.value.fields.spec".
+ * The reduce output field list spec is under attribute "mapreduce.fieldsel.reduce.output.key.value.fields.spec".
  * 
  * The reducer extracts output key/value pairs in a similar manner, except that
  * the key is never ignored.
@@ -156,13 +156,13 @@
   }
 
   public void configure(JobConf job) {
-    this.fieldSeparator = job.get("mapred.data.field.separator", "\t");
-    this.mapOutputKeyValueSpec = job.get("map.output.key.value.fields.spec",
+    this.fieldSeparator = job.get("mapreduce.fieldsel.data.field.separator", "\t");
+    this.mapOutputKeyValueSpec = job.get("mapreduce.fieldsel.map.output.key.value.fields.spec",
         "0-:");
     this.ignoreInputKey = TextInputFormat.class.getCanonicalName().equals(
         job.getInputFormat().getClass().getCanonicalName());
     this.reduceOutputKeyValueSpec = job.get(
-        "reduce.output.key.value.fields.spec", "0-:");
+        "mapreduce.fieldsel.mapreduce.fieldsel.reduce.output.key.value.fields.spec", "0-:");
     parseOutputKeyValueSpec();
     LOG.info(specToString());
   }
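
Putting the spec grammar described above to work, a job might be configured like this (the field specs are invented for illustration):

    JobConf job = new JobConf(FieldSelectionMapReduce.class);
    job.set("mapreduce.fieldsel.data.field.separator", "\t");
    // keys from fields 4,3,0 and 1; values from field 6 onwards
    job.set("mapreduce.fieldsel.map.output.key.value.fields.spec", "4,3,0,1:6-");
    job.set("mapreduce.fieldsel.reduce.output.key.value.fields.spec", "0-:");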

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java Fri Sep 18 15:09:48 2009
@@ -20,6 +20,7 @@
 
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobConfigurable;
+import org.apache.hadoop.mapreduce.JobContext;
 
 /**
  * This comparator implementation provides a subset of the features provided
@@ -33,8 +34,8 @@
  *  character. If '.c' is omitted from pos1, it defaults to 1 (the beginning
  *  of the field); if omitted from pos2, it defaults to 0 (the end of the
  *  field). opts are ordering options (any of 'nr' as described above). 
- * We assume that the fields in the key are separated by 
- * map.output.key.field.separator.
+ * We assume that the fields in the key are separated by
+ * {@link JobContext#MAP_OUTPUT_KEY_FIELD_SEPERATOR} 
  * @deprecated Use 
  * {@link org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedComparator} 
  * instead
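
A typical setup, sorting on the second key field numerically and in reverse (illustrative; JobConf.setKeyFieldComparatorOptions also installs this class as the output key comparator):

    JobConf job = new JobConf();
    // -k2,2nr: second field only, numeric, reversed
    job.setKeyFieldComparatorOptions("-k2,2nr");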

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/LazyOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/LazyOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/LazyOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/LazyOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -44,7 +44,7 @@
   public static void  setOutputFormatClass(JobConf job, 
       Class<? extends OutputFormat> theClass) {
       job.setOutputFormat(LazyOutputFormat.class);
-      job.setClass("mapred.lazy.output.format", theClass, OutputFormat.class);
+      job.setClass("mapreduce.output.lazyoutputformat.outputformat", theClass, OutputFormat.class);
   }
 
   @Override
@@ -68,7 +68,7 @@
   @SuppressWarnings("unchecked")
   private void getBaseOutputFormat(JobConf job) throws IOException {
     baseOut = ReflectionUtils.newInstance(
-        job.getClass("mapred.lazy.output.format", null, OutputFormat.class), 
+        job.getClass("mapreduce.output.lazyoutputformat.outputformat", null, OutputFormat.class), 
         job); 
     if (baseOut == null) {
       throw new IOException("Ouput format not set for LazyOutputFormat");

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultipleInputs.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultipleInputs.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultipleInputs.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultipleInputs.java Fri Sep 18 15:09:48 2009
@@ -48,8 +48,8 @@
 
     String inputFormatMapping = path.toString() + ";"
        + inputFormatClass.getName();
-    String inputFormats = conf.get("mapred.input.dir.formats");
-    conf.set("mapred.input.dir.formats",
+    String inputFormats = conf.get("mapreduce.input.multipleinputs.dir.formats");
+    conf.set("mapreduce.input.multipleinputs.dir.formats",
        inputFormats == null ? inputFormatMapping : inputFormats + ","
            + inputFormatMapping);
 
@@ -72,8 +72,8 @@
     addInputPath(conf, path, inputFormatClass);
 
     String mapperMapping = path.toString() + ";" + mapperClass.getName();
-    String mappers = conf.get("mapred.input.dir.mappers");
-    conf.set("mapred.input.dir.mappers", mappers == null ? mapperMapping
+    String mappers = conf.get("mapreduce.input.multipleinputs.dir.mappers");
+    conf.set("mapreduce.input.multipleinputs.dir.mappers", mappers == null ? mapperMapping
        : mappers + "," + mapperMapping);
 
     conf.setMapperClass(DelegatingMapper.class);
@@ -89,7 +89,7 @@
    */
   static Map<Path, InputFormat> getInputFormatMap(JobConf conf) {
     Map<Path, InputFormat> m = new HashMap<Path, InputFormat>();
-    String[] pathMappings = conf.get("mapred.input.dir.formats").split(",");
+    String[] pathMappings = conf.get("mapreduce.input.multipleinputs.dir.formats").split(",");
     for (String pathMapping : pathMappings) {
       String[] split = pathMapping.split(";");
       InputFormat inputFormat;
@@ -114,11 +114,11 @@
    */
   @SuppressWarnings("unchecked")
   static Map<Path, Class<? extends Mapper>> getMapperTypeMap(JobConf conf) {
-    if (conf.get("mapred.input.dir.mappers") == null) {
+    if (conf.get("mapreduce.input.multipleinputs.dir.mappers") == null) {
       return Collections.emptyMap();
     }
     Map<Path, Class<? extends Mapper>> m = new HashMap<Path, Class<? extends Mapper>>();
-    String[] pathMappings = conf.get("mapred.input.dir.mappers").split(",");
+    String[] pathMappings = conf.get("mapreduce.input.multipleinputs.dir.mappers").split(",");
     for (String pathMapping : pathMappings) {
       String[] split = pathMapping.split(";");
       Class<? extends Mapper> mapClass;
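
For orientation, the mappings above are typically populated like this (the paths and mapper classes are hypothetical):

    MultipleInputs.addInputPath(conf, new Path("/logs/text"),
        TextInputFormat.class, TextEventMapper.class);
    MultipleInputs.addInputPath(conf, new Path("/logs/seq"),
        SequenceFileInputFormat.class, SeqEventMapper.class);
    // both mappings land in the comma-separated lists parsed above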

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultipleOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -28,6 +28,7 @@
 import org.apache.hadoop.mapred.FileOutputFormat;
 import org.apache.hadoop.mapred.RecordWriter;
 import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.util.Progressable;
 
 /**
@@ -174,7 +175,7 @@
 
   /**
    * Generate the outfile name based on a given name and the input file name. If
-   * the map input file does not exists (i.e. this is not for a map only job),
+   * the {@link JobContext#MAP_INPUT_FILE} does not exist (i.e. this is not for a map-only job),
    * the given name is returned unchanged. If the config value for
    * "num.of.trailing.legs.to.use" is not set, or set 0 or negative, the given
    * name is returned unchanged. Otherwise, return a file name consisting of the
@@ -188,9 +189,10 @@
    * @return the outfile name based on a given name and the input file name.
    */
   protected String getInputFileBasedOutputFileName(JobConf job, String name) {
-    String infilepath = job.get("map.input.file");
+    String infilepath = job.get(JobContext.MAP_INPUT_FILE);
     if (infilepath == null) {
-      // if the map input file does not exists, then return the given name
+      // if the {@link JobContext#MAP_INPUT_FILE} does not exist,
+      // then return the given name
       return name;
     }
     int numOfTrailingLegsToUse = job.getInt("mapred.outputformat.numOfTrailingLegs", 0);
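
A worked illustration of the trailing-legs behaviour (values invented; the exact composition follows the code above):

    job.setInt("mapred.outputformat.numOfTrailingLegs", 1);
    // with mapreduce.map.input.file = /data/2009/events.log, the output
    // name for this task would be derived from the trailing path leg,
    // i.e. roughly "events.log"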

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java Fri Sep 18 15:09:48 2009
@@ -67,7 +67,7 @@
   @SuppressWarnings("unchecked")
   public void configure(JobConf jobConf) {
     int numberOfThreads =
-      jobConf.getInt("mapred.map.multithreadedrunner.threads", 10);
+      jobConf.getInt(MultithreadedMapper.NUM_THREADS, 10);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Configuring jobConf " + jobConf.getJobName() +
                 " to use " + numberOfThreads + " threads");

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/NLineInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/NLineInputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/NLineInputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/NLineInputFormat.java Fri Sep 18 15:09:48 2009
@@ -87,6 +87,6 @@
   }
 
   public void configure(JobConf conf) {
-    N = conf.getInt("mapred.line.input.format.linespermap", 1);
+    N = conf.getInt("mapreduce.input.lineinputformat.linespermap", 1);
   }
 }
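
For example, to hand each mapper ten lines instead of one (an illustrative snippet, not from the patch):

    JobConf conf = new JobConf();
    conf.setInputFormat(NLineInputFormat.class);
    conf.setInt("mapreduce.input.lineinputformat.linespermap", 10);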

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/RegexMapper.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/RegexMapper.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/RegexMapper.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/RegexMapper.java Fri Sep 18 15:09:48 2009
@@ -42,8 +42,10 @@
   private int group;
 
   public void configure(JobConf job) {
-    pattern = Pattern.compile(job.get("mapred.mapper.regex"));
-    group = job.getInt("mapred.mapper.regex.group", 0);
+    pattern = Pattern.compile(job.get(org.apache.hadoop.mapreduce.lib.map.
+                RegexMapper.PATTERN));
+    group = job.getInt(org.apache.hadoop.mapreduce.lib.map.
+              RegexMapper.GROUP, 0);
   }
 
   public void map(K key, Text value,
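
A configuration sketch against the new constants (the pattern is invented):

    JobConf job = new JobConf();
    job.setMapperClass(RegexMapper.class);
    job.set(org.apache.hadoop.mapreduce.lib.map.RegexMapper.PATTERN, "ERROR.*");
    job.setInt(org.apache.hadoop.mapreduce.lib.map.RegexMapper.GROUP, 0);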

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -28,6 +28,7 @@
 import org.apache.hadoop.mapred.OutputFormat;
 import org.apache.hadoop.mapred.RecordWriter;
 import org.apache.hadoop.mapred.Reporter;
+import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.util.Progressable;
@@ -69,7 +70,7 @@
       JobConf job, String name, Progressable progress) throws IOException {
     org.apache.hadoop.mapreduce.RecordWriter<K, V> w = super.getRecordWriter(
       new TaskAttemptContext(job, 
-            TaskAttemptID.forName(job.get("mapred.task.id"))));
+            TaskAttemptID.forName(job.get(JobContext.TASK_ATTEMPT_ID))));
     org.apache.hadoop.mapreduce.lib.db.DBOutputFormat.DBRecordWriter writer = 
      (org.apache.hadoop.mapreduce.lib.db.DBOutputFormat.DBRecordWriter) w;
     try {

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/package.html
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/package.html?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/package.html (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/package.html Fri Sep 18 15:09:48 2009
@@ -179,9 +179,9 @@
     grepJob.setCombinerClass(GrepReducer.class);
     grepJob.setReducerClass(GrepReducer.class);
 
-    grepJob.set("mapred.mapper.regex", args[2]);
+    grepJob.set("mapreduce.mapper.regex", args[2]);
     if (args.length == 4)
-      grepJob.set("mapred.mapper.regex.group", args[3]);
+      grepJob.set("mapreduce.mapper.regexmapper..group", args[3]);
 
     grepJob.setOutputFormat(SequenceFileOutputFormat.class);
     grepJob.setOutputKeyClass(Text.class);


