hadoop-mapreduce-commits mailing list archives

From: sha...@apache.org
Subject: svn commit: r816664 [4/9] - in /hadoop/mapreduce/trunk: ./ conf/ src/benchmarks/gridmix/ src/benchmarks/gridmix/pipesort/ src/benchmarks/gridmix2/ src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/ src/c++/pipes/impl/ src/c++/task-controller...
Date: Fri, 18 Sep 2009 15:10:02 GMT
Modified: hadoop/mapreduce/trunk/src/java/mapred-default.xml
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/mapred-default.xml?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/mapred-default.xml (original)
+++ hadoop/mapreduce/trunk/src/java/mapred-default.xml Fri Sep 18 15:09:48 2009
@@ -8,7 +8,7 @@
 <configuration>
 
 <property>
-  <name>hadoop.job.history.location</name>
+  <name>mapreduce.jobtracker.jobhistory.location</name>
   <value></value>
   <description> If job tracker is static the history files are stored 
  in this single well known place. If no value is set here, by default,
@@ -17,7 +17,7 @@
 </property>
 
 <property>
-  <name>hadoop.job.history.user.location</name>
+  <name>mapreduce.job.userhistorylocation</name>
   <value></value>
   <description> User can specify a location to store the history files of 
   a particular job. If nothing is specified, the logs are stored in 
@@ -27,16 +27,16 @@
 </property>
 
 <property>
-  <name>mapred.job.tracker.history.completed.location</name>
+  <name>mapreduce.jobtracker.jobhistory.completed.location</name>
   <value></value>
   <description> The completed job history files are stored at this single well 
   known location. If nothing is specified, the files are stored at 
-  ${hadoop.job.history.location}/done.
+  ${mapreduce.jobtracker.jobhistory.location}/done.
   </description>
 </property>
 
 <property>
-  <name>mapred.committer.job.setup.cleanup.needed</name>
+  <name>mapreduce.job.committer.setup.cleanup.needed</name>
   <value>true</value>
   <description> true, if job needs job-setup and job-cleanup.
                 false, otherwise  
@@ -45,14 +45,14 @@
 <!-- i/o properties -->
 
 <property>
-  <name>io.sort.factor</name>
+  <name>mapreduce.task.io.sort.factor</name>
   <value>10</value>
   <description>The number of streams to merge at once while sorting
   files.  This determines the number of open file handles.</description>
 </property>
 
 <property>
-  <name>io.sort.mb</name>
+  <name>mapreduce.task.io.sort.mb</name>
   <value>100</value>
   <description>The total amount of buffer memory to use while sorting 
   files, in megabytes.  By default, gives each merge stream 1MB, which
@@ -60,16 +60,17 @@
 </property>
 
 <property>
-  <name>io.sort.record.percent</name>
+  <name>mapreduce.map.sort.record.percent</name>
   <value>0.05</value>
-  <description>The percentage of io.sort.mb dedicated to tracking record
-  boundaries. Let this value be r, io.sort.mb be x. The maximum number
+  <description>The percentage of mapreduce.task.io.sort.mb dedicated to 
+  tracking record boundaries. Let this value be r, 
+  mapreduce.task.io.sort.mb be x. The maximum number
   of records collected before the collection thread must block is equal
   to (r * x) / 4</description>
 </property>
 
 <property>
-  <name>io.sort.spill.percent</name>
+  <name>mapreduce.map.sort.spill.percent</name>
   <value>0.80</value>
   <description>The soft limit in either the buffer or record collection
   buffers. Once reached, a thread will begin to spill the contents to disk
@@ -78,7 +79,7 @@
 </property>
 
 <property>
-  <name>mapred.job.tracker</name>
+  <name>mapreduce.jobtracker.address</name>
   <value>local</value>
   <description>The host and port that the MapReduce job tracker runs
   at.  If "local", then jobs are run in-process as a single map
@@ -87,7 +88,7 @@
 </property>
 
 <property>
-  <name>mapred.job.tracker.http.address</name>
+  <name>mapreduce.jobtracker.http.address</name>
   <value>0.0.0.0:50030</value>
   <description>
     The job tracker http server address and port the server will listen on.
@@ -96,7 +97,7 @@
 </property>
 
 <property>
-  <name>mapred.job.tracker.handler.count</name>
+  <name>mapreduce.jobtracker.handler.count</name>
   <value>10</value>
   <description>
     The number of server threads for the JobTracker. This should be roughly
@@ -105,7 +106,7 @@
 </property>
 
 <property>
-  <name>mapred.task.tracker.report.address</name>
+  <name>mapreduce.tasktracker.report.address</name>
   <value>127.0.0.1:0</value>
  <description>The interface and port that the task tracker server listens on. 
   Since it is only connected to by the tasks, it uses the local interface.
@@ -114,7 +115,7 @@
 </property>
 
 <property>
-  <name>mapred.local.dir</name>
+  <name>mapreduce.cluster.local.dir</name>
   <value>${hadoop.tmp.dir}/mapred/local</value>
   <description>The local directory where MapReduce stores intermediate
   data files.  May be a comma-separated list of
@@ -124,32 +125,32 @@
 </property>
 
 <property>
-  <name>mapred.system.dir</name>
+  <name>mapreduce.jobtracker.system.dir</name>
   <value>${hadoop.tmp.dir}/mapred/system</value>
   <description>The shared directory where MapReduce stores control files.
   </description>
 </property>
 
 <property>
-  <name>mapred.temp.dir</name>
+  <name>mapreduce.cluster.temp.dir</name>
   <value>${hadoop.tmp.dir}/mapred/temp</value>
   <description>A shared directory for temporary files.
   </description>
 </property>
 
 <property>
-  <name>mapred.local.dir.minspacestart</name>
+  <name>mapreduce.tasktracker.local.dir.minspacestart</name>
   <value>0</value>
-  <description>If the space in mapred.local.dir drops under this, 
+  <description>If the space in mapreduce.cluster.local.dir drops under this, 
   do not ask for more tasks.
   Value in bytes.
   </description>
 </property>
 
 <property>
-  <name>mapred.local.dir.minspacekill</name>
+  <name>mapreduce.tasktracker.local.dir.minspacekill</name>
   <value>0</value>
-  <description>If the space in mapred.local.dir drops under this, 
+  <description>If the space in mapreduce.cluster.local.dir drops under this, 
    do not ask for more tasks until all the current ones have finished and 
     cleaned up. Also, to save the rest of the tasks we have running, 
     kill one of them, to clean up some space. Start with the reduce tasks,
@@ -159,7 +160,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.expiry.interval</name>
+  <name>mapreduce.jobtracker.expire.trackers.interval</name>
   <value>600000</value>
  <description>Expert: The time-interval, in milliseconds, after which
   a tasktracker is declared 'lost' if it doesn't send heartbeats.
@@ -167,14 +168,14 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.instrumentation</name>
+  <name>mapreduce.tasktracker.instrumentation</name>
   <value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value>
   <description>Expert: The instrumentation class to associate with each TaskTracker.
   </description>
 </property>
 
 <property>
-  <name>mapred.tasktracker.memory_calculator_plugin</name>
+  <name>mapreduce.tasktracker.memorycalculatorplugin</name>
   <value></value>
   <description>
    Name of the class whose instance will be used to query memory information
@@ -188,7 +189,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.taskmemorymanager.monitoring-interval</name>
+  <name>mapreduce.tasktracker.taskmemorymanager.monitoringinterval</name>
   <value>5000</value>
   <description>The interval, in milliseconds, for which the tasktracker waits
    between two cycles of monitoring its tasks' memory usage. Used only if
@@ -197,7 +198,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+  <name>mapreduce.tasktracker.tasks.sleeptimebeforesigkill</name>
   <value>5000</value>
  <description>The time, in milliseconds, the tasktracker waits before sending a
   SIGKILL to a task, after it has been sent a SIGTERM. This is currently
@@ -206,25 +207,25 @@
 </property>
 
 <property>
-  <name>mapred.map.tasks</name>
+  <name>mapreduce.job.maps</name>
   <value>2</value>
   <description>The default number of map tasks per job.
-  Ignored when mapred.job.tracker is "local".  
+  Ignored when mapreduce.jobtracker.address is "local".  
   </description>
 </property>
 
 <property>
-  <name>mapred.reduce.tasks</name>
+  <name>mapreduce.job.reduces</name>
   <value>1</value>
   <description>The default number of reduce tasks per job. Typically set to 99%
   of the cluster's reduce capacity, so that if a node fails the reduces can 
   still be executed in a single wave.
-  Ignored when mapred.job.tracker is "local".
+  Ignored when mapreduce.jobtracker.address is "local".
   </description>
 </property>
 
 <property>
-  <name>mapred.jobtracker.restart.recover</name>
+  <name>mapreduce.jobtracker.restart.recover</name>
   <value>false</value>
   <description>"true" to enable (job) recovery upon restart,
                "false" to start afresh
@@ -232,7 +233,7 @@
 </property>
 
 <property>
-  <name>mapred.jobtracker.job.history.block.size</name>
+  <name>mapreduce.jobtracker.jobhistory.block.size</name>
   <value>3145728</value>
   <description>The block size of the job history file. Since the job recovery
               uses job history, it's important to dump job history to disk as 
@@ -242,13 +243,13 @@
 </property>
 
 <property>
-  <name>mapred.jobtracker.taskScheduler</name>
+  <name>mapreduce.jobtracker.taskscheduler</name>
   <value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value>
   <description>The class responsible for scheduling the tasks.</description>
 </property>
 
 <property>
-  <name>mapred.jobtracker.taskScheduler.maxRunningTasksPerJob</name>
+  <name>mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob</name>
   <value></value>
   <description>The maximum number of running tasks for a job before
   it gets preempted. No limits if undefined.
@@ -256,7 +257,7 @@
 </property>
 
 <property>
-  <name>mapred.map.max.attempts</name>
+  <name>mapreduce.map.maxattempts</name>
   <value>4</value>
   <description>Expert: The maximum number of attempts per map task.
   In other words, framework will try to execute a map task these many number
@@ -265,7 +266,7 @@
 </property>
 
 <property>
-  <name>mapred.reduce.max.attempts</name>
+  <name>mapreduce.reduce.maxattempts</name>
   <value>4</value>
   <description>Expert: The maximum number of attempts per reduce task.
   In other words, framework will try to execute a reduce task these many number
@@ -274,7 +275,7 @@
 </property>
 
 <property>
-  <name>mapred.reduce.parallel.copies</name>
+  <name>mapreduce.reduce.shuffle.parallelcopies</name>
   <value>5</value>
   <description>The default number of parallel transfers run by reduce
   during the copy(shuffle) phase.
@@ -282,15 +283,7 @@
 </property>
 
 <property>
-  <name>mapred.reduce.copy.backoff</name>
-  <value>300</value>
-  <description>The maximum amount of time (in seconds) a reducer spends on 
-  fetching one map output before declaring it as failed.
-  </description>
-</property>
-
-<property>
-  <name>mapred.shuffle.connect.timeout</name>
+  <name>mapreduce.reduce.shuffle.connect.timeout</name>
   <value>180000</value>
   <description>Expert: Cluster-wide configuration. The maximum amount of
  time (in milliseconds) a reduce task spends in trying to connect to a
@@ -299,7 +292,7 @@
 </property>
 
 <property>
-  <name>mapred.shuffle.read.timeout</name>
+  <name>mapreduce.reduce.shuffle.read.timeout</name>
   <value>30000</value>
   <description>Expert: Cluster-wide configuration. The maximum amount of time
  (in milliseconds) a reduce task waits for map output data to be available
@@ -308,7 +301,7 @@
 </property>
 
 <property>
-  <name>mapred.task.timeout</name>
+  <name>mapreduce.task.timeout</name>
   <value>600000</value>
   <description>The number of milliseconds before a task will be
   terminated if it neither reads an input, writes an output, nor
@@ -317,7 +310,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.map.tasks.maximum</name>
+  <name>mapreduce.tasktracker.map.tasks.maximum</name>
   <value>2</value>
   <description>The maximum number of map tasks that will be run
   simultaneously by a task tracker.
@@ -325,7 +318,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.reduce.tasks.maximum</name>
+  <name>mapreduce.tasktracker.reduce.tasks.maximum</name>
   <value>2</value>
   <description>The maximum number of reduce tasks that will be run
   simultaneously by a task tracker.
@@ -333,14 +326,14 @@
 </property>
 
 <property>
-  <name>mapred.job.tracker.retiredjobs.cache.size</name>
+  <name>mapreduce.jobtracker.retiredjobs.cache.size</name>
   <value>1000</value>
  <description>The number of retired job statuses to keep in the cache.
   </description>
 </property>
 
 <property>
-  <name>mapred.job.tracker.jobhistory.lru.cache.size</name>
+  <name>mapreduce.jobtracker.jobhistory.lru.cache.size</name>
   <value>5</value>
   <description>The number of job history files loaded in memory. The jobs are 
   loaded when they are first accessed. The cache is cleared based on LRU.
@@ -348,7 +341,7 @@
 </property>
 
 <property>
-  <name>mapred.jobtracker.instrumentation</name>
+  <name>mapreduce.jobtracker.instrumentation</name>
   <value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value>
   <description>Expert: The instrumentation class to associate with each JobTracker.
   </description>
@@ -394,7 +387,7 @@
 </property>
 
 <property>
-  <name>mapred.child.tmp</name>
+  <name>mapreduce.task.tmp.dir</name>
   <value>./tmp</value>
   <description> To set the value of tmp directory for map and reduce tasks.
   If the value is an absolute path, it is directly assigned. Otherwise, it is
@@ -406,7 +399,7 @@
 </property>
 
 <property>
-  <name>mapred.map.child.log.level</name>
+  <name>mapreduce.map.log.level</name>
   <value>INFO</value>
   <description>The logging level for the map task. The allowed levels are:
   OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
@@ -414,7 +407,7 @@
 </property>
 
 <property>
-  <name>mapred.reduce.child.log.level</name>
+  <name>mapreduce.reduce.log.level</name>
   <value>INFO</value>
   <description>The logging level for the reduce task. The allowed levels are:
   OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
@@ -422,7 +415,7 @@
 </property>
 
 <property>
-  <name>mapred.inmem.merge.threshold</name>
+  <name>mapreduce.reduce.merge.inmem.threshold</name>
   <value>1000</value>
   <description>The threshold, in terms of the number of files 
   for the in-memory merge process. When we accumulate threshold number of files
@@ -433,17 +426,17 @@
 </property>
 
 <property>
-  <name>mapred.job.shuffle.merge.percent</name>
+  <name>mapreduce.reduce.shuffle.merge.percent</name>
   <value>0.66</value>
   <description>The usage threshold at which an in-memory merge will be
   initiated, expressed as a percentage of the total memory allocated to
   storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
+  mapreduce.reduce.shuffle.input.buffer.percent.
   </description>
 </property>
 
 <property>
-  <name>mapred.job.shuffle.input.buffer.percent</name>
+  <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
   <value>0.70</value>
   <description>The percentage of memory to be allocated from the maximum heap
   size to storing map outputs during the shuffle.
@@ -451,7 +444,7 @@
 </property>
 
 <property>
-  <name>mapred.job.reduce.input.buffer.percent</name>
+  <name>mapreduce.reduce.input.buffer.percent</name>
   <value>0.0</value>
  <description>The percentage of memory -relative to the maximum heap size- to
   retain map outputs during the reduce. When the shuffle is concluded, any
@@ -461,7 +454,7 @@
 </property>
 
 <property>
-  <name>mapred.job.reduce.markreset.buffer.percent</name>
+  <name>mapreduce.reduce.markreset.buffer.percent</name>
   <value>0.0</value>
   <description>The percentage of memory -relative to the maximum heap size- to
   be used for caching values when using the mark-reset functionality.
@@ -469,27 +462,27 @@
 </property>
 
 <property>
-  <name>mapred.map.tasks.speculative.execution</name>
+  <name>mapreduce.map.speculative</name>
   <value>true</value>
   <description>If true, then multiple instances of some map tasks 
                may be executed in parallel.</description>
 </property>
 
 <property>
-  <name>mapred.reduce.tasks.speculative.execution</name>
+  <name>mapreduce.reduce.speculative</name>
   <value>true</value>
   <description>If true, then multiple instances of some reduce tasks 
                may be executed in parallel.</description>
 </property>
 <property>
-  <name>mapred.speculative.execution.speculativeCap</name>
+  <name>mapreduce.job.speculative.speculativecap</name>
   <value>0.1</value>
   <description>The max percent (0-1) of running tasks that
   can be speculatively re-executed at any time.</description>
 </property>
  
 <property>
-  <name>mapred.speculative.execution.slowTaskThreshold</name>
+  <name>mapreduce.job.speculative.slowtaskthreshold</name>
  <value>1.0</value>
  <description>The number of standard deviations by which a task's 
  average progress-rate must be lower than the average of all running tasks'
  for the task to be considered too slow.
@@ -498,7 +491,7 @@
 </property>
 
 <property>
-  <name>mapred.speculative.execution.slowNodeThreshold</name>
+  <name>mapreduce.job.speculative.slownodethreshold</name>
   <value>1.0</value>
  <description>The number of standard deviations by which a Task 
  Tracker's average map and reduce progress-rates (finishTime-dispatchTime)
@@ -508,7 +501,7 @@
 </property>
 
 <property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
+  <name>mapreduce.job.jvm.numtasks</name>
   <value>1</value>
   <description>How many tasks to run per jvm. If set to -1, there is
   no limit. 
@@ -516,7 +509,7 @@
 </property>
 
 <property>
-  <name>mapred.min.split.size</name>
+  <name>mapreduce.input.fileinputformat.split.minsize</name>
   <value>0</value>
   <description>The minimum size chunk that map input should be split
   into.  Note that some file formats may have minimum split sizes that
@@ -524,14 +517,14 @@
 </property>
 
 <property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
+  <name>mapreduce.jobtracker.maxtasks.perjob</name>
   <value>-1</value>
   <description>The maximum number of tasks for a single job.
   A value of -1 indicates that there is no maximum.  </description>
 </property>
 
 <property>
-  <name>mapred.submit.replication</name>
+  <name>mapreduce.client.submit.file.replication</name>
   <value>10</value>
   <description>The replication level for submitted job files.  This
   should be around the square root of the number of nodes.
@@ -540,7 +533,7 @@
 
 
 <property>
-  <name>mapred.tasktracker.dns.interface</name>
+  <name>mapreduce.tasktracker.dns.interface</name>
   <value>default</value>
   <description>The name of the Network Interface from which a task
   tracker should report its IP address.
@@ -548,7 +541,7 @@
  </property>
  
 <property>
-  <name>mapred.tasktracker.dns.nameserver</name>
+  <name>mapreduce.tasktracker.dns.nameserver</name>
   <value>default</value>
   <description>The host name or IP address of the name server (DNS)
   which a TaskTracker should use to determine the host name used by
@@ -557,7 +550,7 @@
  </property>
  
 <property>
-  <name>tasktracker.http.threads</name>
+  <name>mapreduce.tasktracker.http.threads</name>
   <value>40</value>
  <description>The number of worker threads for the http server. This is
                used for map output fetching
@@ -565,7 +558,7 @@
 </property>
 
 <property>
-  <name>mapred.task.tracker.http.address</name>
+  <name>mapreduce.tasktracker.http.address</name>
   <value>0.0.0.0:50060</value>
   <description>
     The task tracker http server address and port.
@@ -574,7 +567,7 @@
 </property>
 
 <property>
-  <name>keep.failed.task.files</name>
+  <name>mapreduce.task.files.preserve.failedtasks</name>
   <value>false</value>
   <description>Should the files for failed tasks be kept. This should only be 
                used on jobs that are failing, because the storage is never
@@ -585,7 +578,7 @@
 
 <!-- 
   <property>
-  <name>keep.task.files.pattern</name>
+  <name>mapreduce.task.files.preserve.filepattern</name>
   <value>.*_m_123456_0</value>
   <description>Keep all files from tasks whose task names match the given
                regular expression. Defaults to none.</description>
@@ -593,14 +586,14 @@
 -->
 
 <property>
-  <name>mapred.output.compress</name>
+  <name>mapreduce.output.fileoutputformat.compress</name>
   <value>false</value>
   <description>Should the job outputs be compressed?
   </description>
 </property>
 
 <property>
-  <name>mapred.output.compression.type</name>
+  <name>mapreduce.output.fileoutputformat.compression.type</name>
   <value>RECORD</value>
  <description>If the job outputs are to be compressed as SequenceFiles, how should
                they be compressed? Should be one of NONE, RECORD or BLOCK.
@@ -608,14 +601,14 @@
 </property>
 
 <property>
-  <name>mapred.output.compression.codec</name>
+  <name>mapreduce.output.fileoutputformat.compression.codec</name>
   <value>org.apache.hadoop.io.compress.DefaultCodec</value>
   <description>If the job outputs are compressed, how should they be compressed?
   </description>
 </property>
 
 <property>
-  <name>mapred.compress.map.output</name>
+  <name>mapreduce.map.output.compress</name>
   <value>false</value>
   <description>Should the outputs of the maps be compressed before being
                sent across the network. Uses SequenceFile compression.
@@ -623,7 +616,7 @@
 </property>
 
 <property>
-  <name>mapred.map.output.compression.codec</name>
+  <name>mapreduce.map.output.compress.codec</name>
   <value>org.apache.hadoop.io.compress.DefaultCodec</value>
   <description>If the map outputs are compressed, how should they be 
                compressed?
@@ -638,14 +631,14 @@
 </property>
 
 <property>
-  <name>mapred.userlog.limit.kb</name>
+  <name>mapreduce.task.userlog.limit.kb</name>
   <value>0</value>
   <description>The maximum size of user-logs of each task in KB. 0 disables the cap.
   </description>
 </property>
 
 <property>
-  <name>mapred.userlog.retain.hours</name>
+  <name>mapreduce.task.userlog.retain.hours</name>
   <value>24</value>
   <description>The maximum time, in hours, for which the user-logs are to be 
           retained.
@@ -653,7 +646,7 @@
 </property>
 
 <property>
-  <name>mapred.hosts</name>
+  <name>mapreduce.jobtracker.hosts.filename</name>
   <value></value>
   <description>Names a file that contains the list of nodes that may
   connect to the jobtracker.  If the value is empty, all hosts are
@@ -661,7 +654,7 @@
 </property>
 
 <property>
-  <name>mapred.hosts.exclude</name>
+  <name>mapreduce.jobtracker.hosts.exclude.filename</name>
   <value></value>
   <description>Names a file that contains the list of hosts that
   should be excluded by the jobtracker.  If the value is empty, no
@@ -669,7 +662,7 @@
 </property>
 
 <property>
-  <name>mapred.heartbeats.in.second</name>
+  <name>mapreduce.jobtracker.heartbeats.in.second</name>
   <value>100</value>
   <description>Expert: Approximate number of heart-beats that could arrive 
               at the JobTracker in a second. Assuming each RPC can be processed 
@@ -678,7 +671,7 @@
 </property> 
 
 <property>
-  <name>mapred.max.tracker.blacklists</name>
+  <name>mapreduce.jobtracker.tasktracker.maxblacklists</name>
   <value>4</value>
   <description>The number of blacklists for a taskTracker by various jobs
                after which the task tracker could be blacklisted across
@@ -689,7 +682,7 @@
 </property> 
 
 <property>
-  <name>mapred.max.tracker.failures</name>
+  <name>mapreduce.job.maxtaskfailures.per.tracker</name>
   <value>4</value>
   <description>The number of task-failures on a tasktracker of a given job 
                after which new tasks of that job aren't assigned to it.
@@ -697,7 +690,7 @@
 </property>
 
 <property>
-  <name>jobclient.output.filter</name>
+  <name>mapreduce.client.output.filter</name>
   <value>FAILED</value>
   <description>The filter for controlling the output of the task's userlogs sent
                to the console of the JobClient. 
@@ -707,7 +700,7 @@
 </property>
 
   <property>
-    <name>jobclient.completion.poll.interval</name>
+    <name>mapreduce.client.completion.pollinterval</name>
     <value>5000</value>
    <description>The interval (in milliseconds) at which the JobClient
     polls the JobTracker for updates about job status. You may want to set this
@@ -717,7 +710,7 @@
   </property>
 
   <property>
-    <name>jobclient.progress.monitor.poll.interval</name>
+    <name>mapreduce.client.progerssmonitor.pollinterval</name>
     <value>1000</value>
    <description>The interval (in milliseconds) at which the JobClient
     reports status to the console and checks for job completion. You may want to set this
@@ -727,7 +720,7 @@
   </property>
 
   <property>
-    <name>mapred.job.tracker.persist.jobstatus.active</name>
+    <name>mapreduce.jobtracker.persist.jobstatus.active</name>
     <value>false</value>
     <description>Indicates if persistency of job status information is
       active or not.
@@ -735,7 +728,7 @@
   </property>
 
   <property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
+  <name>mapreduce.jobtracker.persist.jobstatus.hours</name>
   <value>0</value>
   <description>The number of hours job status information is persisted in DFS.
    The job status information will be available after it drops out of the memory
@@ -745,7 +738,7 @@
 </property>
 
   <property>
-    <name>mapred.job.tracker.persist.jobstatus.dir</name>
+    <name>mapreduce.jobtracker.persist.jobstatus.dir</name>
     <value>/jobtracker/jobsInfo</value>
     <description>The directory where the job status information is persisted
      in a file system to be available after it drops out of the memory queue and
@@ -754,7 +747,7 @@
   </property>
 
   <property>
-    <name>mapred.task.profile</name>
+    <name>mapreduce.task.profile</name>
     <value>false</value>
    <description>To set whether the system should collect profiler
     information for some of the tasks in this job. The information is stored
@@ -763,30 +756,23 @@
   </property>
 
   <property>
-    <name>mapred.task.profile.maps</name>
+    <name>mapreduce.task.profile.maps</name>
     <value>0-2</value>
     <description> To set the ranges of map tasks to profile.
-    mapred.task.profile has to be set to true for the value to be accounted.
+    mapreduce.task.profile has to be set to true for the value to be accounted.
     </description>
   </property>
 
   <property>
-    <name>mapred.task.profile.reduces</name>
+    <name>mapreduce.task.profile.reduces</name>
     <value>0-2</value>
     <description> To set the ranges of reduce tasks to profile.
-    mapred.task.profile has to be set to true for the value to be accounted.
+    mapreduce.task.profile has to be set to true for the value to be accounted.
     </description>
   </property>
 
   <property>
-    <name>mapred.line.input.format.linespermap</name>
-    <value>1</value>
-    <description> Number of lines per split in NLineInputFormat.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.skip.attempts.to.start.skipping</name>
+    <name>mapreduce.task.skip.start.attempts</name>
     <value>2</value>
     <description> The number of Task attempts AFTER which skip mode 
     will be kicked off. When skip mode is kicked off, the 
@@ -798,7 +784,7 @@
   </property>
   
   <property>
-    <name>mapred.skip.map.auto.incr.proc.count</name>
+    <name>mapreduce.map.skip.proc.count.autoincr</name>
     <value>true</value>
     <description> The flag which if set to true, 
     SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented 
@@ -810,7 +796,7 @@
   </property>
   
   <property>
-    <name>mapred.skip.reduce.auto.incr.proc.count</name>
+    <name>mapreduce.reduce.skip.proc.count.autoincr</name>
     <value>true</value>
     <description> The flag which if set to true, 
     SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented 
@@ -822,7 +808,7 @@
   </property>
   
   <property>
-    <name>mapred.skip.out.dir</name>
+    <name>mapreduce.job.skip.outdir</name>
     <value></value>
     <description> If no value is specified here, the skipped records are 
     written to the output directory at _logs/skip.
@@ -831,7 +817,7 @@
   </property>
 
   <property>
-    <name>mapred.skip.map.max.skip.records</name>
+    <name>mapreduce.map.skip.maxrecords</name>
     <value>0</value>
     <description> The number of acceptable skip records surrounding the bad 
     record PER bad record in mapper. The number includes the bad record as well.
@@ -846,7 +832,7 @@
   </property>
   
   <property>
-    <name>mapred.skip.reduce.max.skip.groups</name>
+    <name>mapreduce.reduce.skip.maxgroups</name>
     <value>0</value>
     <description> The number of acceptable skip groups surrounding the bad 
     group PER bad group in reducer. The number includes the bad group as well.
@@ -864,7 +850,7 @@
 
 <!--
 <property>
- <name>job.end.notification.url</name>
+ <name>mapreduce.job.end-notification.url</name>
  <value>http://localhost:8080/jobstatus.php?jobId=$jobId&amp;jobStatus=$jobStatus</value>
  <description>Indicates url which will be called on completion of job to inform
               end status of job.
@@ -876,14 +862,14 @@
 -->
 
 <property>
-  <name>job.end.retry.attempts</name>
+  <name>mapreduce.job.end-notification.retry.attempts</name>
   <value>0</value>
   <description>Indicates how many times hadoop should attempt to contact the
                notification URL </description>
 </property>
 
 <property>
-  <name>job.end.retry.interval</name>
+  <name>mapreduce.job.end-notification.retry.interval</name>
    <value>30000</value>
    <description>Indicates time in milliseconds between notification URL retry
                 calls</description>
@@ -891,15 +877,7 @@
   
 <!-- Proxy Configuration -->
 <property>
-  <name>hadoop.rpc.socket.factory.class.JobSubmissionProtocol</name>
-  <value></value>
-  <description> SocketFactory to use to connect to a Map/Reduce master
-    (JobTracker). If null or empty, then use hadoop.rpc.socket.class.default.
-  </description>
-</property>
-
-<property>
-  <name>mapred.task.cache.levels</name>
+  <name>mapreduce.jobtracker.taskcache.levels</name>
   <value>2</value>
   <description> This is the max level of the task cache. For example, if
     the level is 2, the tasks cached are at the host level and at the rack
@@ -908,7 +886,7 @@
 </property>
 
 <property>
-  <name>mapred.job.queue.name</name>
+  <name>mapreduce.job.queuename</name>
   <value>default</value>
   <description> Queue to which a job is submitted. This must match one of the
     queues defined in mapred.queue.names for the system. Also, the ACL setup
@@ -919,7 +897,7 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.indexcache.mb</name>
+  <name>mapreduce.tasktracker.indexcache.mb</name>
   <value>10</value>
   <description> The maximum memory that a task tracker allows for the 
     index cache that is used when serving map outputs to reducers.
@@ -927,7 +905,7 @@
 </property>
 
 <property>
-  <name>mapred.merge.recordsBeforeProgress</name>
+  <name>mapreduce.task.merge.progress.records</name>
   <value>10000</value>
   <description> The number of records to process during merge before
    sending a progress notification to the TaskTracker.
@@ -935,7 +913,7 @@
 </property>
 
 <property>
-  <name>mapred.reduce.slowstart.completed.maps</name>
+  <name>mapreduce.job.reduce.slowstart.completedmaps</name>
   <value>0.05</value>
   <description>Fraction of the number of maps in the job which should be 
   complete before reduces are scheduled for the job. 
@@ -943,7 +921,7 @@
 </property>
 
 <property>
-  <name>mapred.task.tracker.task-controller</name>
+  <name>mapreduce.tasktracker.taskcontroller</name>
   <value>org.apache.hadoop.mapred.DefaultTaskController</value>
   <description>TaskController which is used to launch and manage task execution 
   </description>
@@ -952,7 +930,7 @@
 <!--  Node health script variables -->
 
 <property>
-  <name>mapred.healthChecker.script.path</name>
+  <name>mapreduce.tasktracker.healthchecker.script.path</name>
   <value></value>
   <description>Absolute path to the script which is
  periodically run by the node health monitoring service to determine if
@@ -962,21 +940,21 @@
 </property>
 
 <property>
-  <name>mapred.healthChecker.interval</name>
+  <name>mapreduce.tasktracker.healthchecker.interval</name>
   <value>60000</value>
   <description>Frequency of the node health script to be run,
   in milliseconds</description>
 </property>
 
 <property>
-  <name>mapred.healthChecker.script.timeout</name>
+  <name>mapreduce.tasktracker.healthchecker.script.timeout</name>
   <value>600000</value>
  <description>Time after which the node health script will be killed if 
  unresponsive, and the script considered to have failed.</description>
 </property>
 
 <property>
-  <name>mapred.healthChecker.script.args</name>
+  <name>mapreduce.tasktracker.healthchecker.script.args</name>
   <value></value>
  <description>Comma separated list of arguments which are to be passed to 
  the node health script when it is launched.

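The renames above change only key names, not semantics, so reads through the Configuration/JobConf API keep working under the new names. A minimal sketch, assuming only the key strings and defaults from the XML above, that reads the renamed sort settings and works through the documented (r * x) / 4 bound:

import org.apache.hadoop.mapred.JobConf;

public class RenamedKeysExample {
  public static void main(String[] args) {
    JobConf conf = new JobConf();

    // New canonical names introduced by this commit; defaults as in
    // mapred-default.xml above.
    int sortFactor = conf.getInt("mapreduce.task.io.sort.factor", 10);
    int sortMb = conf.getInt("mapreduce.task.io.sort.mb", 100);
    float recordPercent =
        conf.getFloat("mapreduce.map.sort.record.percent", 0.05f);

    // Worked instance of (r * x) / 4: with r = 0.05 and x = 100 MB the
    // collector can track (0.05 * 100 * 1024 * 1024) / 4 = 1,310,720
    // records before the collection thread must block.
    long maxRecords = (long) (recordPercent * sortMb * 1024 * 1024) / 4;
    System.out.println(sortFactor + " streams, " + maxRecords + " records");
  }
}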
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/BackupStore.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/BackupStore.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/BackupStore.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/BackupStore.java Fri Sep 18 15:09:48 2009
@@ -38,6 +38,7 @@
 import org.apache.hadoop.mapred.IFile.Reader;
 import org.apache.hadoop.mapred.IFile.Writer;
 import org.apache.hadoop.mapred.Merger.Segment;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 /**
@@ -80,10 +81,10 @@
   throws IOException {
     
     final float bufferPercent =
-      conf.getFloat("mapred.job.reduce.markreset.buffer.percent", 0f);
+      conf.getFloat(JobContext.REDUCE_MARKRESET_BUFFER_PERCENT, 0f);
 
     if (bufferPercent > 1.0 || bufferPercent < 0.0) {
-      throw new IOException("mapred.job.reduce.markreset.buffer.percent" +
+      throw new IOException(JobContext.REDUCE_MARKRESET_BUFFER_PERCENT +
           bufferPercent);
     }
 
@@ -91,7 +92,7 @@
         Runtime.getRuntime().maxMemory() * bufferPercent, Integer.MAX_VALUE);
 
     // Support an absolute size also.
-    int tmp = conf.getInt("mapred.job.reduce.markreset.buffer.size", 0);
+    int tmp = conf.getInt(JobContext.REDUCE_MARKRESET_BUFFER_SIZE, 0);
     if (tmp >  0) {
       maxSize = tmp;
     }
@@ -516,7 +517,7 @@
     throws IOException {
       this.conf = conf;
       this.fs = FileSystem.getLocal(conf);
-      this.lDirAlloc = new LocalDirAllocator("mapred.local.dir");
+      this.lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
     }
 
     void write(DataInputBuffer key, DataInputBuffer value)

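The BackupStore change above swaps string literals for JobContext constants without changing behavior. A hedged sketch of the same validation pattern, with the constant's assumed value written out (it matches the rename in mapred-default.xml earlier in this commit):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;

class MarkResetBufferCheck {
  // Assumed value of JobContext.REDUCE_MARKRESET_BUFFER_PERCENT.
  static final String KEY = "mapreduce.reduce.markreset.buffer.percent";

  static long maxBufferSize(Configuration conf) throws IOException {
    float bufferPercent = conf.getFloat(KEY, 0f);
    if (bufferPercent > 1.0f || bufferPercent < 0.0f) {
      throw new IOException(KEY + bufferPercent);
    }
    // Same clamp as BackupStore: a fraction of the max heap, capped at
    // Integer.MAX_VALUE.
    return (long) Math.min(
        Runtime.getRuntime().maxMemory() * bufferPercent, Integer.MAX_VALUE);
  }
}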
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Child.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Child.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Child.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Child.java Fri Sep 18 15:09:48 2009
@@ -142,7 +142,7 @@
         TaskLog.syncLogs(firstTaskid, taskid, isCleanup);
         JobConf job = new JobConf(task.getJobFile());
 
-        // setup the child's mapred-local-dir. The child is now sandboxed and
+        // setup the child's Configs.LOCAL_DIR. The child is now sandboxed and
         // can only see files down and under the attempt dir.
         TaskRunner.setupChildMapredLocalDirs(task, job);
 
@@ -153,7 +153,7 @@
 
         numTasksToExecute = job.getNumTasksToExecutePerJvm();
         assert(numTasksToExecute != 0);
-        TaskLog.cleanup(job.getInt("mapred.userlog.retain.hours", 24));
+        TaskLog.cleanup(job.getInt(JobContext.TASK_LOG_RETAIN_HOURS, 24));
 
         task.setConf(job);
 

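Child's log cleanup now goes through JobContext.TASK_LOG_RETAIN_HOURS. A small sketch of reading that setting, assuming the constant resolves to the renamed key from mapred-default.xml above (default 24):

import org.apache.hadoop.mapred.JobConf;

class UserlogRetention {
  static int retainHours(JobConf job) {
    // How many hours of user logs TaskLog.cleanup() should keep.
    return job.getInt("mapreduce.task.userlog.retain.hours", 24);
  }
}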
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/CompletedJobStatusStore.java Fri Sep 18 15:09:48 2009
@@ -27,6 +27,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 
 /**
  * Persists and retrieves the Job info of a job into/from DFS.
@@ -53,14 +54,14 @@
 
   CompletedJobStatusStore(Configuration conf) throws IOException {
     active =
-      conf.getBoolean("mapred.job.tracker.persist.jobstatus.active", false);
+      conf.getBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false);
 
     if (active) {
       retainTime =
-        conf.getInt("mapred.job.tracker.persist.jobstatus.hours", 0) * HOUR;
+        conf.getInt(JTConfig.JT_PERSIST_JOBSTATUS_HOURS, 0) * HOUR;
 
       jobInfoDir =
-        conf.get("mapred.job.tracker.persist.jobstatus.dir", JOB_INFO_STORE_DIR);
+        conf.get(JTConfig.JT_PERSIST_JOBSTATUS_DIR, JOB_INFO_STORE_DIR);
 
       Path path = new Path(jobInfoDir);
       

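The same pattern applies to the three persistence settings read by CompletedJobStatusStore; the JTConfig constants are assumed to resolve to the renamed keys from mapred-default.xml above:

import org.apache.hadoop.conf.Configuration;

class JobStatusPersistence {
  static String describe(Configuration conf) {
    boolean active =
        conf.getBoolean("mapreduce.jobtracker.persist.jobstatus.active", false);
    int hours =
        conf.getInt("mapreduce.jobtracker.persist.jobstatus.hours", 0);
    String dir = conf.get("mapreduce.jobtracker.persist.jobstatus.dir",
        "/jobtracker/jobsInfo");
    return active ? dir + " for " + hours + "h" : "persistence disabled";
  }
}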
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/EagerTaskInitializationListener.java Fri Sep 18 15:09:48 2009
@@ -29,6 +29,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobStatusChangeEvent.EventType;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 
 /**
  * A {@link JobInProgressListener} which initializes the tasks for a job as soon
@@ -87,7 +88,8 @@
   private TaskTrackerManager ttm;
   
   public EagerTaskInitializationListener(Configuration conf) {
-    numThreads = conf.getInt("mapred.jobinit.threads", DEFAULT_NUM_THREADS);
+    numThreads = 
+      conf.getInt(JTConfig.JT_JOBINIT_THREADS, DEFAULT_NUM_THREADS);
     threadPool = Executors.newFixedThreadPool(numThreads);
   }
   

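EagerTaskInitializationListener sizes its thread pool from the renamed key. A sketch under the assumption that JTConfig.JT_JOBINIT_THREADS resolves to "mapreduce.jobtracker.jobinit.threads" (the default of 4 is a stand-in for DEFAULT_NUM_THREADS, which this excerpt does not show):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.hadoop.conf.Configuration;

class JobInitPool {
  static ExecutorService create(Configuration conf) {
    int numThreads =
        conf.getInt("mapreduce.jobtracker.jobinit.threads", 4);
    return Executors.newFixedThreadPool(numThreads);
  }
}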
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileInputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileInputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileInputFormat.java Fri Sep 18 15:09:48 2009
@@ -123,7 +123,8 @@
    */
   public static void setInputPathFilter(JobConf conf,
                                         Class<? extends PathFilter> filter) {
-    conf.setClass("mapred.input.pathFilter.class", filter, PathFilter.class);
+    conf.setClass(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.PATHFILTER_CLASS, filter, PathFilter.class);
   }
 
   /**
@@ -133,7 +134,8 @@
    */
   public static PathFilter getInputPathFilter(JobConf conf) {
     Class<? extends PathFilter> filterClass = conf.getClass(
-	"mapred.input.pathFilter.class", null, PathFilter.class);
+	  org.apache.hadoop.mapreduce.lib.input.FileInputFormat.PATHFILTER_CLASS,
+	  null, PathFilter.class);
     return (filterClass != null) ?
         ReflectionUtils.newInstance(filterClass, conf) : null;
   }
@@ -209,8 +211,8 @@
     }
 
     long goalSize = totalSize / (numSplits == 0 ? 1 : numSplits);
-    long minSize = Math.max(job.getLong("mapred.min.split.size", 1),
-                            minSplitSize);
+    long minSize = Math.max(job.getLong(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.SPLIT_MINSIZE, 1), minSplitSize);
 
     // generate splits
     ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
@@ -313,7 +315,8 @@
       path = new Path(conf.getWorkingDirectory(), inputPaths[i]);
       str.append(StringUtils.escapeString(path.toString()));
     }
-    conf.set("mapred.input.dir", str.toString());
+    conf.set(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.INPUT_DIR, str.toString());
   }
 
   /**
@@ -326,8 +329,10 @@
   public static void addInputPath(JobConf conf, Path path ) {
     path = new Path(conf.getWorkingDirectory(), path);
     String dirStr = StringUtils.escapeString(path.toString());
-    String dirs = conf.get("mapred.input.dir");
-    conf.set("mapred.input.dir", dirs == null ? dirStr :
+    String dirs = conf.get(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.INPUT_DIR);
+    conf.set(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.INPUT_DIR, dirs == null ? dirStr :
       dirs + StringUtils.COMMA_STR + dirStr);
   }
          
@@ -377,7 +382,8 @@
    * @return the list of input {@link Path}s for the map-reduce job.
    */
   public static Path[] getInputPaths(JobConf conf) {
-    String dirs = conf.get("mapred.input.dir", "");
+    String dirs = conf.get(org.apache.hadoop.mapreduce.lib.input.
+      FileInputFormat.INPUT_DIR, "");
     String [] list = StringUtils.split(dirs);
     Path[] result = new Path[list.length];
     for (int i = 0; i < list.length; i++) {

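The FileInputFormat changes keep the public API intact; only the stored keys move to the org.apache.hadoop.mapreduce.lib.input.FileInputFormat constants. A usage sketch, assuming SPLIT_MINSIZE resolves to the renamed "mapreduce.input.fileinputformat.split.minsize" shown above:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobConf;

class InputSetupExample {
  static void configure(JobConf conf) {
    // Same calls as before the rename.
    FileInputFormat.setInputPaths(conf, new Path("/data/in"));
    FileInputFormat.addInputPath(conf, new Path("/data/extra"));
    // Lower-bound the split size at 128 MB.
    conf.setLong("mapreduce.input.fileinputformat.split.minsize",
        128L * 1024 * 1024);
  }
}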
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileOutputCommitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileOutputCommitter.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileOutputCommitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileOutputCommitter.java Fri Sep 18 15:09:48 2009
@@ -29,7 +29,7 @@
 import org.apache.hadoop.util.StringUtils;
 
 /** An {@link OutputCommitter} that commits files specified 
- * in job output directory i.e. ${mapred.output.dir}. 
+ * in job output directory i.e. ${mapreduce.output.fileoutputformat.outputdir}. 
  **/
 public class FileOutputCommitter extends OutputCommitter {
 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileOutputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileOutputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileOutputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/FileOutputFormat.java Fri Sep 18 15:09:48 2009
@@ -36,7 +36,8 @@
    * @param compress should the output of the job be compressed?
    */
   public static void setCompressOutput(JobConf conf, boolean compress) {
-    conf.setBoolean("mapred.output.compress", compress);
+    conf.setBoolean(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.COMPRESS, compress);
   }
   
   /**
@@ -46,7 +47,8 @@
    *         <code>false</code> otherwise
    */
   public static boolean getCompressOutput(JobConf conf) {
-    return conf.getBoolean("mapred.output.compress", false);
+    return conf.getBoolean(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.COMPRESS, false);
   }
   
   /**
@@ -59,7 +61,8 @@
   setOutputCompressorClass(JobConf conf, 
                            Class<? extends CompressionCodec> codecClass) {
     setCompressOutput(conf, true);
-    conf.setClass("mapred.output.compression.codec", codecClass, 
+    conf.setClass(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.COMPRESS_CODEC, codecClass, 
                   CompressionCodec.class);
   }
   
@@ -76,7 +79,8 @@
 		                       Class<? extends CompressionCodec> defaultValue) {
     Class<? extends CompressionCodec> codecClass = defaultValue;
     
-    String name = conf.get("mapred.output.compression.codec");
+    String name = conf.get(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.COMPRESS_CODEC);
     if (name != null) {
       try {
         codecClass = 
@@ -124,7 +128,8 @@
    */
   public static void setOutputPath(JobConf conf, Path outputDir) {
     outputDir = new Path(conf.getWorkingDirectory(), outputDir);
-    conf.set("mapred.output.dir", outputDir.toString());
+    conf.set(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.OUTDIR, outputDir.toString());
   }
 
   /**
@@ -140,7 +145,7 @@
   
   static void setWorkOutputPath(JobConf conf, Path outputDir) {
     outputDir = new Path(conf.getWorkingDirectory(), outputDir);
-    conf.set("mapred.work.output.dir", outputDir.toString());
+    conf.set(JobContext.TASK_OUTPUT_DIR, outputDir.toString());
   }
   
   /**
@@ -150,7 +155,8 @@
    * @see FileOutputFormat#getWorkOutputPath(JobConf)
    */
   public static Path getOutputPath(JobConf conf) {
-    String name = conf.get("mapred.output.dir");
+    String name = conf.get(org.apache.hadoop.mapreduce.lib.output.
+      FileOutputFormat.OUTDIR);
     return name == null ? null: new Path(name);
   }
   
@@ -164,7 +170,7 @@
    *  is {@link FileOutputCommitter}. If <code>OutputCommitter</code> is not 
    *  a <code>FileOutputCommitter</code>, the task's temporary output
    *  directory is same as {@link #getOutputPath(JobConf)} i.e.
-   *  <tt>${mapred.output.dir}$</tt></p>
+   *  <tt>${mapreduce.output.fileoutputformat.outputdir}$</tt></p>
    *  
    * <p>Some applications need to create/write-to side-files, which differ from
    * the actual job-outputs.
@@ -177,23 +183,23 @@
    * 
    * <p>To get around this the Map-Reduce framework helps the application-writer 
    * out by maintaining a special 
-   * <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> 
+   * <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> 
    * sub-directory for each task-attempt on HDFS where the output of the 
    * task-attempt goes. On successful completion of the task-attempt the files 
-   * in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only) 
-   * are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the 
+   * in the <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> (only) 
+   * are <i>promoted</i> to <tt>${mapreduce.output.fileoutputformat.outputdir}</tt>. Of course, the 
    * framework discards the sub-directory of unsuccessful task-attempts. This 
    * is completely transparent to the application.</p>
    * 
    * <p>The application-writer can take advantage of this by creating any 
-   * side-files required in <tt>${mapred.work.output.dir}</tt> during execution 
+   * side-files required in <tt>${mapreduce.task.output.dir}</tt> during execution 
    * of his reduce-task i.e. via {@link #getWorkOutputPath(JobConf)}, and the 
    * framework will move them out similarly - thus she doesn't have to pick 
    * unique paths per task-attempt.</p>
    * 
-   * <p><i>Note</i>: the value of <tt>${mapred.work.output.dir}</tt> during 
+   * <p><i>Note</i>: the value of <tt>${mapreduce.task.output.dir}</tt> during 
    * execution of a particular task-attempt is actually 
-   * <tt>${mapred.output.dir}/_temporary/_{$taskid}</tt>, and this value is 
+   * <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_{$taskid}</tt>, and this value is 
    * set by the map-reduce framework. So, just create any side-files in the 
    * path  returned by {@link #getWorkOutputPath(JobConf)} from map/reduce 
    * task to take advantage of this feature.</p>
@@ -206,7 +212,7 @@
    * for the map-reduce job.
    */
   public static Path getWorkOutputPath(JobConf conf) {
-    String name = conf.get("mapred.work.output.dir");
+    String name = conf.get(JobContext.TASK_OUTPUT_DIR);
     return name == null ? null: new Path(name);
   }
 
@@ -230,7 +236,7 @@
     OutputCommitter committer = conf.getOutputCommitter();
     Path workPath = outputPath;
     TaskAttemptContext context = new TaskAttemptContext(conf,
-                TaskAttemptID.forName(conf.get("mapred.task.id")));
+                TaskAttemptID.forName(conf.get(JobContext.TASK_ATTEMPT_ID)));
     if (committer instanceof FileOutputCommitter) {
       workPath = ((FileOutputCommitter)committer).getWorkPath(context,
                                                               outputPath);
@@ -257,13 +263,13 @@
   * @return a unique name across all tasks of the job.
    */
   public static String getUniqueName(JobConf conf, String name) {
-    int partition = conf.getInt("mapred.task.partition", -1);
+    int partition = conf.getInt(JobContext.TASK_PARTITION, -1);
     if (partition == -1) {
       throw new IllegalArgumentException(
         "This method can only be called from within a Job");
     }
 
-    String taskType = (conf.getBoolean("mapred.task.is.map", true)) ? "m" : "r";
+    String taskType = (conf.getBoolean(JobContext.TASK_ISMAP, true)) ? "m" : "r";
 
     NumberFormat numberFormat = NumberFormat.getInstance();
     numberFormat.setMinimumIntegerDigits(5);

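Likewise for FileOutputFormat: the compression and output-path helpers are unchanged at the call site. A usage sketch with a codec class that ships with Hadoop:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;

class OutputSetupExample {
  static void configure(JobConf conf) {
    FileOutputFormat.setOutputPath(conf, new Path("/data/out"));
    // Now stored under mapreduce.output.fileoutputformat.compress and
    // mapreduce.output.fileoutputformat.compression.codec, per the
    // renames in mapred-default.xml above.
    FileOutputFormat.setCompressOutput(conf, true);
    FileOutputFormat.setOutputCompressorClass(conf, GzipCodec.class);
  }
}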
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IndexCache.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IndexCache.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IndexCache.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IndexCache.java Fri Sep 18 15:09:48 2009
@@ -25,6 +25,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
 
 class IndexCache {
 
@@ -42,7 +43,7 @@
   public IndexCache(JobConf conf) {
     this.conf = conf;
     totalMemoryAllowed =
-      conf.getInt("mapred.tasktracker.indexcache.mb", 10) * 1024 * 1024;
+      conf.getInt(TTConfig.TT_INDEX_CACHE, 10) * 1024 * 1024;
     LOG.info("IndexCache created with max memory = " + totalMemoryAllowed);
   }
 

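IndexCache's budget comes from the renamed tasktracker key; TTConfig.TT_INDEX_CACHE is assumed to resolve to the value shown in mapred-default.xml above. The sizing arithmetic in one line:

import org.apache.hadoop.conf.Configuration;

class IndexCacheSizing {
  static int totalMemoryAllowedBytes(Configuration conf) {
    // 10 MB default, converted to bytes as in IndexCache's constructor.
    return conf.getInt("mapreduce.tasktracker.indexcache.mb", 10)
        * 1024 * 1024;
  }
}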
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/InputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/InputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/InputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/InputFormat.java Fri Sep 18 15:09:48 2009
@@ -48,8 +48,8 @@
  * bytes, of the input files. However, the {@link FileSystem} blocksize of  
  * the input files is treated as an upper bound for input splits. A lower bound 
  * on the split size can be set via 
- * <a href="{@docRoot}/../mapred-default.html#mapred.min.split.size">
- * mapred.min.split.size</a>.</p>
+ * <a href="{@docRoot}/../mapred-default.html#mapreduce.input.fileinputformat.split.minsize">
+ * mapreduce.input.fileinputformat.split.minsize</a>.</p>
  * 
  * <p>Clearly, logical splits based on input-size is insufficient for many 
 * applications since record boundaries are to be respected. In such cases, the

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IsolationRunner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IsolationRunner.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IsolationRunner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/IsolationRunner.java Fri Sep 18 15:09:48 2009
@@ -33,6 +33,7 @@
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JvmTask;
+import org.apache.hadoop.mapreduce.MRConfig;
 
 /**
  * IsolationRunner is intended to facilitate debugging by re-running a specific
@@ -40,7 +41,7 @@
  * Currently, it is limited to re-running map tasks.
  *
  * Users may coerce MapReduce to keep task files around by setting 
- * keep.failed.task.files.  See mapred_tutorial.xml for more documentation.
+ * mapreduce.task.files.preserve.failedtasks.  See mapred_tutorial.xml for more documentation.
  */
 public class IsolationRunner {
   private static final Log LOG = 
@@ -153,21 +154,21 @@
       return false;
     }
     JobConf conf = new JobConf(new Path(jobFilename.toString()));
-    TaskAttemptID taskId = TaskAttemptID.forName(conf.get("mapred.task.id"));
+    TaskAttemptID taskId = TaskAttemptID.forName(conf.get(JobContext.TASK_ATTEMPT_ID));
     if (taskId == null) {
-      System.out.println("mapred.task.id not found in configuration;" + 
+      System.out.println("mapreduce.task.attempt.id not found in configuration;" + 
           " job.xml is not a task config");
     }
-    boolean isMap = conf.getBoolean("mapred.task.is.map", true);
+    boolean isMap = conf.getBoolean(JobContext.TASK_ISMAP, true);
     if (!isMap) {
       System.out.println("Only map tasks are supported.");
       return false;
     }
-    int partition = conf.getInt("mapred.task.partition", 0);
+    int partition = conf.getInt(JobContext.TASK_PARTITION, 0);
     
     // setup the local and user working directories
     FileSystem local = FileSystem.getLocal(conf);
-    LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
+    LocalDirAllocator lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
 
     File workDirName = TaskRunner.formWorkDir(lDirAlloc, taskId, false, conf);
     local.setWorkingDirectory(new Path(workDirName.toString()));
@@ -183,7 +184,7 @@
     // any of the configured local disks, so use LocalDirAllocator to find out
     // where it is.
     Path localSplit =
-        new LocalDirAllocator("mapred.local.dir").getLocalPathToRead(
+        new LocalDirAllocator(MRConfig.LOCAL_DIR).getLocalPathToRead(
             TaskTracker.getLocalSplitFile(conf.getUser(), taskId.getJobID()
                 .toString(), taskId.toString()), conf);
     DataInputStream splitFile = FileSystem.getLocal(conf).open(localSplit);

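IsolationRunner now identifies the task through JobContext constants. A sketch of reading the same three values; "mapreduce.task.attempt.id" is printed verbatim above, while the other two key strings are assumed values for TASK_ISMAP and TASK_PARTITION:

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TaskAttemptID;

class TaskIdentity {
  static String describe(JobConf conf) {
    TaskAttemptID taskId =
        TaskAttemptID.forName(conf.get("mapreduce.task.attempt.id"));
    boolean isMap = conf.getBoolean("mapreduce.task.ismap", true);
    int partition = conf.getInt("mapreduce.task.partition", 0);
    return taskId + (isMap ? " map " : " reduce ") + partition;
  }
}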
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JSPUtil.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JSPUtil.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JSPUtil.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JSPUtil.java Fri Sep 18 15:09:48 2009
@@ -34,6 +34,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.util.ServletUtil;
 import org.apache.hadoop.util.StringUtils;
 
@@ -47,7 +48,7 @@
     new LinkedHashMap<String, JobInfo>(); 
 
   private static final int CACHE_SIZE = 
-    conf.getInt("mapred.job.tracker.jobhistory.lru.cache.size", 5);
+    conf.getInt(JTConfig.JT_JOBHISTORY_CACHE_SIZE, 5);
 
   private static final Log LOG = LogFactory.getLog(JSPUtil.class);
   /**

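JSPUtil's history cache is capped by JT_JOBHISTORY_CACHE_SIZE (the renamed mapreduce.jobtracker.jobhistory.lru.cache.size, default 5). A hedged sketch of that LRU idea using a plain access-ordered LinkedHashMap; JSPUtil's own eviction code is not shown in this excerpt:

import java.util.LinkedHashMap;
import java.util.Map;

class JobInfoLruCache<K, V> extends LinkedHashMap<K, V> {
  private final int cacheSize;

  JobInfoLruCache(int cacheSize) {
    super(16, 0.75f, true); // iterate in access order
    this.cacheSize = cacheSize;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    return size() > cacheSize; // evict least-recently-used past the cap
  }
}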
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobClient.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobClient.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobClient.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobClient.java Fri Sep 18 15:09:48 2009
@@ -35,6 +35,7 @@
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.mapreduce.tools.CLI;
+import org.apache.hadoop.mapreduce.util.ConfigUtil;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -131,8 +132,7 @@
   private TaskStatusFilter taskOutputFilter = TaskStatusFilter.FAILED; 
 
   static{
-    Configuration.addDefaultResource("mapred-default.xml");
-    Configuration.addDefaultResource("mapred-site.xml");
+    ConfigUtil.loadResources();
   }
 
   /**

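The two addDefaultResource calls move behind a single ConfigUtil helper.
Its body is not part of this hunk; a plausible sketch, assuming it also
registers the old-name aliases relied on throughout this rename, would be:

    public static void loadResources() {
      // The same resources the static block added directly before this patch.
      Configuration.addDefaultResource("mapred-default.xml");
      Configuration.addDefaultResource("mapred-site.xml");
      // Presumably also wires up the deprecated mapred.* -> mapreduce.*
      // key mappings used elsewhere in this commit (not shown here).
    }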
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobConf.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobConf.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobConf.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobConf.java Fri Sep 18 15:09:48 2009
@@ -41,6 +41,10 @@
 import org.apache.hadoop.mapred.lib.HashPartitioner;
 import org.apache.hadoop.mapred.lib.KeyFieldBasedComparator;
 import org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
+import org.apache.hadoop.mapreduce.util.ConfigUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.log4j.Level;
@@ -108,8 +112,7 @@
   private static final Log LOG = LogFactory.getLog(JobConf.class);
 
   static{
-    Configuration.addDefaultResource("mapred-default.xml");
-    Configuration.addDefaultResource("mapred-site.xml");
+    ConfigUtil.loadResources();
   }
 
   /**
@@ -147,9 +150,9 @@
   public static final long DISABLED_MEMORY_LIMIT = -1L;
 
   /**
-   * Property name for the configuration property mapred.local.dir
+   * Property name for the configuration property mapreduce.cluster.local.dir
    */
-  public static final String MAPRED_LOCAL_DIR_PROPERTY = "mapred.local.dir";
+  public static final String MAPRED_LOCAL_DIR_PROPERTY = MRConfig.LOCAL_DIR;
 
   /**
    * Name of the queue to which jobs will be submitted, if no queue
@@ -157,11 +160,11 @@
    */
   public static final String DEFAULT_QUEUE_NAME = "default";
 
-  static final String MAPRED_JOB_MAP_MEMORY_MB_PROPERTY =
-      "mapred.job.map.memory.mb";
+  static final String MAPRED_JOB_MAP_MEMORY_MB_PROPERTY = 
+      JobContext.MAP_MEMORY_MB;
 
   static final String MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY =
-      "mapred.job.reduce.memory.mb";
+    JobContext.REDUCE_MEMORY_MB;
 
   /**
    * Configuration key to set the java command line options for the child
@@ -205,7 +208,7 @@
    * other environment variables to the map processes.
    */
   public static final String MAPRED_MAP_TASK_JAVA_OPTS = 
-    "mapred.map.child.java.opts";
+    JobContext.MAP_JAVA_OPTS;
   
   /**
    * Configuration key to set the java command line options for the reduce tasks.
@@ -225,7 +228,7 @@
    * pass process environment variables to the reduce processes.
    */
   public static final String MAPRED_REDUCE_TASK_JAVA_OPTS = 
-    "mapred.reduce.child.java.opts";
+    JobContext.REDUCE_JAVA_OPTS;
   
   public static final String DEFAULT_MAPRED_TASK_JAVA_OPTS = "-Xmx200m";
   
@@ -249,7 +252,7 @@
    * Note: This must be greater than or equal to the -Xmx passed to the JavaVM
    *       via {@link #MAPRED_MAP_TASK_JAVA_OPTS}, else the VM might not start.
    */
-  public static final String MAPRED_MAP_TASK_ULIMIT = "mapred.map.child.ulimit";
+  public static final String MAPRED_MAP_TASK_ULIMIT = JobContext.MAP_ULIMIT;
   
   /**
    * Configuration key to set the maximum virtual memory available to the
@@ -258,8 +261,9 @@
    * Note: This must be greater than or equal to the -Xmx passed to the JavaVM
    *       via {@link #MAPRED_REDUCE_TASK_JAVA_OPTS}, else the VM might not start.
    */
-  public static final String MAPRED_REDUCE_TASK_ULIMIT =
-    "mapred.reduce.child.ulimit";
+  public static final String MAPRED_REDUCE_TASK_ULIMIT = 
+    JobContext.REDUCE_ULIMIT;
+
 
   /**
    * Configuration key to set the environment of the child map/reduce tasks.
@@ -292,7 +296,7 @@
    *   <li> B=$X:c This inherits the tasktracker's X env variable. </li>
    * </ul>
    */
-  public static final String MAPRED_MAP_TASK_ENV = "mapred.map.child.env";
+  public static final String MAPRED_MAP_TASK_ENV = JobContext.MAP_ENV;
   
   /**
    * Configuration key to set the maximum virtual memory available to the
@@ -307,8 +311,7 @@
    *   <li> B=$X:c This inherits the tasktracker's X env variable. </li>
    * </ul>
    */
-  public static final String MAPRED_REDUCE_TASK_ENV =
-    "mapred.reduce.child.env";
+  public static final String MAPRED_REDUCE_TASK_ENV = JobContext.REDUCE_ENV;
 
   /**
    * Configuration key to set the logging {@link Level} for the map task.
@@ -317,7 +320,7 @@
    * OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
    */
   public static final String MAPRED_MAP_TASK_LOG_LEVEL = 
-    "mapred.map.child.log.level";
+    JobContext.MAP_LOG_LEVEL;
   
   /**
    * Configuration key to set the logging {@link Level} for the reduce task.
@@ -326,7 +329,7 @@
    * OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
    */
   public static final String MAPRED_REDUCE_TASK_LOG_LEVEL = 
-    "mapred.reduce.child.log.level";
+    JobContext.REDUCE_LOG_LEVEL;
   
   /**
    * Default logging level for map/reduce tasks.
@@ -408,14 +411,14 @@
    * 
    * @return the user jar for the map-reduce job.
    */
-  public String getJar() { return get("mapred.jar"); }
+  public String getJar() { return get(JobContext.JAR); }
   
   /**
    * Set the user jar for the map-reduce job.
    * 
    * @param jar the user jar for the map-reduce job.
    */
-  public void setJar(String jar) { set("mapred.jar", jar); }
+  public void setJar(String jar) { set(JobContext.JAR, jar); }
   
   /**
    * Set the job's jar file by finding an example class location.
@@ -430,7 +433,7 @@
   }
 
   public String[] getLocalDirs() throws IOException {
-    return getStrings(MAPRED_LOCAL_DIR_PROPERTY);
+    return getStrings(MRConfig.LOCAL_DIR);
   }
 
   public void deleteLocalFiles() throws IOException {
@@ -452,7 +455,7 @@
    * local directories.
    */
   public Path getLocalPath(String pathString) throws IOException {
-    return getLocalPath(MAPRED_LOCAL_DIR_PROPERTY, pathString);
+    return getLocalPath(MRConfig.LOCAL_DIR, pathString);
   }
 
   /**
@@ -461,7 +464,7 @@
    * @return the username
    */
   public String getUser() {
-    return get("user.name");
+    return get(JobContext.USER_NAME);
   }
   
   /**
@@ -470,7 +473,7 @@
    * @param user the username for this job.
    */
   public void setUser(String user) {
-    set("user.name", user);
+    set(JobContext.USER_NAME, user);
   }
 
 
@@ -484,7 +487,7 @@
    * 
    */
   public void setKeepFailedTaskFiles(boolean keep) {
-    setBoolean("keep.failed.task.files", keep);
+    setBoolean(JobContext.PRESERVE_FAILED_TASK_FILES, keep);
   }
   
   /**
@@ -493,7 +496,7 @@
    * @return should the files be kept?
    */
   public boolean getKeepFailedTaskFiles() {
-    return getBoolean("keep.failed.task.files", false);
+    return getBoolean(JobContext.PRESERVE_FAILED_TASK_FILES, false);
   }
   
   /**
@@ -505,7 +508,7 @@
    *        task names.
    */
   public void setKeepTaskFilesPattern(String pattern) {
-    set("keep.task.files.pattern", pattern);
+    set(JobContext.PRESERVE_FILES_PATTERN, pattern);
   }
   
   /**
@@ -515,7 +518,7 @@
    * @return the pattern as a string, if it was set, otherwise null.
    */
   public String getKeepTaskFilesPattern() {
-    return get("keep.task.files.pattern");
+    return get(JobContext.PRESERVE_FILES_PATTERN);
   }
   
   /**
@@ -525,7 +528,7 @@
    */
   public void setWorkingDirectory(Path dir) {
     dir = new Path(getWorkingDirectory(), dir);
-    set("mapred.working.dir", dir.toString());
+    set(JobContext.WORKING_DIR, dir.toString());
   }
   
   /**
@@ -534,13 +537,13 @@
    * @return the directory name.
    */
   public Path getWorkingDirectory() {
-    String name = get("mapred.working.dir");
+    String name = get(JobContext.WORKING_DIR);
     if (name != null) {
       return new Path(name);
     } else {
       try {
         Path dir = FileSystem.get(this).getWorkingDirectory();
-        set("mapred.working.dir", dir.toString());
+        set(JobContext.WORKING_DIR, dir.toString());
         return dir;
       } catch (IOException e) {
         throw new RuntimeException(e);
@@ -555,14 +558,14 @@
    * -1 signifies no limit
    */
   public void setNumTasksToExecutePerJvm(int numTasks) {
-    setInt("mapred.job.reuse.jvm.num.tasks", numTasks);
+    setInt(JobContext.JVM_NUMTASKS_TORUN, numTasks);
   }
   
   /**
    * Get the number of tasks that a spawned JVM should execute
    */
   public int getNumTasksToExecutePerJvm() {
-    return getInt("mapred.job.reuse.jvm.num.tasks", 1);
+    return getInt(JobContext.JVM_NUMTASKS_TORUN, 1);
   }
   
   /**
@@ -640,7 +643,7 @@
    * @param compress should the map outputs be compressed?
    */
   public void setCompressMapOutput(boolean compress) {
-    setBoolean("mapred.compress.map.output", compress);
+    setBoolean(JobContext.MAP_OUTPUT_COMPRESS, compress);
   }
   
   /**
@@ -650,7 +653,7 @@
    *         <code>false</code> otherwise.
    */
   public boolean getCompressMapOutput() {
-    return getBoolean("mapred.compress.map.output", false);
+    return getBoolean(JobContext.MAP_OUTPUT_COMPRESS, false);
   }
 
   /**
@@ -662,7 +665,7 @@
   public void 
   setMapOutputCompressorClass(Class<? extends CompressionCodec> codecClass) {
     setCompressMapOutput(true);
-    setClass("mapred.map.output.compression.codec", codecClass, 
+    setClass(JobContext.MAP_OUTPUT_COMPRESS_CODEC, codecClass, 
              CompressionCodec.class);
   }
   
@@ -677,7 +680,7 @@
   public Class<? extends CompressionCodec> 
   getMapOutputCompressorClass(Class<? extends CompressionCodec> defaultValue) {
     Class<? extends CompressionCodec> codecClass = defaultValue;
-    String name = get("mapred.map.output.compression.codec");
+    String name = get(JobContext.MAP_OUTPUT_COMPRESS_CODEC);
     if (name != null) {
       try {
         codecClass = getClassByName(name).asSubclass(CompressionCodec.class);
@@ -697,7 +700,7 @@
    * @return the map output key class.
    */
   public Class<?> getMapOutputKeyClass() {
-    Class<?> retv = getClass("mapred.mapoutput.key.class", null, Object.class);
+    Class<?> retv = getClass(JobContext.MAP_OUTPUT_KEY_CLASS, null, Object.class);
     if (retv == null) {
       retv = getOutputKeyClass();
     }
@@ -712,7 +715,7 @@
    * @param theClass the map output key class.
    */
   public void setMapOutputKeyClass(Class<?> theClass) {
-    setClass("mapred.mapoutput.key.class", theClass, Object.class);
+    setClass(JobContext.MAP_OUTPUT_KEY_CLASS, theClass, Object.class);
   }
   
   /**
@@ -723,7 +726,7 @@
    * @return the map output value class.
    */
   public Class<?> getMapOutputValueClass() {
-    Class<?> retv = getClass("mapred.mapoutput.value.class", null,
+    Class<?> retv = getClass(JobContext.MAP_OUTPUT_VALUE_CLASS, null,
         Object.class);
     if (retv == null) {
       retv = getOutputValueClass();
@@ -739,7 +742,7 @@
    * @param theClass the map output value class.
    */
   public void setMapOutputValueClass(Class<?> theClass) {
-    setClass("mapred.mapoutput.value.class", theClass, Object.class);
+    setClass(JobContext.MAP_OUTPUT_VALUE_CLASS, theClass, Object.class);
   }
   
   /**
@@ -748,7 +751,7 @@
    * @return the key class for the job output data.
    */
   public Class<?> getOutputKeyClass() {
-    return getClass("mapred.output.key.class",
+    return getClass(JobContext.OUTPUT_KEY_CLASS,
                     LongWritable.class, Object.class);
   }
   
@@ -758,7 +761,7 @@
    * @param theClass the key class for the job output data.
    */
   public void setOutputKeyClass(Class<?> theClass) {
-    setClass("mapred.output.key.class", theClass, Object.class);
+    setClass(JobContext.OUTPUT_KEY_CLASS, theClass, Object.class);
   }
 
   /**
@@ -767,8 +770,8 @@
    * @return the {@link RawComparator} comparator used to compare keys.
    */
   public RawComparator getOutputKeyComparator() {
-    Class<? extends RawComparator> theClass = getClass("mapred.output.key.comparator.class",
-	        null, RawComparator.class);
+    Class<? extends RawComparator> theClass = getClass(
+      JobContext.KEY_COMPARATOR, null, RawComparator.class);
     if (theClass != null)
       return ReflectionUtils.newInstance(theClass, this);
     return WritableComparator.get(getMapOutputKeyClass().asSubclass(WritableComparable.class));
@@ -782,7 +785,7 @@
    * @see #setOutputValueGroupingComparator(Class)                 
    */
   public void setOutputKeyComparatorClass(Class<? extends RawComparator> theClass) {
-    setClass("mapred.output.key.comparator.class",
+    setClass(JobContext.KEY_COMPARATOR,
              theClass, RawComparator.class);
   }
 
@@ -803,14 +806,14 @@
    */
   public void setKeyFieldComparatorOptions(String keySpec) {
     setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
-    set("mapred.text.key.comparator.options", keySpec);
+    set(KeyFieldBasedComparator.COMPARATOR_OPTIONS, keySpec);
   }
   
   /**
    * Get the {@link KeyFieldBasedComparator} options
    */
   public String getKeyFieldComparatorOption() {
-    return get("mapred.text.key.comparator.options");
+    return get(KeyFieldBasedComparator.COMPARATOR_OPTIONS);
   }
 
   /**
@@ -828,14 +831,14 @@
    */
   public void setKeyFieldPartitionerOptions(String keySpec) {
     setPartitionerClass(KeyFieldBasedPartitioner.class);
-    set("mapred.text.key.partitioner.options", keySpec);
+    set(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS, keySpec);
   }
   
   /**
    * Get the {@link KeyFieldBasedPartitioner} options
    */
   public String getKeyFieldPartitionerOption() {
-    return get("mapred.text.key.partitioner.options");
+    return get(KeyFieldBasedPartitioner.PARTITIONER_OPTIONS);
   }
 
   /** 
@@ -846,8 +849,8 @@
    * @see #setOutputValueGroupingComparator(Class) for details.  
    */
   public RawComparator getOutputValueGroupingComparator() {
-    Class<? extends RawComparator> theClass = getClass("mapred.output.value.groupfn.class", null,
-        RawComparator.class);
+    Class<? extends RawComparator> theClass = getClass(
+      JobContext.GROUP_COMPARATOR_CLASS, null, RawComparator.class);
     if (theClass == null) {
       return getOutputKeyComparator();
     }
@@ -881,8 +884,8 @@
    * @see #setOutputKeyComparatorClass(Class)                 
    */
   public void setOutputValueGroupingComparator(
-		  Class<? extends RawComparator> theClass) {
-    setClass("mapred.output.value.groupfn.class",
+      Class<? extends RawComparator> theClass) {
+    setClass(JobContext.GROUP_COMPARATOR_CLASS,
              theClass, RawComparator.class);
   }
 
@@ -926,7 +929,7 @@
    * @return the value class for job outputs.
    */
   public Class<?> getOutputValueClass() {
-    return getClass("mapred.output.value.class", Text.class, Object.class);
+    return getClass(JobContext.OUTPUT_VALUE_CLASS, Text.class, Object.class);
   }
   
   /**
@@ -935,7 +938,7 @@
    * @param theClass the value class for job outputs.
    */
   public void setOutputValueClass(Class<?> theClass) {
-    setClass("mapred.output.value.class", theClass, Object.class);
+    setClass(JobContext.OUTPUT_VALUE_CLASS, theClass, Object.class);
   }
 
   /**
@@ -1086,7 +1089,7 @@
    *         <code>false</code> otherwise.
    */
   public boolean getMapSpeculativeExecution() { 
-    return getBoolean("mapred.map.tasks.speculative.execution", true);
+    return getBoolean(JobContext.MAP_SPECULATIVE, true);
   }
   
   /**
@@ -1097,7 +1100,7 @@
    *                             else <code>false</code>.
    */
   public void setMapSpeculativeExecution(boolean speculativeExecution) {
-    setBoolean("mapred.map.tasks.speculative.execution", speculativeExecution);
+    setBoolean(JobContext.MAP_SPECULATIVE, speculativeExecution);
   }
 
   /**
@@ -1109,7 +1112,7 @@
    *         <code>false</code> otherwise.
    */
   public boolean getReduceSpeculativeExecution() { 
-    return getBoolean("mapred.reduce.tasks.speculative.execution", true);
+    return getBoolean(JobContext.REDUCE_SPECULATIVE, true);
   }
   
   /**
@@ -1120,7 +1123,7 @@
    *                             else <code>false</code>.
    */
   public void setReduceSpeculativeExecution(boolean speculativeExecution) {
-    setBoolean("mapred.reduce.tasks.speculative.execution", 
+    setBoolean(JobContext.REDUCE_SPECULATIVE, 
                speculativeExecution);
   }
 
@@ -1130,7 +1133,7 @@
    * 
    * @return the number of map tasks for this job.
    */
-  public int getNumMapTasks() { return getInt("mapred.map.tasks", 1); }
+  public int getNumMapTasks() { return getInt(JobContext.NUM_MAPS, 1); }
   
   /**
    * Set the number of map tasks for this job.
@@ -1157,8 +1160,8 @@
    * bytes, of input files. However, the {@link FileSystem} blocksize of the 
    * input files is treated as an upper bound for input splits. A lower bound 
    * on the split size can be set via 
-   * <a href="{@docRoot}/../mapred-default.html#mapred.min.split.size">
-   * mapred.min.split.size</a>.</p>
+   * <a href="{@docRoot}/../mapred-default.html#mapreduce.input.fileinputformat.split.minsize">
+   * mapreduce.input.fileinputformat.split.minsize</a>.</p>
    *  
    * <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB, 
    * you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is 
@@ -1170,7 +1173,7 @@
    * @see FileSystem#getDefaultBlockSize()
    * @see FileStatus#getBlockSize()
    */
-  public void setNumMapTasks(int n) { setInt("mapred.map.tasks", n); }
+  public void setNumMapTasks(int n) { setInt(JobContext.NUM_MAPS, n); }
 
   /**
    * Get the configured number of reduce tasks for this job. Defaults to
@@ -1178,7 +1181,7 @@
    * 
    * @return the number of reduce tasks for this job.
    */
-  public int getNumReduceTasks() { return getInt("mapred.reduce.tasks", 1); }
+  public int getNumReduceTasks() { return getInt(JobContext.NUM_REDUCES, 1); }
   
   /**
    * Set the requisite number of reduce tasks for this job.
@@ -1187,8 +1190,8 @@
    * 
    * <p>The right number of reduces seems to be <code>0.95</code> or 
    * <code>1.75</code> multiplied by (&lt;<i>no. of nodes</i>&gt; * 
-   * <a href="{@docRoot}/../mapred-default.html#mapred.tasktracker.reduce.tasks.maximum">
-   * mapred.tasktracker.reduce.tasks.maximum</a>).
+   * <a href="{@docRoot}/../mapred-default.html#mapreduce.tasktracker.reduce.tasks.maximum">
+   * mapreduce.tasktracker.reduce.tasks.maximum</a>).
    * </p>
    * 
    * <p>With <code>0.95</code> all of the reduces can launch immediately and 
@@ -1214,17 +1217,17 @@
    * 
    * @param n the number of reduce tasks for this job.
    */
-  public void setNumReduceTasks(int n) { setInt("mapred.reduce.tasks", n); }
+  public void setNumReduceTasks(int n) { setInt(JobContext.NUM_REDUCES, n); }
   
   /** 
    * Get the configured number of maximum attempts that will be made to run a
-   * map task, as specified by the <code>mapred.map.max.attempts</code>
+   * map task, as specified by the <code>mapreduce.map.maxattempts</code>
    * property. If this property is not already set, the default is 4 attempts.
    *  
    * @return the max number of attempts per map task.
    */
   public int getMaxMapAttempts() {
-    return getInt("mapred.map.max.attempts", 4);
+    return getInt(JobContext.MAP_MAX_ATTEMPTS, 4);
   }
   
   /** 
@@ -1234,18 +1237,18 @@
    * @param n the number of attempts per map task.
    */
   public void setMaxMapAttempts(int n) {
-    setInt("mapred.map.max.attempts", n);
+    setInt(JobContext.MAP_MAX_ATTEMPTS, n);
   }
 
   /** 
    * Get the configured number of maximum attempts  that will be made to run a
-   * reduce task, as specified by the <code>mapred.reduce.max.attempts</code>
+   * reduce task, as specified by the <code>mapreduce.reduce.maxattempts</code>
    * property. If this property is not already set, the default is 4 attempts.
    * 
    * @return the max number of attempts per reduce task.
    */
   public int getMaxReduceAttempts() {
-    return getInt("mapred.reduce.max.attempts", 4);
+    return getInt(JobContext.REDUCE_MAX_ATTEMPTS, 4);
   }
   /** 
    * Expert: Set the number of maximum attempts that will be made to run a
@@ -1254,7 +1257,7 @@
    * @param n the number of attempts per reduce task.
    */
   public void setMaxReduceAttempts(int n) {
-    setInt("mapred.reduce.max.attempts", n);
+    setInt(JobContext.REDUCE_MAX_ATTEMPTS, n);
   }
   
   /**
@@ -1264,7 +1267,7 @@
    * @return the job's name, defaulting to "".
    */
   public String getJobName() {
-    return get("mapred.job.name", "");
+    return get(JobContext.JOB_NAME, "");
   }
   
   /**
@@ -1273,7 +1276,7 @@
    * @param name the job's new name.
    */
   public void setJobName(String name) {
-    set("mapred.job.name", name);
+    set(JobContext.JOB_NAME, name);
   }
   
   /**
@@ -1291,6 +1294,7 @@
    *
    * @return the session identifier, defaulting to "".
    */
+  @Deprecated
   public String getSessionId() {
       return get("session.id", "");
   }
@@ -1300,6 +1304,7 @@
    *
    * @param sessionId the new session id.
    */
+  @Deprecated
   public void setSessionId(String sessionId) {
       set("session.id", sessionId);
   }
@@ -1312,7 +1317,7 @@
    * @param noFailures maximum no. of failures of a given job per tasktracker.
    */
   public void setMaxTaskFailuresPerTracker(int noFailures) {
-    setInt("mapred.max.tracker.failures", noFailures);
+    setInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, noFailures);
   }
   
   /**
@@ -1323,7 +1328,7 @@
    * @return the maximum no. of failures of a given job per tasktracker.
    */
   public int getMaxTaskFailuresPerTracker() {
-    return getInt("mapred.max.tracker.failures", 4); 
+    return getInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, 4); 
   }
 
   /**
@@ -1340,7 +1345,7 @@
    *         the job being aborted.
    */
   public int getMaxMapTaskFailuresPercent() {
-    return getInt("mapred.max.map.failures.percent", 0);
+    return getInt(JobContext.MAP_FAILURES_MAX_PERCENT, 0);
   }
 
   /**
@@ -1354,7 +1359,7 @@
    *                the job being aborted.
    */
   public void setMaxMapTaskFailuresPercent(int percent) {
-    setInt("mapred.max.map.failures.percent", percent);
+    setInt(JobContext.MAP_FAILURES_MAX_PERCENT, percent);
   }
   
   /**
@@ -1371,7 +1376,7 @@
    *         the job being aborted.
    */
   public int getMaxReduceTaskFailuresPercent() {
-    return getInt("mapred.max.reduce.failures.percent", 0);
+    return getInt(JobContext.REDUCE_FAILURES_MAXPERCENT, 0);
   }
   
   /**
@@ -1385,7 +1390,7 @@
    *                the job being aborted.
    */
   public void setMaxReduceTaskFailuresPercent(int percent) {
-    setInt("mapred.max.reduce.failures.percent", percent);
+    setInt(JobContext.REDUCE_FAILURES_MAXPERCENT, percent);
   }
   
   /**
@@ -1394,7 +1399,7 @@
    * @param prio the {@link JobPriority} for this job.
    */
   public void setJobPriority(JobPriority prio) {
-    set("mapred.job.priority", prio.toString());
+    set(JobContext.PRIORITY, prio.toString());
   }
   
   /**
@@ -1403,7 +1408,7 @@
    * @return the {@link JobPriority} for this job.
    */
   public JobPriority getJobPriority() {
-    String prio = get("mapred.job.priority");
+    String prio = get(JobContext.PRIORITY);
     if(prio == null) {
       return JobPriority.NORMAL;
     }
@@ -1416,7 +1421,7 @@
    * @return true if some tasks will be profiled
    */
   public boolean getProfileEnabled() {
-    return getBoolean("mapred.task.profile", false);
+    return getBoolean(JobContext.TASK_PROFILE, false);
   }
 
   /**
@@ -1426,7 +1431,7 @@
    * @param newValue true means it should be gathered
    */
   public void setProfileEnabled(boolean newValue) {
-    setBoolean("mapred.task.profile", newValue);
+    setBoolean(JobContext.TASK_PROFILE, newValue);
   }
 
   /**
@@ -1438,7 +1443,7 @@
    * @return the parameters to pass to the task child to configure profiling
    */
   public String getProfileParams() {
-    return get("mapred.task.profile.params",
+    return get(JobContext.TASK_PROFILE_PARAMS,
                "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y," +
                  "verbose=n,file=%s");
   }
@@ -1453,7 +1458,7 @@
    * @param value the configuration string
    */
   public void setProfileParams(String value) {
-    set("mapred.task.profile.params", value);
+    set(JobContext.TASK_PROFILE_PARAMS, value);
   }
 
   /**
@@ -1462,8 +1467,8 @@
    * @return the task ranges
    */
   public IntegerRanges getProfileTaskRange(boolean isMap) {
-    return getRange((isMap ? "mapred.task.profile.maps" : 
-                       "mapred.task.profile.reduces"), "0-2");
+    return getRange((isMap ? JobContext.NUM_MAP_PROFILES : 
+                       JobContext.NUM_REDUCE_PROFILES), "0-2");
   }
 
   /**
@@ -1473,9 +1478,9 @@
    */
   public void setProfileTaskRange(boolean isMap, String newValue) {
     // parse the value to make sure it is legal
-    new Configuration.IntegerRanges(newValue);
-    set((isMap ? "mapred.task.profile.maps" : "mapred.task.profile.reduces"), 
-        newValue);
+    new Configuration.IntegerRanges(newValue);
+    set((isMap ? JobContext.NUM_MAP_PROFILES : JobContext.NUM_REDUCE_PROFILES), 
+        newValue);
   }
 
   /**
@@ -1502,7 +1507,7 @@
    * @param mDbgScript the script name
    */
   public void  setMapDebugScript(String mDbgScript) {
-    set("mapred.map.task.debug.script", mDbgScript);
+    set(JobContext.MAP_DEBUG_SCRIPT, mDbgScript);
   }
   
   /**
@@ -1512,7 +1517,7 @@
    * @see #setMapDebugScript(String)
    */
   public String getMapDebugScript() {
-    return get("mapred.map.task.debug.script");
+    return get(JobContext.MAP_DEBUG_SCRIPT);
   }
   
   /**
@@ -1539,7 +1544,7 @@
    * @param rDbgScript the script name
    */
   public void  setReduceDebugScript(String rDbgScript) {
-    set("mapred.reduce.task.debug.script", rDbgScript);
+    set(JobContext.REDUCE_DEBUG_SCRIPT, rDbgScript);
   }
   
   /**
@@ -1549,7 +1554,7 @@
    * @see #setReduceDebugScript(String)
    */
   public String getReduceDebugScript() {
-    return get("mapred.reduce.task.debug.script");
+    return get(JobContext.REDUCE_DEBUG_SCRIPT);
   }
 
   /**
@@ -1561,7 +1566,7 @@
    * @see #setJobEndNotificationURI(String)
    */
   public String getJobEndNotificationURI() {
-    return get("job.end.notification.url");
+    return get(JobContext.END_NOTIFICATION_URL);
   }
 
   /**
@@ -1581,7 +1586,7 @@
    *       JobCompletionAndChaining">Job Completion and Chaining</a>
    */
   public void setJobEndNotificationURI(String uri) {
-    set("job.end.notification.url", uri);
+    set(JobContext.END_NOTIFICATION_URL, uri);
   }
 
   /**
@@ -1590,9 +1595,9 @@
    * <p>
    * When a job starts, a shared directory is created at location
    * <code>
-   * ${mapred.local.dir}/taskTracker/$user/jobcache/$jobid/work/ </code>.
+   * ${mapreduce.cluster.local.dir}/taskTracker/$user/jobcache/$jobid/work/ </code>.
    * This directory is exposed to the users through 
-   * <code>job.local.dir </code>.
+   * <code>mapreduce.job.local.dir </code>.
    * So, the tasks can use this space 
    * as scratch space and share files among them. </p>
    * This value is also available as a Java system property.
@@ -1600,7 +1605,7 @@
    * @return The localized job specific shared directory
    */
   public String getJobLocalDir() {
-    return get(TaskTracker.JOB_LOCAL_DIR);
+    return get(JobContext.JOB_LOCAL_DIR);
   }
 
   public long getMemoryForMapTask() {
@@ -1642,7 +1647,7 @@
    * @return name of the queue
    */
   public String getQueueName() {
-    return get("mapred.job.queue.name", DEFAULT_QUEUE_NAME);
+    return get(JobContext.QUEUE_NAME, DEFAULT_QUEUE_NAME);
   }
   
   /**
@@ -1651,7 +1656,7 @@
    * @param queueName Name of the queue
    */
   public void setQueueName(String queueName) {
-    set("mapred.job.queue.name", queueName);
+    set(JobContext.QUEUE_NAME, queueName);
   }
   
   /**
@@ -1737,8 +1742,8 @@
    * {@link #MAPRED_TASK_MAXVMEM_PROPERTY}
    * <p/>
    * mapred.task.maxvmem is split into
-   * mapred.job.map.memory.mb
-   * and mapred.job.map.memory.mb,mapred
+   * mapreduce.map.memory.mb
+   * and mapreduce.reduce.memory.mb;
    * each of the new keys is set
    * to mapred.task.maxvmem / 1024,
    * as the new values are in MB
@@ -1780,8 +1785,8 @@
    * {@link #MAPRED_TASK_MAXVMEM_PROPERTY}
    * <p/>
    * mapred.task.maxvmem is split into
-   * mapred.job.map.memory.mb
-   * and mapred.job.map.memory.mb,mapred
+   * mapreduce.map.memory.mb
+   * and mapreduce.reduce.memory.mb;
    * each of the new keys is set
    * to mapred.task.maxvmem / 1024,
    * as the new values are in MB

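Client code that sticks to the typed JobConf accessors is insulated from
this rename, since the setters now write the JobContext-named keys; only
raw string lookups need updating. A brief sketch (the job settings below
are arbitrary example values):

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.JobContext;

    public class RenamedKeysSketch {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        job.setJobName("example");        // now writes JobContext.JOB_NAME
        job.setNumReduceTasks(4);         // now writes JobContext.NUM_REDUCES
        job.setCompressMapOutput(true);   // now writes JobContext.MAP_OUTPUT_COMPRESS
        // Raw lookups should use the constants rather than old literals:
        System.out.println(job.get(JobContext.JOB_NAME) + " reduces="
            + job.getInt(JobContext.NUM_REDUCES, 1));
      }
    }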
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobEndNotifier.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobEndNotifier.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobEndNotifier.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobEndNotifier.java Fri Sep 18 15:09:48 2009
@@ -98,8 +98,8 @@
     String uri = conf.getJobEndNotificationURI();
     if (uri != null) {
       // +1 to make logic for first notification identical to a retry
-      int retryAttempts = conf.getInt("job.end.retry.attempts", 0) + 1;
-      long retryInterval = conf.getInt("job.end.retry.interval", 30000);
+      int retryAttempts = conf.getInt(JobContext.END_NOTIFICATION_RETRIES, 0) + 1;
+      long retryInterval = conf.getInt(JobContext.END_NOTIFICATION_RETRIE_INTERVAL, 30000);
       if (uri.contains("$jobId")) {
         uri = uri.replace("$jobId", status.getJobID().toString());
       }

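Notification retries are now configured through the constants read above.
A sketch of a client setting them (the URI and values are illustrative;
$jobId is substituted at notification time, as the hunk shows):

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.JobContext;

    public class JobEndNotifySketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.setJobEndNotificationURI("http://example.org/notify?job=$jobId");
        conf.setInt(JobContext.END_NOTIFICATION_RETRIES, 3);             // extra attempts
        conf.setInt(JobContext.END_NOTIFICATION_RETRIE_INTERVAL, 30000); // millis between tries
      }
    }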

