Delivered-To: apmail-hadoop-mapreduce-commits-archive@minotaur.apache.org
Received: (qmail 14662 invoked from network); 18 Sep 2009 15:11:12 -0000
Received: from hermes.apache.org (HELO mail.apache.org) (140.211.11.3)
by minotaur.apache.org with SMTP; 18 Sep 2009 15:11:12 -0000
Received: (qmail 80738 invoked by uid 500); 18 Sep 2009 15:11:12 -0000
Delivered-To: apmail-hadoop-mapreduce-commits-archive@hadoop.apache.org
Received: (qmail 80690 invoked by uid 500); 18 Sep 2009 15:11:12 -0000
Mailing-List: contact mapreduce-commits-help@hadoop.apache.org; run by ezmlm
Precedence: bulk
Reply-To: mapreduce-dev@hadoop.apache.org
Delivered-To: mailing list mapreduce-commits@hadoop.apache.org
Received: (qmail 80680 invoked by uid 99); 18 Sep 2009 15:11:12 -0000
Received: from athena.apache.org (HELO athena.apache.org) (140.211.11.136)
by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 18 Sep 2009 15:11:12 +0000
X-ASF-Spam-Status: No, hits=-2000.0 required=10.0
tests=ALL_TRUSTED
X-Spam-Check-By: apache.org
Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4)
by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 18 Sep 2009 15:10:57 +0000
Received: by eris.apache.org (Postfix, from userid 65534)
id 18A12238898D; Fri, 18 Sep 2009 15:10:08 +0000 (UTC)
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Subject: svn commit: r816664 [6/9] - in /hadoop/mapreduce/trunk: ./ conf/
src/benchmarks/gridmix/ src/benchmarks/gridmix/pipesort/
src/benchmarks/gridmix2/
src/benchmarks/gridmix2/src/java/org/apache/hadoop/mapreduce/
src/c++/pipes/impl/ src/c++/task-controller...
Date: Fri, 18 Sep 2009 15:10:02 -0000
To: mapreduce-commits@hadoop.apache.org
From: sharad@apache.org
X-Mailer: svnmailer-1.0.8
Message-Id: <20090918151008.18A12238898D@eris.apache.org>
X-Virus-Checked: Checked by ClamAV on apache.org
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Application.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Application.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Application.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Application.java Fri Sep 18 15:09:48 2009
@@ -29,6 +29,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.io.FloatWritable;
@@ -80,10 +81,10 @@
Map<String, String> env = new HashMap<String, String>();
// add TMPDIR environment variable with the value of java.io.tmpdir
env.put("TMPDIR", System.getProperty("java.io.tmpdir"));
- env.put("hadoop.pipes.command.port",
+ env.put(Submitter.PORT,
Integer.toString(serverSocket.getLocalPort()));
List<String> cmd = new ArrayList<String>();
- String interpretor = conf.get("hadoop.pipes.executable.interpretor");
+ String interpretor = conf.get(Submitter.INTERPRETOR);
if (interpretor != null) {
cmd.add(interpretor);
}
@@ -96,7 +97,8 @@
}
cmd.add(executable);
// wrap the command in a stdout/stderr capture
- TaskAttemptID taskid = TaskAttemptID.forName(conf.get("mapred.task.id"));
+ TaskAttemptID taskid =
+ TaskAttemptID.forName(conf.get(JobContext.TASK_ATTEMPT_ID));
File stdout = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDOUT);
File stderr = TaskLog.getTaskLogFile(taskid, TaskLog.LogName.STDERR);
long logLength = TaskLog.getTaskLogLength(conf);
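
A minimal sketch, not part of this commit: code that previously looked up the
"mapred.task.id" literal can resolve the current attempt through the renamed key
(the TaskIdLookup class name below is purely illustrative).

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapreduce.JobContext;

public class TaskIdLookup {
  public static TaskAttemptID currentAttempt(JobConf conf) {
    // JobContext.TASK_ATTEMPT_ID is "mapreduce.task.attempt.id".
    return TaskAttemptID.forName(conf.get(JobContext.TASK_ATTEMPT_ID));
  }
}
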
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesMapRunner.java Fri Sep 18 15:09:48 2009
@@ -30,6 +30,7 @@
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SkipBadRecords;
+import org.apache.hadoop.mapreduce.JobContext;
/**
* An adaptor to run a C++ mapper.
@@ -76,7 +77,7 @@
boolean isJavaInput = Submitter.getIsJavaRecordReader(job);
downlink.runMap(reporter.getInputSplit(),
job.getNumReduceTasks(), isJavaInput);
- boolean skipping = job.getBoolean("mapred.skip.on", false);
+ boolean skipping = job.getBoolean(JobContext.SKIP_RECORDS, false);
try {
if (isJavaInput) {
// allocate key & value instances that are re-used for all entries
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesNonJavaInputFormat.java Fri Sep 18 15:09:48 2009
@@ -37,7 +37,7 @@
* The only useful thing this does is set up the Map-Reduce job to get the
* {@link PipesDummyRecordReader}, everything else left for the 'actual'
* InputFormat specified by the user which is given by
- * mapred.pipes.user.inputformat.
+ * mapreduce.pipes.inputformat.
*/
class PipesNonJavaInputFormat
implements InputFormat<FloatWritable, NullWritable> {
@@ -51,7 +51,7 @@
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
// Delegate the generation of input splits to the 'original' InputFormat
return ReflectionUtils.newInstance(
- job.getClass("mapred.pipes.user.inputformat",
+ job.getClass(Submitter.INPUT_FORMAT,
TextInputFormat.class,
InputFormat.class), job).getSplits(job, numSplits);
}
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/PipesReducer.java Fri Sep 18 15:09:48 2009
@@ -27,6 +27,7 @@
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SkipBadRecords;
+import org.apache.hadoop.mapreduce.JobContext;
import java.io.IOException;
import java.util.Iterator;
@@ -49,7 +50,7 @@
//disable the auto increment of the counter. For pipes, no of processed
//records could be different(equal or less) than the no of records input.
SkipBadRecords.setAutoIncrReducerProcCount(job, false);
- skipping = job.getBoolean("mapred.skip.on", false);
+ skipping = job.getBoolean(JobContext.SKIP_RECORDS, false);
}
/**
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Submitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Submitter.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Submitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/pipes/Submitter.java Fri Sep 18 15:09:48 2009
@@ -32,8 +32,6 @@
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
-import org.apache.commons.cli.OptionGroup;
-import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.Parser;
@@ -41,6 +39,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -68,6 +67,18 @@
public class Submitter extends Configured implements Tool {
protected static final Log LOG = LogFactory.getLog(Submitter.class);
+ public static final String PRESERVE_COMMANDFILE =
+ "mapreduce.pipes.commandfile.preserve";
+ public static final String EXECUTABLE = "mapreduce.pipes.executable";
+ public static final String INTERPRETOR =
+ "mapreduce.pipes.executable.interpretor";
+ public static final String IS_JAVA_MAP = "mapreduce.pipes.isjavamapper";
+ public static final String IS_JAVA_RR = "mapreduce.pipes.isjavarecordreader";
+ public static final String IS_JAVA_RW = "mapreduce.pipes.isjavarecordwriter";
+ public static final String IS_JAVA_REDUCE = "mapreduce.pipes.isjavareducer";
+ public static final String PARTITIONER = "mapreduce.pipes.partitioner";
+ public static final String INPUT_FORMAT = "mapreduce.pipes.inputformat";
+ public static final String PORT = "mapreduce.pipes.command.port";
public Submitter() {
this(new Configuration());
@@ -83,9 +94,9 @@
* @return the URI where the application's executable is located
*/
public static String getExecutable(JobConf conf) {
- return conf.get("hadoop.pipes.executable");
+ return conf.get(Submitter.EXECUTABLE);
}
-
+
/**
* Set the URI for the application's executable. Normally this is a hdfs:
* location.
@@ -93,7 +104,7 @@
* @param executable The URI of the application's executable.
*/
public static void setExecutable(JobConf conf, String executable) {
- conf.set("hadoop.pipes.executable", executable);
+ conf.set(Submitter.EXECUTABLE, executable);
}
/**
@@ -102,7 +113,7 @@
* @param value the new value
*/
public static void setIsJavaRecordReader(JobConf conf, boolean value) {
- conf.setBoolean("hadoop.pipes.java.recordreader", value);
+ conf.setBoolean(Submitter.IS_JAVA_RR, value);
}
/**
@@ -111,7 +122,7 @@
* @return is it a Java RecordReader?
*/
public static boolean getIsJavaRecordReader(JobConf conf) {
- return conf.getBoolean("hadoop.pipes.java.recordreader", false);
+ return conf.getBoolean(Submitter.IS_JAVA_RR, false);
}
/**
@@ -120,7 +131,7 @@
* @param value the new value
*/
public static void setIsJavaMapper(JobConf conf, boolean value) {
- conf.setBoolean("hadoop.pipes.java.mapper", value);
+ conf.setBoolean(Submitter.IS_JAVA_MAP, value);
}
/**
@@ -129,7 +140,7 @@
* @return is it a Java Mapper?
*/
public static boolean getIsJavaMapper(JobConf conf) {
- return conf.getBoolean("hadoop.pipes.java.mapper", false);
+ return conf.getBoolean(Submitter.IS_JAVA_MAP, false);
}
/**
@@ -138,7 +149,7 @@
* @param value the new value
*/
public static void setIsJavaReducer(JobConf conf, boolean value) {
- conf.setBoolean("hadoop.pipes.java.reducer", value);
+ conf.setBoolean(Submitter.IS_JAVA_REDUCE, value);
}
/**
@@ -147,7 +158,7 @@
* @return is it a Java Reducer?
*/
public static boolean getIsJavaReducer(JobConf conf) {
- return conf.getBoolean("hadoop.pipes.java.reducer", false);
+ return conf.getBoolean(Submitter.IS_JAVA_REDUCE, false);
}
/**
@@ -156,7 +167,7 @@
* @param value the new value to set
*/
public static void setIsJavaRecordWriter(JobConf conf, boolean value) {
- conf.setBoolean("hadoop.pipes.java.recordwriter", value);
+ conf.setBoolean(Submitter.IS_JAVA_RW, value);
}
/**
@@ -165,7 +176,7 @@
* @return true, if the output of the job will be written by Java
*/
public static boolean getIsJavaRecordWriter(JobConf conf) {
- return conf.getBoolean("hadoop.pipes.java.recordwriter", false);
+ return conf.getBoolean(Submitter.IS_JAVA_RW, false);
}
/**
@@ -187,7 +198,7 @@
* @param cls the user's partitioner class
*/
static void setJavaPartitioner(JobConf conf, Class cls) {
- conf.set("hadoop.pipes.partitioner", cls.getName());
+ conf.set(Submitter.PARTITIONER, cls.getName());
}
/**
@@ -196,7 +207,7 @@
* @return the class that the user submitted
*/
static Class<? extends Partitioner> getJavaPartitioner(JobConf conf) {
- return conf.getClass("hadoop.pipes.partitioner",
+ return conf.getClass(Submitter.PARTITIONER,
HashPartitioner.class,
Partitioner.class);
}
@@ -209,12 +220,12 @@
* JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
* being deleted.
* To run using the data file, set the environment variable
- * "hadoop.pipes.command.file" to point to the file.
+ * "mapreduce.pipes.commandfile" to point to the file.
* @param conf the configuration to check
* @return will the framework save the command file?
*/
public static boolean getKeepCommandFile(JobConf conf) {
- return conf.getBoolean("hadoop.pipes.command-file.keep", false);
+ return conf.getBoolean(Submitter.PRESERVE_COMMANDFILE, false);
}
/**
@@ -223,7 +234,7 @@
* @param keep the new value
*/
public static void setKeepCommandFile(JobConf conf, boolean keep) {
- conf.setBoolean("hadoop.pipes.command-file.keep", keep);
+ conf.setBoolean(Submitter.PRESERVE_COMMANDFILE, keep);
}
/**
@@ -279,15 +290,15 @@
}
}
String textClassname = Text.class.getName();
- setIfUnset(conf, "mapred.mapoutput.key.class", textClassname);
- setIfUnset(conf, "mapred.mapoutput.value.class", textClassname);
- setIfUnset(conf, "mapred.output.key.class", textClassname);
- setIfUnset(conf, "mapred.output.value.class", textClassname);
+ setIfUnset(conf, JobContext.MAP_OUTPUT_KEY_CLASS, textClassname);
+ setIfUnset(conf, JobContext.MAP_OUTPUT_VALUE_CLASS, textClassname);
+ setIfUnset(conf, JobContext.OUTPUT_KEY_CLASS, textClassname);
+ setIfUnset(conf, JobContext.OUTPUT_VALUE_CLASS, textClassname);
// Use PipesNonJavaInputFormat if necessary to handle progress reporting
// from C++ RecordReaders ...
if (!getIsJavaRecordReader(conf) && !getIsJavaMapper(conf)) {
- conf.setClass("mapred.pipes.user.inputformat",
+ conf.setClass(Submitter.INPUT_FORMAT,
conf.getInputFormat().getClass(), InputFormat.class);
conf.setInputFormat(PipesNonJavaInputFormat.class);
}
@@ -302,8 +313,8 @@
DistributedCache.createSymlink(conf);
// set default gdb commands for map and reduce task
String defScript = "$HADOOP_HOME/src/c++/pipes/debug/pipes-default-script";
- setIfUnset(conf,"mapred.map.task.debug.script",defScript);
- setIfUnset(conf,"mapred.reduce.task.debug.script",defScript);
+ setIfUnset(conf, JobContext.MAP_DEBUG_SCRIPT,defScript);
+ setIfUnset(conf, JobContext.REDUCE_DEBUG_SCRIPT,defScript);
}
URI[] fileCache = DistributedCache.getCacheFiles(conf);
if (fileCache == null) {
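
A minimal usage sketch, not part of this commit: client code that used to set the
old "hadoop.pipes.*" literals directly can switch to the new public Submitter
constants (the PipesJobSetup class name below is illustrative only).

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.pipes.Submitter;

public class PipesJobSetup {
  public static JobConf configure(String executableUri) {
    JobConf conf = new JobConf();
    // Submitter.EXECUTABLE is "mapreduce.pipes.executable" after this change.
    conf.set(Submitter.EXECUTABLE, executableUri);
    // Submitter.IS_JAVA_RR is "mapreduce.pipes.isjavarecordreader".
    conf.setBoolean(Submitter.IS_JAVA_RR, true);
    return conf;
  }
}
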
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Cluster.java Fri Sep 18 15:09:48 2009
@@ -33,6 +33,7 @@
import org.apache.hadoop.mapreduce.jobhistory.JobHistory;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.server.jobtracker.State;
+import org.apache.hadoop.mapreduce.util.ConfigUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UnixUserGroupInformation;
@@ -48,8 +49,7 @@
private Path jobHistoryDir = null;
static {
- Configuration.addDefaultResource("mapred-default.xml");
- Configuration.addDefaultResource("mapred-site.xml");
+ ConfigUtil.loadResources();
}
public Cluster(Configuration conf) throws IOException {
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/InputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/InputFormat.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/InputFormat.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/InputFormat.java Fri Sep 18 15:09:48 2009
@@ -50,8 +50,8 @@
* bytes, of the input files. However, the {@link FileSystem} blocksize of
* the input files is treated as an upper bound for input splits. A lower bound
* on the split size can be set via
- *
- * mapred.min.split.size.
+ *
+ * mapreduce.input.fileinputformat.split.minsize.
*
* Clearly, logical splits based on input-size is insufficient for many
* applications since record boundaries are to respected. In such cases, the
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/Job.java Fri Sep 18 15:09:48 2009
@@ -47,6 +47,7 @@
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;
+import org.apache.hadoop.mapreduce.util.ConfigUtil;
import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.util.StringUtils;
@@ -87,24 +88,28 @@
private static final Log LOG = LogFactory.getLog(Job.class);
public static enum JobState {DEFINE, RUNNING};
private static final long MAX_JOBSTATUS_AGE = 1000 * 2;
- public static final String OUTPUT_FILTER = "jobclient.output.filter";
+ public static final String OUTPUT_FILTER = "mapreduce.client.output.filter";
/** Key in mapred-*.xml that sets completionPollInvervalMillis */
public static final String COMPLETION_POLL_INTERVAL_KEY =
- "jobclient.completion.poll.interval";
+ "mapreduce.client.completion.pollinterval";
/** Default completionPollIntervalMillis is 5000 ms. */
static final int DEFAULT_COMPLETION_POLL_INTERVAL = 5000;
/** Key in mapred-*.xml that sets progMonitorPollIntervalMillis */
public static final String PROGRESS_MONITOR_POLL_INTERVAL_KEY =
- "jobclient.progress.monitor.poll.interval";
+ "mapreduce.client.progressmonitor.pollinterval";
/** Default progMonitorPollIntervalMillis is 1000 ms. */
static final int DEFAULT_MONITOR_POLL_INTERVAL = 1000;
+ public static final String USED_GENERIC_PARSER =
+ "mapreduce.client.genericoptionsparser.used";
+ public static final String SUBMIT_REPLICATION =
+ "mapreduce.client.submit.file.replication";
+
public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
static {
- Configuration.addDefaultResource("mapred-default.xml");
- Configuration.addDefaultResource("mapred-site.xml");
+ ConfigUtil.loadResources();
}
private JobState state = JobState.DEFINE;
@@ -757,7 +762,7 @@
*/
public void setJobSetupCleanupNeeded(boolean needed) {
ensureState(JobState.DEFINE);
- conf.setBoolean("mapred.committer.job.setup.cleanup.needed", needed);
+ conf.setBoolean(SETUP_CLEANUP_NEEDED, needed);
}
/**
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobContext.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobContext.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobContext.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobContext.java Fri Sep 18 15:09:48 2009
@@ -38,15 +38,188 @@
public class JobContext {
// Put all of the attribute names in here so that Job and JobContext are
// consistent.
- protected static final String INPUT_FORMAT_CLASS_ATTR =
- "mapreduce.inputformat.class";
- protected static final String MAP_CLASS_ATTR = "mapreduce.map.class";
- protected static final String COMBINE_CLASS_ATTR = "mapreduce.combine.class";
- protected static final String REDUCE_CLASS_ATTR = "mapreduce.reduce.class";
- protected static final String OUTPUT_FORMAT_CLASS_ATTR =
- "mapreduce.outputformat.class";
- protected static final String PARTITIONER_CLASS_ATTR =
- "mapreduce.partitioner.class";
+ public static final String INPUT_FORMAT_CLASS_ATTR =
+ "mapreduce.job.inputformat.class";
+ public static final String MAP_CLASS_ATTR = "mapreduce.job.map.class";
+ public static final String COMBINE_CLASS_ATTR =
+ "mapreduce.job.combine.class";
+ public static final String REDUCE_CLASS_ATTR =
+ "mapreduce.job.reduce.class";
+ public static final String OUTPUT_FORMAT_CLASS_ATTR =
+ "mapreduce.job.outputformat.class";
+ public static final String PARTITIONER_CLASS_ATTR =
+ "mapreduce.job.partitioner.class";
+
+ public static final String SETUP_CLEANUP_NEEDED =
+ "mapreduce.job.committer.setup.cleanup.needed";
+ public static final String JAR = "mapreduce.job.jar";
+ public static final String ID = "mapreduce.job.id";
+ public static final String JOB_NAME = "mapreduce.job.name";
+ public static final String USER_NAME = "mapreduce.job.user.name";
+ public static final String PRIORITY = "mapreduce.job.priority";
+ public static final String QUEUE_NAME = "mapreduce.job.queuename";
+ public static final String JVM_NUMTASKS_TORUN =
+ "mapreduce.job.jvm.numtasks";
+ public static final String SPLIT_FILE = "mapreduce.job.splitfile";
+ public static final String NUM_MAPS = "mapreduce.job.maps";
+ public static final String MAX_TASK_FAILURES_PER_TRACKER =
+ "mapreduce.job.maxtaskfailures.per.tracker";
+ public static final String COMPLETED_MAPS_FOR_REDUCE_SLOWSTART =
+ "mapreduce.job.reduce.slowstart.completedmaps";
+ public static final String NUM_REDUCES = "mapreduce.job.reduces";
+ public static final String SKIP_RECORDS = "mapreduce.job.skiprecords";
+ public static final String SKIP_OUTDIR = "mapreduce.job.skip.outdir";
+ public static final String SPECULATIVE_SLOWNODE_THRESHOLD =
+ "mapreduce.job.speculative.slownodethreshold";
+ public static final String SPECULATIVE_SLOWTASK_THRESHOLD =
+ "mapreduce.job.speculative.slowtaskthreshold";
+ public static final String SPECULATIVECAP =
+ "mapreduce.job.speculative.speculativecap";
+ public static final String JOB_LOCAL_DIR = "mapreduce.job.local.dir";
+ public static final String OUTPUT_KEY_CLASS =
+ "mapreduce.job.output.key.class";
+ public static final String OUTPUT_VALUE_CLASS =
+ "mapreduce.job.output.value.class";
+ public static final String KEY_COMPARATOR =
+ "mapreduce.job.output.key.comparator.class";
+ public static final String GROUP_COMPARATOR_CLASS =
+ "mapreduce.job.output.group.comparator.class";
+ public static final String WORKING_DIR = "mapreduce.job.working.dir";
+ public static final String HISTORY_LOCATION =
+ "mapreduce.job.userhistorylocation";
+ public static final String END_NOTIFICATION_URL =
+ "mapreduce.job.end-notification.url";
+ public static final String END_NOTIFICATION_RETRIES =
+ "mapreduce.job.end-notification.retry.attempts";
+ public static final String END_NOTIFICATION_RETRIE_INTERVAL =
+ "mapreduce.job.end-notification.retry.interval";
+ public static final String CLASSPATH_ARCHIVES =
+ "mapreduce.job.classpath.archives";
+ public static final String CLASSPATH_FILES = "mapreduce.job.classpath.files";
+ public static final String CACHE_FILES = "mapreduce.job.cache.files";
+ public static final String CACHE_ARCHIVES = "mapreduce.job.cache.archives";
+ public static final String CACHE_LOCALFILES =
+ "mapreduce.job.cache.local.files";
+ public static final String CACHE_LOCALARCHIVES =
+ "mapreduce.job.cache.local.archives";
+ public static final String CACHE_FILE_TIMESTAMPS =
+ "mapreduce.job.cache.files.timestamps";
+ public static final String CACHE_ARCHIVES_TIMESTAMPS =
+ "mapreduce.job.cache.archives.timestamps";
+ public static final String CACHE_SYMLINK =
+ "mapreduce.job.cache.symlink.create";
+
+ public static final String IO_SORT_FACTOR =
+ "mapreduce.task.io.sort.factor";
+ public static final String IO_SORT_MB = "mapreduce.task.io.sort.mb";
+ public static final String PRESERVE_FAILED_TASK_FILES =
+ "mapreduce.task.files.preserve.failedtasks";
+ public static final String PRESERVE_FILES_PATTERN =
+ "mapreduce.task.files.preserve.filepattern";
+ public static final String TASK_TEMP_DIR = "mapreduce.task.tmp.dir";
+ public static final String TASK_DEBUGOUT_LINES =
+ "mapreduce.task.debugout.lines";
+ public static final String RECORDS_BEFORE_PROGRESS =
+ "mapreduce.task.merge.progress.records";
+ public static final String SKIP_START_ATTEMPTS =
+ "mapreduce.task.skip.start.attempts";
+ public static final String TASK_ATTEMPT_ID = "mapreduce.task.attempt.id";
+ public static final String TASK_ISMAP = "mapreduce.task.ismap";
+ public static final String TASK_PARTITION = "mapreduce.task.partition";
+ public static final String TASK_PROFILE = "mapreduce.task.profile";
+ public static final String TASK_PROFILE_PARAMS =
+ "mapreduce.task.profile.params";
+ public static final String NUM_MAP_PROFILES =
+ "mapreduce.task.profile.maps";
+ public static final String NUM_REDUCE_PROFILES =
+ "mapreduce.task.profile.reduces";
+ public static final String TASK_TIMEOUT = "mapreduce.task.timeout";
+ public static final String TASK_ID = "mapreduce.task.id";
+ public static final String TASK_OUTPUT_DIR = "mapreduce.task.output.dir";
+ public static final String TASK_USERLOG_LIMIT =
+ "mapreduce.task.userlog.limit.kb";
+ public static final String TASK_LOG_RETAIN_HOURS =
+ "mapred.task.userlog.retain.hours";
+
+ public static final String MAP_SORT_RECORD_PERCENT =
+ "mapreduce.map.sort.record.percent";
+ public static final String MAP_SORT_SPILL_PERCENT =
+ "mapreduce.map.sort.spill.percent";
+ public static final String MAP_INPUT_FILE = "mapreduce.map.input.file";
+ public static final String MAP_INPUT_PATH = "mapreduce.map.input.length";
+ public static final String MAP_INPUT_START = "mapreduce.map.input.start";
+ public static final String MAP_MEMORY_MB = "mapreduce.map.memory.mb";
+ public static final String MAP_ENV = "mapreduce.map.env";
+ public static final String MAP_JAVA_OPTS = "mapreduce.map.java.opts";
+ public static final String MAP_ULIMIT = "mapreduce.map.ulimit";
+ public static final String MAP_MAX_ATTEMPTS = "mapreduce.map.maxattempts";
+ public static final String MAP_DEBUG_SCRIPT =
+ "mapreduce.map.debug.script";
+ public static final String MAP_SPECULATIVE = "mapreduce.map.speculative";
+ public static final String MAP_FAILURES_MAX_PERCENT =
+ "mapreduce.map.failures.maxpercent";
+ public static final String MAP_SKIP_INCR_PROC_COUNT =
+ "mapreduce.map.skip.proc-count.auto-incr";
+ public static final String MAP_SKIP_MAX_RECORDS =
+ "mapreduce.map.skip.maxrecords";
+ public static final String MAP_COMBINE_MIN_SPISS =
+ "mapreduce.map.combine.minspills";
+ public static final String MAP_OUTPUT_COMPRESS =
+ "mapreduce.map.output.compress";
+ public static final String MAP_OUTPUT_COMPRESS_CODEC =
+ "mapreduce.map.output.compress.codec";
+ public static final String MAP_OUTPUT_KEY_CLASS =
+ "mapreduce.map.output.key.class";
+ public static final String MAP_OUTPUT_VALUE_CLASS =
+ "mapreduce.map.output.value.class";
+ public static final String MAP_OUTPUT_KEY_FIELD_SEPERATOR =
+ "mapreduce.map.output.key.field.separator";
+ public static final String MAP_LOG_LEVEL = "mapreduce.map.log.level";
+
+ public static final String REDUCE_LOG_LEVEL =
+ "mapreduce.reduce.log.level";
+ public static final String REDUCE_MERGE_INMEM_THRESHOLD =
+ "mapreduce.reduce.merge.inmem.threshold";
+ public static final String REDUCE_INPUT_BUFFER_PERCENT =
+ "mapreduce.reduce.input.buffer.percent";
+ public static final String REDUCE_MARKRESET_BUFFER_PERCENT =
+ "mapreduce.reduce.markreset.buffer.percent";
+ public static final String REDUCE_MARKRESET_BUFFER_SIZE =
+ "mapreduce.reduce.markreset.buffer.size";
+ public static final String REDUCE_MEMORY_MB =
+ "mapreduce.reduce.memory.mb";
+ public static final String REDUCE_MEMORY_TOTAL_BYTES =
+ "mapreduce.reduce.memory.totalbytes";
+ public static final String SHUFFLE_INPUT_BUFFER_PERCENT =
+ "mapreduce.reduce.shuffle.input.buffer.percent";
+ public static final String SHUFFLE_MERGE_EPRCENT =
+ "mapreduce.reduce.shuffle.merge.percent";
+ public static final String REDUCE_FAILURES_MAXPERCENT =
+ "mapreduce.reduce.failures.maxpercent";
+ public static final String REDUCE_ENV = "mapreduce.reduce.env";
+ public static final String REDUCE_JAVA_OPTS =
+ "mapreduce.reduce.java.opts";
+ public static final String REDUCE_ULIMIT = "mapreduce.reduce.ulimit";
+ public static final String REDUCE_MAX_ATTEMPTS =
+ "mapreduce.reduce.maxattempts";
+ public static final String SHUFFLE_PARALLEL_COPIES =
+ "mapreduce.reduce.shuffle.parallelcopies";
+ public static final String REDUCE_DEBUG_SCRIPT =
+ "mapreduce.reduce.debug.script";
+ public static final String REDUCE_SPECULATIVE =
+ "mapreduce.reduce.speculative";
+ public static final String SHUFFLE_CONNECT_TIMEOUT =
+ "mapreduce.reduce.shuffle.connect.timeout";
+ public static final String SHUFFLE_READ_TIMEOUT =
+ "mapreduce.reduce.shuffle.read.timeout";
+ public static final String REDUCE_SKIP_INCR_PROC_COUNT =
+ "mapreduce.reduce.skip.proc-count.auto-incr";
+ public static final String REDUCE_SKIP_MAXGROUPS =
+ "mapreduce.reduce.skip.maxgroups";
+ public static final String REDUCE_MEMTOMEM_THRESHOLD =
+ "mapreduce.reduce.merge.memtomem.threshold";
+ public static final String REDUCE_MEMTOMEM_ENABLED =
+ "mapreduce.reduce.merge.memtomem.enabled";
protected final org.apache.hadoop.mapred.JobConf conf;
private final JobID jobId;
@@ -243,7 +416,7 @@
* @return boolean
*/
public boolean getJobSetupCleanupNeeded() {
- return conf.getBoolean("mapred.committer.job.setup.cleanup.needed", true);
+ return conf.getBoolean(SETUP_CLEANUP_NEEDED, true);
}
/**
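
A minimal sketch, not from this commit, of reading configuration through the newly
public JobContext constants instead of the old string literals (the KeyLookup class
name is illustrative only).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobContext;

public class KeyLookup {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // JobContext.SKIP_RECORDS ("mapreduce.job.skiprecords") replaces "mapred.skip.on".
    boolean skipping = conf.getBoolean(JobContext.SKIP_RECORDS, false);
    // JobContext.NUM_REDUCES is "mapreduce.job.reduces".
    int reduces = conf.getInt(JobContext.NUM_REDUCES, 1);
    System.out.println("skipping=" + skipping + ", reduces=" + reduces);
  }
}
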
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/JobSubmitter.java Fri Sep 18 15:09:48 2009
@@ -119,7 +119,7 @@
private void copyAndConfigureFiles(Job job, Path submitJobDir,
short replication) throws IOException {
Configuration conf = job.getConfiguration();
- if (!(conf.getBoolean("mapred.used.genericoptionsparser", false))) {
+ if (!(conf.getBoolean(Job.USED_GENERIC_PARSER, false))) {
LOG.warn("Use GenericOptionsParser for parsing the arguments. " +
"Applications should implement Tool for the same.");
}
@@ -222,7 +222,7 @@
private void configureCommandLineOptions(Job job, Path submitJobDir,
Path submitJarFile) throws IOException {
Configuration conf = job.getConfiguration();
- short replication = (short)conf.getInt("mapred.submit.replication", 10);
+ short replication = (short)conf.getInt(Job.SUBMIT_REPLICATION, 10);
copyAndConfigureFiles(job, submitJobDir, replication);
/* set this user's id in job configuration, so later job files can be
Added: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/MRConfig.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/MRConfig.java?rev=816664&view=auto
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/MRConfig.java (added)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/MRConfig.java Fri Sep 18 15:09:48 2009
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import org.apache.hadoop.mapred.JobTracker;
+import org.apache.hadoop.mapred.TaskTracker;
+
+/**
+ * Place holder for cluster level configuration keys.
+ *
+ * These keys are used by both {@link JobTracker} and {@link TaskTracker}. The
+ * keys should have "mapreduce.cluster." as the prefix.
+ *
+ */
+public interface MRConfig {
+
+ // Cluster-level configuration parameters
+ public static final String TEMP_DIR = "mapreduce.cluster.temp.dir";
+ public static final String LOCAL_DIR = "mapreduce.cluster.local.dir";
+ public static final String MAPMEMORY_MB = "mapreduce.cluster.mapmemory.mb";
+ public static final String REDUCEMEMORY_MB =
+ "mapreduce.cluster.reducememory.mb";
+}
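
A minimal sketch, not part of this commit, of how the new cluster-level MRConfig
keys might be read from a Configuration (the class name and defaults below are
illustrative only).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;

public class ClusterKeys {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "mapreduce.cluster.local.dir" and "mapreduce.cluster.mapmemory.mb".
    String localDir = conf.get(MRConfig.LOCAL_DIR, "/tmp/mapred/local");
    long mapMemoryMb = conf.getLong(MRConfig.MAPMEMORY_MB, -1L);
    System.out.println("local dir=" + localDir + ", map memory MB=" + mapMemoryMb);
  }
}
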
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/DistributedCache.java Fri Sep 18 15:09:48 2009
@@ -310,7 +310,7 @@
@Deprecated
public static void setCacheArchives(URI[] archives, Configuration conf) {
String sarchives = StringUtils.uriToString(archives);
- conf.set("mapred.cache.archives", sarchives);
+ conf.set(JobContext.CACHE_ARCHIVES, sarchives);
}
/**
@@ -323,7 +323,7 @@
@Deprecated
public static void setCacheFiles(URI[] files, Configuration conf) {
String sfiles = StringUtils.uriToString(files);
- conf.set("mapred.cache.files", sfiles);
+ conf.set(JobContext.CACHE_FILES, sfiles);
}
/**
@@ -336,7 +336,7 @@
*/
@Deprecated
public static URI[] getCacheArchives(Configuration conf) throws IOException {
- return StringUtils.stringToURI(conf.getStrings("mapred.cache.archives"));
+ return StringUtils.stringToURI(conf.getStrings(JobContext.CACHE_ARCHIVES));
}
/**
@@ -349,7 +349,7 @@
*/
@Deprecated
public static URI[] getCacheFiles(Configuration conf) throws IOException {
- return StringUtils.stringToURI(conf.getStrings("mapred.cache.files"));
+ return StringUtils.stringToURI(conf.getStrings(JobContext.CACHE_FILES));
}
/**
@@ -364,7 +364,7 @@
public static Path[] getLocalCacheArchives(Configuration conf)
throws IOException {
return StringUtils.stringToPath(conf
- .getStrings("mapred.cache.localArchives"));
+ .getStrings(JobContext.CACHE_LOCALARCHIVES));
}
/**
@@ -378,7 +378,7 @@
@Deprecated
public static Path[] getLocalCacheFiles(Configuration conf)
throws IOException {
- return StringUtils.stringToPath(conf.getStrings("mapred.cache.localFiles"));
+ return StringUtils.stringToPath(conf.getStrings(JobContext.CACHE_LOCALFILES));
}
/**
@@ -391,7 +391,7 @@
*/
@Deprecated
public static String[] getArchiveTimestamps(Configuration conf) {
- return conf.getStrings("mapred.cache.archives.timestamps");
+ return conf.getStrings(JobContext.CACHE_ARCHIVES_TIMESTAMPS);
}
@@ -405,7 +405,7 @@
*/
@Deprecated
public static String[] getFileTimestamps(Configuration conf) {
- return conf.getStrings("mapred.cache.files.timestamps");
+ return conf.getStrings(JobContext.CACHE_FILE_TIMESTAMPS);
}
/**
@@ -475,8 +475,8 @@
*/
@Deprecated
public static void addCacheArchive(URI uri, Configuration conf) {
- String archives = conf.get("mapred.cache.archives");
- conf.set("mapred.cache.archives", archives == null ? uri.toString()
+ String archives = conf.get(JobContext.CACHE_ARCHIVES);
+ conf.set(JobContext.CACHE_ARCHIVES, archives == null ? uri.toString()
: archives + "," + uri.toString());
}
@@ -489,8 +489,8 @@
*/
@Deprecated
public static void addCacheFile(URI uri, Configuration conf) {
- String files = conf.get("mapred.cache.files");
- conf.set("mapred.cache.files", files == null ? uri.toString() : files + ","
+ String files = conf.get(JobContext.CACHE_FILES);
+ conf.set(JobContext.CACHE_FILES, files == null ? uri.toString() : files + ","
+ uri.toString());
}
@@ -505,8 +505,8 @@
@Deprecated
public static void addFileToClassPath(Path file, Configuration conf)
throws IOException {
- String classpath = conf.get("mapred.job.classpath.files");
- conf.set("mapred.job.classpath.files", classpath == null ? file.toString()
+ String classpath = conf.get(JobContext.CLASSPATH_FILES);
+ conf.set(JobContext.CLASSPATH_FILES, classpath == null ? file.toString()
: classpath + "," + file.toString());
FileSystem fs = FileSystem.get(conf);
URI uri = fs.makeQualified(file).toUri();
@@ -524,7 +524,7 @@
@Deprecated
public static Path[] getFileClassPaths(Configuration conf) {
ArrayList<String> list = (ArrayList<String>)conf.getStringCollection(
- "mapred.job.classpath.files");
+ JobContext.CLASSPATH_FILES);
if (list.size() == 0) {
return null;
}
@@ -546,8 +546,8 @@
@Deprecated
public static void addArchiveToClassPath(Path archive, Configuration conf)
throws IOException {
- String classpath = conf.get("mapred.job.classpath.archives");
- conf.set("mapred.job.classpath.archives", classpath == null ? archive
+ String classpath = conf.get(JobContext.CLASSPATH_ARCHIVES);
+ conf.set(JobContext.CLASSPATH_ARCHIVES, classpath == null ? archive
.toString() : classpath + "," + archive.toString());
FileSystem fs = FileSystem.get(conf);
URI uri = fs.makeQualified(archive).toUri();
@@ -565,7 +565,7 @@
@Deprecated
public static Path[] getArchiveClassPaths(Configuration conf) {
ArrayList list = (ArrayList)conf.getStringCollection(
- "mapred.job.classpath.archives");
+ JobContext.CLASSPATH_ARCHIVES);
if (list.size() == 0) {
return null;
}
@@ -585,7 +585,7 @@
*/
@Deprecated
public static void createSymlink(Configuration conf){
- conf.set("mapred.create.symlink", "yes");
+ conf.set(JobContext.CACHE_SYMLINK, "yes");
}
/**
@@ -598,7 +598,7 @@
*/
@Deprecated
public static boolean getSymlink(Configuration conf){
- String result = conf.get("mapred.create.symlink");
+ String result = conf.get(JobContext.CACHE_SYMLINK);
if ("yes".equals(result)){
return true;
}
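
A minimal sketch, not part of this commit, showing that the deprecated
DistributedCache helpers now write through the JobContext constants (the URI and
class name below are illustrative only).

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.filecache.DistributedCache;

public class CacheKeyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedCache.addCacheFile(new URI("hdfs:///apps/dict.txt"), conf);
    // The same value is visible under JobContext.CACHE_FILES
    // ("mapreduce.job.cache.files").
    System.out.println(conf.get(JobContext.CACHE_FILES));
  }
}
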
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/filecache/TrackerDistributedCacheManager.java Fri Sep 18 15:09:48 2009
@@ -27,6 +27,8 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@@ -124,7 +126,7 @@
}
}
// setting the cache size to a default of 10GB
- long allowedSize = conf.getLong("local.cache.size", DEFAULT_CACHE_SIZE);
+ long allowedSize = conf.getLong(TTConfig.TT_LOCAL_CACHE_SIZE, DEFAULT_CACHE_SIZE);
if (allowedSize < size) {
// try some cache deletions
deleteCache(conf);
@@ -578,7 +580,7 @@
* The order should be the same as the order in which the archives are added.
*/
static void setArchiveTimestamps(Configuration conf, String timestamps) {
- conf.set("mapred.cache.archives.timestamps", timestamps);
+ conf.set(JobContext.CACHE_ARCHIVES_TIMESTAMPS, timestamps);
}
/**
@@ -589,7 +591,7 @@
* The order should be the same as the order in which the files are added.
*/
static void setFileTimestamps(Configuration conf, String timestamps) {
- conf.set("mapred.cache.files.timestamps", timestamps);
+ conf.set(JobContext.CACHE_FILE_TIMESTAMPS, timestamps);
}
/**
@@ -599,7 +601,7 @@
* @param str a comma separated list of local archives
*/
static void setLocalArchives(Configuration conf, String str) {
- conf.set("mapred.cache.localArchives", str);
+ conf.set(JobContext.CACHE_LOCALARCHIVES, str);
}
/**
@@ -609,6 +611,6 @@
* @param str a comma separated list of local files
*/
static void setLocalFiles(Configuration conf, String str) {
- conf.set("mapred.cache.localFiles", str);
+ conf.set(JobContext.CACHE_LOCALFILES, str);
}
}
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java Fri Sep 18 15:09:48 2009
@@ -43,6 +43,7 @@
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobTracker;
import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.StringUtils;
/**
@@ -92,7 +93,7 @@
long jobTrackerStartTime) throws IOException {
// Get and create the log folder
- String logDirLoc = conf.get("hadoop.job.history.location" ,
+ String logDirLoc = conf.get(JTConfig.JT_JOBHISTORY_LOCATION ,
"file:///" +
new File(System.getProperty("hadoop.log.dir")).getAbsolutePath()
+ File.separator + "history");
@@ -109,10 +110,10 @@
logDir.toString());
}
}
- conf.set("hadoop.job.history.location", logDirLoc);
+ conf.set(JTConfig.JT_JOBHISTORY_LOCATION, logDirLoc);
jobHistoryBlockSize =
- conf.getLong("mapred.jobtracker.job.history.block.size",
+ conf.getLong(JTConfig.JT_JOBHISTORY_BLOCK_SIZE,
3 * 1024 * 1024);
jobTracker = jt;
@@ -122,7 +123,7 @@
public void initDone(JobConf conf, FileSystem fs) throws IOException {
//if completed job history location is set, use that
String doneLocation =
- conf.get("mapred.job.tracker.history.completed.location");
+ conf.get(JTConfig.JT_JOBHISTORY_COMPLETED_LOCATION);
if (doneLocation != null) {
done = fs.makeQualified(new Path(doneLocation));
doneDirFs = fs;
@@ -147,7 +148,7 @@
// Start the History Cleaner Thread
long maxAgeOfHistoryFiles = conf.getLong(
- "mapreduce.cluster.jobhistory.maxage", DEFAULT_HISTORY_MAX_AGE);
+ JTConfig.JT_JOBHISTORY_MAXAGE, DEFAULT_HISTORY_MAX_AGE);
historyCleanerThread = new HistoryCleaner(maxAgeOfHistoryFiles);
historyCleanerThread.start();
}
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java?rev=816664&r1=816663&r2=816664&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/lib/aggregate/UniqValueCount.java Fri Sep 18 15:09:48 2009
@@ -28,6 +28,8 @@
*
*/
public class UniqValueCount implements ValueAggregator