hadoop-common-commits mailing list archives

From: omal...@apache.org
Subject: svn commit: r785392 [1/5] - in /hadoop/core/branches/HADOOP-4687/mapred: conf/ lib/ src/c++/ src/c++/task-controller/ src/contrib/dynamic-scheduler/ src/contrib/sqoop/ src/contrib/streaming/ src/contrib/streaming/src/java/org/apache/hadoop/streaming/ s...
Date: Tue, 16 Jun 2009 20:54:28 GMT
Author: omalley
Date: Tue Jun 16 20:54:24 2009
New Revision: 785392

URL: http://svn.apache.org/viewvc?rev=785392&view=rev
Log:
HADOOP-4687 Merge of 776174:784663 from trunk to the branch, to merge:
conf/capacity-scheduler.xml.template
src/c++
src/examples
src/mapred
src/test/mapred
src/webapps/job

Added:
    hadoop/core/branches/HADOOP-4687/mapred/src/contrib/dynamic-scheduler/   (props changed)
      - copied from r784974, hadoop/core/trunk/src/contrib/dynamic-scheduler/
    hadoop/core/branches/HADOOP-4687/mapred/src/contrib/sqoop/   (props changed)
      - copied from r784974, hadoop/core/trunk/src/contrib/sqoop/
    hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamJob.java
      - copied unchanged from r784663, hadoop/core/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamJob.java
    hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java
      - copied unchanged from r784663, hadoop/core/trunk/src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamingStatus.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/db/
      - copied from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/db/
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java
      - copied unchanged from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/db/DBConfiguration.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
      - copied unchanged from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/db/DBInputFormat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
      - copied unchanged from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/db/DBOutputFormat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/db/DBWritable.java
      - copied unchanged from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/db/DBWritable.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/db/package.html
      - copied unchanged from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/db/package.html
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
      - copied unchanged from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java
      - copied unchanged from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/input/CombineFileRecordReader.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/input/CombineFileSplit.java
      - copied unchanged from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/input/CombineFileSplit.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/
      - copied from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java
      - copied unchanged from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/ControlledJob.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
      - copied unchanged from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/jobcontrol/package.html
      - copied unchanged from r784663, hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/package.html
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcessesWithLinuxTaskController.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestMapredHeartbeat.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMapredHeartbeat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRChildTask.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRChildTask.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestRunningTaskLimits.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapred/TestRunningTaskLimits.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapreduce/MapReduceTestUtil.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/
      - copied from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBJob.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBJob.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapreduce/lib/db/TestDBOutputFormat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/
      - copied from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/
      - copied from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/mapreduce/lib/jobcontrol/TestMapReduceJobControl.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/util/TestRunJar.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/org/apache/hadoop/util/TestRunJar.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/testjar/Hello.java
      - copied unchanged from r784663, hadoop/core/trunk/src/test/mapred/testjar/Hello.java
Removed:
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJobWithDFS.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRTaskTempDir.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/lib/TestCombineFileInputFormat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/lib/db/TestDBJob.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/org/
Modified:
    hadoop/core/branches/HADOOP-4687/mapred/conf/capacity-scheduler.xml.template   (contents, props changed)
    hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-core-0.21.0-dev.jar
    hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-core-test-0.21.0-dev.jar
    hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-hdfs-0.21.0-dev.jar
    hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-hdfs-test-0.21.0-dev.jar
    hadoop/core/branches/HADOOP-4687/mapred/src/c++/   (props changed)
    hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/configuration.c
    hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/configuration.h.in
    hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/main.c
    hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/task-controller.c
    hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/task-controller.h
    hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/   (props changed)
    hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/ivy.xml
    hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
    hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
    hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java
    hadoop/core/branches/HADOOP-4687/mapred/src/contrib/vaidya/   (props changed)
    hadoop/core/branches/HADOOP-4687/mapred/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java
    hadoop/core/branches/HADOOP-4687/mapred/src/examples/   (props changed)
    hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/DBCountPageView.java
    hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java
    hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/Sort.java
    hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/   (props changed)
    hadoop/core/branches/HADOOP-4687/mapred/src/java/mapred-default.xml
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/Counters.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/DefaultTaskController.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/JobClient.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/JobConf.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/JobHistory.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/JobInProgress.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/JobTracker.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/JvmManager.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/LinuxTaskController.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/MRConstants.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/MapTask.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/MapTaskStatus.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/Merger.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/ReduceTask.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/ReduceTaskStatus.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/ResourceEstimator.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/TaskController.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/TaskInProgress.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/TaskMemoryManagerThread.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/TaskRunner.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/TaskStatus.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/TaskTracker.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/jobcontrol/Job.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/jobcontrol/JobControl.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/lib/CombineFileInputFormat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/lib/CombineFileRecordReader.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/lib/CombineFileSplit.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/lib/KeyFieldBasedComparator.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/lib/db/DBConfiguration.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/lib/db/DBInputFormat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/lib/db/DBOutputFormat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapred/lib/db/DBWritable.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/TaskCounter.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/TaskCounter.properties
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java
    hadoop/core/branches/HADOOP-4687/mapred/src/java/org/apache/hadoop/mapreduce/lib/input/SequenceFileRecordReader.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/   (props changed)
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestCounters.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestFileOutputFormat.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestJavaSerialization.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestJobDirCleanup.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestJobExecutionAsDifferentUser.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerRestart.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcesses.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestReduceTask.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestTTMemoryReporting.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java
    hadoop/core/branches/HADOOP-4687/mapred/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java
    hadoop/core/branches/HADOOP-4687/mapred/src/webapps/job/   (props changed)
    hadoop/core/branches/HADOOP-4687/mapred/src/webapps/job/jobdetails.jsp
    hadoop/core/branches/HADOOP-4687/mapred/src/webapps/job/taskdetails.jsp

Modified: hadoop/core/branches/HADOOP-4687/mapred/conf/capacity-scheduler.xml.template
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/conf/capacity-scheduler.xml.template?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/conf/capacity-scheduler.xml.template (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/conf/capacity-scheduler.xml.template Tue Jun 16 20:54:24 2009
@@ -56,34 +56,6 @@
       account in scheduling decisions by default in a job queue.
     </description>
   </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.task.default-pmem-percentage-in-vmem</name>
-    <value>-1</value>
-    <description>A percentage (float) of the default VM limit for jobs
-   	  (mapred.task.default.maxvm). This is the default RAM task-limit 
-   	  associated with a task. Unless overridden by a job's setting, this 
-   	  number defines the RAM task-limit.
-
-      If this property is missing, or set to an invalid value, scheduling 
-      based on physical memory, RAM, is disabled.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.task.limit.maxpmem</name>
-    <value>-1</value>
-    <description>Configuration that provides an upper limit on the maximum
-      physical memory that can be specified by a job. The job configuration
-      mapred.task.maxpmem should be less than this value. If not, the job will
-      be rejected by the scheduler.
-      
-      If it is set to -1, scheduler will not consider physical memory for
-      scheduling even if virtual memory based scheduling is enabled(by setting
-      valid values for both mapred.task.default.maxvmem and
-      mapred.task.limit.maxvmem).
-    </description>
-  </property>
   
   <property>
     <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>

Propchange: hadoop/core/branches/HADOOP-4687/mapred/conf/capacity-scheduler.xml.template
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Tue Jun 16 20:54:24 2009
@@ -0,0 +1,2 @@
+/hadoop/core/branches/branch-0.19/mapred/conf/capacity-scheduler.xml.template:713112
+/hadoop/core/trunk/conf/capacity-scheduler.xml.template:776175-784663

Modified: hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-core-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-core-0.21.0-dev.jar?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-core-test-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-core-test-0.21.0-dev.jar?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-hdfs-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-hdfs-0.21.0-dev.jar?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-hdfs-test-0.21.0-dev.jar
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/lib/hadoop-hdfs-test-0.21.0-dev.jar?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
Binary files - no diff available.

Propchange: hadoop/core/branches/HADOOP-4687/mapred/src/c++/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Tue Jun 16 20:54:24 2009
@@ -0,0 +1,2 @@
+/hadoop/core/branches/branch-0.19/mapred/src/c++:713112
+/hadoop/core/trunk/src/c++:776175-784663

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/configuration.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/c%2B%2B/task-controller/configuration.c?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/configuration.c (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/configuration.c Tue Jun 16 20:54:24 2009
@@ -202,3 +202,36 @@
   return NULL;
 }
 
+const char ** get_values(char * key) {
+  const char ** toPass = NULL;
+  const char * value = get_value(key);
+  char *tempTok = NULL;
+  char *tempstr = NULL;
+  int size = 0;
+  int len;
+  //first allocate any array of 10
+  if(value != NULL) {
+    toPass = (const char **) malloc(sizeof(char *) * MAX_SIZE);
+    tempTok = strtok_r((char *)value, ",", &tempstr);
+    if (tempTok != NULL) {
+      while (1) {
+        toPass[size++] = tempTok;
+        tempTok = strtok_r(NULL, ",", &tempstr);
+        if(tempTok == NULL){
+          break;
+        }
+        if((size % MAX_SIZE) == 0) {
+          toPass = (const char **) realloc(toPass,(sizeof(char *) *
+              (MAX_SIZE * ((size/MAX_SIZE) +1))));
+        }
+      }
+    } else {
+      toPass[size] = (char *)value;
+    }
+  }
+  if(size > 0) {
+    toPass[size] = NULL;
+  }
+  return toPass;
+}
+

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/configuration.h.in
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/c%2B%2B/task-controller/configuration.h.in?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/configuration.h.in (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/configuration.h.in Tue Jun 16 20:54:24 2009
@@ -57,3 +57,6 @@
 //method to free allocated configuration
 void free_configurations();
 
+//function to return array of values pointing to the key. Values are
+//comma seperated strings.
+const char ** get_values(char* key);
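
A minimal sketch of how a caller might consume the new get_values() helper declared above, assuming the returned array is NULL-terminated as the configuration.c change intends; the lookup key is the real "mapred.local.dir" (TT_SYS_DIR_KEY) value, while contains_dir is a hypothetical wrapper written only for illustration, following the same iterate-then-free pattern check_tt_root() uses later in this commit.

    #include <stdlib.h>
    #include <string.h>
    #include "configuration.h"

    /* Hypothetical helper: returns 1 if 'candidate' appears in the
     * comma-separated value stored under "mapred.local.dir".           */
    static int contains_dir(const char *candidate) {
      const char **dirs = get_values("mapred.local.dir");
      const char **d;
      int found = 0;
      if (dirs == NULL) {
        return 0;                      /* key missing or config unreadable */
      }
      for (d = dirs; *d != NULL; d++) {
        if (strcmp(*d, candidate) == 0) {
          found = 1;
          break;
        }
      }
      free((void *) dirs);             /* free only the array, as check_tt_root does */
      return found;
    }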

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/main.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/c%2B%2B/task-controller/main.c?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/main.c (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/main.c Tue Jun 16 20:54:24 2009
@@ -25,15 +25,16 @@
   const char * task_id = NULL;
   const char * tt_root = NULL;
   int exit_code = 0;
+  const char * task_pid = NULL;
   const char* const short_options = "l:";
   const struct option long_options[] = { { "log", 1, NULL, 'l' }, { NULL, 0,
       NULL, 0 } };
 
   const char* log_file = NULL;
 
-  // when we support additional commands without ttroot, this check
-  // may become command specific.
-  if (argc < 6) {
+  //Minimum number of arguments required to run the task-controller
+  //command-name user command tt-root
+  if (argc < 3) {
     display_usage(stderr);
     return INVALID_ARGUMENT_NUMBER;
   }
@@ -44,7 +45,6 @@
   strncpy(hadoop_conf_dir,argv[0],(strlen(argv[0]) - strlen(EXEC_PATTERN)));
   hadoop_conf_dir[(strlen(argv[0]) - strlen(EXEC_PATTERN))] = '\0';
 #endif
-
   do {
     next_option = getopt_long(argc, argv, short_options, long_options, NULL);
     switch (next_option) {
@@ -88,24 +88,25 @@
   }
   optind = optind + 1;
   command = atoi(argv[optind++]);
-  job_id = argv[optind++];
-  task_id = argv[optind++];
-
 #ifdef DEBUG
   fprintf(LOGFILE, "main : command provided %d\n",command);
   fprintf(LOGFILE, "main : user is %s\n", user_detail->pw_name);
-  fprintf(LOGFILE, "main : job id %s \n", job_id);
-  fprintf(LOGFILE, "main : task id %s \n", task_id);
 #endif
   switch (command) {
-  case RUN_TASK:
-    tt_root = argv[optind];
+  case LAUNCH_TASK_JVM:
+    tt_root = argv[optind++];
+    job_id = argv[optind++];
+    task_id = argv[optind++];
     exit_code
         = run_task_as_user(user_detail->pw_name, job_id, task_id, tt_root);
     break;
-  case KILL_TASK:
-    tt_root = argv[optind];
-    exit_code = kill_user_task(user_detail->pw_name, job_id, task_id, tt_root);
+  case TERMINATE_TASK_JVM:
+    task_pid = argv[optind++];
+    exit_code = kill_user_task(user_detail->pw_name, task_pid, SIGTERM);
+    break;
+  case KILL_TASK_JVM:
+    task_pid = argv[optind++];
+    exit_code = kill_user_task(user_detail->pw_name, task_pid, SIGKILL);
     break;
   default:
     exit_code = INVALID_COMMAND_PROVIDED;
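
For readability, a caller-side sketch of the three command forms the controller accepts after this change. The numeric command values follow the enum ordering introduced in task-controller.h below (LAUNCH_TASK_JVM = 0, TERMINATE_TASK_JVM = 1, KILL_TASK_JVM = 2); the binary path, user name and pid in the exec example are illustrative only, not part of the commit.

    /*
     *   task-controller [-l logfile] <user> 0 <tt-root> <job-id> <task-id>
     *   task-controller [-l logfile] <user> 1 <task-pid>   (SIGTERM to the task's group)
     *   task-controller [-l logfile] <user> 2 <task-pid>   (SIGKILL to the task's group)
     */
    #include <stdio.h>
    #include <unistd.h>

    /* Hypothetical caller: ask a running task JVM to exit by exec'ing the
     * setuid controller with the TERMINATE_TASK_JVM command (= 1).        */
    static int terminate_task_jvm(const char *user, const char *task_pid) {
      execl("/usr/local/hadoop/bin/task-controller", "task-controller",
            user, "1", task_pid, (char *) NULL);
      perror("execl task-controller");  /* reached only if exec fails */
      return -1;
    }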

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/task-controller.c
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/c%2B%2B/task-controller/task-controller.c?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/task-controller.c (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/task-controller.c Tue Jun 16 20:54:24 2009
@@ -23,9 +23,6 @@
 //LOGFILE
 FILE *LOGFILE;
 
-//hadoop temp dir root which is configured in secure configuration
-const char *mapred_local_dir;
-
 //placeholder for global cleanup operations
 void cleanup() {
   free_configurations();
@@ -36,10 +33,14 @@
   if (get_user_details(user) < 0) {
     return -1;
   }
-#ifdef DEBUG
-  fprintf(LOGFILE,"change_user : setting user as %s ", user_detail->pw_name);
-#endif
+
+  if(initgroups(user_detail->pw_name, user_detail->pw_gid) != 0) {
+	  cleanup();
+	  return SETUID_OPER_FAILED;
+  }
+
   errno = 0;
+
   setgid(user_detail->pw_gid);
   if (errno != 0) {
     fprintf(LOGFILE, "unable to setgid : %s\n", strerror(errno));
@@ -70,90 +71,61 @@
   return 0;
 }
 
-//Function to set the hadoop.temp.dir key from configuration.
-//would return -1 if the configuration is not proper.
-
-int get_mapred_local_dir() {
-
-  if (mapred_local_dir == NULL) {
-    mapred_local_dir = get_value(TT_SYS_DIR_KEY);
-  }
-
-  //after the call it should not be null
-  if (mapred_local_dir == NULL) {
-    return -1;
-  } else {
-    return 0;
-  }
-
-}
 // function to check if the passed tt_root is present in hadoop.tmp.dir
 int check_tt_root(const char *tt_root) {
-  char *token;
+  char ** mapred_local_dir;
   int found = -1;
 
   if (tt_root == NULL) {
     return -1;
   }
 
-  if (mapred_local_dir == NULL) {
-    if (get_mapred_local_dir() < 0) {
-      return -1;
-    }
-  }
+  mapred_local_dir = (char **)get_values(TT_SYS_DIR_KEY);
 
-  token = strtok((char *) mapred_local_dir, ",");
-  if (token == NULL && mapred_local_dir != NULL) {
-    token = (char *)mapred_local_dir;
+  if (mapred_local_dir == NULL) {
+    return -1;
   }
 
-  while (1) {
-    if (strcmp(tt_root, token) == 0) {
+  while(*mapred_local_dir != NULL) {
+    if(strcmp(*mapred_local_dir,tt_root) == 0) {
       found = 0;
       break;
     }
-    token = strtok(NULL, ",");
-    if (token == NULL) {
-      break;
-    }
   }
-
+  free(mapred_local_dir);
   return found;
-
 }
 
-/*
- *d function which would return .pid file path which is used while running
- * and killing of the tasks by the user.
- *
- * check TT_SYS_DIR for pattern
+/**
+ * Function to check if the constructed path and absolute
+ * path resolve to one and same.
  */
-void get_pid_path(const char * jobid, const char * taskid, const char *tt_root,
-    char ** pid_path) {
-
-  int str_len = strlen(TT_SYS_DIR) + strlen(jobid) + strlen(taskid) + strlen(
-      tt_root);
-  *pid_path = NULL;
 
-  if (mapred_local_dir == NULL) {
-    if (get_mapred_local_dir() < 0) {
-      return;
-    }
+int check_path(char *path) {
+  char * resolved_path = (char *) canonicalize_file_name(path);
+  if(resolved_path == NULL) {
+    return ERROR_RESOLVING_FILE_PATH;
+  }
+  if(strcmp(resolved_path, path) !=0) {
+    free(resolved_path);
+    return RELATIVE_PATH_COMPONENTS_IN_FILE_PATH;
   }
-
-  *pid_path = (char *) malloc(sizeof(char) * (str_len + 1));
-
-  if (*pid_path == NULL) {
-    fprintf(LOGFILE, "unable to allocate memory for pid path\n");
-    return;
+  free(resolved_path);
+  return 0;
+}
+/**
+ * Function to check if a user actually owns the file.
+ */
+int check_owner(uid_t uid, char *path) {
+  struct stat filestat;
+  if(stat(path, &filestat)!=0) {
+    return UNABLE_TO_STAT_FILE;
+  }
+  //check owner.
+  if(uid != filestat.st_uid){
+    return FILE_NOT_OWNED_BY_TASKTRACKER;
   }
-  memset(*pid_path,'\0',str_len+1);
-  snprintf(*pid_path, str_len, TT_SYS_DIR, tt_root, jobid, taskid);
-#ifdef DEBUG
-  fprintf(LOGFILE, "get_pid_path : pid path = %s\n", *pid_path);
-  fflush(LOGFILE);
-#endif
-
+  return 0;
 }
 
 /*
@@ -163,19 +135,19 @@
  */
 void get_task_file_path(const char * jobid, const char * taskid,
     const char * tt_root, char **task_script_path) {
+  const char ** mapred_local_dir = get_values(TT_SYS_DIR_KEY);
   *task_script_path = NULL;
   int str_len = strlen(TT_LOCAL_TASK_SCRIPT_PATTERN) + strlen(jobid) + (strlen(
       taskid)) + strlen(tt_root);
 
   if (mapred_local_dir == NULL) {
-    if (get_mapred_local_dir() < 0) {
-      return;
-    }
+    return;
   }
 
   *task_script_path = (char *) malloc(sizeof(char) * (str_len + 1));
   if (*task_script_path == NULL) {
     fprintf(LOGFILE, "Unable to allocate memory for task_script_path \n");
+    free(mapred_local_dir);
     return;
   }
 
@@ -186,13 +158,13 @@
   fprintf(LOGFILE, "get_task_file_path : task script path = %s\n", *task_script_path);
   fflush(LOGFILE);
 #endif
-
+  free(mapred_local_dir);
 }
 
 //end of private functions
 void display_usage(FILE *stream) {
   fprintf(stream,
-      "Usage: task-controller [-l logile] user command command-args\n");
+      "Usage: task-controller [-l logfile] user command command-args\n");
 }
 
 //function used to populate and user_details structure.
@@ -212,28 +184,20 @@
  *Function used to launch a task as the provided user.
  * First the function checks if the tt_root passed is found in
  * hadoop.temp.dir
- *
- *Then gets the path to which the task has to write its pid from
- *get_pid_path.
- *
- * THen writes its pid into the file.
- *
- * Then changes the permission of the pid file into 777
- *
- * Then uses get_task_file_path to fetch the task script file path.
- *
+ * Uses get_task_file_path to fetch the task script file path.
  * Does an execlp on the same in order to replace the current image with
  * task image.
- *
  */
 
 int run_task_as_user(const char * user, const char *jobid, const char *taskid,
     const char *tt_root) {
   char *task_script_path = NULL;
-  char *pid_path = NULL;
-  FILE *file_handle = NULL;
   int exit_code = 0;
-  int i = 0;
+  uid_t uid = getuid();
+
+  if(jobid == NULL || taskid == NULL) {
+    return INVALID_ARGUMENT_NUMBER;
+  }
 
 #ifdef DEBUG
   fprintf(LOGFILE,"run_task_as_user : Job id : %s \n", jobid);
@@ -241,7 +205,8 @@
   fprintf(LOGFILE,"run_task_as_user : tt_root : %s \n", tt_root);
   fflush(LOGFILE);
 #endif
-
+  //Check tt_root before switching the user, as reading configuration
+  //file requires privileged access.
   if (check_tt_root(tt_root) < 0) {
     fprintf(LOGFILE, "invalid tt root passed %s\n", tt_root);
     cleanup();
@@ -257,44 +222,21 @@
     return SETUID_OPER_FAILED;
   }
 
-  get_pid_path(jobid, taskid, tt_root, &pid_path);
-
-  if (pid_path == NULL) {
+  get_task_file_path(jobid, taskid, tt_root, &task_script_path);
+  if (task_script_path == NULL) {
     cleanup();
-    return INVALID_PID_PATH;
+    return INVALID_TASK_SCRIPT_PATH;
   }
-
   errno = 0;
-  file_handle = fopen(pid_path, "w");
-
-  if (file_handle == NULL) {
-    exit_code = UNABLE_TO_OPEN_PID_FILE_WRITE_MODE;
+  exit_code = check_path(task_script_path);
+  if(exit_code != 0) {
     goto cleanup;
   }
-
   errno = 0;
-  if (fprintf(file_handle, "%d\n", getpid()) < 0) {
-    exit_code = UNABLE_TO_WRITE_TO_PID_FILE;
+  exit_code = check_owner(uid, task_script_path);
+  if(exit_code != 0) {
     goto cleanup;
   }
-
-  fflush(file_handle);
-  fclose(file_handle);
-  //set file handle to null after closing so it would not be double closed
-  //in cleanup label
-  file_handle = NULL;
-  //change the permissions of the file
-  errno = 0;
-  //free pid_t path which is allocated
-  free(pid_path);
-  pid_path = NULL;
-
-  get_task_file_path(jobid, taskid, tt_root, &task_script_path);
-
-  if (task_script_path == NULL) {
-    cleanup();
-    return INVALID_TASK_SCRIPT_PATH;
-  }
   errno = 0;
   cleanup();
   execlp(task_script_path, task_script_path, NULL);
@@ -306,83 +248,53 @@
   return exit_code;
 
 cleanup:
-  if (pid_path != NULL) {
-    free(pid_path);
-  }
   if (task_script_path != NULL) {
     free(task_script_path);
   }
-  if (file_handle != NULL) {
-    fclose(file_handle);
-  }
   // free configurations
   cleanup();
   return exit_code;
 }
+
 /**
- * Function used to terminate a task launched by the user.
- *
- * The function first checks if the passed tt-root is found in
- * configured hadoop.temp.dir (which is a list of tt_roots).
- *
- * Then gets the task-pid path using function get_pid_path.
- *
- * reads the task-pid from the file which is mentioned by get_pid_path
- *
- * kills the task by sending SIGTERM to that particular process.
- *
+ * Function used to terminate/kill a task launched by the user.
+ * The function sends appropriate signal to the process group
+ * specified by the task_pid.
  */
 
-int kill_user_task(const char *user, const char *jobid, const char *taskid,
-    const char *tt_root) {
+int kill_user_task(const char *user, const char *task_pid, int sig) {
   int pid = 0;
-  int i = 0;
-  char *pid_path = NULL;
-  FILE *file_handle = NULL;
-#ifdef DEBUG
-  fprintf(LOGFILE,"kill_user_task : Job id : %s \n", jobid);
-  fprintf(LOGFILE,"kill_user_task : task id : %s \n", taskid);
-  fprintf(LOGFILE,"kill_user_task : tt_root : %s \n", tt_root);
-  fflush(LOGFILE);
-#endif
 
-  if (check_tt_root(tt_root) < 0) {
-    fprintf(LOGFILE, "invalid tt root passed %s\n", tt_root);
-    cleanup();
-    return INVALID_TT_ROOT;
+  if(task_pid == NULL) {
+    return INVALID_ARGUMENT_NUMBER;
   }
+  pid = atoi(task_pid);
 
+  if(pid <= 0) {
+    return INVALID_TASK_PID;
+  }
   fclose(LOGFILE);
   fcloseall();
-
   if (change_user(user) != 0) {
     cleanup();
     return SETUID_OPER_FAILED;
   }
 
-  get_pid_path(jobid, taskid, tt_root, &pid_path);
-  if (pid_path == NULL) {
-    cleanup();
-    return INVALID_PID_PATH;
-  }
-  file_handle = fopen(pid_path, "r");
-  if (file_handle == NULL) {
-    free(pid_path);
-    cleanup();
-    return UNABLE_TO_OPEN_PID_FILE_READ_MODE;
-  }
-  fscanf(file_handle, "%d", &pid);
-  fclose(file_handle);
-  free(pid_path);
-  if (pid == 0) {
-    cleanup();
-    return UNABLE_TO_READ_PID;
+  //Don't continue if the process-group is not alive anymore.
+  if(kill(-pid,0) < 0) {
+    errno = 0;
+    return 0;
   }
-  if (kill(pid, SIGTERM) < 0) {
-    fprintf(LOGFILE, "%s\n", strerror(errno));
-    cleanup();
-    return UNABLE_TO_KILL_TASK;
+
+  if (kill(-pid, sig) < 0) {
+    if(errno != ESRCH) {
+      fprintf(LOGFILE, "Error is %s\n", strerror(errno));
+      cleanup();
+      return UNABLE_TO_KILL_TASK;
+    }
+    errno = 0;
   }
   cleanup();
   return 0;
 }
+
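
A self-contained sketch of the process-group signalling the reworked kill_user_task() relies on: the task JVM is expected to run as the leader of its own process group, so kill(-pid, sig) reaches the JVM and any sub-processes it spawned (the behaviour TestKillSubProcessesWithLinuxTaskController exercises). The sleep child below is a stand-in used only to demonstrate the mechanism, not code from this commit.

    #include <signal.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(void) {
      pid_t pid = fork();
      if (pid < 0) {
        return 1;                     /* fork failed */
      }
      if (pid == 0) {                 /* child: become a process-group leader */
        setpgid(0, 0);
        execlp("sleep", "sleep", "60", (char *) NULL);
        _exit(127);                   /* exec failed */
      }
      setpgid(pid, pid);              /* set it from the parent too, avoiding a race */
      if (kill(-pid, 0) == 0) {       /* probe: is the group still alive? */
        kill(-pid, SIGTERM);          /* deliver the signal to every group member */
      }
      return 0;
    }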

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/task-controller.h
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/c%2B%2B/task-controller/task-controller.h?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/task-controller.h (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/c++/task-controller/task-controller.h Tue Jun 16 20:54:24 2009
@@ -28,42 +28,37 @@
 #include <sys/stat.h>
 #include <sys/signal.h>
 #include <getopt.h>
+#include<grp.h>
 #include "configuration.h"
 
 //command definitions
 enum command {
-  RUN_TASK,
-  KILL_TASK
+  LAUNCH_TASK_JVM,
+  TERMINATE_TASK_JVM,
+  KILL_TASK_JVM
 };
 
 enum errorcodes {
   INVALID_ARGUMENT_NUMBER = 1,
-  INVALID_USER_NAME,
-  INVALID_COMMAND_PROVIDED,
-  SUPER_USER_NOT_ALLOWED_TO_RUN_TASKS,
-  OUT_OF_MEMORY,
-  INVALID_TT_ROOT,
-  INVALID_PID_PATH,
-  UNABLE_TO_OPEN_PID_FILE_WRITE_MODE,
-  UNABLE_TO_OPEN_PID_FILE_READ_MODE,
-  UNABLE_TO_WRITE_TO_PID_FILE,
-  SETUID_OPER_FAILED,
-  INVALID_TASK_SCRIPT_PATH,
-  UNABLE_TO_EXECUTE_TASK_SCRIPT,
-  UNABLE_TO_READ_PID,
-  UNABLE_TO_KILL_TASK,
-  UNABLE_TO_FIND_PARENT_PID_FILE,
-  TASK_CONTROLLER_SPAWNED_BY_INVALID_PARENT_PROCESS,
-  UNABLE_TO_READ_PARENT_PID
+  INVALID_USER_NAME, //2
+  INVALID_COMMAND_PROVIDED, //3
+  SUPER_USER_NOT_ALLOWED_TO_RUN_TASKS, //4
+  INVALID_TT_ROOT, //5
+  SETUID_OPER_FAILED, //6
+  INVALID_TASK_SCRIPT_PATH, //7
+  UNABLE_TO_EXECUTE_TASK_SCRIPT, //8
+  UNABLE_TO_KILL_TASK, //9
+  INVALID_PROCESS_LAUNCHING_TASKCONTROLLER, //10
+  INVALID_TASK_PID, //11
+  ERROR_RESOLVING_FILE_PATH, //12
+  RELATIVE_PATH_COMPONENTS_IN_FILE_PATH, //13
+  UNABLE_TO_STAT_FILE, //14
+  FILE_NOT_OWNED_BY_TASKTRACKER //15
 };
 
 
-#define TT_PID_PATTERN "%s/hadoop-%s-tasktracker.pid"
-
 #define TT_LOCAL_TASK_SCRIPT_PATTERN "%s/taskTracker/jobcache/%s/%s/taskjvm.sh"
 
-#define TT_SYS_DIR "%s/taskTracker/jobcache/%s/%s/.pid"
-
 #define TT_SYS_DIR_KEY "mapred.local.dir"
 
 #define MAX_ITEMS 10
@@ -81,8 +76,6 @@
 
 int run_task_as_user(const char * user, const char *jobid, const char *taskid, const char *tt_root);
 
-int verify_parent();
-
-int kill_user_task(const char *user, const char *jobid, const char *taskid, const char *tt_root);
+int kill_user_task(const char *user, const char *task_pid, int sig);
 
 int get_user_details(const char *user);
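
An illustration of how the retained TT_LOCAL_TASK_SCRIPT_PATTERN expands into the taskjvm.sh path that run_task_as_user() now validates with check_path() and check_owner() before exec'ing it; the tt-root, job id and task id values below are made up for the example.

    #include <stdio.h>

    #define TT_LOCAL_TASK_SCRIPT_PATTERN "%s/taskTracker/jobcache/%s/%s/taskjvm.sh"

    int main(void) {
      char path[512];
      snprintf(path, sizeof(path), TT_LOCAL_TASK_SCRIPT_PATTERN,
               "/grid/0/mapred/local",                      /* tt-root */
               "job_200906161234_0001",                     /* job id  */
               "attempt_200906161234_0001_m_000000_0");     /* task id */
      printf("%s\n", path);
      /* prints: /grid/0/mapred/local/taskTracker/jobcache/
       *         job_200906161234_0001/attempt_200906161234_0001_m_000000_0/taskjvm.sh */
      return 0;
    }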

Propchange: hadoop/core/branches/HADOOP-4687/mapred/src/contrib/dynamic-scheduler/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Tue Jun 16 20:54:24 2009
@@ -0,0 +1 @@
+/hadoop/core/branches/branch-0.19/src/contrib/dynamic-scheduler:713112

Propchange: hadoop/core/branches/HADOOP-4687/mapred/src/contrib/sqoop/
------------------------------------------------------------------------------
    svn:mergeinfo = /hadoop/core/branches/branch-0.19/src/contrib/sqoop:713112

Propchange: hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Tue Jun 16 20:54:24 2009
@@ -0,0 +1,2 @@
+/hadoop/core/branches/branch-0.19/mapred/src/contrib/streaming:713112
+/hadoop/core/trunk/src/contrib/streaming:776175-784663

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/ivy.xml
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/ivy.xml?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/ivy.xml (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/ivy.xml Tue Jun 16 20:54:24 2009
@@ -28,6 +28,10 @@
       name="commons-cli"
       rev="${commons-cli2.version}"
       conf="common->default"/>
+    <dependency org="commons-cli"
+      name="commons-cli"
+      rev="${commons-cli.version}"
+      conf="common->default"/>
     <dependency org="commons-logging"
       name="commons-logging"
       rev="${commons-logging.version}"

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java Tue Jun 16 20:54:24 2009
@@ -376,6 +376,9 @@
       if (errThread_ != null) {
         errThread_.join(joinDelay_);
       }
+      if (outerrThreadsThrowable != null) {
+        throw new RuntimeException(outerrThreadsThrowable);
+      }
     } catch (InterruptedException e) {
       //ignore
     }
@@ -425,7 +428,11 @@
           if (now-lastStdoutReport > reporterOutDelay_) {
             lastStdoutReport = now;
             String hline = "Records R/W=" + numRecRead_ + "/" + numRecWritten_;
-            reporter.setStatus(hline);
+            if (!processProvidedStatus_) {
+              reporter.setStatus(hline);
+            } else {
+              reporter.progress();
+            }
             logprintln(hline);
             logflush();
           }
@@ -476,6 +483,7 @@
             if (matchesCounter(lineStr)) {
               incrCounter(lineStr);
             } else if (matchesStatus(lineStr)) {
+              processProvidedStatus_ = true;
               setStatus(lineStr);
             } else {
               LOG.warn("Cannot parse reporter line: " + lineStr);
@@ -572,6 +580,7 @@
       if (sim != null) sim.destroy();
       logprintln("mapRedFinished");
     } catch (RuntimeException e) {
+      logprintln("PipeMapRed failed!");
       logStackTrace(e);
       throw e;
     }
@@ -682,4 +691,5 @@
   String LOGNAME;
   PrintStream log_;
 
+  volatile boolean processProvidedStatus_ = false;
 }
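
To show the child-process side of the status handling above: a streaming task may be any executable, so a minimal C mapper serves as a sketch here. Once it writes a reporter:status: line to stderr, the new processProvidedStatus_ flag keeps PipeMapRed from overwriting that status with its periodic "Records R/W=..." message and only reports progress instead. The counter line follows the reporter:counter:<group>,<counter>,<amount> convention handled by matchesCounter()/incrCounter(); the group and counter names are made up.

    #include <stdio.h>

    int main(void) {
      char line[4096];
      long records = 0;
      fprintf(stderr, "reporter:status:starting echo\n");
      while (fgets(line, sizeof(line), stdin) != NULL) {
        fputs(line, stdout);                       /* identity mapper */
        records++;
        if (records % 1000 == 0) {
          fprintf(stderr, "reporter:status:processed %ld records\n", records);
          fprintf(stderr, "reporter:counter:Example,Records,1000\n");
        }
      }
      return 0;
    }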

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java Tue Jun 16 20:54:24 2009
@@ -727,6 +727,9 @@
           || inputFormatSpec_.equals(KeyValueTextInputFormat.class
               .getCanonicalName())
           || inputFormatSpec_.equals(KeyValueTextInputFormat.class.getSimpleName())) {
+        if (inReaderSpec_ == null) {
+          fmt = KeyValueTextInputFormat.class;
+        }
       } else if (inputFormatSpec_.equals(SequenceFileInputFormat.class
           .getName())
           || inputFormatSpec_
@@ -734,6 +737,9 @@
                   .getCanonicalName())
           || inputFormatSpec_
               .equals(org.apache.hadoop.mapred.SequenceFileInputFormat.class.getSimpleName())) {
+        if (inReaderSpec_ == null) {
+          fmt = SequenceFileInputFormat.class;
+        }
       } else if (inputFormatSpec_.equals(SequenceFileAsTextInputFormat.class
           .getName())
           || inputFormatSpec_.equals(SequenceFileAsTextInputFormat.class
@@ -1125,7 +1131,7 @@
 
   protected RunningJob running_;
   protected JobID jobId_;
-  protected static final String LINK_URI = "You need to specify the uris as hdfs://host:port/#linkname," +
+  protected static final String LINK_URI = "You need to specify the uris as scheme://path#linkname," +
     "Please specify a different link name for all of your caching URIs";
 
 }

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/contrib/streaming/src/test/org/apache/hadoop/streaming/StderrApp.java Tue Jun 16 20:54:24 2009
@@ -32,8 +32,16 @@
    * postWriteLines to stderr.
    */
   public static void go(int preWriteLines, int sleep, int postWriteLines) throws IOException {
+    go(preWriteLines, sleep, postWriteLines, false);
+  }
+  
+  public static void go(int preWriteLines, int sleep, int postWriteLines, boolean status) throws IOException {
     BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
     String line;
+    
+    if (status) {
+      System.err.println("reporter:status:starting echo");
+    }      
        
     while (preWriteLines > 0) {
       --preWriteLines;
@@ -57,13 +65,14 @@
 
   public static void main(String[] args) throws IOException {
     if (args.length < 3) {
-      System.err.println("Usage: StderrApp PREWRITE SLEEP POSTWRITE");
+      System.err.println("Usage: StderrApp PREWRITE SLEEP POSTWRITE [STATUS]");
       return;
     }
     int preWriteLines = Integer.parseInt(args[0]);
     int sleep = Integer.parseInt(args[1]);
     int postWriteLines = Integer.parseInt(args[2]);
+    boolean status = args.length > 3 ? Boolean.parseBoolean(args[3]) : false;
     
-    go(preWriteLines, sleep, postWriteLines);
+    go(preWriteLines, sleep, postWriteLines, status);
   }
 }

Propchange: hadoop/core/branches/HADOOP-4687/mapred/src/contrib/vaidya/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Tue Jun 16 20:54:24 2009
@@ -0,0 +1,2 @@
+/hadoop/core/branches/branch-0.19/mapred/src/contrib/vaidya:713112
+/hadoop/core/trunk/src/contrib/vaidya:776175-784663

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/contrib/vaidya/src/java/org/apache/hadoop/vaidya/statistics/job/JobStatistics.java Tue Jun 16 20:54:24 2009
@@ -417,8 +417,6 @@
           mapTask.setValue(MapTaskKeys.INPUT_RECORDS, parts[1]);
         } else if (parts[0].equals("Map-Reduce Framework.Map output records")) {
           mapTask.setValue(MapTaskKeys.OUTPUT_RECORDS, parts[1]);
-        } else if (parts[0].equals("Map-Reduce Framework.Map input bytes")) {
-          mapTask.setValue(MapTaskKeys.INPUT_BYTES, parts[1]);
         } else if (parts[0].equals("Map-Reduce Framework.Map output bytes")) {
           mapTask.setValue(MapTaskKeys.OUTPUT_BYTES, parts[1]);
         } else if (parts[0].equals("Map-Reduce Framework.Combine input records")) {
@@ -427,6 +425,8 @@
           mapTask.setValue(MapTaskKeys.COMBINE_OUTPUT_RECORDS, parts[1]);
         } else if (parts[0].equals("Map-Reduce Framework.Spilled Records")) {
           mapTask.setValue(MapTaskKeys.SPILLED_RECORDS, parts[1]);
+        } else if (parts[0].equals("FileInputFormatCounters.BYTES_READ")) {
+          mapTask.setValue(MapTaskKeys.INPUT_BYTES, parts[1]);
         } else {
           System.out.println("MapCounterKey:<"+parts[0]+"> ==> NOT INCLUDED IN PERFORMANCE ADVISOR MAP TASK");
         }

Propchange: hadoop/core/branches/HADOOP-4687/mapred/src/examples/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Tue Jun 16 20:54:24 2009
@@ -0,0 +1,2 @@
+/hadoop/core/branches/branch-0.19/mapred/src/examples:713112
+/hadoop/core/trunk/src/examples:776175-784663

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/DBCountPageView.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/DBCountPageView.java?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/DBCountPageView.java (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/DBCountPageView.java Tue Jun 16 20:54:24 2009
@@ -32,23 +32,20 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MapReduceBase;
-import org.apache.hadoop.mapred.Mapper;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.Reducer;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.lib.LongSumReducer;
-import org.apache.hadoop.mapred.lib.db.DBConfiguration;
-import org.apache.hadoop.mapred.lib.db.DBInputFormat;
-import org.apache.hadoop.mapred.lib.db.DBOutputFormat;
-import org.apache.hadoop.mapred.lib.db.DBWritable;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
+import org.apache.hadoop.mapreduce.lib.db.DBInputFormat;
+import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -79,7 +76,8 @@
   private static final String[] AccessFieldNames = {"url", "referrer", "time"};
   private static final String[] PageviewFieldNames = {"url", "pageview"};
   
-  private static final String DB_URL = "jdbc:hsqldb:hsql://localhost/URLAccess";
+  private static final String DB_URL = 
+    "jdbc:hsqldb:hsql://localhost/URLAccess";
   private static final String DRIVER_CLASS = "org.hsqldb.jdbcDriver";
   
   private Server server;
@@ -87,7 +85,7 @@
   private void startHsqldbServer() {
     server = new Server();
     server.setDatabasePath(0, 
-        System.getProperty("test.build.data",".") + "/URLAccess");
+        System.getProperty("test.build.data", "/tmp") + "/URLAccess");
     server.setDatabaseName(0, "URLAccess");
     server.start();
   }
@@ -193,10 +191,11 @@
 
 
       //Pages in the site :
-      String[] pages = {"/a", "/b", "/c", "/d", "/e", "/f", "/g", "/h", "/i", "/j"};
+      String[] pages = {"/a", "/b", "/c", "/d", "/e", 
+                        "/f", "/g", "/h", "/i", "/j"};
       //linkMatrix[i] is the array of pages(indexes) that page_i links to.  
-      int[][] linkMatrix = {{1,5,7}, {0,7,4,6,}, {0,1,7,8}, {0,2,4,6,7,9}, {0,1},
-          {0,3,5,9}, {0}, {0,1,3}, {0,2,6}, {0,2,6}};
+      int[][] linkMatrix = {{1,5,7}, {0,7,4,6,}, {0,1,7,8}, 
+        {0,2,4,6,7,9}, {0,1}, {0,3,5,9}, {0}, {0,1,3}, {0,2,6}, {0,2,6}};
 
       //a mini model of user browsing a la pagerank
       int currentPage = random.nextInt(pages.length); 
@@ -211,7 +210,8 @@
 
         int action = random.nextInt(PROBABILITY_PRECISION);
 
-        //go to a new page with probability NEW_PAGE_PROBABILITY / PROBABILITY_PRECISION
+        // go to a new page with probability 
+        // NEW_PAGE_PROBABILITY / PROBABILITY_PRECISION
         if(action < NEW_PAGE_PROBABILITY) { 
           currentPage = random.nextInt(pages.length); // a random page
           referrer = null;
@@ -337,17 +337,15 @@
    * Mapper extracts URLs from the AccessRecord (tuples from db), 
    * and emits a &lt;url,1&gt; pair for each access record. 
    */
-  static class PageviewMapper extends MapReduceBase 
-    implements Mapper<LongWritable, AccessRecord, Text, LongWritable> {
+  static class PageviewMapper extends 
+      Mapper<LongWritable, AccessRecord, Text, LongWritable> {
     
     LongWritable ONE = new LongWritable(1L);
     @Override
-    public void map(LongWritable key, AccessRecord value,
-        OutputCollector<Text, LongWritable> output, Reporter reporter)
-        throws IOException {
-      
+    public void map(LongWritable key, AccessRecord value, Context context)
+        throws IOException, InterruptedException {
       Text oKey = new Text(value.url);
-      output.collect(oKey, ONE);
+      context.write(oKey, ONE);
     }
   }
   
@@ -355,20 +353,19 @@
    * Reducer sums up the pageviews and emits a PageviewRecord, 
    * which will correspond to one tuple in the db.
    */
-  static class PageviewReducer extends MapReduceBase 
-    implements Reducer<Text, LongWritable, PageviewRecord, NullWritable> {
+  static class PageviewReducer extends 
+      Reducer<Text, LongWritable, PageviewRecord, NullWritable> {
     
     NullWritable n = NullWritable.get();
     @Override
-    public void reduce(Text key, Iterator<LongWritable> values,
-        OutputCollector<PageviewRecord, NullWritable> output, Reporter reporter)
-        throws IOException {
+    public void reduce(Text key, Iterable<LongWritable> values, 
+        Context context) throws IOException, InterruptedException {
       
       long sum = 0L;
-      while(values.hasNext()) {
-        sum += values.next().get();
+      for(LongWritable value: values) {
+        sum += value.get();
       }
-      output.collect(new PageviewRecord(key.toString(), sum), n);
+      context.write(new PageviewRecord(key.toString(), sum), n);
     }
   }
   
@@ -385,17 +382,18 @@
     }
     
     initialize(driverClassName, url);
+    Configuration conf = getConf();
+
+    DBConfiguration.configureDB(conf, driverClassName, url);
 
-    JobConf job = new JobConf(getConf(), DBCountPageView.class);
+    Job job = new Job(conf);
         
     job.setJobName("Count Pageviews of URLs");
-
+    job.setJarByClass(DBCountPageView.class);
     job.setMapperClass(PageviewMapper.class);
     job.setCombinerClass(LongSumReducer.class);
     job.setReducerClass(PageviewReducer.class);
 
-    DBConfiguration.configureDB(job, driverClassName, url);
-    
     DBInputFormat.setInput(job, AccessRecord.class, "Access"
         , null, "url", AccessFieldNames);
 
@@ -406,10 +404,9 @@
 
     job.setOutputKeyClass(PageviewRecord.class);
     job.setOutputValueClass(NullWritable.class);
-
+    int ret;
     try {
-      JobClient.runJob(job);
-      
+      ret = job.waitForCompletion(true) ? 0 : 1;
       boolean correct = verify();
       if(!correct) {
         throw new RuntimeException("Evaluation was not correct!");
@@ -417,7 +414,7 @@
     } finally {
       shutdown();    
     }
-    return 0;
+    return ret;
   }
 
   public static void main(String[] args) throws Exception {

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/MultiFileWordCount.java Tue Jun 16 20:54:24 2009
@@ -18,14 +18,11 @@
 
 package org.apache.hadoop.examples;
 
-import java.io.BufferedReader;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.io.InputStreamReader;
 import java.util.StringTokenizer;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -33,19 +30,18 @@
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.mapred.FileInputFormat;
-import org.apache.hadoop.mapred.FileOutputFormat;
-import org.apache.hadoop.mapred.InputSplit;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MapReduceBase;
-import org.apache.hadoop.mapred.Mapper;
-import org.apache.hadoop.mapred.MultiFileInputFormat;
-import org.apache.hadoop.mapred.MultiFileSplit;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.RecordReader;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.lib.LongSumReducer;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
+import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;
+import org.apache.hadoop.util.LineReader;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -99,131 +95,129 @@
 
 
   /**
-   * To use {@link MultiFileInputFormat}, one should extend it, to return a 
-   * (custom) {@link RecordReader}. MultiFileInputFormat uses 
-   * {@link MultiFileSplit}s. 
+   * To use {@link CombineFileInputFormat}, one should extend it to return a 
+   * (custom) {@link RecordReader}. CombineFileInputFormat uses 
+   * {@link CombineFileSplit}s. 
    */
   public static class MyInputFormat 
-    extends MultiFileInputFormat<WordOffset, Text>  {
+    extends CombineFileInputFormat<WordOffset, Text>  {
 
-    @Override
-    public RecordReader<WordOffset,Text> getRecordReader(InputSplit split
-        , JobConf job, Reporter reporter) throws IOException {
-      return new MultiFileLineRecordReader(job, (MultiFileSplit)split);
+    public RecordReader<WordOffset,Text> createRecordReader(InputSplit split,
+        TaskAttemptContext context) throws IOException {
+      return new CombineFileRecordReader<WordOffset, Text>(
+        (CombineFileSplit)split, context, CombineFileLineRecordReader.class);
     }
   }
 
   /**
-   * RecordReader is responsible from extracting records from the InputSplit. 
-   * This record reader accepts a {@link MultiFileSplit}, which encapsulates several 
-   * files, and no file is divided.
+   * RecordReader is responsible for extracting records from a chunk
+   * of the CombineFileSplit. 
    */
-  public static class MultiFileLineRecordReader 
-    implements RecordReader<WordOffset, Text> {
+  public static class CombineFileLineRecordReader 
+    extends RecordReader<WordOffset, Text> {
 
-    private MultiFileSplit split;
-    private long offset; //total offset read so far;
-    private long totLength;
+    private long startOffset; //offset of the chunk;
+    private long end; //end of the chunk;
+    private long pos; // current pos 
     private FileSystem fs;
-    private int count = 0;
-    private Path[] paths;
+    private Path path;
+    private WordOffset key;
+    private Text value;
     
-    private FSDataInputStream currentStream;
-    private BufferedReader currentReader;
+    private FSDataInputStream fileIn;
+    private LineReader reader;
     
-    public MultiFileLineRecordReader(Configuration conf, MultiFileSplit split)
-      throws IOException {
+    public CombineFileLineRecordReader(CombineFileSplit split,
+        TaskAttemptContext context, Integer index) throws IOException {
       
-      this.split = split;
-      fs = FileSystem.get(conf);
-      this.paths = split.getPaths();
-      this.totLength = split.getLength();
-      this.offset = 0;
+      fs = FileSystem.get(context.getConfiguration());
+      this.path = split.getPath(index);
+      this.startOffset = split.getOffset(index);
+      this.end = startOffset + split.getLength(index);
+      boolean skipFirstLine = false;
       
-      //open the first file
-      Path file = paths[count];
-      currentStream = fs.open(file);
-      currentReader = new BufferedReader(new InputStreamReader(currentStream));
+      //open the file
+      fileIn = fs.open(path);
+      if (startOffset != 0) {
+        skipFirstLine = true;
+        --startOffset;
+        fileIn.seek(startOffset);
+      }
+      reader = new LineReader(fileIn);
+      if (skipFirstLine) {  // skip first line and re-establish "startOffset".
+        startOffset += reader.readLine(new Text(), 0,
+                    (int)Math.min((long)Integer.MAX_VALUE, end - startOffset));
+      }
+      this.pos = startOffset;
     }
 
-    public void close() throws IOException { }
-
-    public long getPos() throws IOException {
-      long currentOffset = currentStream == null ? 0 : currentStream.getPos();
-      return offset + currentOffset;
+    public void initialize(InputSplit split, TaskAttemptContext context)
+        throws IOException, InterruptedException {
     }
 
+    public void close() throws IOException { }
+
     public float getProgress() throws IOException {
-      return ((float)getPos()) / totLength;
+      if (startOffset == end) {
+        return 0.0f;
+      } else {
+        return Math.min(1.0f, (pos - startOffset) / (float)(end - startOffset));
+      }
     }
 
-    public boolean next(WordOffset key, Text value) throws IOException {
-      if(count >= split.getNumPaths())
+    public boolean nextKeyValue() throws IOException {
+      if (key == null) {
+        key = new WordOffset();
+        key.fileName = path.getName();
+      }
+      key.offset = pos;
+      if (value == null) {
+        value = new Text();
+      }
+      int newSize = 0;
+      if (pos < end) {
+        newSize = reader.readLine(value);
+        pos += newSize;
+      }
+      if (newSize == 0) {
+        key = null;
+        value = null;
         return false;
-
-      /* Read from file, fill in key and value, if we reach the end of file,
-       * then open the next file and continue from there until all files are
-       * consumed.  
-       */
-      String line;
-      do {
-        line = currentReader.readLine();
-        if(line == null) {
-          //close the file
-          currentReader.close();
-          offset += split.getLength(count);
-          
-          if(++count >= split.getNumPaths()) //if we are done
-            return false;
-          
-          //open a new file
-          Path file = paths[count];
-          currentStream = fs.open(file);
-          currentReader=new BufferedReader(new InputStreamReader(currentStream));
-          key.fileName = file.getName();
-        }
-      } while(line == null);
-      //update the key and value
-      key.offset = currentStream.getPos();
-      value.set(line);
-      
-      return true;
+      } else {
+        return true;
+      }
     }
 
-    public WordOffset createKey() {
-      WordOffset wo = new WordOffset();
-      wo.fileName = paths[0].toString(); //set as the first file
-      return wo;
+    public WordOffset getCurrentKey() 
+        throws IOException, InterruptedException {
+      return key;
     }
 
-    public Text createValue() {
-      return new Text();
+    public Text getCurrentValue() throws IOException, InterruptedException {
+      return value;
     }
   }
 
   /**
    * This Mapper is similar to the one in {@link WordCount.MapClass}.
    */
-  public static class MapClass extends MapReduceBase
-    implements Mapper<WordOffset, Text, Text, IntWritable> {
-
+  public static class MapClass extends 
+      Mapper<WordOffset, Text, Text, IntWritable> {
     private final static IntWritable one = new IntWritable(1);
     private Text word = new Text();
     
-    public void map(WordOffset key, Text value,
-        OutputCollector<Text, IntWritable> output, Reporter reporter)
-        throws IOException {
+    public void map(WordOffset key, Text value, Context context)
+        throws IOException, InterruptedException {
       
       String line = value.toString();
       StringTokenizer itr = new StringTokenizer(line);
       while (itr.hasMoreTokens()) {
         word.set(itr.nextToken());
-        output.collect(word, one);
+        context.write(word, one);
       }
     }
   }
   
-  
   private void printUsage() {
     System.out.println("Usage : multifilewc <input_dir> <output>" );
   }
@@ -232,14 +226,15 @@
 
     if(args.length < 2) {
       printUsage();
-      return 1;
+      return 2;
     }
 
-    JobConf job = new JobConf(getConf(), MultiFileWordCount.class);
+    Job job = new Job(getConf());
     job.setJobName("MultiFileWordCount");
+    job.setJarByClass(MultiFileWordCount.class);
 
     //set the InputFormat of the job to our InputFormat
-    job.setInputFormat(MyInputFormat.class);
+    job.setInputFormatClass(MyInputFormat.class);
     
     // the keys are words (strings)
     job.setOutputKeyClass(Text.class);
@@ -249,15 +244,13 @@
     //use the defined mapper
     job.setMapperClass(MapClass.class);
     //use the WordCount Reducer
-    job.setCombinerClass(LongSumReducer.class);
-    job.setReducerClass(LongSumReducer.class);
+    job.setCombinerClass(IntSumReducer.class);
+    job.setReducerClass(IntSumReducer.class);
 
     FileInputFormat.addInputPaths(job, args[0]);
     FileOutputFormat.setOutputPath(job, new Path(args[1]));
 
-    JobClient.runJob(job);
-    
-    return 0;
+    return job.waitForCompletion(true) ? 0 : 1;
   }
 
   public static void main(String[] args) throws Exception {
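
The MultiFileWordCount hunks above replace MultiFileInputFormat with CombineFileInputFormat, whose CombineFileRecordReader builds one delegate reader per chunk of a CombineFileSplit via reflection; the delegate therefore needs a (CombineFileSplit, TaskAttemptContext, Integer index) constructor, as the new CombineFileLineRecordReader shows. A stripped-down sketch of that contract (the PerChunkReader stub below is hypothetical and only illustrates the required shape, not the committed code):

  import java.io.IOException;

  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapreduce.InputSplit;
  import org.apache.hadoop.mapreduce.RecordReader;
  import org.apache.hadoop.mapreduce.TaskAttemptContext;
  import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
  import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
  import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;

  public class PerChunkInputFormat extends CombineFileInputFormat<Text, Text> {

    @Override
    public RecordReader<Text, Text> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException {
      // CombineFileRecordReader walks the chunks of the CombineFileSplit and
      // constructs one PerChunkReader per chunk, passing the chunk index as
      // the third constructor argument.
      return new CombineFileRecordReader<Text, Text>(
          (CombineFileSplit) split, context, PerChunkReader.class);
    }

    // Hypothetical delegate reader, shown only for the constructor contract.
    // A real implementation would open split.getPath(index) and iterate the
    // records between split.getOffset(index) and offset + split.getLength(index).
    public static class PerChunkReader extends RecordReader<Text, Text> {
      public PerChunkReader(CombineFileSplit split, TaskAttemptContext context,
          Integer index) throws IOException {
      }
      @Override
      public void initialize(InputSplit split, TaskAttemptContext context) { }
      @Override
      public boolean nextKeyValue() { return false; } // stub: yields no records
      @Override
      public Text getCurrentKey() { return null; }
      @Override
      public Text getCurrentValue() { return null; }
      @Override
      public float getProgress() { return 1.0f; }
      @Override
      public void close() { }
    }
  }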

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/Sort.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/Sort.java?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/Sort.java (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/Sort.java Tue Jun 16 20:54:24 2009
@@ -29,11 +29,15 @@
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.mapred.*;
-import org.apache.hadoop.mapred.lib.IdentityMapper;
-import org.apache.hadoop.mapred.lib.IdentityReducer;
-import org.apache.hadoop.mapred.lib.InputSampler;
-import org.apache.hadoop.mapred.lib.TotalOrderPartitioner;
+import org.apache.hadoop.mapred.ClusterStatus;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapreduce.*;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.partition.InputSampler;
+import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -42,7 +46,7 @@
  * other than use the framework to fragment and sort the input values.
  *
  * To run: bin/hadoop jar build/hadoop-examples.jar sort
- *            [-m <i>maps</i>] [-r <i>reduces</i>]
+ *            [-r <i>reduces</i>]
  *            [-inFormat <i>input format class</i>] 
  *            [-outFormat <i>output format class</i>] 
  *            [-outKey <i>output key class</i>] 
@@ -51,10 +55,10 @@
  *            <i>in-dir</i> <i>out-dir</i> 
  */
 public class Sort<K,V> extends Configured implements Tool {
-  private RunningJob jobResult = null;
+  private Job job = null;
 
   static int printUsage() {
-    System.out.println("sort [-m <maps>] [-r <reduces>] " +
+    System.out.println("sort [-r <reduces>] " +
                        "[-inFormat <input format class>] " +
                        "[-outFormat <output format class>] " + 
                        "[-outKey <output key class>] " +
@@ -62,7 +66,7 @@
                        "[-totalOrder <pcnt> <num samples> <max splits>] " +
                        "<input> <output>");
     ToolRunner.printGenericCommandUsage(System.out);
-    return -1;
+    return 2;
   }
 
   /**
@@ -73,16 +77,11 @@
    */
   public int run(String[] args) throws Exception {
 
-    JobConf jobConf = new JobConf(getConf(), Sort.class);
-    jobConf.setJobName("sorter");
-
-    jobConf.setMapperClass(IdentityMapper.class);        
-    jobConf.setReducerClass(IdentityReducer.class);
-
-    JobClient client = new JobClient(jobConf);
+    Configuration conf = getConf();
+    JobClient client = new JobClient(conf);
     ClusterStatus cluster = client.getClusterStatus();
     int num_reduces = (int) (cluster.getMaxReduceTasks() * 0.9);
-    String sort_reduces = jobConf.get("test.sort.reduces_per_host");
+    String sort_reduces = conf.get("test.sort.reduces_per_host");
     if (sort_reduces != null) {
        num_reduces = cluster.getTaskTrackers() * 
                        Integer.parseInt(sort_reduces);
@@ -97,9 +96,7 @@
     InputSampler.Sampler<K,V> sampler = null;
     for(int i=0; i < args.length; ++i) {
       try {
-        if ("-m".equals(args[i])) {
-          jobConf.setNumMapTasks(Integer.parseInt(args[++i]));
-        } else if ("-r".equals(args[i])) {
+        if ("-r".equals(args[i])) {
           num_reduces = Integer.parseInt(args[++i]);
         } else if ("-inFormat".equals(args[i])) {
           inputFormatClass = 
@@ -132,15 +129,21 @@
         return printUsage(); // exits
       }
     }
-
     // Set user-supplied (possibly default) job configs
-    jobConf.setNumReduceTasks(num_reduces);
+    job = new Job(conf);
+    job.setJobName("sorter");
+    job.setJarByClass(Sort.class);
 
-    jobConf.setInputFormat(inputFormatClass);
-    jobConf.setOutputFormat(outputFormatClass);
+    job.setMapperClass(Mapper.class);        
+    job.setReducerClass(Reducer.class);
 
-    jobConf.setOutputKeyClass(outputKeyClass);
-    jobConf.setOutputValueClass(outputValueClass);
+    job.setNumReduceTasks(num_reduces);
+
+    job.setInputFormatClass(inputFormatClass);
+    job.setOutputFormatClass(outputFormatClass);
+
+    job.setOutputKeyClass(outputKeyClass);
+    job.setOutputValueClass(outputValueClass);
 
     // Make sure there are exactly 2 parameters left.
     if (otherArgs.size() != 2) {
@@ -148,37 +151,37 @@
           otherArgs.size() + " instead of 2.");
       return printUsage();
     }
-    FileInputFormat.setInputPaths(jobConf, otherArgs.get(0));
-    FileOutputFormat.setOutputPath(jobConf, new Path(otherArgs.get(1)));
-
+    FileInputFormat.setInputPaths(job, otherArgs.get(0));
+    FileOutputFormat.setOutputPath(job, new Path(otherArgs.get(1)));
+    
     if (sampler != null) {
       System.out.println("Sampling input to effect total-order sort...");
-      jobConf.setPartitionerClass(TotalOrderPartitioner.class);
-      Path inputDir = FileInputFormat.getInputPaths(jobConf)[0];
-      inputDir = inputDir.makeQualified(inputDir.getFileSystem(jobConf));
+      job.setPartitionerClass(TotalOrderPartitioner.class);
+      Path inputDir = FileInputFormat.getInputPaths(job)[0];
+      inputDir = inputDir.makeQualified(inputDir.getFileSystem(conf));
       Path partitionFile = new Path(inputDir, "_sortPartitioning");
-      TotalOrderPartitioner.setPartitionFile(jobConf, partitionFile);
-      InputSampler.<K,V>writePartitionFile(jobConf, sampler);
+      TotalOrderPartitioner.setPartitionFile(conf, partitionFile);
+      InputSampler.<K,V>writePartitionFile(job, sampler);
       URI partitionUri = new URI(partitionFile.toString() +
                                  "#" + "_sortPartitioning");
-      DistributedCache.addCacheFile(partitionUri, jobConf);
-      DistributedCache.createSymlink(jobConf);
+      DistributedCache.addCacheFile(partitionUri, conf);
+      DistributedCache.createSymlink(conf);
     }
 
     System.out.println("Running on " +
         cluster.getTaskTrackers() +
         " nodes to sort from " + 
-        FileInputFormat.getInputPaths(jobConf)[0] + " into " +
-        FileOutputFormat.getOutputPath(jobConf) +
+        FileInputFormat.getInputPaths(job)[0] + " into " +
+        FileOutputFormat.getOutputPath(job) +
         " with " + num_reduces + " reduces.");
     Date startTime = new Date();
     System.out.println("Job started: " + startTime);
-    jobResult = JobClient.runJob(jobConf);
+    int ret = job.waitForCompletion(true) ? 0 : 1;
     Date end_time = new Date();
     System.out.println("Job ended: " + end_time);
     System.out.println("The job took " + 
         (end_time.getTime() - startTime.getTime()) /1000 + " seconds.");
-    return 0;
+    return ret;
   }
 
 
@@ -192,7 +195,7 @@
    * Get the last job that was run using this instance.
    * @return the results of the last job that was run
    */
-  public RunningJob getResult() {
-    return jobResult;
+  public Job getResult() {
+    return job;
   }
 }
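
The Sort hunks above rewire the total-order sort onto the new-API InputSampler and TotalOrderPartitioner. A rough sketch of that wiring, assuming SequenceFile input with Text keys and values; the reduce count, sampling parameters and partition-file location are illustrative, and the settings are made through job.getConfiguration() so they reach the submitted job:

  import java.net.URI;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.filecache.DistributedCache;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
  import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
  import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
  import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
  import org.apache.hadoop.mapreduce.lib.partition.InputSampler;
  import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;

  public class TotalOrderSketch {
    public static void main(String[] args) throws Exception {
      Job job = new Job(new Configuration());
      job.setJobName("total-order sketch");
      job.setJarByClass(TotalOrderSketch.class);
      job.setInputFormatClass(SequenceFileInputFormat.class);
      job.setOutputFormatClass(SequenceFileOutputFormat.class);
      job.setOutputKeyClass(Text.class);
      job.setOutputValueClass(Text.class);
      job.setNumReduceTasks(4);
      FileInputFormat.setInputPaths(job, new Path(args[0]));
      FileOutputFormat.setOutputPath(job, new Path(args[1]));

      Configuration conf = job.getConfiguration();

      // Sample roughly 1% of the input keys (at most 10000 samples over 10
      // splits) and write the partition file the partitioner will read.
      job.setPartitionerClass(TotalOrderPartitioner.class);
      Path partitionFile = new Path(args[0], "_sortPartitioning");
      TotalOrderPartitioner.setPartitionFile(conf, partitionFile);
      InputSampler.writePartitionFile(job,
          new InputSampler.RandomSampler<Text, Text>(0.01, 10000, 10));

      // Ship the partition file to every task through the distributed cache,
      // symlinked into the task working directory as in the Sort hunks above.
      URI partitionUri = new URI(partitionFile.toString() + "#_sortPartitioning");
      DistributedCache.addCacheFile(partitionUri, conf);
      DistributedCache.createSymlink(conf);

      System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
  }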

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/examples/org/apache/hadoop/examples/dancing/DistributedPentomino.java Tue Jun 16 20:54:24 2009
@@ -28,8 +28,9 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
-import org.apache.hadoop.mapred.*;
-import org.apache.hadoop.mapred.lib.IdentityReducer;
+import org.apache.hadoop.mapreduce.*;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.*;
 
 /**
@@ -43,21 +44,25 @@
  */
 public class DistributedPentomino extends Configured implements Tool {
 
+  private static final int PENT_DEPTH = 5;
+  private static final int PENT_WIDTH = 9;
+  private static final int PENT_HEIGHT = 10;
+  private static final int DEFAULT_MAPS = 2000;
+  
   /**
    * Each map takes a line, which represents a prefix move and finds all of 
    * the solutions that start with that prefix. The output is the prefix as
    * the key and the solution as the value.
    */
-  public static class PentMap extends MapReduceBase
-    implements Mapper<WritableComparable, Text, Text, Text> {
+  public static class PentMap extends 
+      Mapper<WritableComparable<?>, Text, Text, Text> {
     
     private int width;
     private int height;
     private int depth;
     private Pentomino pent;
     private Text prefixString;
-    private OutputCollector<Text, Text> output;
-    private Reporter reporter;
+    private Context context;
     
     /**
      * For each solution, generate the prefix and a string representation
@@ -72,10 +77,12 @@
       public void solution(List<List<Pentomino.ColumnName>> answer) {
         String board = Pentomino.stringifySolution(width, height, answer);
         try {
-          output.collect(prefixString, new Text("\n" + board));
-          reporter.incrCounter(pent.getCategory(answer), 1);
+          context.write(prefixString, new Text("\n" + board));
+          context.getCounter(pent.getCategory(answer)).increment(1);
         } catch (IOException e) {
           System.err.println(StringUtils.stringifyException(e));
+        } catch (InterruptedException ie) {
+          System.err.println(StringUtils.stringifyException(ie));
         }
       }
     }
@@ -85,11 +92,8 @@
      * will be selected for each column in order). Find all solutions with
      * that prefix.
      */
-    public void map(WritableComparable key, Text value,
-                    OutputCollector<Text, Text> output, Reporter reporter
-                    ) throws IOException {
-      this.output = output;
-      this.reporter = reporter;
+    public void map(WritableComparable<?> key, Text value, Context context) 
+        throws IOException {
       prefixString = value;
       StringTokenizer itr = new StringTokenizer(prefixString.toString(), ",");
       int[] prefix = new int[depth];
@@ -102,10 +106,12 @@
     }
     
     @Override
-    public void configure(JobConf conf) {
-      depth = conf.getInt("pent.depth", -1);
-      width = conf.getInt("pent.width", -1);
-      height = conf.getInt("pent.height", -1);
+    public void setup(Context context) {
+      this.context = context;
+      Configuration conf = context.getConfiguration();
+      depth = conf.getInt("pent.depth", PENT_DEPTH);
+      width = conf.getInt("pent.width", PENT_WIDTH);
+      height = conf.getInt("pent.height", PENT_HEIGHT);
       pent = (Pentomino) 
         ReflectionUtils.newInstance(conf.getClass("pent.class", 
                                                   OneSidedPentomino.class), 
@@ -123,16 +129,17 @@
    * @param pent the puzzle 
    * @param depth the depth to explore when generating prefixes
    */
-  private static void createInputDirectory(FileSystem fs, 
+  private static long createInputDirectory(FileSystem fs, 
                                            Path dir,
                                            Pentomino pent,
                                            int depth
                                            ) throws IOException {
     fs.mkdirs(dir);
     List<int[]> splits = pent.getSplits(depth);
+    Path input = new Path(dir, "part1");
     PrintStream file = 
       new PrintStream(new BufferedOutputStream
-                      (fs.create(new Path(dir, "part1")), 64*1024));
+                      (fs.create(input), 64*1024));
     for(int[] prefix: splits) {
       for(int i=0; i < prefix.length; ++i) {
         if (i != 0) {
@@ -143,6 +150,7 @@
       file.print('\n');
     }
     file.close();
+    return fs.getFileStatus(input).getLen();
   }
   
   /**
@@ -151,57 +159,54 @@
    * Splits the job into 2000 maps and 1 reduce.
    */
   public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(new Configuration(), new DistributedPentomino(), args);
+    int res = ToolRunner.run(new Configuration(), 
+                new DistributedPentomino(), args);
     System.exit(res);
   }
 
   public int run(String[] args) throws Exception {
-    JobConf conf;
-    int depth = 5;
-    int width = 9;
-    int height = 10;
-    Class<? extends Pentomino> pentClass;
     if (args.length == 0) {
       System.out.println("pentomino <output>");
       ToolRunner.printGenericCommandUsage(System.out);
-      return -1;
+      return 2;
     }
-    
-    conf = new JobConf(getConf());
-    width = conf.getInt("pent.width", width);
-    height = conf.getInt("pent.height", height);
-    depth = conf.getInt("pent.depth", depth);
-    pentClass = conf.getClass("pent.class", OneSidedPentomino.class, Pentomino.class);
-    
+
+    Configuration conf = getConf();
+    int width = conf.getInt("pent.width", PENT_WIDTH);
+    int height = conf.getInt("pent.height", PENT_HEIGHT);
+    int depth = conf.getInt("pent.depth", PENT_DEPTH);
+    Class<? extends Pentomino> pentClass = conf.getClass("pent.class", 
+      OneSidedPentomino.class, Pentomino.class);
+    int numMaps = conf.getInt("mapred.map.tasks", DEFAULT_MAPS);
     Path output = new Path(args[0]);
     Path input = new Path(output + "_input");
     FileSystem fileSys = FileSystem.get(conf);
     try {
-      FileInputFormat.setInputPaths(conf, input);
-      FileOutputFormat.setOutputPath(conf, output);
-      conf.setJarByClass(PentMap.class);
+      Job job = new Job(conf);
+      FileInputFormat.setInputPaths(job, input);
+      FileOutputFormat.setOutputPath(job, output);
+      job.setJarByClass(PentMap.class);
       
-      conf.setJobName("dancingElephant");
+      job.setJobName("dancingElephant");
       Pentomino pent = ReflectionUtils.newInstance(pentClass, conf);
       pent.initialize(width, height);
-      createInputDirectory(fileSys, input, pent, depth);
+      long inputSize = createInputDirectory(fileSys, input, pent, depth);
+      // for forcing the number of maps
+      FileInputFormat.setMaxInputSplitSize(job, (inputSize/numMaps));
    
       // the keys are the prefix strings
-      conf.setOutputKeyClass(Text.class);
+      job.setOutputKeyClass(Text.class);
       // the values are puzzle solutions
-      conf.setOutputValueClass(Text.class);
+      job.setOutputValueClass(Text.class);
       
-      conf.setMapperClass(PentMap.class);        
-      conf.setReducerClass(IdentityReducer.class);
+      job.setMapperClass(PentMap.class);        
+      job.setReducerClass(Reducer.class);
       
-      conf.setNumMapTasks(2000);
-      conf.setNumReduceTasks(1);
+      job.setNumReduceTasks(1);
       
-      JobClient.runJob(conf);
+      return (job.waitForCompletion(true) ? 0 : 1);
       } finally {
       fileSys.delete(input, true);
     }
-    return 0;
   }
-
 }
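
In the DistributedPentomino hunks above, the old conf.setNumMapTasks(2000) call disappears because the new API has no direct map-count setter; instead the generated input's size is measured and the maximum split size is capped so the single input file is carved into roughly the desired number of maps. A small sketch of that technique, assuming one input file and an illustrative target of 2000 maps:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.mapreduce.Job;
  import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

  public class SplitSizeSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      Job job = new Job(conf);
      Path input = new Path(args[0]);          // assumed to be a single file
      FileInputFormat.setInputPaths(job, input);

      int targetMaps = 2000;
      long inputSize = FileSystem.get(conf).getFileStatus(input).getLen();
      // Cap each split at inputSize / targetMaps bytes so the one input file
      // yields roughly targetMaps map tasks.
      FileInputFormat.setMaxInputSplitSize(job, Math.max(1L, inputSize / targetMaps));
    }
  }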

Propchange: hadoop/core/branches/HADOOP-4687/mapred/src/java/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Tue Jun 16 20:54:24 2009
@@ -0,0 +1,2 @@
+/hadoop/core/branches/branch-0.19/mapred/src/java:713112
+/hadoop/core/trunk/src/mapred:776175-784663

Modified: hadoop/core/branches/HADOOP-4687/mapred/src/java/mapred-default.xml
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/mapred/src/java/mapred-default.xml?rev=785392&r1=785391&r2=785392&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/mapred/src/java/mapred-default.xml (original)
+++ hadoop/core/branches/HADOOP-4687/mapred/src/java/mapred-default.xml Tue Jun 16 20:54:24 2009
@@ -186,42 +186,6 @@
 </property>
 
 <property>
-  <name>mapred.tasktracker.pmem.reserved</name>
-  <value>-1</value>
-  <description>Configuration property to specify the amount of physical memory
-    that has to be reserved by the TaskTracker for system usage (OS, TT etc).
-    The reserved physical memory should be a part of the total physical memory
-    available on the TaskTracker.
-
-    The reserved physical memory and the total physical memory values are
-    reported by the TaskTracker as part of heart-beat so that they can
-    considered by a scheduler. Please refer to the documentation of the
-    configured scheduler to see how this property is used.
-  </description>
-</property>
-
-<property>
-  <name>mapred.task.default.maxvmem</name>
-  <value>-1</value>
-  <description>
-    Cluster-wide configuration in bytes to be set by the administrators that
-    provides default amount of maximum virtual memory for job's tasks. This has
-    to be set on both the JobTracker node for the sake of scheduling decisions
-    and on the TaskTracker nodes for the sake of memory management.
-
-    If a job doesn't specify its virtual memory requirement by setting
-    mapred.task.maxvmem to -1, tasks are assured a memory limit set
-    to this property. This property is set to -1 by default.
-
-    This value should in general be less than the cluster-wide
-    configuration mapred.task.limit.maxvmem. If not or if it is not set,
-    TaskTracker's memory management will be disabled and a scheduler's memory
-    based scheduling decisions may be affected. Please refer to the
-    documentation of the configured scheduler to see how this property is used.
-  </description>
-</property>
-
-<property>
   <name>mapred.task.limit.maxvmem</name>
   <value>-1</value>
   <description>
@@ -272,23 +236,6 @@
 </property>
 
 <property>
-  <name>mapred.task.maxpmem</name>name>
-  <value>-1</value>
-  <description>
-   The maximum amount of physical memory any task of a job will use in bytes.
-
-   This value may be used by schedulers that support scheduling based on job's
-   memory requirements. In general, a task of this job will be scheduled on a
-   TaskTracker, only if the amount of physical memory still unoccupied on the
-   TaskTracker is greater than or equal to this value. Different schedulers can
-   take different decisions, some might just ignore this value. Please refer to
-   the documentation of the scheduler being configured to see if it does
-   memory based scheduling and if it does, how this variable is used by that
-   scheduler.
-  </description>
-</property>
-
-<property>
   <name>mapred.tasktracker.memory_calculator_plugin</name>
   <value></value>
   <description>
@@ -459,6 +406,16 @@
 </property>
 
 <property>
+  <name>mapred.child.env</name>
+  <value></value>
+  <description>User-added environment variables for the task tracker child 
+  processes. Examples:
+  1) A=foo  This sets the env variable A to foo.
+  2) B=$B:c This inherits the tasktracker's B env variable and appends ":c".
+  </description>
+</property>
+
+<property>
   <name>mapred.child.ulimit</name>
   <value></value>
   <description>The maximum virtual memory, in KB, of a process launched by the 
@@ -707,6 +664,15 @@
 </property>
 
 <property>
+  <name>mapred.heartbeats.in.second</name>
+  <value>100</value>
+  <description>Expert: Approximate number of heart-beats that can arrive at 
+               the JobTracker in a second. Assuming each RPC can be processed 
+               in 10 msec, the default is set to 100 RPCs per second.
+  </description>
+</property> 
+
+<property>
   <name>mapred.max.tracker.blacklists</name>
   <value>4</value>
   <description>The number of blacklists for a taskTracker by various jobs
@@ -985,4 +951,32 @@
   </description>
 </property>
 
+<property>
+  <name>mapred.max.maps.per.node</name>
+  <value>-1</value>
+  <description>Per-node limit on running map tasks for the job. A value
+    of -1 signifies no limit.</description>
+</property>
+
+<property>
+  <name>mapred.max.reduces.per.node</name>
+  <value>-1</value>
+  <description>Per-node limit on running reduce tasks for the job. A value
+    of -1 signifies no limit.</description>
+</property>
+
+<property>
+  <name>mapred.running.map.limit</name>
+  <value>-1</value>
+  <description>Cluster-wide limit on running map tasks for the job. A value
+    of -1 signifies no limit.</description>
+</property>
+
+<property>
+  <name>mapred.running.reduce.limit</name>
+  <value>-1</value>
+  <description>Cluster-wide limit on running reduce tasks for the job. A value
+    of -1 signifies no limit.</description>
+</property>
+
 </configuration>
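
The properties added to mapred-default.xml above (mapred.child.env plus the per-node and cluster-wide running-task limits) can also be set per job from submission code rather than in a site file. A brief sketch with illustrative values only:

  import org.apache.hadoop.conf.Configuration;

  public class NewKnobsSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Environment for task child JVMs; $B on the right-hand side picks up
      // the tasktracker's own value of B, as in the description above.
      conf.set("mapred.child.env", "B=$B:c");
      // Per-node and cluster-wide caps on this job's running tasks (-1 = no limit).
      conf.setInt("mapred.max.maps.per.node", 4);
      conf.setInt("mapred.max.reduces.per.node", 2);
      conf.setInt("mapred.running.map.limit", 40);
      conf.setInt("mapred.running.reduce.limit", 20);
    }
  }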


