hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From sha...@apache.org
Subject svn commit: r771179 - in /hadoop/core/trunk: ./ src/mapred/org/apache/hadoop/mapred/ src/mapred/org/apache/hadoop/mapreduce/ src/test/org/apache/hadoop/mapred/ src/test/org/apache/hadoop/mapreduce/
Date Mon, 04 May 2009 05:19:21 GMT
Author: sharad
Date: Mon May  4 05:19:20 2009
New Revision: 771179

URL: http://svn.apache.org/viewvc?rev=771179&view=rev
Log:
HADOOP-5717. Create public enum class for the Framework counters. Contributed by Amareshwari
Sriramadasu.

Added:
    hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/JobCounter.java
    hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/JobCounter.properties
    hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/TaskCounter.java
    hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/TaskCounter.properties
Removed:
    hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobInProgress_Counter.properties
    hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/Task_Counter.properties
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/Counters.java
    hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobInProgress.java
    hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/MapTask.java
    hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/ReduceTask.java
    hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/Task.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestBadRecords.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestCounters.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestJobInProgress.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRDFSSort.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSpilledRecordsCounter.java
    hadoop/core/trunk/src/test/org/apache/hadoop/mapreduce/TestMapReduceLocal.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon May  4 05:19:20 2009
@@ -281,6 +281,9 @@
     HADOOP-5613. Change S3Exception to checked exception.
     (Andrew Hitchcock via tomwhite)
 
+    HADOOP-5717. Create public enum class for the Framework counters in 
+    org.apache.hadoop.mapreduce. (Amareshwari Sriramadasu via sharad)
+
   OPTIMIZATIONS
 
     HADOOP-5595. NameNode does not need to run a replicator to choose a

Modified: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/Counters.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/Counters.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/Counters.java (original)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/Counters.java Mon May  4 05:19:20 2009
@@ -355,6 +355,18 @@
    * with the specified name.
    */
   public synchronized Group getGroup(String groupName) {
+    // To provide support for deprecated group names  
+    if (groupName.equals("org.apache.hadoop.mapred.Task$Counter")) {
+      groupName = "org.apache.hadoop.mapreduce.TaskCounter";
+      LOG.warn("Group org.apache.hadoop.mapred.Task$Counter is deprecated." +
+               " Use org.apache.hadoop.mapreduce.TaskCounter instead");
+    } else if (groupName.equals(
+                 "org.apache.hadoop.mapred.JobInProgress$Counter")) {
+      groupName = "org.apache.hadoop.mapreduce.JobCounter";
+      LOG.warn("Group org.apache.hadoop.mapred.JobInProgress$Counter " +
+               "is deprecated. Use " +
+               "org.apache.hadoop.mapreduce.JobCounter instead");
+    }
     Group result = counters.get(groupName);
     if (result == null) {
       result = new Group(groupName);

Modified: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobInProgress.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobInProgress.java (original)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/JobInProgress.java Mon May  4 05:19:20 2009
@@ -38,6 +38,7 @@
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JobHistory.Values;
+import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsRecord;
@@ -179,16 +180,6 @@
   private long maxVirtualMemoryForTask;
   private long maxPhysicalMemoryForTask;
   
-  // Per-job counters
-  public static enum Counter { 
-    NUM_FAILED_MAPS, 
-    NUM_FAILED_REDUCES,
-    TOTAL_LAUNCHED_MAPS,
-    TOTAL_LAUNCHED_REDUCES,
-    OTHER_LOCAL_MAPS,
-    DATA_LOCAL_MAPS,
-    RACK_LOCAL_MAPS
-  }
   private Counters jobCounters = new Counters();
   
   private MetricsRecord jobMetrics;
@@ -1300,7 +1291,7 @@
     } else if (tip.isMapTask()) {
       ++runningMapTasks;
       name = Values.MAP.name();
-      counter = Counter.TOTAL_LAUNCHED_MAPS;
+      counter = JobCounter.TOTAL_LAUNCHED_MAPS;
       splits = tip.getSplitNodes();
       if (tip.getActiveTasks().size() > 1)
         speculativeMapTasks++;
@@ -1308,7 +1299,7 @@
     } else {
       ++runningReduceTasks;
       name = Values.REDUCE.name();
-      counter = Counter.TOTAL_LAUNCHED_REDUCES;
+      counter = JobCounter.TOTAL_LAUNCHED_REDUCES;
       if (tip.getActiveTasks().size() > 1)
         speculativeReduceTasks++;
       metrics.launchReduce(id);
@@ -1356,17 +1347,17 @@
       switch (level) {
       case 0 :
         LOG.info("Choosing data-local task " + tip.getTIPId());
-        jobCounters.incrCounter(Counter.DATA_LOCAL_MAPS, 1);
+        jobCounters.incrCounter(JobCounter.DATA_LOCAL_MAPS, 1);
         break;
       case 1:
         LOG.info("Choosing rack-local task " + tip.getTIPId());
-        jobCounters.incrCounter(Counter.RACK_LOCAL_MAPS, 1);
+        jobCounters.incrCounter(JobCounter.RACK_LOCAL_MAPS, 1);
         break;
       default :
         // check if there is any locality
         if (level != this.maxLevel) {
           LOG.info("Choosing cached task at level " + level + tip.getTIPId());
-          jobCounters.incrCounter(Counter.OTHER_LOCAL_MAPS, 1);
+          jobCounters.incrCounter(JobCounter.OTHER_LOCAL_MAPS, 1);
         }
         break;
       }
@@ -2413,9 +2404,9 @@
       //
       if (!tip.isJobCleanupTask() && !tip.isJobSetupTask()) {
         if (tip.isMapTask()) {
-          jobCounters.incrCounter(Counter.NUM_FAILED_MAPS, 1);
+          jobCounters.incrCounter(JobCounter.NUM_FAILED_MAPS, 1);
         } else {
-          jobCounters.incrCounter(Counter.NUM_FAILED_REDUCES, 1);
+          jobCounters.incrCounter(JobCounter.NUM_FAILED_REDUCES, 1);
         }
       }
     }

Modified: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/MapTask.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/MapTask.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/MapTask.java (original)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/MapTask.java Mon May  4 05:19:20 2009
@@ -18,13 +18,6 @@
 
 package org.apache.hadoop.mapred;
 
-import static org.apache.hadoop.mapred.Task.Counter.COMBINE_INPUT_RECORDS;
-import static org.apache.hadoop.mapred.Task.Counter.COMBINE_OUTPUT_RECORDS;
-import static org.apache.hadoop.mapred.Task.Counter.MAP_INPUT_BYTES;
-import static org.apache.hadoop.mapred.Task.Counter.MAP_INPUT_RECORDS;
-import static org.apache.hadoop.mapred.Task.Counter.MAP_OUTPUT_BYTES;
-import static org.apache.hadoop.mapred.Task.Counter.MAP_OUTPUT_RECORDS;
-
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.DataOutputStream;
@@ -59,6 +52,7 @@
 import org.apache.hadoop.mapred.Merger.Segment;
 import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.util.IndexedSortable;
 import org.apache.hadoop.util.IndexedSorter;
 import org.apache.hadoop.util.Progress;
@@ -157,8 +151,8 @@
     TrackedRecordReader(RecordReader<K,V> raw, TaskReporter reporter) 
       throws IOException{
       rawIn = raw;
-      inputRecordCounter = reporter.getCounter(MAP_INPUT_RECORDS);
-      inputByteCounter = reporter.getCounter(MAP_INPUT_BYTES);
+      inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
+      inputByteCounter = reporter.getCounter(TaskCounter.MAP_INPUT_BYTES);
       this.reporter = reporter;
     }
 
@@ -219,7 +213,7 @@
                          TaskReporter reporter) throws IOException{
       super(raw, reporter);
       this.umbilical = umbilical;
-      this.skipRecCounter = reporter.getCounter(Counter.MAP_SKIPPED_RECORDS);
+      this.skipRecCounter = reporter.getCounter(TaskCounter.MAP_SKIPPED_RECORDS);
       this.toWriteSkipRecs = toWriteSkipRecs() &&  
         SkipBadRecords.getSkipOutputPath(conf)!=null;
       skipIt = getSkipRanges().skipRangeIterator();
@@ -384,7 +378,7 @@
     NewTrackingRecordReader(org.apache.hadoop.mapreduce.RecordReader<K,V> real,
                             TaskReporter reporter) {
       this.real = real;
-      this.inputRecordCounter = reporter.getCounter(MAP_INPUT_RECORDS);
+      this.inputRecordCounter = reporter.getCounter(TaskCounter.MAP_INPUT_RECORDS);
     }
 
     @Override
@@ -557,7 +551,7 @@
 
       out = job.getOutputFormat().getRecordWriter(fs, job, finalName, reporter);
 
-      mapOutputRecordCounter = reporter.getCounter(MAP_OUTPUT_RECORDS);
+      mapOutputRecordCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
     }
 
     public void close() throws IOException {
@@ -695,11 +689,11 @@
       valSerializer = serializationFactory.getSerializer(valClass);
       valSerializer.open(bb);
       // counters
-      mapOutputByteCounter = reporter.getCounter(MAP_OUTPUT_BYTES);
-      mapOutputRecordCounter = reporter.getCounter(MAP_OUTPUT_RECORDS);
+      mapOutputByteCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_BYTES);
+      mapOutputRecordCounter = reporter.getCounter(TaskCounter.MAP_OUTPUT_RECORDS);
       Counters.Counter combineInputCounter = 
-        reporter.getCounter(COMBINE_INPUT_RECORDS);
-      combineOutputCounter = reporter.getCounter(COMBINE_OUTPUT_RECORDS);
+        reporter.getCounter(TaskCounter.COMBINE_INPUT_RECORDS);
+      combineOutputCounter = reporter.getCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
       // compression
       if (job.getCompressMapOutput()) {
         Class<? extends CompressionCodec> codecClass =

Modified: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/ReduceTask.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/ReduceTask.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/ReduceTask.java (original)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/ReduceTask.java Mon May  4 05:19:20 2009
@@ -25,8 +25,6 @@
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.lang.Math;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
 import java.net.URI;
 import java.net.URL;
 import java.net.URLClassLoader;
@@ -76,6 +74,7 @@
 import org.apache.hadoop.mapred.SortedRanges.SkipRangeIterator;
 import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsRecord;
 import org.apache.hadoop.metrics.MetricsUtil;
@@ -112,15 +111,15 @@
   private Progress sortPhase;
   private Progress reducePhase;
   private Counters.Counter reduceShuffleBytes = 
-    getCounters().findCounter(Counter.REDUCE_SHUFFLE_BYTES);
+    getCounters().findCounter(TaskCounter.REDUCE_SHUFFLE_BYTES);
   private Counters.Counter reduceInputKeyCounter = 
-    getCounters().findCounter(Counter.REDUCE_INPUT_GROUPS);
+    getCounters().findCounter(TaskCounter.REDUCE_INPUT_GROUPS);
   private Counters.Counter reduceInputValueCounter = 
-    getCounters().findCounter(Counter.REDUCE_INPUT_RECORDS);
+    getCounters().findCounter(TaskCounter.REDUCE_INPUT_RECORDS);
   private Counters.Counter reduceOutputCounter = 
-    getCounters().findCounter(Counter.REDUCE_OUTPUT_RECORDS);
+    getCounters().findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS);
   private Counters.Counter reduceCombineOutputCounter =
-    getCounters().findCounter(Counter.COMBINE_OUTPUT_RECORDS);
+    getCounters().findCounter(TaskCounter.COMBINE_OUTPUT_RECORDS);
 
   // A custom comparator for map output files. Here the ordering is determined
   // by the file's size and path. In case of files with same size and different
@@ -268,9 +267,9 @@
        super(in, comparator, keyClass, valClass, conf, reporter);
        this.umbilical = umbilical;
        this.skipGroupCounter = 
-         reporter.getCounter(Counter.REDUCE_SKIPPED_GROUPS);
+         reporter.getCounter(TaskCounter.REDUCE_SKIPPED_GROUPS);
        this.skipRecCounter = 
-         reporter.getCounter(Counter.REDUCE_SKIPPED_RECORDS);
+         reporter.getCounter(TaskCounter.REDUCE_SKIPPED_RECORDS);
        this.toWriteSkipRecs = toWriteSkipRecs() &&  
          SkipBadRecords.getSkipOutputPath(conf)!=null;
        this.keyClass = keyClass;
@@ -1714,7 +1713,7 @@
       this.maxInFlight = 4 * numCopiers;
       this.maxBackoff = conf.getInt("mapred.reduce.copy.backoff", 300);
       Counters.Counter combineInputCounter = 
-        reporter.getCounter(Task.Counter.COMBINE_INPUT_RECORDS);
+        reporter.getCounter(TaskCounter.COMBINE_INPUT_RECORDS);
       this.combinerRunner = CombinerRunner.create(conf, getTaskID(),
                                                   combineInputCounter,
                                                   reporter, null);

Modified: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/Task.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/Task.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/Task.java (original)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapred/Task.java Mon May  4 05:19:20 2009
@@ -45,6 +45,7 @@
 import org.apache.hadoop.io.serializer.Deserializer;
 import org.apache.hadoop.io.serializer.SerializationFactory;
 import org.apache.hadoop.mapred.IFile.Writer;
+import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Progress;
 import org.apache.hadoop.util.Progressable;
@@ -56,24 +57,6 @@
   private static final Log LOG =
     LogFactory.getLog(Task.class);
 
-  // Counters used by Task subclasses
-  protected static enum Counter { 
-    MAP_INPUT_RECORDS, 
-    MAP_OUTPUT_RECORDS,
-    MAP_SKIPPED_RECORDS,
-    MAP_INPUT_BYTES, 
-    MAP_OUTPUT_BYTES,
-    COMBINE_INPUT_RECORDS,
-    COMBINE_OUTPUT_RECORDS,
-    REDUCE_INPUT_GROUPS,
-    REDUCE_SHUFFLE_BYTES,
-    REDUCE_INPUT_RECORDS,
-    REDUCE_OUTPUT_RECORDS,
-    REDUCE_SKIPPED_GROUPS,
-    REDUCE_SKIPPED_RECORDS,
-    SPILLED_RECORDS
-  }
-  
   /**
    * Counters to measure the usage of the different file systems.
    * Always return the String array with two elements. First one is the name of  
@@ -144,7 +127,7 @@
   public Task() {
     taskStatus = TaskStatus.createTaskStatus(isMapTask());
     taskId = new TaskAttemptID();
-    spilledRecordsCounter = counters.findCounter(Counter.SPILLED_RECORDS);
+    spilledRecordsCounter = counters.findCounter(TaskCounter.SPILLED_RECORDS);
   }
 
   public Task(String jobFile, TaskAttemptID taskId, int partition) {
@@ -161,7 +144,7 @@
                                                     TaskStatus.Phase.SHUFFLE, 
                                                   counters);
     this.mapOutputFile.setJobId(taskId.getJobID());
-    spilledRecordsCounter = counters.findCounter(Counter.SPILLED_RECORDS);
+    spilledRecordsCounter = counters.findCounter(TaskCounter.SPILLED_RECORDS);
   }
 
   ////////////////////////////////////////////

Added: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/JobCounter.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/JobCounter.java?rev=771179&view=auto
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/JobCounter.java (added)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/JobCounter.java Mon May  4 05:19:20 2009
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce;
+
+// Per-job counters
+public enum JobCounter {
+  NUM_FAILED_MAPS, 
+  NUM_FAILED_REDUCES,
+  TOTAL_LAUNCHED_MAPS,
+  TOTAL_LAUNCHED_REDUCES,
+  OTHER_LOCAL_MAPS,
+  DATA_LOCAL_MAPS,
+  RACK_LOCAL_MAPS
+}

Added: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/JobCounter.properties
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/JobCounter.properties?rev=771179&view=auto
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/JobCounter.properties (added)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/JobCounter.properties Mon May  4 05:19:20 2009
@@ -0,0 +1,12 @@
+# ResourceBundle properties file for job-level counters
+
+CounterGroupName=              Job Counters 
+
+NUM_FAILED_MAPS.name=          Failed map tasks
+NUM_FAILED_REDUCES.name=       Failed reduce tasks
+TOTAL_LAUNCHED_MAPS.name=      Launched map tasks
+TOTAL_LAUNCHED_REDUCES.name=   Launched reduce tasks
+OTHER_LOCAL_MAPS.name=         Other local map tasks
+DATA_LOCAL_MAPS.name=          Data-local map tasks
+RACK_LOCAL_MAPS.name=          Rack-local map tasks
+

Added: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/TaskCounter.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/TaskCounter.java?rev=771179&view=auto
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/TaskCounter.java (added)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/TaskCounter.java Mon May  4 05:19:20 2009
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce;
+
+// Counters used by Task classes
+public enum TaskCounter {
+  MAP_INPUT_RECORDS, 
+  MAP_OUTPUT_RECORDS,
+  MAP_SKIPPED_RECORDS,
+  MAP_INPUT_BYTES, 
+  MAP_OUTPUT_BYTES,
+  COMBINE_INPUT_RECORDS,
+  COMBINE_OUTPUT_RECORDS,
+  REDUCE_INPUT_GROUPS,
+  REDUCE_SHUFFLE_BYTES,
+  REDUCE_INPUT_RECORDS,
+  REDUCE_OUTPUT_RECORDS,
+  REDUCE_SKIPPED_GROUPS,
+  REDUCE_SKIPPED_RECORDS,
+  SPILLED_RECORDS
+}

Added: hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/TaskCounter.properties
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/TaskCounter.properties?rev=771179&view=auto
==============================================================================
--- hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/TaskCounter.properties (added)
+++ hadoop/core/trunk/src/mapred/org/apache/hadoop/mapreduce/TaskCounter.properties Mon May  4 05:19:20 2009
@@ -0,0 +1,19 @@
+# ResourceBundle properties file for Map-Reduce counters
+
+CounterGroupName=              Map-Reduce Framework
+
+MAP_INPUT_RECORDS.name=        Map input records
+MAP_INPUT_BYTES.name=          Map input bytes
+MAP_OUTPUT_RECORDS.name=       Map output records
+MAP_OUTPUT_BYTES.name=         Map output bytes
+MAP_SKIPPED_RECORDS.name=      Map skipped records
+COMBINE_INPUT_RECORDS.name=    Combine input records
+COMBINE_OUTPUT_RECORDS.name=   Combine output records
+REDUCE_INPUT_GROUPS.name=      Reduce input groups
+REDUCE_SHUFFLE_BYTES.name=     Reduce shuffle bytes
+REDUCE_INPUT_RECORDS.name=     Reduce input records
+REDUCE_OUTPUT_RECORDS.name=    Reduce output records
+REDUCE_SKIPPED_RECORDS.name=   Reduce skipped records
+REDUCE_SKIPPED_GROUPS.name=    Reduce skipped groups
+SPILLED_RECORDS.name=          Spilled Records
+

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestBadRecords.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestBadRecords.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestBadRecords.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestBadRecords.java Mon May  4 05:19:20 2009
@@ -37,6 +37,7 @@
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.util.ReflectionUtils;
 
 public class TestBadRecords extends ClusterMapReduceTestCase {
@@ -113,25 +114,25 @@
     
     //validate counters
     Counters counters = runningJob.getCounters();
-    assertEquals(counters.findCounter(Task.Counter.MAP_SKIPPED_RECORDS).
+    assertEquals(counters.findCounter(TaskCounter.MAP_SKIPPED_RECORDS).
         getCounter(),mapperBadRecords.size());
     
     int mapRecs = input.size() - mapperBadRecords.size();
-    assertEquals(counters.findCounter(Task.Counter.MAP_INPUT_RECORDS).
+    assertEquals(counters.findCounter(TaskCounter.MAP_INPUT_RECORDS).
         getCounter(),mapRecs);
-    assertEquals(counters.findCounter(Task.Counter.MAP_OUTPUT_RECORDS).
+    assertEquals(counters.findCounter(TaskCounter.MAP_OUTPUT_RECORDS).
         getCounter(),mapRecs);
     
     int redRecs = mapRecs - redBadRecords.size();
-    assertEquals(counters.findCounter(Task.Counter.REDUCE_SKIPPED_RECORDS).
+    assertEquals(counters.findCounter(TaskCounter.REDUCE_SKIPPED_RECORDS).
         getCounter(),redBadRecords.size());
-    assertEquals(counters.findCounter(Task.Counter.REDUCE_SKIPPED_GROUPS).
+    assertEquals(counters.findCounter(TaskCounter.REDUCE_SKIPPED_GROUPS).
         getCounter(),redBadRecords.size());
-    assertEquals(counters.findCounter(Task.Counter.REDUCE_INPUT_GROUPS).
+    assertEquals(counters.findCounter(TaskCounter.REDUCE_INPUT_GROUPS).
         getCounter(),redRecs);
-    assertEquals(counters.findCounter(Task.Counter.REDUCE_INPUT_RECORDS).
+    assertEquals(counters.findCounter(TaskCounter.REDUCE_INPUT_RECORDS).
         getCounter(),redRecs);
-    assertEquals(counters.findCounter(Task.Counter.REDUCE_OUTPUT_RECORDS).
+    assertEquals(counters.findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS).
         getCounter(),redRecs);
     
     //validate skipped records

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestCounters.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestCounters.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestCounters.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestCounters.java Mon May  4 05:19:20 2009
@@ -21,6 +21,8 @@
 import java.io.IOException;
 import java.text.ParseException;
 
+import org.apache.hadoop.mapreduce.TaskCounter;
+
 /**
  * TestCounters checks the sanity and recoverability of {@code Counters}
  */
@@ -68,8 +70,8 @@
   }
   
   public void testCounters() throws IOException {
-    Enum[] keysWithResource = {Task.Counter.MAP_INPUT_BYTES, 
-                               Task.Counter.MAP_OUTPUT_BYTES};
+    Enum[] keysWithResource = {TaskCounter.MAP_INPUT_BYTES, 
+                               TaskCounter.MAP_OUTPUT_BYTES};
     
     Enum[] keysWithoutResource = {myCounters.TEST1, myCounters.TEST2};
     

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestJobInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestJobInProgress.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestJobInProgress.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestJobInProgress.java Mon May  4 05:19:20 2009
@@ -38,6 +38,7 @@
 import org.apache.hadoop.mapred.UtilsForTests;
 import org.apache.hadoop.mapred.lib.IdentityMapper;
 import org.apache.hadoop.mapred.lib.IdentityReducer;
+import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.net.Node;
 
 import junit.framework.TestCase;
@@ -263,8 +264,8 @@
       JobInProgress jip = jt.getJob(js.getJobID());
       Counters counter = jip.getJobCounters();
       long totalTaskCount = counter
-          .getCounter(JobInProgress.Counter.TOTAL_LAUNCHED_MAPS)
-          + counter.getCounter(JobInProgress.Counter.TOTAL_LAUNCHED_REDUCES);
+          .getCounter(JobCounter.TOTAL_LAUNCHED_MAPS)
+          + counter.getCounter(JobCounter.TOTAL_LAUNCHED_REDUCES);
       while (jip.getNumTaskCompletionEvents() < totalTaskCount) {
         assertEquals(true, (jip.runningMaps() >= 0));
         assertEquals(true, (jip.pendingMaps() >= 0));

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRDFSSort.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRDFSSort.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRDFSSort.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRDFSSort.java Mon May  4 05:19:20 2009
@@ -30,6 +30,7 @@
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.lib.NullOutputFormat;
+import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.ToolRunner;
@@ -97,7 +98,7 @@
     Sort sort = new Sort();
     assertEquals(ToolRunner.run(job, sort, sortArgs), 0);
     Counters counters = sort.getResult().getCounters();
-    long mapInput = counters.findCounter(Task.Counter.MAP_INPUT_BYTES
+    long mapInput = counters.findCounter(TaskCounter.MAP_INPUT_BYTES
     ).getValue();
     long hdfsRead = counters.findCounter(Task.FILESYSTEM_COUNTER_GROUP,
                                          "HDFS_BYTES_READ").getValue();

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java Mon May  4 05:19:20 2009
@@ -35,6 +35,7 @@
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapred.MRCaching.TestResult;
+import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.util.Progressable;
 
 /**
@@ -77,9 +78,9 @@
       assertEquals("number of cleanups", 2, reports.length);
       Counters counters = ret.job.getCounters();
       assertEquals("number of map inputs", 3, 
-                   counters.getCounter(Task.Counter.MAP_INPUT_RECORDS));
+                   counters.getCounter(TaskCounter.MAP_INPUT_RECORDS));
       assertEquals("number of reduce outputs", 9, 
-                   counters.getCounter(Task.Counter.REDUCE_OUTPUT_RECORDS));
+                   counters.getCounter(TaskCounter.REDUCE_OUTPUT_RECORDS));
       runCustomFormats(mr);
     } finally {
       if (mr != null) { mr.shutdown(); }

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java Mon May  4 05:19:20 2009
@@ -29,6 +29,7 @@
 import org.apache.hadoop.mapred.SortValidator.RecordStatsChecker.NonSplitableSequenceFileInputFormat;
 import org.apache.hadoop.mapred.lib.IdentityMapper;
 import org.apache.hadoop.mapred.lib.IdentityReducer;
+import org.apache.hadoop.mapreduce.JobCounter;
 
 public class TestRackAwareTaskPlacement extends TestCase {
   private static final String rack1[] = new String[] {
@@ -76,12 +77,12 @@
     RunningJob job = launchJob(jobConf, in, out, numMaps, jobName);
     Counters counters = job.getCounters();
     assertEquals("Number of local maps", 
-            counters.getCounter(JobInProgress.Counter.OTHER_LOCAL_MAPS), otherLocalMaps);
+            counters.getCounter(JobCounter.OTHER_LOCAL_MAPS), otherLocalMaps);
     assertEquals("Number of Data-local maps", 
-            counters.getCounter(JobInProgress.Counter.DATA_LOCAL_MAPS), 
+            counters.getCounter(JobCounter.DATA_LOCAL_MAPS), 
                                 dataLocalMaps);
     assertEquals("Number of Rack-local maps", 
-            counters.getCounter(JobInProgress.Counter.RACK_LOCAL_MAPS), 
+            counters.getCounter(JobCounter.RACK_LOCAL_MAPS), 
                                 rackLocalMaps);
     mr.waitUntilIdle();
     mr.shutdown();

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSpilledRecordsCounter.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSpilledRecordsCounter.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSpilledRecordsCounter.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapred/TestSpilledRecordsCounter.java Mon May  4 05:19:20 2009
@@ -26,11 +26,11 @@
 
 import junit.framework.TestCase;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.TaskCounter;
 
 /**
  * This is an wordcount application that tests the count of records
@@ -46,7 +46,7 @@
 
   private void validateCounters(Counters counter, long spillRecCnt) {
       // Check if the numer of Spilled Records is same as expected
-      assertEquals(counter.findCounter(Task.Counter.SPILLED_RECORDS).
+      assertEquals(counter.findCounter(TaskCounter.SPILLED_RECORDS).
                      getCounter(), spillRecCnt);
   }
 

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/mapreduce/TestMapReduceLocal.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/mapreduce/TestMapReduceLocal.java?rev=771179&r1=771178&r2=771179&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/mapreduce/TestMapReduceLocal.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/mapreduce/TestMapReduceLocal.java Mon May  4 05:19:20 2009
@@ -97,7 +97,7 @@
                             ) throws IOException,
                                      InterruptedException,
                                      ClassNotFoundException {
-    final String COUNTER_GROUP = "org.apache.hadoop.mapred.Task$Counter";
+    final String COUNTER_GROUP = "org.apache.hadoop.mapreduce.TaskCounter";
     localFs.delete(new Path(TEST_ROOT_DIR + "/in"), true);
     localFs.delete(new Path(TEST_ROOT_DIR + "/out"), true);    
     writeFile("in/part1", "this is a test\nof word count test\ntest\n");



Mime
View raw message