hadoop-mapreduce-commits mailing list archives

From: cdoug...@apache.org
Subject: svn commit: r884289 - in /hadoop/mapreduce/branches/branch-0.21: ./ src/java/org/apache/hadoop/mapreduce/task/reduce/ src/test/mapred/org/apache/hadoop/mapred/
Date: Wed, 25 Nov 2009 21:57:05 GMT
Author: cdouglas
Date: Wed Nov 25 21:57:05 2009
New Revision: 884289

URL: http://svn.apache.org/viewvc?rev=884289&view=rev
Log:
MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the
configured threshold.
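
The overflow is easiest to see with a small, self-contained sketch (illustrative only, with made-up sizes; this is not the MergeManager code in the diff below): when the shuffle's used-memory counter and its limit are ints, reserving a few large map outputs wraps the counter negative, the over-threshold check never trips, and in-memory allocations sail past the configured limit. Widening the fields to long keeps the comparison meaningful.

  // Illustrative sketch only (hypothetical sizes), not the committed MergeManager code.
  public class ShuffleOverflowSketch {
    public static void main(String[] args) {
      final int memoryLimit = (int) (Integer.MAX_VALUE * 0.70f); // ~1.4 GiB budget, tracked as an int
      int usedMemory = 0;                                        // pre-fix style: reservations accumulated in an int

      for (int i = 0; i < 5; i++) {
        final int segment = 700 << 20;                           // a 700 MiB map output shuffled in memory
        if (usedMemory > memoryLimit) {
          System.out.println("stall, used=" + usedMemory);
        } else {
          usedMemory += segment;                                 // wraps negative on the third reservation
          System.out.println("reserved, used=" + usedMemory);
        }
      }
      // The wrap keeps usedMemory below memoryLimit forever, so the check never
      // stalls fetches and in-memory allocations exceed the configured threshold.
      // With long fields, as in this patch, the comparison behaves as intended.
    }
  }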

Modified:
    hadoop/mapreduce/branches/branch-0.21/CHANGES.txt
    hadoop/mapreduce/branches/branch-0.21/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java
    hadoop/mapreduce/branches/branch-0.21/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetch.java
    hadoop/mapreduce/branches/branch-0.21/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetchFromPartialMem.java

Modified: hadoop/mapreduce/branches/branch-0.21/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/branch-0.21/CHANGES.txt?rev=884289&r1=884288&r2=884289&view=diff
==============================================================================
--- hadoop/mapreduce/branches/branch-0.21/CHANGES.txt (original)
+++ hadoop/mapreduce/branches/branch-0.21/CHANGES.txt Wed Nov 25 21:57:05 2009
@@ -830,3 +830,6 @@
     MAPREDUCE-1007. Fix NPE in CapacityTaskScheduler.getJobs(). 
     (V.V.Chaitanya Krishna via sharad)
 
+    MAPREDUCE-1182. Fix overflow in reduce causing allocations to exceed the
+    configured threshold. (cdouglas)
+

Modified: hadoop/mapreduce/branches/branch-0.21/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/branch-0.21/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java?rev=884289&r1=884288&r2=884289&view=diff
==============================================================================
--- hadoop/mapreduce/branches/branch-0.21/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java (original)
+++ hadoop/mapreduce/branches/branch-0.21/src/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java Wed Nov 25 21:57:05 2009
@@ -87,12 +87,12 @@
   Set<Path> onDiskMapOutputs = new TreeSet<Path>();
   private final OnDiskMerger onDiskMerger;
   
-  private final int memoryLimit;
-  private int usedMemory;
-  private final int maxSingleShuffleLimit;
+  private final long memoryLimit;
+  private long usedMemory;
+  private final long maxSingleShuffleLimit;
   
   private final int memToMemMergeOutputsThreshold; 
-  private final int mergeThreshold;
+  private final long mergeThreshold;
   
   private final int ioSortFactor;
 
@@ -159,17 +159,17 @@
 
     // Allow unit tests to fix Runtime memory
     this.memoryLimit = 
-      (int)(jobConf.getInt(JobContext.REDUCE_MEMORY_TOTAL_BYTES,
-          (int)Math.min(Runtime.getRuntime().maxMemory(), Integer.MAX_VALUE))
+      (long)(jobConf.getLong(JobContext.REDUCE_MEMORY_TOTAL_BYTES,
+          Math.min(Runtime.getRuntime().maxMemory(), Integer.MAX_VALUE))
         * maxInMemCopyUse);
  
     this.ioSortFactor = jobConf.getInt(JobContext.IO_SORT_FACTOR, 100);
 
     this.maxSingleShuffleLimit = 
-      (int)(memoryLimit * MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION);
+      (long)(memoryLimit * MAX_SINGLE_SHUFFLE_SEGMENT_FRACTION);
     this.memToMemMergeOutputsThreshold = 
             jobConf.getInt(JobContext.REDUCE_MEMTOMEM_THRESHOLD, ioSortFactor);
-    this.mergeThreshold = (int)(this.memoryLimit * 
+    this.mergeThreshold = (long)(this.memoryLimit * 
                           jobConf.getFloat(JobContext.SHUFFLE_MERGE_EPRCENT, 
                                            0.90f));
     LOG.info("MergerManager: memoryLimit=" + memoryLimit + ", " +

Modified: hadoop/mapreduce/branches/branch-0.21/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetch.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/branch-0.21/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetch.java?rev=884289&r1=884288&r2=884289&view=diff
==============================================================================
--- hadoop/mapreduce/branches/branch-0.21/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetch.java (original)
+++ hadoop/mapreduce/branches/branch-0.21/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetch.java Wed Nov 25 21:57:05 2009
@@ -36,7 +36,7 @@
     job.set(JobContext.REDUCE_INPUT_BUFFER_PERCENT, "0.0");
     job.setNumMapTasks(MAP_TASKS);
     job.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, "-Xmx128m");
-    job.setInt(JobContext.REDUCE_MEMORY_TOTAL_BYTES, 128 << 20);
+    job.setLong(JobContext.REDUCE_MEMORY_TOTAL_BYTES, 128 << 20);
     job.set(JobContext.SHUFFLE_INPUT_BUFFER_PERCENT, "0.05");
     job.setInt(JobContext.IO_SORT_FACTOR, 2);
     job.setInt(JobContext.REDUCE_MERGE_INMEM_THRESHOLD, 4);
@@ -58,7 +58,7 @@
     JobConf job = mrCluster.createJobConf();
     job.set(JobContext.REDUCE_INPUT_BUFFER_PERCENT, "1.0");
     job.set(JobContext.SHUFFLE_INPUT_BUFFER_PERCENT, "1.0");
-    job.setInt(JobContext.REDUCE_MEMORY_TOTAL_BYTES, 128 << 20);
+    job.setLong(JobContext.REDUCE_MEMORY_TOTAL_BYTES, 128 << 20);
     job.setNumMapTasks(MAP_TASKS);
     Counters c = runJob(job);
     final long spill = c.findCounter(TaskCounter.SPILLED_RECORDS).getCounter();

Modified: hadoop/mapreduce/branches/branch-0.21/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetchFromPartialMem.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/branches/branch-0.21/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetchFromPartialMem.java?rev=884289&r1=884288&r2=884289&view=diff
==============================================================================
--- hadoop/mapreduce/branches/branch-0.21/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetchFromPartialMem.java (original)
+++ hadoop/mapreduce/branches/branch-0.21/src/test/mapred/org/apache/hadoop/mapred/TestReduceFetchFromPartialMem.java Wed Nov 25 21:57:05 2009
@@ -85,7 +85,7 @@
     job.setInt(JobContext.SHUFFLE_PARALLEL_COPIES, 1);
     job.setInt(JobContext.IO_SORT_MB, 10);
     job.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, "-Xmx128m");
-    job.setInt(JobContext.REDUCE_MEMORY_TOTAL_BYTES, 128 << 20);
+    job.setLong(JobContext.REDUCE_MEMORY_TOTAL_BYTES, 128 << 20);
     job.set(JobContext.SHUFFLE_INPUT_BUFFER_PERCENT, "0.14");
     job.set(JobContext.SHUFFLE_MERGE_EPRCENT, "1.0");
     Counters c = runJob(job);
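
The test changes above follow from the getLong read in MergeManager: 128 << 20 still fits in an int, so switching to setLong keeps the write and read sides of REDUCE_MEMORY_TOTAL_BYTES on the same type, and it is the only setter that can express a limit beyond 2 GiB. A hedged sketch (hypothetical job setup, not part of the committed tests):

  // Hypothetical configuration snippet; not part of the committed tests.
  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapreduce.JobContext;

  public class ReduceMemoryConfigSketch {
    public static void main(String[] args) {
      JobConf job = new JobConf();
      // 3 GiB only fits through the long setter; setInt could not even accept the value,
      // and MergeManager now reads this key back with getLong.
      job.setLong(JobContext.REDUCE_MEMORY_TOTAL_BYTES, 3L << 30);
      System.out.println(job.getLong(JobContext.REDUCE_MEMORY_TOTAL_BYTES, 0));
    }
  }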


