hadoop-mapreduce-commits mailing list archives

From cdoug...@apache.org
Subject svn commit: r951832 - in /hadoop/mapreduce/trunk: ./ src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/ src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ src/java/org/apache/hadoop/mapred/ src/java/org/apache/hadoop/mapre...
Date Sun, 06 Jun 2010 06:13:47 GMT
Author: cdouglas
Date: Sun Jun  6 06:13:47 2010
New Revision: 951832

URL: http://svn.apache.org/viewvc?rev=951832&view=rev
Log:
MAPREDUCE-1533. Reduce overhead of logging and string manipulation during
heartbeat processing. Contributed by Amar Kamat and Dick King.

Modified:
    hadoop/mapreduce/trunk/CHANGES.txt
    hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java
    hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueue.java
    hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/MemoryMatcher.java
    hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskDataView.java
    hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskSchedulingContext.java
    hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Counters.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java
    hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java

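One pattern recurs throughout this change: each LOG.debug call whose argument is
built by string concatenation is wrapped in a LOG.isDebugEnabled() guard, so the
concatenation (and its temporary object churn) is skipped entirely when debug
logging is off. A minimal sketch of the pattern, using commons-logging as the
Hadoop code does (the class and method below are illustrative, not part of the
patch):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class GuardedLoggingSketch {
      private static final Log LOG = LogFactory.getLog(GuardedLoggingSketch.class);

      void processHeartbeat(String trackerName, int taskCount) {
        // Unguarded: the message string is concatenated on every call,
        // even when debug logging is disabled.
        LOG.debug("Heartbeat from " + trackerName + ", tasks=" + taskCount);

        // Guarded: the concatenation only happens when the message will be
        // used. The heartbeat path runs once per tracker per interval, so
        // the saved allocations add up on a large cluster.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Heartbeat from " + trackerName + ", tasks=" + taskCount);
        }
      }
    }
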
Modified: hadoop/mapreduce/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/CHANGES.txt?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/CHANGES.txt (original)
+++ hadoop/mapreduce/trunk/CHANGES.txt Sun Jun  6 06:13:47 2010
@@ -48,6 +48,9 @@ Trunk (unreleased changes)
     MAPREDUCE-1762. Add ability to set values of task counters. (Scott Chen via
     cdouglas)
 
+    MAPREDUCE-1533. Reduce overhead of logging and string manipulation during
+    heartbeat processing. (Amar Kamat and Dick King via cdouglas)
+
   OPTIMIZATIONS
 
     MAPREDUCE-1354. Enhancements to JobTracker for better performance and

Modified: hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/CapacityTaskScheduler.java Sun Jun  6 06:13:47 2010
@@ -117,7 +117,9 @@ class CapacityTaskScheduler extends Task
     }
     
     static TaskLookupResult getTaskFoundResult(Task t) {
-      LOG.debug("Returning task " + t);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Returning task " + t);
+      }
       return new TaskLookupResult(t, LookUpStatus.TASK_FOUND);
     }
     static TaskLookupResult getNoTaskFoundResult() {
@@ -333,8 +335,10 @@ class CapacityTaskScheduler extends Task
             return TaskLookupResult.getTaskFoundResult(t);
           } else {
             //skip to the next job in the queue.
-            LOG.debug("Job " + j.getJobID().toString()
-                + " returned no tasks of type " + type);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Job " + j.getJobID().toString()
+                        + " returned no tasks of type " + type);
+            }
           }
         } else {
           // if memory requirements don't match then we check if the job has
@@ -413,8 +417,10 @@ class CapacityTaskScheduler extends Task
       }//end of for loop
 
       // found nothing for this queue, look at the next one.
-      String msg = "Found no task from the queue " + qsi.getQueueName();
-      LOG.debug(msg);
+      if (LOG.isDebugEnabled()) {
+        String msg = "Found no task from the queue " + qsi.getQueueName();
+        LOG.debug(msg);
+      }
       return TaskLookupResult.getNoTaskFoundResult();
     }
 
@@ -973,13 +979,15 @@ class CapacityTaskScheduler extends Task
     int currentMapSlots = taskTrackerStatus.countOccupiedMapSlots();
     int maxReduceSlots = taskTrackerStatus.getMaxReduceSlots();
     int currentReduceSlots = taskTrackerStatus.countOccupiedReduceSlots();
-    LOG.debug("TT asking for task, max maps="
-      + taskTrackerStatus.getMaxMapSlots() + 
-        ", run maps=" + taskTrackerStatus.countMapTasks() + ", max reds=" + 
-        taskTrackerStatus.getMaxReduceSlots() + ", run reds=" + 
-        taskTrackerStatus.countReduceTasks() + ", map cap=" + 
-        mapClusterCapacity + ", red cap = " + 
-        reduceClusterCapacity);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("TT asking for task, max maps="
+                + taskTrackerStatus.getMaxMapSlots() + 
+                ", run maps=" + taskTrackerStatus.countMapTasks() + ", max reds=" + 
+                taskTrackerStatus.getMaxReduceSlots() + ", run reds=" + 
+                taskTrackerStatus.countReduceTasks() + ", map cap=" + 
+                mapClusterCapacity + ", red cap = " + 
+                reduceClusterCapacity);
+    }
 
     /* 
      * update all our QSC objects.

Modified: hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueue.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueue.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueue.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/JobQueue.java Sun Jun  6 06:13:47 2010
@@ -130,15 +130,12 @@ class JobQueue extends AbstractQueue {
       (reduceScheduler.getNumReservedTaskTrackers(j) *
         reduceScheduler.getSlotsPerTask(j));
 
-    j.setSchedulingInfo(
-      String.format(
-        TaskSchedulingContext.JOB_SCHEDULING_INFO_FORMAT_STRING,
-        numMapsRunningForThisJob,
-        numRunningMapSlots,
-        numReservedMapSlotsForThisJob,
-        numReducesRunningForThisJob,
-        numRunningReduceSlots,
-        numReservedReduceSlotsForThisJob));
+    j.setSchedulingInfo
+      (getJobQueueSchedInfo(numMapsRunningForThisJob, numRunningMapSlots,
+                            numReservedMapSlotsForThisJob,
+                            numReducesRunningForThisJob,
+                            numRunningReduceSlots,
+                            numReservedReduceSlotsForThisJob));
 
 
     mapTSI.setNumRunningTasks(
@@ -182,6 +179,23 @@ class JobQueue extends AbstractQueue {
   * consider the first few jobs per user.
   */
   }
+ 
+  private static final int JOBQUEUE_SCHEDULINGINFO_INITIAL_LENGTH = 175;
+  
+  static String getJobQueueSchedInfo
+    (int numMapsRunningForThisJob, 
+     int numRunningMapSlots, int numReservedMapSlotsForThisJob, 
+     int numReducesRunningForThisJob, int numRunningReduceSlots, 
+     int numReservedReduceSlotsForThisJob) {
+    StringBuilder sb = new StringBuilder(JOBQUEUE_SCHEDULINGINFO_INITIAL_LENGTH);
+    sb.append(numMapsRunningForThisJob).append(" running map tasks using ")
+      .append(numRunningMapSlots).append(" map slots. ")
+      .append(numReservedMapSlotsForThisJob).append(" additional slots reserved. ")
+      .append(numReducesRunningForThisJob).append(" running reduce tasks using ")
+      .append(numRunningReduceSlots).append(" reduce slots. ")
+      .append(numReservedReduceSlotsForThisJob).append(" additional slots reserved.");
+    return sb.toString();
+  }
 
 
   Map<JobSchedulingInfo, JobInProgress>
@@ -270,9 +284,10 @@ class JobQueue extends AbstractQueue {
     // setup scheduler specific job information
     preInitializeJob(job);
 
-    LOG.debug(
-      "Job " + job.getJobID().toString() + " is added under user "
-        + job.getProfile().getUser() + ", user now has " + i + " jobs");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Job " + job.getJobID().toString() + " is added under user "
+                + job.getProfile().getUser() + ", user now has " + i + " jobs");
+    }
   }
 
 
@@ -303,7 +318,9 @@ class JobQueue extends AbstractQueue {
   // called when a job completes
   synchronized void jobCompleted(JobInProgress job) {
 
-    LOG.debug("Job to be removed for user " + job.getProfile().getUser());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Job to be removed for user " + job.getProfile().getUser());
+    }
     Integer i = qsc.getNumJobsByUser().get(job.getProfile().getUser());
     i--;
     if (0 == i.intValue()) {
@@ -313,14 +330,16 @@ class JobQueue extends AbstractQueue {
         job.getProfile().getUser());
       qsc.getReduceTSC().getNumSlotsOccupiedByUser().remove(
         job.getProfile().getUser());
-      LOG.debug(
-        "No more jobs for user, number of users = " + qsc
-          .getNumJobsByUser().size());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No more jobs for user, number of users = "
+                  + qsc.getNumJobsByUser().size());
+      }
     } else {
       qsc.getNumJobsByUser().put(job.getProfile().getUser(), i);
-      LOG.debug(
-        "User still has " + i + " jobs, number of users = "
-          + qsc.getNumJobsByUser().size());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("User still has " + i + " jobs, number of users = "
+                  + qsc.getNumJobsByUser().size());
+      }
     }
   }
 

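For reference, the new JobQueue.getJobQueueSchedInfo builder produces the same
text the removed String.format call did, so callers (including the tests updated
later in this patch) see no change. Reading the appends above, for example (the
variable name here is illustrative):

    String info = JobQueue.getJobQueueSchedInfo(1, 2, 0, 1, 2, 0);
    // info is:
    //   "1 running map tasks using 2 map slots. 0 additional slots reserved. "
    //     + "1 running reduce tasks using 2 reduce slots. 0 additional slots reserved."
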
Modified: hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/MemoryMatcher.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/MemoryMatcher.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/MemoryMatcher.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/MemoryMatcher.java Sun Jun  6 06:13:47 2010
@@ -85,12 +85,16 @@ class MemoryMatcher {
   boolean matchesMemoryRequirements(JobInProgress job,TaskType taskType, 
                                     TaskTrackerStatus taskTracker) {
 
-    LOG.debug("Matching memory requirements of " + job.getJobID().toString()
-        + " for scheduling on " + taskTracker.trackerName);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Matching memory requirements of " + job.getJobID().toString()
+                + " for scheduling on " + taskTracker.trackerName);
+    }
 
     if (!isSchedulingBasedOnMemEnabled()) {
-      LOG.debug("Scheduling based on job's memory requirements is disabled."
-          + " Ignoring any value set by job.");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Scheduling based on job's memory requirements is disabled."
+                  + " Ignoring any value set by job.");
+      }
       return true;
     }
 

Modified: hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskDataView.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskDataView.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskDataView.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskDataView.java Sun Jun  6 06:13:47 2010
@@ -93,9 +93,7 @@ abstract class TaskDataView {
 
     @Override
     int getSlotsPerTask(JobInProgress job) {
-      return
-        job.getJobConf().computeNumSlotsPerMap(
-          MemoryMatcher.getMemSizeForMapSlot());
+      return job.getNumSlotsPerMap();
     }
 
     @Override
@@ -128,9 +126,7 @@ abstract class TaskDataView {
 
     @Override
     int getSlotsPerTask(JobInProgress job) {
-      return
-        job.getJobConf().
-          computeNumSlotsPerReduce(MemoryMatcher.getMemSizeForReduceSlot());
+      return job.getNumSlotsPerReduce();
     }
 
     @Override

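The TaskDataView change stops recomputing slots-per-task from the JobConf and
memory-matcher settings on every scheduling pass, reading instead a value that
JobInProgress already holds. A minimal sketch of the caching idea, with
illustrative names and assuming slots are derived as ceil(task memory / slot
memory):

    class CachedSlotsSketch {
      private final int numSlotsPerMap;

      CachedSlotsSketch(long mapMemoryMb, long memPerMapSlotMb) {
        // Computed once, at job initialization time.
        this.numSlotsPerMap =
            (int) Math.ceil((double) mapMemoryMb / memPerMapSlotMb);
      }

      int getNumSlotsPerMap() {
        // Cheap field read on the per-heartbeat scheduling path; no JobConf
        // lookup or recomputation.
        return numSlotsPerMap;
      }
    }
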
Modified: hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskSchedulingContext.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskSchedulingContext.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskSchedulingContext.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred/TaskSchedulingContext.java Sun Jun  6 06:13:47 2010
@@ -61,10 +61,6 @@ public class TaskSchedulingContext {
    */
   private Map<String, Integer> numSlotsOccupiedByUser =
     new HashMap<String, Integer>();
-  final static String JOB_SCHEDULING_INFO_FORMAT_STRING =
-    "%s running map tasks using %d map slots. %d additional slots reserved." +
-      " %s running reduce tasks using %d reduce slots." +
-      " %d additional slots reserved.";
 
   /**
    * reset the variables associated with tasks

Modified: hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java (original)
+++ hadoop/mapreduce/trunk/src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestCapacityScheduler.java Sun Jun  6 06:13:47 2010
@@ -1503,11 +1503,8 @@ public class TestCapacityScheduler exten
       // Total 1 map slot should be accounted for.
       checkOccupiedSlots("default", TaskType.MAP, 1, 1, 16.7f);
       checkOccupiedSlots("default", TaskType.REDUCE, 1, 1, 16.7f);
-      assertEquals(
-        String.format(
-          TaskSchedulingContext.JOB_SCHEDULING_INFO_FORMAT_STRING,
-          1, 1, 0, 1, 1, 0),
-        (String) job1.getSchedulingInfo());
+      assertEquals(JobQueue.getJobQueueSchedInfo(1, 1, 0, 1, 1, 0),
+                   job1.getSchedulingInfo().toString());
       checkMemReservedForTasksOnTT("tt1", 1 * 1024L, 1 * 1024L);
 
       expectedStrings.clear();
@@ -1546,11 +1543,8 @@ public class TestCapacityScheduler exten
           "tt3", expectedStrings);
       checkOccupiedSlots("default", TaskType.MAP, 1, 4, 66.7f);
       checkOccupiedSlots("default", TaskType.REDUCE, 1, 4, 66.7f);
-      assertEquals(
-        String.format(
-          TaskSchedulingContext.JOB_SCHEDULING_INFO_FORMAT_STRING,
-          1, 2, 0, 1, 2, 0),
-        (String) job2.getSchedulingInfo());
+      assertEquals(JobQueue.getJobQueueSchedInfo(1, 2, 0, 1, 2, 0),
+                   job2.getSchedulingInfo().toString());
       checkMemReservedForTasksOnTT("tt3", 2 * 1024L, 2 * 1024L);
 
       LOG.debug(
@@ -1575,16 +1569,10 @@ public class TestCapacityScheduler exten
       checkOccupiedSlots("default", TaskType.REDUCE, 1, 6, 100.0f);
       checkMemReservedForTasksOnTT("tt1", 1 * 1024L, 1 * 1024L);
       LOG.info(job2.getSchedulingInfo());
-      assertEquals(
-        String.format(
-          TaskSchedulingContext.JOB_SCHEDULING_INFO_FORMAT_STRING,
-          1, 2, 2, 1, 2, 2),
-        (String) job2.getSchedulingInfo());
-      assertEquals(
-        String.format(
-          TaskSchedulingContext.JOB_SCHEDULING_INFO_FORMAT_STRING,
-          0, 0, 0, 0, 0, 0),
-        (String) job3.getSchedulingInfo());
+      assertEquals(JobQueue.getJobQueueSchedInfo(1, 2, 2, 1, 2, 2),
+                   job2.getSchedulingInfo().toString());
+      assertEquals(JobQueue.getJobQueueSchedInfo(0, 0, 0, 0, 0, 0),
+                   job3.getSchedulingInfo().toString());
 
       // Reservations are already done for job2. So job3 should go ahead.
       expectedStrings.clear();
@@ -2187,10 +2175,8 @@ public class TestCapacityScheduler exten
     scheduler.updateContextInfoForTests();
 
     LOG.info(job1.getSchedulingInfo());
-    assertEquals(
-      String.format(
-        TaskSchedulingContext.JOB_SCHEDULING_INFO_FORMAT_STRING, 3, 3, 0, 0,
-        0, 0), (String) job1.getSchedulingInfo());
+    assertEquals(JobQueue.getJobQueueSchedInfo(3, 3, 0, 0, 0, 0), 
+                 job1.getSchedulingInfo().toString());
 
     LOG.debug(
       "Submit one high memory(2GB maps, 0MB reduces) job of "
@@ -2220,18 +2206,14 @@ public class TestCapacityScheduler exten
     assertNull(scheduler.assignTasks(tracker("tt1")));
     scheduler.updateContextInfoForTests();
     LOG.info(job2.getSchedulingInfo());
-    assertEquals(
-      String.format(
-        TaskSchedulingContext.JOB_SCHEDULING_INFO_FORMAT_STRING, 0, 0, 2, 0,
-        0, 0), (String) job2.getSchedulingInfo());
+    assertEquals(JobQueue.getJobQueueSchedInfo(0, 0, 2, 0, 0, 0), 
+                 job2.getSchedulingInfo().toString()); 
 
     assertNull(scheduler.assignTasks(tracker("tt2")));
     scheduler.updateContextInfoForTests();
     LOG.info(job2.getSchedulingInfo());
-    assertEquals(
-      String.format(
-        TaskSchedulingContext.JOB_SCHEDULING_INFO_FORMAT_STRING, 0, 0, 4, 0,
-        0, 0), (String) job2.getSchedulingInfo());
+    assertEquals(JobQueue.getJobQueueSchedInfo(0, 0, 4, 0, 0, 0), 
+                 job2.getSchedulingInfo().toString());
 
     // Job2 has only 2 pending tasks. So no more reservations. Job3 should get
     // slots on tt3. tt1 and tt2 should not be assigned any slots with the
@@ -2239,28 +2221,22 @@ public class TestCapacityScheduler exten
     assertNull(scheduler.assignTasks(tracker("tt1")));
     scheduler.updateContextInfoForTests();
     LOG.info(job2.getSchedulingInfo());
-    assertEquals(
-      String.format(
-        TaskSchedulingContext.JOB_SCHEDULING_INFO_FORMAT_STRING, 0, 0, 4, 0,
-        0, 0), (String) job2.getSchedulingInfo());
+    assertEquals(JobQueue.getJobQueueSchedInfo(0, 0, 4, 0, 0, 0), 
+                 job2.getSchedulingInfo().toString()); 
 
     assertNull(scheduler.assignTasks(tracker("tt2")));
     scheduler.updateContextInfoForTests();
     LOG.info(job2.getSchedulingInfo());
-    assertEquals(
-      String.format(
-        TaskSchedulingContext.JOB_SCHEDULING_INFO_FORMAT_STRING, 0, 0, 4, 0,
-        0, 0), (String) job2.getSchedulingInfo());
+    assertEquals(JobQueue.getJobQueueSchedInfo(0, 0, 4, 0, 0, 0), 
+                 job2.getSchedulingInfo().toString());
 
     checkAssignment(
       taskTrackerManager, scheduler, "tt3",
       "attempt_test_0003_m_000001_0 on tt3");
     scheduler.updateContextInfoForTests();
     LOG.info(job2.getSchedulingInfo());
-    assertEquals(
-      String.format(
-        TaskSchedulingContext.JOB_SCHEDULING_INFO_FORMAT_STRING, 0, 0, 4, 0,
-        0, 0), (String) job2.getSchedulingInfo());
+    assertEquals(JobQueue.getJobQueueSchedInfo(0, 0, 4, 0, 0, 0), 
+                 job2.getSchedulingInfo().toString());
 
     // No more tasks there in job3 also
     assertNull(scheduler.assignTasks(tracker("tt3")));

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Counters.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Counters.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Counters.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Counters.java Sun Jun  6 06:13:47 2010
@@ -110,27 +110,36 @@ public class Counters implements Writabl
      * [(actual-name)(display-name)(value)]
      */
     public synchronized String makeEscapedCompactString() {
-      StringBuffer buf = new StringBuffer();
-      buf.append(COUNTER_OPEN);
+
+      // First up, obtain the strings that need escaping. This will help us
+      // determine the buffer length apriori.
+      String escapedName = escape(getName());
+      String escapedDispName = escape(getDisplayName());
+      long currentValue = this.getValue();
+      int length = escapedName.length() + escapedDispName.length() + 4;
+
+      length += 8; // For the following delimiting characters
+      StringBuilder builder = new StringBuilder(length);
+      builder.append(COUNTER_OPEN);
       
       // Add the counter name
-      buf.append(UNIT_OPEN);
-      buf.append(escape(getName()));
-      buf.append(UNIT_CLOSE);
+      builder.append(UNIT_OPEN);
+      builder.append(escapedName);
+      builder.append(UNIT_CLOSE);
       
       // Add the display name
-      buf.append(UNIT_OPEN);
-      buf.append(escape(getDisplayName()));
-      buf.append(UNIT_CLOSE);
+      builder.append(UNIT_OPEN);
+      builder.append(escapedDispName);
+      builder.append(UNIT_CLOSE);
       
       // Add the value
-      buf.append(UNIT_OPEN);
-      buf.append(this.getValue());
-      buf.append(UNIT_CLOSE);
+      builder.append(UNIT_OPEN);
+      builder.append(currentValue);
+      builder.append(UNIT_CLOSE);
       
-      buf.append(COUNTER_CLOSE);
+      builder.append(COUNTER_CLOSE);
       
-      return buf.toString();
+      return builder.toString();
     }
     
     // Checks for (content) equality of two (basic) counters
@@ -172,8 +181,10 @@ public class Counters implements Writabl
       }
       this.groupName = groupName;
       this.displayName = localize("CounterGroupName", groupName);
-      LOG.debug("Creating group " + groupName + " with " +
-               (bundle == null ? "nothing" : "bundle"));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Creating group " + groupName + " with " +
+                  (bundle == null ? "nothing" : "bundle"));
+      }
     }
     
     /**
@@ -214,26 +225,41 @@ public class Counters implements Writabl
      * counters within.
      */
     public String makeEscapedCompactString() {
-      StringBuffer buf = new StringBuffer();
-      buf.append(GROUP_OPEN); // group start
+      String[] subcountersArray = new String[subcounters.size()];
+
+      // First up, obtain the strings that need escaping. This will help us
+      // determine the buffer length apriori.
+      String escapedName = escape(getName());
+      String escapedDispName = escape(getDisplayName());
+      int i = 0;
+      int length = escapedName.length() + escapedDispName.length();
+      for (Counter counter : subcounters.values()) {
+        String escapedStr = counter.makeEscapedCompactString();
+        subcountersArray[i++] = escapedStr;
+        length += escapedStr.length();
+      }
+
+      length += 6; // for all the delimiting characters below
+      StringBuilder builder = new StringBuilder(length);
+      builder.append(GROUP_OPEN); // group start
       
       // Add the group name
-      buf.append(UNIT_OPEN);
-      buf.append(escape(getName()));
-      buf.append(UNIT_CLOSE);
+      builder.append(UNIT_OPEN);
+      builder.append(escapedName);
+      builder.append(UNIT_CLOSE);
       
       // Add the display name
-      buf.append(UNIT_OPEN);
-      buf.append(escape(getDisplayName()));
-      buf.append(UNIT_CLOSE);
+      builder.append(UNIT_OPEN);
+      builder.append(escapedDispName);
+      builder.append(UNIT_CLOSE);
       
       // write the value
       for(Counter counter: subcounters.values()) {
-        buf.append(counter.makeEscapedCompactString());
+        builder.append(counter.makeEscapedCompactString());
       }
       
-      buf.append(GROUP_CLOSE); // group end
-      return buf.toString();
+      builder.append(GROUP_CLOSE); // group end
+      return builder.toString();
     }
 
     @Override
@@ -298,7 +324,9 @@ public class Counters implements Writabl
     public synchronized Counter getCounterForName(String name) {
       Counter result = subcounters.get(name);
       if (result == null) {
-        LOG.debug("Adding " + name);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Adding " + name);
+        }
         result = new Counter(name, localize(name + ".name", name), 0L);
         subcounters.put(name, result);
       }
@@ -610,11 +638,24 @@ public class Counters implements Writabl
    * {(groupname)(group-displayname)[(countername)(displayname)(value)][][]}{}{}
    */
   public synchronized String makeEscapedCompactString() {
-    StringBuffer buffer = new StringBuffer();
-    for(Group group: this){
-      buffer.append(group.makeEscapedCompactString());
+    String[] groupsArray = new String[counters.size()];
+    int i = 0;
+    int length = 0;
+
+    // First up, obtain the escaped string for each group so that we can
+    // determine the buffer length apriori.
+    for (Group group : this) {
+      String escapedString = group.makeEscapedCompactString();
+      groupsArray[i++] = escapedString;
+      length += escapedString.length();
     }
-    return buffer.toString();
+
+    // Now construct the buffer
+    StringBuilder builder = new StringBuilder(length);
+    for (String group : groupsArray) {
+      builder.append(group);
+    }
+    return builder.toString();
   }
 
   // Extracts a block (data enclosed within delimeters) ignoring escape 

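The Counters rewrite combines two savings: the synchronized StringBuffer becomes
an unsynchronized StringBuilder, and the builder is presized by measuring the
escaped substrings first, so it never has to grow and recopy its internal array.
A standalone sketch of the presizing technique (delimiters and names here are
illustrative, not the Counters code):

    import java.util.Arrays;
    import java.util.List;

    class PresizedBuilderSketch {
      // Wrap each part in delimiters, sizing the builder exactly up front.
      static String joinEscaped(List<String> parts) {
        int length = 0;
        for (String p : parts) {
          length += p.length() + 2; // +2 for the '(' and ')' around each part
        }
        StringBuilder builder = new StringBuilder(length);
        for (String p : parts) {
          builder.append('(').append(p).append(')');
        }
        return builder.toString();
      }

      public static void main(String[] args) {
        System.out.println(joinEscaped(Arrays.asList("name", "display", "42")));
        // prints: (name)(display)(42)
      }
    }
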
Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobInProgress.java Sun Jun  6 06:13:47 2010
@@ -1228,7 +1228,7 @@ public class JobInProgress {
     //
     // Update JobInProgress status
     //
-    if(LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Taking progress for " + tip.getTIPId() + " from " + 
                  oldProgress + " to " + tip.getProgress());
     }
@@ -1636,8 +1636,10 @@ public class JobInProgress {
       splits = tip.getSplitNodes();
       if (tip.isSpeculating()) {
         speculativeMapTasks++;
-        LOG.debug("Chosen speculative task, current speculativeMap task count: "
-            + speculativeMapTasks);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Chosen speculative task, current speculativeMap task count: "
+                    + speculativeMapTasks);
+        }
       }
       metrics.launchMap(id);
     } else {
@@ -1646,8 +1648,10 @@ public class JobInProgress {
       counter = JobCounter.TOTAL_LAUNCHED_REDUCES;
       if (tip.isSpeculating()) {
         speculativeReduceTasks++;
-        LOG.debug("Chosen speculative task, current speculativeReduce task count: "
-          + speculativeReduceTasks);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Chosen speculative task, current speculativeReduce task count: "
+                    + speculativeReduceTasks);
+        }
       }
       metrics.launchReduce(id);
     }
@@ -2332,7 +2336,9 @@ public class JobInProgress {
       LOG.info("Choosing map task " + tip.getTIPId() + 
           " for speculative execution");
     } else {
-      LOG.debug("No speculative map task found for tracker " + taskTrackerName);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No speculative map task found for tracker " + taskTrackerName);
+      }
     }
     return tip;
   }
@@ -2400,8 +2406,11 @@ public class JobInProgress {
     if (tip != null) {
       LOG.info("Choosing reduce task " + tip.getTIPId() + 
           " for speculative execution");
-    }else {
-      LOG.debug("No speculative map task found for tracker " + taskTrackerHost);
+    } else {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No speculative map task found for tracker "
+                  + taskTrackerHost);
+      }
     }
     return tip;
   }
@@ -2576,9 +2585,11 @@ public class JobInProgress {
     if ((flakyTaskTrackers < (clusterSize * CLUSTER_BLACKLIST_PERCENT)) && 
         taskTrackerFailedTasks >= maxTaskFailuresPerTracker) {
       if (LOG.isDebugEnabled()) {
-        String flakyTracker = convertTrackerNameToHostName(taskTracker); 
-        LOG.debug("Ignoring the black-listed tasktracker: '" + flakyTracker 
-                  + "' for assigning a new task");
+        String flakyTracker = convertTrackerNameToHostName(taskTracker);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Ignoring the black-listed tasktracker: '" + flakyTracker 
+                    + "' for assigning a new task");
+        }
       }
       return false;
     }
@@ -3019,14 +3030,20 @@ public class JobInProgress {
     if (wasSpeculating) {
       if (tip.isMapTask()) {
         speculativeMapTasks--;
-        LOG.debug("Decremented count for " + 
-            tip.getTIPId()+"/"+tip.getJob().getJobID() + 
-          ". Current speculativeMap task count: " + speculativeMapTasks);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Decremented count for " + 
+                    tip.getTIPId()+"/"+tip.getJob().getJobID() + 
+                    ". Current speculativeMap task count: "
+                    + speculativeMapTasks);
+        }
       } else {
         speculativeReduceTasks--;
-        LOG.debug("Decremented count for " +
-            tip.getTIPId()+"/"+tip.getJob().getJobID() +
-          ". Current speculativeReduce task count: " + speculativeReduceTasks);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Decremented count for " +
+                    tip.getTIPId()+"/"+tip.getJob().getJobID() +
+                    ". Current speculativeReduce task count: "
+                    + speculativeReduceTasks);
+        }
       }
     }
   }
@@ -3640,7 +3657,9 @@ public class JobInProgress {
     File f = new File (localJobFilePath);
     LOG.info("Deleting localized job conf at " + f);
     if (!f.delete()) {
-      LOG.debug("Failed to delete file " + f);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Failed to delete file " + f);
+      }
     }
   }
   

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/JobTracker.java Sun Jun  6 06:13:47 2010
@@ -329,7 +329,9 @@ public class JobTracker implements MRCon
           // Every 3 minutes check for any tasks that are overdue
           Thread.sleep(tasktrackerExpiryInterval/3);
           long now = clock.getTime();
-          LOG.debug("Starting launching task sweep");
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Starting launching task sweep");
+          }
           synchronized (JobTracker.this) {
             synchronized (launchingTasks) {
               Iterator<Map.Entry<TaskAttemptID, Long>> itr =
@@ -701,7 +703,7 @@ public class JobTracker implements MRCon
      private void blackListTracker(String hostName, String reason, ReasonForBlackListing rfb) {
       FaultInfo fi = getFaultInfo(hostName, true);
       boolean blackListed = fi.isBlacklisted();
-      if(blackListed) {
+      if (blackListed) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Adding blacklisted reason for tracker : " + hostName 
               + " Reason for blacklisting is : " + rfb);
@@ -729,7 +731,7 @@ public class JobTracker implements MRCon
     private boolean canUnBlackListTracker(String hostName,
         ReasonForBlackListing rfb) {
       FaultInfo fi = getFaultInfo(hostName, false);
-      if(fi == null) {
+      if (fi == null) {
         return false;
       }
       
@@ -741,15 +743,15 @@ public class JobTracker implements MRCon
         ReasonForBlackListing rfb) {
       // check if you can black list the tracker then call this methods
       FaultInfo fi = getFaultInfo(hostName, false);
-      if(fi.removeBlackListedReason(rfb)) {
-        if(fi.getReasonforblacklisting().isEmpty()) {
+      if (fi.removeBlackListedReason(rfb)) {
+        if (fi.getReasonforblacklisting().isEmpty()) {
           addHostCapacity(hostName);
           LOG.info("Unblacklisting tracker : " + hostName);
           fi.unBlacklist();
           //We have unBlackListed tracker, so tracker should
           //definitely be healthy. Check fault count if fault count
           //is zero don't keep it memory.
-          if(fi.numFaults == 0) {
+          if (fi.numFaults == 0) {
             potentiallyFaultyTrackers.remove(hostName);
           }
         }
@@ -1464,7 +1466,9 @@ public class JobTracker implements MRCon
       for (Iterator it = p.keySet().iterator(); it.hasNext();) {
         String key = (String) it.next();
         String val = p.getProperty(key);
-        LOG.debug("Property '" + key + "' is " + val);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Property '" + key + "' is " + val);
+        }
       }
     }
 
@@ -1525,7 +1529,7 @@ public class JobTracker implements MRCon
         }
         // clean up the system dir, which will only work if hdfs is out of 
         // safe mode
-        if(systemDir == null) {
+        if (systemDir == null) {
           systemDir = new Path(getSystemDir());    
         }
         try {
@@ -1870,7 +1874,9 @@ public class JobTracker implements MRCon
     }
     taskset.add(taskid);
       
-    LOG.debug("Marked '" + taskid + "' from '" + taskTracker + "'");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Marked '" + taskid + "' from '" + taskTracker + "'");
+    }
   }
 
   /**
@@ -2456,7 +2462,9 @@ public class JobTracker implements MRCon
         if (tasks != null) {
           for (Task task : tasks) {
             expireLaunchingTasks.addNewTask(task.getTaskID());
-            LOG.debug(trackerName + " -> LaunchTask: " + task.getTaskID());
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(trackerName + " -> LaunchTask: " + task.getTaskID());
+            }
             actions.add(new LaunchTaskAction(task));
           }
         }
@@ -2753,7 +2761,9 @@ public class JobTracker implements MRCon
           //
           if (!tip.getJob().isComplete()) {
             killList.add(new KillTaskAction(killTaskId));
-            LOG.debug(taskTracker + " -> KillTaskAction: " + killTaskId);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(taskTracker + " -> KillTaskAction: " + killTaskId);
+            }
           }
         }
       }
@@ -2776,7 +2786,10 @@ public class JobTracker implements MRCon
    */
   private void addJobForCleanup(JobID id) {
     for (String taskTracker : taskTrackers.keySet()) {
-      LOG.debug("Marking job " + id + " for cleanup by tracker " + taskTracker);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Marking job " + id
+                  + " for cleanup by tracker " + taskTracker);
+      }
       synchronized (trackerToJobsToCleanup) {
         Set<JobID> jobsToKill = trackerToJobsToCleanup.get(taskTracker);
         if (jobsToKill == null) {
@@ -2801,7 +2814,9 @@ public class JobTracker implements MRCon
       List<TaskTrackerAction> killList = new ArrayList<TaskTrackerAction>();
       for (JobID killJobId : jobs) {
         killList.add(new KillJobAction(killJobId));
-        LOG.debug(taskTracker + " -> KillJobAction: " + killJobId);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(taskTracker + " -> KillJobAction: " + killJobId);
+        }
       }
 
       return killList;
@@ -2826,8 +2841,10 @@ public class JobTracker implements MRCon
           }
           if (tip.shouldCommit(taskId)) {
             saveList.add(new CommitTaskAction(taskId));
-            LOG.debug(tts.getTrackerName() + 
-                      " -> CommitTaskAction: " + taskId);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(tts.getTrackerName() + 
+                        " -> CommitTaskAction: " + taskId);
+            }
           }
         }
       }
@@ -3805,7 +3822,7 @@ public class JobTracker implements MRCon
   public synchronized boolean killTask(TaskAttemptID taskid, 
       boolean shouldFail) throws IOException {
     TaskInProgress tip = taskidToTIPMap.get(taskid);
-    if(tip != null) {
+    if (tip != null) {
 
       // check both queue-level and job-level access
       checkAccess(tip.getJob(), UserGroupInformation.getCurrentUser(),
@@ -4209,7 +4226,7 @@ public class JobTracker implements MRCon
     StringUtils.startupShutdownMessage(JobTracker.class, argv, LOG);
     
     try {
-      if(argv.length == 0) {
+      if (argv.length == 0) {
         JobTracker tracker = startTracker(new JobConf());
         tracker.offerService();
       }
@@ -4324,7 +4341,7 @@ public class JobTracker implements MRCon
 
   private synchronized JobStatus[] getJobStatus(Collection<JobInProgress> jips,
       boolean toComplete) {
-    if(jips == null || jips.isEmpty()) {
+    if (jips == null || jips.isEmpty()) {
       return new JobStatus[]{};
     }
     ArrayList<JobStatus> jobStatusList = new ArrayList<JobStatus>();
@@ -4332,8 +4349,8 @@ public class JobTracker implements MRCon
       JobStatus status = jip.getStatus();
       status.setStartTime(jip.getStartTime());
       status.setUsername(jip.getProfile().getUser());
-      if(toComplete) {
-        if(status.getRunState() == JobStatus.RUNNING || 
+      if (toComplete) {
+        if (status.getRunState() == JobStatus.RUNNING || 
             status.getRunState() == JobStatus.PREP) {
           jobStatusList.add(status);
         }
@@ -4455,8 +4472,10 @@ public class JobTracker implements MRCon
   private void checkMemoryRequirements(JobInProgress job)
       throws IOException {
     if (!perTaskMemoryConfigurationSetOnJT()) {
-      LOG.debug("Per-Task memory configuration is not set on JT. "
-          + "Not checking the job for invalid memory requirements.");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Per-Task memory configuration is not set on JT. "
+                  + "Not checking the job for invalid memory requirements.");
+      }
       return;
     }
 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/Task.java Sun Jun  6 06:13:47 2010
@@ -483,7 +483,9 @@ abstract public class Task implements Wr
       setState(TaskStatus.State.RUNNING);
     }
     if (useNewApi) {
-      LOG.debug("using new api for output committer");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("using new api for output committer");
+      }
       outputFormat =
         ReflectionUtils.newInstance(taskContext.getOutputFormatClass(), job);
       committer = outputFormat.getOutputCommitter(taskContext);
@@ -609,8 +611,10 @@ abstract public class Task implements Wr
             Thread.sleep(PROGRESS_INTERVAL);
           } 
           catch (InterruptedException e) {
-            LOG.debug(getTaskID() + " Progress/ping thread exiting " +
-            "since it got interrupted");
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(getTaskID() + " Progress/ping thread exiting " +
+                        "since it got interrupted");
+            }
             break;
           }
 
@@ -679,7 +683,9 @@ abstract public class Task implements Wr
     SortedRanges.Range range = 
       new SortedRanges.Range(currentRecStartIndex, len);
     taskStatus.setNextRecordRange(range);
-    LOG.debug("sending reportNextRecordRange " + range);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("sending reportNextRecordRange " + range);
+    }
     umbilical.reportNextRecordRange(taskId, range);
   }
 

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapred/TaskInProgress.java Sun Jun  6 06:13:47 2010
@@ -752,7 +752,9 @@ class TaskInProgress {
         machinesWhereFailed.add(trackerHostName);
         if(maxSkipRecords>0) {
           //skipping feature enabled
-          LOG.debug("TaskInProgress adding" + status.getNextRecordRange());
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("TaskInProgress adding" + status.getNextRecordRange());
+          }
           failedRanges.add(status.getNextRecordRange());
           skipping = startSkipping();
         }
@@ -1054,8 +1056,10 @@ class TaskInProgress {
     // create the task
     Task t = null;
     if (isMapTask()) {
-      LOG.debug("attempt " + numTaskFailures + " sending skippedRecords "
-          + failedRanges.getIndicesCount());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("attempt " + numTaskFailures + " sending skippedRecords "
+                  + failedRanges.getIndicesCount());
+      }
       t = new MapTask(jobFile, taskid, partition, splitInfo.getSplitIndex(),
           numSlotsNeeded);
     } else {
@@ -1074,7 +1078,9 @@ class TaskInProgress {
     }
     t.setConf(conf);
     t.setUser(getUser());
-    LOG.debug("Launching task with skipRanges:"+failedRanges.getSkipRanges());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Launching task with skipRanges:"+failedRanges.getSkipRanges());
+    }
     t.setSkipRanges(failedRanges.getSkipRanges());
     t.setSkipping(skipping);
     if(failedRanges.isTestAttempt()) {

Modified: hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java
URL: http://svn.apache.org/viewvc/hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java?rev=951832&r1=951831&r2=951832&view=diff
==============================================================================
--- hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java (original)
+++ hadoop/mapreduce/trunk/src/java/org/apache/hadoop/mapreduce/jobhistory/JobHistory.java Sun Jun  6 06:13:47 2010
@@ -396,7 +396,9 @@ public class JobHistory {
         LOG.warn("Unable to move " + fromPath +", deleting it");
         try {
           boolean b = logDirFs.delete(fromPath, false);
-          LOG.debug("Deletion of corrupt file " + fromPath + " returned " + b);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Deletion of corrupt file " + fromPath + " returned " + b);
+          }
         } catch (IOException ioe) {
           // Cannot delete either? Just log and carry on
           LOG.warn("Unable to delete " + fromPath + "Exception: " +


