hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cutt...@apache.org
Subject svn commit: r486303 - in /lucene/hadoop/trunk: CHANGES.txt src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java src/webapps/job/jobdetailshistory.jsp
Date Tue, 12 Dec 2006 20:39:53 GMT
Author: cutting
Date: Tue Dec 12 12:39:52 2006
New Revision: 486303

URL: http://svn.apache.org/viewvc?view=rev&rev=486303
Log:
HADOOP-796.  Provide more convenient access to failed task information in the web interface.
 Contributed by Sanjay.

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java
    lucene/hadoop/trunk/src/webapps/job/jobdetailshistory.jsp

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=486303&r1=486302&r2=486303
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Tue Dec 12 12:39:52 2006
@@ -51,6 +51,9 @@
 14. HADOOP-786. Log common exception at debug level.
     (Sanjay Dahiya via cutting)
 
+15. HADOOP-796. Provide more convenient access to failed task
+    information in the web interface.  (Sanjay Dahiya via cutting)
+
 
 Release 0.9.1 - 2006-12-06
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java?view=diff&rev=486303&r1=486302&r2=486303
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java Tue Dec 12 12:39:52 2006
@@ -3,6 +3,7 @@
 import java.util.*;
 import java.io.*;
 import org.apache.hadoop.mapred.JobHistory.Keys ; 
+import org.apache.hadoop.mapred.JobHistory.Values; ;
 
 /**
  * Default parser for job history files. It creates object model from 
@@ -151,6 +152,35 @@
      */ 
     Map<String, Map<String, JobHistory.JobInfo>> getValues() {
       return jobTrackerToJobs;
+    }
+  }
+  
+  
+  // call this only for jobs that succeeded for better results. 
+  static class BadNodesFilter implements JobHistory.Listener {
+    private Map<String, Set<String>> badNodesToNumFaiedTasks = new HashMap();
+
+    Map<String, Set<String>> getValues(){
+      return badNodesToNumFaiedTasks; 
+    }
+    public void handle(JobHistory.RecordTypes recType, Map<Keys, String> values)
+      throws IOException {
+      
+      if (recType.equals(JobHistory.RecordTypes.MapAttempt) || 
+          recType.equals(JobHistory.RecordTypes.ReduceAttempt) ) {
+        
+        if( Values.FAILED.name().equals(values.get(Keys.TASK_STATUS) )  ){
+          String hostName = values.get(Keys.HOSTNAME) ;
+          String taskid = values.get(Keys.TASKID); 
+          Set tasks = badNodesToNumFaiedTasks.get(hostName); 
+          if( null == tasks  ){
+            tasks = new TreeSet(); 
+            tasks.add(taskid);
+            badNodesToNumFaiedTasks.put(hostName, tasks);
+          }else{
+            tasks.add(taskid);
+          }
+        }
+      }      
     }
   }
 }

Modified: lucene/hadoop/trunk/src/webapps/job/jobdetailshistory.jsp
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/webapps/job/jobdetailshistory.jsp?view=diff&rev=486303&r1=486302&r2=486303
==============================================================================
--- lucene/hadoop/trunk/src/webapps/job/jobdetailshistory.jsp (original)
+++ lucene/hadoop/trunk/src/webapps/job/jobdetailshistory.jsp Tue Dec 12 12:39:52 2006
@@ -101,6 +101,40 @@
 	<td><%=StringUtils.getFormattedTimeWithDiff(dateFormat, reduceFinished, reduceStarted)%></td>
 </tr>
  </table>
+
+<br/>
+ <%
+	DefaultJobHistoryParser.BadNodesFilter filter = new DefaultJobHistoryParser.BadNodesFilter();
+	String dir = System.getProperty("hadoop.log.dir") + File.separator + "history" ; 
+ 
+	JobHistory.parseHistory(new File(dir, jobTrackerId+"_" + jobid), filter); 
+	Map<String, Set<String>> badNodes = filter.getValues(); 
+	if( badNodes.size() > 0 ) {
+ %>
+<h3>Failed tasks attempts by nodes </h3>
+<table border="1">
+<tr><td>Hostname</td><td>Failed Tasks</td></tr>
+ <%	  
+	for( String node : badNodes.keySet() ) {
+	  Set<String> failedTasks = badNodes.get(node); 
+%>
+	<tr>
+		<td><%=node %></td>
+		<td>
+<%
+		for( String t : failedTasks ) {
+%>
+		 <a href="taskdetailshistory.jsp?jobid=<%=jobid%>&jobTrackerId=<%=jobTrackerId%>&taskid=<%=t %>"><%=t %></a>,&nbsp;
+<%		  
+		}
+%>	
+		</td>
+	</tr>
+<%	  
+     }
+	}
+ %>
+</table>
  </center>
 
 </body></html>



Mime
View raw message