chukwa-commits mailing list archives

From asrab...@apache.org
Subject svn commit: r816739 [2/3] - in /hadoop/chukwa/trunk: ./ contrib/chukwa-pig/ src/java/org/apache/hadoop/chukwa/analysis/salsa/fsm/ src/test/org/apache/hadoop/chukwa/analysis/ src/test/org/apache/hadoop/chukwa/analysis/salsa/ src/test/org/apache/hadoop/c...
Date Fri, 18 Sep 2009 18:43:02 GMT
Added: hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/analysis/salsa/fsm/TestFSMBuilder.java
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/analysis/salsa/fsm/TestFSMBuilder.java?rev=816739&view=auto
==============================================================================
--- hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/analysis/salsa/fsm/TestFSMBuilder.java (added)
+++ hadoop/chukwa/trunk/src/test/org/apache/hadoop/chukwa/analysis/salsa/fsm/TestFSMBuilder.java Fri Sep 18 18:43:01 2009
@@ -0,0 +1,562 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.chukwa.analysis.salsa.fsm;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.regex.*;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.chukwa.conf.ChukwaConfiguration;
+import org.apache.hadoop.chukwa.database.TableCreator;
+
+import org.apache.hadoop.chukwa.datacollection.DataFactory;
+import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent;
+import org.apache.hadoop.chukwa.datacollection.agent.ChukwaAgent.AlreadyRunningException;
+import org.apache.hadoop.chukwa.datacollection.connector.http.HttpConnector;
+import org.apache.hadoop.chukwa.datacollection.collector.CaptureWriter;
+import org.apache.hadoop.chukwa.datacollection.collector.servlet.ServletCollector;
+import org.apache.hadoop.chukwa.datacollection.controller.ChukwaAgentController;
+import org.apache.hadoop.chukwa.datacollection.sender.ChukwaHttpSender;
+import org.apache.hadoop.chukwa.datacollection.sender.RetryListOfCollectors;
+import org.apache.hadoop.chukwa.datacollection.test.ConsoleOutConnector;
+import org.apache.hadoop.chukwa.datacollection.writer.PipelineStageWriter;
+import org.apache.hadoop.chukwa.dataloader.MetricDataLoader;
+import org.apache.hadoop.conf.Configuration;
+import org.mortbay.jetty.Connector;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.nio.SelectChannelConnector;
+import org.mortbay.jetty.servlet.Context;
+import org.mortbay.jetty.servlet.ServletHolder;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.mapred.FileInputFormat;
+import org.apache.hadoop.mapred.FileOutputFormat;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobClient;
+import org.apache.hadoop.mapred.JobPriority;
+import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.mapred.SequenceFileInputFormat;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.chukwa.ChukwaArchiveKey;
+import org.apache.hadoop.chukwa.ChunkImpl;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.chukwa.extraction.demux.ChukwaRecordOutputFormat;
+import org.apache.hadoop.chukwa.extraction.demux.ChukwaRecordPartitioner;
+import org.apache.hadoop.chukwa.extraction.demux.Demux;
+import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;
+import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordKey;
+import org.apache.hadoop.chukwa.util.DatabaseWriter;
+import org.apache.hadoop.chukwa.util.ExceptionUtil;
+
+import junit.framework.TestCase;
+
+public class TestFSMBuilder extends TestCase {
+  private static Log log = LogFactory.getLog(TestFSMBuilder.class);
+
+  int LINES = 10000;
+  int THREADS = 2;
+  private MiniDFSCluster dfs = null;
+  int NUM_HADOOP_SLAVES = 4;
+  private FileSystem fileSys = null;
+  private MiniMRCluster mr = null;
+  private Server jettyCollector = null;
+  private ChukwaAgent agent = null;
+  private HttpConnector conn = null;
+  private ChukwaHttpSender sender = null;
+  private int agentPort = 9093;
+  private int collectorPort = 9990;
+  private static final String dataSink = "/demux/input";
+  private static final String fsmSink = "/analysis/salsafsm";
+  private static Path DEMUX_INPUT_PATH = null;
+  private static Path DEMUX_OUTPUT_PATH = null;
+  private static Path FSM_OUTPUT_PATH = null;
+  private ChukwaConfiguration conf = new ChukwaConfiguration();
+  private static SimpleDateFormat day = new SimpleDateFormat("yyyyMMdd_HH_mm");
+  private static String cluster = "demo";
+  long[] timeWindow = {7, 30, 91, 365, 3650};
+  long current = 1244617200000L;  // 2009-06-10
+
+  public void setUp() {
+    // Start up HDFS cluster - stores the collected JobHistory chunks
+    // Start up MR cluster - runs Demux and FSMBuilder
+    // Start up collector
+    // Start up agent
+    
+    System.out.println("In setUp()");
+    try {
+      System.setProperty("hadoop.log.dir", System.getProperty(
+          "test.build.data", "/tmp"));
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail("Could not set up: " + e.toString());
+    }  
+
+    // Start up HDFS cluster - stores the collected JobHistory chunks
+    try {
+      dfs = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null);
+      fileSys = dfs.getFileSystem();
+      DEMUX_INPUT_PATH = new Path(fileSys.getUri().toString()+File.separator+dataSink);          
+      DEMUX_OUTPUT_PATH = new Path(fileSys.getUri().toString()+File.separator+"/demux/output");
+    } catch(Exception e) {
+      e.printStackTrace();
+      fail("Fail to startup HDFS cluster.");      
+    }
+    // Start up MR cluster
+    try {
+      mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri()
+          .toString(), 1);
+    } catch(Exception e) {
+      fail("Fail to startup Map/reduce cluster.");
+    }
+    // Start up collector
+    try {
+      // Configure Collector
+      conf.set("chukwaCollector.chunkSuppressBufferSize", "10");
+      conf.set("writer.hdfs.filesystem",fileSys.getUri().toString());
+      conf.set("chukwaCollector.outputDir",dataSink);
+      conf.set("chukwaCollector.rotateInterval", "10000");
+      
+      // Set up jetty connector
+      SelectChannelConnector jettyConnector = new SelectChannelConnector();
+      jettyConnector.setLowResourcesConnections(THREADS-1);
+      jettyConnector.setLowResourceMaxIdleTime(1500);
+      jettyConnector.setPort(collectorPort);
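+      // Note that jettyConnector is never added to the server: the
+      // Server(collectorPort) constructor below creates its own connector,
+      // so the low-resource settings above take no effect.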
+      
+      // Set up jetty server proper, using connector
+      jettyCollector = new Server(collectorPort);
+      Context root = new Context(jettyCollector, "/", Context.SESSIONS);
+      root.addServlet(new ServletHolder(new ServletCollector(conf)), "/*");
+      jettyCollector.start();
+      jettyCollector.setStopAtShutdown(true);
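+      // Give the collector a moment to finish starting before the agent
+      // and sender below are wired up to it.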
+      Thread.sleep(10000);
+    } catch(Exception e) {
+      fail("Fail to startup collector.");
+    }
+
+    // Start up agent
+    try {
+      // Configure Agent
+      conf.set("chukwaAgent.tags", "cluster=\"demo\"");
+      DataFactory.getInstance().addDefaultTag(conf.get("chukwaAgent.tags", "cluster=\"unknown\""));
+      conf.set("chukwaAgent.checkpoint.dir", System.getenv("CHUKWA_DATA_DIR")+File.separator+"tmp");
+      conf.set("chukwaAgent.checkpoint.interval", "10000");
+      int portno = conf.getInt("chukwaAgent.control.port", agentPort);
+      agent = new ChukwaAgent(conf);
+      conn = new HttpConnector(agent, "http://localhost:"+collectorPort+"/chukwa");
+      conn.start();      
+      sender = new ChukwaHttpSender(conf);
+      ArrayList<String> collectorList = new ArrayList<String>();
+      collectorList.add("http://localhost:"+collectorPort+"/chukwa");
+      sender.setCollectors(new RetryListOfCollectors(collectorList, conf));
+    } catch (AlreadyRunningException e) {
+      fail("Chukwa Agent is already running");
+    }
+    System.out.println("Done setUp().");
+  }
+
+  public String readFile(File aFile) {
+    StringBuffer contents = new StringBuffer();
+    try {
+      BufferedReader input = new BufferedReader(new FileReader(aFile));
+      try {
+        String line = null; // not declared within while loop
+        while ((line = input.readLine()) != null) {
+          contents.append(line);
+          contents.append(System.getProperty("line.separator"));
+        }
+      } finally {
+        input.close();
+      }
+    } catch (IOException ex) {
+      ex.printStackTrace();
+    }
+    return contents.toString();
+  }
+
+  public void tearDown() {
+    FileSystem fs;
+    System.out.println("In tearDown()");
+    try {      
+
+      fs = dfs.getFileSystem();
+      fs.delete(DEMUX_OUTPUT_PATH, true);
+
+      agent.shutdown();
+      conn.shutdown();
+      jettyCollector.stop();
+      mr.shutdown();
+      dfs.shutdown();
+      Thread.sleep(2000);
+    } catch(Exception e) {
+      e.printStackTrace();
+      fail(e.toString());
+    }
+    System.out.println("Done tearDown()");
+  }
+
+  /**
+   * Performs tasks common to all tests:
+   * sets up the agent to collect samples of the two log types in use
+   * (job history logs via the JobLog type and clienttrace logs via the
+   * ClientTrace type), then calls Demux to process the logs.
+   */
+  protected void initialTasks () {
+    System.out.println("In initialTasks()");
+    try {
+      // Test Chukwa Agent Controller and Agent Communication
+      ChukwaAgentController cli = new ChukwaAgentController("localhost", agentPort);
+      String[] source = new File(System.getenv("CHUKWA_DATA_DIR") + File.separator + "log").list(new FilenameFilter() {
+        public boolean accept(File dir, String name) {
+          return name.endsWith(".log");
+        }
+      });
+
+      for(String fname : source) {
+        if (!(fname.equals("JobHistory.log") || fname.equals("ClientTrace.log"))) {
+          continue;
+        }
+        StringBuilder fullPath = new StringBuilder();
+        fullPath.append(System.getenv("CHUKWA_DATA_DIR"));
+        fullPath.append(File.separator);
+        fullPath.append("log");
+        fullPath.append(File.separator);        
+        fullPath.append(fname);
+        String recordType = fname.substring(0,fname.indexOf("."));
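+        // Register a tailing adaptor for this log file; the parameter
+        // string is "<startOffset> <file>", starting from offset 0.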
+        String adaptorId = cli.add(
+          "org.apache.hadoop.chukwa.datacollection.adaptor.filetailer.CharFileTailingAdaptorUTF8NewLineEscaped", 
+          recordType, "0 " + fullPath.toString(), 0);
+        assertNotNull(adaptorId);
+        Thread.sleep(2000);
+      }
+      cli.removeAll();
+      Thread.sleep(30000);
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail(e.toString());
+    }
+    
+    // Test Data Sink files written by Collector    
+    Path demuxDir = new Path(dataSink+"/*");
+    FileSystem fs;
+    try {
+      fs = dfs.getFileSystem();
+      FileStatus[] events = fs.globStatus(demuxDir);
+      log.info("Number of data sink files written:"+events.length);
+      assertTrue(events.length!=0);
+    } catch (IOException e) {
+      e.printStackTrace();
+      fail("File System Error.");
+    }
+    
+    // Test Demux    
+    log.info("Testing demux");
+    try {
+      //ChukwaConfiguration conf = new ChukwaConfiguration();
+      System.setProperty("hadoop.log.dir", System.getProperty(
+          "test.build.data", "/tmp"));
+    
+      String[] sortArgs = { DEMUX_INPUT_PATH.toString(), DEMUX_OUTPUT_PATH.toString() };
+      //      JobConf job = mr.createJobConf();
+      JobConf job = new JobConf(new ChukwaConfiguration(), Demux.class);
+      job.addResource(System.getenv("CHUKWA_CONF_DIR")+File.separator+"chukwa-demux-conf.xml");
+      job.setJobName("Chukwa-Demux_" + day.format(new Date()));
+      job.setInputFormat(SequenceFileInputFormat.class);
+      job.setMapperClass(Demux.MapClass.class);
+      job.setPartitionerClass(ChukwaRecordPartitioner.class);
+      job.setReducerClass(Demux.ReduceClass.class);
+
+      job.setOutputKeyClass(ChukwaRecordKey.class);
+      job.setOutputValueClass(ChukwaRecord.class);
+      job.setOutputFormat(ChukwaRecordOutputFormat.class);
+      job.setJobPriority(JobPriority.VERY_HIGH);
+      job.setNumMapTasks(2);
+      job.setNumReduceTasks(1);
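+      // Demux consumes the collector's rolled sink files (*.done) and
+      // writes ChukwaRecords, partitioned by ChukwaRecordKey, under
+      // DEMUX_OUTPUT_PATH for FSMBuilder to pick up.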
+      Path input = new Path(fileSys.getUri().toString()+File.separator+dataSink+File.separator+"*.done");
+      FileInputFormat.setInputPaths(job, input);
+      FileOutputFormat.setOutputPath(job, DEMUX_OUTPUT_PATH);
+      String[] jars = new File(System.getenv("CHUKWA_HOME")).list(new FilenameFilter() {
+        public boolean accept(File dir, String name) {
+          return name.endsWith(".jar");
+        }
+      });
+      job.setJar(System.getenv("CHUKWA_HOME")+File.separator+jars[0]);
+      //assertEquals(ToolRunner.run(job, new Demux(), sortArgs), 0);
+      JobClient.runJob(job);
+    } catch (Exception e) {
+      fail(e.toString());
+    }
+
+    System.out.println("Done initialTasks()");
+  }
+
+  public void testFSMBuilder_JobHistory020 () {
+    initialTasks();
+    // Test FSMBuilder (job history only)
+    log.info("Testing FSMBuilder (Job History only)");
+    System.out.println("In JobHistory020");
+    // Run FSMBuilder on Demux output
+    try {
+      JobConf job = new JobConf(new ChukwaConfiguration(), FSMBuilder.class);
+      job.addResource(System.getenv("CHUKWA_CONF_DIR")+File.separator+"chukwa-demux-conf.xml");
+      job.setJobName("Chukwa-FSMBuilder_" + day.format(new Date()));
+      job.setMapperClass(JobHistoryTaskDataMapper.class);
+      job.setPartitionerClass(FSMIntermedEntryPartitioner.class);
+      job.setReducerClass(FSMBuilder.FSMReducer.class);
+      job.setMapOutputValueClass(FSMIntermedEntry.class);
+      job.setMapOutputKeyClass(ChukwaRecordKey.class);
+
+      job.setInputFormat(SequenceFileInputFormat.class);
+      job.setOutputKeyClass(ChukwaRecordKey.class);
+      job.setOutputValueClass(ChukwaRecord.class);
+      job.setOutputFormat(ChukwaRecordOutputFormat.class);
+      job.setNumReduceTasks(1);
+
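+      // FSMBuilder reads the TaskData records that Demux extracted from the
+      // JobHistory log and emits one state record per task phase
+      // (MAP, REDUCE, REDUCE_SHUFFLEWAIT, REDUCE_SORT, REDUCE_REDUCER).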
+      Path inputPath = new Path(DEMUX_OUTPUT_PATH.toString()+File.separator+"/*/*/TaskData*.evt");
+      FSM_OUTPUT_PATH = new Path(fileSys.getUri().toString()+File.separator+fsmSink);
+
+      FileInputFormat.setInputPaths(job, inputPath);
+      FileOutputFormat.setOutputPath(job, FSM_OUTPUT_PATH);
+
+      String[] jars = new File(System.getenv("CHUKWA_HOME")).list(new FilenameFilter() {
+        public boolean accept(File dir, String name) {
+          return name.endsWith(".jar");
+        }
+      });
+      job.setJar(System.getenv("CHUKWA_HOME")+File.separator+jars[0]);
+      JobClient.runJob(job);
+    } catch (Exception e) {
+      fail("Error running FSMBuilder: "+e.toString());
+    }
+    System.out.println("Done running FSMBuilder; Checking results");
+  
+    // Check FSMBuilder output by reading the sequence file(s) generated
+    // Hard-coded to check the contents of test/samples/JobLog.log
+    try {
+
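+      // Attempt IDs look like attempt_200909120624_0005_m_000000_0; capture
+      // group 1 is the task number used to index the arrays below.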
+      Pattern task_id_pat = Pattern.compile("attempt_[0-9]*_[0-9]*_[mr]_([0-9]*)_[0-9]*");
+
+      ChukwaRecordKey key = new ChukwaRecordKey();
+      ChukwaRecord record = new ChukwaRecord();
+
+      // initialize data structures for checking FSM
+      // should see 10 maps, 8 reduces
+      boolean mapSeen[] = new boolean[10];
+      boolean reduceSeen[] = new boolean[8];
+      boolean reduceShuffleSeen[] = new boolean[8];
+      boolean reduceSortSeen[] = new boolean[8];
+      boolean reduceReducerSeen[] = new boolean[8];
+      for (int i = 0; i < 10; i++) mapSeen[i] = false;
+      for (int i = 0; i < 8; i++) { 
+        reduceSeen[i] = false;
+        reduceShuffleSeen[i] = false;
+        reduceSortSeen[i] = false;
+        reduceReducerSeen[i] = false;
+      }
+
+      Path fsm_outputs = new Path(FSM_OUTPUT_PATH.toString()+File.separator+
+        "/*/MAPREDUCE_FSM/MAPREDUCE_FSM*.evt");
+      FileStatus [] files;
+      files = fileSys.globStatus(fsm_outputs);
+      int count = 0;
+
+      for (int i = 0; i < files.length; i++) {
+        SequenceFile.Reader r = new SequenceFile.Reader(fileSys, files[i].getPath(), conf);
+        System.out.println("Processing files " + files[i].getPath().toString());
+        while (r.next(key, record)) {
+          String state_name = record.getValue("STATE_NAME");
+          String task_id = record.getValue("TASK_ID");
+          
+          Matcher m = task_id_pat.matcher(task_id);
+          if (!m.matches()) {
+            continue;
+          }
+          String tasknum_string = m.group(1);
+          if (tasknum_string == null) {
+            continue;
+          }
+          int tasknum = Integer.parseInt(tasknum_string);
+  
+          if (state_name.equals("MAP")) {
+            assertTrue("Map sequence number should be < 10",tasknum < 10);
+            mapSeen[tasknum] = true;
+          } else if (state_name.equals("REDUCE")) {
+            assertTrue("Reduce sequence number should be < 8",tasknum < 8);
+            reduceSeen[tasknum] = true;
+          } else if (state_name.equals("REDUCE_SHUFFLEWAIT")) {
+            assertTrue("Reduce sequence number should be < 8",tasknum < 8);
+            reduceShuffleSeen[tasknum] = true;
+          } else if (state_name.equals("REDUCE_SORT")) {
+            assertTrue("Reduce sequence number should be < 8",tasknum < 8);
+            reduceSortSeen[tasknum] = true;
+          } else if (state_name.equals("REDUCE_REDUCER")) {
+            assertTrue("Reduce sequence number should be < 8",tasknum < 8);
+            reduceReducerSeen[tasknum] = true;
+          }
+          count++;
+        }
+      }
+      System.out.println("Processed " + count + " records.");
+      assertTrue("Total number of states is 42 - 10 maps + (8 reduces * 4)",count == 42);  
+
+      // We must have seen all 10 maps and all 8 reduces; 
+      // check for that here
+      boolean passed = true;
+      for (int i = 0; i < 10; i++) passed &= mapSeen[i];
+      for (int i = 0; i < 8; i++) {
+        passed &= reduceSeen[i];
+        passed &= reduceShuffleSeen[i];
+        passed &= reduceSortSeen[i];
+        passed &= reduceReducerSeen[i];
+      }
+
+      assertTrue("Seen all Maps and Reduces in generated states.",passed);
+
+    } catch (Exception e) {
+      fail("Error checking FSMBuilder output: "+e.toString());
+    } 
+    
+  }
+
+  public void testFSMBuilder_ClientTrace020 () {
+    initialTasks();
+    // Test FSMBuilder (ClientTrace only)
+    log.info("Testing FSMBuilder (ClientTrace only)");
+    System.out.println("In ClientTrace020");
+    // Run FSMBuilder on Demux output
+    try {
+      // Process TaskTracker shuffle clienttrace entries first
+      JobConf job = new JobConf(new ChukwaConfiguration(), FSMBuilder.class);
+      job.addResource(System.getenv("CHUKWA_CONF_DIR")+File.separator+"chukwa-demux-conf.xml");
+      job.setJobName("Chukwa-FSMBuilder_" + day.format(new Date()));
+      job.setMapperClass(TaskTrackerClientTraceMapper.class);
+      job.setPartitionerClass(FSMIntermedEntryPartitioner.class);
+      job.setReducerClass(FSMBuilder.FSMReducer.class);
+      job.setMapOutputValueClass(FSMIntermedEntry.class);
+      job.setMapOutputKeyClass(ChukwaRecordKey.class);
+
+      job.setInputFormat(SequenceFileInputFormat.class);
+      job.setOutputKeyClass(ChukwaRecordKey.class);
+      job.setOutputValueClass(ChukwaRecord.class);
+      job.setOutputFormat(ChukwaRecordOutputFormat.class);
+      job.setNumReduceTasks(1);
+
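+      // The TaskTracker pass turns MAPRED_SHUFFLE clienttrace records into
+      // shuffle states; the DataNode pass below covers HDFS reads and writes.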
+      Path inputPath = new Path(DEMUX_OUTPUT_PATH.toString()+File.separator+"/*/*/ClientTraceDetailed*.evt");
+      Path fsmOutputPath1 = new Path(fileSys.getUri().toString()+File.separator+fsmSink+"1");
+
+      FileInputFormat.setInputPaths(job, inputPath);
+      FileOutputFormat.setOutputPath(job, fsmOutputPath1);
+
+      String[] jars = new File(System.getenv("CHUKWA_HOME")).list(new FilenameFilter() {
+        public boolean accept(File dir, String name) {
+          return name.endsWith(".jar");
+        }
+      });
+      job.setJar(System.getenv("CHUKWA_HOME")+File.separator+jars[0]);
+      JobClient.runJob(job);
+      System.out.println("Processed TaskTracker ClientTrace.");
+
+      // Process DataNode clienttrace entries
+      job = new JobConf(new ChukwaConfiguration(), FSMBuilder.class);
+      job.addResource(System.getenv("CHUKWA_CONF_DIR")+File.separator+"chukwa-demux-conf.xml");
+      job.setJobName("Chukwa-FSMBuilder_" + day.format(new Date()));
+      job.setMapperClass(DataNodeClientTraceMapper.class);
+      job.setPartitionerClass(FSMIntermedEntryPartitioner.class);
+      job.setReducerClass(FSMBuilder.FSMReducer.class);
+      job.setMapOutputValueClass(FSMIntermedEntry.class);
+      job.setMapOutputKeyClass(ChukwaRecordKey.class);
+
+      job.setInputFormat(SequenceFileInputFormat.class);
+      job.setOutputKeyClass(ChukwaRecordKey.class);
+      job.setOutputValueClass(ChukwaRecord.class);
+      job.setOutputFormat(ChukwaRecordOutputFormat.class);
+      job.setNumReduceTasks(1);
+
+      inputPath = new Path(DEMUX_OUTPUT_PATH.toString()+File.separator+"/*/*/ClientTraceDetailed*.evt");
+      Path fsmOutputPath2 = new Path(fileSys.getUri().toString()+File.separator+fsmSink+"2");
+
+      FileInputFormat.setInputPaths(job, inputPath);
+      FileOutputFormat.setOutputPath(job, fsmOutputPath2);
+
+      jars = new File(System.getenv("CHUKWA_HOME")).list(new FilenameFilter() {
+        public boolean accept(File dir, String name) {
+          return name.endsWith(".jar");
+        }
+      });
+      job.setJar(System.getenv("CHUKWA_HOME")+File.separator+jars[0]);
+      JobClient.runJob(job);
+      System.out.println("Processed DataNode ClientTrace.");
+
+    } catch (Exception e) {
+      fail("Error running FSMBuilder: "+e.toString());
+    }
+    System.out.println("Done running FSMBuilder; Checking results");
+
+    try {
+      Path fsm_outputs = new Path(fileSys.getUri().toString()+File.separator+
+        fsmSink + "*/*/*/*.evt");
+      FileStatus [] files;
+      files = fileSys.globStatus(fsm_outputs);
+      int count = 0;
+      int numHDFSRead = 0, numHDFSWrite = 0, numShuffles = 0;
+      ChukwaRecordKey key = new ChukwaRecordKey();
+      ChukwaRecord record = new ChukwaRecord();
+
+      for (int i = 0; i < files.length; i++) {
+        SequenceFile.Reader r = new SequenceFile.Reader(fileSys, files[i].getPath(), conf);
+        System.out.println("Processing files " + files[i].getPath().toString());
+        while (r.next(key, record)) {
+          String state_name = record.getValue("STATE_NAME");
+  
+          if (state_name.equals("READ_LOCAL") || state_name.equals("READ_REMOTE")) 
+          {
+            numHDFSRead++;
+          } else if (state_name.equals("WRITE_LOCAL") || state_name.equals("WRITE_REMOTE") 
+              || state_name.equals("WRITE_REPLICATED")) 
+          {
+            numHDFSWrite++;
+          } else if (state_name.equals("SHUFFLE_LOCAL") || state_name.equals("SHUFFLE_REMOTE")) 
+          {
+            numShuffles++;
+          }
+          count++;
+        }
+      }
+      System.out.println("Processed " + count + " records."); 
+      System.out.println("HDFSRD: " + numHDFSRead + " HDFSWR: " + numHDFSWrite + " SHUF: " + numShuffles);
+      assertTrue("Number of HDFS reads", numHDFSRead == 10);
+      assertTrue("Number of HDFS writes", numHDFSWrite == 8);      
+      assertTrue("Number of shuffles", numShuffles == 80);
+
+    } catch (Exception e) {
+      fail("Error checking FSMBuilder results: " + e.toString());
+    }      
+  }
+
+}
+

Added: hadoop/chukwa/trunk/test/samples/ClientTrace.log
URL: http://svn.apache.org/viewvc/hadoop/chukwa/trunk/test/samples/ClientTrace.log?rev=816739&view=auto
==============================================================================
--- hadoop/chukwa/trunk/test/samples/ClientTrace.log (added)
+++ hadoop/chukwa/trunk/test/samples/ClientTrace.log Fri Sep 18 18:43:01 2009
@@ -0,0 +1,98 @@
+2009-09-12 07:14:48,069 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:50012, dest: /127.0.0.1:53107, bytes: 10620380, op: HDFS_READ, cliID: DFSClient_attempt_200909120624_0005_m_000000_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-4693506771893277444_1763, duration: 0
+2009-09-12 07:14:59,812 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:36461, bytes: 1368349, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000000_0, reduceID: attempt_200909120624_0005_r_000000_0, duration: 277277094
+2009-09-12 07:15:01,797 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:50012, dest: /127.0.0.1:53116, bytes: 10618472, op: HDFS_READ, cliID: DFSClient_attempt_200909120624_0005_m_000001_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-217917535165838735_1768, duration: 0
+2009-09-12 07:15:05,496 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:36463, bytes: 1377391, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000001_0, reduceID: attempt_200909120624_0005_r_000000_0, duration: 461844360
+2009-09-12 07:15:09,461 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:50012, dest: /127.0.0.1:53124, bytes: 10616566, op: HDFS_READ, cliID: DFSClient_attempt_200909120624_0005_m_000002_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-5620086024607352264_1767, duration: 0
+2009-09-12 07:15:15,893 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:36470, bytes: 1415765, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000002_0, reduceID: attempt_200909120624_0005_r_000000_0, duration: 295663725
+2009-09-12 07:15:18,753 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:50012, dest: /127.0.0.1:53130, bytes: 10615832, op: HDFS_READ, cliID: DFSClient_attempt_200909120624_0005_m_000003_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-1429699096873749740_1769, duration: 0
+2009-09-12 07:15:26,329 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58818, bytes: 1249256, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000003_0, reduceID: attempt_200909120624_0005_r_000000_0, duration: 235653779
+2009-09-12 07:15:27,110 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:50012, dest: /127.0.0.1:40083, bytes: 10615440, op: HDFS_READ, cliID: DFSClient_attempt_200909120624_0005_m_000004_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-1796304879525156200_1770, duration: 0
+2009-09-12 07:15:36,043 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:50012, dest: /127.0.0.1:40090, bytes: 10612440, op: HDFS_READ, cliID: DFSClient_attempt_200909120624_0005_m_000005_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-1127657061245037122_1772, duration: 0
+2009-09-12 07:15:36,747 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58825, bytes: 1420472, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000004_0, reduceID: attempt_200909120624_0005_r_000000_0, duration: 272200460
+2009-09-12 07:15:42,118 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58826, bytes: 1363765, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000005_0, reduceID: attempt_200909120624_0005_r_000000_0, duration: 307150758
+2009-09-12 07:15:45,951 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:50012, dest: /127.0.0.1:40097, bytes: 10611326, op: HDFS_READ, cliID: DFSClient_attempt_200909120624_0005_m_000006_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-7619565066756915169_1765, duration: 0
+2009-09-12 07:15:52,420 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58835, bytes: 1359998, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000006_0, reduceID: attempt_200909120624_0005_r_000000_0, duration: 216467001
+2009-09-12 07:15:54,740 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:50012, dest: /127.0.0.1:40104, bytes: 10611122, op: HDFS_READ, cliID: DFSClient_attempt_200909120624_0005_m_000007_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_846875370439568043_1771, duration: 0
+2009-09-12 07:16:02,828 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58845, bytes: 1123559, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000007_0, reduceID: attempt_200909120624_0005_r_000000_0, duration: 312864103
+2009-09-12 07:16:03,772 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:50012, dest: /127.0.0.1:40110, bytes: 10605942, op: HDFS_READ, cliID: DFSClient_attempt_200909120624_0005_m_000008_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-4484271342326988797_1764, duration: 0
+2009-09-12 07:16:12,362 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:50012, dest: /127.0.0.1:40116, bytes: 10604518, op: HDFS_READ, cliID: DFSClient_attempt_200909120624_0005_m_000009_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_6425792267673877182_1766, duration: 0
+2009-09-12 07:16:13,276 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58851, bytes: 1284985, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000008_0, reduceID: attempt_200909120624_0005_r_000000_0, duration: 422463079
+2009-09-12 07:16:18,389 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58853, bytes: 1363696, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000009_0, reduceID: attempt_200909120624_0005_r_000000_0, duration: 76697380
+2009-09-12 07:16:21,953 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:40121, dest: /127.0.0.1:50012, bytes: 13353733, op: HDFS_WRITE, cliID: DFSClient_attempt_200909120624_0005_r_000000_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_4332711451661776648_1778, duration: 1099062112
+2009-09-12 07:16:30,379 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58860, bytes: 1089845, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000000_0, reduceID: attempt_200909120624_0005_r_000001_0, duration: 230548648
+2009-09-12 07:16:30,574 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58860, bytes: 1402871, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000001_0, reduceID: attempt_200909120624_0005_r_000001_0, duration: 155027184
+2009-09-12 07:16:30,749 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58860, bytes: 1475169, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000002_0, reduceID: attempt_200909120624_0005_r_000001_0, duration: 129517733
+2009-09-12 07:16:30,949 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58860, bytes: 1148357, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000003_0, reduceID: attempt_200909120624_0005_r_000001_0, duration: 137130308
+2009-09-12 07:16:31,039 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58860, bytes: 1171970, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000004_0, reduceID: attempt_200909120624_0005_r_000001_0, duration: 28262193
+2009-09-12 07:16:31,153 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58860, bytes: 1399573, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000005_0, reduceID: attempt_200909120624_0005_r_000001_0, duration: 74573804
+2009-09-12 07:16:31,244 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58860, bytes: 1088471, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000006_0, reduceID: attempt_200909120624_0005_r_000001_0, duration: 47166795
+2009-09-12 07:16:31,347 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58860, bytes: 1303630, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000007_0, reduceID: attempt_200909120624_0005_r_000001_0, duration: 80370962
+2009-09-12 07:16:31,492 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58860, bytes: 1274879, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000008_0, reduceID: attempt_200909120624_0005_r_000001_0, duration: 119390725
+2009-09-12 07:16:31,590 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58860, bytes: 1274546, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000009_0, reduceID: attempt_200909120624_0005_r_000001_0, duration: 64507982
+2009-09-12 07:16:33,991 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:40128, dest: /127.0.0.1:50012, bytes: 12654164, op: HDFS_WRITE, cliID: DFSClient_attempt_200909120624_0005_r_000001_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-1564679270830529171_1779, duration: 968184914
+2009-09-12 07:16:47,067 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58867, bytes: 1292703, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000000_0, reduceID: attempt_200909120624_0005_r_000002_0, duration: 239729670
+2009-09-12 07:16:47,258 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58867, bytes: 1307448, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000001_0, reduceID: attempt_200909120624_0005_r_000002_0, duration: 129747104
+2009-09-12 07:16:47,462 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58867, bytes: 1122413, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000002_0, reduceID: attempt_200909120624_0005_r_000002_0, duration: 152795766
+2009-09-12 07:16:47,664 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58867, bytes: 1404250, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000003_0, reduceID: attempt_200909120624_0005_r_000002_0, duration: 142128996
+2009-09-12 07:16:47,833 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58867, bytes: 1327594, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000004_0, reduceID: attempt_200909120624_0005_r_000002_0, duration: 47793727
+2009-09-12 07:16:47,990 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58867, bytes: 1358096, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000005_0, reduceID: attempt_200909120624_0005_r_000002_0, duration: 97753775
+2009-09-12 07:16:48,104 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58867, bytes: 1498657, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000006_0, reduceID: attempt_200909120624_0005_r_000002_0, duration: 45251918
+2009-09-12 07:16:48,239 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58867, bytes: 1308846, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000007_0, reduceID: attempt_200909120624_0005_r_000002_0, duration: 66842490
+2009-09-12 07:16:48,366 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58867, bytes: 1216737, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000008_0, reduceID: attempt_200909120624_0005_r_000002_0, duration: 120570552
+2009-09-12 07:16:48,499 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58867, bytes: 1210029, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000009_0, reduceID: attempt_200909120624_0005_r_000002_0, duration: 60210820
+2009-09-12 07:16:50,979 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:40136, dest: /127.0.0.1:50012, bytes: 13073013, op: HDFS_WRITE, cliID: DFSClient_attempt_200909120624_0005_r_000002_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_3286017436964416683_1780, duration: 1055739042
+2009-09-12 07:16:57,247 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58876, bytes: 1227410, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000000_0, reduceID: attempt_200909120624_0005_r_000003_0, duration: 336424592
+2009-09-12 07:16:57,524 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58876, bytes: 1281791, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000001_0, reduceID: attempt_200909120624_0005_r_000003_0, duration: 187176685
+2009-09-12 07:16:57,733 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58876, bytes: 1220613, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000002_0, reduceID: attempt_200909120624_0005_r_000003_0, duration: 169223933
+2009-09-12 07:16:57,954 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58876, bytes: 1202374, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000003_0, reduceID: attempt_200909120624_0005_r_000003_0, duration: 165011706
+2009-09-12 07:16:58,112 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58876, bytes: 1302449, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000004_0, reduceID: attempt_200909120624_0005_r_000003_0, duration: 45968809
+2009-09-12 07:16:58,254 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58876, bytes: 1146827, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000005_0, reduceID: attempt_200909120624_0005_r_000003_0, duration: 89887242
+2009-09-12 07:16:58,370 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58876, bytes: 1346124, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000006_0, reduceID: attempt_200909120624_0005_r_000003_0, duration: 78245151
+2009-09-12 07:16:58,461 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58876, bytes: 1619603, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000007_0, reduceID: attempt_200909120624_0005_r_000003_0, duration: 71232405
+2009-09-12 07:16:58,582 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58876, bytes: 1299213, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000008_0, reduceID: attempt_200909120624_0005_r_000003_0, duration: 99544050
+2009-09-12 07:16:58,720 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58876, bytes: 1368981, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000009_0, reduceID: attempt_200909120624_0005_r_000003_0, duration: 49791581
+2009-09-12 07:17:02,345 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:40144, dest: /127.0.0.1:50012, bytes: 13041998, op: HDFS_WRITE, cliID: DFSClient_attempt_200909120624_0005_r_000003_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-2153288029611089676_1781, duration: 1228518939
+2009-09-12 07:17:14,938 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58883, bytes: 1520039, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000000_0, reduceID: attempt_200909120624_0005_r_000004_0, duration: 244920012
+2009-09-12 07:17:15,156 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58883, bytes: 1298292, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000001_0, reduceID: attempt_200909120624_0005_r_000004_0, duration: 159505942
+2009-09-12 07:17:15,375 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58883, bytes: 1107909, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000002_0, reduceID: attempt_200909120624_0005_r_000004_0, duration: 167851335
+2009-09-12 07:17:15,547 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58883, bytes: 1526775, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000003_0, reduceID: attempt_200909120624_0005_r_000004_0, duration: 98181508
+2009-09-12 07:17:15,690 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58883, bytes: 1219843, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000004_0, reduceID: attempt_200909120624_0005_r_000004_0, duration: 71313426
+2009-09-12 07:17:15,829 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58883, bytes: 1209543, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000005_0, reduceID: attempt_200909120624_0005_r_000004_0, duration: 86041561
+2009-09-12 07:17:15,930 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58883, bytes: 1302231, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000006_0, reduceID: attempt_200909120624_0005_r_000004_0, duration: 51937229
+2009-09-12 07:17:16,066 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58883, bytes: 1196342, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000007_0, reduceID: attempt_200909120624_0005_r_000004_0, duration: 101533522
+2009-09-12 07:17:16,182 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58883, bytes: 1472129, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000008_0, reduceID: attempt_200909120624_0005_r_000004_0, duration: 90035314
+2009-09-12 07:17:16,343 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58883, bytes: 1300291, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000009_0, reduceID: attempt_200909120624_0005_r_000004_0, duration: 87452994
+2009-09-12 07:17:18,374 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:40151, dest: /127.0.0.1:50012, bytes: 13179937, op: HDFS_WRITE, cliID: DFSClient_attempt_200909120624_0005_r_000004_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-3144247784145311416_1782, duration: 990008210
+2009-09-12 07:17:31,357 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58891, bytes: 1302500, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000000_0, reduceID: attempt_200909120624_0005_r_000005_0, duration: 228349917
+2009-09-12 07:17:31,568 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58891, bytes: 1297099, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000001_0, reduceID: attempt_200909120624_0005_r_000005_0, duration: 148444685
+2009-09-12 07:17:31,858 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58891, bytes: 1465801, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000002_0, reduceID: attempt_200909120624_0005_r_000005_0, duration: 228345728
+2009-09-12 07:17:32,054 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58891, bytes: 1463475, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000003_0, reduceID: attempt_200909120624_0005_r_000005_0, duration: 126616359
+2009-09-12 07:17:32,170 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58891, bytes: 1448712, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000004_0, reduceID: attempt_200909120624_0005_r_000005_0, duration: 35023217
+2009-09-12 07:17:32,324 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58891, bytes: 1409443, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000005_0, reduceID: attempt_200909120624_0005_r_000005_0, duration: 105200679
+2009-09-12 07:17:32,502 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58891, bytes: 1393350, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000006_0, reduceID: attempt_200909120624_0005_r_000005_0, duration: 81305212
+2009-09-12 07:17:32,578 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58891, bytes: 1347744, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000007_0, reduceID: attempt_200909120624_0005_r_000005_0, duration: 52573938
+2009-09-12 07:17:32,707 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58891, bytes: 1353238, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000008_0, reduceID: attempt_200909120624_0005_r_000005_0, duration: 101342146
+2009-09-12 07:17:32,862 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58891, bytes: 1290875, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000009_0, reduceID: attempt_200909120624_0005_r_000005_0, duration: 106538356
+2009-09-12 07:17:35,436 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:40159, dest: /127.0.0.1:50012, bytes: 13799732, op: HDFS_WRITE, cliID: DFSClient_attempt_200909120624_0005_r_000005_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_-6461581615355826522_1783, duration: 1232990435
+2009-09-12 07:17:47,580 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58898, bytes: 1472321, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000000_0, reduceID: attempt_200909120624_0005_r_000006_0, duration: 272481518
+2009-09-12 07:17:47,797 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58898, bytes: 1285137, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000001_0, reduceID: attempt_200909120624_0005_r_000006_0, duration: 158518051
+2009-09-12 07:17:48,054 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58898, bytes: 1385928, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000002_0, reduceID: attempt_200909120624_0005_r_000006_0, duration: 199144535
+2009-09-12 07:17:48,235 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58898, bytes: 1262184, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000003_0, reduceID: attempt_200909120624_0005_r_000006_0, duration: 97207584
+2009-09-12 07:17:48,407 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58898, bytes: 1333731, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000004_0, reduceID: attempt_200909120624_0005_r_000006_0, duration: 98070315
+2009-09-12 07:17:48,533 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58898, bytes: 1333527, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000005_0, reduceID: attempt_200909120624_0005_r_000006_0, duration: 95965457
+2009-09-12 07:17:48,660 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58898, bytes: 1197432, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000006_0, reduceID: attempt_200909120624_0005_r_000006_0, duration: 53482206
+2009-09-12 07:17:48,774 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58898, bytes: 1337398, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000007_0, reduceID: attempt_200909120624_0005_r_000006_0, duration: 66369777
+2009-09-12 07:17:48,951 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58898, bytes: 1211451, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000008_0, reduceID: attempt_200909120624_0005_r_000006_0, duration: 140616984
+2009-09-12 07:17:49,162 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58898, bytes: 1171217, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000009_0, reduceID: attempt_200909120624_0005_r_000006_0, duration: 147531108
+2009-09-12 07:17:52,071 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:40167, dest: /127.0.0.1:50012, bytes: 13016594, op: HDFS_WRITE, cliID: DFSClient_attempt_200909120624_0005_r_000006_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_6037495273949116233_1784, duration: 1015593095
+2009-09-12 07:18:05,588 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58906, bytes: 1244253, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000000_0, reduceID: attempt_200909120624_0005_r_000007_0, duration: 248469549
+2009-09-12 07:18:05,833 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58906, bytes: 1265268, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000001_0, reduceID: attempt_200909120624_0005_r_000007_0, duration: 155539848
+2009-09-12 07:18:06,074 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58906, bytes: 1318957, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000002_0, reduceID: attempt_200909120624_0005_r_000007_0, duration: 197926153
+2009-09-12 07:18:06,240 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58906, bytes: 1255354, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000003_0, reduceID: attempt_200909120624_0005_r_000007_0, duration: 124690584
+2009-09-12 07:18:06,357 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58906, bytes: 1287493, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000004_0, reduceID: attempt_200909120624_0005_r_000007_0, duration: 55286170
+2009-09-12 07:18:06,524 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58906, bytes: 1288430, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000005_0, reduceID: attempt_200909120624_0005_r_000007_0, duration: 124984494
+2009-09-12 07:18:06,646 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58906, bytes: 1322187, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000006_0, reduceID: attempt_200909120624_0005_r_000007_0, duration: 83954025
+2009-09-12 07:18:06,744 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58906, bytes: 1270736, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000007_0, reduceID: attempt_200909120624_0005_r_000007_0, duration: 75929082
+2009-09-12 07:18:06,890 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58906, bytes: 1389998, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000008_0, reduceID: attempt_200909120624_0005_r_000007_0, duration: 126354579
+2009-09-12 07:18:06,995 INFO org.apache.hadoop.mapred.TaskTracker.clienttrace: src: 127.0.1.1:50062, dest: 127.0.1.1:58906, bytes: 1521261, op: MAPRED_SHUFFLE, cliID: attempt_200909120624_0005_m_000009_0, reduceID: attempt_200909120624_0005_r_000007_0, duration: 69378433
+2009-09-12 07:18:09,140 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /127.0.0.1:40174, dest: /127.0.0.1:50012, bytes: 13189883, op: HDFS_WRITE, cliID: DFSClient_attempt_200909120624_0005_r_000007_0, srvID: DS-199263106-127.0.1.1-50012-1242830204189, blockid: blk_1656186309044945384_1785, duration: 1084068847


