hadoop-common-commits mailing list archives

From: cutt...@apache.org
Subject: svn commit: r529756 [2/2] - in /lucene/hadoop/trunk/src: contrib/hbase/src/java/org/apache/hadoop/hbase/ contrib/hbase/src/test/org/apache/hadoop/hbase/ contrib/streaming/src/java/org/apache/hadoop/streaming/ java/org/apache/hadoop/dfs/ java/org/apache...
Date: Tue, 17 Apr 2007 20:38:02 GMT
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobHistory.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobHistory.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobHistory.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/JobHistory.java Tue Apr 17 13:37:59 2007
@@ -60,10 +60,10 @@
    * It acts as a global namespace for all keys. 
    */
   public static enum Keys { JOBTRACKERID,
-    START_TIME, FINISH_TIME, JOBID, JOBNAME, USER, JOBCONF,SUBMIT_TIME, LAUNCH_TIME, 
-    TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES, FINISHED_MAPS, FINISHED_REDUCES,
-    JOB_STATUS, TASKID, HOSTNAME, TASK_TYPE, ERROR, TASK_ATTEMPT_ID, TASK_STATUS, 
-    COPY_PHASE, SORT_PHASE, REDUCE_PHASE, SHUFFLE_FINISHED, SORT_FINISHED 
+                            START_TIME, FINISH_TIME, JOBID, JOBNAME, USER, JOBCONF,SUBMIT_TIME, LAUNCH_TIME, 
+                            TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES, FINISHED_MAPS, FINISHED_REDUCES,
+                            JOB_STATUS, TASKID, HOSTNAME, TASK_TYPE, ERROR, TASK_ATTEMPT_ID, TASK_STATUS, 
+                            COPY_PHASE, SORT_PHASE, REDUCE_PHASE, SHUFFLE_FINISHED, SORT_FINISHED 
   };
   /**
    * This enum contains some of the values commonly used by history log events. 
@@ -94,7 +94,7 @@
         }
         masterIndex = 
           new PrintWriter(
-              new FileOutputStream(new File( LOG_DIR + File.separator + MASTER_INDEX_LOG_FILE), true )) ;
+                          new FileOutputStream(new File( LOG_DIR + File.separator + MASTER_INDEX_LOG_FILE), true )) ;
         // add jobtracker id = tracker start time
         log(masterIndex, RecordTypes.Jobtracker, Keys.START_TIME, JOBTRACKER_START_TIME);  
       }catch(IOException e){
@@ -114,17 +114,17 @@
    * @throws IOException
    */
   public static void parseHistory(File path, Listener l) throws IOException{
-      BufferedReader reader = new BufferedReader(new FileReader(path));
-      String line = null ; 
-      StringBuffer buf = new StringBuffer(); 
-      while ((line = reader.readLine())!= null){
-        buf.append(line); 
-        if( ! line.trim().endsWith("\"")){
-          continue ; 
-        }
-        parseLine(buf.toString(), l );
-        buf = new StringBuffer(); 
+    BufferedReader reader = new BufferedReader(new FileReader(path));
+    String line = null ; 
+    StringBuffer buf = new StringBuffer(); 
+    while ((line = reader.readLine())!= null){
+      buf.append(line); 
+      if( ! line.trim().endsWith("\"")){
+        continue ; 
       }
+      parseLine(buf.toString(), l );
+      buf = new StringBuffer(); 
+    }
   }
   /**
    * Parse a single line of history. 
@@ -305,13 +305,13 @@
      * @param jobConf path to job conf xml file in HDFS. 
      */
     public static void logSubmitted(String jobId, String jobName, String user, 
-        long submitTime, String jobConf){
+                                    long submitTime, String jobConf){
       
       if( ! disableHistory ){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job, 
-              new Enum[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
-              new String[]{jobId, jobName, user, String.valueOf(submitTime),jobConf });
+                         new Enum[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
+                         new String[]{jobId, jobName, user, String.valueOf(submitTime),jobConf });
         }
         // setup the history log file for this job
         String logFileName =  JOBTRACKER_START_TIME + "_" + jobId ; 
@@ -322,8 +322,8 @@
           openJobs.put(logFileName, writer);
           // add to writer as well 
           JobHistory.log(writer, RecordTypes.Job, 
-              new Enum[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
-              new String[]{jobId, jobName, user, String.valueOf(submitTime) ,jobConf}); 
+                         new Enum[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
+                         new String[]{jobId, jobName, user, String.valueOf(submitTime) ,jobConf}); 
              
         }catch(IOException e){
           LOG.error("Failed creating job history log file, disabling history", e);
@@ -342,9 +342,9 @@
       if( ! disableHistory ){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job, 
-              new Enum[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
-              new String[] {jobId,  String.valueOf(startTime), 
-                String.valueOf(totalMaps), String.valueOf(totalReduces) } ) ; 
+                         new Enum[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
+                         new String[] {jobId,  String.valueOf(startTime), 
+                                       String.valueOf(totalMaps), String.valueOf(totalReduces) } ) ; 
         }
         
         String logFileName =  JOBTRACKER_START_TIME + "_" + jobId ; 
@@ -352,8 +352,8 @@
         
         if( null != writer ){
           JobHistory.log(writer, RecordTypes.Job, 
-              new Enum[] {Keys.JOBID, Keys.LAUNCH_TIME,Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
-              new String[] {jobId,  String.valueOf(startTime), String.valueOf(totalMaps), String.valueOf(totalReduces)} ) ; 
+                         new Enum[] {Keys.JOBID, Keys.LAUNCH_TIME,Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
+                         new String[] {jobId,  String.valueOf(startTime), String.valueOf(totalMaps), String.valueOf(totalReduces)} ) ; 
         }
       }
     }
@@ -367,13 +367,13 @@
      * @param failedReduces no of failed reduce tasks. 
      */ 
     public static void logFinished(String jobId, long finishTime, int finishedMaps, int finishedReduces,
-        int failedMaps, int failedReduces){
+                                   int failedMaps, int failedReduces){
       if( ! disableHistory ){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job,          
-              new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
-              new String[] {jobId,  "" + finishTime, Values.SUCCESS.name(), 
-                String.valueOf(finishedMaps), String.valueOf(finishedReduces) } ) ;
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
+                         new String[] {jobId,  "" + finishTime, Values.SUCCESS.name(), 
+                                       String.valueOf(finishedMaps), String.valueOf(finishedReduces) } ) ;
         }
         
         // close job file for this job
@@ -381,11 +381,11 @@
         PrintWriter writer = openJobs.get(logFileName); 
         if( null != writer){
           JobHistory.log(writer, RecordTypes.Job,          
-              new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES,
-              Keys.FAILED_MAPS, Keys.FAILED_REDUCES},
-              new String[] {jobId,  "" + finishTime, Values.SUCCESS.name(), 
-                String.valueOf(finishedMaps), String.valueOf(finishedReduces),
-                String.valueOf(failedMaps), String.valueOf(failedReduces)} ) ;
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES,
+                                     Keys.FAILED_MAPS, Keys.FAILED_REDUCES},
+                         new String[] {jobId,  "" + finishTime, Values.SUCCESS.name(), 
+                                       String.valueOf(finishedMaps), String.valueOf(finishedReduces),
+                                       String.valueOf(failedMaps), String.valueOf(failedReduces)} ) ;
           writer.close();
           openJobs.remove(logFileName); 
         }
@@ -404,20 +404,20 @@
       if( ! disableHistory ){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job,
-              new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
-              new String[] {jobid,  String.valueOf(timestamp), Values.FAILED.name(), String.valueOf(finishedMaps), 
-                String.valueOf(finishedReduces)} ) ; 
-        }
-          String logFileName =  JOBTRACKER_START_TIME + "_" + jobid ; 
-          PrintWriter writer = (PrintWriter)openJobs.get(logFileName); 
-          if( null != writer){
-            JobHistory.log(writer, RecordTypes.Job,
-                new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS,Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
-                new String[] {jobid,  String.valueOf(timestamp), Values.FAILED.name(), String.valueOf(finishedMaps), 
-                  String.valueOf(finishedReduces)} ) ; 
-            writer.close();
-            openJobs.remove(logFileName); 
-          }
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
+                         new String[] {jobid,  String.valueOf(timestamp), Values.FAILED.name(), String.valueOf(finishedMaps), 
+                                       String.valueOf(finishedReduces)} ) ; 
+        }
+        String logFileName =  JOBTRACKER_START_TIME + "_" + jobid ; 
+        PrintWriter writer = (PrintWriter)openJobs.get(logFileName); 
+        if( null != writer){
+          JobHistory.log(writer, RecordTypes.Job,
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS,Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
+                         new String[] {jobid,  String.valueOf(timestamp), Values.FAILED.name(), String.valueOf(finishedMaps), 
+                                       String.valueOf(finishedReduces)} ) ; 
+          writer.close();
+          openJobs.remove(logFileName); 
+        }
       }
     }
   }
@@ -437,12 +437,12 @@
      * @param startTime startTime of tip. 
      */
     public static void logStarted(String jobId, String taskId, String taskType, 
-         long startTime){
+                                  long startTime){
       if( ! disableHistory ){
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId); 
         if( null != writer ){
           JobHistory.log(writer, RecordTypes.Task, new Enum[]{Keys.TASKID, Keys.TASK_TYPE , Keys.START_TIME}, 
-              new String[]{taskId, taskType, String.valueOf(startTime)}) ;
+                         new String[]{taskId, taskType, String.valueOf(startTime)}) ;
         }
       }
     }
@@ -454,13 +454,13 @@
      * @param finishTime finish timeof task in ms
      */
     public static void logFinished(String jobId, String taskId, String taskType, 
-        long finishTime){
+                                   long finishTime){
       if( ! disableHistory ){
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId); 
         if( null != writer ){
           JobHistory.log(writer, RecordTypes.Task, new Enum[]{Keys.TASKID, Keys.TASK_TYPE, 
-              Keys.TASK_STATUS, Keys.FINISH_TIME}, 
-              new String[]{ taskId,taskType, Values.SUCCESS.name(), String.valueOf(finishTime)}) ;
+                                                              Keys.TASK_STATUS, Keys.FINISH_TIME}, 
+                         new String[]{ taskId,taskType, Values.SUCCESS.name(), String.valueOf(finishTime)}) ;
         }
       }
     }
@@ -477,8 +477,8 @@
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId); 
         if( null != writer ){
           JobHistory.log(writer, RecordTypes.Task, new Enum[]{Keys.TASKID, Keys.TASK_TYPE, 
-              Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.ERROR}, 
-              new String[]{ taskId,  taskType, Values.FAILED.name(), String.valueOf(time) , error}) ;
+                                                              Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.ERROR}, 
+                         new String[]{ taskId,  taskType, Values.FAILED.name(), String.valueOf(time) , error}) ;
         }
       }
     }
@@ -500,43 +500,43 @@
    * a Map Attempt on a node.
    */
   public static class MapAttempt extends TaskAttempt{
-   /**
-    * Log start time of this map task attempt. 
-    * @param jobId job id
-    * @param taskId task id
-    * @param taskAttemptId task attempt id
-    * @param startTime start time of task attempt as reported by task tracker. 
-    * @param hostName host name of the task attempt. 
-    */
-   public static void logStarted(String jobId, String taskId,String taskAttemptId, long startTime, String hostName){
-     if( ! disableHistory ){
-       PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
-       if( null != writer ){
-         JobHistory.log( writer, RecordTypes.MapAttempt, 
-             new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, 
-               Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.HOSTNAME},
-             new String[]{Values.MAP.name(),  taskId, 
-                taskAttemptId, String.valueOf(startTime), hostName} ) ; 
-       }
-      }
-    }
-   /**
-    * Log finish time of map task attempt. 
-    * @param jobId job id
-    * @param taskId task id
-    * @param taskAttemptId task attempt id 
-    * @param finishTime finish time
-    * @param hostName host name 
-    */
+    /**
+     * Log start time of this map task attempt. 
+     * @param jobId job id
+     * @param taskId task id
+     * @param taskAttemptId task attempt id
+     * @param startTime start time of task attempt as reported by task tracker. 
+     * @param hostName host name of the task attempt. 
+     */
+    public static void logStarted(String jobId, String taskId,String taskAttemptId, long startTime, String hostName){
+      if( ! disableHistory ){
+        PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
+        if( null != writer ){
+          JobHistory.log( writer, RecordTypes.MapAttempt, 
+                          new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, 
+                                      Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.HOSTNAME},
+                          new String[]{Values.MAP.name(),  taskId, 
+                                       taskAttemptId, String.valueOf(startTime), hostName} ) ; 
+        }
+      }
+    }
+    /**
+     * Log finish time of map task attempt. 
+     * @param jobId job id
+     * @param taskId task id
+     * @param taskAttemptId task attempt id 
+     * @param finishTime finish time
+     * @param hostName host name 
+     */
     public static void logFinished(String jobId, String taskId, String taskAttemptId, long finishTime, String hostName){
       if( ! disableHistory ){
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if( null != writer ){
           JobHistory.log(writer, RecordTypes.MapAttempt, 
-              new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
-              Keys.FINISH_TIME, Keys.HOSTNAME},
-              new String[]{Values.MAP.name(), taskId, taskAttemptId, Values.SUCCESS.name(),  
-              String.valueOf(finishTime), hostName} ) ; 
+                         new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                                     Keys.FINISH_TIME, Keys.HOSTNAME},
+                         new String[]{Values.MAP.name(), taskId, taskAttemptId, Values.SUCCESS.name(),  
+                                      String.valueOf(finishTime), hostName} ) ; 
         }
       }
     }
@@ -550,15 +550,15 @@
      * @param error error message if any for this task attempt. 
      */
     public static void logFailed(String jobId, String taskId, String taskAttemptId, 
-        long timestamp, String hostName, String error){
+                                 long timestamp, String hostName, String error){
       if( ! disableHistory ){
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if( null != writer ){
           JobHistory.log( writer, RecordTypes.MapAttempt, 
-              new Enum[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
-                Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR},
-              new String[]{ Values.MAP.name(), taskId, taskAttemptId, Values.FAILED.name(),
-                String.valueOf(timestamp), hostName, error} ) ; 
+                          new Enum[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                                     Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR},
+                          new String[]{ Values.MAP.name(), taskId, taskAttemptId, Values.FAILED.name(),
+                                        String.valueOf(timestamp), hostName, error} ) ; 
         }
       }
     } 
@@ -577,18 +577,18 @@
      * @param hostName host name 
      */
     public static void logStarted(String jobId, String taskId, String taskAttemptId, 
-        long startTime, String hostName){
+                                  long startTime, String hostName){
       if( ! disableHistory ){
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if( null != writer ){
           JobHistory.log( writer, RecordTypes.ReduceAttempt, 
-              new Enum[]{  Keys.TASK_TYPE, Keys.TASKID, 
-                Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.HOSTNAME},
-              new String[]{Values.REDUCE.name(),  taskId, 
-                taskAttemptId, String.valueOf(startTime), hostName} ) ; 
+                          new Enum[]{  Keys.TASK_TYPE, Keys.TASKID, 
+                                       Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.HOSTNAME},
+                          new String[]{Values.REDUCE.name(),  taskId, 
+                                       taskAttemptId, String.valueOf(startTime), hostName} ) ; 
         }
       }
-     }
+    }
     /**
      * Log finished event of this task. 
      * @param jobId job id
@@ -599,42 +599,42 @@
      * @param finishTime finish time of task
      * @param hostName host name where task attempt executed
      */
-     public static void logFinished(String jobId, String taskId, String taskAttemptId, 
-        long shuffleFinished, long sortFinished, long finishTime, String hostName){
-       if( ! disableHistory ){
-         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
-         if( null != writer ){
-           JobHistory.log( writer, RecordTypes.ReduceAttempt, 
-               new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
-               Keys.SHUFFLE_FINISHED, Keys.SORT_FINISHED, Keys.FINISH_TIME, Keys.HOSTNAME},
-               new String[]{Values.REDUCE.name(),  taskId, taskAttemptId, Values.SUCCESS.name(), 
-               String.valueOf(shuffleFinished), String.valueOf(sortFinished),
-               String.valueOf(finishTime), hostName} ) ; 
-         }
-       }
-     }
-     /**
-      * Log failed reduce task attempt. 
-      * @param jobId job id 
-      * @param taskId task id
-      * @param taskAttemptId task attempt id
-      * @param timestamp time stamp when task failed
-      * @param hostName host name of the task attempt.  
-      * @param error error message of the task. 
-      */
-     public static void logFailed(String jobId, String taskId,String taskAttemptId, long timestamp, 
-          String hostName, String error){
-       if( ! disableHistory ){
-         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
-         if( null != writer ){
-           JobHistory.log( writer, RecordTypes.ReduceAttempt, 
-               new Enum[]{  Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID,Keys.TASK_STATUS, 
-                 Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR },
-               new String[]{ Values.REDUCE.name(), taskId, taskAttemptId, Values.FAILED.name(), 
-               String.valueOf(timestamp), hostName, error } ) ; 
-         }
-       }
-     }
+    public static void logFinished(String jobId, String taskId, String taskAttemptId, 
+                                   long shuffleFinished, long sortFinished, long finishTime, String hostName){
+      if( ! disableHistory ){
+        PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
+        if( null != writer ){
+          JobHistory.log( writer, RecordTypes.ReduceAttempt, 
+                          new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                                      Keys.SHUFFLE_FINISHED, Keys.SORT_FINISHED, Keys.FINISH_TIME, Keys.HOSTNAME},
+                          new String[]{Values.REDUCE.name(),  taskId, taskAttemptId, Values.SUCCESS.name(), 
+                                       String.valueOf(shuffleFinished), String.valueOf(sortFinished),
+                                       String.valueOf(finishTime), hostName} ) ; 
+        }
+      }
+    }
+    /**
+     * Log failed reduce task attempt. 
+     * @param jobId job id 
+     * @param taskId task id
+     * @param taskAttemptId task attempt id
+     * @param timestamp time stamp when task failed
+     * @param hostName host name of the task attempt.  
+     * @param error error message of the task. 
+     */
+    public static void logFailed(String jobId, String taskId,String taskAttemptId, long timestamp, 
+                                 String hostName, String error){
+      if( ! disableHistory ){
+        PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
+        if( null != writer ){
+          JobHistory.log( writer, RecordTypes.ReduceAttempt, 
+                          new Enum[]{  Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID,Keys.TASK_STATUS, 
+                                       Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR },
+                          new String[]{ Values.REDUCE.name(), taskId, taskAttemptId, Values.FAILED.name(), 
+                                        String.valueOf(timestamp), hostName, error } ) ; 
+        }
+      }
+    }
   }
   /**
    * Callback interface for reading back log events from JobHistory. This interface 
@@ -677,12 +677,12 @@
       if( lastRan ==0 || (now - lastRan) < ONE_DAY_IN_MS ){
         return ; 
       }
-       lastRan = now;  
-       isRunning = true ; 
-        // update master Index first
-        try{
+      lastRan = now;  
+      isRunning = true ; 
+      // update master Index first
+      try{
         File logFile = new File(
-            LOG_DIR + File.separator + MASTER_INDEX_LOG_FILE); 
+                                LOG_DIR + File.separator + MASTER_INDEX_LOG_FILE); 
         
         synchronized(MASTER_INDEX_LOG_FILE){
           Map<String, Map<String, JobHistory.JobInfo>> jobTrackersToJobs = 
@@ -728,14 +728,14 @@
       }
       
       File[] oldFiles = new File(LOG_DIR).listFiles(new FileFilter(){
-        public boolean accept(File file){
-          // delete if older than 30 days
-          if( now - file.lastModified() > THIRTY_DAYS_IN_MS ){
-            return true ; 
-          }
+          public boolean accept(File file){
+            // delete if older than 30 days
+            if( now - file.lastModified() > THIRTY_DAYS_IN_MS ){
+              return true ; 
+            }
             return false; 
-        }
-      });
+          }
+        });
       for( File f : oldFiles){
         f.delete(); 
         LOG.info("Deleting old history file : " + f.getName());

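The parseHistory() hunk above accumulates physical lines into one logical record until a line ends with a double quote, then hands the record to parseLine(). A minimal, self-contained sketch of that loop follows; the RecordHandler callback is a hypothetical stand-in for JobHistory.Listener, whose exact method signature is not shown in this diff.

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;

/** Sketch of the record-accumulation loop used by JobHistory.parseHistory. */
public class HistoryScanSketch {

  /** Hypothetical callback standing in for JobHistory.Listener. */
  interface RecordHandler {
    void handleRecord(String record);
  }

  /**
   * Reads a history log and emits one logical record per callback. Records may
   * span several physical lines; a record is complete only when the current
   * line ends with a double quote, mirroring the endsWith("\"") test above.
   */
  static void scan(File path, RecordHandler handler) throws IOException {
    BufferedReader reader = new BufferedReader(new FileReader(path));
    try {
      StringBuffer buf = new StringBuffer();
      String line;
      while ((line = reader.readLine()) != null) {
        buf.append(line);
        if (!line.trim().endsWith("\"")) {
          continue;               // record continues on the next line
        }
        handler.handleRecord(buf.toString());
        buf = new StringBuffer(); // start accumulating the next record
      }
    } finally {
      reader.close();
    }
  }

  public static void main(String[] args) throws IOException {
    scan(new File(args[0]), new RecordHandler() {
      public void handleRecord(String record) {
        System.out.println(record);
      }
    });
  }
}
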
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/LineRecordReader.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/LineRecordReader.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/LineRecordReader.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/LineRecordReader.java Tue Apr 17 13:37:59 2007
@@ -45,7 +45,7 @@
   private TextStuffer bridge = new TextStuffer();
 
   public LineRecordReader(Configuration job, FileSplit split)
-      throws IOException {
+    throws IOException {
     long start = split.getStart();
     long end = start + split.getLength();
     final Path file = split.getPath();
@@ -77,7 +77,7 @@
     this.start = offset;
     this.pos = offset;
     this.end = endOffset;    
-//    readLine(in, null); 
+    //    readLine(in, null); 
   }
   
   public WritableComparable createKey() {
@@ -111,7 +111,7 @@
   }
 
   public static long readLine(InputStream in, 
-      OutputStream out) throws IOException {
+                              OutputStream out) throws IOException {
     long bytes = 0;
     while (true) {
       

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/PhasedFileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/PhasedFileSystem.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/PhasedFileSystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/PhasedFileSystem.java Tue Apr 17 13:37:59 2007
@@ -44,7 +44,7 @@
    * @param taskid taskId
    */
   public PhasedFileSystem(FileSystem fs, String jobid, 
-      String tipid, String taskid) {
+                          String tipid, String taskid) {
     super(fs); 
     this.jobid = jobid; 
     this.tipid = tipid ; 
@@ -73,7 +73,7 @@
     if( finalNameToFileInfo.containsKey(finalFile) ){
       if( !overwrite ){
         throw new IOException("Error, file already exists : " + 
-            finalFile.toString()); 
+                              finalFile.toString()); 
       }else{
         // delete tempp file and let create a new one. 
         FileInfo fInfo = finalNameToFileInfo.get(finalFile); 
@@ -100,14 +100,14 @@
   }
   
   public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
-          short replication, long blockSize,Progressable progress)
-      throws IOException {
+                                   short replication, long blockSize,Progressable progress)
+    throws IOException {
     if( fs.exists(f) && !overwrite ){
       throw new IOException("Error creating file - already exists : " + f); 
     }
     FSDataOutputStream stream = 
       fs.create(setupFile(f, overwrite), overwrite, bufferSize, replication, 
-          blockSize, progress);
+                blockSize, progress);
     finalNameToFileInfo.get(f).setOpenFileStream(stream); 
     return stream ; 
   }
@@ -128,7 +128,7 @@
     FileInfo fInfo = finalNameToFileInfo.get(fPath) ; 
     if( null == fInfo ){
       throw new IOException("Error committing file! File was not created " + 
-          "with PhasedFileSystem : " + fPath); 
+                            "with PhasedFileSystem : " + fPath); 
     }
     try{
       fInfo.getOpenFileStream().close();
@@ -156,7 +156,7 @@
         }catch(IOException ioe){
           // rename failed, log error and delete temp files
           LOG.error("PhasedFileSystem failed to commit file : " + fPath 
-              + " error : " + ioe.getMessage()); 
+                    + " error : " + ioe.getMessage()); 
           fs.delete(fInfo.getTempPath());
         }
       }else{
@@ -225,81 +225,81 @@
   }
   
   @Override
-  public boolean setReplication(
-      Path src, short replication)
-      throws IOException {
+    public boolean setReplication(
+                                  Path src, short replication)
+    throws IOException {
     // throw IOException for interface compatibility with 
     // base class. 
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public boolean rename(
-      Path src, Path dst)
-      throws IOException {
+    public boolean rename(
+                          Path src, Path dst)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public boolean delete(
-      Path f)
-      throws IOException {
+    public boolean delete(
+                          Path f)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   /** @deprecated */ @Deprecated
-  @Override
-  public void lock(
-      Path f, boolean shared)
-      throws IOException {
+    @Override
+    public void lock(
+                     Path f, boolean shared)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   /** @deprecated */ @Deprecated
-  @Override
-  public void release(
-      Path f)
-      throws IOException {
+    @Override
+    public void release(
+                        Path f)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public void copyFromLocalFile(
-      boolean delSrc, Path src, Path dst)
-      throws IOException {
+    public void copyFromLocalFile(
+                                  boolean delSrc, Path src, Path dst)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public void copyToLocalFile(
-      boolean delSrc, Path src, Path dst)
-      throws IOException {
+    public void copyToLocalFile(
+                                boolean delSrc, Path src, Path dst)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public Path startLocalOutput(
-      Path fsOutputFile, Path tmpLocalFile)
-      throws IOException {
+    public Path startLocalOutput(
+                                 Path fsOutputFile, Path tmpLocalFile)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
- }
+  }
 
   @Override
-  public void completeLocalOutput(
-      Path fsOutputFile, Path tmpLocalFile)
-      throws IOException {
+    public void completeLocalOutput(
+                                    Path fsOutputFile, Path tmpLocalFile)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
- }
+  }
 
   @Override
-  public String[][] getFileCacheHints(
-      Path f, long start, long len)
-      throws IOException {
+    public String[][] getFileCacheHints(
+                                        Path f, long start, long len)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public String getName() {
+    public String getName() {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
@@ -318,28 +318,28 @@
       return openFileStream;
     }
     public void setOpenFileStream(
-        OutputStream openFileStream) {
+                                  OutputStream openFileStream) {
       this.openFileStream = openFileStream;
     }
     public Path getFinalPath() {
       return finalPath;
     }
     public void setFinalPath(
-        Path finalPath) {
+                             Path finalPath) {
       this.finalPath = finalPath;
     }
     public boolean isOverwrite() {
       return overwrite;
     }
     public void setOverwrite(
-        boolean overwrite) {
+                             boolean overwrite) {
       this.overwrite = overwrite;
     }
     public Path getTempPath() {
       return tempPath;
     }
     public void setTempPath(
-        Path tempPath) {
+                            Path tempPath) {
       this.tempPath = tempPath;
     }
   }

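For reference, the PhasedFileSystem hunks above expose a constructor taking the wrapped FileSystem plus job, tip and task ids, and a create() overload with overwrite flag, buffer size, replication, block size and a Progressable. A rough usage sketch under those signatures; the job/tip/task ids and paths are placeholders, and the commit step that promotes the temp file to its final path is omitted because its method name is not visible in these hunks.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.PhasedFileSystem;
import org.apache.hadoop.util.Progressable;

import java.io.IOException;

/** Sketch of writing task output through PhasedFileSystem (assumed usage). */
public class PhasedWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Wrap the real file system; the ids here are placeholders.
    PhasedFileSystem phased =
      new PhasedFileSystem(fs, "job_0001", "tip_0001", "task_0001_m_000000_0");

    // create() matches the signature in the diff: overwrite flag, buffer size,
    // replication, block size and a Progressable callback.
    FSDataOutputStream out = phased.create(
        new Path("/user/out/part-00000"), false, 4096, (short) 3,
        64L * 1024 * 1024,
        new Progressable() {
          public void progress() throws IOException { /* report liveness */ }
        });
    out.write("hello\n".getBytes());
    out.close();
    // The rename of the temp file to its final path happens at commit time,
    // which is not shown in this sketch.
  }
}
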
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java Tue Apr 17 13:37:59 2007
@@ -12,137 +12,137 @@
  *
  */
 public class TaskCompletionEvent implements Writable{
-    static public enum Status {FAILED, SUCCEEDED, OBSOLETE};
+  static public enum Status {FAILED, SUCCEEDED, OBSOLETE};
     
-    private int eventId ; 
-    private String taskTrackerHttp ;
-    private String taskId ;
-    Status status ; 
-    boolean isMap = false ;
-    private int idWithinJob;
-    public static final TaskCompletionEvent[] EMPTY_ARRAY = 
-        new TaskCompletionEvent[0];
-    /**
-     * Default constructor for Writable.
-     *
-     */
-    public TaskCompletionEvent(){}
-    /**
-     * Constructor. eventId should be created externally and incremented
-     * per event for each job. 
-     * @param eventId event id, event id should be unique and assigned in
-     *  incrementally, starting from 0. 
-     * @param taskId task id
-     * @param status task's status 
-     * @param taskTrackerHttp task tracker's host:port for http. 
-     */
-    public TaskCompletionEvent(int eventId, 
-        String taskId,
-        int idWithinJob,
-        boolean isMap,
-        Status status, 
-        String taskTrackerHttp){
+  private int eventId ; 
+  private String taskTrackerHttp ;
+  private String taskId ;
+  Status status ; 
+  boolean isMap = false ;
+  private int idWithinJob;
+  public static final TaskCompletionEvent[] EMPTY_ARRAY = 
+    new TaskCompletionEvent[0];
+  /**
+   * Default constructor for Writable.
+   *
+   */
+  public TaskCompletionEvent(){}
+  /**
+   * Constructor. eventId should be created externally and incremented
+   * per event for each job. 
+   * @param eventId event id, event id should be unique and assigned in
+   *  incrementally, starting from 0. 
+   * @param taskId task id
+   * @param status task's status 
+   * @param taskTrackerHttp task tracker's host:port for http. 
+   */
+  public TaskCompletionEvent(int eventId, 
+                             String taskId,
+                             int idWithinJob,
+                             boolean isMap,
+                             Status status, 
+                             String taskTrackerHttp){
       
-        this.taskId = taskId ;
-        this.idWithinJob = idWithinJob ;
-        this.isMap = isMap ;
-        this.eventId = eventId ; 
-        this.status =status ; 
-        this.taskTrackerHttp = taskTrackerHttp ;
-    }
-    /**
-     * Returns event Id. 
-     * @return event id
-     */
-    public int getEventId() {
-        return eventId;
-    }
-    /**
-     * Returns task id. 
-     * @return task id
-     */
-    public String getTaskId() {
-        return taskId;
-    }
-    /**
-     * Returns enum Status.SUCESS or Status.FAILURE.
-     * @return task tracker status
-     */
-    public Status getTaskStatus() {
-        return status;
-    }
-    /**
-     * http location of the tasktracker where this task ran. 
-     * @return http location of tasktracker user logs
-     */
-    public String getTaskTrackerHttp() {
-        return taskTrackerHttp;
-    }
-    /**
-     * set event Id. should be assigned incrementally starting from 0. 
-     * @param eventId
-     */
-    public void setEventId(
-        int eventId) {
-        this.eventId = eventId;
-    }
-    /**
-     * Sets task id. 
-     * @param taskId
-     */
-    public void setTaskId(
-        String taskId) {
-        this.taskId = taskId;
-    }
-    /**
-     * Set task status. 
-     * @param status
-     */
-    public void setTaskStatus(
-        Status status) {
-        this.status = status;
-    }
-    /**
-     * Set task tracker http location. 
-     * @param taskTrackerHttp
-     */
-    public void setTaskTrackerHttp(
-        String taskTrackerHttp) {
-        this.taskTrackerHttp = taskTrackerHttp;
-    }
+    this.taskId = taskId ;
+    this.idWithinJob = idWithinJob ;
+    this.isMap = isMap ;
+    this.eventId = eventId ; 
+    this.status =status ; 
+    this.taskTrackerHttp = taskTrackerHttp ;
+  }
+  /**
+   * Returns event Id. 
+   * @return event id
+   */
+  public int getEventId() {
+    return eventId;
+  }
+  /**
+   * Returns task id. 
+   * @return task id
+   */
+  public String getTaskId() {
+    return taskId;
+  }
+  /**
+   * Returns enum Status.SUCESS or Status.FAILURE.
+   * @return task tracker status
+   */
+  public Status getTaskStatus() {
+    return status;
+  }
+  /**
+   * http location of the tasktracker where this task ran. 
+   * @return http location of tasktracker user logs
+   */
+  public String getTaskTrackerHttp() {
+    return taskTrackerHttp;
+  }
+  /**
+   * set event Id. should be assigned incrementally starting from 0. 
+   * @param eventId
+   */
+  public void setEventId(
+                         int eventId) {
+    this.eventId = eventId;
+  }
+  /**
+   * Sets task id. 
+   * @param taskId
+   */
+  public void setTaskId(
+                        String taskId) {
+    this.taskId = taskId;
+  }
+  /**
+   * Set task status. 
+   * @param status
+   */
+  public void setTaskStatus(
+                            Status status) {
+    this.status = status;
+  }
+  /**
+   * Set task tracker http location. 
+   * @param taskTrackerHttp
+   */
+  public void setTaskTrackerHttp(
+                                 String taskTrackerHttp) {
+    this.taskTrackerHttp = taskTrackerHttp;
+  }
     
-    public String toString(){
-        StringBuffer buf = new StringBuffer(); 
-        buf.append("Task Id : "); 
-        buf.append( taskId ) ; 
-        buf.append(", Status : ");  
-        buf.append( status.name() ) ;
-        return buf.toString();
-    }
+  public String toString(){
+    StringBuffer buf = new StringBuffer(); 
+    buf.append("Task Id : "); 
+    buf.append( taskId ) ; 
+    buf.append(", Status : ");  
+    buf.append( status.name() ) ;
+    return buf.toString();
+  }
     
-    public boolean isMapTask() {
-        return isMap;
-    }
+  public boolean isMapTask() {
+    return isMap;
+  }
     
-    public int idWithinJob() {
-      return idWithinJob;
-    }
-    //////////////////////////////////////////////
-    // Writable
-    //////////////////////////////////////////////
-    public void write(DataOutput out) throws IOException {
-        WritableUtils.writeString(out, taskId); 
-        WritableUtils.writeVInt(out, idWithinJob);
-        out.writeBoolean(isMap);
-        WritableUtils.writeEnum(out, status); 
-        WritableUtils.writeString(out, taskTrackerHttp);
-    }
+  public int idWithinJob() {
+    return idWithinJob;
+  }
+  //////////////////////////////////////////////
+  // Writable
+  //////////////////////////////////////////////
+  public void write(DataOutput out) throws IOException {
+    WritableUtils.writeString(out, taskId); 
+    WritableUtils.writeVInt(out, idWithinJob);
+    out.writeBoolean(isMap);
+    WritableUtils.writeEnum(out, status); 
+    WritableUtils.writeString(out, taskTrackerHttp);
+  }
   
-    public void readFields(DataInput in) throws IOException {
-        this.taskId = WritableUtils.readString(in) ; 
-        this.idWithinJob = WritableUtils.readVInt(in);
-        this.isMap = in.readBoolean();
-        this.status = WritableUtils.readEnum(in, Status.class);
-        this.taskTrackerHttp = WritableUtils.readString(in);
-    }
+  public void readFields(DataInput in) throws IOException {
+    this.taskId = WritableUtils.readString(in) ; 
+    this.idWithinJob = WritableUtils.readVInt(in);
+    this.isMap = in.readBoolean();
+    this.status = WritableUtils.readEnum(in, Status.class);
+    this.taskTrackerHttp = WritableUtils.readString(in);
+  }
 }

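TaskCompletionEvent above implements Writable via write(DataOutput) and readFields(DataInput), so an event can be round-tripped through plain Java streams. A small sketch, using placeholder task and tracker values:

import org.apache.hadoop.mapred.TaskCompletionEvent;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

/** Round-trips a TaskCompletionEvent through its Writable methods. */
public class TaskEventRoundTrip {
  public static void main(String[] args) throws IOException {
    TaskCompletionEvent event = new TaskCompletionEvent(
        0,                                  // eventId, assigned incrementally from 0
        "task_0001_m_000000_0",             // placeholder task id
        0,                                  // idWithinJob
        true,                               // map task
        TaskCompletionEvent.Status.SUCCEEDED,
        "http://tracker:50060");            // placeholder tracker http location

    // Serialize with write(DataOutput) ...
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    event.write(new DataOutputStream(bytes));

    // ... and rebuild with readFields(DataInput).
    TaskCompletionEvent copy = new TaskCompletionEvent();
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy);  // "Task Id : ..., Status : SUCCEEDED"
  }
}
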
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskLogAppender.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskLogAppender.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskLogAppender.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskLogAppender.java Tue Apr 17 13:37:59 2007
@@ -24,13 +24,13 @@
   public void activateOptions() {
     taskLogWriter = 
       new TaskLog.Writer(taskId, TaskLog.LogFilter.SYSLOG, 
-              noKeepSplits, totalLogFileSize, purgeLogSplits, logsRetainHours);
+                         noKeepSplits, totalLogFileSize, purgeLogSplits, logsRetainHours);
     try {
       taskLogWriter.init();
     } catch (IOException ioe) {
       taskLogWriter = null;
       errorHandler.error("Failed to initialize the task's logging " +
-              "infrastructure: " + StringUtils.stringifyException(ioe));
+                         "infrastructure: " + StringUtils.stringifyException(ioe));
     }
   }
   
@@ -42,7 +42,7 @@
 
     if (this.layout == null) {
       errorHandler.error("No layout for appender " + name , 
-              null, ErrorCode.MISSING_LAYOUT );
+                         null, ErrorCode.MISSING_LAYOUT );
     }
     
     // Log the message to the task's log
@@ -51,8 +51,8 @@
       taskLogWriter.write(logMessage.getBytes(), 0, logMessage.length());
     } catch (IOException ioe) {
       errorHandler.error("Failed to log: '" + logMessage + 
-              "' to the task's logging infrastructure with the exception: " + 
-              StringUtils.stringifyException(ioe));
+                         "' to the task's logging infrastructure with the exception: " + 
+                         StringUtils.stringifyException(ioe));
     }
   }
 
@@ -66,7 +66,7 @@
         taskLogWriter.close();
       } catch (IOException ioe) {
         errorHandler.error("Failed to close the task's log with the exception: " 
-                + StringUtils.stringifyException(ioe));
+                           + StringUtils.stringifyException(ioe));
       }
     } else {
       errorHandler.error("Calling 'close' on uninitialize/closed logger");

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java Tue Apr 17 13:37:59 2007
@@ -36,7 +36,7 @@
  */
 public class MultithreadedMapRunner implements MapRunnable {
   private static final Log LOG =
-      LogFactory.getLog(MultithreadedMapRunner.class.getName());
+    LogFactory.getLog(MultithreadedMapRunner.class.getName());
 
   private JobConf job;
   private Mapper mapper;
@@ -45,10 +45,10 @@
 
   public void configure(JobConf job) {
     int numberOfThreads =
-        job.getInt("mapred.map.multithreadedrunner.threads", 10);
+      job.getInt("mapred.map.multithreadedrunner.threads", 10);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Configuring job " + job.getJobName() +
-          " to use " + numberOfThreads + " threads" );
+                " to use " + numberOfThreads + " threads" );
     }
 
     this.job = job;
@@ -75,7 +75,7 @@
         // If threads are not available from the thread-pool this method
         // will block until there is a thread available.
         executorService.execute(
-            new MapperInvokeRunable(key, value, output, reporter));
+                                new MapperInvokeRunable(key, value, output, reporter));
 
         // Checking if a Mapper.map within a Runnable has generated an
         // IOException. If so we rethrow it to force an abort of the Map
@@ -92,7 +92,7 @@
 
       if (LOG.isDebugEnabled()) {
         LOG.debug("Finished dispatching all Mappper.map calls, job "
-            + job.getJobName());
+                  + job.getJobName());
       }
 
       // Graceful shutdown of the Threadpool, it will let all scheduled
@@ -105,7 +105,7 @@
         while (!executorService.awaitTermination(100, TimeUnit.MILLISECONDS)) {
           if (LOG.isDebugEnabled()) {
             LOG.debug("Awaiting all running Mappper.map calls to finish, job "
-                + job.getJobName());
+                      + job.getJobName());
           }
 
           // Checking if a Mapper.map within a Runnable has generated an
@@ -141,7 +141,7 @@
       }
 
     } finally {
-        mapper.close();
+      mapper.close();
     }
   }
 
@@ -165,7 +165,7 @@
      * @param reporter
      */
     public MapperInvokeRunable(WritableComparable key, Writable value,
-        OutputCollector output, Reporter reporter) {
+                               OutputCollector output, Reporter reporter) {
       this.key = key;
       this.value = value;
       this.output = output;

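MultithreadedMapRunner above reads its thread count from mapred.map.multithreadedrunner.threads (default 10) in configure(). A brief configuration sketch; setMapRunnerClass() is assumed to be the usual JobConf hook for selecting a MapRunnable, and the thread count of 20 is arbitrary.

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.MultithreadedMapRunner;

/** Configures a job to run its Mapper through MultithreadedMapRunner. */
public class MultithreadedJobSetup {
  public static JobConf configure(JobConf job) {
    // Use the multithreaded runner instead of the default MapRunner (assumed hook).
    job.setMapRunnerClass(MultithreadedMapRunner.class);
    // The runner reads this key in configure(); it defaults to 10 threads.
    job.setInt("mapred.map.multithreadedrunner.threads", 20);
    return job;
  }
}
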
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/CodeGenerator.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/CodeGenerator.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/CodeGenerator.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/CodeGenerator.java Tue Apr 17 13:37:59 2007
@@ -28,7 +28,7 @@
 abstract class CodeGenerator {
   
   private static HashMap<String, CodeGenerator> generators =
-      new HashMap<String, CodeGenerator>();
+    new HashMap<String, CodeGenerator>();
   
   static {
     register("c", new CGenerator());
@@ -45,8 +45,8 @@
   }
   
   abstract void genCode(String file,
-      ArrayList<JFile> inclFiles,
-      ArrayList<JRecord> records,
-      String destDir,
-      ArrayList<String> options) throws IOException;
+                        ArrayList<JFile> inclFiles,
+                        ArrayList<JRecord> records,
+                        String destDir,
+                        ArrayList<String> options) throws IOException;
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/DiskChecker.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/DiskChecker.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/DiskChecker.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/DiskChecker.java Tue Apr 17 13:37:59 2007
@@ -10,34 +10,34 @@
 
 public class DiskChecker {
 
-    public static class DiskErrorException extends IOException {
-      public DiskErrorException(String msg) {
-        super(msg);
-      }
+  public static class DiskErrorException extends IOException {
+    public DiskErrorException(String msg) {
+      super(msg);
     }
+  }
     
-    public static class DiskOutOfSpaceException extends IOException {
-        public DiskOutOfSpaceException(String msg) {
-          super(msg);
-        }
-      }
+  public static class DiskOutOfSpaceException extends IOException {
+    public DiskOutOfSpaceException(String msg) {
+      super(msg);
+    }
+  }
       
-    public static void checkDir( File dir ) throws DiskErrorException {
-        if( !dir.exists() && !dir.mkdirs() )
-            throw new DiskErrorException( "can not create directory: " 
-                    + dir.toString() );
+  public static void checkDir( File dir ) throws DiskErrorException {
+    if( !dir.exists() && !dir.mkdirs() )
+      throw new DiskErrorException( "can not create directory: " 
+                                    + dir.toString() );
         
-        if ( !dir.isDirectory() )
-            throw new DiskErrorException( "not a directory: " 
-                    + dir.toString() );
+    if ( !dir.isDirectory() )
+      throw new DiskErrorException( "not a directory: " 
+                                    + dir.toString() );
             
-        if( !dir.canRead() )
-            throw new DiskErrorException( "directory is not readable: " 
-                    + dir.toString() );
+    if( !dir.canRead() )
+      throw new DiskErrorException( "directory is not readable: " 
+                                    + dir.toString() );
             
-        if( !dir.canWrite() )
-            throw new DiskErrorException( "directory is not writable: " 
-                    + dir.toString() );
-    }
+    if( !dir.canWrite() )
+      throw new DiskErrorException( "directory is not writable: " 
+                                    + dir.toString() );
+  }
 
 }

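DiskChecker.checkDir() above creates the directory if necessary and then verifies it is a readable, writable directory, throwing DiskErrorException otherwise. A minimal caller, with a placeholder path:

import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

import java.io.File;

/** Validates a local directory before using it, as daemons do with DiskChecker. */
public class LocalDirCheck {
  public static void main(String[] args) {
    try {
      // Creates the directory if needed, then verifies it is a readable,
      // writable directory; otherwise a DiskErrorException is thrown.
      DiskChecker.checkDir(new File(args.length > 0 ? args[0] : "/tmp/hadoop-local"));
      System.out.println("directory is usable");
    } catch (DiskErrorException e) {
      System.err.println("unusable directory: " + e.getMessage());
    }
  }
}
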
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/HostsFileReader.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/HostsFileReader.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/HostsFileReader.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/HostsFileReader.java Tue Apr 17 13:37:59 2007
@@ -44,7 +44,7 @@
     excludes.clear();
     
     if (!includesFile.equals("")) {
-        readFileToSet(includesFile, includes);
+      readFileToSet(includesFile, includes);
     }
     if (!excludesFile.equals("")) {
       readFileToSet(excludesFile, excludes);

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/Progressable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/Progressable.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/Progressable.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/Progressable.java Tue Apr 17 13:37:59 2007
@@ -9,8 +9,8 @@
  * @author Owen O'Malley
  */
 public interface Progressable {
-    /** callback for reporting progress. Used by DFSclient to report
-     * progress while writing a block of DFS file.
-     */
-    public void progress() throws IOException;
-}
\ No newline at end of file
+  /** callback for reporting progress. Used by DFSclient to report
+   * progress while writing a block of DFS file.
+   */
+  public void progress() throws IOException;
+}

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java Tue Apr 17 13:37:59 2007
@@ -17,17 +17,17 @@
   private static NameNode namenode;
   private static FSNamesystem.ReplicationTargetChooser replicator;
   private static DatanodeDescriptor dataNodes[] = 
-         new DatanodeDescriptor[] {
-    new DatanodeDescriptor(new DatanodeID("h1:5020", "0", -1), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h2:5020", "0", -1), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h3:5020", "0", -1), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h4:5020", "0", -1), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h5:5020", "0", -1), "/d2/r3"),
-    new DatanodeDescriptor(new DatanodeID("h6:5020", "0", -1), "/d2/r3")
- };
+    new DatanodeDescriptor[] {
+      new DatanodeDescriptor(new DatanodeID("h1:5020", "0", -1), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("h2:5020", "0", -1), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("h3:5020", "0", -1), "/d1/r2"),
+      new DatanodeDescriptor(new DatanodeID("h4:5020", "0", -1), "/d1/r2"),
+      new DatanodeDescriptor(new DatanodeID("h5:5020", "0", -1), "/d2/r3"),
+      new DatanodeDescriptor(new DatanodeID("h6:5020", "0", -1), "/d2/r3")
+    };
    
-private final static DatanodeDescriptor NODE = 
-  new DatanodeDescriptor(new DatanodeID("h7:5020", "0", -1), "/d2/r4");
+  private final static DatanodeDescriptor NODE = 
+    new DatanodeDescriptor(new DatanodeID("h7:5020", "0", -1), "/d2/r4");
   
   static {
     try {
@@ -47,8 +47,8 @@
     }
     for( int i=0; i<NUM_OF_DATANODES; i++) {
       dataNodes[i].updateHeartbeat(
-          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
+                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
     }
   }
   
@@ -62,34 +62,34 @@
    */
   public void testChooseTarget1() throws Exception {
     dataNodes[0].updateHeartbeat(
-              2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-              FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 4); // overloaded
+                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                 FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 4); // overloaded
 
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, dataNodes[0], null, BLOCK_SIZE);
+                                      0, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], null, BLOCK_SIZE);
+                                      1, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[0]);
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], null, BLOCK_SIZE);
+                                      2, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        3, dataNodes[0], null, BLOCK_SIZE);
+                                      3, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[0]);
     assertTrue(cluster.isOnSameRack(targets[0], targets[1]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[2]));
     
     targets = replicator.chooseTarget(
-        4, dataNodes[0], null, BLOCK_SIZE);
+                                      4, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
     assertTrue(cluster.isOnSameRack(targets[0], targets[1]));
@@ -97,8 +97,8 @@
     assertFalse(cluster.isOnSameRack(targets[0], targets[3]));
 
     dataNodes[0].updateHeartbeat(
-        2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
+                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                 FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
   }
 
   /**
@@ -116,20 +116,20 @@
     excludedNodes = new ArrayList<DatanodeDescriptor>();
     excludedNodes.add(dataNodes[1]); 
     targets = replicator.chooseTarget(
-        0, dataNodes[0], excludedNodes, BLOCK_SIZE);
+                                      0, dataNodes[0], excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     excludedNodes = new ArrayList<DatanodeDescriptor>();
     excludedNodes.add(dataNodes[1]); 
     targets = replicator.chooseTarget(
-        1, dataNodes[0], excludedNodes, BLOCK_SIZE);
+                                      1, dataNodes[0], excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[0]);
     
     excludedNodes = new ArrayList<DatanodeDescriptor>();
     excludedNodes.add(dataNodes[1]); 
     targets = replicator.chooseTarget(
-        2, dataNodes[0], excludedNodes, BLOCK_SIZE);
+                                      2, dataNodes[0], excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
@@ -137,7 +137,7 @@
     excludedNodes = new ArrayList<DatanodeDescriptor>();
     excludedNodes.add(dataNodes[1]); 
     targets = replicator.chooseTarget(
-        3, dataNodes[0], excludedNodes, BLOCK_SIZE);
+                                      3, dataNodes[0], excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
@@ -146,14 +146,14 @@
     excludedNodes = new ArrayList<DatanodeDescriptor>();
     excludedNodes.add(dataNodes[1]); 
     targets = replicator.chooseTarget(
-        4, dataNodes[0], excludedNodes, BLOCK_SIZE);
+                                      4, dataNodes[0], excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
     for(int i=1; i<4; i++) {
       assertFalse(cluster.isOnSameRack(targets[0], targets[i]));
     }
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
-        cluster.isOnSameRack(targets[2], targets[3]));
+               cluster.isOnSameRack(targets[2], targets[3]));
     assertFalse(cluster.isOnSameRack(targets[1], targets[3]));
   }
 
@@ -168,46 +168,46 @@
   public void testChooseTarget3() throws Exception {
     // make data node 0 to be not qualified to choose
     dataNodes[0].updateHeartbeat(
-        2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-        (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0); // no space
+                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                 (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0); // no space
         
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, dataNodes[0], null, BLOCK_SIZE);
+                                      0, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], null, BLOCK_SIZE);
+                                      1, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[1]);
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], null, BLOCK_SIZE);
+                                      2, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[1]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        3, dataNodes[0], null, BLOCK_SIZE);
+                                      3, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[1]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        4, dataNodes[0], null, BLOCK_SIZE);
+                                      4, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[1]);
     for(int i=1; i<4; i++) {
       assertFalse(cluster.isOnSameRack(targets[0], targets[i]));
     }
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
-        cluster.isOnSameRack(targets[2], targets[3]));
+               cluster.isOnSameRack(targets[2], targets[3]));
     assertFalse(cluster.isOnSameRack(targets[1], targets[3]));
 
     dataNodes[0].updateHeartbeat(
-        2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
+                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                 FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
   }
   
   /**
@@ -222,40 +222,40 @@
     // make data node 0 & 1 to be not qualified to choose: not enough disk space
     for(int i=0; i<2; i++) {
       dataNodes[i].updateHeartbeat(
-          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-          (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0);
+                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                   (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0);
     }
       
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, dataNodes[0], null, BLOCK_SIZE);
+                                      0, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], null, BLOCK_SIZE);
+                                      1, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], null, BLOCK_SIZE);
+                                      2, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        3, dataNodes[0], null, BLOCK_SIZE);
+                                      3, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     for(int i=0; i<3; i++) {
       assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0]));
     }
     assertTrue(cluster.isOnSameRack(targets[0], targets[1]) ||
-        cluster.isOnSameRack(targets[1], targets[2]));
+               cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[2]));
     
     for(int i=0; i<2; i++) {
       dataNodes[i].updateHeartbeat(
-          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-          FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
+                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                   FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
     }
   }
   /**
@@ -268,20 +268,20 @@
   public void testChooseTarget5() throws Exception {
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, NODE, null, BLOCK_SIZE);
+                                      0, NODE, null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, NODE, null, BLOCK_SIZE);
+                                      1, NODE, null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     
     targets = replicator.chooseTarget(
-        2, NODE, null, BLOCK_SIZE);
+                                      2, NODE, null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        3, NODE, null, BLOCK_SIZE);
+                                      3, NODE, null, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(targets[0], targets[1]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[2]));    
@@ -300,22 +300,22 @@
     DatanodeDescriptor[] targets;
     
     targets = replicator.chooseTarget(
-        0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        3, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      3, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[1]));
@@ -336,16 +336,16 @@
 
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[1]));
@@ -365,16 +365,16 @@
     
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[1]));
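
The chooseTarget hunks above only re-indent the test code, but they also document the placement the tests expect from the replicator: with two replicas the second target goes straight to another rack, while in the three-replica case the second target shares the writer's rack and the third is placed off-rack; excluded or out-of-space nodes are skipped. A minimal sketch of the three-replica call pattern as asserted in testChooseTarget1 (replicator, cluster, dataNodes and BLOCK_SIZE are the test fixture's fields; imports as in the test file):

// Sketch mirroring the three-replica case asserted above; not a new test.
DatanodeDescriptor[] targets =
    replicator.chooseTarget(3, dataNodes[0], null, BLOCK_SIZE);
assertEquals(3, targets.length);
assertEquals(dataNodes[0], targets[0]);                    // writer kept as first target
assertTrue(cluster.isOnSameRack(targets[0], targets[1]));  // second stays on the writer's rack
assertFalse(cluster.isOnSameRack(targets[0], targets[2])); // third goes to another rack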

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemTest.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemTest.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemTest.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemTest.java Tue Apr 17 13:37:59 2007
@@ -5,7 +5,7 @@
 public class Jets3tS3FileSystemTest extends S3FileSystemBaseTest {
 
   @Override
-  public FileSystemStore getFileSystemStore() throws IOException {
+    public FileSystemStore getFileSystemStore() throws IOException {
     return null; // use default store
   }
 

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java Tue Apr 17 13:37:59 2007
@@ -21,7 +21,7 @@
   abstract FileSystemStore getFileSystemStore() throws IOException;
 
   @Override
-  protected void setUp() throws IOException {
+    protected void setUp() throws IOException {
     Configuration conf = new Configuration();
     
     s3FileSystem = new S3FileSystem(getFileSystemStore());
@@ -34,7 +34,7 @@
   }
 
   @Override
-  protected void tearDown() throws Exception {
+    protected void tearDown() throws Exception {
     s3FileSystem.purge();
     s3FileSystem.close();
   }
@@ -83,7 +83,7 @@
 
   public void testListPathsRaw() throws Exception {
     Path[] testDirs = { new Path("/test/hadoop/a"), new Path("/test/hadoop/b"),
-        new Path("/test/hadoop/c/1"), };
+                        new Path("/test/hadoop/c/1"), };
     assertNull(s3FileSystem.listPaths(testDirs[0]));
 
     for (Path path : testDirs) {
@@ -136,8 +136,8 @@
     s3FileSystem.mkdirs(path.getParent());
 
     FSDataOutputStream out = s3FileSystem.create(path, false,
-            s3FileSystem.getConf().getInt("io.file.buffer.size", 4096), 
-            (short) 1, BLOCK_SIZE);
+                                                 s3FileSystem.getConf().getInt("io.file.buffer.size", 4096), 
+                                                 (short) 1, BLOCK_SIZE);
     out.write(data, 0, len);
     out.close();
 
@@ -175,16 +175,16 @@
     
     try {
       s3FileSystem.create(path, false,
-              s3FileSystem.getConf().getInt("io.file.buffer.size", 4096),
-              (short) 1, 128);
+                          s3FileSystem.getConf().getInt("io.file.buffer.size", 4096),
+                          (short) 1, 128);
       fail("Should throw IOException.");
     } catch (IOException e) {
       // Expected
     }
     
     FSDataOutputStream out = s3FileSystem.create(path, true,
-            s3FileSystem.getConf().getInt("io.file.buffer.size", 4096), 
-            (short) 1, BLOCK_SIZE);
+                                                 s3FileSystem.getConf().getInt("io.file.buffer.size", 4096), 
+                                                 (short) 1, BLOCK_SIZE);
     out.write(data, 0, BLOCK_SIZE / 2);
     out.close();
     
@@ -328,8 +328,8 @@
 
   private void createEmptyFile(Path path) throws IOException {
     FSDataOutputStream out = s3FileSystem.create(path, false,
-            s3FileSystem.getConf().getInt("io.file.buffer.size", 4096),
-            (short) 1, BLOCK_SIZE);
+                                                 s3FileSystem.getConf().getInt("io.file.buffer.size", 4096),
+                                                 (short) 1, BLOCK_SIZE);
     out.write(data, 0, BLOCK_SIZE);
     out.close();
   }
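
The S3 hunks above repeatedly exercise the five-argument FileSystem.create overload (path, overwrite flag, buffer size, replication, block size); the try/catch hunk further up also shows that a second create of an existing path with overwrite set to false is expected to throw IOException. A compact sketch of the write path, using only calls that appear in the test (s3FileSystem, data and BLOCK_SIZE are the fixture's fields):

// Sketch of the create/write pattern used throughout the S3 tests above.
Path path = new Path("/test/hadoop/file");
s3FileSystem.mkdirs(path.getParent());
FSDataOutputStream out = s3FileSystem.create(path, false,
    s3FileSystem.getConf().getInt("io.file.buffer.size", 4096),
    (short) 1, BLOCK_SIZE);
out.write(data, 0, BLOCK_SIZE);
out.close();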

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestINode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestINode.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestINode.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestINode.java Tue Apr 17 13:37:59 2007
@@ -23,7 +23,7 @@
     assertEquals("Length", 1, deserializedBlocks.length);
     assertEquals("Id", blocks[0].getId(), deserializedBlocks[0].getId());
     assertEquals("Length", blocks[0].getLength(), deserializedBlocks[0]
-        .getLength());
+                 .getLength());
 
   }
   

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystem.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystem.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystem.java Tue Apr 17 13:37:59 2007
@@ -8,7 +8,7 @@
 public class TestInMemoryS3FileSystem extends S3FileSystemBaseTest {
 
   @Override
-  public FileSystemStore getFileSystemStore() throws IOException {
+    public FileSystemStore getFileSystemStore() throws IOException {
     return new InMemoryFileSystemStore();
   }
   

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/retry/TestRetryProxy.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/retry/TestRetryProxy.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/retry/TestRetryProxy.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/retry/TestRetryProxy.java Tue Apr 17 13:37:59 2007
@@ -22,7 +22,7 @@
   private UnreliableImplementation unreliableImpl;
   
   @Override
-  protected void setUp() throws Exception {
+    protected void setUp() throws Exception {
     unreliableImpl = new UnreliableImplementation();
   }
 
@@ -53,7 +53,7 @@
   
   public void testRetryForever() throws UnreliableException {
     UnreliableInterface unreliable = (UnreliableInterface)
-    RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
+      RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
     unreliable.alwaysSucceeds();
     unreliable.failsOnceThenSucceeds();
     unreliable.failsTenTimesThenSucceeds();
@@ -61,8 +61,8 @@
   
   public void testRetryUpToMaximumCountWithFixedSleep() throws UnreliableException {
     UnreliableInterface unreliable = (UnreliableInterface)
-    RetryProxy.create(UnreliableInterface.class, unreliableImpl,
-        retryUpToMaximumCountWithFixedSleep(8, 1, TimeUnit.NANOSECONDS));
+      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+                        retryUpToMaximumCountWithFixedSleep(8, 1, TimeUnit.NANOSECONDS));
     unreliable.alwaysSucceeds();
     unreliable.failsOnceThenSucceeds();
     try {
@@ -75,8 +75,8 @@
   
   public void testRetryUpToMaximumTimeWithFixedSleep() throws UnreliableException {
     UnreliableInterface unreliable = (UnreliableInterface)
-    RetryProxy.create(UnreliableInterface.class, unreliableImpl,
-        retryUpToMaximumTimeWithFixedSleep(80, 10, TimeUnit.NANOSECONDS));
+      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+                        retryUpToMaximumTimeWithFixedSleep(80, 10, TimeUnit.NANOSECONDS));
     unreliable.alwaysSucceeds();
     unreliable.failsOnceThenSucceeds();
     try {
@@ -89,8 +89,8 @@
   
   public void testRetryUpToMaximumCountWithProportionalSleep() throws UnreliableException {
     UnreliableInterface unreliable = (UnreliableInterface)
-    RetryProxy.create(UnreliableInterface.class, unreliableImpl,
-        retryUpToMaximumCountWithProportionalSleep(8, 1, TimeUnit.NANOSECONDS));
+      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+                        retryUpToMaximumCountWithProportionalSleep(8, 1, TimeUnit.NANOSECONDS));
     unreliable.alwaysSucceeds();
     unreliable.failsOnceThenSucceeds();
     try {
@@ -106,8 +106,8 @@
       Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(FatalException.class, TRY_ONCE_THEN_FAIL);
     
     UnreliableInterface unreliable = (UnreliableInterface)
-    RetryProxy.create(UnreliableInterface.class, unreliableImpl,
-        retryByException(RETRY_FOREVER, exceptionToPolicyMap));
+      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+                        retryByException(RETRY_FOREVER, exceptionToPolicyMap));
     unreliable.failsOnceThenSucceeds();
     try {
       unreliable.alwaysfailsWithFatalException();
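
The retry tests above all follow one pattern: wrap the test's UnreliableImplementation behind its UnreliableInterface with RetryProxy.create and a retry policy, then call methods that fail a known number of times. The policy factories used unqualified here (retryUpToMaximumCountWithFixedSleep and friends) are statically imported in the test, presumably from RetryPolicies. A minimal sketch of that pattern, with the test's own helper types:

// Sketch of the proxy pattern exercised above; the proxy retries failed
// calls according to the supplied policy before letting exceptions surface.
UnreliableInterface unreliable = (UnreliableInterface)
    RetryProxy.create(UnreliableInterface.class, new UnreliableImplementation(),
                      retryUpToMaximumCountWithFixedSleep(8, 1, TimeUnit.NANOSECONDS));
unreliable.alwaysSucceeds();          // no retry needed
unreliable.failsOnceThenSucceeds();   // the single failure is retried transparently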

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/net/TestNetworkTopology.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/net/TestNetworkTopology.java Tue Apr 17 13:37:59 2007
@@ -8,13 +8,13 @@
 public class TestNetworkTopology extends TestCase {
   private final static NetworkTopology cluster = new NetworkTopology();
   private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
-      new DatanodeDescriptor(new DatanodeID("h1:5020", "0", -1), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h2:5020", "0", -1), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h3:5020", "0", -1), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h4:5020", "0", -1), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h5:5020", "0", -1), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h6:5020", "0", -1), "/d2/r3"),
-      new DatanodeDescriptor(new DatanodeID("h7:5020", "0", -1), "/d2/r3")
+    new DatanodeDescriptor(new DatanodeID("h1:5020", "0", -1), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("h2:5020", "0", -1), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("h3:5020", "0", -1), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h4:5020", "0", -1), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h5:5020", "0", -1), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h6:5020", "0", -1), "/d2/r3"),
+    new DatanodeDescriptor(new DatanodeID("h7:5020", "0", -1), "/d2/r3")
   };
   private final static DatanodeDescriptor NODE = 
     new DatanodeDescriptor(new DatanodeID("h8:5020", "0", -1), "/d2/r4");

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/util/TestReflectionUtils.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/util/TestReflectionUtils.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/util/TestReflectionUtils.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/util/TestReflectionUtils.java Tue Apr 17 13:37:59 2007
@@ -8,37 +8,37 @@
 
 public class TestReflectionUtils extends TestCase {
 
-    private static Class toConstruct[] = { String.class, TestReflectionUtils.class, HashMap.class };
-    private Throwable failure = null;
+  private static Class toConstruct[] = { String.class, TestReflectionUtils.class, HashMap.class };
+  private Throwable failure = null;
 
-    public void setUp() {
-      ReflectionUtils.clearCache();
-    }
-    
-    public void testCache() throws Exception {
-      assertEquals(0, cacheSize());
-      doTestCache();
-      assertEquals(toConstruct.length, cacheSize());
-      ReflectionUtils.clearCache();
-      assertEquals(0, cacheSize());
-    }
-    
-    
-    private void doTestCache() {
-      for (int i=0; i<toConstruct.length; i++) {
-          Class cl = toConstruct[i];
-          Object x = ReflectionUtils.newInstance(cl, null);
-          Object y = ReflectionUtils.newInstance(cl, null);
-          assertEquals(cl, x.getClass());
-          assertEquals(cl, y.getClass());
-      }
-    }
-    
-    public void testThreadSafe() throws Exception {
-      Thread[] th = new Thread[32];
-      for (int i=0; i<th.length; i++) {
-          th[i] = new Thread() {
-            public void run() {
+  public void setUp() {
+    ReflectionUtils.clearCache();
+  }
+    
+  public void testCache() throws Exception {
+    assertEquals(0, cacheSize());
+    doTestCache();
+    assertEquals(toConstruct.length, cacheSize());
+    ReflectionUtils.clearCache();
+    assertEquals(0, cacheSize());
+  }
+    
+    
+  private void doTestCache() {
+    for (int i=0; i<toConstruct.length; i++) {
+      Class cl = toConstruct[i];
+      Object x = ReflectionUtils.newInstance(cl, null);
+      Object y = ReflectionUtils.newInstance(cl, null);
+      assertEquals(cl, x.getClass());
+      assertEquals(cl, y.getClass());
+    }
+  }
+    
+  public void testThreadSafe() throws Exception {
+    Thread[] th = new Thread[32];
+    for (int i=0; i<th.length; i++) {
+      th[i] = new Thread() {
+          public void run() {
             try {
               doTestCache();
             } catch (Throwable t) {
@@ -46,46 +46,46 @@
             }
           }
         };
-        th[i].start();
-      }
-      for (int i=0; i<th.length; i++) {
-        th[i].join();
-      }
-      if (failure != null) {
-        failure.printStackTrace();
-        fail(failure.getMessage());
-      }
-    }
-    
-    private int cacheSize() throws Exception {
-      return ReflectionUtils.getCacheSize();
-    }
-    
-    public void testCantCreate() {
-      try {
-        ReflectionUtils.newInstance(NoDefaultCtor.class, null);
-        fail("invalid call should fail");
-      } catch (RuntimeException rte) {
-        assertEquals(NoSuchMethodException.class, rte.getCause().getClass());
-      }
-    }
-    
-    public void testCacheDoesntLeak() throws Exception {
-      int iterations=9999; // very fast, but a bit less reliable - bigger numbers force GC
-      for (int i=0; i<iterations; i++) {
-        URLClassLoader loader = new URLClassLoader(new URL[0], getClass().getClassLoader());
-        Class cl = Class.forName("org.apache.hadoop.util.TestReflectionUtils$LoadedInChild", false, loader);
-        Object o = ReflectionUtils.newInstance(cl, null);
-        assertEquals(cl, o.getClass());
-      }
-      System.gc();
-      assertTrue(cacheSize()+" too big", cacheSize()<iterations);
-    }
-    
-    private static class LoadedInChild {
+      th[i].start();
     }
-    
-    public static class NoDefaultCtor {
-      public NoDefaultCtor(int x) {}
+    for (int i=0; i<th.length; i++) {
+      th[i].join();
     }
+    if (failure != null) {
+      failure.printStackTrace();
+      fail(failure.getMessage());
+    }
+  }
+    
+  private int cacheSize() throws Exception {
+    return ReflectionUtils.getCacheSize();
+  }
+    
+  public void testCantCreate() {
+    try {
+      ReflectionUtils.newInstance(NoDefaultCtor.class, null);
+      fail("invalid call should fail");
+    } catch (RuntimeException rte) {
+      assertEquals(NoSuchMethodException.class, rte.getCause().getClass());
+    }
+  }
+    
+  public void testCacheDoesntLeak() throws Exception {
+    int iterations=9999; // very fast, but a bit less reliable - bigger numbers force GC
+    for (int i=0; i<iterations; i++) {
+      URLClassLoader loader = new URLClassLoader(new URL[0], getClass().getClassLoader());
+      Class cl = Class.forName("org.apache.hadoop.util.TestReflectionUtils$LoadedInChild", false, loader);
+      Object o = ReflectionUtils.newInstance(cl, null);
+      assertEquals(cl, o.getClass());
+    }
+    System.gc();
+    assertTrue(cacheSize()+" too big", cacheSize()<iterations);
+  }
+    
+  private static class LoadedInChild {
+  }
+    
+  public static class NoDefaultCtor {
+    public NoDefaultCtor(int x) {}
+  }
 }
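
TestReflectionUtils above verifies that ReflectionUtils.newInstance caches the constructor it reflects on (testCache) and that the cache does not grow without bound across throwaway class loaders (testCacheDoesntLeak). A minimal sketch of the cached instantiation call, using only methods that appear in the test:

// Sketch of the call pattern verified above; the second newInstance for the
// same class should reuse the cached constructor rather than reflect again.
ReflectionUtils.clearCache();
Object a = ReflectionUtils.newInstance(HashMap.class, null);  // null Configuration, as in the test
Object b = ReflectionUtils.newInstance(HashMap.class, null);
assertEquals(HashMap.class, a.getClass());
assertEquals(1, ReflectionUtils.getCacheSize());              // one class -> one cache entry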

Modified: lucene/hadoop/trunk/src/test/testjar/ExternalMapperReducer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/testjar/ExternalMapperReducer.java?view=diff&rev=529756&r1=529755&r2=529756
==============================================================================
--- lucene/hadoop/trunk/src/test/testjar/ExternalMapperReducer.java (original)
+++ lucene/hadoop/trunk/src/test/testjar/ExternalMapperReducer.java Tue Apr 17 13:37:59 2007
@@ -26,7 +26,7 @@
   }
 
   public void map(WritableComparable key, Writable value,
-    OutputCollector output, Reporter reporter)
+                  OutputCollector output, Reporter reporter)
     throws IOException {
     
     if (value instanceof Text) {
@@ -37,7 +37,7 @@
   }
 
   public void reduce(WritableComparable key, Iterator values,
-    OutputCollector output, Reporter reporter)
+                     OutputCollector output, Reporter reporter)
     throws IOException {
     
     int count = 0;
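
The signatures re-indented above are the old-style org.apache.hadoop.mapred interfaces, where map and reduce take raw WritableComparable/Writable parameters plus an OutputCollector and a Reporter. A hypothetical mapper with the same shape (the class name and the emitted key/value are illustrative only, not taken from the patch):

// Illustrative only: same parameter layout as the map method above.
public class EchoTextMapper extends MapReduceBase implements Mapper {
  public void map(WritableComparable key, Writable value,
                  OutputCollector output, Reporter reporter)
    throws IOException {
    if (value instanceof Text) {
      // hypothetical: pass the record through unchanged
      output.collect(key, value);
    }
  }
}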


