hadoop-common-commits mailing list archives

From: cutt...@apache.org
Subject: svn commit: r530556 [10/12] - in /lucene/hadoop/trunk: ./ src/contrib/abacus/src/java/org/apache/hadoop/abacus/ src/contrib/hbase/src/java/org/apache/hadoop/hbase/ src/contrib/hbase/src/test/org/apache/hadoop/hbase/ src/contrib/streaming/src/java/org/a...
Date: Thu, 19 Apr 2007 21:34:53 GMT
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TaskTrackerStatus.java Thu Apr 19 14:34:41 2007
@@ -110,7 +110,7 @@
    */
   public int countMapTasks() {
     int mapCount = 0;
-    for (Iterator it = taskReports.iterator(); it.hasNext(); ) {
+    for (Iterator it = taskReports.iterator(); it.hasNext();) {
       TaskStatus ts = (TaskStatus) it.next();
       TaskStatus.State state = ts.getRunState();
       if (ts.getIsMap() &&
@@ -127,7 +127,7 @@
    */
   public int countReduceTasks() {
     int reduceCount = 0;
-    for (Iterator it = taskReports.iterator(); it.hasNext(); ) {
+    for (Iterator it = taskReports.iterator(); it.hasNext();) {
       TaskStatus ts = (TaskStatus) it.next();
       TaskStatus.State state = ts.getRunState();
       if ((!ts.getIsMap()) &&
@@ -160,7 +160,7 @@
 
     out.writeInt(taskReports.size());
     out.writeInt(failures);
-    for (Iterator it = taskReports.iterator(); it.hasNext(); ) {
+    for (Iterator it = taskReports.iterator(); it.hasNext();) {
       ((TaskStatus) it.next()).write(out);
     }
   }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TextInputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TextInputFormat.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TextInputFormat.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TextInputFormat.java Thu Apr 19 14:34:41 2007
@@ -39,7 +39,7 @@
   }
 
   public RecordReader getRecordReader(InputSplit genericSplit, JobConf job,
-      Reporter reporter) throws IOException {
+                                      Reporter reporter) throws IOException {
     reporter.setStatus(genericSplit.toString());
     return new LineRecordReader(job, (FileSplit) genericSplit);
   }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TextOutputFormat.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TextOutputFormat.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TextOutputFormat.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/TextOutputFormat.java Thu Apr 19 14:34:41 2007
@@ -42,7 +42,7 @@
     }
     
     public synchronized void write(WritableComparable key, Writable value)
-        throws IOException {
+      throws IOException {
 
       if (key == null && value == null) {
         return;
@@ -78,7 +78,7 @@
       Class codecClass = getOutputCompressorClass(job, GzipCodec.class);
       // create the named codec
       CompressionCodec codec = (CompressionCodec)
-                               ReflectionUtils.newInstance(codecClass, job);
+        ReflectionUtils.newInstance(codecClass, job);
       // build the filename including the extension
       Path filename = new Path(dir, name + codec.getDefaultExtension());
       FSDataOutputStream fileOut = fs.create(filename, progress);

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/jobcontrol/Job.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/jobcontrol/Job.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/jobcontrol/Job.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/jobcontrol/Job.java Thu Apr 19 14:34:41 2007
@@ -243,7 +243,7 @@
       this.state = Job.FAILED;
       this.message = StringUtils.stringifyException(ioe);
       try {
-        if(running != null)
+        if (running != null)
           running.killJob();
       } catch (IOException e1) {
 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java Thu Apr 19 14:34:41 2007
@@ -48,7 +48,7 @@
       job.getInt("mapred.map.multithreadedrunner.threads", 10);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Configuring job " + job.getJobName() +
-                " to use " + numberOfThreads + " threads" );
+                " to use " + numberOfThreads + " threads");
     }
 
     this.job = job;

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/MetricsUtil.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/MetricsUtil.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/MetricsUtil.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/metrics/MetricsUtil.java Thu Apr 19 14:34:41 2007
@@ -34,63 +34,63 @@
  */
 public class MetricsUtil {
     
-    private static final Log LOG =
-        LogFactory.getLog("org.apache.hadoop.util.MetricsUtil");
+  private static final Log LOG =
+    LogFactory.getLog("org.apache.hadoop.util.MetricsUtil");
 
-    /**
-     * Don't allow creation of a new instance of Metrics
-     */
-    private MetricsUtil() {}
+  /**
+   * Don't allow creation of a new instance of Metrics
+   */
+  private MetricsUtil() {}
     
-    /**
-     * Utility method to return the named context.
-     * If the desired context cannot be created for any reason, the exception
-     * is logged, and a null context is returned.
-     */
-    public static MetricsContext getContext(String contextName) {
-        MetricsContext metricsContext;
-        try {
-            metricsContext = ContextFactory.getFactory().getContext(contextName);
-            if (!metricsContext.isMonitoring()) {
-                metricsContext.startMonitoring();
-            }
-        } catch (Exception ex) {
-            LOG.error("Unable to create metrics context " + contextName, ex);
-            metricsContext = ContextFactory.getNullContext(contextName);
-        }
-        return metricsContext;
+  /**
+   * Utility method to return the named context.
+   * If the desired context cannot be created for any reason, the exception
+   * is logged, and a null context is returned.
+   */
+  public static MetricsContext getContext(String contextName) {
+    MetricsContext metricsContext;
+    try {
+      metricsContext = ContextFactory.getFactory().getContext(contextName);
+      if (!metricsContext.isMonitoring()) {
+        metricsContext.startMonitoring();
+      }
+    } catch (Exception ex) {
+      LOG.error("Unable to create metrics context " + contextName, ex);
+      metricsContext = ContextFactory.getNullContext(contextName);
     }
+    return metricsContext;
+  }
 
-    /**
-     * Utility method to create and return new metrics record instance within the
-     * given context. This record is tagged with the host name.
-     *
-     * @param context the context
-     * @param recordName name of the record
-     * @return newly created metrics record
-     */
-    public static MetricsRecord createRecord(MetricsContext context, 
-                                             String recordName) 
-    {
-      MetricsRecord metricsRecord = context.createRecord(recordName);
-      metricsRecord.setTag("hostName", getHostName());
-      return metricsRecord;        
-    }
+  /**
+   * Utility method to create and return new metrics record instance within the
+   * given context. This record is tagged with the host name.
+   *
+   * @param context the context
+   * @param recordName name of the record
+   * @return newly created metrics record
+   */
+  public static MetricsRecord createRecord(MetricsContext context, 
+                                           String recordName) 
+  {
+    MetricsRecord metricsRecord = context.createRecord(recordName);
+    metricsRecord.setTag("hostName", getHostName());
+    return metricsRecord;        
+  }
     
-    /**
-     * Returns the host name.  If the host name is unobtainable, logs the
-     * exception and returns "unknown".
-     */
-    private static String getHostName() {
-        String hostName = null;
-        try {
-            hostName = InetAddress.getLocalHost().getHostName();
-        } 
-        catch (UnknownHostException ex) {
-            LOG.info("Unable to obtain hostName", ex);
-            hostName = "unknown";
-        }
-        return hostName;
+  /**
+   * Returns the host name.  If the host name is unobtainable, logs the
+   * exception and returns "unknown".
+   */
+  private static String getHostName() {
+    String hostName = null;
+    try {
+      hostName = InetAddress.getLocalHost().getHostName();
+    } 
+    catch (UnknownHostException ex) {
+      LOG.info("Unable to obtain hostName", ex);
+      hostName = "unknown";
     }
+    return hostName;
+  }
 
 }

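For context, the javadoc in the reformatted MetricsUtil above describes how callers obtain a context and a host-tagged record. A minimal usage sketch (the "dfs" context name, "namenode" record name, and "filesCreated" metric are illustrative only, not from this commit; setMetric/update are the standard org.apache.hadoop.metrics.MetricsRecord methods):

    import org.apache.hadoop.metrics.MetricsContext;
    import org.apache.hadoop.metrics.MetricsRecord;
    import org.apache.hadoop.metrics.MetricsUtil;

    public class MetricsUtilExample {
      public static void main(String[] args) {
        // obtain (and start monitoring) the named context; falls back to a null context on error
        MetricsContext context = MetricsUtil.getContext("dfs");
        // the returned record is already tagged with this host's name
        MetricsRecord record = MetricsUtil.createRecord(context, "namenode");
        record.setMetric("filesCreated", 1);
        record.update();  // queue the record for emission by the context
      }
    }
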
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NetworkTopology.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NetworkTopology.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NetworkTopology.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NetworkTopology.java Thu Apr 19 14:34:41 2007
@@ -52,19 +52,19 @@
     private int numOfLeaves;
         
     /** Construct an InnerNode from a path-like string */
-    InnerNode( String path ) {
-      super( path );
+    InnerNode(String path) {
+      super(path);
     }
         
     /** Construct an InnerNode from its name and its network location */
-    InnerNode( String name, String location ) {
-      super( name, location );
+    InnerNode(String name, String location) {
+      super(name, location);
     }
         
     /** Construct an InnerNode
      * from its name, its network location, its parent, and its level */
-    InnerNode( String name, String location, InnerNode parent, int level ) {
-      super( name, location, parent, level );
+    InnerNode(String name, String location, InnerNode parent, int level) {
+      super(name, location, parent, level);
     }
         
     /** Get its children */
@@ -79,12 +79,12 @@
      * Return true if it has no child or its children are not InnerNodes
      */ 
     boolean isRack() {
-      if(children.isEmpty()) {
+      if (children.isEmpty()) {
         return true;
       }
             
       Node firstChild = children.get(0);
-      if(firstChild instanceof InnerNode) {
+      if (firstChild instanceof InnerNode) {
         return false;
       }
             
@@ -107,22 +107,22 @@
      * @param n: a node
      * @return true if this node is the parent of <i>n</i>
      */
-    boolean isParent( Node n ) {
-      return n.getNetworkLocation().equals( getPath() );
+    boolean isParent(Node n) {
+      return n.getNetworkLocation().equals(getPath());
     }
         
     /* Return a child name of this node who is an ancestor of node <i>n</i> */
-    private String getNextAncestorName( Node n ) {
-      if( !isAncestor(n)) {
-        throw new IllegalArgumentException( 
+    private String getNextAncestorName(Node n) {
+      if (!isAncestor(n)) {
+        throw new IllegalArgumentException(
                                            this + "is not an ancestor of " + n);
       }
       String name = n.getNetworkLocation().substring(getPath().length());
-      if(name.charAt(0) == PATH_SEPARATOR) {
+      if (name.charAt(0) == PATH_SEPARATOR) {
         name = name.substring(1);
       }
       int index=name.indexOf(PATH_SEPARATOR);
-      if( index !=-1 )
+      if (index !=-1)
         name = name.substring(0, index);
       return name;
     }
@@ -131,16 +131,16 @@
      * @param n node to be added
      * @return true if the node is added; false otherwise
      */
-    boolean add( DatanodeDescriptor n ) {
-      if( !isAncestor( n ) )
-        throw new IllegalArgumentException( n.getName()+", which is located at "
-                                            +n.getNetworkLocation()+", is not a decendent of "+getPath());
-      if( isParent( n ) ) {
+    boolean add(DatanodeDescriptor n) {
+      if (!isAncestor(n))
+        throw new IllegalArgumentException(n.getName()+", which is located at "
+                                           +n.getNetworkLocation()+", is not a decendent of "+getPath());
+      if (isParent(n)) {
         // this node is the parent of n; add n directly
-        n.setParent( this );
-        n.setLevel( this.level+1 );
+        n.setParent(this);
+        n.setLevel(this.level+1);
         for(int i=0; i<children.size(); i++) {
-          if(children.get(i).getName().equals(n.getName())) {
+          if (children.get(i).getName().equals(n.getName())) {
             children.set(i, n);
             return false;
           }
@@ -150,22 +150,22 @@
         return true;
       } else {
         // find the next ancestor node
-        String parentName = getNextAncestorName( n );
+        String parentName = getNextAncestorName(n);
         InnerNode parentNode = null;
         for(int i=0; i<children.size(); i++) {
-          if(children.get(i).getName().equals(parentName)) {
+          if (children.get(i).getName().equals(parentName)) {
             parentNode = (InnerNode)children.get(i);
             break;
           }
         }
-        if( parentNode == null ) {
+        if (parentNode == null) {
           // create a new InnerNode
-          parentNode = new InnerNode( parentName, getPath(),
-                                      this, this.getLevel()+1 );
+          parentNode = new InnerNode(parentName, getPath(),
+                                     this, this.getLevel()+1);
           children.add(parentNode);
         }
         // add n to the subtree of the next ancestor node
-        if( parentNode.add(n) ) {
+        if (parentNode.add(n)) {
           numOfLeaves++;
           return true;
         } else {
@@ -178,17 +178,17 @@
      * @parameter n node to be deleted 
      * @return true if the node is deleted; false otherwise
      */
-    boolean remove( DatanodeDescriptor n ) {
+    boolean remove(DatanodeDescriptor n) {
       String parent = n.getNetworkLocation();
       String currentPath = getPath();
-      if(!isAncestor(n))
-        throw new IllegalArgumentException( n.getName()
-                                            +", which is located at "
-                                            +parent+", is not a decendent of "+currentPath);
-      if( isParent(n) ) {
+      if (!isAncestor(n))
+        throw new IllegalArgumentException(n.getName()
+                                           +", which is located at "
+                                           +parent+", is not a decendent of "+currentPath);
+      if (isParent(n)) {
         // this node is the parent of n; remove n directly
         for(int i=0; i<children.size(); i++) {
-          if(children.get(i).getName().equals(n.getName())) {
+          if (children.get(i).getName().equals(n.getName())) {
             children.remove(i);
             numOfLeaves--;
             n.setParent(null);
@@ -198,23 +198,23 @@
         return false;
       } else {
         // find the next ancestor node: the parent node
-        String parentName = getNextAncestorName( n );
+        String parentName = getNextAncestorName(n);
         InnerNode parentNode = null;
         int i;
         for(i=0; i<children.size(); i++) {
-          if(children.get(i).getName().equals(parentName)) {
+          if (children.get(i).getName().equals(parentName)) {
             parentNode = (InnerNode)children.get(i);
             break;
           }
         }
-        if(parentNode==null) {
+        if (parentNode==null) {
           return false;
         }
         // remove n from the parent node
-        boolean isRemoved = parentNode.remove( n );
+        boolean isRemoved = parentNode.remove(n);
         // if the parent node has no children, remove the parent node too
-        if(isRemoved) {
-          if(parentNode.getNumOfChildren() == 0 ) {
+        if (isRemoved) {
+          if (parentNode.getNumOfChildren() == 0) {
             children.remove(i);
           }
           numOfLeaves--;
@@ -224,19 +224,19 @@
     } // end of remove
         
     /** Given a node's string representation, return a reference to the node */ 
-    Node getLoc( String loc ) {
-      if( loc == null || loc.length() == 0 ) return this;
+    Node getLoc(String loc) {
+      if (loc == null || loc.length() == 0) return this;
             
       String[] path = loc.split(PATH_SEPARATOR_STR, 2);
       Node childnode = null;
       for(int i=0; i<children.size(); i++) {
-        if(children.get(i).getName().equals(path[0])) {
+        if (children.get(i).getName().equals(path[0])) {
           childnode = children.get(i);
         }
       }
-      if(childnode == null ) return null; // non-existing node
-      if( path.length == 1 ) return childnode;
-      if( childnode instanceof InnerNode ) {
+      if (childnode == null) return null; // non-existing node
+      if (path.length == 1) return childnode;
+      if (childnode instanceof InnerNode) {
         return ((InnerNode)childnode).getLoc(path[1]);
       } else {
         return null;
@@ -248,36 +248,36 @@
     private DatanodeDescriptor getLeaf(int leaveIndex, Node excludedNode) {
       int count=0;
       int numOfExcludedLeaves = 1;
-      if( excludedNode instanceof InnerNode ) {
+      if (excludedNode instanceof InnerNode) {
         numOfExcludedLeaves = ((InnerNode)excludedNode).getNumOfLeaves();
       }
-      if( isRack() ) { // children are leaves
+      if (isRack()) { // children are leaves
         // range check
-        if(leaveIndex<0 || leaveIndex>=this.getNumOfChildren()) {
+        if (leaveIndex<0 || leaveIndex>=this.getNumOfChildren()) {
           return null;
         }
         DatanodeDescriptor child =
           (DatanodeDescriptor)children.get(leaveIndex);
-        if(excludedNode == null || excludedNode != child) {
+        if (excludedNode == null || excludedNode != child) {
           // child is not the excludedNode
           return child;
         } else { // child is the excludedNode so return the next child
-          if(leaveIndex+1>=this.getNumOfChildren()) {
+          if (leaveIndex+1>=this.getNumOfChildren()) {
             return null;
           } else {
             return (DatanodeDescriptor)children.get(leaveIndex+1);
           }
         }
       } else {
-        for( int i=0; i<children.size(); i++ ) {
+        for(int i=0; i<children.size(); i++) {
           InnerNode child = (InnerNode)children.get(i);
-          if(excludedNode == null || excludedNode != child) {
+          if (excludedNode == null || excludedNode != child) {
             // not the excludedNode
             int numOfLeaves = child.getNumOfLeaves();
-            if( excludedNode != null && child.isAncestor(excludedNode) ) {
+            if (excludedNode != null && child.isAncestor(excludedNode)) {
               numOfLeaves -= numOfExcludedLeaves;
             }
-            if( count+numOfLeaves > leaveIndex ) {
+            if (count+numOfLeaves > leaveIndex) {
               // the leaf is in the child subtree
               return child.getLeaf(leaveIndex-count, excludedNode);
             } else {
@@ -298,7 +298,7 @@
     }
   } // end of InnerNode
     
-  InnerNode clusterMap = new InnerNode( InnerNode.ROOT ); // the root
+  InnerNode clusterMap = new InnerNode(InnerNode.ROOT); // the root
   private int numOfRacks = 0;  // rack counter
     
   public NetworkTopology() {
@@ -310,17 +310,17 @@
    *          data node to be added
    * @exception IllegalArgumentException if add a data node to a leave
    */
-  public synchronized void add( DatanodeDescriptor node ) {
-    if( node==null ) return;
+  public synchronized void add(DatanodeDescriptor node) {
+    if (node==null) return;
     LOG.info("Adding a new node: "+node.getPath());
     Node rack = getNode(node.getNetworkLocation());
-    if(rack != null && !(rack instanceof InnerNode) ) {
-      throw new IllegalArgumentException( "Unexpected data node " 
-                                          + node.toString() 
-                                          + " at an illegal network location");
+    if (rack != null && !(rack instanceof InnerNode)) {
+      throw new IllegalArgumentException("Unexpected data node " 
+                                         + node.toString() 
+                                         + " at an illegal network location");
     }
-    if( clusterMap.add( node) ) {
-      if( rack == null ) {
+    if (clusterMap.add(node)) {
+      if (rack == null) {
         numOfRacks++;
       }
     }
@@ -332,12 +332,12 @@
    * @param node
    *          data node to be removed
    */ 
-  public synchronized void remove( DatanodeDescriptor node ) {
-    if( node==null ) return;
+  public synchronized void remove(DatanodeDescriptor node) {
+    if (node==null) return;
     LOG.info("Removing a node: "+node.getPath());
-    if( clusterMap.remove( node ) ) {
+    if (clusterMap.remove(node)) {
       InnerNode rack = (InnerNode)getNode(node.getNetworkLocation());
-      if(rack == null) {
+      if (rack == null) {
         numOfRacks--;
       }
     }
@@ -350,12 +350,12 @@
    *          a data node
    * @return true if <i>node</i> is already in the tree; false otherwise
    */
-  public synchronized boolean contains( DatanodeDescriptor node ) {
-    if( node == null ) return false;
+  public synchronized boolean contains(DatanodeDescriptor node) {
+    if (node == null) return false;
     Node parent = node.getParent();
-    for( int level=node.getLevel(); parent!=null&&level>0;
-         parent=parent.getParent(), level-- ) {
-      if(parent == clusterMap)
+    for(int level=node.getLevel(); parent!=null&&level>0;
+        parent=parent.getParent(), level--) {
+      if (parent == clusterMap)
         return true;
     }
     return false; 
@@ -367,15 +367,15 @@
    *          a path-like string representation of a node
    * @return a reference to the node; null if the node is not in the tree
    */
-  public synchronized Node getNode( String loc ) {
+  public synchronized Node getNode(String loc) {
     loc = NodeBase.normalize(loc);
-    if(!NodeBase.ROOT.equals(loc))
+    if (!NodeBase.ROOT.equals(loc))
       loc = loc.substring(1);
-    return clusterMap.getLoc( loc );
+    return clusterMap.getLoc(loc);
   }
     
   /** Return the total number of racks */
-  public synchronized int getNumOfRacks( ) {
+  public synchronized int getNumOfRacks() {
     return numOfRacks;
   }
     
@@ -393,20 +393,20 @@
    * @return the distance between node1 and node2
    * node1 or node2 do not belong to the cluster
    */
-  public int getDistance(DatanodeDescriptor node1, DatanodeDescriptor node2 ) {
-    if( node1 == node2 ) {
+  public int getDistance(DatanodeDescriptor node1, DatanodeDescriptor node2) {
+    if (node1 == node2) {
       return 0;
     }
     int i;
     Node n1=node1, n2=node2;
     int level1=node1.getLevel(), level2=node2.getLevel();
     int dis = 0;
-    while( n1!=null && level1>level2 ) {
+    while(n1!=null && level1>level2) {
       n1 = n1.getParent();
       level1--;
       dis++;
     }
-    while( n2!=null && level2>level1 ) {
+    while(n2!=null && level2>level1) {
       n2 = n2.getParent();
       level2--;
       dis++;
@@ -420,7 +420,7 @@
       LOG.warn("The cluster does not contain data node: "+node1.getPath());
       return Integer.MAX_VALUE;
     }
-    if(n2==null) {
+    if (n2==null) {
       LOG.warn("The cluster does not contain data node: "+node2.getPath());
       return Integer.MAX_VALUE;
     }
@@ -436,11 +436,11 @@
    */
   public boolean isOnSameRack(
                               DatanodeDescriptor node1, DatanodeDescriptor node2) {
-    if( node1 == null || node2 == null ) {
+    if (node1 == null || node2 == null) {
       return false;
     }
       
-    if( node1 == node2 || node1.equals(node2)) {
+    if (node1 == node2 || node1.equals(node2)) {
       return true;
     }
         
@@ -455,7 +455,7 @@
    * @return the choosen data node
    */
   public DatanodeDescriptor chooseRandom(String scope) {
-    if(scope.startsWith("~")) {
+    if (scope.startsWith("~")) {
       return chooseRandom(NodeBase.ROOT, scope.substring(1));
     } else {
       return chooseRandom(scope, null);
@@ -463,25 +463,25 @@
   }
     
   private DatanodeDescriptor chooseRandom(String scope, String excludedScope){
-    if(excludedScope != null) {
-      if(scope.startsWith(excludedScope)) {
+    if (excludedScope != null) {
+      if (scope.startsWith(excludedScope)) {
         return null;
       }
-      if(!excludedScope.startsWith(scope)) {
+      if (!excludedScope.startsWith(scope)) {
         excludedScope = null;
       }
     }
     Node node = getNode(scope);
-    if(node instanceof DatanodeDescriptor) {
+    if (node instanceof DatanodeDescriptor) {
       return (DatanodeDescriptor)node;
     }
     InnerNode innerNode = (InnerNode)node;
     int numOfDatanodes = innerNode.getNumOfLeaves();
-    if(excludedScope == null) {
+    if (excludedScope == null) {
       node = null;
     } else {
       node = getNode(excludedScope);
-      if(node instanceof DatanodeDescriptor) {
+      if (node instanceof DatanodeDescriptor) {
         numOfDatanodes -= 1;
       } else {
         numOfDatanodes -= ((InnerNode)node).getNumOfLeaves();
@@ -501,24 +501,24 @@
   public int countNumOfAvailableNodes(String scope,
                                       List<DatanodeDescriptor> excludedNodes) {
     boolean isExcluded=false;
-    if(scope.startsWith("~")) {
+    if (scope.startsWith("~")) {
       isExcluded=true;
       scope=scope.substring(1);
     }
     scope = NodeBase.normalize(scope);
     int count=0; // the number of nodes in both scope & excludedNodes
-    for( DatanodeDescriptor node:excludedNodes) {
-      if( (node.getPath()+NodeBase.PATH_SEPARATOR_STR).
+    for(DatanodeDescriptor node:excludedNodes) {
+      if ((node.getPath()+NodeBase.PATH_SEPARATOR_STR).
           startsWith(scope+NodeBase.PATH_SEPARATOR_STR)) {
         count++;
       }
     }
     Node n=getNode(scope);
     int scopeNodeCount=1;
-    if(n instanceof InnerNode) {
+    if (n instanceof InnerNode) {
       scopeNodeCount=((InnerNode)n).getNumOfLeaves();
     }
-    if(isExcluded) {
+    if (isExcluded) {
       return clusterMap.getNumOfLeaves()-
         scopeNodeCount-excludedNodes.size()+count;
     } else {
@@ -530,18 +530,18 @@
   public String toString() {
     // print the number of racks
     StringBuffer tree = new StringBuffer();
-    tree.append( "Number of racks: " );
-    tree.append( numOfRacks );
-    tree.append( "\n" );
+    tree.append("Number of racks: ");
+    tree.append(numOfRacks);
+    tree.append("\n");
     // print the number of leaves
     int numOfLeaves = getNumOfLeaves();
-    tree.append( "Expected number of leaves:" );
-    tree.append( numOfLeaves );
-    tree.append( "\n" );
+    tree.append("Expected number of leaves:");
+    tree.append(numOfLeaves);
+    tree.append("\n");
     // print datanodes
-    for( int i=0; i<numOfLeaves; i++ ) {
-      tree.append( clusterMap.getLeaf(i, null).getPath() );
-      tree.append( "\n");
+    for(int i=0; i<numOfLeaves; i++) {
+      tree.append(clusterMap.getLeaf(i, null).getPath());
+      tree.append("\n");
     }
     return tree.toString();
   }
@@ -558,11 +558,11 @@
     };
       
   /** Sorts nodes array by their distances to <i>reader</i>. */
-  public synchronized void sortByDistance( final DatanodeDescriptor reader,
-                                           DatanodeDescriptor[] nodes ) { 
-    if(reader != null && contains(reader)) {
+  public synchronized void sortByDistance(final DatanodeDescriptor reader,
+                                          DatanodeDescriptor[] nodes) { 
+    if (reader != null && contains(reader)) {
       distFrom = reader;
-      Arrays.sort( nodes, nodeDistanceComparator );
+      Arrays.sort(nodes, nodeDistanceComparator);
       distFrom = null;
     }
   }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NodeBase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NodeBase.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NodeBase.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/net/NodeBase.java Thu Apr 19 14:34:41 2007
@@ -34,20 +34,20 @@
   protected Node parent; //its parent
   
   /** Default constructor */
-  public NodeBase( ) {
+  public NodeBase() {
   }
   
   /** Construct a node from its path
    * @param path 
    *   a concatenation of this node's location, the path seperator, and its name 
    */
-  public NodeBase( String path ) {
+  public NodeBase(String path) {
     path = normalize(path);
-    int index = path.lastIndexOf( PATH_SEPARATOR );
-    if( index== -1 ) {
-      set( ROOT, path );
+    int index = path.lastIndexOf(PATH_SEPARATOR);
+    if (index== -1) {
+      set(ROOT, path);
     } else {
-      set( path.substring(index+1), path.substring(0, index) );
+      set(path.substring(index+1), path.substring(0, index));
     }
   }
   
@@ -55,7 +55,7 @@
    * @param name this node's name 
    * @param location this node's location 
    */
-  public NodeBase( String name, String location ) {
+  public NodeBase(String name, String location) {
     set(name, normalize(location));
   }
   
@@ -65,15 +65,15 @@
    * @param parent this node's parent node
    * @param level this node's level in the tree
    */
-  public NodeBase( String name, String location, Node parent, int level ) {
+  public NodeBase(String name, String location, Node parent, int level) {
     set(name, normalize(location));
     this.parent = parent;
     this.level = level;
   }
 
   /* set this node's name and location */
-  private void set( String name, String location ) {
-    if(name != null && name.contains(PATH_SEPARATOR_STR))
+  private void set(String name, String location) {
+    if (name != null && name.contains(PATH_SEPARATOR_STR))
       throw new IllegalArgumentException(
                                          "Network location name contains /: "+name);
     this.name = (name==null)?"":name;
@@ -98,16 +98,16 @@
 
   /** Normalize a path */
   static public String normalize(String path) {
-    if( path == null || path.length() == 0 ) return ROOT;
+    if (path == null || path.length() == 0) return ROOT;
     
-    if( path.charAt(0) != PATH_SEPARATOR ) {
-      throw new IllegalArgumentException( 
+    if (path.charAt(0) != PATH_SEPARATOR) {
+      throw new IllegalArgumentException(
                                          "Network Location path does not start with "
                                          +PATH_SEPARATOR_STR+ ": "+path);
     }
     
     int len = path.length();
-    if(path.charAt(len-1) == PATH_SEPARATOR) {
+    if (path.charAt(len-1) == PATH_SEPARATOR) {
       return path.substring(0, len-1);
     }
     return path;

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/Index.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/Index.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/Index.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/Index.java Thu Apr 19 14:34:41 2007
@@ -34,6 +34,6 @@
  * @author Milind Bhandarkar
  */
 public interface Index {
-    boolean done();
-    void incr();
+  boolean done();
+  void incr();
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/XmlRecordInput.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/XmlRecordInput.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/XmlRecordInput.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/XmlRecordInput.java Thu Apr 19 14:34:41 2007
@@ -96,7 +96,7 @@
       throws SAXException {
       if (charsValid) {
         Value v = valList.get(valList.size()-1);
-        v.addChars(buf, offset,len);
+        v.addChars(buf, offset, len);
       }
     }
         

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JMap.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JMap.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JMap.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JMap.java Thu Apr 19 14:34:41 2007
@@ -61,7 +61,7 @@
       cb.append(iterType+getId("miter2")+" = "+
                 getId("set2")+".iterator();\n");
       cb.append("for(; "+getId("miter1")+".hasNext() && "+
-                getId("miter2")+".hasNext(); ) {\n");
+                getId("miter2")+".hasNext();) {\n");
       cb.append(key.getType()+" "+getId("k1")+
                 " = "+getId("miter1")+".next();\n");
       cb.append(key.getType()+" "+getId("k2")+
@@ -82,8 +82,8 @@
       cb.append("org.apache.hadoop.record.Index "+getId("midx")+" = a.startMap(\""+tag+"\");\n");
       cb.append(fname+"=new "+getType()+"();\n");
       cb.append("for (; !"+getId("midx")+".done(); "+getId("midx")+".incr()) {\n");
-      key.genReadMethod(cb, getId("k"),getId("k"),true);
-      value.genReadMethod(cb, getId("v"),getId("v"),true);
+      key.genReadMethod(cb, getId("k"),getId("k"), true);
+      value.genReadMethod(cb, getId("v"), getId("v"), true);
       cb.append(fname+".put("+getId("k")+","+getId("v")+");\n");
       cb.append("}\n");
       cb.append("a.endMap(\""+tag+"\");\n");
@@ -102,12 +102,12 @@
       incrLevel();
       cb.append("a.startMap("+fname+",\""+tag+"\");\n");
       cb.append(setType+getId("es")+" = "+fname+".entrySet();\n");
-      cb.append("for("+iterType+getId("midx")+" = "+getId("es")+".iterator(); "+getId("midx")+".hasNext(); ) {\n");
+      cb.append("for("+iterType+getId("midx")+" = "+getId("es")+".iterator(); "+getId("midx")+".hasNext();) {\n");
       cb.append(entryType+getId("me")+" = "+getId("midx")+".next();\n");
       cb.append(key.getType()+" "+getId("k")+" = "+getId("me")+".getKey();\n");
       cb.append(value.getType()+" "+getId("v")+" = "+getId("me")+".getValue();\n");
-      key.genWriteMethod(cb, getId("k"),getId("k"));
-      value.genWriteMethod(cb, getId("v"),getId("v"));
+      key.genWriteMethod(cb, getId("k"), getId("k"));
+      value.genWriteMethod(cb, getId("v"), getId("v"));
       cb.append("}\n");
       cb.append("a.endMap("+fname+",\""+tag+"\");\n");
       cb.append("}\n");
@@ -124,8 +124,8 @@
       cb.append(s+"+="+getId("mz")+"; "+l+"-="+getId("mz")+";\n");
       cb.append("for (int "+getId("midx")+" = 0; "+getId("midx")+
                 " < "+getId("mi")+"; "+getId("midx")+"++) {");
-      key.genSlurpBytes(cb, b,s,l);
-      value.genSlurpBytes(cb, b,s,l);
+      key.genSlurpBytes(cb, b, s, l);
+      value.genSlurpBytes(cb, b, s, l);
       cb.append("}\n");
       decrLevel();
       cb.append("}\n");

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JRecord.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JRecord.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JRecord.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JRecord.java Thu Apr 19 14:34:41 2007
@@ -441,7 +441,7 @@
   
   void genCppCode(FileWriter hh, FileWriter cc, ArrayList<String> options)
     throws IOException {
-    ((CppRecord)getCppType()).genCode(hh,cc, options);
+    ((CppRecord)getCppType()).genCode(hh, cc, options);
   }
   
   void genJavaCode(String destDir, ArrayList<String> options)

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JVector.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JVector.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JVector.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JVector.java Thu Apr 19 14:34:41 2007
@@ -110,7 +110,7 @@
       cb.append(s+"+="+getId("vz")+"; "+l+"-="+getId("vz")+";\n");
       cb.append("for (int "+getId("vidx")+" = 0; "+getId("vidx")+
                 " < "+getId("vi")+"; "+getId("vidx")+"++)");
-      element.genSlurpBytes(cb, b,s,l);
+      element.genSlurpBytes(cb, b, s, l);
       decrLevel();
       cb.append("}\n");
     }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JavaGenerator.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JavaGenerator.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JavaGenerator.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/JavaGenerator.java Thu Apr 19 14:34:41 2007
@@ -44,7 +44,7 @@
   void genCode(String name, ArrayList<JFile> ilist,
                ArrayList<JRecord> rlist, String destDir, ArrayList<String> options)
     throws IOException {
-    for (Iterator<JRecord> iter = rlist.iterator(); iter.hasNext(); ) {
+    for (Iterator<JRecord> iter = rlist.iterator(); iter.hasNext();) {
       JRecord rec = iter.next();
       rec.genJavaCode(destDir, options);
     }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/generated/Rcc.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/generated/Rcc.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/generated/Rcc.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/record/compiler/generated/Rcc.java Thu Apr 19 14:34:41 2007
@@ -384,10 +384,10 @@
     jj_la1_1();
   }
   private static void jj_la1_0() {
-    jj_la1_0 = new int[] {0x2800,0x2800,0x40000000,0x1000,0xffc000,0xffc000,};
+    jj_la1_0 = new int[] {0x2800, 0x2800, 0x40000000, 0x1000, 0xffc000, 0xffc000,};
   }
   private static void jj_la1_1() {
-    jj_la1_1 = new int[] {0x0,0x0,0x0,0x0,0x1,0x1,};
+    jj_la1_1 = new int[] {0x0, 0x0, 0x0, 0x0, 0x1, 0x1,};
   }
 
   public Rcc(java.io.InputStream stream) {

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/tools/Logalyzer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/tools/Logalyzer.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/tools/Logalyzer.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/tools/Logalyzer.java Thu Apr 19 14:34:41 2007
@@ -101,7 +101,7 @@
       
       //Initialize the specification for *comparision*
       String sortColumns = this.conf.get("mapred.reducer.sort", null);
-      if(sortColumns != null) {
+      if (sortColumns != null) {
         sortSpec = sortColumns.split(",");
       }
       
@@ -116,7 +116,7 @@
     public int compare(byte[] b1, int s1, int l1,
                        byte[] b2, int s2, int l2) {
       
-      if(sortSpec == null) {
+      if (sortSpec == null) {
         return super.compare(b1, s1, l1, b2, s2, l2);
       }
       
@@ -131,7 +131,7 @@
         String line2 = logline2.toString();
         String[] logColumns2 = line2.split(columnSeparator);
         
-        if(logColumns1 == null || logColumns2 == null) {
+        if (logColumns1 == null || logColumns2 == null) {
           return super.compare(b1, s1, l1, b2, s2, l2);
         }
         
@@ -148,7 +148,7 @@
                                                );
           
           //They differ!
-          if(comparision != 0) {
+          if (comparision != 0) {
             return comparision;
           }
         }
@@ -199,7 +199,7 @@
     Path grepInput = new Path(inputFilesDirectory);
     
     Path analysisOutput = null;
-    if(outputDirectory.equals("")) {
+    if (outputDirectory.equals("")) {
       analysisOutput =  new Path(inputFilesDirectory, "logalyzer_" + 
                                  Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));
     } else {

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/CopyFiles.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/CopyFiles.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/CopyFiles.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/CopyFiles.java Thu Apr 19 14:34:41 2007
@@ -347,7 +347,7 @@
       for(int idx=0; idx < numMaps; ++idx) {
         Path file = new Path(inDir, "part"+idx);
         SequenceFile.Writer writer = 
-          SequenceFile.createWriter(fileSys,conf,file,Text.class,Text.class);
+          SequenceFile.createWriter(fileSys, conf, file, Text.class, Text.class);
         for (int ipath = idx; ipath < finalPathList.size(); ipath += numMaps) {
           String path = finalPathList.get(ipath);
           writer.append(new Text(path), new Text(""));
@@ -366,7 +366,7 @@
       Path jobDirectory = new Path(jobConf.get("distcp.job.dir", "/"));
       FileSystem fs = FileSystem.get(jobConf);
       
-      if(!jobDirectory.equals("/")) {
+      if (!jobDirectory.equals("/")) {
         fs.delete(jobDirectory);
       }
     }
@@ -505,7 +505,7 @@
       Path jobDirectory = new Path(jobConf.get("distcp.job.dir", "/"));
       FileSystem fs = FileSystem.get(jobConf);
       
-      if(!jobDirectory.equals("/")) {
+      if (!jobDirectory.equals("/")) {
         fs.delete(jobDirectory);
       }
     }
@@ -520,7 +520,7 @@
         destFileSys = 
           FileSystem.getNamed(job.get("copy.dest.fs", "local"), job);
         destPath = new Path(job.get("copy.dest.path", "/"));
-        if(!destFileSys.exists(destPath)) {
+        if (!destFileSys.exists(destPath)) {
           return;
         }
       } catch(IOException ioe) {
@@ -572,7 +572,7 @@
         
       } catch(Exception e) {
         reporter.setStatus("Failed to copy from: " + (Text)key);
-        if(ignoreReadFailures) {
+        if (ignoreReadFailures) {
           return;
         } else {
           throw new IOException("Failed to copy from: " + (Text)key);
@@ -597,10 +597,10 @@
       }
       protocol = protocol.toLowerCase();
       
-      if(HDFS.equalsIgnoreCase(protocol) || "file".equalsIgnoreCase(protocol) ||
-         S3.equalsIgnoreCase(protocol)) {
+      if (HDFS.equalsIgnoreCase(protocol) || "file".equalsIgnoreCase(protocol) ||
+          S3.equalsIgnoreCase(protocol)) {
         mapper = new FSCopyFilesMapper();
-      } else if("http".equalsIgnoreCase(protocol)) {
+      } else if ("http".equalsIgnoreCase(protocol)) {
         mapper = new HTTPCopyFilesMapper();
       }
       
@@ -616,7 +616,7 @@
     String srcListURIScheme = srcListURI.getScheme();
     String srcListURIPath = srcListURI.getPath();
     
-    if("file".equalsIgnoreCase(srcListURIScheme)) {
+    if ("file".equalsIgnoreCase(srcListURIScheme)) {
       fis = new BufferedReader(new FileReader(srcListURIPath));
     } else if (srcListURIScheme != null &&
                HDFS.equalsIgnoreCase(srcListURIScheme)) {
@@ -624,7 +624,7 @@
       fis = new BufferedReader(
                                new InputStreamReader(fs.open(new Path(srcListURIPath)))
                                );
-    } else if("http".equalsIgnoreCase(srcListURIScheme)) {
+    } else if ("http".equalsIgnoreCase(srcListURIScheme)) {
       //Copy the file 
       URL url = srcListURI.toURL();
       HttpURLConnection connection = (HttpURLConnection)url.openConnection();
@@ -641,7 +641,7 @@
     try {
       String uri = null;
       while((uri = fis.readLine()) != null) {
-        if(!uri.startsWith("#")) {
+        if (!uri.startsWith("#")) {
           // Check source is parseable as URI by passing via getPathURI.
           toURI(uri);
           uris.add(uri);
@@ -670,7 +670,7 @@
     
     for(int i=0; i < uris.length; ++i) {
       // uri must start w/ protocol 
-      if(uris[i].startsWith(protocol)) {
+      if (uris[i].startsWith(protocol)) {
         protocolURIs.add(uris[i]);
       }
     }
@@ -711,7 +711,7 @@
     //Create the task-specific mapper 
     CopyFilesMapper mapper = null;
     String[] srcPaths = null;
-    if(srcAsList) {
+    if (srcAsList) {
       //Ugly?!
       
       //Source paths
@@ -719,7 +719,7 @@
       
       // Protocol - 'hdfs://'
       String[] dfsUrls = parseInputFile(HDFS, srcPaths);
-      if(dfsUrls != null) {
+      if (dfsUrls != null) {
         for(int i=0; i < dfsUrls.length; ++i) {
           copy(conf, dfsUrls[i], destPath, false, ignoreReadFailures);
         }
@@ -727,7 +727,7 @@
       
       // Protocol - 'file://'
       String[] localUrls = parseInputFile("file", srcPaths);
-      if(localUrls != null) {
+      if (localUrls != null) {
         for(int i=0; i < localUrls.length; ++i) {
           copy(conf, localUrls[i], destPath, false, ignoreReadFailures);
         }
@@ -735,13 +735,13 @@
       
       // Protocol - 'http://'
       String[] httpUrls = parseInputFile("http", srcPaths);
-      if(httpUrls != null) {
+      if (httpUrls != null) {
         srcPaths = httpUrls;
         mapper = CopyMapperFactory.getMapper(conf, "http");
       } else {   
         // Protocol - 's3://'
         String[] s3Urls = parseInputFile(S3, srcPaths);
-        if(s3Urls != null) {
+        if (s3Urls != null) {
           srcPaths = s3Urls;
           mapper = CopyMapperFactory.getMapper(conf, S3);
         } else {   

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/DiskChecker.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/DiskChecker.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/DiskChecker.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/DiskChecker.java Thu Apr 19 14:34:41 2007
@@ -22,22 +22,22 @@
     }
   }
       
-  public static void checkDir( File dir ) throws DiskErrorException {
-    if( !dir.exists() && !dir.mkdirs() )
-      throw new DiskErrorException( "can not create directory: " 
-                                    + dir.toString() );
+  public static void checkDir(File dir) throws DiskErrorException {
+    if (!dir.exists() && !dir.mkdirs())
+      throw new DiskErrorException("can not create directory: " 
+                                   + dir.toString());
         
-    if ( !dir.isDirectory() )
-      throw new DiskErrorException( "not a directory: " 
-                                    + dir.toString() );
+    if (!dir.isDirectory())
+      throw new DiskErrorException("not a directory: " 
+                                   + dir.toString());
             
-    if( !dir.canRead() )
-      throw new DiskErrorException( "directory is not readable: " 
-                                    + dir.toString() );
+    if (!dir.canRead())
+      throw new DiskErrorException("directory is not readable: " 
+                                   + dir.toString());
             
-    if( !dir.canWrite() )
-      throw new DiskErrorException( "directory is not writable: " 
-                                    + dir.toString() );
+    if (!dir.canWrite())
+      throw new DiskErrorException("directory is not writable: " 
+                                   + dir.toString());
   }
 
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/MergeSort.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/MergeSort.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/MergeSort.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/MergeSort.java Thu Apr 19 14:34:41 2007
@@ -40,7 +40,7 @@
     // Insertion sort on smallest arrays
     if (length < 7) {
       for (int i=low; i<high; i++) {
-        for (int j=i;j > low ; j--) {
+        for (int j=i;j > low; j--) {
           I.set(dest[j-1]);
           J.set(dest[j]);
           if (comparator.compare(I, J)>0)

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/PriorityQueue.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/PriorityQueue.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/PriorityQueue.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/PriorityQueue.java Thu Apr 19 14:34:41 2007
@@ -56,11 +56,11 @@
    * @return true if element is added, false otherwise.
    */
   public boolean insert(Object element){
-    if(size < maxSize){
+    if (size < maxSize){
       put(element);
       return true;
     }
-    else if(size > 0 && !lessThan(element, top())){
+    else if (size > 0 && !lessThan(element, top())){
       heap[1] = element;
       adjustTop();
       return true;

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/RunJar.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/RunJar.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/RunJar.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/RunJar.java Thu Apr 19 14:34:41 2007
@@ -112,7 +112,7 @@
       System.err.println("Mkdirs failed to create " + tmpDir);
       System.exit(-1);
     }
-    final File workDir = File.createTempFile("hadoop-unjar", "", tmpDir );
+    final File workDir = File.createTempFile("hadoop-unjar", "", tmpDir);
     workDir.delete();
     workDir.mkdirs();
     if (!workDir.isDirectory()) {

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/StringUtils.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/StringUtils.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/StringUtils.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/StringUtils.java Thu Apr 19 14:34:41 2007
@@ -208,20 +208,20 @@
    * @param startTime start time
    */
   public static String formatTimeDiff(long finishTime, long startTime){
-    StringBuffer buf = new StringBuffer() ;
+    StringBuffer buf = new StringBuffer();
     
-    long timeDiff = finishTime - startTime ; 
-    long hours = timeDiff / (60*60*1000) ;
-    long rem = (timeDiff % (60*60*1000)) ;
+    long timeDiff = finishTime - startTime; 
+    long hours = timeDiff / (60*60*1000);
+    long rem = (timeDiff % (60*60*1000));
     long minutes =  rem / (60*1000);
-    rem = rem % (60*1000) ;
-    long seconds = rem / 1000 ;
+    rem = rem % (60*1000);
+    long seconds = rem / 1000;
     
-    if( hours != 0 ){
+    if (hours != 0){
       buf.append(hours);
       buf.append("hrs, ");
     }
-    if( minutes != 0 ){
+    if (minutes != 0){
       buf.append(minutes);
       buf.append("mins, ");
     }
@@ -243,9 +243,9 @@
   public static String getFormattedTimeWithDiff(DateFormat dateFormat, 
                                                 long finishTime, long startTime){
     StringBuffer buf = new StringBuffer();
-    if( 0 != finishTime ) {
+    if (0 != finishTime) {
       buf.append(dateFormat.format(new Date(finishTime)));
-      if( 0 != startTime ){
+      if (0 != startTime){
         buf.append(" (" + formatTimeDiff(finishTime , startTime) + ")");
       }
     }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/Tool.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/Tool.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/Tool.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/Tool.java Thu Apr 19 14:34:41 2007
@@ -33,5 +33,5 @@
    * @return exit code
    * @throws Exception
    */
-  int run( String [] args ) throws Exception;
+  int run(String [] args) throws Exception;
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/util/ToolBase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/util/ToolBase.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/util/ToolBase.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/util/ToolBase.java Thu Apr 19 14:34:41 2007
@@ -108,7 +108,7 @@
       .create("jt");
     Option oconf = OptionBuilder.withArgName("configuration file")
       .hasArg()
-      .withDescription("specify an application configuration file" )
+      .withDescription("specify an application configuration file")
       .create("conf");
     Option property = OptionBuilder.withArgName("property=value")
       .hasArgs()
@@ -129,22 +129,22 @@
    * @param conf Configuration to be modified
    * @param line User-specified generic options
    */
-  static private void processGeneralOptions( Configuration conf,
-                                             CommandLine line ) {
-    if(line.hasOption("fs")) {
+  static private void processGeneralOptions(Configuration conf,
+                                            CommandLine line) {
+    if (line.hasOption("fs")) {
       conf.set("fs.default.name", line.getOptionValue("fs"));
     }
         
-    if(line.hasOption("jt")) {
+    if (line.hasOption("jt")) {
       conf.set("mapred.job.tracker", line.getOptionValue("jt"));
     }
-    if(line.hasOption("conf")) {
+    if (line.hasOption("conf")) {
       conf.addFinalResource(new Path(line.getOptionValue("conf")));
     }
-    if(line.hasOption('D')) {
+    if (line.hasOption('D')) {
       String[] property = line.getOptionValues('D');
       for(int i=0; i<property.length-1; i=i+2) {
-        if(property[i]!=null)
+        if (property[i]!=null)
           conf.set(property[i], property[i+1]);
       }
     }           
@@ -157,13 +157,13 @@
    * @param args User-specified arguments
   * @return Command-specific arguments
    */
-  static private String[] parseGeneralOptions( Configuration conf, 
-                                               String[] args ) {
+  static private String[] parseGeneralOptions(Configuration conf, 
+                                              String[] args) {
     Options opts = buildGeneralOptions();
     CommandLineParser parser = new GnuParser();
     try {
-      CommandLine line = parser.parse( opts, args, true );
-      processGeneralOptions( conf, line );
+      CommandLine line = parser.parse(opts, args, true);
+      processGeneralOptions(conf, line);
       return line.getArgs();
     } catch(ParseException e) {
       LOG.warn("options parsing failed: "+e.getMessage());
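
The ToolBase hunks above only reformat the generic-option handling that maps -fs, -jt, -conf and -D onto configuration settings before a tool sees its own arguments. A rough, self-contained Commons CLI sketch of that flow is below; a plain Map stands in for Configuration, the '=' value separator for -D is an assumption, and only the option names and the pairwise -D loop follow the hunks.

  // Illustrative sketch of the generic-option parsing shown above (Commons CLI 1.x).
  import java.util.Arrays;
  import java.util.HashMap;
  import java.util.Map;
  import org.apache.commons.cli.CommandLine;
  import org.apache.commons.cli.CommandLineParser;
  import org.apache.commons.cli.GnuParser;
  import org.apache.commons.cli.OptionBuilder;
  import org.apache.commons.cli.Options;
  import org.apache.commons.cli.ParseException;

  public class GenericOptionsSketch {
    public static void main(String[] args) throws ParseException {
      Options opts = new Options();
      opts.addOption(OptionBuilder.withArgName("namenode:port").hasArg()
          .withDescription("specify a namenode").create("fs"));
      opts.addOption(OptionBuilder.withArgName("property=value").hasArgs()
          .withValueSeparator()   // '=' separator assumed so the key/value pairs split below
          .withDescription("use value for given property").create('D'));

      CommandLineParser parser = new GnuParser();
      // true => stop at the first non-option, leaving the tool's own args in getArgs()
      CommandLine line = parser.parse(opts, args, true);

      Map<String, String> conf = new HashMap<String, String>();   // stand-in for Configuration
      if (line.hasOption("fs")) {
        conf.put("fs.default.name", line.getOptionValue("fs"));
      }
      if (line.hasOption('D')) {
        String[] property = line.getOptionValues('D');
        for (int i = 0; i < property.length - 1; i = i + 2) {     // pair keys with values
          if (property[i] != null) {
            conf.put(property[i], property[i + 1]);
          }
        }
      }
      System.out.println("conf=" + conf + " toolArgs=" + Arrays.toString(line.getArgs()));
    }
  }

  // Hypothetical invocation:
  //   java GenericOptionsSketch -fs namenode.example.com:9000 -D mapred.reduce.tasks=5 in out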

Modified: lucene/hadoop/trunk/src/test/checkstyle.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/checkstyle.xml?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/checkstyle.xml (original)
+++ lucene/hadoop/trunk/src/test/checkstyle.xml Thu Apr 19 14:34:41 2007
@@ -159,7 +159,7 @@
         <module name="ArrayTypeStyle"/>
         <module name="Indentation">
             <property name="basicOffset" value="2" />
-            <property name="caseIndent" value="2" />
+            <property name="caseIndent" value="0" />
         </module> 
         <module name="TodoComment"/>
         <module name="UpperEll"/>

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/conf/TestConfiguration.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/conf/TestConfiguration.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/conf/TestConfiguration.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/conf/TestConfiguration.java Thu Apr 19 14:34:41 2007
@@ -80,7 +80,7 @@
       String gotVal = conf.get(p.name);
       String gotRawVal = (String)conf.getObject(p.name);
       assertEq(p.val, gotRawVal);
-      if(p.expectEval == UNSPEC) {
+      if (p.expectEval == UNSPEC) {
         // expansion is system-dependent (uses System properties)
         // can't do exact match so just check that all variables got expanded
         assertTrue(gotVal != null && -1 == gotVal.indexOf("${"));

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java Thu Apr 19 14:34:41 2007
@@ -72,11 +72,11 @@
  */
 public class ClusterTestDFS extends TestCase implements FSConstants {
   private static final Log LOG =
-      LogFactory.getLog("org.apache.hadoop.dfs.ClusterTestDFS");
+    LogFactory.getLog("org.apache.hadoop.dfs.ClusterTestDFS");
 
   private static Configuration conf = new Configuration();
   private static int BUFFER_SIZE =
-      conf.getInt("io.file.buffer.size", 4096);
+    conf.getInt("io.file.buffer.size", 4096);
 
   private static int testCycleNumber = 0;
 
@@ -99,7 +99,7 @@
    * (array length should be prime).
    */
   private static final int[] FILE_SIZES =
-      {100000, 100001, 4095, 4096, 4097, 1000000, 1000001};
+  {100000, 100001, 4095, 4096, 4097, 1000000, 1000001};
 
   /** DFS file counts to permute over in multiple test cycles
    * (array length should be prime).
@@ -130,10 +130,10 @@
     conf.setBoolean("test.dfs.same.host.targets.allowed", true);
   }
 
- /**
-  * Remove old files from temp area used by this test case and be sure
-  * base temp directory can be created.
-  */
+  /**
+   * Remove old files from temp area used by this test case and be sure
+   * base temp directory can be created.
+   */
   protected void prepareTempFileSpace() {
     if (baseDir.exists()) {
       try { // start from a blank slate
@@ -144,7 +144,7 @@
     baseDir.mkdirs();
     if (!baseDir.isDirectory()) {
       throw new RuntimeException("Value of root directory property test.dfs.data for dfs test is not a directory: "
-          + baseDirSpecified);
+                                 + baseDirSpecified);
     }
   }
 
@@ -157,15 +157,15 @@
    * @throws Exception
    */
   public void testFsPseudoDistributed()
-      throws Exception {
+    throws Exception {
     while (testCycleNumber < TEST_PERMUTATION_MAX &&
-        testCycleNumber < TEST_PERMUTATION_MAX_CEILING) {
-        int blockSize = BLOCK_SIZES[testCycleNumber % BLOCK_SIZES.length];
-        int numFiles = FILE_COUNTS[testCycleNumber % FILE_COUNTS.length];
-        int fileSize = FILE_SIZES[testCycleNumber % FILE_SIZES.length];
-        prepareTempFileSpace();
-        testFsPseudoDistributed(fileSize, numFiles, blockSize,
-            (testCycleNumber % 2) + 2);
+           testCycleNumber < TEST_PERMUTATION_MAX_CEILING) {
+      int blockSize = BLOCK_SIZES[testCycleNumber % BLOCK_SIZES.length];
+      int numFiles = FILE_COUNTS[testCycleNumber % FILE_COUNTS.length];
+      int fileSize = FILE_SIZES[testCycleNumber % FILE_SIZES.length];
+      prepareTempFileSpace();
+      testFsPseudoDistributed(fileSize, numFiles, blockSize,
+                              (testCycleNumber % 2) + 2);
     }
   }
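
The permutation loop above indexes BLOCK_SIZES, FILE_COUNTS and FILE_SIZES with the same cycle counter modulo each array's length, which is why the surrounding comments ask for prime array lengths: co-prime lengths keep the combinations from repeating early. A small self-contained sketch of that effect (the array values here are hypothetical; only the indexing scheme matches the test):

  // With co-prime array lengths, cycle % length over each array only revisits
  // a combination after lcm(lengths) cycles, maximising distinct coverage.
  public class PermutationSketch {
    public static void main(String[] args) {
      int[] blockSizes = {4096, 8192, 16384};           // length 3 (hypothetical values)
      int[] fileCounts = {1, 10, 100, 1000, 10000};     // length 5 (hypothetical values)
      java.util.Set<String> seen = new java.util.HashSet<String>();
      for (int cycle = 0; cycle < 15; cycle++) {        // 15 = 3 * 5 = lcm(3, 5)
        seen.add(blockSizes[cycle % blockSizes.length]
            + "/" + fileCounts[cycle % fileCounts.length]);
      }
      System.out.println(seen.size());                  // 15: every pairing hit exactly once
    }
  }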
 
@@ -181,7 +181,7 @@
    */
   public void testFsPseudoDistributed(long nBytes, int numFiles,
                                       int blockSize, int initialDNcount)
-      throws Exception {
+    throws Exception {
     long startTime = System.currentTimeMillis();
     int bufferSize = Math.min(BUFFER_SIZE, blockSize);
     boolean checkDataDirsEmpty = false;
@@ -202,7 +202,7 @@
     conf.setInt("dfs.namenode.handler.count", 3);
     if (false) { //  use MersenneTwister, if present
       conf.set("hadoop.random.class",
-                          "org.apache.hadoop.util.MersenneTwister");
+               "org.apache.hadoop.util.MersenneTwister");
     }
     conf.setLong("dfs.blockreport.intervalMsec", 50*1000L);
     conf.setLong("dfs.datanode.startupMsec", 15*1000L);
@@ -241,7 +241,7 @@
       }
       try {
         assertTrue("insufficient datanodes for test to continue",
-            (listOfDataNodeDaemons.size() >= 2));
+                   (listOfDataNodeDaemons.size() >= 2));
 
         //
         //          wait for datanodes to report in
@@ -290,7 +290,7 @@
         //
         //                     take one datanode down
         iDatanodeClosed =
-            currentTestCycleNumber % listOfDataNodeDaemons.size();
+          currentTestCycleNumber % listOfDataNodeDaemons.size();
         DataNode dn = (DataNode) listOfDataNodeDaemons.get(iDatanodeClosed);
         msg("shutdown datanode daemon " + iDatanodeClosed +
             " dn=" + dn.data);
@@ -434,7 +434,7 @@
       if (randomDataGeneratorCtor == null) {
         // lazy init
         String rndDataGenClassname =
-            conf.get("hadoop.random.class", "java.util.Random");
+          conf.get("hadoop.random.class", "java.util.Random");
         Class clazz = Class.forName(rndDataGenClassname);
         randomDataGeneratorCtor = clazz.getConstructor(new Class[]{Long.TYPE});
       }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java Thu Apr 19 14:34:41 2007
@@ -119,16 +119,16 @@
     testFsPseudoDistributed(3);
   }
   
-  private void testFsPseudoDistributed( int datanodeNum ) throws Exception {
+  private void testFsPseudoDistributed(int datanodeNum) throws Exception {
     try {
       prepareTempFileSpace();
 
       configureDFS();
       startDFS(datanodeNum);
 
-      if( logfh == null )
+      if (logfh == null)
         try {
-          logfh = new BufferedReader( new FileReader( logFile ) );
+          logfh = new BufferedReader(new FileReader(logFile));
         } catch (FileNotFoundException e) {
           // TODO Auto-generated catch block
           throw new AssertionFailedError("Log file does not exist: "+logFile);
@@ -136,16 +136,16 @@
     
       // create a directory
       try {
-        assertTrue(dfsClient.mkdirs( new UTF8( "/data") ));
-        assertMkdirs( "/data", false );
-      } catch ( IOException ioe ) {
+        assertTrue(dfsClient.mkdirs(new UTF8("/data")));
+        assertMkdirs("/data", false);
+      } catch (IOException ioe) {
       	ioe.printStackTrace();
       }
        
       try {
-        assertTrue(dfsClient.mkdirs( new UTF8( "data") ));
-        assertMkdirs( "data", true );
-      } catch ( IOException ioe ) {
+        assertTrue(dfsClient.mkdirs(new UTF8("data")));
+        assertMkdirs("data", true);
+      } catch (IOException ioe) {
        	ioe.printStackTrace();
       }
       
@@ -153,41 +153,41 @@
       // create a file with 1 data block
       try {
         createFile("/data/xx", 1);
-        assertCreate( "/data/xx", 1, false );
-      } catch( IOException ioe ) {
-    	assertCreate( "/data/xx", 1, true );
+        assertCreate("/data/xx", 1, false);
+      } catch(IOException ioe) {
+    	assertCreate("/data/xx", 1, true);
       }
     
       // create a file with 2 data blocks
       try {
         createFile("/data/yy", BLOCK_SIZE+1);
-        assertCreate( "/data/yy", BLOCK_SIZE+1, false );
-      } catch( IOException ioe ) {
-    	assertCreate( "/data/yy", BLOCK_SIZE+1, true );
+        assertCreate("/data/yy", BLOCK_SIZE+1, false);
+      } catch(IOException ioe) {
+    	assertCreate("/data/yy", BLOCK_SIZE+1, true);
       }
 
       // create an existing file
       try {
         createFile("/data/xx", 2);
-        assertCreate( "/data/xx", 2, false );
-      } catch( IOException ioe ) {
-      	assertCreate( "/data/xx", 2, true );
+        assertCreate("/data/xx", 2, false);
+      } catch(IOException ioe) {
+      	assertCreate("/data/xx", 2, true);
       }
     
       // delete the file
       try {
-        dfsClient.delete( new UTF8("/data/yy") );
+        dfsClient.delete(new UTF8("/data/yy"));
         assertDelete("/data/yy", false);
-      } catch( IOException ioe ) {
+      } catch(IOException ioe) {
         ioe.printStackTrace();
       }
 
     
       // rename the file
       try {
-        dfsClient.rename( new UTF8("/data/xx"), new UTF8("/data/yy") );
-        assertRename( "/data/xx", "/data/yy", false );
-      } catch( IOException ioe ) {
+        dfsClient.rename(new UTF8("/data/xx"), new UTF8("/data/yy"));
+        assertRename("/data/xx", "/data/yy", false);
+      } catch(IOException ioe) {
       	ioe.printStackTrace();
       }
 
@@ -199,9 +199,9 @@
       }
       
       try {
-        dfsClient.rename( new UTF8("/data/xx"), new UTF8("/data/yy") );    
-        assertRename( "/data/xx", "/data/yy", true );
-      } catch( IOException ioe) {
+        dfsClient.rename(new UTF8("/data/xx"), new UTF8("/data/yy"));    
+        assertRename("/data/xx", "/data/yy", true);
+      } catch(IOException ioe) {
     	ioe.printStackTrace();
       }
         
@@ -217,7 +217,7 @@
     }
   }
 
-  private void createFile( String filename, long fileSize ) throws IOException { 
+  private void createFile(String filename, long fileSize) throws IOException { 
     //
     //           write filesize of data to file
     //
@@ -232,12 +232,12 @@
         if ((nBytesWritten + buffer.length) > fileSize) {
           int pb = (int) (fileSize - nBytesWritten);
           byte[] bufferPartial = new byte[pb];
-          for( int i=0; i<pb; i++) {
+          for(int i=0; i<pb; i++) {
             bufferPartial[i]='a';
           }
           nos.write(buffer);
         } else {
-          for( int i=0; i<buffer.length;i++) {
+          for(int i=0; i<buffer.length;i++) {
             buffer[i]='a';
           }
           nos.write(buffer);
@@ -249,27 +249,27 @@
     }
   }
 
-  private void assertMkdirs( String fileName, boolean failed ) {
+  private void assertMkdirs(String fileName, boolean failed) {
     assertHasLogged("NameNode.mkdirs: " +fileName, DIR_LOG_HEADER_LEN+1);
     assertHasLogged("NameSystem.mkdirs: "+fileName, DIR_LOG_HEADER_LEN);
-    if( failed )
+    if (failed)
       assertHasLogged("FSDirectory.mkdirs: "
                       +"failed to create directory "+fileName, DIR_LOG_HEADER_LEN);
     else
-      assertHasLogged( "FSDirectory.mkdirs: created directory "+fileName, DIR_LOG_HEADER_LEN);
+      assertHasLogged("FSDirectory.mkdirs: created directory "+fileName, DIR_LOG_HEADER_LEN);
   }
   
-  private void assertCreate( String fileName, int filesize, boolean failed ) {
+  private void assertCreate(String fileName, int filesize, boolean failed) {
     assertHasLogged("NameNode.create: file "+fileName, DIR_LOG_HEADER_LEN+1);
     assertHasLogged("NameSystem.startFile: file "+fileName, DIR_LOG_HEADER_LEN);
-    if( failed ) {
+    if (failed) {
       assertHasLogged("NameSystem.startFile: "
                       +"failed to create file " + fileName, DIR_LOG_HEADER_LEN);
     } else {
       assertHasLogged("NameSystem.allocateBlock: "+fileName, BLOCK_LOG_HEADER_LEN);
       int blockNum = (filesize/BLOCK_SIZE*BLOCK_SIZE==filesize)?
         filesize/BLOCK_SIZE : 1+filesize/BLOCK_SIZE;
-      for( int i=1; i<blockNum; i++) {
+      for(int i=1; i<blockNum; i++) {
         assertHasLogged("NameNode.addBlock: file "+fileName, BLOCK_LOG_HEADER_LEN+1);
         assertHasLogged("NameSystem.getAdditionalBlock: file "+fileName, BLOCK_LOG_HEADER_LEN);
         assertHasLogged("NameSystem.allocateBlock: "+fileName, BLOCK_LOG_HEADER_LEN);
@@ -283,42 +283,42 @@
     }
   }
   
-  private void assertDelete( String fileName, boolean failed ) {
+  private void assertDelete(String fileName, boolean failed) {
     assertHasLogged("NameNode.delete: "+fileName, DIR_LOG_HEADER_LEN+1);
     assertHasLogged("NameSystem.delete: "+fileName, DIR_LOG_HEADER_LEN);
     assertHasLogged("FSDirectory.delete: "+fileName, DIR_LOG_HEADER_LEN);
-    if( failed )
+    if (failed)
       assertHasLogged("FSDirectory.unprotectedDelete: "
-                      +"failed to remove "+fileName, DIR_LOG_HEADER_LEN );
+                      +"failed to remove "+fileName, DIR_LOG_HEADER_LEN);
     else
       assertHasLogged("FSDirectory.unprotectedDelete: "
                       +fileName+" is removed", DIR_LOG_HEADER_LEN);
   }
   
-  private void assertRename( String src, String dst, boolean failed ) {
+  private void assertRename(String src, String dst, boolean failed) {
     assertHasLogged("NameNode.rename: "+src+" to "+dst, DIR_LOG_HEADER_LEN+1);
-    assertHasLogged("NameSystem.renameTo: "+src+" to "+dst, DIR_LOG_HEADER_LEN );
-    assertHasLogged("FSDirectory.renameTo: "+src+" to "+dst, DIR_LOG_HEADER_LEN );
-    if( failed )
+    assertHasLogged("NameSystem.renameTo: "+src+" to "+dst, DIR_LOG_HEADER_LEN);
+    assertHasLogged("FSDirectory.renameTo: "+src+" to "+dst, DIR_LOG_HEADER_LEN);
+    if (failed)
       assertHasLogged("FSDirectory.unprotectedRenameTo: "
                       +"failed to rename "+src+" to "+dst, DIR_LOG_HEADER_LEN);
     else
       assertHasLogged("FSDirectory.unprotectedRenameTo: "
-                      +src+" is renamed to "+dst, DIR_LOG_HEADER_LEN );
+                      +src+" is renamed to "+dst, DIR_LOG_HEADER_LEN);
   }
   
-  private void assertHasLogged( String target, int headerLen ) {
+  private void assertHasLogged(String target, int headerLen) {
     String line;
     boolean notFound = true;
     try {
-      while( notFound && (line=logfh.readLine()) != null ) {
-        if(line.length()>headerLen && line.startsWith(target, headerLen))
+      while(notFound && (line=logfh.readLine()) != null) {
+        if (line.length()>headerLen && line.startsWith(target, headerLen))
           notFound = false;
       }
     } catch(java.io.IOException e) {
       throw new AssertionFailedError("error reading the log file");
     }
-    if(notFound) {
+    if (notFound) {
       throw new AssertionFailedError(target+" not logged");
     }
   }
@@ -341,7 +341,7 @@
     conf.setInt("hadoop.logfile.size", 1000000000);
   }
   
-  private void startDFS( int dataNodeNum) throws IOException {
+  private void startDFS(int dataNodeNum) throws IOException {
     //
     //          start a NameNode
     String nameNodeSocketAddr = "localhost:" + nameNodePort;
@@ -375,7 +375,7 @@
     //          wait for datanodes to report in
     try {
       awaitQuiescence();
-    } catch( InterruptedException e) {
+    } catch(InterruptedException e) {
       e.printStackTrace();
     }
       
@@ -395,7 +395,7 @@
 
     //
     // shut down datanode daemons (this takes advantage of being same-process)
-    msg("begin shutdown of all datanode daemons" );
+    msg("begin shutdown of all datanode daemons");
 
     for (int i = 0; i < dataNodeDaemons.size(); i++) {
       DataNode dataNode = (DataNode) dataNodeDaemons.get(i);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/DFSTestUtil.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/DFSTestUtil.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/DFSTestUtil.java Thu Apr 19 14:34:41 2007
@@ -69,7 +69,7 @@
     
     MyFile() {
       int nLevels = gen.nextInt(maxLevels);
-      if(nLevels != 0) {
+      if (nLevels != 0) {
         int[] levels = new int[nLevels];
         for (int idx = 0; idx < nLevels; idx++) {
           levels[idx] = gen.nextInt(10);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java Thu Apr 19 14:34:41 2007
@@ -90,16 +90,16 @@
       String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
       String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
       
-      log("Finalize with existing previous dir",numDirs);
+      log("Finalize with existing previous dir", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
-      cluster = new MiniDFSCluster(conf,1,StartupOption.REGULAR);
+      cluster = new MiniDFSCluster(conf, 1, StartupOption.REGULAR);
       cluster.finalizeCluster(conf);
       checkResult(nameNodeDirs, dataNodeDirs);
 
-      log("Finalize without existing previous dir",numDirs);
+      log("Finalize without existing previous dir", numDirs);
       cluster.finalizeCluster(conf);
       checkResult(nameNodeDirs, dataNodeDirs);
 

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java Thu Apr 19 14:34:41 2007
@@ -87,7 +87,7 @@
    */
   void startNameNodeShouldFail(StartupOption operation) {
     try {
-      cluster = new MiniDFSCluster(conf,0,operation); // should fail
+      cluster = new MiniDFSCluster(conf, 0, operation); // should fail
       throw new AssertionError("NameNode should have failed to start");
     } catch (Exception expected) {
       // expected
@@ -100,7 +100,7 @@
    */
   void startDataNodeShouldFail(StartupOption operation) {
     try {
-      cluster.startDataNodes(conf,1,false,operation,null); // should fail
+      cluster.startDataNodes(conf, 1, false, operation, null); // should fail
       throw new AssertionError("DataNode should have failed to start");
     } catch (Exception expected) {
       // expected
@@ -121,47 +121,47 @@
       String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
       String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
       
-      log("Normal NameNode rollback",numDirs);
+      log("Normal NameNode rollback", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      cluster = new MiniDFSCluster(conf,0,StartupOption.ROLLBACK);
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
       checkResult(NAME_NODE, nameNodeDirs);
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
-      log("Normal DataNode rollback",numDirs);
+      log("Normal DataNode rollback", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      cluster = new MiniDFSCluster(conf,0,StartupOption.ROLLBACK);
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
-      cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
+      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
       checkResult(DATA_NODE, dataNodeDirs);
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
-      log("NameNode rollback without existing previous dir",numDirs);
+      log("NameNode rollback without existing previous dir", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       startNameNodeShouldFail(StartupOption.ROLLBACK);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
-      log("DataNode rollback without existing previous dir",numDirs);
+      log("DataNode rollback without existing previous dir", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
-      cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
+      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
-      log("DataNode rollback with future stored layout version in previous",numDirs);
+      log("DataNode rollback with future stored layout version in previous", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      cluster = new MiniDFSCluster(conf,0,StartupOption.ROLLBACK);
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
-      UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
+      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
                                          new StorageInfo(Integer.MIN_VALUE,
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          UpgradeUtilities.getCurrentFsscTime(cluster)));
@@ -170,13 +170,13 @@
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
       
-      log("DataNode rollback with newer fsscTime in previous",numDirs);
+      log("DataNode rollback with newer fsscTime in previous", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      cluster = new MiniDFSCluster(conf,0,StartupOption.ROLLBACK);
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
-      UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
+      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
                                          new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          Long.MAX_VALUE));
@@ -185,7 +185,7 @@
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
-      log("NameNode rollback with no edits file",numDirs);
+      log("NameNode rollback with no edits file", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
       for (File f : baseDirs) { 
@@ -194,7 +194,7 @@
       startNameNodeShouldFail(StartupOption.ROLLBACK);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
-      log("NameNode rollback with no image file",numDirs);
+      log("NameNode rollback with no image file", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
       for (File f : baseDirs) { 
@@ -203,7 +203,7 @@
       startNameNodeShouldFail(StartupOption.ROLLBACK);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
-      log("NameNode rollback with corrupt version file",numDirs);
+      log("NameNode rollback with corrupt version file", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
       for (File f : baseDirs) { 
@@ -212,10 +212,10 @@
       startNameNodeShouldFail(StartupOption.ROLLBACK);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
-      log("NameNode rollback with old layout version in previous",numDirs);
+      log("NameNode rollback with old layout version in previous", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      UpgradeUtilities.createVersionFile(NAME_NODE,baseDirs,
+      UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
                                          new StorageInfo(1,
                                                          UpgradeUtilities.getCurrentNamespaceID(null),
                                                          UpgradeUtilities.getCurrentFsscTime(null)));

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSShellGenericOptions.java Thu Apr 19 14:34:41 2007
@@ -95,7 +95,7 @@
     execute(args, namenode);        
   }
     
-  private void execute( String [] args, String namenode ) {
+  private void execute(String [] args, String namenode) {
     FsShell shell=new FsShell();
     FileSystem fs=null;
     try {
@@ -103,14 +103,14 @@
       fs = new DistributedFileSystem(
                                      DataNode.createSocketAddr(namenode), 
                                      shell.getConf());
-      assertTrue( "Directory does not get created", 
-                  fs.isDirectory(new Path("/data")) );
+      assertTrue("Directory does not get created", 
+                 fs.isDirectory(new Path("/data")));
       fs.delete(new Path("/data"));
     } catch (Exception e) {
       System.err.println(e.getMessage());
       e.printStackTrace();
     } finally {
-      if( fs!=null ) {
+      if (fs!=null) {
         try {
           fs.close();
         } catch (IOException ignored) {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java Thu Apr 19 14:34:41 2007
@@ -169,19 +169,19 @@
     StorageInfo[] versions = initializeVersions();
     UpgradeUtilities.createStorageDirs(
                                        NAME_NODE, conf.getStrings("dfs.name.dir"), "current");
-    cluster = new MiniDFSCluster(conf,0,StartupOption.REGULAR);
+    cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
     StorageInfo nameNodeVersion = new StorageInfo(
                                                   UpgradeUtilities.getCurrentLayoutVersion(),
                                                   UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                   UpgradeUtilities.getCurrentFsscTime(cluster));
-    log("NameNode version info",NAME_NODE,null,nameNodeVersion);
+    log("NameNode version info", NAME_NODE, null, nameNodeVersion);
     for (int i = 0; i < versions.length; i++) {
       File[] storage = UpgradeUtilities.createStorageDirs(
                                                           DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
-      log("DataNode version info",DATA_NODE,i,versions[i]);
+      log("DataNode version info", DATA_NODE, i, versions[i]);
       UpgradeUtilities.createVersionFile(DATA_NODE, storage, versions[i]);
       try {
-        cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
+        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
       } catch (Exception ignore) {
         // Ignore.  The asserts below will check for problems.
         // ignore.printStackTrace();


