hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cutt...@apache.org
Subject svn commit: r530556 [6/12] - in /lucene/hadoop/trunk: ./ src/contrib/abacus/src/java/org/apache/hadoop/abacus/ src/contrib/hbase/src/java/org/apache/hadoop/hbase/ src/contrib/hbase/src/test/org/apache/hadoop/hbase/ src/contrib/streaming/src/java/org/ap...
Date Thu, 19 Apr 2007 21:34:53 GMT
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java Thu Apr 19 14:34:41 2007
@@ -29,19 +29,19 @@
  */
 class InconsistentFSStateException extends IOException {
 
-  public InconsistentFSStateException( File dir, String descr ) {
-    super( "Directory " + getFilePath( dir )
-           + " is in an inconsistent state: " + descr );
+  public InconsistentFSStateException(File dir, String descr) {
+    super("Directory " + getFilePath(dir)
+          + " is in an inconsistent state: " + descr);
   }
 
-  public InconsistentFSStateException( File dir, String descr, Throwable ex ) {
-    this( dir, descr + "\n" + StringUtils.stringifyException(ex) );
+  public InconsistentFSStateException(File dir, String descr, Throwable ex) {
+    this(dir, descr + "\n" + StringUtils.stringifyException(ex));
   }
   
-  private static String getFilePath( File dir ) {
+  private static String getFilePath(File dir) {
     try {
       return dir.getCanonicalPath();
-    } catch( IOException e ) {}
+    } catch(IOException e) {}
     return dir.getPath();
   }
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/IncorrectVersionException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/IncorrectVersionException.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/IncorrectVersionException.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/IncorrectVersionException.java Thu Apr 19 14:34:41 2007
@@ -27,16 +27,16 @@
  */
 class IncorrectVersionException extends IOException {
 
-  public IncorrectVersionException( int versionReported, String ofWhat ) {
-    this( versionReported, ofWhat, FSConstants.LAYOUT_VERSION );
+  public IncorrectVersionException(int versionReported, String ofWhat) {
+    this(versionReported, ofWhat, FSConstants.LAYOUT_VERSION);
   }
   
-  public IncorrectVersionException( int versionReported,
-                                    String ofWhat,
-                                    int versionExpected ) {
-    super( "Unexpected version " 
-           + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
-           + versionReported + ". Expecting = " + versionExpected + "." );
+  public IncorrectVersionException(int versionReported,
+                                   String ofWhat,
+                                   int versionExpected) {
+    super("Unexpected version " 
+          + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
+          + versionReported + ". Expecting = " + versionExpected + ".");
   }
 
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/JspHelper.java Thu Apr 19 14:34:41 2007
@@ -34,7 +34,7 @@
   static Configuration conf = new Configuration();
 
   static int defaultChunkSizeToView = 
-    conf.getInt("dfs.default.chunk.view.size",32 * 1024);
+    conf.getInt("dfs.default.chunk.view.size", 32 * 1024);
   static Random rand = new Random();
 
   public JspHelper() {
@@ -140,9 +140,9 @@
     in.close();
     out.print(new String(buf));
   }
-  public void DFSNodesStatus( ArrayList<DatanodeDescriptor> live,
-                              ArrayList<DatanodeDescriptor> dead ) {
-    if ( fsn != null )
+  public void DFSNodesStatus(ArrayList<DatanodeDescriptor> live,
+                             ArrayList<DatanodeDescriptor> dead) {
+    if (fsn != null)
       fsn.DFSNodesStatus(live, dead);
   }
   public void addTableHeader(JspWriter out) throws IOException {
@@ -161,7 +161,7 @@
     out.print("<tr>");
       
     for (int i = 0; i < columns.length; i++) {
-      if( row/2*2 == row ) {//even
+      if (row/2*2 == row) {//even
         out.print("<td style=\"vertical-align: top;background-color:LightGrey;\"><B>"+columns[i]+"</B><br></td>");
       } else {
         out.print("<td style=\"vertical-align: top;background-color:LightBlue;\"><B>"+columns[i]+"</B><br></td>");
@@ -175,7 +175,7 @@
   }
 
   public String getSafeModeText() {
-    if( ! fsn.isInSafeMode() )
+    if (!fsn.isInSafeMode())
       return "";
     return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
   }
@@ -197,29 +197,29 @@
       int sortOrder = SORT_ORDER_ASC;
             
       public NodeComapare(String field, String order) {
-        if ( field.equals( "lastcontact" ) ) {
+        if (field.equals("lastcontact")) {
           sortField = FIELD_LAST_CONTACT;
-        } else if ( field.equals( "size" ) ) {
+        } else if (field.equals("size")) {
           sortField = FIELD_SIZE;
-        } else if ( field.equals( "blocks" ) ) {
+        } else if (field.equals("blocks")) {
           sortField = FIELD_BLOCKS;
-        } else if ( field.equals( "pcused" ) ) {
+        } else if (field.equals("pcused")) {
           sortField = FIELD_DISK_USED;
         } else {
           sortField = FIELD_NAME;
         }
                 
-        if ( order.equals("DSC") ) {
+        if (order.equals("DSC")) {
           sortOrder = SORT_ORDER_DSC;
         } else {
           sortOrder = SORT_ORDER_ASC;
         }
       }
 
-      public int compare( DatanodeDescriptor d1,
-                          DatanodeDescriptor d2 ) {
+      public int compare(DatanodeDescriptor d1,
+                         DatanodeDescriptor d2) {
         int ret = 0;
-        switch ( sortField ) {
+        switch (sortField) {
         case FIELD_LAST_CONTACT:
           ret = (int) (d2.getLastUpdate() - d1.getLastUpdate());
           break;
@@ -228,21 +228,21 @@
           break;
         case FIELD_SIZE:
           long  dlong = d1.getCapacity() - d2.getCapacity();
-          ret = (dlong < 0) ? -1 : ( (dlong > 0) ? 1 : 0 );
+          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
           break;
         case FIELD_DISK_USED:
           double ddbl =((d2.getRemaining()*1.0/d2.getCapacity())-
                         (d1.getRemaining()*1.0/d1.getCapacity()));
-          ret = (ddbl < 0) ? -1 : ( (ddbl > 0) ? 1 : 0 );
+          ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
           break;
         case FIELD_NAME: 
           ret = d1.getHostName().compareTo(d2.getHostName());
           break;
         }
-        return ( sortOrder == SORT_ORDER_DSC ) ? -ret : ret;
+        return (sortOrder == SORT_ORDER_DSC) ? -ret : ret;
       }
     }
         
-    Collections.sort( nodes, new NodeComapare( field, order ) );
+    Collections.sort(nodes, new NodeComapare(field, order));
   }
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Thu Apr 19 14:34:41 2007
@@ -81,7 +81,7 @@
   }
     
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.NameNode");
-  public static final Log stateChangeLog = LogFactory.getLog( "org.apache.hadoop.dfs.StateChange");
+  public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.dfs.StateChange");
 
   private FSNamesystem namesystem;
   private Server server;
@@ -96,7 +96,7 @@
   /** Format a new filesystem.  Destroys any filesystem that may already
    * exist at this location.  **/
   public static void format(Configuration conf) throws IOException {
-    format( conf, false );
+    format(conf, false);
   }
 
   private class NameNodeMetrics implements Updater {
@@ -208,7 +208,7 @@
   public NameNode(Configuration conf) throws IOException {
     InetSocketAddress addr = 
       DataNode.createSocketAddr(conf.get("fs.default.name"));
-    init( addr.getHostName(), addr.getPort(), conf );
+    init(addr.getHostName(), addr.getPort(), conf);
   }
 
   /**
@@ -221,7 +221,7 @@
   public NameNode(String bindAddress, int port, 
                   Configuration conf
                   ) throws IOException {
-    init( bindAddress, port, conf );
+    init(bindAddress, port, conf);
   }
 
   /**
@@ -239,7 +239,7 @@
    * Stop all NameNode threads and wait for all to finish.
    */
   public void stop() {
-    if (! stopRequested) {
+    if (!stopRequested) {
       stopRequested = true;
       namesystem.close();
       emptier.interrupt();
@@ -255,7 +255,7 @@
    */
   public LocatedBlock[] open(String src) throws IOException {
     String clientMachine = Server.getRemoteAddress();
-    if ( clientMachine == null ) {
+    if (clientMachine == null) {
       clientMachine = "";
     }
     Object openResults[] = namesystem.open(clientMachine, new UTF8(src));
@@ -282,7 +282,7 @@
                              long blockSize
                              ) throws IOException {
     String clientMachine = Server.getRemoteAddress();
-    if ( clientMachine == null ) {
+    if (clientMachine == null) {
       clientMachine = "";
     }
     stateChangeLog.debug("*DIR* NameNode.create: file "
@@ -303,10 +303,10 @@
     return new LocatedBlock(b, targets);
   }
 
-  public boolean setReplication( String src, 
-                                 short replication
-                                 ) throws IOException {
-    return namesystem.setReplication( src, replication );
+  public boolean setReplication(String src, 
+                                short replication
+                                ) throws IOException {
+    return namesystem.setReplication(src, replication);
   }
     
   /**
@@ -328,8 +328,8 @@
    */
   public void abandonBlock(Block b, String src) throws IOException {
     stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
-                         +b.getBlockName()+" of file "+src );
-    if (! namesystem.abandonBlock(b, new UTF8(src))) {
+                         +b.getBlockName()+" of file "+src);
+    if (!namesystem.abandonBlock(b, new UTF8(src))) {
       throw new IOException("Cannot abandon block during write to " + src);
     }
   }
@@ -337,13 +337,13 @@
    */
   public void abandonFileInProgress(String src, 
                                     String holder) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.abandonFileInProgress:" + src );
+    stateChangeLog.debug("*DIR* NameNode.abandonFileInProgress:" + src);
     namesystem.abandonFileInProgress(new UTF8(src), new UTF8(holder));
   }
   /**
    */
   public boolean complete(String src, String clientName) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName );
+    stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName);
     int returnCode = namesystem.completeFile(new UTF8(src), new UTF8(clientName));
     if (returnCode == STILL_WAITING) {
       return false;
@@ -375,7 +375,7 @@
   /**
    */
   public String[][] getHints(String src, long start, long len) throws IOException {
-    return namesystem.getDatanodeHints( src, start, len );
+    return namesystem.getDatanodeHints(src, start, len);
   }
     
   public long getBlockSize(String filename) throws IOException {
@@ -385,7 +385,7 @@
   /**
    */
   public boolean rename(String src, String dst) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst );
+    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
     if (!checkPathLength(dst)) {
       throw new IOException("rename: Pathname too long.  Limit " 
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
@@ -400,7 +400,7 @@
   /**
    */
   public boolean delete(String src) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.delete: " + src );
+    stateChangeLog.debug("*DIR* NameNode.delete: " + src);
     return namesystem.delete(new UTF8(src));
   }
 
@@ -431,12 +431,12 @@
   /**
    */
   public boolean mkdirs(String src) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src );
+    stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src);
     if (!checkPathLength(src)) {
       throw new IOException("mkdirs: Pathname too long.  Limit " 
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
-    return namesystem.mkdirs( src );
+    return namesystem.mkdirs(src);
   }
 
   /** @deprecated */ @Deprecated
@@ -502,8 +502,8 @@
   /**
    * @inheritDoc
    */
-  public boolean setSafeMode( SafeModeAction action ) throws IOException {
-    switch( action ) {
+  public boolean setSafeMode(SafeModeAction action) throws IOException {
+    switch(action) {
     case SAFEMODE_LEAVE: // leave safe mode
       namesystem.leaveSafeMode();
       break;
@@ -567,11 +567,11 @@
   ////////////////////////////////////////////////////////////////
   /** 
    */
-  public DatanodeRegistration register( DatanodeRegistration nodeReg,
-                                        String networkLocation
-                                        ) throws IOException {
-    verifyVersion( nodeReg.getVersion() );
-    namesystem.registerDatanode( nodeReg, networkLocation );
+  public DatanodeRegistration register(DatanodeRegistration nodeReg,
+                                       String networkLocation
+                                       ) throws IOException {
+    verifyVersion(nodeReg.getVersion());
+    namesystem.registerDatanode(nodeReg, networkLocation);
       
     return nodeReg;
   }
@@ -581,25 +581,25 @@
    * Return a block-oriented command for the datanode to execute.
    * This will be either a transfer or a delete operation.
    */
-  public DatanodeCommand sendHeartbeat( DatanodeRegistration nodeReg,
-                                        long capacity, 
-                                        long remaining,
-                                        int xmitsInProgress,
-                                        int xceiverCount) throws IOException {
+  public DatanodeCommand sendHeartbeat(DatanodeRegistration nodeReg,
+                                       long capacity, 
+                                       long remaining,
+                                       int xmitsInProgress,
+                                       int xceiverCount) throws IOException {
     Object xferResults[] = new Object[2];
     xferResults[0] = xferResults[1] = null;
     Object deleteList[] = new Object[1];
     deleteList[0] = null; 
 
-    verifyRequest( nodeReg );
-    if( namesystem.gotHeartbeat( nodeReg, capacity, remaining, 
-                                 xceiverCount, 
-                                 xmitsInProgress,
-                                 xferResults,
-                                 deleteList)) {
+    verifyRequest(nodeReg);
+    if (namesystem.gotHeartbeat(nodeReg, capacity, remaining, 
+                                xceiverCount, 
+                                xmitsInProgress,
+                                xferResults,
+                                deleteList)) {
       // request block report from the datanode
       assert(xferResults[0] == null && deleteList[0] == null);
-      return new DatanodeCommand( DataNodeAction.DNA_REGISTER );
+      return new DatanodeCommand(DataNodeAction.DNA_REGISTER);
     }
         
     //
@@ -622,27 +622,27 @@
     return null;
   }
 
-  public DatanodeCommand blockReport( DatanodeRegistration nodeReg,
-                                      Block blocks[]) throws IOException {
-    verifyRequest( nodeReg );
+  public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
+                                     Block blocks[]) throws IOException {
+    verifyRequest(nodeReg);
     stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
-                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks" );
+                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks");
 
-    Block blocksToDelete[] = namesystem.processReport( nodeReg, blocks );
-    if( blocksToDelete != null && blocksToDelete.length > 0 )
-      return new BlockCommand( blocksToDelete );
-    if( getFSImage().isUpgradeFinalized() )
-      return new DatanodeCommand( DataNodeAction.DNA_FINALIZE );
+    Block blocksToDelete[] = namesystem.processReport(nodeReg, blocks);
+    if (blocksToDelete != null && blocksToDelete.length > 0)
+      return new BlockCommand(blocksToDelete);
+    if (getFSImage().isUpgradeFinalized())
+      return new DatanodeCommand(DataNodeAction.DNA_FINALIZE);
     return null;
   }
 
   public void blockReceived(DatanodeRegistration nodeReg, 
                             Block blocks[]) throws IOException {
-    verifyRequest( nodeReg );
+    verifyRequest(nodeReg);
     stateChangeLog.debug("*BLOCK* NameNode.blockReceived: "
-                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks." );
+                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks.");
     for (int i = 0; i < blocks.length; i++) {
-      namesystem.blockReceived( nodeReg, blocks[i] );
+      namesystem.blockReceived(nodeReg, blocks[i]);
     }
   }
 
@@ -653,12 +653,12 @@
                           String msg) throws IOException {
     // Log error message from datanode
     LOG.info("Report from " + nodeReg.getName() + ": " + msg);
-    if( errorCode == DatanodeProtocol.NOTIFY ) {
+    if (errorCode == DatanodeProtocol.NOTIFY) {
       return;
     }
-    verifyRequest( nodeReg );
-    if( errorCode == DatanodeProtocol.DISK_ERROR ) {
-      namesystem.removeDatanode( nodeReg );            
+    verifyRequest(nodeReg);
+    if (errorCode == DatanodeProtocol.DISK_ERROR) {
+      namesystem.removeDatanode(nodeReg);            
     }
   }
     
@@ -675,10 +675,10 @@
    * @param nodeReg data node registration
    * @throws IOException
    */
-  public void verifyRequest( DatanodeRegistration nodeReg ) throws IOException {
-    verifyVersion( nodeReg.getVersion() );
-    if( ! namesystem.getRegistrationID().equals( nodeReg.getRegistrationID() ))
-      throw new UnregisteredDatanodeException( nodeReg );
+  public void verifyRequest(DatanodeRegistration nodeReg) throws IOException {
+    verifyVersion(nodeReg.getVersion());
+    if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID()))
+      throw new UnregisteredDatanodeException(nodeReg);
   }
     
   /**
@@ -687,9 +687,9 @@
    * @param version
    * @throws IOException
    */
-  public void verifyVersion( int version ) throws IOException {
-    if( version != LAYOUT_VERSION )
-      throw new IncorrectVersionException( version, "data node" );
+  public void verifyVersion(int version) throws IOException {
+    if (version != LAYOUT_VERSION)
+      throw new IncorrectVersionException(version, "data node");
   }
 
   /**
@@ -739,22 +739,22 @@
   private static boolean format(Configuration conf,
                                 boolean isConfirmationNeeded
                                 ) throws IOException {
-    Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs( conf );
-    for( Iterator<File> it = dirsToFormat.iterator(); it.hasNext(); ) {
+    Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    for(Iterator<File> it = dirsToFormat.iterator(); it.hasNext();) {
       File curDir = it.next();
-      if( ! curDir.exists() )
+      if (!curDir.exists())
         continue;
-      if( isConfirmationNeeded ) {
+      if (isConfirmationNeeded) {
         System.err.print("Re-format filesystem in " + curDir +" ? (Y or N) ");
         if (!(System.in.read() == 'Y')) {
           System.err.println("Format aborted in "+ curDir);
           return true;
         }
-        while( System.in.read() != '\n' ); // discard the enter-key
+        while(System.in.read() != '\n'); // discard the enter-key
       }
     }
 
-    FSNamesystem nsys = new FSNamesystem(new FSImage( dirsToFormat ));
+    FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat));
     nsys.dir.fsImage.format();
     return false;
   }
@@ -765,38 +765,38 @@
   }
 
   private static StartupOption parseArguments(String args[], 
-                                              Configuration conf ) {
+                                              Configuration conf) {
     int argsLen = (args == null) ? 0 : args.length;
     StartupOption startOpt = StartupOption.REGULAR;
-    for( int i=0; i < argsLen; i++ ) {
+    for(int i=0; i < argsLen; i++) {
       String cmd = args[i];
-      if( "-format".equalsIgnoreCase(cmd) ) {
+      if ("-format".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.FORMAT;
-      } else if( "-regular".equalsIgnoreCase(cmd) ) {
+      } else if ("-regular".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.REGULAR;
-      } else if( "-upgrade".equalsIgnoreCase(cmd) ) {
+      } else if ("-upgrade".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.UPGRADE;
-      } else if( "-rollback".equalsIgnoreCase(cmd) ) {
+      } else if ("-rollback".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.ROLLBACK;
       } else
         return null;
     }
-    conf.setObject( "dfs.namenode.startup", startOpt );
+    conf.setObject("dfs.namenode.startup", startOpt);
     return startOpt;
   }
 
-  static NameNode createNameNode( String argv[], 
-                                  Configuration conf ) throws IOException {
-    if( conf == null )
+  static NameNode createNameNode(String argv[], 
+                                 Configuration conf) throws IOException {
+    if (conf == null)
       conf = new Configuration();
-    StartupOption startOpt = parseArguments( argv, conf );
-    if( startOpt == null ) {
+    StartupOption startOpt = parseArguments(argv, conf);
+    if (startOpt == null) {
       printUsage();
       return null;
     }
       
-    if( startOpt == StartupOption.FORMAT ) {
-      boolean aborted = format( conf, true );
+    if (startOpt == StartupOption.FORMAT) {
+      boolean aborted = format(conf, true);
       System.exit(aborted ? 1 : 0);
     }
       
@@ -808,11 +808,11 @@
    */
   public static void main(String argv[]) throws Exception {
     try {
-      NameNode namenode = createNameNode( argv, null );
-      if( namenode != null )
+      NameNode namenode = createNameNode(argv, null);
+      if (namenode != null)
         namenode.join();
-    } catch ( Throwable e ) {
-      LOG.error( StringUtils.stringifyException( e ) );
+    } catch (Throwable e) {
+      LOG.error(StringUtils.stringifyException(e));
       System.exit(-1);
     }
   }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java Thu Apr 19 14:34:41 2007
@@ -95,9 +95,9 @@
    * @throws IOException
    */
   public NamenodeFsck(Configuration conf,
-      NameNode nn,
-      Map<String,String[]> pmap,
-      HttpServletResponse response) throws IOException {
+                      NameNode nn,
+                      Map<String,String[]> pmap,
+                      HttpServletResponse response) throws IOException {
     this.conf = conf;
     this.nn = nn;
     this.out = response.getWriter();
@@ -215,13 +215,13 @@
       }
       res.corruptFiles++;
       switch(fixing) {
-        case FIXING_NONE:
-          break;
-        case FIXING_MOVE:
-          lostFoundMove(file, blocks);
-          break;
-        case FIXING_DELETE:
-          nn.delete(file.getPath());
+      case FIXING_NONE:
+        break;
+      case FIXING_MOVE:
+        lostFoundMove(file, blocks);
+        break;
+      case FIXING_DELETE:
+        nn.delete(file.getPath());
       }
     }
     if (showFiles) {
@@ -237,9 +237,9 @@
   }
   
   private void lostFoundMove(DFSFileInfo file, LocatedBlock[] blocks)
-  throws IOException {
+    throws IOException {
     DFSClient dfs = new DFSClient(DataNode.createSocketAddr(
-        conf.get("fs.default.name", "local")), conf);
+                                                            conf.get("fs.default.name", "local")), conf);
     if (!lfInited) {
       lostFoundInit(dfs);
     }
@@ -304,8 +304,8 @@
    * bad. Both places should be refactored to provide a method to copy blocks
    * around.
    */
-      private void copyBlock(DFSClient dfs, LocatedBlock lblock,
-          OutputStream fos) throws Exception {
+  private void copyBlock(DFSClient dfs, LocatedBlock lblock,
+                         OutputStream fos) throws Exception {
     int failures = 0;
     InetSocketAddress targetAddr = null;
     TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
@@ -398,11 +398,11 @@
    * Pick the best node from which to stream the data.
    * That's the local one, if available.
    */
-      Random r = new Random();
+  Random r = new Random();
   private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
-      TreeSet<DatanodeInfo> deadNodes) throws IOException {
+                                TreeSet<DatanodeInfo> deadNodes) throws IOException {
     if ((nodes == null) ||
-            (nodes.length - deadNodes.size() < 1)) {
+        (nodes.length - deadNodes.size() < 1)) {
       throw new IOException("No live nodes contain current block");
     }
     DatanodeInfo chosenNode = null;
@@ -433,12 +433,12 @@
         lfInitedOk = dfs.mkdirs(lfName);
         lostFound = lfName;
       } else        if (!dfs.isDirectory(lfName)) {
-          LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
-          lfInitedOk = false;
-        }  else { // exists and isDirectory
-          lostFound = lfName;
-          lfInitedOk = true;
-        }
+        LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
+        lfInitedOk = false;
+      }  else { // exists and isDirectory
+        lostFound = lfName;
+        lfInitedOk = true;
+      }
     }  catch (Exception e) {
       e.printStackTrace();
       lfInitedOk = false;
@@ -584,7 +584,7 @@
       res.append("\n Total size:\t" + totalSize + " B");
       res.append("\n Total blocks:\t" + totalBlocks);
       if (totalBlocks > 0) res.append(" (avg. block size "
-          + (totalSize / totalBlocks) + " B)");
+                                      + (totalSize / totalBlocks) + " B)");
       res.append("\n Total dirs:\t" + totalDirs);
       res.append("\n Total files:\t" + totalFiles);
       if (missingSize > 0) {

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamespaceInfo.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamespaceInfo.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NamespaceInfo.java Thu Apr 19 14:34:41 2007
@@ -41,8 +41,8 @@
     buildVersion = null;
   }
   
-  public NamespaceInfo( int nsID, long cT ) {
-    super( FSConstants.LAYOUT_VERSION, nsID, cT );
+  public NamespaceInfo(int nsID, long cT) {
+    super(FSConstants.LAYOUT_VERSION, nsID, cT);
     buildVersion = Storage.getBuildVersion();
   }
   
@@ -60,14 +60,14 @@
   }
 
   public void write(DataOutput out) throws IOException {
-    UTF8.writeString( out, getBuildVersion() );
-    out.writeInt( getLayoutVersion() );
-    out.writeInt( getNamespaceID() );
-    out.writeLong( getCTime() );
+    UTF8.writeString(out, getBuildVersion());
+    out.writeInt(getLayoutVersion());
+    out.writeInt(getNamespaceID());
+    out.writeLong(getCTime());
   }
 
   public void readFields(DataInput in) throws IOException {
-    buildVersion = UTF8.readString( in );
+    buildVersion = UTF8.readString(in);
     layoutVersion = in.readInt();
     namespaceID = in.readInt();
     cTime = in.readLong();

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SafeModeException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SafeModeException.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SafeModeException.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SafeModeException.java Thu Apr 19 14:34:41 2007
@@ -10,8 +10,8 @@
  */
 public class SafeModeException extends IOException {
 
-  public SafeModeException( String text, FSNamesystem.SafeModeInfo mode  ) {
-    super( text + ". Name node is in safe mode.\n" + mode.getTurnOffTip());
+  public SafeModeException(String text, FSNamesystem.SafeModeInfo mode ) {
+    super(text + ". Name node is in safe mode.\n" + mode.getTurnOffTip());
   }
 
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/SecondaryNameNode.java Thu Apr 19 14:34:41 2007
@@ -426,9 +426,9 @@
    */
   public static class GetImageServlet extends HttpServlet {
     @SuppressWarnings("unchecked")
-      public void doGet(HttpServletRequest request,
-                        HttpServletResponse response
-                        ) throws ServletException, IOException {
+    public void doGet(HttpServletRequest request,
+                      HttpServletResponse response
+                      ) throws ServletException, IOException {
       Map<String,String[]> pmap = request.getParameterMap();
       try {
         ServletContext context = getServletContext();

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java Thu Apr 19 14:34:41 2007
@@ -38,7 +38,7 @@
 /**
  * Common class for storage information.
  * 
- * TODO namespaceID should be long and computed as hash( address + port )
+ * TODO namespaceID should be long and computed as hash(address + port)
  * @author Konstantin Shvachko
  */
 class StorageInfo {
@@ -47,16 +47,16 @@
   long  cTime;          // creation timestamp
   
   StorageInfo () {
-    this( 0, 0, 0L );
+    this(0, 0, 0L);
   }
   
-  StorageInfo( int layoutV, int nsID, long cT ) {
+  StorageInfo(int layoutV, int nsID, long cT) {
     layoutVersion = layoutV;
     namespaceID = nsID;
     cTime = cT;
   }
   
-  StorageInfo( StorageInfo from ) {
+  StorageInfo(StorageInfo from) {
     layoutVersion = from.layoutVersion;
     namespaceID = from.namespaceID;
     cTime = from.cTime;
@@ -124,7 +124,7 @@
     File              root; // root directory
     FileLock          lock; // storage lock
     
-    StorageDirectory( File dir ) {
+    StorageDirectory(File dir) {
       this.root = dir;
       this.lock = null;
     }
@@ -135,17 +135,17 @@
      * @throws IOException if file cannot be read or contains inconsistent data
      */
     void read() throws IOException {
-      read( getVersionFile() );
+      read(getVersionFile());
     }
     
-    void read( File from ) throws IOException {
-      RandomAccessFile file = new RandomAccessFile( from, "rws" );
+    void read(File from) throws IOException {
+      RandomAccessFile file = new RandomAccessFile(from, "rws");
       try {
-        FileInputStream in = new FileInputStream( file.getFD() );
+        FileInputStream in = new FileInputStream(file.getFD());
         file.seek(0);
         Properties props = new Properties();
-        props.load( in );
-        getFields( props, this );
+        props.load(in);
+        getFields(props, this);
       } finally {
         file.close();
       }
@@ -157,17 +157,17 @@
      * @throws IOException
      */
     void write() throws IOException {
-      write( getVersionFile() );
+      write(getVersionFile());
     }
 
-    void write( File to ) throws IOException {
+    void write(File to) throws IOException {
       Properties props = new Properties();
-      setFields( props, this );
-      RandomAccessFile file = new RandomAccessFile( to, "rws" );
+      setFields(props, this);
+      RandomAccessFile file = new RandomAccessFile(to, "rws");
       try {
         file.seek(0);
-        FileOutputStream out = new FileOutputStream( file.getFD() );
-        props.store( out, null );
+        FileOutputStream out = new FileOutputStream(file.getFD());
+        props.store(out, null);
       } finally {
         file.close();
       }
@@ -188,33 +188,33 @@
      */
     void clearDirectory() throws IOException {
       File curDir = this.getCurrentDir();
-      if( curDir.exists() )
-        if( ! (FileUtil.fullyDelete( curDir )) )
-          throw new IOException("Cannot remove current directory: " + curDir );
-      if( ! curDir.mkdirs() )
-        throw new IOException( "Cannot create directory " + curDir );
+      if (curDir.exists())
+        if (!(FileUtil.fullyDelete(curDir)))
+          throw new IOException("Cannot remove current directory: " + curDir);
+      if (!curDir.mkdirs())
+        throw new IOException("Cannot create directory " + curDir);
     }
 
     File getCurrentDir() {
-      return new File( root, STORAGE_DIR_CURRENT );
+      return new File(root, STORAGE_DIR_CURRENT);
     }
     File getVersionFile() {
-      return new File( new File( root, STORAGE_DIR_CURRENT ), STORAGE_FILE_VERSION );
+      return new File(new File(root, STORAGE_DIR_CURRENT), STORAGE_FILE_VERSION);
     }
     File getPreviousVersionFile() {
-      return new File( new File( root, STORAGE_DIR_PREVIOUS ), STORAGE_FILE_VERSION );
+      return new File(new File(root, STORAGE_DIR_PREVIOUS), STORAGE_FILE_VERSION);
     }
     File getPreviousDir() {
-      return new File( root, STORAGE_DIR_PREVIOUS );
+      return new File(root, STORAGE_DIR_PREVIOUS);
     }
     File getPreviousTmp() {
-      return new File( root, STORAGE_TMP_PREVIOUS );
+      return new File(root, STORAGE_TMP_PREVIOUS);
     }
     File getRemovedTmp() {
-      return new File( root, STORAGE_TMP_REMOVED );
+      return new File(root, STORAGE_TMP_REMOVED);
     }
     File getFinalizedTmp() {
-      return new File( root, STORAGE_TMP_FINALIZED );
+      return new File(root, STORAGE_TMP_FINALIZED);
     }
 
     /**
@@ -226,40 +226,40 @@
      * @throws {@link InconsistentFSStateException} if directory state is not 
      * consistent and cannot be recovered 
      */
-    StorageState analyzeStorage( StartupOption startOpt ) throws IOException {
+    StorageState analyzeStorage(StartupOption startOpt) throws IOException {
       assert root != null : "root is null";
       String rootPath = root.getCanonicalPath();
       try { // check that storage exists
-        if( ! root.exists() ) {
+        if (!root.exists()) {
           // storage directory does not exist
-          if( startOpt != StartupOption.FORMAT ) {
-            LOG.info( "Storage directory " + rootPath + " does not exist." );
+          if (startOpt != StartupOption.FORMAT) {
+            LOG.info("Storage directory " + rootPath + " does not exist.");
             return StorageState.NON_EXISTENT;
           }
-          LOG.info( rootPath + " does not exist. Creating ..." );
-          if( ! root.mkdirs() )
-            throw new IOException( "Cannot create directory " + rootPath );
+          LOG.info(rootPath + " does not exist. Creating ...");
+          if (!root.mkdirs())
+            throw new IOException("Cannot create directory " + rootPath);
         }
         // or is inaccessible
-        if( ! root.isDirectory() ) {
-          LOG.info( rootPath + "is not a directory." );
+        if (!root.isDirectory()) {
+        LOG.info(rootPath + " is not a directory.");
           return StorageState.NON_EXISTENT;
         }
-        if( ! root.canWrite() ) {
-          LOG.info( "Cannot access storage directory " + rootPath );
+        if (!root.canWrite()) {
+          LOG.info("Cannot access storage directory " + rootPath);
           return StorageState.NON_EXISTENT;
         }
-      } catch( SecurityException ex ) {
-        LOG.info( "Cannot access storage directory " + rootPath, ex );
+      } catch(SecurityException ex) {
+        LOG.info("Cannot access storage directory " + rootPath, ex);
         return StorageState.NON_EXISTENT;
       }
 
       this.lock(); // lock storage if it exists
 
-      if( startOpt == StartupOption.FORMAT )
+      if (startOpt == StartupOption.FORMAT)
         return StorageState.NOT_FORMATTED;
       // check whether a conversion is required
-      if( isConversionNeeded( this ) )
+      if (isConversionNeeded(this))
         return StorageState.CONVERT;
       // check whether current directory is valid
       File versionFile = getVersionFile();
@@ -271,48 +271,48 @@
       boolean hasRemovedTmp = getRemovedTmp().exists();
       boolean hasFinalizedTmp = getFinalizedTmp().exists();
 
-      if( !(hasPreviousTmp || hasRemovedTmp || hasFinalizedTmp) ) {
+      if (!(hasPreviousTmp || hasRemovedTmp || hasFinalizedTmp)) {
         // no temp dirs - no recovery
-        if( hasCurrent )
+        if (hasCurrent)
           return StorageState.NORMAL;
-        if( hasPrevious )
-          throw new InconsistentFSStateException( root,
-                      "version file in current directory it is missing." );
+        if (hasPrevious)
+          throw new InconsistentFSStateException(root,
+                                                 "version file in current directory is missing.");
         return StorageState.NOT_FORMATTED;
       }
 
-      if( (hasPreviousTmp?1:0)+(hasRemovedTmp?1:0)+(hasFinalizedTmp?1:0) > 1 )
+      if ((hasPreviousTmp?1:0)+(hasRemovedTmp?1:0)+(hasFinalizedTmp?1:0) > 1)
         // more than one temp dirs
-        throw new InconsistentFSStateException( root,
-                    "too many temporary directories." );
+        throw new InconsistentFSStateException(root,
+                                               "too many temporary directories.");
 
       // # of temp dirs == 1 should either recover or complete a transition
-      if( hasFinalizedTmp ) {
-        if( hasPrevious )
-          throw new InconsistentFSStateException( root,
-              STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_FINALIZED
-              + "cannot exist together." );
+      if (hasFinalizedTmp) {
+        if (hasPrevious)
+          throw new InconsistentFSStateException(root,
+                                                 STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_FINALIZED
+                                                 + " cannot exist together.");
         return StorageState.COMPLETE_FINALIZE;
       }
 
-      if( hasPreviousTmp ) {
-        if( hasPrevious )
-          throw new InconsistentFSStateException( root,
-              STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_PREVIOUS
-              + " cannot exist together." );
-        if( hasCurrent )
+      if (hasPreviousTmp) {
+        if (hasPrevious)
+          throw new InconsistentFSStateException(root,
+                                                 STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_PREVIOUS
+                                                 + " cannot exist together.");
+        if (hasCurrent)
           return StorageState.COMPLETE_UPGRADE;
         return StorageState.RECOVER_UPGRADE;
       }
       
       assert hasRemovedTmp : "hasRemovedTmp must be true";
-      if( !(hasCurrent ^ hasPrevious) )
-        throw new InconsistentFSStateException( root,
-            "one and only one directory " + STORAGE_DIR_CURRENT 
-            + " or " + STORAGE_DIR_PREVIOUS 
-            + " must be present when " + STORAGE_TMP_REMOVED
-            + " exists." );
-      if( hasCurrent )
+      if (!(hasCurrent ^ hasPrevious))
+        throw new InconsistentFSStateException(root,
+                                               "one and only one directory " + STORAGE_DIR_CURRENT 
+                                               + " or " + STORAGE_DIR_PREVIOUS 
+                                               + " must be present when " + STORAGE_TMP_REMOVED
+                                               + " exists.");
+      if (hasCurrent)
         return StorageState.COMPLETE_ROLLBACK;
       return StorageState.RECOVER_ROLLBACK;
     }
@@ -323,39 +323,39 @@
      * @param curState specifies what/how the state should be recovered
      * @throws IOException
      */
-    void doRecover( StorageState curState ) throws IOException {
+    void doRecover(StorageState curState) throws IOException {
       File curDir = getCurrentDir();
       String rootPath = root.getCanonicalPath();
-      switch( curState ) {
-        case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
-          LOG.info( "Completing previous upgrade for storage directory " 
-                    + rootPath + "." );
-          rename( getPreviousTmp(), getPreviousDir() );
-          return;
-        case RECOVER_UPGRADE:   // mv previous.tmp -> current
-          LOG.info( "Recovering storage directory " + rootPath
-                    + " from previous upgrade." );
-          if( curDir.exists() )
-            deleteDir( curDir );
-          rename( getPreviousTmp(), curDir );
-          return;
-        case COMPLETE_ROLLBACK: // rm removed.tmp
-          LOG.info( "Completing previous rollback for storage directory "
-                    + rootPath + "." );
-          deleteDir( getRemovedTmp() );
-          return;
-        case RECOVER_ROLLBACK:  // mv removed.tmp -> current
-          LOG.info( "Recovering storage directory " + rootPath
-                    + " from previous rollback." );
-          rename( getRemovedTmp(), curDir );
-          return;
-        case COMPLETE_FINALIZE: // rm finalized.tmp
-          LOG.info( "Completing previous finalize for storage directory "
-                    + rootPath + "." );
-          deleteDir( getFinalizedTmp() );
-          return;
-        default:
-          throw new IOException( "Unexpected FS state: " + curState );
+      switch(curState) {
+      case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
+        LOG.info("Completing previous upgrade for storage directory " 
+                 + rootPath + ".");
+        rename(getPreviousTmp(), getPreviousDir());
+        return;
+      case RECOVER_UPGRADE:   // mv previous.tmp -> current
+        LOG.info("Recovering storage directory " + rootPath
+                 + " from previous upgrade.");
+        if (curDir.exists())
+          deleteDir(curDir);
+        rename(getPreviousTmp(), curDir);
+        return;
+      case COMPLETE_ROLLBACK: // rm removed.tmp
+        LOG.info("Completing previous rollback for storage directory "
+                 + rootPath + ".");
+        deleteDir(getRemovedTmp());
+        return;
+      case RECOVER_ROLLBACK:  // mv removed.tmp -> current
+        LOG.info("Recovering storage directory " + rootPath
+                 + " from previous rollback.");
+        rename(getRemovedTmp(), curDir);
+        return;
+      case COMPLETE_FINALIZE: // rm finalized.tmp
+        LOG.info("Completing previous finalize for storage directory "
+                 + rootPath + ".");
+        deleteDir(getFinalizedTmp());
+        return;
+      default:
+        throw new IOException("Unexpected FS state: " + curState);
       }
     }
 
@@ -365,22 +365,22 @@
      * @throws IOException if locking fails
      */
     void lock() throws IOException {
-      File lockF = new File( root, STORAGE_FILE_LOCK );
+      File lockF = new File(root, STORAGE_FILE_LOCK);
       lockF.deleteOnExit();
-      RandomAccessFile file = new RandomAccessFile( lockF, "rws" );
+      RandomAccessFile file = new RandomAccessFile(lockF, "rws");
       try {
         this.lock = file.getChannel().tryLock();
-      } catch( IOException e ) {
-        LOG.info( StringUtils.stringifyException(e) );
+      } catch(IOException e) {
+        LOG.info(StringUtils.stringifyException(e));
         file.close();
         throw e;
       }
-      if( lock == null ) {
+      if (lock == null) {
         String msg = "Cannot lock storage " + this.root 
-                      + ". The directory is already locked.";
-        LOG.info( msg );
+          + ". The directory is already locked.";
+        LOG.info(msg);
         file.close();
-        throw new IOException( msg );
+        throw new IOException(msg);
       }
     }
 
@@ -390,7 +390,7 @@
      * @throws IOException
      */
     void unlock() throws IOException {
-      if( this.lock == null )
+      if (this.lock == null)
         return;
       this.lock.release();
       lock.channel().close();
@@ -400,18 +400,18 @@
   /**
    * Create empty storage info of the specified type
    */
-  Storage( NodeType type ) {
+  Storage(NodeType type) {
     super();
     this.storageType = type;
   }
   
-  Storage( NodeType type, int nsID, long cT ) {
-    super( FSConstants.LAYOUT_VERSION, nsID, cT );
+  Storage(NodeType type, int nsID, long cT) {
+    super(FSConstants.LAYOUT_VERSION, nsID, cT);
     this.storageType = type;
   }
   
-  Storage( NodeType type, StorageInfo storageInfo ) {
-    super( storageInfo );
+  Storage(NodeType type, StorageInfo storageInfo) {
+    super(storageInfo);
     this.storageType = type;
   }
   
@@ -419,15 +419,15 @@
     return storageDirs.size();
   }
   
-  StorageDirectory getStorageDir( int idx ) {
-    return storageDirs.get( idx );
+  StorageDirectory getStorageDir(int idx) {
+    return storageDirs.get(idx);
   }
   
-  protected void addStorageDir( StorageDirectory sd ) {
-    storageDirs.add( sd );
+  protected void addStorageDir(StorageDirectory sd) {
+    storageDirs.add(sd);
   }
   
-  abstract boolean isConversionNeeded( StorageDirectory sd ) throws IOException;
+  abstract boolean isConversionNeeded(StorageDirectory sd) throws IOException;
   
   /**
    * Get common storage fields.
@@ -436,28 +436,28 @@
    * @param props
    * @throws IOException
    */
-  protected void getFields( Properties props, 
-                            StorageDirectory sd 
-                          ) throws IOException {
+  protected void getFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
     String sv, st, sid, sct;
-    sv = props.getProperty( "layoutVersion" );
-    st = props.getProperty( "storageType" );
-    sid = props.getProperty( "namespaceID" );
-    sct = props.getProperty( "cTime" );
-    if( sv == null || st == null || sid == null || sct == null )
-      throw new InconsistentFSStateException( sd.root,
-                    "file " + STORAGE_FILE_VERSION + " is invalid." );
-    int rv = Integer.parseInt( sv );
-    NodeType rt = NodeType.valueOf( st );
-    int rid = Integer.parseInt( sid );
-    long rct = Long.parseLong( sct );
-    if( ! storageType.equals( rt ) ||
-        ! (( namespaceID == 0 ) || ( rid == 0 ) || namespaceID == rid ))
-      throw new InconsistentFSStateException( sd.root,
-                  "is incompatible with others." );
-    if( rv < FSConstants.LAYOUT_VERSION ) // future version
-        throw new IncorrectVersionException(rv, "storage directory " 
-                                            + sd.root.getCanonicalPath() );
+    sv = props.getProperty("layoutVersion");
+    st = props.getProperty("storageType");
+    sid = props.getProperty("namespaceID");
+    sct = props.getProperty("cTime");
+    if (sv == null || st == null || sid == null || sct == null)
+      throw new InconsistentFSStateException(sd.root,
+                                             "file " + STORAGE_FILE_VERSION + " is invalid.");
+    int rv = Integer.parseInt(sv);
+    NodeType rt = NodeType.valueOf(st);
+    int rid = Integer.parseInt(sid);
+    long rct = Long.parseLong(sct);
+    if (!storageType.equals(rt) ||
+        !((namespaceID == 0) || (rid == 0) || namespaceID == rid))
+      throw new InconsistentFSStateException(sd.root,
+                                             "is incompatible with others.");
+    if (rv < FSConstants.LAYOUT_VERSION) // future version
+      throw new IncorrectVersionException(rv, "storage directory " 
+                                          + sd.root.getCanonicalPath());
     layoutVersion = rv;
     storageType = rt;
     namespaceID = rid;
@@ -471,24 +471,24 @@
    * @param props
    * @throws IOException
    */
-  protected void setFields( Properties props, 
-                            StorageDirectory sd 
-                          ) throws IOException {
-    props.setProperty( "layoutVersion", String.valueOf( layoutVersion ));
-    props.setProperty( "storageType", storageType.toString() );
-    props.setProperty( "namespaceID", String.valueOf( namespaceID ));
-    props.setProperty( "cTime", String.valueOf( cTime ));
-  }
-
-  static void rename( File from, File to ) throws IOException {
-    if( ! from.renameTo( to ))
-      throw new IOException( "Failed to rename " 
-          + from.getCanonicalPath() + " to " + to.getCanonicalPath() );
-  }
-
-  static void deleteDir( File dir ) throws IOException {
-    if( ! FileUtil.fullyDelete( dir ) )
-      throw new IOException( "Failed to delete " + dir.getCanonicalPath() );
+  protected void setFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    props.setProperty("layoutVersion", String.valueOf(layoutVersion));
+    props.setProperty("storageType", storageType.toString());
+    props.setProperty("namespaceID", String.valueOf(namespaceID));
+    props.setProperty("cTime", String.valueOf(cTime));
+  }
+
+  static void rename(File from, File to) throws IOException {
+    if (!from.renameTo(to))
+      throw new IOException("Failed to rename " 
+                            + from.getCanonicalPath() + " to " + to.getCanonicalPath());
+  }
+
+  static void deleteDir(File dir) throws IOException {
+    if (!FileUtil.fullyDelete(dir))
+      throw new IOException("Failed to delete " + dir.getCanonicalPath());
   }
   
   /**
@@ -516,9 +516,9 @@
     return VersionInfo.getRevision();
   }
 
-  static String getRegistrationID( StorageInfo storage ) {
-    return "NS-" + Integer.toString( storage.getNamespaceID() )
-           + "-" + Integer.toString( storage.getLayoutVersion() )
-           + "-" + Long.toString( storage.getCTime() );
+  static String getRegistrationID(StorageInfo storage) {
+    return "NS-" + Integer.toString(storage.getNamespaceID())
+      + "-" + Integer.toString(storage.getLayoutVersion())
+      + "-" + Long.toString(storage.getCTime());
   }
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java Thu Apr 19 14:34:41 2007
@@ -11,12 +11,12 @@
  */
 class UnregisteredDatanodeException extends IOException {
 
-  public UnregisteredDatanodeException( DatanodeID nodeID ) {
-    super("Unregistered data node: " + nodeID.getName() );
+  public UnregisteredDatanodeException(DatanodeID nodeID) {
+    super("Unregistered data node: " + nodeID.getName());
   }
 
-  public UnregisteredDatanodeException( DatanodeID nodeID, 
-                                        DatanodeInfo storedNode ) {
+  public UnregisteredDatanodeException(DatanodeID nodeID, 
+                                       DatanodeInfo storedNode) {
     super("Data node " + nodeID.getName() 
           + " is attempting to report storage ID "
           + nodeID.getStorageID() + ". Node " 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/filecache/DistributedCache.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/filecache/DistributedCache.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/filecache/DistributedCache.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/filecache/DistributedCache.java Thu Apr 19 14:34:41 2007
@@ -288,9 +288,9 @@
     byte[] digest = null;
 
     FileSystem fileSystem = getFileSystem(cache, conf);
-    if(!(fileSystem instanceof ChecksumFileSystem)) {
-      throw new IOException( "Not a checksummed file system: "
-                             +fileSystem.getUri() );
+    if (!(fileSystem instanceof ChecksumFileSystem)) {
+      throw new IOException("Not a checksummed file system: "
+                            +fileSystem.getUri());
     }
     String filename = cache.getPath();
     Path filePath = new Path(filename);
@@ -304,7 +304,7 @@
     }
     if (!fileSystem.exists(md5File)) {
       ChecksumFileSystem checksumFs;
-      if(!(fileSystem instanceof ChecksumFileSystem)) {
+      if (!(fileSystem instanceof ChecksumFileSystem)) {
         throw new IOException(
                               "Not a checksumed file system: "+fileSystem.getUri());
       } else {

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java Thu Apr 19 14:34:41 2007
@@ -112,7 +112,7 @@
       this.file = file;
       Path sumFile = fs.getChecksumFile(file);
       try {
-        int sumBufferSize = fs.getSumBufferSize(fs.getBytesPerSum(),bufferSize);
+        int sumBufferSize = fs.getSumBufferSize(fs.getBytesPerSum(), bufferSize);
         sums = fs.getRawFileSystem().open(sumFile, sumBufferSize);
 
         byte[] version = new byte[CHECKSUM_VERSION.length];
@@ -133,14 +133,14 @@
     public void seek(long desired) throws IOException {
       // seek to a checksum boundary
       long checksumBoundary = desired/bytesPerSum*bytesPerSum;
-      if(checksumBoundary != getPos()) {
+      if (checksumBoundary != getPos()) {
         datas.seek(checksumBoundary);
-        if(sums != null) {
+        if (sums != null) {
           sums.seek(HEADER_LENGTH + 4*(checksumBoundary/bytesPerSum));
         }
       }
       
-      if(sums != null) {
+      if (sums != null) {
         sum.reset();
         inSum = 0;
       }
@@ -207,9 +207,9 @@
               summed += toSum;
               
               inSum += toSum;
-              if (inSum == bytesPerSum ) {
+              if (inSum == bytesPerSum) {
                 verifySum(read-(summed-bytesPerSum));
-              } else if( read == summed && endOfFile ) {
+              } else if (read == summed && endOfFile) {
                 verifySum(read-read/bytesPerSum*bytesPerSum);
               }
             }
@@ -314,7 +314,7 @@
     }
 
     @Override
-      public boolean seekToNewSource(long targetPos) throws IOException {
+    public boolean seekToNewSource(long targetPos) throws IOException {
       return datas.seekToNewSource(targetPos) ||
         sums.seekToNewSource(targetPos/bytesPerSum);
     }
@@ -327,7 +327,7 @@
    * @param bufferSize the size of the buffer to be used.
    */
   @Override
-    public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     if (!exists(f)) {
       throw new FileNotFoundException(f.toString());
     }
@@ -405,7 +405,7 @@
     
     public void close() throws IOException {
       writeSum();
-      if(sums != null) {
+      if (sums != null) {
         sums.close();
       }
       out.close();
@@ -429,8 +429,8 @@
    * @param replication required block replication for the file. 
    */
   @Override
-    public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
-                                     short replication, long blockSize, Progressable progress)
+  public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
+                                   short replication, long blockSize, Progressable progress)
     throws IOException {
     if (exists(f) && !overwrite) {
       throw new IOException("File already exists:" + f);
@@ -497,7 +497,7 @@
       return fs.delete(f);
     } else {
       Path checkFile = getChecksumFile(f);
-      if(fs.exists(checkFile)) {
+      if (fs.exists(checkFile)) {
         fs.delete(checkFile);
       }
 
@@ -518,7 +518,7 @@
    * @exception IOException
    */
   @Override
-    public Path[] listPaths(Path[] files) throws IOException {
+  public Path[] listPaths(Path[] files) throws IOException {
     return fs.listPaths(files, DEFAULT_FILTER);
   }
 
@@ -533,17 +533,17 @@
   }
 
   @Override
-    public boolean mkdirs(Path f) throws IOException {
+  public boolean mkdirs(Path f) throws IOException {
     return fs.mkdirs(f);
   }
 
   @Override
-    public void lock(Path f, boolean shared) throws IOException {
+  public void lock(Path f, boolean shared) throws IOException {
     if (fs.isDirectory(f)) {
       fs.lock(f, shared);
     } else {
       Path checkFile = getChecksumFile(f);
-      if(fs.exists(checkFile)) {
+      if (fs.exists(checkFile)) {
         fs.lock(checkFile, shared);
       }
       fs.lock(f, shared);
@@ -551,12 +551,12 @@
   }
 
   @Override
-    public void release(Path f) throws IOException {
+  public void release(Path f) throws IOException {
     if (fs.isDirectory(f)) {
       fs.release(f);
     } else {
       Path checkFile = getChecksumFile(f);
-      if(fs.exists(checkFile)) {
+      if (fs.exists(checkFile)) {
         fs.release(getChecksumFile(f));
       }
       fs.release(f);
@@ -564,7 +564,7 @@
   }
 
   @Override
-    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     FileSystem localFs = getNamed("file:///", getConf());
     FileUtil.copy(localFs, src, this, dst, delSrc, getConf());
@@ -575,7 +575,7 @@
    * Copy it from FS control to the local dst name.
    */
   @Override
-    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyToLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     FileSystem localFs = getNamed("file:///", getConf());
     FileUtil.copy(this, src, localFs, dst, delSrc, getConf());
@@ -615,13 +615,13 @@
   }
 
   @Override
-    public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
+  public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
     throws IOException {
     return tmpLocalFile;
   }
 
   @Override
-    public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
+  public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
     throws IOException {
     moveFromLocalFile(tmpLocalFile, fsOutputFile);
   }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/DF.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/DF.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/DF.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/DF.java Thu Apr 19 14:34:41 2007
@@ -42,19 +42,19 @@
   private int percentUsed;
   private String mount;
   
-  public DF(File path, Configuration conf ) throws IOException {
-    this( path, conf.getLong( "dfs.df.interval", DF.DF_INTERVAL_DEFAULT ));
+  public DF(File path, Configuration conf) throws IOException {
+    this(path, conf.getLong("dfs.df.interval", DF.DF_INTERVAL_DEFAULT));
   }
 
   public DF(File path, long dfInterval) throws IOException {
     this.dirPath = path.getCanonicalPath();
     this.dfInterval = dfInterval;
-    lastDF = ( dfInterval < 0 ) ? 0 : -dfInterval;
+    lastDF = (dfInterval < 0) ? 0 : -dfInterval;
     this.doDF();
   }
   
   private void doDF() throws IOException { 
-    if( lastDF + dfInterval > System.currentTimeMillis() )
+    if (lastDF + dfInterval > System.currentTimeMillis())
       return;
     Process process;
     process = Runtime.getRuntime().exec(getExecString());
@@ -138,10 +138,10 @@
   }
 
   private String[] getExecString() {
-    return new String[] {"df","-k",dirPath};
+    return new String[] {"df","-k", dirPath};
   }
   
-  private void parseExecResult( BufferedReader lines ) throws IOException {
+  private void parseExecResult(BufferedReader lines) throws IOException {
     lines.readLine();                         // skip headings
   
     StringTokenizer tokens =
@@ -161,7 +161,7 @@
 
   public static void main(String[] args) throws Exception {
     String path = ".";
-    if( args.length > 0 )
+    if (args.length > 0)
       path = args[0];
 
     System.out.println(new DF(new File(path), DF_INTERVAL_DEFAULT).toString());

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FSDataInputStream.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FSDataInputStream.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FSDataInputStream.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FSDataInputStream.java Thu Apr 19 14:34:41 2007
@@ -38,7 +38,7 @@
     // calls to it in order to cache the position.
     public int read(byte b[], int off, int len) throws IOException {
       int result;
-      if( (result = in.read(b, off, len)) > 0 )
+      if ((result = in.read(b, off, len)) > 0)
         position += result;
       return result;
     }
@@ -53,12 +53,12 @@
     }
     
     public int read(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
       return ((FSInputStream)in).read(position, buffer, offset, length);
     }
     
     public void readFully(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
       ((FSInputStream)in).readFully(position, buffer, offset, length);
     }
   }
@@ -95,12 +95,12 @@
     }
 
     public int read(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
       return ((PositionCache)in).read(position, buffer, offset, length);
     }
     
     public void readFully(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
       ((PositionCache)in).readFully(position, buffer, offset, length);
     }
   }
@@ -113,7 +113,7 @@
   
   public FSDataInputStream(FSInputStream in, int bufferSize)
     throws IOException {
-    super( new Buffer(new PositionCache(in), bufferSize) );
+    super(new Buffer(new PositionCache(in), bufferSize));
     this.inStream = in;
   }
   
@@ -126,17 +126,17 @@
   }
   
   public int read(long position, byte[] buffer, int offset, int length)
-  throws IOException {
+    throws IOException {
     return ((Buffer)in).read(position, buffer, offset, length);
   }
   
   public void readFully(long position, byte[] buffer, int offset, int length)
-  throws IOException {
+    throws IOException {
     ((Buffer)in).readFully(position, buffer, offset, length);
   }
   
   public void readFully(long position, byte[] buffer)
-  throws IOException {
+    throws IOException {
     ((Buffer)in).readFully(position, buffer, 0, buffer.length);
   }
   

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FSDataOutputStream.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FSDataOutputStream.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FSDataOutputStream.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FSDataOutputStream.java Thu Apr 19 14:34:41 2007
@@ -74,12 +74,12 @@
   }
 
   public FSDataOutputStream(OutputStream out, int bufferSize)
-  throws IOException {
+    throws IOException {
     super(new Buffer(new PositionCache(out), bufferSize));
   }
   
   public FSDataOutputStream(OutputStream out, Configuration conf)
-  throws IOException {
+    throws IOException {
     this(out, conf.getInt("io.file.buffer.size", 4096));
   }
 



Mime
View raw message