hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cutt...@apache.org
Subject svn commit: r530556 [3/12] - in /lucene/hadoop/trunk: ./ src/contrib/abacus/src/java/org/apache/hadoop/abacus/ src/contrib/hbase/src/java/org/apache/hadoop/hbase/ src/contrib/hbase/src/test/org/apache/hadoop/hbase/ src/contrib/streaming/src/java/org/ap...
Date Thu, 19 Apr 2007 21:34:53 GMT
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java Thu Apr 19 14:34:41 2007
@@ -216,10 +216,10 @@
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite
-                              ) throws IOException {
-    return create( src, overwrite, defaultReplication, defaultBlockSize, null);
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite
+                             ) throws IOException {
+    return create(src, overwrite, defaultReplication, defaultBlockSize, null);
   }
     
   /**
@@ -231,11 +231,11 @@
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite,
-                              Progressable progress
-                              ) throws IOException {
-    return create( src, overwrite, defaultReplication, defaultBlockSize, null);
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite,
+                             Progressable progress
+                             ) throws IOException {
+    return create(src, overwrite, defaultReplication, defaultBlockSize, null);
   }
     
   /**
@@ -248,11 +248,11 @@
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite, 
-                              short replication,
-                              long blockSize
-                              ) throws IOException {
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite, 
+                             short replication,
+                             long blockSize
+                             ) throws IOException {
     return create(src, overwrite, replication, blockSize, null);
   }
 
@@ -267,12 +267,12 @@
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite, 
-                              short replication,
-                              long blockSize,
-                              Progressable progress
-                              ) throws IOException {
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite, 
+                             short replication,
+                             long blockSize,
+                             Progressable progress
+                             ) throws IOException {
     checkOpen();
     OutputStream result = new DFSOutputStream(src, overwrite, 
                                               replication, blockSize, progress);
@@ -360,8 +360,8 @@
    * 
    * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
    */
-  public boolean setSafeMode( SafeModeAction action ) throws IOException {
-    return namenode.setSafeMode( action );
+  public boolean setSafeMode(SafeModeAction action) throws IOException {
+    return namenode.setSafeMode(action);
   }
 
   /**
@@ -405,9 +405,9 @@
   public void lock(UTF8 src, boolean exclusive) throws IOException {
     long start = System.currentTimeMillis();
     boolean hasLock = false;
-    while (! hasLock) {
+    while (!hasLock) {
       hasLock = namenode.obtainLock(src.toString(), clientName, exclusive);
-      if (! hasLock) {
+      if (!hasLock) {
         try {
           Thread.sleep(400);
           if (System.currentTimeMillis() - start > 5000) {
@@ -425,9 +425,9 @@
    */
   public void release(UTF8 src) throws IOException {
     boolean hasReleased = false;
-    while (! hasReleased) {
+    while (!hasReleased) {
       hasReleased = namenode.releaseLock(src.toString(), clientName);
-      if (! hasReleased) {
+      if (!hasReleased) {
         LOG.info("Could not release.  Retrying...");
         try {
           Thread.sleep(2000);
@@ -464,7 +464,7 @@
       while (running) {
         if (System.currentTimeMillis() - lastRenewed > (LEASE_SOFTLIMIT_PERIOD / 2)) {
           try {
-            if( pendingCreates.size() > 0 )
+            if (pendingCreates.size() > 0)
               namenode.renewLease(clientName);
             lastRenewed = System.currentTimeMillis();
           } catch (IOException ie) {
@@ -538,7 +538,7 @@
 
       if (oldBlocks != null) {
         for (int i = 0; i < oldBlocks.length; i++) {
-          if (! oldBlocks[i].equals(newBlocks[i])) {
+          if (!oldBlocks[i].equals(newBlocks[i])) {
             throw new IOException("Blocklist for " + src + " has changed!");
           }
         }
@@ -912,7 +912,7 @@
       deadNodes.add(currentNode);
       DatanodeInfo oldNode = currentNode;
       DatanodeInfo newNode = blockSeekTo(targetPos);
-      if ( !markedDead ) {
+      if (!markedDead) {
         /* remove it from deadNodes. blockSeekTo could have cleared 
          * deadNodes and added currentNode again. Thats ok. */
         deadNodes.remove(oldNode);
@@ -1037,7 +1037,7 @@
      * filedescriptor that we don't own.
      */
     private void closeBackupStream() throws IOException {
-      if ( backupStream != null ) {
+      if (backupStream != null) {
         OutputStream stream = backupStream;
         backupStream = null;
         stream.close();
@@ -1047,7 +1047,7 @@
      * twice could result in deleting a file that we should not.
      */
     private void deleteBackupFile() {
-      if ( backupFile != null ) {
+      if (backupFile != null) {
         File file = backupFile;
         backupFile = null;
         file.delete();
@@ -1081,8 +1081,8 @@
         }
 
         block = lb.getBlock();
-        if ( block.getNumBytes() < bytesWrittenToBlock ) {
-          block.setNumBytes( bytesWrittenToBlock );
+        if (block.getNumBytes() < bytesWrittenToBlock) {
+          block.setNumBytes(bytesWrittenToBlock);
         }
         DatanodeInfo nodes[] = lb.getLocations();
 
@@ -1270,9 +1270,9 @@
       int workingPos = Math.min(pos, maxPos);
             
       if (workingPos > 0) {
-        if ( backupStream == null ) {
-          throw new IOException( "Trying to write to backupStream " +
-                                 "but it already closed or not open");
+        if (backupStream == null) {
+          throw new IOException("Trying to write to backupStream " +
+                                "but it already closed or not open");
         }
         //
         // To the local block backup, write just the bytes
@@ -1417,7 +1417,7 @@
 
         long localstart = System.currentTimeMillis();
         boolean fileComplete = false;
-        while (! fileComplete) {
+        while (!fileComplete) {
           fileComplete = namenode.complete(src.toString(), clientName.toString());
           if (!fileComplete) {
             try {

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSFileInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSFileInfo.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSFileInfo.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSFileInfo.java Thu Apr 19 14:34:41 2007
@@ -52,10 +52,10 @@
   /**
    * Create DFSFileInfo by file INode 
    */
-  public DFSFileInfo( FSDirectory.INode node ) {
+  public DFSFileInfo(FSDirectory.INode node) {
     this.path = new UTF8(node.computeName());
     this.isDir = node.isDir();
-    if( isDir ) {
+    if (isDir) {
       this.len = 0;
       this.contentsLen = node.computeContentsLength();
     } else 

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSck.java Thu Apr 19 14:34:41 2007
@@ -108,7 +108,7 @@
     URLConnection connection = path.openConnection();
     InputStream stream = connection.getInputStream();
     InputStreamReader input =
-        new InputStreamReader(stream, "UTF-8");
+      new InputStreamReader(stream, "UTF-8");
     try {
       int c = input.read();
       while (c != -1) {
@@ -122,7 +122,7 @@
   }
 
   public static void main(String[] args) throws Exception {
-      int res = new DFSck().doMain(new Configuration(), args);
-      System.exit(res);
+    int res = new DFSck().doMain(new Configuration(), args);
+    System.exit(res);
   }
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Thu Apr 19 14:34:41 2007
@@ -193,19 +193,19 @@
    * Create the DataNode given a configuration and an array of dataDirs.
    * 'dataDirs' is where the blocks are stored.
    */
-  DataNode( Configuration conf, 
-            AbstractList<File> dataDirs ) throws IOException {
+  DataNode(Configuration conf, 
+           AbstractList<File> dataDirs) throws IOException {
     try {
-      startDataNode( conf, dataDirs );
+      startDataNode(conf, dataDirs);
     } catch (IOException ie) {
       shutdown();
       throw ie;
     }
   }
     
-  void startDataNode( Configuration conf, 
-                      AbstractList<File> dataDirs
-                      ) throws IOException {
+  void startDataNode(Configuration conf, 
+                     AbstractList<File> dataDirs
+                     ) throws IOException {
     // use configured nameserver & interface to get local hostname
     machineName = DNS.getDefaultHost(
                                      conf.get("dfs.datanode.dns.interface","default"),
@@ -223,14 +223,14 @@
     NamespaceInfo nsInfo = handshake();
 
     // read storage info, lock data dirs and transition fs state if necessary
-    StartupOption startOpt = (StartupOption)conf.get( "dfs.datanode.startup", 
-                                                      StartupOption.REGULAR );
+    StartupOption startOpt = (StartupOption)conf.get("dfs.datanode.startup", 
+                                                     StartupOption.REGULAR);
     assert startOpt != null : "Startup option must be set.";
     storage = new DataStorage();
-    storage.recoverTransitionRead( nsInfo, dataDirs, startOpt );
+    storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
       
     // initialize data node internal structure
-    this.data = new FSDataset( storage, conf );
+    this.data = new FSDataset(storage, conf);
       
     // find free port
     ServerSocket ss = null;
@@ -238,7 +238,7 @@
     String bindAddress = conf.get("dfs.datanode.bindAddress", "0.0.0.0");
     while (ss == null) {
       try {
-        ss = new ServerSocket(tmpPort,0,InetAddress.getByName(bindAddress));
+        ss = new ServerSocket(tmpPort, 0, InetAddress.getByName(bindAddress));
         LOG.info("Opened server at " + tmpPort);
       } catch (IOException ie) {
         LOG.info("Could not open server at " + tmpPort + ", trying new port");
@@ -246,10 +246,10 @@
       }
     }
     // construct registration
-    this.dnRegistration = new DatanodeRegistration( 
+    this.dnRegistration = new DatanodeRegistration(
                                                    machineName + ":" + tmpPort, 
                                                    -1,   // info port determined later
-                                                   storage );
+                                                   storage);
       
     this.dataXceiveServer = new Daemon(new DataXceiveServer(ss));
 
@@ -268,9 +268,9 @@
     this.infoServer.start();
     this.dnRegistration.infoPort = this.infoServer.getPort();
     // get network location
-    this.networkLoc = conf.get( "dfs.datanode.rack" );
-    if( networkLoc == null )  // exec network script or set the default rack
-      networkLoc = getNetworkLoc( conf );
+    this.networkLoc = conf.get("dfs.datanode.rack");
+    if (networkLoc == null)  // exec network script or set the default rack
+      networkLoc = getNetworkLoc(conf);
     // register datanode
     register();
     datanodeObject = this;
@@ -282,7 +282,7 @@
       try {
         nsInfo = namenode.versionRequest();
         break;
-      } catch( SocketTimeoutException e ) {  // namenode is busy
+      } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + getNameNodeAddr());
         try {
           Thread.sleep(1000);
@@ -291,18 +291,18 @@
     }
     String errorMsg = null;
     // verify build version
-    if( ! nsInfo.getBuildVersion().equals( Storage.getBuildVersion() )) {
+    if (!nsInfo.getBuildVersion().equals(Storage.getBuildVersion())) {
       errorMsg = "Incompatible build versions: namenode BV = " 
         + nsInfo.getBuildVersion() + "; datanode BV = "
         + Storage.getBuildVersion();
-      LOG.fatal( errorMsg );
+      LOG.fatal(errorMsg);
       try {
-        namenode.errorReport( dnRegistration,
-                              DatanodeProtocol.NOTIFY, errorMsg );
-      } catch( SocketTimeoutException e ) {  // namenode is busy
+        namenode.errorReport(dnRegistration,
+                             DatanodeProtocol.NOTIFY, errorMsg);
+      } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + getNameNodeAddr());
       }
-      throw new IOException( errorMsg );
+      throw new IOException(errorMsg);
     }
     assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
       "Data-node and name-node layout versions must be the same.";
@@ -340,21 +340,21 @@
    * @throws IOException
    */
   private void register() throws IOException {
-    while( shouldRun ) {
+    while(shouldRun) {
       try {
         // reset name to machineName. Mainly for web interface.
         dnRegistration.name = machineName + ":" + dnRegistration.getPort();
-        dnRegistration = namenode.register( dnRegistration, networkLoc );
+        dnRegistration = namenode.register(dnRegistration, networkLoc);
         break;
-      } catch( SocketTimeoutException e ) {  // namenode is busy
+      } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + getNameNodeAddr());
         try {
           Thread.sleep(1000);
         } catch (InterruptedException ie) {}
       }
     }
-    if( storage.getStorageID().equals("") ) {
-      storage.setStorageID( dnRegistration.getStorageID());
+    if (storage.getStorageID().equals("")) {
+      storage.setStorageID(dnRegistration.getStorageID());
       storage.writeAll();
     }
   }
@@ -390,12 +390,12 @@
     }
   }
 
-  void handleDiskError( String errMsgr ) {
-    LOG.warn( "DataNode is shutting down.\n" + errMsgr );
+  void handleDiskError(String errMsgr) {
+    LOG.warn("DataNode is shutting down.\n" + errMsgr);
     try {
       namenode.errorReport(
                            dnRegistration, DatanodeProtocol.DISK_ERROR, errMsgr);
-    } catch( IOException ignored) {              
+    } catch(IOException ignored) {              
     }
     shutdown();
   }
@@ -438,20 +438,20 @@
           // -- Total capacity
           // -- Bytes remaining
           //
-          DatanodeCommand cmd = namenode.sendHeartbeat( dnRegistration, 
-                                                        data.getCapacity(), 
-                                                        data.getRemaining(), 
-                                                        xmitsInProgress,
-                                                        xceiverCount.getValue());
+          DatanodeCommand cmd = namenode.sendHeartbeat(dnRegistration, 
+                                                       data.getCapacity(), 
+                                                       data.getRemaining(), 
+                                                       xmitsInProgress,
+                                                       xceiverCount.getValue());
           //LOG.info("Just sent heartbeat, with name " + localName);
           lastHeartbeat = now;
-          if( ! processCommand( cmd ) )
+          if (!processCommand(cmd))
             continue;
         }
             
         // check if there are newly received blocks
         Block [] blockArray=null;
-        synchronized( receivedBlockList ) {
+        synchronized(receivedBlockList) {
           if (receivedBlockList.size() > 0) {
             //
             // Send newly-received blockids to namenode
@@ -459,8 +459,8 @@
             blockArray = receivedBlockList.toArray(new Block[receivedBlockList.size()]);
           }
         }
-        if( blockArray != null ) {
-          namenode.blockReceived( dnRegistration, blockArray );
+        if (blockArray != null) {
+          namenode.blockReceived(dnRegistration, blockArray);
           synchronized (receivedBlockList) {
             for(Block b: blockArray) {
               receivedBlockList.remove(b);
@@ -475,9 +475,9 @@
           // Get back a list of local block(s) that are obsolete
           // and can be safely GC'ed.
           //
-          DatanodeCommand cmd = namenode.blockReport( dnRegistration,
-                                                      data.getBlockReport());
-          processCommand( cmd );
+          DatanodeCommand cmd = namenode.blockReport(dnRegistration,
+                                                     data.getBlockReport());
+          processCommand(cmd);
           lastBlockReport = now;
         }
             
@@ -486,7 +486,7 @@
         // or work arrives, and then iterate again.
         //
         long waitTime = heartBeatInterval - (System.currentTimeMillis() - lastHeartbeat);
-        synchronized( receivedBlockList ) {
+        synchronized(receivedBlockList) {
           if (waitTime > 0 && receivedBlockList.size() == 0) {
             try {
               receivedBlockList.wait(waitTime);
@@ -497,12 +497,12 @@
       } catch(DiskErrorException e) {
         handleDiskError(e.getLocalizedMessage());
         return;
-      } catch( RemoteException re ) {
+      } catch(RemoteException re) {
         String reClass = re.getClassName();
-        if( UnregisteredDatanodeException.class.getName().equals( reClass ) ||
-            DisallowedDatanodeException.class.getName().equals( reClass )) {
-          LOG.warn( "DataNode is shutting down: " + 
-                    StringUtils.stringifyException(re));
+        if (UnregisteredDatanodeException.class.getName().equals(reClass) ||
+            DisallowedDatanodeException.class.getName().equals(reClass)) {
+          LOG.warn("DataNode is shutting down: " + 
+                   StringUtils.stringifyException(re));
           shutdown();
           return;
         }
@@ -519,16 +519,16 @@
      * @return true if further processing may be required or false otherwise. 
      * @throws IOException
      */
-  private boolean processCommand( DatanodeCommand cmd ) throws IOException {
-    if( cmd == null )
+  private boolean processCommand(DatanodeCommand cmd) throws IOException {
+    if (cmd == null)
       return true;
-    switch( cmd.action ) {
+    switch(cmd.action) {
     case DNA_TRANSFER:
       //
       // Send a copy of a block to another datanode
       //
       BlockCommand bcmd = (BlockCommand)cmd;
-      transferBlocks( bcmd.getBlocks(), bcmd.getTargets() );
+      transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
       break;
     case DNA_INVALIDATE:
       //
@@ -553,21 +553,21 @@
       storage.finalizeUpgrade();
       break;
     default:
-      LOG.warn( "Unknown DatanodeCommand action: " + cmd.action);
+      LOG.warn("Unknown DatanodeCommand action: " + cmd.action);
     }
     return true;
   }
     
-  private void transferBlocks(  Block blocks[], 
-                                DatanodeInfo xferTargets[][] 
-                                ) throws IOException {
+  private void transferBlocks( Block blocks[], 
+                               DatanodeInfo xferTargets[][] 
+                               ) throws IOException {
     for (int i = 0; i < blocks.length; i++) {
       if (!data.isValidBlock(blocks[i])) {
         String errStr = "Can't send invalid block " + blocks[i];
         LOG.info(errStr);
-        namenode.errorReport( dnRegistration, 
-                              DatanodeProtocol.INVALID_BLOCK, 
-                              errStr );
+        namenode.errorReport(dnRegistration, 
+                             DatanodeProtocol.INVALID_BLOCK, 
+                             errStr);
         break;
       }
       if (xferTargets[i].length > 0) {
@@ -689,7 +689,7 @@
         //
         // Write filelen of -1 if error
         //
-        if (! data.isValidBlock(b)) {
+        if (!data.isValidBlock(b)) {
           out.writeLong(-1);
         } else {
           //
@@ -1130,11 +1130,11 @@
   /** Start a single datanode daemon and wait for it to finish.
    *  If this thread is specifically interrupted, it will stop waiting.
    */
-  static DataNode createDataNode( String args[],
-                                  Configuration conf ) throws IOException {
-    if( conf == null )
+  static DataNode createDataNode(String args[],
+                                 Configuration conf) throws IOException {
+    if (conf == null)
       conf = new Configuration();
-    if( ! parseArguments( args, conf )) {
+    if (!parseArguments(args, conf)) {
       printUsage();
       return null;
     }
@@ -1160,21 +1160,21 @@
    * no directory from this directory list can be created.
    * @throws IOException
    */
-  static DataNode makeInstance( String[] dataDirs, Configuration conf )
+  static DataNode makeInstance(String[] dataDirs, Configuration conf)
     throws IOException {
     ArrayList<File> dirs = new ArrayList<File>();
     for (int i = 0; i < dataDirs.length; i++) {
       File data = new File(dataDirs[i]);
       try {
-        DiskChecker.checkDir( data );
+        DiskChecker.checkDir(data);
         dirs.add(data);
-      } catch( DiskErrorException e ) {
-        LOG.warn("Invalid directory in dfs.data.dir: " + e.getMessage() );
+      } catch(DiskErrorException e) {
+        LOG.warn("Invalid directory in dfs.data.dir: " + e.getMessage());
       }
     }
-    if( dirs.size() > 0 ) 
+    if (dirs.size() > 0) 
       return new DataNode(conf, dirs);
-    LOG.error("All directories in dfs.data.dir are invalid." );
+    LOG.error("All directories in dfs.data.dir are invalid.");
     return null;
   }
 
@@ -1199,45 +1199,45 @@
    * @return false if passed argements are incorrect
    */
   private static boolean parseArguments(String args[], 
-                                        Configuration conf ) {
+                                        Configuration conf) {
     int argsLen = (args == null) ? 0 : args.length;
     StartupOption startOpt = StartupOption.REGULAR;
     String networkLoc = null;
-    for( int i=0; i < argsLen; i++ ) {
+    for(int i=0; i < argsLen; i++) {
       String cmd = args[i];
-      if( "-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd) ) {
-        if( i==args.length-1 )
+      if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
+        if (i==args.length-1)
           return false;
         networkLoc = args[++i];
-        if( networkLoc.startsWith("-") )
+        if (networkLoc.startsWith("-"))
           return false;
-      } else if( "-rollback".equalsIgnoreCase(cmd) ) {
+      } else if ("-rollback".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.ROLLBACK;
-      } else if( "-regular".equalsIgnoreCase(cmd) ) {
+      } else if ("-regular".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.REGULAR;
       } else
         return false;
     }
-    if( networkLoc != null )
-      conf.set( "dfs.datanode.rack", NodeBase.normalize( networkLoc ));
-    conf.setObject( "dfs.datanode.startup", startOpt );
+    if (networkLoc != null)
+      conf.set("dfs.datanode.rack", NodeBase.normalize(networkLoc));
+    conf.setObject("dfs.datanode.startup", startOpt);
     return true;
   }
 
   /* Get the network location by running a script configured in conf */
-  private static String getNetworkLoc( Configuration conf ) 
+  private static String getNetworkLoc(Configuration conf) 
     throws IOException {
-    String locScript = conf.get("dfs.network.script" );
-    if( locScript == null ) 
+    String locScript = conf.get("dfs.network.script");
+    if (locScript == null) 
       return NetworkTopology.DEFAULT_RACK;
 
-    LOG.info( "Starting to run script to get datanode network location");
-    Process p = Runtime.getRuntime().exec( locScript );
+    LOG.info("Starting to run script to get datanode network location");
+    Process p = Runtime.getRuntime().exec(locScript);
     StringBuffer networkLoc = new StringBuffer();
     final BufferedReader inR = new BufferedReader(
-                                                  new InputStreamReader(p.getInputStream() ) );
+                                                  new InputStreamReader(p.getInputStream()));
     final BufferedReader errR = new BufferedReader(
-                                                   new InputStreamReader( p.getErrorStream() ) );
+                                                   new InputStreamReader(p.getErrorStream()));
 
     // read & log any error messages from the running script
     Thread errThread = new Thread() {
@@ -1248,7 +1248,7 @@
               LOG.warn("Network script error: "+errLine);
               errLine = errR.readLine();
             }
-          } catch( IOException e) {
+          } catch(IOException e) {
                     
           }
         }
@@ -1258,32 +1258,32 @@
             
       // fetch output from the process
       String line = inR.readLine();
-      while( line != null ) {
-        networkLoc.append( line );
+      while(line != null) {
+        networkLoc.append(line);
         line = inR.readLine();
       }
       try {
         // wait for the process to finish
         int returnVal = p.waitFor();
         // check the exit code
-        if( returnVal != 0 ) {
+        if (returnVal != 0) {
           throw new IOException("Process exits with nonzero status: "+locScript);
         }
       } catch (InterruptedException e) {
-        throw new IOException( e.getMessage() );
+        throw new IOException(e.getMessage());
       } finally {
         try {
           // make sure that the error thread exits
           errThread.join();
         } catch (InterruptedException je) {
-          LOG.warn( StringUtils.stringifyException(je));
+          LOG.warn(StringUtils.stringifyException(je));
         }
       }
     } finally {
       // close in & error streams
       try {
         inR.close();
-      } catch ( IOException ine ) {
+      } catch (IOException ine) {
         throw ine;
       } finally {
         errR.close();
@@ -1297,11 +1297,11 @@
    */
   public static void main(String args[]) {
     try {
-      DataNode datanode = createDataNode( args, null );
-      if( datanode != null )
+      DataNode datanode = createDataNode(args, null);
+      if (datanode != null)
         datanode.join();
-    } catch ( Throwable e ) {
-      LOG.error( StringUtils.stringifyException( e ) );
+    } catch (Throwable e) {
+      LOG.error(StringUtils.stringifyException(e));
       System.exit(-1);
     }
   }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataStorage.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataStorage.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataStorage.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataStorage.java Thu Apr 19 14:34:41 2007
@@ -29,17 +29,17 @@
   private String storageID;
 
   DataStorage() {
-    super( NodeType.DATA_NODE );
+    super(NodeType.DATA_NODE);
     storageID = "";
   }
   
-  DataStorage( int nsID, long cT, String strgID ) {
-    super( NodeType.DATA_NODE, nsID, cT );
+  DataStorage(int nsID, long cT, String strgID) {
+    super(NodeType.DATA_NODE, nsID, cT);
     this.storageID = strgID;
   }
   
-  DataStorage( StorageInfo storageInfo, String strgID ) {
-    super( NodeType.DATA_NODE, storageInfo );
+  DataStorage(StorageInfo storageInfo, String strgID) {
+    super(NodeType.DATA_NODE, storageInfo);
     this.storageID = strgID;
   }
 
@@ -47,7 +47,7 @@
     return storageID;
   }
   
-  void setStorageID( String newStorageID ) {
+  void setStorageID(String newStorageID) {
     this.storageID = newStorageID;
   }
   
@@ -62,10 +62,10 @@
    * @param startOpt startup option
    * @throws IOException
    */
-  void recoverTransitionRead( NamespaceInfo nsInfo,
-                              Collection<File> dataDirs,
-                              StartupOption startOpt
-                              ) throws IOException {
+  void recoverTransitionRead(NamespaceInfo nsInfo,
+                             Collection<File> dataDirs,
+                             StartupOption startOpt
+                             ) throws IOException {
     assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
       "Data-node and name-node layout versions must be the same.";
     
@@ -73,53 +73,53 @@
     // check whether all is consistent before transitioning.
     // Format and recover.
     this.storageID = "";
-    this.storageDirs = new ArrayList<StorageDirectory>( dataDirs.size() );
-    ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>( dataDirs.size() );
-    for( Iterator<File> it = dataDirs.iterator(); it.hasNext(); ) {
+    this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
+    ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(dataDirs.size());
+    for(Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
       File dataDir = it.next();
-      StorageDirectory sd = new StorageDirectory( dataDir );
+      StorageDirectory sd = new StorageDirectory(dataDir);
       StorageState curState;
       try {
-        curState = sd.analyzeStorage( startOpt );
+        curState = sd.analyzeStorage(startOpt);
         // sd is locked but not opened
-        switch( curState ) {
+        switch(curState) {
         case NORMAL:
           break;
         case NON_EXISTENT:
           // ignore this storage
-          LOG.info( "Storage directory " + dataDir + " does not exist." );
+          LOG.info("Storage directory " + dataDir + " does not exist.");
           it.remove();
           continue;
         case CONVERT:
-          convertLayout( sd, nsInfo );
+          convertLayout(sd, nsInfo);
           break;
         case NOT_FORMATTED: // format
-          LOG.info( "Storage directory " + dataDir + " is not formatted." );
-          LOG.info( "Formatting ..." );
-          format( sd, nsInfo );
+          LOG.info("Storage directory " + dataDir + " is not formatted.");
+          LOG.info("Formatting ...");
+          format(sd, nsInfo);
           break;
         default:  // recovery part is common
-          sd.doRecover( curState );
+          sd.doRecover(curState);
         }
       } catch (IOException ioe) {
         sd.unlock();
         throw ioe;
       }
       // add to the storage list
-      addStorageDir( sd );
-      dataDirStates.add( curState );
+      addStorageDir(sd);
+      dataDirStates.add(curState);
     }
 
-    if( dataDirs.size() == 0 )  // none of the data dirs exist
-      throw new IOException( 
-                            "All specified directories are not accessible or do not exist." );
+    if (dataDirs.size() == 0)  // none of the data dirs exist
+      throw new IOException(
+                            "All specified directories are not accessible or do not exist.");
 
     // 2. Do transitions
     // Each storage directory is treated individually.
    // During startup some of them can upgrade or rollback 
     // while others could be uptodate for the regular startup.
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      doTransition( getStorageDir( idx ), nsInfo, startOpt );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      doTransition(getStorageDir(idx), nsInfo, startOpt);
       assert this.getLayoutVersion() == nsInfo.getLayoutVersion() :
         "Data-node and name-node layout versions must be the same.";
       assert this.getCTime() == nsInfo.getCTime() :
@@ -130,7 +130,7 @@
     this.writeAll();
   }
 
-  void format( StorageDirectory sd, NamespaceInfo nsInfo ) throws IOException {
+  void format(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
     sd.clearDirectory(); // create directory
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     this.namespaceID = nsInfo.getNamespaceID();
@@ -139,42 +139,42 @@
     sd.write();
   }
 
-  protected void setFields( Properties props, 
-                            StorageDirectory sd 
-                            ) throws IOException {
-    super.setFields( props, sd );
-    props.setProperty( "storageID", storageID );
-  }
-
-  protected void getFields( Properties props, 
-                            StorageDirectory sd 
-                            ) throws IOException {
-    super.getFields( props, sd );
-    String ssid = props.getProperty( "storageID" );
-    if( ssid == null ||
-        ! ("".equals( storageID ) || "".equals( ssid ) ||
-           storageID.equals( ssid )))
-      throw new InconsistentFSStateException( sd.root,
-                                              "has incompatible storage Id." );
-    if( "".equals( storageID ) ) // update id only if it was empty
+  protected void setFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    super.setFields(props, sd);
+    props.setProperty("storageID", storageID);
+  }
+
+  protected void getFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    super.getFields(props, sd);
+    String ssid = props.getProperty("storageID");
+    if (ssid == null ||
+        !("".equals(storageID) || "".equals(ssid) ||
+          storageID.equals(ssid)))
+      throw new InconsistentFSStateException(sd.root,
+                                             "has incompatible storage Id.");
+    if ("".equals(storageID)) // update id only if it was empty
       storageID = ssid;
   }
 
-  boolean isConversionNeeded( StorageDirectory sd ) throws IOException {
-    File oldF = new File( sd.root, "storage" );
-    if( ! oldF.exists() )
+  boolean isConversionNeeded(StorageDirectory sd) throws IOException {
+    File oldF = new File(sd.root, "storage");
+    if (!oldF.exists())
       return false;
     // check consistency of the old storage
-    File oldDataDir = new File( sd.root, "data" );
-    if( ! oldDataDir.exists() ) 
-      throw new InconsistentFSStateException( sd.root,
-                                              "Old layout block directory " + oldDataDir + " is missing" ); 
-    if( ! oldDataDir.isDirectory() )
-      throw new InconsistentFSStateException( sd.root,
-                                              oldDataDir + " is not a directory." );
-    if( ! oldDataDir.canWrite() )
-      throw new InconsistentFSStateException( sd.root,
-                                              oldDataDir + " is not writable." );
+    File oldDataDir = new File(sd.root, "data");
+    if (!oldDataDir.exists()) 
+      throw new InconsistentFSStateException(sd.root,
+                                             "Old layout block directory " + oldDataDir + " is missing"); 
+    if (!oldDataDir.isDirectory())
+      throw new InconsistentFSStateException(sd.root,
+                                             oldDataDir + " is not a directory.");
+    if (!oldDataDir.canWrite())
+      throw new InconsistentFSStateException(sd.root,
+                                             oldDataDir + " is not writable.");
     return true;
   }
   
@@ -185,44 +185,44 @@
    * @param nsInfo namespace information
    * @throws IOException
    */
-  private void convertLayout( StorageDirectory sd,
-                              NamespaceInfo nsInfo 
-                              ) throws IOException {
+  private void convertLayout(StorageDirectory sd,
+                             NamespaceInfo nsInfo 
+                             ) throws IOException {
     assert FSConstants.LAYOUT_VERSION < LAST_PRE_UPGRADE_LAYOUT_VERSION :
       "Bad current layout version: FSConstants.LAYOUT_VERSION should decrease";
-    File oldF = new File( sd.root, "storage" );
-    File oldDataDir = new File( sd.root, "data" );
+    File oldF = new File(sd.root, "storage");
+    File oldDataDir = new File(sd.root, "data");
     assert oldF.exists() : "Old datanode layout \"storage\" file is missing";
     assert oldDataDir.exists() : "Old layout block directory \"data\" is missing";
-    LOG.info( "Old layout version file " + oldF
-              + " is found. New layout version is "
-              + FSConstants.LAYOUT_VERSION );
-    LOG.info( "Converting ..." );
+    LOG.info("Old layout version file " + oldF
+             + " is found. New layout version is "
+             + FSConstants.LAYOUT_VERSION);
+    LOG.info("Converting ...");
     
     // Lock and Read old storage file
-    RandomAccessFile oldFile = new RandomAccessFile( oldF, "rws" );
+    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
     if (oldFile == null)
-      throw new IOException( "Cannot read file: " + oldF );
+      throw new IOException("Cannot read file: " + oldF);
     FileLock oldLock = oldFile.getChannel().tryLock();
     if (oldLock == null)
-      throw new IOException( "Cannot lock file: " + oldF );
+      throw new IOException("Cannot lock file: " + oldF);
     try {
       oldFile.seek(0);
       int odlVersion = oldFile.readInt();
-      if( odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION )
-        throw new IncorrectVersionException( odlVersion, "file " + oldF,
-                                             LAST_PRE_UPGRADE_LAYOUT_VERSION );
-      String odlStorageID = org.apache.hadoop.io.UTF8.readString( oldFile );
+      if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
+        throw new IncorrectVersionException(odlVersion, "file " + oldF,
+                                            LAST_PRE_UPGRADE_LAYOUT_VERSION);
+      String odlStorageID = org.apache.hadoop.io.UTF8.readString(oldFile);
   
       // check new storage
       File newDataDir = sd.getCurrentDir();
       File versionF = sd.getVersionFile();
-      if( versionF.exists() )
-        throw new IOException( "Version file already exists: " + versionF );
-      if( newDataDir.exists() ) // somebody created current dir manually
-        deleteDir( newDataDir );
+      if (versionF.exists())
+        throw new IOException("Version file already exists: " + versionF);
+      if (newDataDir.exists()) // somebody created current dir manually
+        deleteDir(newDataDir);
       // Write new layout
-      rename( oldDataDir, newDataDir );
+      rename(oldDataDir, newDataDir);
   
       this.layoutVersion = FSConstants.LAYOUT_VERSION;
       this.namespaceID = nsInfo.getNamespaceID();
@@ -235,8 +235,8 @@
       oldFile.close();
     }
     // move old storage file into current dir
-    rename( oldF, new File( sd.getCurrentDir(), "storage" ));
-    LOG.info( "Conversion of " + oldF + " is complete." );
+    rename(oldF, new File(sd.getCurrentDir(), "storage"));
+    LOG.info("Conversion of " + oldF + " is complete.");
   }
 
   /**
@@ -252,26 +252,26 @@
    * @param startOpt  startup option
    * @throws IOException
    */
-  private void doTransition(  StorageDirectory sd, 
-                              NamespaceInfo nsInfo, 
-                              StartupOption startOpt
-                              ) throws IOException {
-    if( startOpt == StartupOption.ROLLBACK )
-      doRollback( sd, nsInfo ); // rollback if applicable
+  private void doTransition( StorageDirectory sd, 
+                             NamespaceInfo nsInfo, 
+                             StartupOption startOpt
+                             ) throws IOException {
+    if (startOpt == StartupOption.ROLLBACK)
+      doRollback(sd, nsInfo); // rollback if applicable
     sd.read();
     assert this.layoutVersion >= FSConstants.LAYOUT_VERSION :
       "Future version is not allowed";
-    if( getNamespaceID() != nsInfo.getNamespaceID() )
-      throw new IOException( 
+    if (getNamespaceID() != nsInfo.getNamespaceID())
+      throw new IOException(
                             "Incompatible namespaceIDs in " + sd.root.getCanonicalPath()
                             + ": namenode namespaceID = " + nsInfo.getNamespaceID() 
-                            + "; datanode namespaceID = " + getNamespaceID() );
-    if( this.layoutVersion == FSConstants.LAYOUT_VERSION 
-        && this.cTime == nsInfo.getCTime() )
+                            + "; datanode namespaceID = " + getNamespaceID());
+    if (this.layoutVersion == FSConstants.LAYOUT_VERSION 
+        && this.cTime == nsInfo.getCTime())
       return; // regular startup
-    if( this.layoutVersion > FSConstants.LAYOUT_VERSION
-        || this.cTime < nsInfo.getCTime() ) {
-      doUpgrade( sd, nsInfo );  // upgrade
+    if (this.layoutVersion > FSConstants.LAYOUT_VERSION
+        || this.cTime < nsInfo.getCTime()) {
+      doUpgrade(sd, nsInfo);  // upgrade
       return;
     }
     // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
@@ -280,7 +280,7 @@
                           + " CTime = " + this.getCTime() 
                           + " is newer than the namespace state: LV = "
                           + nsInfo.getLayoutVersion() 
-                          + " CTime = " + nsInfo.getCTime() );
+                          + " CTime = " + nsInfo.getCTime());
   }
 
   /**
@@ -290,26 +290,26 @@
    * @param sd  storage directory
    * @throws IOException
    */
-  void doUpgrade( StorageDirectory sd,
-                  NamespaceInfo nsInfo
-                  ) throws IOException {
-    LOG.info( "Upgrading storage directory " + sd.root 
-              + ".\n   old LV = " + this.getLayoutVersion()
-              + "; old CTime = " + this.getCTime()
-              + ".\n   new LV = " + nsInfo.getLayoutVersion()
-              + "; new CTime = " + nsInfo.getCTime() );
+  void doUpgrade(StorageDirectory sd,
+                 NamespaceInfo nsInfo
+                 ) throws IOException {
+    LOG.info("Upgrading storage directory " + sd.root 
+             + ".\n   old LV = " + this.getLayoutVersion()
+             + "; old CTime = " + this.getCTime()
+             + ".\n   new LV = " + nsInfo.getLayoutVersion()
+             + "; new CTime = " + nsInfo.getCTime());
     File curDir = sd.getCurrentDir();
     File prevDir = sd.getPreviousDir();
     assert curDir.exists() : "Current directory must exist.";
     // delete previous dir before upgrading
-    if( prevDir.exists() )
-      deleteDir( prevDir );
+    if (prevDir.exists())
+      deleteDir(prevDir);
     File tmpDir = sd.getPreviousTmp();
-    assert ! tmpDir.exists() : "previous.tmp directory must not exist.";
+    assert !tmpDir.exists() : "previous.tmp directory must not exist.";
     // rename current to tmp
-    rename( curDir, tmpDir );
+    rename(curDir, tmpDir);
     // hardlink blocks
-    linkBlocks( tmpDir, curDir );
+    linkBlocks(tmpDir, curDir);
     // write version file
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     assert this.namespaceID == nsInfo.getNamespaceID() :
@@ -317,69 +317,69 @@
     this.cTime = nsInfo.getCTime();
     sd.write();
     // rename tmp to previous
-    rename( tmpDir, prevDir );
-    LOG.info( "Upgrade of " + sd.root + " is complete." );
+    rename(tmpDir, prevDir);
+    LOG.info("Upgrade of " + sd.root + " is complete.");
   }
 
-  void doRollback(  StorageDirectory sd,
-                    NamespaceInfo nsInfo
-                    ) throws IOException {
+  void doRollback( StorageDirectory sd,
+                   NamespaceInfo nsInfo
+                   ) throws IOException {
     File prevDir = sd.getPreviousDir();
     // regular startup if previous dir does not exist
-    if( ! prevDir.exists() )
+    if (!prevDir.exists())
       return;
     DataStorage prevInfo = new DataStorage();
-    StorageDirectory prevSD = prevInfo.new StorageDirectory( sd.root );
-    prevSD.read( prevSD.getPreviousVersionFile() );
+    StorageDirectory prevSD = prevInfo.new StorageDirectory(sd.root);
+    prevSD.read(prevSD.getPreviousVersionFile());
 
     // We allow rollback to a state, which is either consistent with
     // the namespace state or can be further upgraded to it.
-    if( ! ( prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
-            && prevInfo.getCTime() <= nsInfo.getCTime() ))  // cannot rollback
-      throw new InconsistentFSStateException( prevSD.root,
-                                              "Cannot rollback to a newer state.\nDatanode previous state: LV = " 
-                                              + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() 
-                                              + " is newer than the namespace state: LV = "
-                                              + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime() );
-    LOG.info( "Rolling back storage directory " + sd.root 
-              + ".\n   target LV = " + nsInfo.getLayoutVersion()
-              + "; target CTime = " + nsInfo.getCTime() );
+    if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
+          && prevInfo.getCTime() <= nsInfo.getCTime()))  // cannot rollback
+      throw new InconsistentFSStateException(prevSD.root,
+                                             "Cannot rollback to a newer state.\nDatanode previous state: LV = " 
+                                             + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() 
+                                             + " is newer than the namespace state: LV = "
+                                             + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime());
+    LOG.info("Rolling back storage directory " + sd.root 
+             + ".\n   target LV = " + nsInfo.getLayoutVersion()
+             + "; target CTime = " + nsInfo.getCTime());
     File tmpDir = sd.getRemovedTmp();
-    assert ! tmpDir.exists() : "removed.tmp directory must not exist.";
+    assert !tmpDir.exists() : "removed.tmp directory must not exist.";
     // rename current to tmp
     File curDir = sd.getCurrentDir();
     assert curDir.exists() : "Current directory must exist.";
-    rename( curDir, tmpDir );
+    rename(curDir, tmpDir);
     // rename previous to current
-    rename( prevDir, curDir );
+    rename(prevDir, curDir);
     // delete tmp dir
-    deleteDir( tmpDir );
-    LOG.info( "Rollback of " + sd.root + " is complete." );
+    deleteDir(tmpDir);
+    LOG.info("Rollback of " + sd.root + " is complete.");
   }
 
-  void doFinalize( StorageDirectory sd ) throws IOException {
+  void doFinalize(StorageDirectory sd) throws IOException {
     File prevDir = sd.getPreviousDir();
-    if( ! prevDir.exists() )
+    if (!prevDir.exists())
       return; // already discarded
     final String dataDirPath = sd.root.getCanonicalPath();
-    LOG.info( "Finalizing upgrade for storage directory " 
-              + dataDirPath 
-              + ".\n   cur LV = " + this.getLayoutVersion()
-              + "; cur CTime = " + this.getCTime() );
+    LOG.info("Finalizing upgrade for storage directory " 
+             + dataDirPath 
+             + ".\n   cur LV = " + this.getLayoutVersion()
+             + "; cur CTime = " + this.getCTime());
     assert sd.getCurrentDir().exists() : "Current directory must exist.";
     final File tmpDir = sd.getFinalizedTmp();
     // rename previous to tmp
-    rename( prevDir, tmpDir );
+    rename(prevDir, tmpDir);
 
     // delete tmp dir in a separate thread
-    new Daemon( new Runnable() {
+    new Daemon(new Runnable() {
         public void run() {
           try {
-            deleteDir( tmpDir );
-          } catch( IOException ex ) {
-            LOG.error( "Finalize upgrade for " + dataDirPath + " failed.", ex );
+            deleteDir(tmpDir);
+          } catch(IOException ex) {
+            LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
           }
-          LOG.info( "Finalize upgrade for " + dataDirPath + " is complete." );
+          LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
         }
         public String toString() { return "Finalize " + dataDirPath; }
       }).start();
@@ -387,26 +387,26 @@
   
   void finalizeUpgrade() throws IOException {
     for (Iterator<StorageDirectory> it = storageDirs.iterator(); it.hasNext();) {
-      doFinalize( it.next() );
+      doFinalize(it.next());
     }
   }
   
-  static void linkBlocks( File from, File to ) throws IOException {
-    if( ! from.isDirectory() ) {
-      HardLink.createHardLink( from, to );
+  static void linkBlocks(File from, File to) throws IOException {
+    if (!from.isDirectory()) {
+      HardLink.createHardLink(from, to);
       return;
     }
     // from is a directory
-    if( ! to.mkdir() )
-      throw new IOException("Cannot create directory " + to );
-    String[] blockNames = from.list( new java.io.FilenameFilter() {
+    if (!to.mkdir())
+      throw new IOException("Cannot create directory " + to);
+    String[] blockNames = from.list(new java.io.FilenameFilter() {
         public boolean accept(File dir, String name) {
-          return name.startsWith( BLOCK_SUBDIR_PREFIX ) 
-            || name.startsWith( BLOCK_FILE_PREFIX );
+          return name.startsWith(BLOCK_SUBDIR_PREFIX) 
+            || name.startsWith(BLOCK_FILE_PREFIX);
         }
       });
     
-    for( int i = 0; i < blockNames.length; i++ )
-      linkBlocks( new File(from, blockNames[i]), new File(to, blockNames[i]) );
+    for(int i = 0; i < blockNames.length; i++)
+      linkBlocks(new File(from, blockNames[i]), new File(to, blockNames[i]));
   }
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java Thu Apr 19 14:34:41 2007
@@ -60,8 +60,8 @@
   /** DatanodeDescriptor constructor
    * @param nodeID id of the data node
    */
-  public DatanodeDescriptor( DatanodeID nodeID ) {
-    this( nodeID, 0L, 0L, 0 );
+  public DatanodeDescriptor(DatanodeID nodeID) {
+    this(nodeID, 0L, 0L, 0);
   }
 
   /** DatanodeDescriptor constructor
@@ -69,9 +69,9 @@
    * @param nodeID id of the data node
    * @param networkLocation location of the data node in network
    */
-  public DatanodeDescriptor( DatanodeID nodeID, 
-                             String networkLocation ) {
-    this( nodeID, networkLocation, null );
+  public DatanodeDescriptor(DatanodeID nodeID, 
+                            String networkLocation) {
+    this(nodeID, networkLocation, null);
   }
   
   /** DatanodeDescriptor constructor
@@ -80,10 +80,10 @@
    * @param networkLocation location of the data node in network
    * @param hostName it could be different from host specified for DatanodeID
    */
-  public DatanodeDescriptor( DatanodeID nodeID, 
-                             String networkLocation,
-                             String hostName ) {
-    this( nodeID, networkLocation, hostName, 0L, 0L, 0 );
+  public DatanodeDescriptor(DatanodeID nodeID, 
+                            String networkLocation,
+                            String hostName) {
+    this(nodeID, networkLocation, hostName, 0L, 0L, 0);
   }
   
   /** DatanodeDescriptor constructor
@@ -93,11 +93,11 @@
   * @param remaining remaining capacity of the data node
    * @param xceiverCount # of data transfers at the data node
    */
-  public DatanodeDescriptor( DatanodeID nodeID, 
-                             long capacity, 
-                             long remaining,
-                             int xceiverCount ) {
-    super( nodeID );
+  public DatanodeDescriptor(DatanodeID nodeID, 
+                            long capacity, 
+                            long remaining,
+                            int xceiverCount) {
+    super(nodeID);
     updateHeartbeat(capacity, remaining, xceiverCount);
     initWorkLists();
   }
@@ -110,14 +110,14 @@
   * @param remaining remaining capacity of the data node
    * @param xceiverCount # of data transfers at the data node
    */
-  public DatanodeDescriptor( DatanodeID nodeID,
-                             String networkLocation,
-                             String hostName,
-                             long capacity, 
-                             long remaining,
-                             int xceiverCount ) {
-    super( nodeID, networkLocation, hostName );
-    updateHeartbeat( capacity, remaining, xceiverCount);
+  public DatanodeDescriptor(DatanodeID nodeID,
+                            String networkLocation,
+                            String hostName,
+                            long capacity, 
+                            long remaining,
+                            int xceiverCount) {
+    super(nodeID, networkLocation, hostName);
+    updateHeartbeat(capacity, remaining, xceiverCount);
     initWorkLists();
   }
 
@@ -169,7 +169,7 @@
   }
   
   Block getBlock(long blockId) {
-    return blocks.get( new Block(blockId, 0) );
+    return blocks.get(new Block(blockId, 0));
   }
   
   Block getBlock(Block b) {

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeID.java Thu Apr 19 14:34:41 2007
@@ -24,7 +24,7 @@
    * DatanodeID default constructor
    */
   public DatanodeID() {
-    this( new String(), new String(), -1 );
+    this(new String(), new String(), -1);
   }
 
   /**
@@ -32,8 +32,8 @@
    * 
    * @param from
    */
-  public DatanodeID( DatanodeID from ) {
-    this( from.getName(), from.getStorageID(), from.getInfoPort() );
+  public DatanodeID(DatanodeID from) {
+    this(from.getName(), from.getStorageID(), from.getInfoPort());
   }
   
   /**
@@ -42,7 +42,7 @@
    * @param nodeName (hostname:portNumber) 
    * @param storageID data storage ID
    */
-  public DatanodeID( String nodeName, String storageID, int infoPort ) {
+  public DatanodeID(String nodeName, String storageID, int infoPort) {
     this.name = nodeName;
     this.storageID = storageID;
     this.infoPort = infoPort;
@@ -90,13 +90,13 @@
   
   public int getPort() {
     int colon = name.indexOf(":");
-    if ( colon < 0 ) {
+    if (colon < 0) {
       return 50010; // default port.
     }
     return Integer.parseInt(name.substring(colon+1));
   }
 
-  public boolean equals( Object to ) {
+  public boolean equals(Object to) {
     return (name.equals(((DatanodeID)to).getName()) &&
             storageID.equals(((DatanodeID)to).getStorageID()));
   }
@@ -113,7 +113,7 @@
    * Update fields when a new registration request comes in.
    * Note that this does not update storageID.
    */
-  void updateRegInfo( DatanodeID nodeReg ) {
+  void updateRegInfo(DatanodeID nodeReg) {
     name = nodeReg.getName();
     infoPort = nodeReg.getInfoPort();
     // update any more fields added in future.

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeInfo.java Thu Apr 19 14:34:41 2007
@@ -63,8 +63,8 @@
     adminState = null;
   }
   
-  DatanodeInfo( DatanodeInfo from ) {
-    super( from );
+  DatanodeInfo(DatanodeInfo from) {
+    super(from);
     this.capacity = from.getCapacity();
     this.remaining = from.getRemaining();
     this.lastUpdate = from.getLastUpdate();
@@ -74,8 +74,8 @@
     this.hostName = from.hostName;
   }
 
-  DatanodeInfo( DatanodeID nodeID ) {
-    super( nodeID );
+  DatanodeInfo(DatanodeID nodeID) {
+    super(nodeID);
     this.capacity = 0L;
     this.remaining = 0L;
     this.lastUpdate = 0L;
@@ -83,7 +83,7 @@
     this.adminState = null;    
   }
   
-  DatanodeInfo( DatanodeID nodeID, String location, String hostName ) {
+  DatanodeInfo(DatanodeID nodeID, String location, String hostName) {
     this(nodeID);
     this.location = location;
     this.hostName = hostName;
@@ -135,10 +135,10 @@
 
   
   public String getHostName() {
-    return ( hostName == null || hostName.length()==0 ) ? getHost() : hostName;
+    return (hostName == null || hostName.length()==0) ? getHost() : hostName;
   }
   
-  public void setHostName( String host ) {
+  public void setHostName(String host) {
     hostName = host;
   }
   
@@ -149,7 +149,7 @@
     long r = getRemaining();
     long u = c - r;
     buffer.append("Name: "+name+"\n");
-    if(!NetworkTopology.DEFAULT_RACK.equals(location)) {
+    if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append("Rack: "+location+"\n");
     }
     if (isDecommissioned()) {
@@ -161,7 +161,7 @@
     }
     buffer.append("Total raw bytes: "+c+" ("+FsShell.byteDesc(c)+")"+"\n");
     buffer.append("Used raw bytes: "+u+" ("+FsShell.byteDesc(u)+")"+"\n");
-    buffer.append("% used: "+FsShell.limitDecimal(((1.0*u)/c)*100,2)+"%"+"\n");
+    buffer.append("% used: "+FsShell.limitDecimal(((1.0*u)/c)*100, 2)+"%"+"\n");
     buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
     return buffer.toString();
   }
@@ -173,7 +173,7 @@
     long r = getRemaining();
     long u = c - r;
     buffer.append(name);
-    if(!NetworkTopology.DEFAULT_RACK.equals(location)) {
+    if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append(" "+location);
     }
     if (isDecommissioned()) {
@@ -185,7 +185,7 @@
     }
     buffer.append(" " + c + "(" + FsShell.byteDesc(c)+")");
     buffer.append(" " + u + "(" + FsShell.byteDesc(u)+")");
-    buffer.append(" " + FsShell.limitDecimal(((1.0*u)/c)*100,2)+"%");
+    buffer.append(" " + FsShell.limitDecimal(((1.0*u)/c)*100, 2)+"%");
     buffer.append(" " + new Date(lastUpdate));
     return buffer.toString();
   }
@@ -260,13 +260,13 @@
 
   /** Return this node's parent */
   public Node getParent() { return parent; }
-  public void setParent( Node parent ) {this.parent = parent;}
+  public void setParent(Node parent) {this.parent = parent;}
    
   /** Return this node's level in the tree.
    * E.g. the root of a tree returns 0 and its children return 1
    */
   public int getLevel() { return level; }
-  public void setLevel( int level) {this.level = level;}
+  public void setLevel(int level) {this.level = level;}
 
   /////////////////////////////////////////////////
   // Writable
@@ -282,12 +282,12 @@
   /**
    */
   public void write(DataOutput out) throws IOException {
-    super.write( out );
+    super.write(out);
     out.writeLong(capacity);
     out.writeLong(remaining);
     out.writeLong(lastUpdate);
     out.writeInt(xceiverCount);
-    Text.writeString( out, location );
+    Text.writeString(out, location);
     WritableUtils.writeEnum(out, getAdminState());
   }
 
@@ -299,7 +299,7 @@
     this.remaining = in.readLong();
     this.lastUpdate = in.readLong();
     this.xceiverCount = in.readInt();
-    this.location = Text.readString( in );
+    this.location = Text.readString(in);
     AdminStates newState = (AdminStates) WritableUtils.readEnum(in,
                                                                 AdminStates.class);
     setAdminState(newState);

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeProtocol.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeProtocol.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeProtocol.java Thu Apr 19 14:34:41 2007
@@ -64,9 +64,9 @@
    * new storageID if the datanode did not have one and
    * registration ID for further communication.
    */
-  public DatanodeRegistration register( DatanodeRegistration registration,
-                                        String networkLocation
-                                        ) throws IOException;
+  public DatanodeRegistration register(DatanodeRegistration registration,
+                                       String networkLocation
+                                       ) throws IOException;
   /**
    * sendHeartbeat() tells the NameNode that the DataNode is still
    * alive and well.  Includes some status info, too. 
@@ -74,10 +74,10 @@
    * A DatanodeCommand tells the DataNode to invalidate local block(s), 
    * or to copy them to other DataNodes, etc.
    */
-  public DatanodeCommand sendHeartbeat( DatanodeRegistration registration,
-                                        long capacity, long remaining,
-                                        int xmitsInProgress,
-                                        int xceiverCount) throws IOException;
+  public DatanodeCommand sendHeartbeat(DatanodeRegistration registration,
+                                       long capacity, long remaining,
+                                       int xmitsInProgress,
+                                       int xceiverCount) throws IOException;
 
   /**
    * blockReport() tells the NameNode about all the locally-stored blocks.
@@ -86,8 +86,8 @@
    * the locally-stored blocks.  It's invoked upon startup and then
    * infrequently afterwards.
    */
-  public DatanodeCommand blockReport( DatanodeRegistration registration,
-                                      Block blocks[]) throws IOException;
+  public DatanodeCommand blockReport(DatanodeRegistration registration,
+                                     Block blocks[]) throws IOException;
     
   /**
    * blockReceived() allows the DataNode to tell the NameNode about

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeRegistration.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeRegistration.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeRegistration.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DatanodeRegistration.java Thu Apr 19 14:34:41 2007
@@ -31,7 +31,7 @@
    * Default constructor.
    */
   public DatanodeRegistration() {
-    super( null, null, -1 );
+    super(null, null, -1);
     this.storageInfo = new StorageInfo();
   }
   
@@ -40,9 +40,9 @@
    */
   public DatanodeRegistration(String nodeName, 
                               int infoPort,
-                              DataStorage storage ) {
-    super( nodeName, storage.getStorageID(), infoPort );
-    this.storageInfo = new StorageInfo( storage );
+                              DataStorage storage) {
+    super(nodeName, storage.getStorageID(), infoPort);
+    this.storageInfo = new StorageInfo(storage);
   }
 
   /**
@@ -54,7 +54,7 @@
   /**
    */
   public String getRegistrationID() {
-    return Storage.getRegistrationID( storageInfo );
+    return Storage.getRegistrationID(storageInfo);
   }
 
   /////////////////////////////////////////////////
@@ -63,10 +63,10 @@
   /**
    */
   public void write(DataOutput out) throws IOException {
-    super.write( out );
-    out.writeInt( storageInfo.getLayoutVersion() );
-    out.writeInt( storageInfo.getNamespaceID() );
-    out.writeLong( storageInfo.getCTime() );
+    super.write(out);
+    out.writeInt(storageInfo.getLayoutVersion());
+    out.writeInt(storageInfo.getNamespaceID());
+    out.writeLong(storageInfo.getCTime());
   }
 
   /**

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DisallowedDatanodeException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DisallowedDatanodeException.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DisallowedDatanodeException.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DisallowedDatanodeException.java Thu Apr 19 14:34:41 2007
@@ -12,7 +12,7 @@
  */
 class DisallowedDatanodeException extends IOException {
 
-  public DisallowedDatanodeException( DatanodeID nodeID ) {
-    super("Datanode denied communication with namenode: " + nodeID.getName() );
+  public DisallowedDatanodeException(DatanodeID nodeID) {
+    super("Datanode denied communication with namenode: " + nodeID.getName());
   }
 }

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DistributedFileSystem.java Thu Apr 19 14:34:41 2007
@@ -64,7 +64,7 @@
       setConf(conf);
       String host = uri.getHost();
       int port = uri.getPort();
-      this.dfs = new DFSClient(new InetSocketAddress(host,port), conf);
+      this.dfs = new DFSClient(new InetSocketAddress(host, port), conf);
       this.uri = URI.create("hdfs://"+host+":"+port);
       this.localFs = getNamed("file:///", conf);
     }
@@ -122,7 +122,7 @@
     }
 
     public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-      if (! exists(f)) {
+      if (!exists(f)) {
         throw new FileNotFoundException(f.toString());
       }
 
@@ -132,7 +132,7 @@
     public FSDataOutputStream create(Path f, boolean overwrite,
                                      int bufferSize, short replication, long blockSize,
                                      Progressable progress) throws IOException {
-      if (exists(f) && ! overwrite) {
+      if (exists(f) && !overwrite) {
         throw new IOException("File already exists:"+f);
       }
       Path parent = f.getParent();
@@ -146,9 +146,9 @@
                                     bufferSize);
     }
     
-    public boolean setReplication( Path src, 
-                                   short replication
-                                   ) throws IOException {
+    public boolean setReplication(Path src, 
+                                  short replication
+                                  ) throws IOException {
       return dfs.setReplication(getPath(src), replication);
     }
     
@@ -223,7 +223,7 @@
 
     /** @deprecated */ @Deprecated
       public void lock(Path f, boolean shared) throws IOException {
-      dfs.lock(getPath(f), ! shared);
+      dfs.lock(getPath(f), !shared);
     }
 
     /** @deprecated */ @Deprecated
@@ -232,13 +232,13 @@
     }
 
     @Override
-      public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
       throws IOException {
       FileUtil.copy(localFs, src, this, dst, delSrc, getConf());
     }
 
     @Override
-      public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
       throws IOException {
       FileUtil.copy(this, src, localFs, dst, delSrc, getConf());
     }
@@ -290,9 +290,9 @@
      *  
      * @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
      */
-    public boolean setSafeMode( FSConstants.SafeModeAction action ) 
+    public boolean setSafeMode(FSConstants.SafeModeAction action) 
       throws IOException {
-      return dfs.setSafeMode( action );
+      return dfs.setSafeMode(action);
     }
 
     /*
@@ -368,17 +368,17 @@
   }
 
   public DistributedFileSystem() {
-    super( new RawDistributedFileSystem() );
+    super(new RawDistributedFileSystem());
   }
 
   /** @deprecated */
   public DistributedFileSystem(InetSocketAddress namenode,
                                Configuration conf) throws IOException {
-    super( new RawDistributedFileSystem(namenode, conf) );
+    super(new RawDistributedFileSystem(namenode, conf));
   }
 
   @Override
-    public long getContentLength(Path f) throws IOException {
+  public long getContentLength(Path f) throws IOException {
     return fs.getContentLength(f);
   }
 
@@ -404,9 +404,9 @@
    *  
    * @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
    */
-  public boolean setSafeMode( FSConstants.SafeModeAction action ) 
+  public boolean setSafeMode(FSConstants.SafeModeAction action) 
     throws IOException {
-    return ((RawDistributedFileSystem)fs).setSafeMode( action );
+    return ((RawDistributedFileSystem)fs).setSafeMode(action);
   }
 
   /*

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDataset.java Thu Apr 19 14:34:41 2007
@@ -50,8 +50,8 @@
       throws IOException {
       this.dir = dir;
       this.children = null;
-      if (! dir.exists()) {
-        if (! dir.mkdirs()) {
+      if (!dir.exists()) {
+        if (!dir.mkdirs()) {
           throw new IOException("Mkdirs failed to create " + 
                                 dir.toString());
         }
@@ -78,14 +78,14 @@
       }
     }
         
-    public File addBlock( Block b, File src ) throws IOException {
+    public File addBlock(Block b, File src) throws IOException {
       //First try without creating subdirectories
-      File file = addBlock( b, src, false, false );          
-      return ( file != null ) ? file : addBlock( b, src, true, true );
+      File file = addBlock(b, src, false, false);          
+      return (file != null) ? file : addBlock(b, src, true, true);
     }
 
-    private File addBlock( Block b, File src, boolean createOk, 
-                           boolean resetIdx ) throws IOException {
+    private File addBlock(Block b, File src, boolean createOk, 
+                          boolean resetIdx) throws IOException {
       if (numBlocks < maxBlocksPerDir) {
         File dest = new File(dir, b.getBlockName());
         src.renameTo(dest);
@@ -93,17 +93,17 @@
         return dest;
       }
             
-      if ( lastChildIdx < 0 && resetIdx ) {
+      if (lastChildIdx < 0 && resetIdx) {
         //reset so that all children will be checked
-        lastChildIdx = random.nextInt( children.length );              
+        lastChildIdx = random.nextInt(children.length);              
       }
             
-      if ( lastChildIdx >= 0 && children != null ) {
+      if (lastChildIdx >= 0 && children != null) {
         //Check if any child-tree has room for a block.
         for (int i=0; i < children.length; i++) {
-          int idx = ( lastChildIdx + i )%children.length;
-          File file = children[idx].addBlock( b, src, false, resetIdx );
-          if ( file != null ) {
+          int idx = (lastChildIdx + i)%children.length;
+          File file = children[idx].addBlock(b, src, false, resetIdx);
+          if (file != null) {
             lastChildIdx = idx;
             return file; 
           }
@@ -111,20 +111,20 @@
         lastChildIdx = -1;
       }
             
-      if ( !createOk ) {
+      if (!createOk) {
         return null;
       }
             
-      if ( children == null || children.length == 0 ) {
+      if (children == null || children.length == 0) {
         children = new FSDir[maxBlocksPerDir];
         for (int idx = 0; idx < maxBlocksPerDir; idx++) {
-          children[idx] = new FSDir( new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx) );
+          children[idx] = new FSDir(new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx));
         }
       }
             
       //now pick a child randomly for creating a new set of subdirs.
-      lastChildIdx = random.nextInt( children.length );
-      return children[ lastChildIdx ].addBlock( b, src, true, false ); 
+      lastChildIdx = random.nextInt(children.length);
+      return children[ lastChildIdx ].addBlock(b, src, true, false); 
     }
 
     /**
@@ -194,13 +194,13 @@
     void clearPath(File f) {
       String root = dir.getAbsolutePath();
       String dir = f.getAbsolutePath();
-      if ( dir.startsWith( root ) ) {
-        String[] dirNames = dir.substring( root.length() ).
-          split( File.separator + "subdir" );
-        if ( clearPath( f, dirNames, 1 ) )
+      if (dir.startsWith(root)) {
+        String[] dirNames = dir.substring(root.length()).
+          split(File.separator + "subdir");
+        if (clearPath(f, dirNames, 1))
           return;
       }
-      clearPath( f, null, -1 );
+      clearPath(f, null, -1);
     }
         
     /*
@@ -211,33 +211,33 @@
      * children in common case. If directory structure changes 
      * in later versions, we need to revisit this.
      */
-    private boolean clearPath( File f, String[] dirNames, int idx ) {
-      if ( ( dirNames == null || idx == dirNames.length ) &&
-           dir.compareTo(f) == 0) {
+    private boolean clearPath(File f, String[] dirNames, int idx) {
+      if ((dirNames == null || idx == dirNames.length) &&
+          dir.compareTo(f) == 0) {
         numBlocks--;
         return true;
       }
           
-      if ( dirNames != null ) {
+      if (dirNames != null) {
         //guess the child index from the directory name
-        if ( idx > ( dirNames.length - 1 ) || children == null ) {
+        if (idx > (dirNames.length - 1) || children == null) {
           return false;
         }
         int childIdx; 
         try {
-          childIdx = Integer.parseInt( dirNames[idx] );
-        } catch ( NumberFormatException ignored ) {
+          childIdx = Integer.parseInt(dirNames[idx]);
+        } catch (NumberFormatException ignored) {
           // layout changed? we could print a warning.
           return false;
         }
-        return ( childIdx >= 0 && childIdx < children.length ) ?
-          children[childIdx].clearPath( f, dirNames, idx+1 ) : false;
+        return (childIdx >= 0 && childIdx < children.length) ?
+          children[childIdx].clearPath(f, dirNames, idx+1) : false;
       }
 
       //guesses failed. back to blind iteration.
-      if ( children != null ) {
+      if (children != null) {
         for(int i=0; i < children.length; i++) {
-          if ( children[i].clearPath( f, null, -1 ) ){
+          if (children[i].clearPath(f, null, -1)){
             return true;
           }
         }
@@ -262,12 +262,12 @@
     private long reserved;
     private double usableDiskPct = USABLE_DISK_PCT_DEFAULT;
     
-    FSVolume( File currentDir, Configuration conf) throws IOException {
+    FSVolume(File currentDir, Configuration conf) throws IOException {
       this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
       this.usableDiskPct = conf.getFloat("dfs.datanode.du.pct",
                                          (float) USABLE_DISK_PCT_DEFAULT);
       File parent = currentDir.getParentFile();
-      this.dataDir = new FSDir( currentDir );
+      this.dataDir = new FSDir(currentDir);
       this.tmpDir = new File(parent, "tmp");
       if (tmpDir.exists()) {
         FileUtil.fullyDelete(tmpDir);
@@ -288,7 +288,7 @@
       long capacity = usage.getCapacity();
       long freespace = Math.round(usage.getAvailableSkipRefresh() -
                                   capacity * (1 - usableDiskPct) - reserved); 
-      return ( freespace > 0 ) ? freespace : 0;
+      return (freespace > 0) ? freespace : 0;
     }
       
     String getMount() throws IOException {
@@ -309,7 +309,7 @@
                                 b + ".  File " + f + " should be creatable, but is already present.");
         }
       } catch (IOException ie) {
-        System.out.println("Exception!  " + ie);
+        System.out.println("Exception! " + ie);
         throw ie;
       }
       return f;
@@ -430,7 +430,7 @@
   /**
    * An FSDataset has a directory where it loads its data files.
    */
-  public FSDataset( DataStorage storage, Configuration conf) throws IOException {
+  public FSDataset(DataStorage storage, Configuration conf) throws IOException {
     this.maxBlocksPerDir = conf.getInt("dfs.datanode.numblocks", 64);
     FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
@@ -461,7 +461,7 @@
    * Find the block's on-disk length
    */
   public long getLength(Block b) throws IOException {
-    if (! isValidBlock(b)) {
+    if (!isValidBlock(b)) {
       throw new IOException("Block " + b + " is not valid.");
     }
     File f = getFile(b);
@@ -472,7 +472,7 @@
    * Get a stream of data from the indicated block.
    */
   public synchronized InputStream getBlockData(Block b) throws IOException {
-    if (! isValidBlock(b)) {
+    if (!isValidBlock(b)) {
       throw new IOException("Block " + b + " is not valid.");
     }
     // File should be opened with the lock.
@@ -495,7 +495,7 @@
     // Serialize access to /tmp, and check if file already there.
     //
     File f = null;
-    synchronized ( this ) {
+    synchronized (this) {
       //
       // Is it already in the create process?
       //
@@ -514,7 +514,7 @@
         }
       }
       FSVolume v = null;
-      synchronized ( volumes ) {
+      synchronized (volumes) {
         v = volumes.getNextVolume(blockSize);
         // create temporary file to hold block in the designated volume
         f = v.createTmpFile(b);
@@ -544,7 +544,7 @@
    */
   public synchronized void finalizeBlock(Block b) throws IOException {
     File f = ongoingCreates.get(b);
-    if (f == null || ! f.exists()) {
+    if (f == null || !f.exists()) {
       throw new IOException("No temporary file " + f + " for block " + b);
     }
     long finalLen = f.length();
@@ -552,7 +552,7 @@
     FSVolume v = volumeMap.get(b);
         
     File dest = null;
-    synchronized ( volumes ) {
+    synchronized (volumes) {
       dest = v.addBlock(b, f);
     }
     blockMap.put(b, dest);



Mime
View raw message