hadoop-common-commits mailing list archives

From: omal...@apache.org
Subject: svn commit: r785005 [2/3] - in /hadoop/core/branches/HADOOP-4687/hdfs: lib/ src/contrib/hdfsproxy/ src/contrib/hdfsproxy/bin/ src/java/ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/co...
Date: Mon, 15 Jun 2009 22:13:09 GMT
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/common/Storage.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/common/Storage.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/common/Storage.java Mon Jun 15 22:13:06 2009
@@ -767,7 +767,7 @@
   
     file.seek(0);
     file.writeInt(FSConstants.LAYOUT_VERSION);
-    org.apache.hadoop.io.DeprecatedUTF8.writeString(file, "");
+    org.apache.hadoop.hdfs.DeprecatedUTF8.writeString(file, "");
     file.writeBytes(messageForPreUpgradeVersion);
     file.getFD().sync();
   }

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java Mon Jun 15 22:13:06 2009
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hdfs.server.common;
 
 import java.io.DataInput;

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java Mon Jun 15 22:13:06 2009
@@ -346,7 +346,10 @@
     info.lastLogTime = now;
     LogFileHandler log = verificationLog;
     if (log != null) {
-      log.appendLine(LogEntry.newEnry(block, now));
+      log.appendLine("date=\"" + dateFormat.format(new Date(now)) + "\"\t " +
+          "time=\"" + now + "\"\t " +
+          "genstamp=\"" + block.getGenerationStamp() + "\"\t " +
+          "id=\"" + block.getBlockId() +"\"");
     }
   }
   
@@ -381,13 +384,6 @@
     private static Pattern entryPattern = 
       Pattern.compile("\\G\\s*([^=\\p{Space}]+)=\"(.*?)\"\\s*");
     
-    static String newEnry(Block block, long time) {
-      return "date=\"" + dateFormat.format(new Date(time)) + "\"\t " +
-             "time=\"" + time + "\"\t " +
-             "genstamp=\"" + block.getGenerationStamp() + "\"\t " +
-             "id=\"" + block.getBlockId() +"\"";
-    }
-    
     static LogEntry parseEntry(String line) {
       LogEntry entry = new LogEntry();
       

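Note: the DataBlockScanner hunks above inline the removed LogEntry.newEnry() helper at its single call site; the key="value" entry format is unchanged and still has to round-trip through the entryPattern regex kept in LogEntry.parseEntry(). A minimal standalone sketch of that round trip follows (the class name, sample block values, and date pattern are assumptions for illustration, not taken from the patch):

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class VerificationLogEntrySketch {
  // Same key="value" pattern as LogEntry.parseEntry() in the hunk above.
  private static final Pattern ENTRY_PATTERN =
      Pattern.compile("\\G\\s*([^=\\p{Space}]+)=\"(.*?)\"\\s*");

  public static void main(String[] args) {
    // Sample values; the real scanner uses the block's generation stamp and id.
    long now = System.currentTimeMillis();
    long genStamp = 1000L;
    long blockId = 42L;
    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSS");

    // Build a line in the same shape the scanner now writes via appendLine().
    String line = "date=\"" + dateFormat.format(new Date(now)) + "\"\t " +
                  "time=\"" + now + "\"\t " +
                  "genstamp=\"" + genStamp + "\"\t " +
                  "id=\"" + blockId + "\"";

    // Read it back the way parseEntry() does: successive key="value" pairs,
    // each anchored (\G) at the end of the previous match.
    Matcher m = ENTRY_PATTERN.matcher(line);
    while (m.find()) {
      System.out.println(m.group(1) + " -> " + m.group(2));
    }
  }
}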
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Mon Jun 15 22:13:06 2009
@@ -93,6 +93,7 @@
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
+import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -632,6 +633,7 @@
 
       // wait for all data receiver threads to exit
       if (this.threadGroup != null) {
+        int sleepMs = 2;
         while (true) {
           this.threadGroup.interrupt();
           LOG.info("Waiting for threadgroup to exit, active threads is " +
@@ -640,8 +642,12 @@
             break;
           }
           try {
-            Thread.sleep(1000);
+            Thread.sleep(sleepMs);
           } catch (InterruptedException e) {}
+          sleepMs = sleepMs * 3 / 2; // exponential backoff
+          if (sleepMs > 1000) {
+            sleepMs = 1000;
+          }
         }
       }
       // wait for dataXceiveServer to terminate
@@ -1304,6 +1310,10 @@
                                       Configuration conf) throws IOException {
     if (conf == null)
       conf = new Configuration();
+    // parse generic hadoop options
+    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
+    args = hParser.getRemainingArgs();
+    
     if (!parseArguments(args, conf)) {
       printUsage();
       return null;

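Note: the DataNode hunks above make two independent changes. The shutdown loop now backs off geometrically (starting at 2 ms, growing by a factor of 3/2, capped at 1000 ms) instead of sleeping a fixed second per iteration, and instantiateDataNode() now runs its arguments through GenericOptionsParser so that generic Hadoop options are absorbed into the Configuration before the datanode's own parseArguments() sees them. A minimal sketch of the parser's effect, using made-up command-line values:

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class GenericOptionsSketch {
  public static void main(String[] ignored) throws IOException {
    Configuration conf = new Configuration();
    // Made-up command line: one generic option plus one datanode-specific flag.
    String[] args = {"-D", "dfs.data.dir=/tmp/dn-data", "-rollback"};

    GenericOptionsParser parser = new GenericOptionsParser(conf, args);
    String[] remaining = parser.getRemainingArgs();

    // The -D pair has been folded into the Configuration ...
    System.out.println("dfs.data.dir = " + conf.get("dfs.data.dir"));
    // ... and only "-rollback" is left for parseArguments() to handle.
    System.out.println("remaining args = " + Arrays.toString(remaining));
  }
}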
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java Mon Jun 15 22:13:06 2009
@@ -363,8 +363,17 @@
   static void linkBlocks(File from, File to, int oldLV) throws IOException {
     if (!from.isDirectory()) {
       if (from.getName().startsWith(COPY_FILE_PREFIX)) {
-        IOUtils.copyBytes(new FileInputStream(from), 
-                          new FileOutputStream(to), 16*1024, true);
+        FileInputStream in = new FileInputStream(from);
+        try {
+          FileOutputStream out = new FileOutputStream(to);
+          try {
+            IOUtils.copyBytes(in, out, 16*1024);
+          } finally {
+            out.close();
+          }
+        } finally {
+          in.close();
+        }
       } else {
         
         //check if we are upgrading from pre-generation stamp version.

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeBlockInfo.java Mon Jun 15 22:13:06 2009
@@ -86,9 +86,17 @@
   private void detachFile(File file, Block b) throws IOException {
     File tmpFile = volume.createDetachFile(b, file.getName());
     try {
-      IOUtils.copyBytes(new FileInputStream(file),
-                        new FileOutputStream(tmpFile),
-                        16*1024, true);
+      FileInputStream in = new FileInputStream(file);
+      try {
+        FileOutputStream out = new FileOutputStream(tmpFile);
+        try {
+          IOUtils.copyBytes(in, out, 16*1024);
+        } finally {
+          out.close();
+        }
+      } finally {
+        in.close();
+      }
       if (file.length() != tmpFile.length()) {
         throw new IOException("Copy of file " + file + " size " + file.length()+
                               " into file " + tmpFile +

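Note: both the DataStorage.linkBlocks() and DatanodeBlockInfo.detachFile() hunks replace the single copyBytes(in, out, 16*1024, true) call with streams opened up front and closed in nested try/finally blocks; the likely motivation is that the old form could leak the input stream if constructing the output stream threw. The same pattern, pulled into a hypothetical stand-alone helper (helper and class names are illustrative only):

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.IOUtils;

public class CopyFullySketch {
  /** Copy a file, guaranteeing both streams are closed even on error paths. */
  static void copyFully(File from, File to, int bufferSize) throws IOException {
    FileInputStream in = new FileInputStream(from);
    try {
      FileOutputStream out = new FileOutputStream(to);
      try {
        IOUtils.copyBytes(in, out, bufferSize);   // 3-arg overload: no implicit close
      } finally {
        out.close();
      }
    } finally {
      in.close();
    }
  }

  public static void main(String[] args) throws IOException {
    copyFully(new File(args[0]), new File(args[1]), 16 * 1024);
  }
}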
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Mon Jun 15 22:13:06 2009
@@ -51,7 +51,7 @@
  * namespace image to local disk(s).</li>
  * </ol>
  */
-class BackupNode extends NameNode {
+public class BackupNode extends NameNode {
   private static final String BN_ADDRESS_NAME_KEY = "dfs.backup.address";
   private static final String BN_ADDRESS_DEFAULT = "localhost:50100";
   private static final String BN_HTTP_ADDRESS_NAME_KEY = "dfs.backup.http.address";
@@ -90,6 +90,9 @@
 
   @Override // NameNode
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
+    // It is necessary to resolve the hostname at this point in order
+    // to ensure that the server address that is sent to the namenode
+    // is correct.
     assert rpcAddress != null : "rpcAddress should be calculated first";
     String addr = conf.get(BN_HTTP_ADDRESS_NAME_KEY, BN_HTTP_ADDRESS_DEFAULT);
     int port = NetUtils.createSocketAddr(addr).getPort();

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Mon Jun 15 22:13:06 2009
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
@@ -102,9 +119,9 @@
 
   void setConfigurationParameters(Configuration conf) throws IOException {
     this.replicator = new ReplicationTargetChooser(
-                          conf.getBoolean("dfs.replication.considerLoad", true),
-                          namesystem,
-                          namesystem.clusterMap);
+                         conf.getBoolean("dfs.replication.considerLoad", true),
+                         namesystem,
+                         namesystem.clusterMap);
 
     this.defaultReplication = conf.getInt("dfs.replication", 3);
     this.maxReplication = conf.getInt("dfs.replication.max", 512);
@@ -144,8 +161,8 @@
     // Dump contents of neededReplication
     //
     synchronized (neededReplications) {
-      out.println("Metasave: Blocks waiting for replication: "
-          + neededReplications.size());
+      out.println("Metasave: Blocks waiting for replication: " + 
+                  neededReplications.size());
       for (Block block : neededReplications) {
         List<DatanodeDescriptor> containingNodes =
                                           new ArrayList<DatanodeDescriptor>();
@@ -201,8 +218,7 @@
   }
 
   /**
-   * Get all valid locations of the block & add the block to results
-   * return the length of the added block; 0 if the block is not added
+   * Get all valid locations of the block
    */
   ArrayList<String> addBlock(Block block) {
     ArrayList<String> machineSet =
@@ -251,16 +267,16 @@
             + " but corrupt replicas map has " + numCorruptReplicas);
       }
       boolean blockCorrupt = (numCorruptNodes == numNodes);
-      int numMachineSet = blockCorrupt ? numNodes
-          : (numNodes - numCorruptNodes);
+      int numMachineSet = blockCorrupt ? numNodes :
+                          (numNodes - numCorruptNodes);
       DatanodeDescriptor[] machineSet = new DatanodeDescriptor[numMachineSet];
       if (numMachineSet > 0) {
         numNodes = 0;
-        for (Iterator<DatanodeDescriptor> it = blocksMap
-            .nodeIterator(blocks[curBlk]); it.hasNext();) {
+        for (Iterator<DatanodeDescriptor> it = 
+             blocksMap.nodeIterator(blocks[curBlk]); it.hasNext();) {
           DatanodeDescriptor dn = it.next();
-          boolean replicaCorrupt = corruptReplicas.isReplicaCorrupt(
-              blocks[curBlk], dn);
+          boolean replicaCorrupt = 
+            corruptReplicas.isReplicaCorrupt(blocks[curBlk], dn);
           if (blockCorrupt || (!blockCorrupt && !replicaCorrupt))
             machineSet[numNodes++] = dn;
         }
@@ -274,8 +290,9 @@
       results.add(b);
       curPos += blocks[curBlk].getNumBytes();
       curBlk++;
-    } while (curPos < endOff && curBlk < blocks.length
-        && results.size() < nrBlocksToReturn);
+    } while (curPos < endOff 
+          && curBlk < blocks.length
+          && results.size() < nrBlocksToReturn);
     return results;
   }
 
@@ -291,9 +308,11 @@
       //common case. avoid building 'text'
       return;
     }
-
-    String text = "file " + src + ((clientName != null) ? " on client "
-      + clientName : "") + ".\n" + "Requested replication " + replication;
+    
+    String text = "file " + src 
+      + ((clientName != null) ? " on client " + clientName : "")
+      + ".\n"
+      + "Requested replication " + replication;
 
     if (replication > maxReplication)
       throw new IOException(text + " exceeds maximum " + maxReplication);
@@ -362,9 +381,9 @@
   void markBlockAsCorrupt(Block blk, DatanodeInfo dn) throws IOException {
     DatanodeDescriptor node = namesystem.getDatanode(dn);
     if (node == null) {
-      throw new IOException("Cannot mark block" + blk.getBlockName()
-          + " as corrupt because datanode " + dn.getName()
-          + " does not exist. ");
+      throw new IOException("Cannot mark block" + blk.getBlockName() +
+                            " as corrupt because datanode " + dn.getName() +
+                            " does not exist. ");
     }
 
     final BlockInfo storedBlockInfo = blocksMap.getStoredBlock(blk);
@@ -373,18 +392,20 @@
       // ignore the request for now. This could happen when BlockScanner
       // thread of Datanode reports bad block before Block reports are sent
       // by the Datanode on startup
-      NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: "
-          + "block " + blk + " could not be marked "
-          + "as corrupt as it does not exists in " + "blocksMap");
+      NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: " +
+                                   "block " + blk + " could not be marked " +
+                                   "as corrupt as it does not exists in " +
+                                   "blocksMap");
     } else {
       INodeFile inode = storedBlockInfo.getINode();
       if (inode == null) {
-        NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: "
-            + "block " + blk + " could not be marked "
-            + "as corrupt as it does not belong to " + "any file");
+        NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: " +
+                                     "block " + blk + " could not be marked " +
+                                     "as corrupt as it does not belong to " +
+                                     "any file");
         addToInvalidates(storedBlockInfo, node);
         return;
-      }
+      } 
       // Add this replica to corruptReplicas Map
       corruptReplicas.addToCorruptReplicasMap(storedBlockInfo, node);
       if (countNodes(storedBlockInfo).liveReplicas() > inode.getReplication()) {
@@ -402,12 +423,13 @@
    */
   private void invalidateBlock(Block blk, DatanodeInfo dn)
       throws IOException {
-    NameNode.stateChangeLog.info("DIR* NameSystem.invalidateBlock: " + blk
-        + " on " + dn.getName());
+    NameNode.stateChangeLog.info("DIR* NameSystem.invalidateBlock: "
+                                 + blk + " on " + dn.getName());
     DatanodeDescriptor node = namesystem.getDatanode(dn);
     if (node == null) {
-      throw new IOException("Cannot invalidate block " + blk
-          + " because datanode " + dn.getName() + " does not exist.");
+      throw new IOException("Cannot invalidate block " + blk +
+                            " because datanode " + dn.getName() +
+                            " does not exist.");
     }
 
     // Check how many copies we have of the block. If we have at least one
@@ -417,7 +439,8 @@
       addToInvalidates(blk, dn);
       removeStoredBlock(blk, node);
       NameNode.stateChangeLog.debug("BLOCK* NameSystem.invalidateBlocks: "
-          + blk + " on " + dn.getName() + " listed for deletion.");
+                                   + blk + " on "
+                                   + dn.getName() + " listed for deletion.");
     } else {
       NameNode.stateChangeLog.info("BLOCK* NameSystem.invalidateBlocks: "
           + blk + " on " + dn.getName()
@@ -799,9 +822,8 @@
   }
 
   /**
-   * Modify (block-->datanode) map. Remove block from set of needed replications
-   * if this takes care of the problem.
-   *
+   * Modify (block-->datanode) map. Remove block from set of
+   * needed replications if this takes care of the problem.
    * @return the block that is stored in blockMap.
    */
   private Block addStoredBlock(Block block, DatanodeDescriptor node,
@@ -810,9 +832,10 @@
     if (storedBlock == null || storedBlock.getINode() == null) {
       // If this block does not belong to anyfile, then we are done.
       NameNode.stateChangeLog.info("BLOCK* NameSystem.addStoredBlock: "
-          + "addStoredBlock request received for " + block + " on "
-          + node.getName() + " size " + block.getNumBytes()
-          + " But it does not belong to any file.");
+                                   + "addStoredBlock request received for "
+                                   + block + " on " + node.getName()
+                                   + " size " + block.getNumBytes()
+                                   + " But it does not belong to any file.");
       // we could add this block to invalidate set of this datanode.
       // it will happen in next block report otherwise.
       return block;
@@ -829,9 +852,10 @@
         if (cursize == 0) {
           storedBlock.setNumBytes(block.getNumBytes());
         } else if (cursize != block.getNumBytes()) {
-          FSNamesystem.LOG.warn("Inconsistent size for block " + block
-              + " reported from " + node.getName() + " current size is "
-              + cursize + " reported size is " + block.getNumBytes());
+          FSNamesystem.LOG.warn("Inconsistent size for block " + block +
+                   " reported from " + node.getName() +
+                   " current size is " + cursize +
+                   " reported size is " + block.getNumBytes());
           try {
             if (cursize > block.getNumBytes()) {
               // new replica is smaller in size than existing block.
@@ -847,7 +871,7 @@
               int count = 0;
               DatanodeDescriptor nodes[] = new DatanodeDescriptor[numNodes];
               Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
-              for (; it != null && it.hasNext();) {
+              for (; it != null && it.hasNext(); ) {
                 DatanodeDescriptor dd = it.next();
                 if (!dd.equals(node)) {
                   nodes[count++] = dd;
@@ -878,11 +902,11 @@
 
         // Updated space consumed if required.
         INodeFile file = (storedBlock != null) ? storedBlock.getINode() : null;
-        long diff = (file == null) ? 0
-            : (file.getPreferredBlockSize() - storedBlock.getNumBytes());
-
-        if (diff > 0 && file.isUnderConstruction()
-            && cursize < storedBlock.getNumBytes()) {
+        long diff = (file == null) ? 0 :
+                    (file.getPreferredBlockSize() - storedBlock.getNumBytes());
+        
+        if (diff > 0 && file.isUnderConstruction() &&
+            cursize < storedBlock.getNumBytes()) {
           try {
             String path = /* For finding parents */
             namesystem.leaseManager.findPath((INodeFileUnderConstruction) file);
@@ -923,7 +947,7 @@
     NumberReplicas num = countNodes(storedBlock);
     int numLiveReplicas = num.liveReplicas();
     int numCurrentReplica = numLiveReplicas
-        + pendingReplications.getNumReplicas(block);
+      + pendingReplications.getNumReplicas(block);
 
     // check whether safe replication is reached for the block
     namesystem.incrementSafeBlockCount(numCurrentReplica);
@@ -958,9 +982,9 @@
     int corruptReplicasCount = corruptReplicas.numCorruptReplicas(block);
     int numCorruptNodes = num.corruptReplicas();
     if (numCorruptNodes != corruptReplicasCount) {
-      FSNamesystem.LOG.warn("Inconsistent number of corrupt replicas for "
-          + block + "blockMap has " + numCorruptNodes
-          + " but corrupt replicas map has " + corruptReplicasCount);
+      FSNamesystem.LOG.warn("Inconsistent number of corrupt replicas for " +
+          block + "blockMap has " + numCorruptNodes + 
+          " but corrupt replicas map has " + corruptReplicasCount);
     }
     if ((corruptReplicasCount > 0) && (numLiveReplicas >= fileReplication))
       invalidateCorruptReplicas(block);
@@ -970,28 +994,29 @@
   /**
    * Invalidate corrupt replicas.
    * <p>
-   * This will remove the replicas from the block's location list, add them to
-   * {@link #recentInvalidateSets} so that they could be further deleted from
-   * the respective data-nodes, and remove the block from corruptReplicasMap.
+   * This will remove the replicas from the block's location list,
+   * add them to {@link #recentInvalidateSets} so that they could be further
+   * deleted from the respective data-nodes,
+   * and remove the block from corruptReplicasMap.
    * <p>
-   * This method should be called when the block has sufficient number of live
-   * replicas.
+   * This method should be called when the block has sufficient
+   * number of live replicas.
    *
-   * @param blk
-   *          Block whose corrupt replicas need to be invalidated
+   * @param blk Block whose corrupt replicas need to be invalidated
    */
   private void invalidateCorruptReplicas(Block blk) {
     Collection<DatanodeDescriptor> nodes = corruptReplicas.getNodes(blk);
     boolean gotException = false;
     if (nodes == null)
       return;
-    for (Iterator<DatanodeDescriptor> it = nodes.iterator(); it.hasNext();) {
+    for (Iterator<DatanodeDescriptor> it = nodes.iterator(); it.hasNext(); ) {
       DatanodeDescriptor node = it.next();
       try {
         invalidateBlock(blk, node);
       } catch (IOException e) {
-        NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas "
-            + "error in deleting bad block " + blk + " on " + node + e);
+        NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " +
+                                      "error in deleting bad block " + blk +
+                                      " on " + node + e);
         gotException = true;
       }
     }
@@ -1040,9 +1065,9 @@
   }
 
   /**
-   * Find how many of the containing nodes are "extra", if any. If there are any
-   * extras, call chooseExcessReplicates() to mark them in the
-   * excessReplicateMap.
+   * Find how many of the containing nodes are "extra", if any.
+   * If there are any extras, call chooseExcessReplicates() to
+   * mark them in the excessReplicateMap.
    */
   void processOverReplicatedBlock(Block block, short replication,
       DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) {
@@ -1052,8 +1077,8 @@
     Collection<DatanodeDescriptor> nonExcess = new ArrayList<DatanodeDescriptor>();
     Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
         .getNodes(block);
-    for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); it
-        .hasNext();) {
+    for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
+         it.hasNext();) {
       DatanodeDescriptor cur = it.next();
       Collection<Block> excessBlocks = excessReplicateMap.get(cur
           .getStorageID());
@@ -1066,8 +1091,8 @@
         }
       }
     }
-    namesystem.chooseExcessReplicates(nonExcess, block, replication, addedNode,
-        delNodeHint);
+    namesystem.chooseExcessReplicates(nonExcess, block, replication, 
+        addedNode, delNodeHint);
   }
 
   void addToExcessReplicate(DatanodeInfo dn, Block block) {
@@ -1171,8 +1196,8 @@
       } else if (node.isDecommissionInProgress() || node.isDecommissioned()) {
         count++;
       } else {
-        Collection<Block> blocksExcess = excessReplicateMap.get(node
-            .getStorageID());
+        Collection<Block> blocksExcess =
+          excessReplicateMap.get(node.getStorageID());
         if (blocksExcess != null && blocksExcess.contains(b)) {
           excess++;
         } else {

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java Mon Jun 15 22:13:06 2009
@@ -17,7 +17,11 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.util.*;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 
@@ -32,11 +36,11 @@
    * Internal class for block metadata.
    */
   static class BlockInfo extends Block {
-    private INodeFile          inode;
+    private INodeFile inode;
 
     /**
      * This array contains triplets of references.
-     * For each i-th data-node the block belongs to
+     * For each i-th datanode the block belongs to
      * triplets[3*i] is the reference to the DatanodeDescriptor
      * and triplets[3*i+1] and triplets[3*i+2] are references 
      * to the previous and the next blocks, respectively, in the 
@@ -217,7 +221,7 @@
      * If this block is the head of the list then return the next block as 
      * the new head.
      * @return the new head of the list or null if the list becomes
-     * empy after deletion.
+     * empty after deletion.
      */
     BlockInfo listRemove(BlockInfo head, DatanodeDescriptor dn) {
       if(head == null)
@@ -266,6 +270,18 @@
       }
       return true;
     }
+
+    @Override
+    public int hashCode() {
+      // Super implementation is sufficient
+      return super.hashCode();
+    }
+    
+    @Override
+    public boolean equals(Object obj) {
+      // Sufficient to rely on super's implementation
+      return (this == obj) || super.equals(obj);
+    }
   }
 
   private static class NodeIterator implements Iterator<DatanodeDescriptor> {

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java Mon Jun 15 22:13:06 2009
@@ -29,7 +29,7 @@
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.DeprecatedUTF8;
+import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.WritableUtils;
 
 /**************************************************
@@ -94,8 +94,8 @@
   /** A set of blocks to be invalidated by this datanode */
   private Set<Block> invalidateBlocks = new TreeSet<Block>();
 
-  /* Variables for maintaning number of blocks scheduled to be written to
-   * this datanode. This count is approximate and might be slightly higger
+  /* Variables for maintaining number of blocks scheduled to be written to
+   * this datanode. This count is approximate and might be slightly bigger
    * in case of errors (e.g. datanode does not report if an error occurs 
    * while writing the block).
    */
@@ -159,7 +159,7 @@
    * @param networkLocation location of the data node in network
    * @param capacity capacity of the data node, including space used by non-dfs
    * @param dfsUsed the used space by dfs datanode
-   * @param remaining remaing capacity of the data node
+   * @param remaining remaining capacity of the data node
    * @param xceiverCount # of data transfers at the data node
    */
   public DatanodeDescriptor(DatanodeID nodeID,
@@ -174,7 +174,7 @@
   }
 
   /**
-   * Add data-node to the block.
+   * Add datanode to the block.
    * Add block to the head of the list of blocks belonging to the data-node.
    */
   boolean addBlock(BlockInfo b) {
@@ -187,7 +187,7 @@
   
   /**
    * Remove block from the list of blocks belonging to the data-node.
-   * Remove data-node from the block.
+   * Remove datanode from the block.
    */
   boolean removeBlock(BlockInfo b) {
     blockList = b.listRemove(blockList, this);
@@ -228,7 +228,7 @@
   }
 
   /**
-   * Iterates over the list of blocks belonging to the data-node.
+   * Iterates over the list of blocks belonging to the datanode.
    */
   static private class BlockIterator implements Iterator<Block> {
     private BlockInfo current;
@@ -463,4 +463,17 @@
       lastBlocksScheduledRollTime = now;
     }
   }
+  
+  @Override
+  public int hashCode() {
+    // Super implementation is sufficient
+    return super.hashCode();
+  }
+  
+  @Override
+  public boolean equals(Object obj) {
+    // Sufficient to use super equality as datanodes are uniquely identified
+    // by DatanodeID
+    return (this == obj) || super.equals(obj);
+  }
 }

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java Mon Jun 15 22:13:06 2009
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Mon Jun 15 22:13:06 2009
@@ -43,7 +43,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.DeprecatedUTF8;
+import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
@@ -262,13 +262,12 @@
       ArrayList<EditLogOutputStream> errorStreams,
       boolean propagate) {
     
-    String lsd = fsimage.listStorageDirectories();
-    FSNamesystem.LOG.info("current list of storage dirs:" + lsd);
-    
     if (errorStreams == null || errorStreams.size() == 0) {
       return;                       // nothing to do
     }
 
+    String lsd = fsimage.listStorageDirectories();
+    FSNamesystem.LOG.info("current list of storage dirs:" + lsd);
     //EditLogOutputStream
     if (editStreams == null || editStreams.size() <= 1) {
       FSNamesystem.LOG.fatal(
@@ -853,7 +852,7 @@
     if (lastPrintTime + 60000 > now && !force) {
       return;
     }
-    if (editStreams == null) {
+    if (editStreams == null || editStreams.size()==0) {
       return;
     }
     lastPrintTime = now;

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Mon Jun 15 22:13:06 2009
@@ -61,7 +61,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
-import org.apache.hadoop.io.DeprecatedUTF8;
+import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.Writable;
 
 /**

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Jun 15 22:13:06 2009
@@ -54,6 +54,7 @@
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.*;
@@ -815,10 +816,10 @@
    */
   void startFile(String src, PermissionStatus permissions,
                  String holder, String clientMachine,
-                 boolean overwrite, short replication, long blockSize
+                 EnumSet<CreateFlag> flag, short replication, long blockSize
                 ) throws IOException {
-    startFileInternal(src, permissions, holder, clientMachine, overwrite, false,
-                      replication, blockSize);
+    startFileInternal(src, permissions, holder, clientMachine, flag,
+        replication, blockSize);
     getEditLog().logSync();
     if (auditLog.isInfoEnabled()) {
       final FileStatus stat = dir.getFileInfo(src);
@@ -832,11 +833,14 @@
                                               PermissionStatus permissions,
                                               String holder, 
                                               String clientMachine, 
-                                              boolean overwrite,
-                                              boolean append,
+                                              EnumSet<CreateFlag> flag,
                                               short replication,
                                               long blockSize
                                               ) throws IOException {
+    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
+    boolean append = flag.contains(CreateFlag.APPEND);
+    boolean create = flag.contains(CreateFlag.CREATE);
+
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: src=" + src
           + ", holder=" + holder
@@ -918,8 +922,15 @@
       }
       if (append) {
         if (myFile == null) {
-          throw new FileNotFoundException("failed to append to non-existent file "
+          if(!create)
+            throw new FileNotFoundException("failed to append to non-existent file "
               + src + " on client " + clientMachine);
+          else {
+            //append & create a nonexist file equals to overwrite
+            this.startFileInternal(src, permissions, holder, clientMachine,
+                EnumSet.of(CreateFlag.OVERWRITE), replication, blockSize);
+            return;
+          }
         } else if (myFile.isDirectory()) {
           throw new IOException("failed to append to directory " + src 
                                 +" on client " + clientMachine);
@@ -992,7 +1003,7 @@
       throw new IOException("Append to hdfs not supported." +
                             " Please refer to dfs.support.append configuration parameter.");
     }
-    startFileInternal(src, null, holder, clientMachine, false, true, 
+    startFileInternal(src, null, holder, clientMachine, EnumSet.of(CreateFlag.APPEND), 
                       (short)blockManager.maxReplication, (long)0);
     getEditLog().logSync();
 
@@ -2842,43 +2853,12 @@
     }
     return node;
   }
-    
-  /** Stop at and return the datanode at index (used for content browsing)*/
-  @Deprecated
-  private DatanodeDescriptor getDatanodeByIndex(int index) {
-    int i = 0;
-    for (DatanodeDescriptor node : datanodeMap.values()) {
-      if (i == index) {
-        return node;
-      }
-      i++;
-    }
-    return null;
-  }
-    
-  @Deprecated
-  public String randomDataNode() {
-    int size = datanodeMap.size();
-    int index = 0;
-    if (size != 0) {
-      index = r.nextInt(size);
-      for(int i=0; i<size; i++) {
-        DatanodeDescriptor d = getDatanodeByIndex(index);
-        if (d != null && !d.isDecommissioned() && !isDatanodeDead(d) &&
-            !d.isDecommissionInProgress()) {
-          return d.getHost() + ":" + d.getInfoPort();
-        }
-        index = (index + 1) % size;
-      }
-    }
-    return null;
-  }
 
   /** Choose a random datanode
    * 
    * @return a randomly chosen datanode
    */
-  public DatanodeDescriptor getRandomDatanode() {
+  DatanodeDescriptor getRandomDatanode() {
     return (DatanodeDescriptor)clusterMap.chooseRandom(NodeBase.ROOT);
   }
 
@@ -3131,7 +3111,7 @@
           return leaveMsg + " upon completion of " + 
             "the distributed upgrade: upgrade progress = " + 
             getDistributedUpgradeStatus() + "%";
-        leaveMsg = "Use \"hadoop dfs -safemode leave\" to turn safe mode off";
+        leaveMsg = "Use \"hdfs dfsadmin -safemode leave\" to turn safe mode off";
       }
       if(blockTotal < 0)
         return leaveMsg + ".";

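Note: the FSNamesystem hunks above replace the separate overwrite/append booleans with an EnumSet<CreateFlag>, and the new fallback means CREATE|APPEND on a file that does not yet exist is treated like creating a fresh file instead of failing with FileNotFoundException. A small sketch of the flag combinations a caller can now express (the comments restate only the behavior visible in the hunk):

import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;

public class CreateFlagSketch {
  public static void main(String[] args) {
    // Flag combinations startFileInternal() now receives instead of two booleans.
    EnumSet<CreateFlag> overwrite      = EnumSet.of(CreateFlag.OVERWRITE);
    EnumSet<CreateFlag> appendOnly     = EnumSet.of(CreateFlag.APPEND);
    EnumSet<CreateFlag> createOrAppend = EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND);

    // Per the hunk above: APPEND alone on a missing file still fails with
    // FileNotFoundException, while CREATE|APPEND falls back to OVERWRITE
    // semantics and creates the file.
    System.out.println("overwrite        = " + overwrite);
    System.out.println("append only      = " + appendOnly);
    System.out.println("create or append = " + createOrAppend);
  }
}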
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java Mon Jun 15 22:13:06 2009
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 
 /** Redirect queries about the hosted filesystem to an appropriate datanode.

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java Mon Jun 15 22:13:06 2009
@@ -160,7 +160,8 @@
    */
   int getExistingPathINodes(byte[][] components, INode[] existing) {
     assert compareBytes(this.name, components[0]) == 0 :
-      "Incorrect name " + getLocalName() + " expected " + components[0];
+      "Incorrect name " + getLocalName() + " expected " + 
+      bytes2String(components[0]);
 
     INode curNode = this;
     int count = 0;

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Mon Jun 15 22:13:06 2009
@@ -29,6 +29,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -62,6 +63,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
@@ -189,13 +191,28 @@
   }
 
   public static InetSocketAddress getAddress(Configuration conf) {
-    return getAddress(FileSystem.getDefaultUri(conf).getAuthority());
+    URI filesystemURI = FileSystem.getDefaultUri(conf);
+    String authority = filesystemURI.getAuthority();
+    if (authority == null) {
+      throw new IllegalArgumentException(String.format(
+          "Invalid URI for NameNode address (check %s): %s has no authority.",
+          FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
+    }
+    if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
+        filesystemURI.getScheme())) {
+      throw new IllegalArgumentException(String.format(
+          "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
+          FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
+          FSConstants.HDFS_URI_SCHEME));
+    }
+    return getAddress(authority);
   }
 
   public static URI getUri(InetSocketAddress namenode) {
     int port = namenode.getPort();
     String portString = port == DEFAULT_PORT ? "" : (":"+port);
-    return URI.create("hdfs://"+ namenode.getHostName()+portString);
+    return URI.create(FSConstants.HDFS_URI_SCHEME + "://" 
+        + namenode.getHostName()+portString);
   }
 
   /**
@@ -532,7 +549,7 @@
   public void create(String src, 
                      FsPermission masked,
                              String clientName, 
-                             boolean overwrite,
+                             EnumSetWritable<CreateFlag> flag,
                              short replication,
                              long blockSize
                              ) throws IOException {
@@ -548,7 +565,7 @@
     namesystem.startFile(src,
         new PermissionStatus(UserGroupInformation.getCurrentUGI().getUserName(),
             null, masked),
-        clientName, clientMachine, overwrite, replication, blockSize);
+        clientName, clientMachine, flag.get(), replication, blockSize);
     myMetrics.numFilesCreated.inc();
     myMetrics.numCreateFileOps.inc();
   }

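Note: NameNode.getAddress(Configuration) now validates fs.default.name before extracting the namenode host:port: the URI must carry an authority and must use the HDFS scheme. A stripped-down sketch of those two checks with made-up URIs (FSConstants.HDFS_URI_SCHEME is assumed to be "hdfs" here):

import java.net.URI;

public class NameNodeAddressCheckSketch {
  /** Mirrors the two new checks in getAddress(Configuration); the literal
   *  "hdfs" stands in for FSConstants.HDFS_URI_SCHEME. */
  static String authorityOf(String defaultFs) {
    URI uri = URI.create(defaultFs);
    if (uri.getAuthority() == null) {
      throw new IllegalArgumentException(defaultFs + " has no authority");
    }
    if (!"hdfs".equalsIgnoreCase(uri.getScheme())) {
      throw new IllegalArgumentException(defaultFs + " is not an hdfs:// URI");
    }
    return uri.getAuthority();
  }

  public static void main(String[] args) {
    System.out.println(authorityOf("hdfs://namenode.example.com:8020"));
    try {
      authorityOf("file:///");
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}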
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Mon Jun 15 22:13:06 2009
@@ -32,6 +32,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
@@ -276,10 +277,16 @@
    */
   private String getInfoServer() throws IOException {
     URI fsName = FileSystem.getDefaultUri(conf);
-    if (!"hdfs".equals(fsName.getScheme())) {
+    if (!FSConstants.HDFS_URI_SCHEME.equalsIgnoreCase(fsName.getScheme())) {
       throw new IOException("This is not a DFS");
     }
-    return conf.get("dfs.http.address", "0.0.0.0:50070");
+    String configuredAddress = conf.get("dfs.http.address", "0.0.0.0:50070");
+    InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
+    if (sockAddr.getAddress().isAnyLocalAddress()) {
+      return fsName.getHost() + ":" + sockAddr.getPort();
+    } else {
+      return configuredAddress;
+    }
   }
 
   /**

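Note: the SecondaryNameNode change makes getInfoServer() usable with the default dfs.http.address of 0.0.0.0:50070: a wildcard bind address cannot be used to contact the primary namenode over HTTP, so the host from fs.default.name is substituted, while an explicitly configured host is kept as-is. A simplified sketch of that decision (plain InetSocketAddress instead of NetUtils, and illustrative addresses):

import java.net.InetSocketAddress;

public class InfoServerSketch {
  /** Simplified version of the new getInfoServer() logic: fall back to the
   *  namenode's host only when the configured HTTP address is the wildcard. */
  static String infoServer(String configuredAddress, String namenodeHost) {
    String host = configuredAddress.split(":")[0];
    int port = Integer.parseInt(configuredAddress.split(":")[1]);
    InetSocketAddress sockAddr = new InetSocketAddress(host, port);
    if (sockAddr.getAddress() != null && sockAddr.getAddress().isAnyLocalAddress()) {
      return namenodeHost + ":" + sockAddr.getPort();
    }
    return configuredAddress;
  }

  public static void main(String[] args) {
    System.out.println(infoServer("0.0.0.0:50070", "namenode.example.com"));   // -> namenode.example.com:50070
    System.out.println(infoServer("127.0.0.1:50070", "namenode.example.com")); // -> 127.0.0.1:50070
  }
}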
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java Mon Jun 15 22:13:06 2009
@@ -29,6 +29,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java Mon Jun 15 22:13:06 2009
@@ -25,7 +25,7 @@
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.io.DeprecatedUTF8;
+import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;

Propchange: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Mon Jun 15 22:13:06 2009
@@ -0,0 +1,2 @@
+/hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs:713112
+/hadoop/core/trunk/src/test/hdfs:776175-784663

Propchange: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/
------------------------------------------------------------------------------
--- svn:mergeinfo (added)
+++ svn:mergeinfo Mon Jun 15 22:13:06 2009
@@ -0,0 +1,2 @@
+/hadoop/core/branches/branch-0.19/hdfs/src/test/hdfs-with-mr:713112
+/hadoop/core/trunk/src/test/hdfs-with-mr:776175-784663

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestCopyFiles.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestCopyFiles.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestCopyFiles.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestCopyFiles.java Mon Jun 15 22:13:06 2009
@@ -295,6 +295,37 @@
       if (cluster != null) { cluster.shutdown(); }
     }
   }
+
+  /** copy empty directory on dfs file system */
+  public void testEmptyDir() throws Exception {
+    String namenode = null;
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster(conf, 2, true, null);
+      final FileSystem hdfs = cluster.getFileSystem();
+      namenode = FileSystem.getDefaultUri(conf).toString();
+      if (namenode.startsWith("hdfs://")) {
+        
+        FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
+        fs.mkdirs(new Path("/empty"));
+
+        ToolRunner.run(new DistCp(conf), new String[] {
+                                         "-log",
+                                         namenode+"/logs",
+                                         namenode+"/empty",
+                                         namenode+"/dest"});
+        fs = FileSystem.get(URI.create(namenode+"/destdat"), conf);
+        assertTrue("Destination directory does not exist.",
+                   fs.exists(new Path(namenode+"/dest")));
+        deldir(hdfs, "/dest");
+        deldir(hdfs, "/empty");
+        deldir(hdfs, "/logs");
+      }
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
   
   /** copy files from local file system to dfs file system */
   public void testCopyFromLocalToDfs() throws Exception {
@@ -380,7 +411,7 @@
         deldir(hdfs, "/logs");
 
         ToolRunner.run(new DistCp(conf), new String[] {
-                                         "-p",
+                                         "-prbugp", // no t to avoid preserving mod. times
                                          "-update",
                                          "-log",
                                          namenode+"/logs",
@@ -393,7 +424,7 @@
 
         deldir(hdfs, "/logs");
         ToolRunner.run(new DistCp(conf), new String[] {
-                                         "-p",
+                                         "-prbugp", // no t to avoid preserving mod. times
                                          "-overwrite",
                                          "-log",
                                          namenode+"/logs",
@@ -551,6 +582,32 @@
         deldir(fs, "/destdat");
         deldir(fs, "/srcdat");
       }
+
+      {//test preserving times
+        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
+        fs.mkdirs(new Path("/srcdat/tmpf1"));
+        fs.mkdirs(new Path("/srcdat/tmpf2"));
+        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
+        FsPermission[] permissions = new FsPermission[srcstat.length];
+        for(int i = 0; i < srcstat.length; i++) {
+          fs.setTimes(srcstat[i].getPath(), 40, 50);
+        }
+
+        ToolRunner.run(new DistCp(conf),
+            new String[]{"-pt", nnUri+"/srcdat", nnUri+"/destdat"});
+
+        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
+        for(int i = 0; i < dststat.length; i++) {
+          assertEquals("Modif. Time i=" + i, 40, dststat[i].getModificationTime());
+          assertEquals("Access Time i=" + i+ srcstat[i].getPath() + "-" + dststat[i].getPath(), 50, dststat[i].getAccessTime());
+        }
+        
+        assertTrue("Source and destination directories do not match.",
+                   checkFiles(fs, "/destdat", files));
+  
+        deldir(fs, "/destdat");
+        deldir(fs, "/srcdat");
+      }
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }
@@ -818,6 +875,36 @@
     }
   }
 
+  /** test globbing  */
+  public void testGlobbing() throws Exception {
+    String namenode = null;
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster(conf, 2, true, null);
+      final FileSystem hdfs = cluster.getFileSystem();
+      namenode = FileSystem.getDefaultUri(conf).toString();
+      if (namenode.startsWith("hdfs://")) {
+        MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
+        ToolRunner.run(new DistCp(conf), new String[] {
+                                         "-log",
+                                         namenode+"/logs",
+                                         namenode+"/srcdat/*",
+                                         namenode+"/destdat"});
+        assertTrue("Source and destination directories do not match.",
+                   checkFiles(hdfs, "/destdat", files));
+        FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
+        assertTrue("Log directory does not exist.",
+                   fs.exists(new Path(namenode+"/logs")));
+        deldir(hdfs, "/destdat");
+        deldir(hdfs, "/srcdat");
+        deldir(hdfs, "/logs");
+      }
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+  
   static void create(FileSystem fs, Path f) throws IOException {
     FSDataOutputStream out = fs.create(f);
     try {

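For reference on the preserve flags exercised above: DistCp's -p option takes a set of attribute letters, and this change adds 't' for file times (the other tests switch to "-prbugp" precisely so modification times are left untouched). Below is a minimal, illustrative sketch of driving the tool the same way the new test does; it assumes DistCp lives in org.apache.hadoop.tools as in this branch, and the namenode URI and paths are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.util.ToolRunner;

    public class DistCpPreserveTimesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String nnUri = "hdfs://localhost:9000";   // placeholder namenode URI

        // "-pt" preserves modification and access times on the copied files;
        // "-prbugp" would preserve replication, block size, user, group and
        // permission while deliberately omitting 't' so times are not copied.
        int rc = ToolRunner.run(new DistCp(conf), new String[] {
            "-pt",
            nnUri + "/srcdat",
            nnUri + "/destdat"});
        System.exit(rc);
      }
    }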
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestFileSystem.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestFileSystem.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/fs/TestFileSystem.java Mon Jun 15 22:13:06 2009
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.Random;
 import java.util.List;
 import java.util.ArrayList;
@@ -578,6 +579,37 @@
     }
   }
 
+  public void testFsShutdownHook() throws Exception {
+    final Set<FileSystem> closed = Collections.synchronizedSet(new HashSet<FileSystem>());
+    Configuration conf = new Configuration();
+    Configuration confNoAuto = new Configuration();
+
+    conf.setClass("fs.test.impl", TestShutdownFileSystem.class, FileSystem.class);
+    confNoAuto.setClass("fs.test.impl", TestShutdownFileSystem.class, FileSystem.class);
+    confNoAuto.setBoolean("fs.automatic.close", false);
+
+    TestShutdownFileSystem fsWithAuto =
+      (TestShutdownFileSystem)(new Path("test://a/").getFileSystem(conf));
+    TestShutdownFileSystem fsWithoutAuto =
+      (TestShutdownFileSystem)(new Path("test://b/").getFileSystem(confNoAuto));
+
+    fsWithAuto.setClosedSet(closed);
+    fsWithoutAuto.setClosedSet(closed);
+
+    // Different URIs should result in different FS instances
+    assertNotSame(fsWithAuto, fsWithoutAuto);
+
+    FileSystem.CACHE.closeAll(true);
+    assertEquals(1, closed.size());
+    assertTrue(closed.contains(fsWithAuto));
+
+    closed.clear();
+
+    FileSystem.closeAll();
+    assertEquals(1, closed.size());
+    assertTrue(closed.contains(fsWithoutAuto));
+  }
+
 
   public void testCacheKeysAreCaseInsensitive()
     throws Exception
@@ -626,4 +658,18 @@
     fs1.close();
     fs2.close();
   }
+
+  public static class TestShutdownFileSystem extends RawLocalFileSystem {
+    private Set<FileSystem> closedSet;
+
+    public void setClosedSet(Set<FileSystem> closedSet) {
+      this.closedSet = closedSet;
+    }
+    public void close() throws IOException {
+      if (closedSet != null) {
+        closedSet.add(this);
+      }
+      super.close();
+    }
+  }
 }
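The new testFsShutdownHook above exercises the fs.automatic.close switch: filesystems created with it set to false are skipped by the JVM shutdown hook and must be closed by the client. A minimal sketch of a client opting out, assuming only the configuration key shown in the test; the URI and path are placeholders.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ManualCloseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Opt out of the automatic close performed by the shutdown hook.
        conf.setBoolean("fs.automatic.close", false);

        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000/"), conf);
        fs.mkdirs(new Path("/tmp/example"));

        // With automatic close disabled, close cached filesystems explicitly.
        FileSystem.closeAll();
      }
    }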

Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBench.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBench.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBench.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBench.java Mon Jun 15 22:13:06 2009
@@ -150,13 +150,10 @@
         writer = SequenceFile.createWriter(tempFS, config, filePath, Text.class, 
                 LongWritable.class, CompressionType.NONE);
         writer.append(new Text(strFileName), new LongWritable(0l));
-      } catch(Exception e) {
-        throw new IOException(e.getLocalizedMessage());
       } finally {
         if (writer != null) {
           writer.close();
         }
-        writer = null;
       }
     }
   }
@@ -210,6 +207,9 @@
 
   /**
    * check for arguments and fail if the values are not specified
+   * @param index  position of the argument in the list of command-line
+   *   arguments
+   * @param length total number of arguments
    */
   public static void checkArgs(final int index, final int length) {
     if (index == length) {
@@ -220,8 +220,8 @@
   
   /**
    * Parse input arguments
-   * 
-   * @params args Command line inputs
+   *
+   * @param args array of command-line parameters to be parsed
    */
   public static void parseInputs(final String[] args) {
     // If there are no command line arguments, exit
@@ -358,8 +358,8 @@
     
     // Average latency is the average time to perform 'n' number of
     // operations, n being the number of files
-    double avgLatency1 = (double) totalTimeAL1 / (double) successfulFileOps;
-    double avgLatency2 = (double) totalTimeAL2 / (double) successfulFileOps;
+    double avgLatency1 = (double) totalTimeAL1 / successfulFileOps;
+    double avgLatency2 = (double) totalTimeAL2 / successfulFileOps;
     
     // The time it takes for the longest running map is measured. Using that,
     // cluster transactions per second is calculated. It includes time to 
@@ -367,7 +367,7 @@
     double longestMapTimeTPmS = (double) (mapEndTimeTPmS - mapStartTimeTPmS);
     double totalTimeTPS = (longestMapTimeTPmS == 0) ?
             (1000 * successfulFileOps) :
-            (double) (1000 * successfulFileOps) / (double) longestMapTimeTPmS;
+            (double) (1000 * successfulFileOps) / longestMapTimeTPmS;
             
     // The time it takes to perform 'n' operations is calculated (in ms),
     // n being the number of files. Using that time, the average execution 
@@ -375,22 +375,22 @@
     // failed operations
     double AverageExecutionTime = (totalTimeTPmS == 0) ?
         (double) successfulFileOps : 
-        (double) (totalTimeTPmS / successfulFileOps);
+        (double) totalTimeTPmS / successfulFileOps;
             
     if (operation.equals(OP_CREATE_WRITE)) {
       // For create/write/close, it is treated as two transactions,
       // since a file create from a client perspective involves create and close
       resultTPSLine1 = "               TPS: Create/Write/Close: " + 
         (int) (totalTimeTPS * 2);
-      resultTPSLine2 = "Avg exec time (ms): Create/Write/Close: " + 
-        (double) AverageExecutionTime;
+      resultTPSLine2 = "Avg exec time (ms): Create/Write/Close: " +
+        AverageExecutionTime;
       resultALLine1 = "            Avg Lat (ms): Create/Write: " + avgLatency1;
       resultALLine2 = "                   Avg Lat (ms): Close: " + avgLatency2;
     } else if (operation.equals(OP_OPEN_READ)) {
       resultTPSLine1 = "                        TPS: Open/Read: " + 
         (int) totalTimeTPS;
       resultTPSLine2 = "         Avg Exec time (ms): Open/Read: " + 
-        (double) AverageExecutionTime;
+        AverageExecutionTime;
       resultALLine1 = "                    Avg Lat (ms): Open: " + avgLatency1;
       if (readFileAfterOpen) {
         resultALLine2 = "                  Avg Lat (ms): Read: " + avgLatency2;
@@ -399,13 +399,13 @@
       resultTPSLine1 = "                           TPS: Rename: " + 
         (int) totalTimeTPS;
       resultTPSLine2 = "            Avg Exec time (ms): Rename: " + 
-        (double) AverageExecutionTime;
+        AverageExecutionTime;
       resultALLine1 = "                  Avg Lat (ms): Rename: " + avgLatency1;
     } else if (operation.equals(OP_DELETE)) {
       resultTPSLine1 = "                           TPS: Delete: " + 
         (int) totalTimeTPS;
       resultTPSLine2 = "            Avg Exec time (ms): Delete: " + 
-        (double) AverageExecutionTime;
+        AverageExecutionTime;
       resultALLine1 = "                  Avg Lat (ms): Delete: " + avgLatency1;
     }
     
@@ -558,6 +558,7 @@
   /**
   * Main method for running the NNBench benchmarks
   *
+  * @param args array of command line arguments
   * @throws IOException indicates a problem with test startup
   */
   public static void main(String[] args) throws IOException {
@@ -587,7 +588,7 @@
   /**
    * Mapper class
    */
-  static class NNBenchMapper extends Configured 
+  static class NNBenchMapper extends Configured
           implements Mapper<Text, LongWritable, Text, Text> {
     FileSystem filesystem = null;
     private String hostName = null;
@@ -639,13 +640,15 @@
      */
     public void close() throws IOException {
     }
-    
+
     /**
-    * Returns when the current number of seconds from the epoch equals
-    * the command line argument given by <code>-startTime</code>.
-    * This allows multiple instances of this program, running on clock
-    * synchronized nodes, to start at roughly the same time.
-    */
+     * Returns when the current number of seconds from the epoch equals
+     * the command line argument given by <code>-startTime</code>.
+     * This allows multiple instances of this program, running on clock
+     * synchronized nodes, to start at roughly the same time.
+     * @return true if the method was able to sleep until <code>-startTime</code>
+     * without interruption; false otherwise
+     */
     private boolean barrier() {
       long startTime = getConf().getLong("test.nnbench.starttime", 0l);
       long currentTime = System.currentTimeMillis();
@@ -698,16 +701,16 @@
       if (barrier()) {
         if (op.equals(OP_CREATE_WRITE)) {
           startTimeTPmS = System.currentTimeMillis();
-          doCreateWriteOp("file_" + hostName + "_", output, reporter);
+          doCreateWriteOp("file_" + hostName + "_", reporter);
         } else if (op.equals(OP_OPEN_READ)) {
           startTimeTPmS = System.currentTimeMillis();
-          doOpenReadOp("file_" + hostName + "_", output, reporter);
+          doOpenReadOp("file_" + hostName + "_", reporter);
         } else if (op.equals(OP_RENAME)) {
           startTimeTPmS = System.currentTimeMillis();
-          doRenameOp("file_" + hostName + "_", output, reporter);
+          doRenameOp("file_" + hostName + "_", reporter);
         } else if (op.equals(OP_DELETE)) {
           startTimeTPmS = System.currentTimeMillis();
-          doDeleteOp("file_" + hostName + "_", output, reporter);
+          doDeleteOp("file_" + hostName + "_", reporter);
         }
         
         endTimeTPms = System.currentTimeMillis();
@@ -735,11 +738,13 @@
     
     /**
      * Create and Write operation.
+     * @param name prefix of the output file to be created
+     * @param reporter an instance of {@link Reporter} to be used for
+     *   status updates
      */
     private void doCreateWriteOp(String name,
-            OutputCollector<Text, Text> output,
-            Reporter reporter) {
-      FSDataOutputStream out = null;
+                                 Reporter reporter) {
+      FSDataOutputStream out;
       byte[] buffer = new byte[bytesToWrite];
       
       for (long l = 0l; l < numberOfFiles; l++) {
@@ -783,11 +788,13 @@
     
     /**
      * Open operation
+     * @param name prefix of the output file to be read
+     * @param reporter an instance of {@link Reporter} to be used for
+     *   status updates
      */
     private void doOpenReadOp(String name,
-            OutputCollector<Text, Text> output,
-            Reporter reporter) {
-      FSDataInputStream input = null;
+                              Reporter reporter) {
+      FSDataInputStream input;
       byte[] buffer = new byte[bytesToWrite];
       
       for (long l = 0l; l < numberOfFiles; l++) {
@@ -824,10 +831,12 @@
     
     /**
      * Rename operation
+     * @param name prefix of the file to be renamed
+     * @param reporter an instance of {@link Reporter} to be used for
+     *   status updates
      */
     private void doRenameOp(String name,
-            OutputCollector<Text, Text> output,
-            Reporter reporter) {
+                            Reporter reporter) {
       for (long l = 0l; l < numberOfFiles; l++) {
         Path filePath = new Path(new Path(baseDir, dataDirName), 
                 name + "_" + l);
@@ -857,10 +866,12 @@
     
     /**
      * Delete operation
+     * @param name prefix of the file to be deleted
+     * @param reporter an instance of {@link Reporter} to be used for
+     *   status updates
      */
     private void doDeleteOp(String name,
-            OutputCollector<Text, Text> output,
-            Reporter reporter) {
+                            Reporter reporter) {
       for (long l = 0l; l < numberOfFiles; l++) {
         Path filePath = new Path(new Path(baseDir, dataDirName), 
                 name + "_" + l);

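The reworked barrier() javadoc above describes waiting until a shared -startTime so that mappers on clock-synchronized nodes begin issuing operations together. A rough, standalone sketch of that idea follows; the class and method names are illustrative, not the exact NNBench code.

    public final class StartTimeBarrier {
      private StartTimeBarrier() {}

      /**
       * Sleeps until the given start time (milliseconds since the epoch).
       * @return true if the wait completed without interruption, false otherwise
       */
      public static boolean await(long startTimeMs) {
        long sleepTime;
        while ((sleepTime = startTimeMs - System.currentTimeMillis()) > 0) {
          try {
            Thread.sleep(sleepTime);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
          }
        }
        return true;
      }
    }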
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBenchWithoutMR.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBenchWithoutMR.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBenchWithoutMR.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBenchWithoutMR.java Mon Jun 15 22:13:06 2009
@@ -28,8 +28,8 @@
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.mapred.JobConf;
 
 /**
  * This program executes a specified operation that applies load to 
@@ -59,7 +59,6 @@
   // variables initialized in main()
   private static FileSystem fileSys = null;
   private static Path taskDir = null;
-  private static String uniqueId = null;
   private static byte[] buffer;
   private static long maxExceptionsPerFile = 200;
     
@@ -69,12 +68,14 @@
    * This allows multiple instances of this program, running on clock
    * synchronized nodes, to start at roughly the same time.
    */
+
   static void barrier() {
     long sleepTime;
     while ((sleepTime = startTime - System.currentTimeMillis()) > 0) {
       try {
         Thread.sleep(sleepTime);
       } catch (InterruptedException ex) {
+        // This is intentionally left empty
       }
     }
   }
@@ -98,18 +99,20 @@
   static int createWrite() {
     int totalExceptions = 0;
     FSDataOutputStream out = null;
-    boolean success = false;
+    boolean success;
     for (int index = 0; index < numFiles; index++) {
       int singleFileExceptions = 0;
       do { // create file until is succeeds or max exceptions reached
         try {
           out = fileSys.create(
-                               new Path(taskDir, "" + index), false, 512, (short)1, bytesPerBlock);
+                  new Path(taskDir, "" + index), false, 512,
+                  (short)1, bytesPerBlock);
           success = true;
         } catch (IOException ioe) { 
           success=false; 
           totalExceptions++;
-          handleException("creating file #" + index, ioe, ++singleFileExceptions);
+          handleException("creating file #" + index, ioe,
+                  ++singleFileExceptions);
         }
       } while (!success);
       long toBeWritten = bytesPerFile;
@@ -120,7 +123,8 @@
           out.write(buffer, 0, nbytes);
         } catch (IOException ioe) {
           totalExceptions++;
-          handleException("writing to file #" + index, ioe, ++singleFileExceptions);
+          handleException("writing to file #" + index, ioe,
+                  ++singleFileExceptions);
         }
       }
       do { // close file until is succeeds
@@ -130,7 +134,8 @@
         } catch (IOException ioe) {
           success=false; 
           totalExceptions++;
-          handleException("closing file #" + index, ioe, ++singleFileExceptions);
+          handleException("closing file #" + index, ioe,
+                  ++singleFileExceptions);
         }
       } while (!success);
     }
@@ -144,7 +149,7 @@
    */
   static int openRead() {
     int totalExceptions = 0;
-    FSDataInputStream in = null;
+    FSDataInputStream in;
     for (int index = 0; index < numFiles; index++) {
       int singleFileExceptions = 0;
       try {
@@ -153,11 +158,12 @@
         while (toBeRead > 0) {
           int nbytes = (int) Math.min(buffer.length, toBeRead);
           toBeRead -= nbytes;
-          try { // only try once
+          try { // only try once; we don't care about the number of bytes read
             in.read(buffer, 0, nbytes);
           } catch (IOException ioe) {
             totalExceptions++;
-            handleException("reading from file #" + index, ioe, ++singleFileExceptions);
+            handleException("reading from file #" + index, ioe,
+                    ++singleFileExceptions);
           }
         }
         in.close();
@@ -177,19 +183,23 @@
    */
   static int rename() {
     int totalExceptions = 0;
-    boolean success = false;
+    boolean success;
     for (int index = 0; index < numFiles; index++) {
       int singleFileExceptions = 0;
       do { // rename file until is succeeds
         try {
-          boolean result = fileSys.rename(
-                                          new Path(taskDir, "" + index), new Path(taskDir, "A" + index));
+          // The boolean result of this operation is of no interest to us:
+          // rename can return false only when the namesystem could not
+          // rename the path in the name space (i.e. no exception has
+          // been thrown)
+          fileSys.rename(new Path(taskDir, "" + index),
+              new Path(taskDir, "A" + index));
           success = true;
-        } catch (IOException ioe) { 
-          success=false; 
+        } catch (IOException ioe) {
+          success = false;
           totalExceptions++;
          handleException("renaming file #" + index, ioe, ++singleFileExceptions);
-       }
+        }
       } while (!success);
     }
     return totalExceptions;
@@ -203,14 +213,18 @@
    */
   static int delete() {
     int totalExceptions = 0;
-    boolean success = false;
+    boolean success;
     for (int index = 0; index < numFiles; index++) {
       int singleFileExceptions = 0;
       do { // delete file until is succeeds
         try {
-          boolean result = fileSys.delete(new Path(taskDir, "A" + index), true);
+          // The boolean result of this operation is of no interest to us:
+          // delete can return false only when the namesystem could not
+          // remove the path from the name space (i.e. no exception has
+          // been thrown)
+          fileSys.delete(new Path(taskDir, "A" + index), true);
           success = true;
-        } catch (IOException ioe) { 
+        } catch (IOException ioe) {
           success=false; 
           totalExceptions++;
          handleException("deleting file #" + index, ioe, ++singleFileExceptions);
@@ -239,6 +253,7 @@
    *         [-bytesPerChecksum <value for io.bytes.per.checksum>]
    * </pre>
    *
+   * @param args array of the program's command-line arguments
    * @throws IOException indicates a problem with test startup
    */
   public static void main(String[] args) throws IOException {
@@ -281,7 +296,7 @@
     bytesPerFile = bytesPerBlock * blocksPerFile;
     
     JobConf jobConf = new JobConf(new Configuration(), NNBench.class);
-    
+
     if ( bytesPerChecksum < 0 ) { // if it is not set in cmdline
       bytesPerChecksum = jobConf.getInt("io.bytes.per.checksum", 512);
     }
@@ -308,7 +323,7 @@
       }
     
     fileSys = FileSystem.get(jobConf);
-    uniqueId = java.net.InetAddress.getLocalHost().getHostName();
+    String uniqueId = java.net.InetAddress.getLocalHost().getHostName();
     taskDir = new Path(baseDir, uniqueId);
     // initialize buffer used for writing/reading file
     buffer = new byte[(int) Math.min(bytesPerFile, 32768L)];

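The createWrite, rename and delete loops in NNBenchWithoutMR all follow the same retry-until-success pattern: repeat the operation until no IOException is thrown, counting exceptions along the way. A condensed sketch of that pattern with illustrative names (the real code also caps failures per file via handleException):

    import java.io.IOException;

    public class RetryUntilSuccessSketch {
      interface FsOp {
        void run() throws IOException;   // one attempt at the operation
      }

      /** Retries op until it succeeds, returning the number of failed attempts. */
      static int retry(FsOp op, long maxExceptions) throws IOException {
        int exceptions = 0;
        boolean success;
        do {
          try {
            op.run();
            success = true;
          } catch (IOException ioe) {
            success = false;
            if (++exceptions >= maxExceptions) {
              throw new IOException("giving up after " + exceptions + " failures");
            }
          }
        } while (!success);
        return exceptions;
      }
    }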
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml Mon Jun 15 22:13:06 2009
@@ -1132,6 +1132,33 @@
         </comparator>
       </comparators>
     </test>
+
+    <test> <!-- TESTED -->
+      <description>du -h: Test for hdfs:// path - directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data1k hdfs:///dir0/data1k</command>
+        <command>-fs NAMENODE -du -h hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 2 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^1.0k( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data1k</expected-output>
+        </comparator>
+      </comparators>
+    </test>
     
     <test> <!-- TESTED -->
       <description>du: Test for hdfs:// path - directory using globbing</description>
@@ -1314,7 +1341,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+          <expected-output>^450\s+hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1346,7 +1373,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0( |\t)*450</expected-output>
+          <expected-output>^450\s+hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1384,7 +1411,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+          <expected-output>^450\s+hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1417,7 +1444,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+          <expected-output>^450\s+hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1455,7 +1482,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+          <expected-output>^450\s+hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1487,7 +1514,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+          <expected-output>^450\s+hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -1525,7 +1552,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+          <expected-output>^450\s+hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5485,7 +5512,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5502,7 +5529,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5522,19 +5549,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir1(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir2(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir3(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5554,19 +5581,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir2(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir3(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5617,7 +5644,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5634,19 +5661,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir1(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir2(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir3(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5697,7 +5724,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5714,19 +5741,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir1(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir2(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir3(|\t)*0</expected-output>
+          <expected-output>^0\s+hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
         </comparator>
       </comparators>
     </test>

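The comparator updates above all track the new du output layout: the size column now comes first, followed by whitespace and the fully qualified path. A small illustrative check of one line against that pattern; the sample line and port number are made up.

    import java.util.regex.Pattern;

    public class DuOutputFormatCheck {
      public static void main(String[] args) {
        // Size first, then whitespace, then the fully qualified hdfs:// path.
        Pattern p = Pattern.compile("^450\\s+hdfs://localhost[.a-z]*:[0-9]*/dir0");
        String sampleLine = "450       hdfs://localhost:8020/dir0"; // illustrative
        System.out.println(p.matcher(sampleLine).find());          // prints true
      }
    }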
Modified: hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java?rev=785005&r1=785004&r2=785005&view=diff
==============================================================================
--- hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java (original)
+++ hadoop/core/branches/HADOOP-4687/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java Mon Jun 15 22:13:06 2009
@@ -29,7 +29,6 @@
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.ToolRunner;
 
 public class TestDFSShellGenericOptions extends TestCase {
@@ -101,8 +100,8 @@
     FileSystem fs=null;
     try {
       ToolRunner.run(shell, args);
-      fs = new DistributedFileSystem(NameNode.getAddress(namenode), 
-                                     shell.getConf());
+      fs = FileSystem.get(NameNode.getUri(NameNode.getAddress(namenode)),
+          shell.getConf());
       assertTrue("Directory does not get created", 
                  fs.isDirectory(new Path("/data")));
       fs.delete(new Path("/data"), true);

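The last hunk replaces direct construction of a DistributedFileSystem with a lookup through FileSystem.get(URI, Configuration), which resolves the implementation from the URI scheme and goes through the FileSystem cache. A minimal sketch of the same lookup outside the test; the URI is a placeholder.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class GetFileSystemByUriSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // For an hdfs:// URI this resolves to the HDFS implementation
        // registered for the scheme, reusing a cached instance if present.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
        System.out.println(fs.isDirectory(new Path("/")));
        fs.close();
      }
    }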

