hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1444439 [2/3] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-httpfs/src/test/jav...
Date: Sat, 09 Feb 2013 21:29:51 GMT
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat Feb  9 21:29:44 2013
@@ -1791,16 +1791,18 @@ public class FSNamesystem implements Nam
    * Create a new file entry in the namespace.
    * 
    * For description of parameters and exceptions thrown see 
-   * {@link ClientProtocol#create()}
+   * {@link ClientProtocol#create()}, except that it returns a valid file
+   * status upon success.
    */
-  void startFile(String src, PermissionStatus permissions, String holder,
-      String clientMachine, EnumSet<CreateFlag> flag, boolean createParent,
-      short replication, long blockSize) throws AccessControlException,
-      SafeModeException, FileAlreadyExistsException, UnresolvedLinkException,
+  HdfsFileStatus startFile(String src, PermissionStatus permissions,
+      String holder, String clientMachine, EnumSet<CreateFlag> flag,
+      boolean createParent, short replication, long blockSize)
+      throws AccessControlException, SafeModeException,
+      FileAlreadyExistsException, UnresolvedLinkException,
       FileNotFoundException, ParentNotDirectoryException, IOException {
     try {
-      startFileInt(src, permissions, holder, clientMachine, flag, createParent,
-                   replication, blockSize);
+      return startFileInt(src, permissions, holder, clientMachine, flag,
+          createParent, replication, blockSize);
     } catch (AccessControlException e) {
       if (isAuditEnabled() && isExternalInvocation()) {
         logAuditEvent(false, UserGroupInformation.getCurrentUser(),
@@ -1811,18 +1813,21 @@ public class FSNamesystem implements Nam
     }
   }
 
-  private void startFileInt(String src, PermissionStatus permissions, String holder,
-      String clientMachine, EnumSet<CreateFlag> flag, boolean createParent,
-      short replication, long blockSize) throws AccessControlException,
-      SafeModeException, FileAlreadyExistsException, UnresolvedLinkException,
+  private HdfsFileStatus startFileInt(String src, PermissionStatus permissions,
+      String holder, String clientMachine, EnumSet<CreateFlag> flag,
+      boolean createParent, short replication, long blockSize)
+      throws AccessControlException, SafeModeException,
+      FileAlreadyExistsException, UnresolvedLinkException,
       FileNotFoundException, ParentNotDirectoryException, IOException {
     boolean skipSync = false;
+    final HdfsFileStatus stat;
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
 
       startFileInternal(src, permissions, holder, clientMachine, flag,
           createParent, replication, blockSize);
+      stat = dir.getFileInfo(src, false);
     } catch (StandbyException se) {
       skipSync = true;
       throw se;
@@ -1836,11 +1841,11 @@ public class FSNamesystem implements Nam
     } 
 
     if (isAuditEnabled() && isExternalInvocation()) {
-      final HdfsFileStatus stat = dir.getFileInfo(src, false);
       logAuditEvent(UserGroupInformation.getCurrentUser(),
                     getRemoteIp(),
                     "create", src, null, stat);
     }
+    return stat;
   }
 
   /**
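
The substance of this hunk: create() now returns the new file's HdfsFileStatus, and startFileInt() captures that status inside the same write-locked section that creates the file, instead of re-reading it later only for the audit log. A minimal sketch of the pattern (hypothetical names, not the actual FSNamesystem code):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Capture derived state under the same lock that mutates it, so the
    // caller gets a snapshot consistent with its own mutation.
    class CreateAndDescribe {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      String create(String src) {
        final String stat;             // stand-in for HdfsFileStatus
        lock.writeLock().lock();
        try {
          createEntry(src);            // mutate the namespace
          stat = describeEntry(src);   // read back while still locked
        } finally {
          lock.writeLock().unlock();
        }
        return stat;                   // guaranteed to describe the new entry
      }

      // hypothetical helpers
      void createEntry(String src) {}
      String describeEntry(String src) { return src; }
    }
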
@@ -2207,20 +2212,14 @@ public class FSNamesystem implements Nam
    * are replicated.  Will return an empty 2-elt array if we want the
    * client to "try again later".
    */
-  LocatedBlock getAdditionalBlock(String src,
-                                         String clientName,
-                                         ExtendedBlock previous,
-                                         HashMap<Node, Node> excludedNodes
-                                         ) 
+  LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
+      ExtendedBlock previous, HashMap<Node, Node> excludedNodes)
       throws LeaseExpiredException, NotReplicatedYetException,
       QuotaExceededException, SafeModeException, UnresolvedLinkException,
       IOException {
-    checkBlock(previous);
-    Block previousBlock = ExtendedBlock.getLocalBlock(previous);
-    long fileLength, blockSize;
+    long blockSize;
     int replication;
     DatanodeDescriptor clientNode = null;
-    Block newBlock = null;
 
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug(
@@ -2228,118 +2227,61 @@ public class FSNamesystem implements Nam
           +src+" for "+clientName);
     }
 
-    writeLock();
+    // Part I. Analyze the state of the file with respect to the input data.
+    readLock();
     try {
-      checkOperation(OperationCategory.WRITE);
-
-      if (isInSafeMode()) {
-        throw new SafeModeException("Cannot add block to " + src, safeMode);
-      }
-
-      // have we exceeded the configured limit of fs objects.
-      checkFsObjectLimit();
-
-      INodeFileUnderConstruction pendingFile = checkLease(src, clientName);
-      BlockInfo lastBlockInFile = pendingFile.getLastBlock();
-      if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
-        // The block that the client claims is the current last block
-        // doesn't match up with what we think is the last block. There are
-        // three possibilities:
-        // 1) This is the first block allocation of an append() pipeline
-        //    which started appending exactly at a block boundary.
-        //    In this case, the client isn't passed the previous block,
-        //    so it makes the allocateBlock() call with previous=null.
-        //    We can distinguish this since the last block of the file
-        //    will be exactly a full block.
-        // 2) This is a retry from a client that missed the response of a
-        //    prior getAdditionalBlock() call, perhaps because of a network
-        //    timeout, or because of an HA failover. In that case, we know
-        //    by the fact that the client is re-issuing the RPC that it
-        //    never began to write to the old block. Hence it is safe to
-        //    abandon it and allocate a new one.
-        // 3) This is an entirely bogus request/bug -- we should error out
-        //    rather than potentially appending a new block with an empty
-        //    one in the middle, etc
-
-        BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
-        if (previous == null &&
-            lastBlockInFile != null &&
-            lastBlockInFile.getNumBytes() == pendingFile.getPreferredBlockSize() &&
-            lastBlockInFile.isComplete()) {
-          // Case 1
-          if (NameNode.stateChangeLog.isDebugEnabled()) {
-             NameNode.stateChangeLog.debug(
-                 "BLOCK* NameSystem.allocateBlock: handling block allocation" +
-                 " writing to a file with a complete previous block: src=" +
-                 src + " lastBlock=" + lastBlockInFile);
-          }
-        } else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
-          // Case 2
-          if (lastBlockInFile.getNumBytes() != 0) {
-            throw new IOException(
-                "Request looked like a retry to allocate block " +
-                lastBlockInFile + " but it already contains " +
-                lastBlockInFile.getNumBytes() + " bytes");
-          }
-
-          // The retry case ("b" above) -- abandon the old block.
-          NameNode.stateChangeLog.info("BLOCK* allocateBlock: " +
-              "caught retry for allocation of a new block in " +
-              src + ". Abandoning old block " + lastBlockInFile);
-          dir.removeBlock(src, pendingFile, lastBlockInFile);
-          dir.persistBlocks(src, pendingFile);
-        } else {
-          
-          throw new IOException("Cannot allocate block in " + src + ": " +
-              "passed 'previous' block " + previous + " does not match actual " +
-              "last block in file " + lastBlockInFile);
-        }
+      LocatedBlock[] onRetryBlock = new LocatedBlock[1];
+      final INode[] inodes = analyzeFileState(
+          src, fileId, clientName, previous, onRetryBlock).getINodes();
+      final INodeFileUnderConstruction pendingFile =
+          (INodeFileUnderConstruction) inodes[inodes.length - 1];
+
+      if(onRetryBlock[0] != null) {
+        // This is a retry. Just return the last block.
+        return onRetryBlock[0];
       }
 
-      // commit the last block and complete it if it has minimum replicas
-      commitOrCompleteLastBlock(pendingFile, previousBlock);
-
-      //
-      // If we fail this, bad things happen!
-      //
-      if (!checkFileProgress(pendingFile, false)) {
-        throw new NotReplicatedYetException("Not replicated yet:" + src);
-      }
-      fileLength = pendingFile.computeContentSummary().getLength();
       blockSize = pendingFile.getPreferredBlockSize();
       clientNode = pendingFile.getClientNode();
       replication = pendingFile.getFileReplication();
     } finally {
-      writeUnlock();
+      readUnlock();
     }
 
     // choose targets for the new block to be allocated.
-    final DatanodeDescriptor targets[] = blockManager.chooseTarget(
+    final DatanodeDescriptor targets[] = getBlockManager().chooseTarget(
         src, replication, clientNode, excludedNodes, blockSize);
 
-    // Allocate a new block and record it in the INode. 
+    // Part II.
+    // Allocate a new block, add it to the INode and the BlocksMap. 
+    Block newBlock = null;
+    long offset;
     writeLock();
     try {
-      checkOperation(OperationCategory.WRITE);
-      if (isInSafeMode()) {
-        throw new SafeModeException("Cannot add block to " + src, safeMode);
+      // Run the full analysis again, since things could have changed
+      // while chooseTarget() was executing.
+      LocatedBlock[] onRetryBlock = new LocatedBlock[1];
+      INodesInPath inodesInPath =
+          analyzeFileState(src, fileId, clientName, previous, onRetryBlock);
+      INode[] inodes = inodesInPath.getINodes();
+      final INodeFileUnderConstruction pendingFile =
+          (INodeFileUnderConstruction) inodes[inodes.length - 1];
+
+      if(onRetryBlock[0] != null) {
+        // This is a retry. Just return the last block.
+        return onRetryBlock[0];
       }
 
-      final INodesInPath iip = dir.getINodesInPath4Write(src);
-      final INodeFileUnderConstruction pendingFile
-          = checkLease(src, clientName, iip.getLastINode());
-                                                           
-      if (!checkFileProgress(pendingFile, false)) {
-        throw new NotReplicatedYetException("Not replicated yet:" + src);
-      }
+      // commit the last block and complete it if it has minimum replicas
+      commitOrCompleteLastBlock(pendingFile,
+                                ExtendedBlock.getLocalBlock(previous));
+
+      // allocate new block, record block locations in INode.
+      newBlock = createNewBlock();
+      saveAllocatedBlock(src, inodesInPath, newBlock, targets);
 
-      // allocate new block record block locations in INode.
-      newBlock = allocateBlock(src, iip, targets);
-      
-      for (DatanodeDescriptor dn : targets) {
-        dn.incBlocksScheduled();
-      }
       dir.persistBlocks(src, pendingFile);
+      offset = pendingFile.computeFileSize(true);
     } finally {
       writeUnlock();
     }
@@ -2347,10 +2289,114 @@ public class FSNamesystem implements Nam
       getEditLog().logSync();
     }
 
-    // Create next block
-    LocatedBlock b = new LocatedBlock(getExtendedBlock(newBlock), targets, fileLength);
-    blockManager.setBlockToken(b, BlockTokenSecretManager.AccessMode.WRITE);
-    return b;
+    // Return located block
+    return makeLocatedBlock(newBlock, targets, offset);
+  }
+
+  INodesInPath analyzeFileState(String src,
+                                long fileId,
+                                String clientName,
+                                ExtendedBlock previous,
+                                LocatedBlock[] onRetryBlock)
+          throws IOException  {
+    assert hasReadOrWriteLock();
+
+    checkBlock(previous);
+    onRetryBlock[0] = null;
+    checkOperation(OperationCategory.WRITE);
+    if (isInSafeMode()) {
+      throw new SafeModeException("Cannot add block to " + src, safeMode);
+    }
+
+    // have we exceeded the configured limit of fs objects.
+    checkFsObjectLimit();
+
+    Block previousBlock = ExtendedBlock.getLocalBlock(previous);
+    final INodesInPath inodesInPath = dir.getINodesInPath4Write(src);
+    final INode[] inodes = inodesInPath.getINodes();
+    final INodeFileUnderConstruction pendingFile
+        = checkLease(src, fileId, clientName, inodes[inodes.length - 1]);
+    BlockInfo lastBlockInFile = pendingFile.getLastBlock();
+    if (!Block.matchingIdAndGenStamp(previousBlock, lastBlockInFile)) {
+      // The block that the client claims is the current last block
+      // doesn't match up with what we think is the last block. There are
+      // four possibilities:
+      // 1) This is the first block allocation of an append() pipeline
+      //    which started appending exactly at a block boundary.
+      //    In this case, the client isn't passed the previous block,
+      //    so it makes the allocateBlock() call with previous=null.
+      //    We can distinguish this since the last block of the file
+      //    will be exactly a full block.
+      // 2) This is a retry from a client that missed the response of a
+      //    prior getAdditionalBlock() call, perhaps because of a network
+      //    timeout, or because of an HA failover. In that case, we know
+      //    by the fact that the client is re-issuing the RPC that it
+      //    never began to write to the old block. Hence it is safe to
+      //    return the existing block.
+      // 3) This is an entirely bogus request/bug -- we should error out
+      //    rather than potentially appending a new block with an empty
+      //    one in the middle, etc
+      // 4) This is a retry from a client that timed out while
+      //    the prior getAdditionalBlock() is still being processed,
+      //    currently working on chooseTarget(). 
+      //    There are no means to distinguish between the first and 
+      //    the second attempts in Part I, because the first one hasn't
+      //    changed the namesystem state yet.
+      //    We run this analysis again in Part II where case 4 is impossible.
+
+      BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
+      if (previous == null &&
+          lastBlockInFile != null &&
+          lastBlockInFile.getNumBytes() == pendingFile.getPreferredBlockSize() &&
+          lastBlockInFile.isComplete()) {
+        // Case 1
+        if (NameNode.stateChangeLog.isDebugEnabled()) {
+           NameNode.stateChangeLog.debug(
+               "BLOCK* NameSystem.allocateBlock: handling block allocation" +
+               " writing to a file with a complete previous block: src=" +
+               src + " lastBlock=" + lastBlockInFile);
+        }
+      } else if (Block.matchingIdAndGenStamp(penultimateBlock, previousBlock)) {
+        if (lastBlockInFile.getNumBytes() != 0) {
+          throw new IOException(
+              "Request looked like a retry to allocate block " +
+              lastBlockInFile + " but it already contains " +
+              lastBlockInFile.getNumBytes() + " bytes");
+        }
+
+        // Case 2
+        // Return the last block.
+        NameNode.stateChangeLog.info("BLOCK* allocateBlock: " +
+            "caught retry for allocation of a new block in " +
+            src + ". Returning previously allocated block " + lastBlockInFile);
+        long offset = pendingFile.computeFileSize(true);
+        onRetryBlock[0] = makeLocatedBlock(lastBlockInFile,
+            ((BlockInfoUnderConstruction)lastBlockInFile).getExpectedLocations(),
+            offset);
+        return inodesInPath;
+      } else {
+        // Case 3
+        throw new IOException("Cannot allocate block in " + src + ": " +
+            "passed 'previous' block " + previous + " does not match actual " +
+            "last block in file " + lastBlockInFile);
+      }
+    }
+
+    // Check if the penultimate block is minimally replicated
+    if (!checkFileProgress(pendingFile, false)) {
+      throw new NotReplicatedYetException("Not replicated yet: " + src);
+    }
+    return inodesInPath;
+  }
+
+  LocatedBlock makeLocatedBlock(Block blk,
+                                        DatanodeInfo[] locs,
+                                        long offset) throws IOException {
+    LocatedBlock lBlk = new LocatedBlock(
+        getExtendedBlock(blk), locs, offset);
+    getBlockManager().setBlockToken(
+        lBlk, BlockTokenSecretManager.AccessMode.WRITE);
+    return lBlk;
   }
 
   /** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */
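
The restructuring above splits getAdditionalBlock() into two locked parts so that the expensive chooseTarget() placement runs without holding the namesystem lock: Part I validates under a read lock, and Part II re-runs the same analyzeFileState() under the write lock, since the namespace may have changed in between (this is also what resolves retry case 4 in the comment). A sketch of the shape of that pattern (hypothetical names, not the HDFS API):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Analyze under a read lock, do the expensive work unlocked, then
    // re-validate under the write lock before mutating anything.
    class LockSplitSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      Object allocate(String src) {
        Object state;
        lock.readLock().lock();
        try {
          state = analyze(src);             // Part I: cheap validation only
        } finally {
          lock.readLock().unlock();
        }

        Object targets = chooseTargets(state);  // expensive, runs unlocked

        lock.writeLock().lock();
        try {
          analyze(src);                     // Part II: state may have changed
          return commit(src, targets);      // mutate only after re-validating
        } finally {
          lock.writeLock().unlock();
        }
      }

      // hypothetical stand-ins
      Object analyze(String src) { return src; }
      Object chooseTargets(Object s) { return s; }
      Object commit(String src, Object t) { return t; }
    }
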
@@ -2438,13 +2484,16 @@ public class FSNamesystem implements Nam
   }
   
   /** make sure that we still have the lease on this file. */
-  private INodeFileUnderConstruction checkLease(String src, String holder) 
-      throws LeaseExpiredException, UnresolvedLinkException {
-    return checkLease(src, holder, dir.getINode(src));
+  private INodeFileUnderConstruction checkLease(String src, String holder)
+      throws LeaseExpiredException, UnresolvedLinkException,
+      FileNotFoundException {
+    return checkLease(src, INodeId.GRANDFATHER_INODE_ID, holder,
+        dir.getINode(src));
   }
-
-  private INodeFileUnderConstruction checkLease(String src, String holder,
-      INode file) throws LeaseExpiredException {
+  
+  private INodeFileUnderConstruction checkLease(String src, long fileId,
+      String holder, INode file) throws LeaseExpiredException,
+      FileNotFoundException {
     assert hasReadOrWriteLock();
     if (file == null || !(file instanceof INodeFile)) {
       Lease lease = leaseManager.getLease(holder);
@@ -2465,6 +2514,7 @@ public class FSNamesystem implements Nam
       throw new LeaseExpiredException("Lease mismatch on " + src + " owned by "
           + pendingFile.getClientName() + " but is accessed by " + holder);
     }
+    INodeId.checkId(fileId, pendingFile);
     return pendingFile;
   }
  
@@ -2506,7 +2556,8 @@ public class FSNamesystem implements Nam
     final INodesInPath iip = dir.getLastINodeInPath(src);
     final INodeFileUnderConstruction pendingFile;
     try {
-      pendingFile = checkLease(src, holder, iip.getINode(0)); 
+      pendingFile = checkLease(src, INodeId.GRANDFATHER_INODE_ID,
+          holder, iip.getINode(0)); 
     } catch (LeaseExpiredException lee) {
       final INode inode = dir.getINode(src);
       if (inode != null && inode instanceof INodeFile && !inode.isUnderConstruction()) {
@@ -2543,22 +2594,33 @@ public class FSNamesystem implements Nam
   }
 
   /**
-   * Allocate a block at the given pending filename
+   * Save allocated block at the given pending filename
    * 
    * @param src path to the file
    * @param inodesInPath representing each of the components of src. 
    *                     The last INode is the INode for the file.
    * @throws QuotaExceededException If addition of block exceeds space quota
    */
-  private Block allocateBlock(String src, INodesInPath inodesInPath,
-      DatanodeDescriptor targets[]) throws IOException {
+  BlockInfo saveAllocatedBlock(String src, INodesInPath inodesInPath,
+      Block newBlock, DatanodeDescriptor targets[]) throws IOException {
+    assert hasWriteLock();
+    BlockInfo b = dir.addBlock(src, inodesInPath, newBlock, targets);
+    NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
+        + getBlockPoolId() + " " + b);
+    for (DatanodeDescriptor dn : targets) {
+      dn.incBlocksScheduled();
+    }
+    return b;
+  }
+
+  /**
+   * Create new block with a unique block id and a new generation stamp.
+   */
+  Block createNewBlock() throws IOException {
     assert hasWriteLock();
     Block b = new Block(getFSImage().getUniqueBlockId(), 0, 0); 
     // Increment the generation stamp for every new block.
     b.setGenerationStamp(nextGenerationStamp());
-    b = dir.addBlock(src, inodesInPath, b, targets);
-    NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
-        + blockPoolId + " " + b);
     return b;
   }
 
@@ -5623,7 +5685,7 @@ public class FSNamesystem implements Nam
   @Override
   public boolean isAvoidingStaleDataNodesForWrite() {
     return this.blockManager.getDatanodeManager()
-        .isAvoidingStaleDataNodesForWrite();
+        .shouldAvoidStaleDataNodesForWrite();
   }
   
   public SnapshotManager getSnapshotManager() {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeId.java Sat Feb  9 21:29:44 2013
@@ -17,18 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.FileNotFoundException;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.SequentialNumber;
 
 /**
- * An id which uniquely identifies an inode
+ * An id which uniquely identifies an inode. Ids 1 to 1000 are reserved for
+ * potential future usage. The id won't be recycled and is not expected to
+ * wrap around for a very long time. The root inode id is always 1001. Id 0
+ * is used for backward compatibility support.
  */
 @InterfaceAudience.Private
-class INodeId extends SequentialNumber {
+public class INodeId extends SequentialNumber {
   /**
-   * The last reserved inode id. Reserve id 1 to 1000 for potential future
-   * usage. The id won't be recycled and is not expected to wrap around in a
-   * very long time. Root inode id will be 1001.
+   * The last reserved inode id. 
    */
   public static final long LAST_RESERVED_ID = 1000L;
 
@@ -38,6 +41,19 @@ class INodeId extends SequentialNumber {
    */
   public static final long GRANDFATHER_INODE_ID = 0;
 
+  /**
+   * Check that the request id matches the saved inode id. A request with
+   * GRANDFATHER_INODE_ID skips the check, for backward compatibility.
+   */
+  public static void checkId(long requestId, INode inode)
+      throws FileNotFoundException {
+    if (requestId != GRANDFATHER_INODE_ID && requestId != inode.getId()) {
+      throw new FileNotFoundException(
+          "ID mismatch. Request id and saved id: " + requestId + " , "
+              + inode.getId());
+    }
+  }
+  
   INodeId() {
     super(LAST_RESERVED_ID);
   }
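
checkId() is what makes the new fileId plumbing in addBlock()/checkLease() safe: a client that captured the id at create() time can be told apart from one operating on a path whose file has since been deleted and re-created. A small illustration of the semantics (ids are made up; only the check itself mirrors the method added above):

    import java.io.FileNotFoundException;

    class CheckIdSketch {
      // Mirrors INodeId.checkId: 0 (GRANDFATHER_INODE_ID) skips the check
      // so old clients that never learned an id keep working.
      static void check(long requestId, long savedId) throws FileNotFoundException {
        if (requestId != 0L && requestId != savedId) {
          throw new FileNotFoundException(
              "ID mismatch. Request id and saved id: " + requestId + ", " + savedId);
        }
      }
    }

A stale handle (say, request id 16389 against an inode now saved as 16401) fails with FileNotFoundException, while a legacy request id of 0 always passes.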

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java Sat Feb  9 21:29:44 2013
@@ -587,7 +587,7 @@ public class NNStorage extends Storage i
     }
 
     // Set Block pool ID in version with federation support
-    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+    if (versionSupportsFederation()) {
       String sbpid = props.getProperty("blockpoolID");
       setBlockPoolID(sd.getRoot(), sbpid);
     }
@@ -634,7 +634,7 @@ public class NNStorage extends Storage i
                            ) throws IOException {
     super.setPropertiesFromFields(props, sd);
     // Set blockpoolID in version with federation support
-    if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
+    if (versionSupportsFederation()) {
       props.setProperty("blockpoolID", blockpoolID);
     }
   }
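
Both hunks replace the inlined layout-version check with a versionSupportsFederation() helper (also used by the SecondaryNameNode change further down). The diff does not show the helper's body; presumably it is equivalent to the expression it replaces, roughly:

    // Assumed shape of the helper, based on the check it replaces here.
    boolean versionSupportsFederation() {
      return LayoutVersion.supports(Feature.FEDERATION, layoutVersion);
    }
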

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java Sat Feb  9 21:29:44 2013
@@ -424,13 +424,10 @@ class NameNodeRpcServer implements Namen
   }
 
   @Override // ClientProtocol
-  public void create(String src, 
-                     FsPermission masked,
-                     String clientName, 
-                     EnumSetWritable<CreateFlag> flag,
-                     boolean createParent,
-                     short replication,
-                     long blockSize) throws IOException {
+  public HdfsFileStatus create(String src, FsPermission masked,
+      String clientName, EnumSetWritable<CreateFlag> flag,
+      boolean createParent, short replication, long blockSize)
+      throws IOException {
     String clientMachine = getClientMachine();
     if (stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*DIR* NameNode.create: file "
@@ -440,12 +437,13 @@ class NameNodeRpcServer implements Namen
       throw new IOException("create: Pathname too long.  Limit "
           + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
-    namesystem.startFile(src,
-        new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
-            null, masked),
-        clientName, clientMachine, flag.get(), createParent, replication, blockSize);
+    HdfsFileStatus fileStatus = namesystem.startFile(src, new PermissionStatus(
+        UserGroupInformation.getCurrentUser().getShortUserName(), null, masked),
+        clientName, clientMachine, flag.get(), createParent, replication,
+        blockSize);
     metrics.incrFilesCreated();
     metrics.incrCreateFileOps();
+    return fileStatus;
   }
 
   @Override // ClientProtocol
@@ -484,26 +482,24 @@ class NameNodeRpcServer implements Namen
       throws IOException {
     namesystem.setOwner(src, username, groupname);
   }
-
-  @Override // ClientProtocol
-  public LocatedBlock addBlock(String src,
-                               String clientName,
-                               ExtendedBlock previous,
-                               DatanodeInfo[] excludedNodes)
+  
+  @Override
+  public LocatedBlock addBlock(String src, String clientName,
+      ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId)
       throws IOException {
-    if(stateChangeLog.isDebugEnabled()) {
-      stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
-          +src+" for "+clientName);
+    if (stateChangeLog.isDebugEnabled()) {
+      stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
+          + " fileId=" + fileId + " for " + clientName);
     }
     HashMap<Node, Node> excludedNodesSet = null;
     if (excludedNodes != null) {
       excludedNodesSet = new HashMap<Node, Node>(excludedNodes.length);
-      for (Node node:excludedNodes) {
+      for (Node node : excludedNodes) {
         excludedNodesSet.put(node, node);
       }
     }
-    LocatedBlock locatedBlock = 
-      namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet);
+    LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
+        clientName, previous, excludedNodesSet);
     if (locatedBlock != null)
       metrics.incrAddBlockOps();
     return locatedBlock;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Sat Feb  9 21:29:44 2013
@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.DFSUtil.percent2String;
+
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.MemoryMXBean;
 import java.lang.management.MemoryUsage;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.net.URLEncoder;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -64,6 +67,14 @@ import org.znerd.xmlenc.XMLOutputter;
 import com.google.common.base.Preconditions;
 
 class NamenodeJspHelper {
+  static String fraction2String(double value) {
+    return StringUtils.format("%.2f", value);
+  }
+
+  static String fraction2String(long numerator, long denominator) {
+    return fraction2String(numerator/(double)denominator);
+  }
+
   static String getSafeModeText(FSNamesystem fsn) {
     if (!fsn.isInSafeMode())
       return "";
@@ -361,20 +372,20 @@ class NamenodeJspHelper {
           + "DFS Remaining" + colTxt() + ":" + colTxt()
           + StringUtils.byteDesc(remaining) + rowTxt() + colTxt() + "DFS Used%"
           + colTxt() + ":" + colTxt()
-          + StringUtils.limitDecimalTo2(percentUsed) + " %" + rowTxt()
+          + percent2String(percentUsed) + rowTxt()
           + colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt()
-          + StringUtils.limitDecimalTo2(percentRemaining) + " %"
+          + percent2String(percentRemaining)
           + rowTxt() + colTxt() + "Block Pool Used" + colTxt() + ":" + colTxt()
           + StringUtils.byteDesc(bpUsed) + rowTxt()
           + colTxt() + "Block Pool Used%"+ colTxt() + ":" + colTxt()
-          + StringUtils.limitDecimalTo2(percentBpUsed) + " %" 
+          + percent2String(percentBpUsed) 
           + rowTxt() + colTxt() + "DataNodes usages" + colTxt() + ":" + colTxt()
           + "Min %" + colTxt() + "Median %" + colTxt() + "Max %" + colTxt()
           + "stdev %" + rowTxt() + colTxt() + colTxt() + colTxt()
-          + StringUtils.limitDecimalTo2(min) + " %"
-          + colTxt() + StringUtils.limitDecimalTo2(median) + " %"
-          + colTxt() + StringUtils.limitDecimalTo2(max) + " %"
-          + colTxt() + StringUtils.limitDecimalTo2(dev) + " %"
+          + percent2String(min)
+          + colTxt() + percent2String(median)
+          + colTxt() + percent2String(max)
+          + colTxt() + percent2String(dev)
           + rowTxt() + colTxt()
           + "<a href=\"dfsnodelist.jsp?whatNodes=LIVE\">Live Nodes</a> "
           + colTxt() + ":" + colTxt() + live.size()
@@ -443,7 +454,13 @@ class NamenodeJspHelper {
       nodeToRedirect = nn.getHttpAddress().getHostName();
       redirectPort = nn.getHttpAddress().getPort();
     }
-    String addr = nn.getNameNodeAddressHostPortString();
+
+    InetSocketAddress rpcAddr = nn.getNameNodeAddress();
+    String rpcHost = rpcAddr.getAddress().isAnyLocalAddress()
+      ? URI.create(request.getRequestURL().toString()).getHost()
+      : rpcAddr.getAddress().getHostAddress();
+    String addr = rpcHost + ":" + rpcAddr.getPort();
+
     String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
     redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":" + redirectPort
         + "/browseDirectory.jsp?namenodeInfoPort="
@@ -562,9 +579,9 @@ class NamenodeJspHelper {
       long u = d.getDfsUsed();
       long nu = d.getNonDfsUsed();
       long r = d.getRemaining();
-      String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent());
-      String percentRemaining = StringUtils.limitDecimalTo2(d
-          .getRemainingPercent());
+      final double percentUsedValue = d.getDfsUsedPercent();
+      String percentUsed = fraction2String(percentUsedValue);
+      String percentRemaining = fraction2String(d.getRemainingPercent());
 
       String adminState = d.getAdminState().toString();
 
@@ -572,32 +589,30 @@ class NamenodeJspHelper {
       long currentTime = Time.now();
       
       long bpUsed = d.getBlockPoolUsed();
-      String percentBpUsed = StringUtils.limitDecimalTo2(d
-          .getBlockPoolUsedPercent());
+      String percentBpUsed = fraction2String(d.getBlockPoolUsedPercent());
 
       out.print("<td class=\"lastcontact\"> "
           + ((currentTime - timestamp) / 1000)
           + "<td class=\"adminstate\">"
           + adminState
           + "<td align=\"right\" class=\"capacity\">"
-          + StringUtils.limitDecimalTo2(c * 1.0 / diskBytes)
+          + fraction2String(c, diskBytes)
           + "<td align=\"right\" class=\"used\">"
-          + StringUtils.limitDecimalTo2(u * 1.0 / diskBytes)
+          + fraction2String(u, diskBytes)
           + "<td align=\"right\" class=\"nondfsused\">"
-          + StringUtils.limitDecimalTo2(nu * 1.0 / diskBytes)
+          + fraction2String(nu, diskBytes)
           + "<td align=\"right\" class=\"remaining\">"
-          + StringUtils.limitDecimalTo2(r * 1.0 / diskBytes)
+          + fraction2String(r, diskBytes)
           + "<td align=\"right\" class=\"pcused\">"
           + percentUsed
           + "<td class=\"pcused\">"
-          + ServletUtil.percentageGraph((int) Double.parseDouble(percentUsed),
-              100) 
+          + ServletUtil.percentageGraph((int)percentUsedValue, 100) 
           + "<td align=\"right\" class=\"pcremaining\">"
           + percentRemaining 
           + "<td title=" + "\"blocks scheduled : "
           + d.getBlocksScheduled() + "\" class=\"blocks\">" + d.numBlocks()+"\n"
           + "<td align=\"right\" class=\"bpused\">"
-          + StringUtils.limitDecimalTo2(bpUsed * 1.0 / diskBytes)
+          + fraction2String(bpUsed, diskBytes)
           + "<td align=\"right\" class=\"pcbpused\">"
           + percentBpUsed
           + "<td align=\"right\" class=\"volfails\">"

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Sat Feb  9 21:29:44 2013
@@ -475,14 +475,20 @@ public class SecondaryNameNode implement
     // Returns a token that would be used to upload the merged image.
     CheckpointSignature sig = namenode.rollEditLog();
     
-    if ((checkpointImage.getNamespaceID() == 0) ||
-        (sig.isSameCluster(checkpointImage) &&
+    boolean loadImage = false;
+    boolean isFreshCheckpointer = (checkpointImage.getNamespaceID() == 0);
+    boolean isSameCluster =
+        (dstStorage.versionSupportsFederation() && sig.isSameCluster(checkpointImage)) ||
+        (!dstStorage.versionSupportsFederation() && sig.namespaceIdMatches(checkpointImage));
+    if (isFreshCheckpointer ||
+        (isSameCluster &&
          !sig.storageVersionMatches(checkpointImage.getStorage()))) {
       // if we're a fresh 2NN, or if we're on the same cluster and our storage
       // needs an upgrade, just take the storage info from the server.
       dstStorage.setStorageInfo(sig);
       dstStorage.setClusterID(sig.getClusterID());
       dstStorage.setBlockPoolID(sig.getBlockpoolID());
+      loadImage = true;
     }
     sig.validateStorageInfo(checkpointImage);
 
@@ -492,7 +498,7 @@ public class SecondaryNameNode implement
     RemoteEditLogManifest manifest =
       namenode.getEditLogManifest(sig.mostRecentCheckpointTxId + 1);
 
-    boolean loadImage = downloadCheckpointFiles(
+    loadImage |= downloadCheckpointFiles(
         fsName, checkpointImage, sig, manifest);   // Fetch fsimage and edits
     doMerge(sig, manifest, loadImage, checkpointImage, namesystem);
     

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Sat Feb  9 21:29:44 2013
@@ -316,8 +316,7 @@ public class DFSAdmin extends FsShell {
       System.out.println("DFS Used: " + used
                          + " (" + StringUtils.byteDesc(used) + ")");
       System.out.println("DFS Used%: "
-                         + StringUtils.limitDecimalTo2(((1.0 * used) / presentCapacity) * 100)
-                         + "%");
+          + StringUtils.formatPercent(used/(double)presentCapacity, 2));
       
      /* These counts are not always up to date. They are updated after
        * iteration of an internal list. Should be updated in a few seconds to 

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Sat Feb  9 21:29:44 2013
@@ -219,6 +219,7 @@ public class JsonUtil {
     m.put("modificationTime", status.getModificationTime());
     m.put("blockSize", status.getBlockSize());
     m.put("replication", status.getReplication());
+    m.put("fileId", status.getFileId());
     return includeType ? toJsonString(FileStatus.class, m): JSON.toString(m);
   }
 
@@ -243,9 +244,10 @@ public class JsonUtil {
     final long mTime = (Long) m.get("modificationTime");
     final long blockSize = (Long) m.get("blockSize");
     final short replication = (short) (long) (Long) m.get("replication");
+    final long fileId = (Long) m.get("fileId");
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group,
-        symlink, DFSUtil.string2Bytes(localName));
+        symlink, DFSUtil.string2Bytes(localName), fileId);
   }
 
   /** Convert an ExtendedBlock to a Json map. */
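
With these two hunks the WebHDFS JSON representation round-trips the new inode id: the writer adds a "fileId" entry, and the reader passes it to the extended HdfsFileStatus constructor. A tiny sketch of the map-level round trip (the value is made up):

    import java.util.Map;
    import java.util.TreeMap;

    Map<String, Object> m = new TreeMap<String, Object>();
    m.put("fileId", 16389L);                      // written by toJsonMap above
    final long fileId = (Long) m.get("fileId");   // read back by the parser
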

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java Sat Feb  9 21:29:44 2013
@@ -21,7 +21,7 @@ package org.apache.hadoop.hdfs.web.resou
 /** The concat source paths parameter. */
 public class ConcatSourcesParam extends StringParam {
   /** Parameter name. */
-  public static final String NAME = "srcs";
+  public static final String NAME = "sources";
 
   public static final String DEFAULT = NULL;
 

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1441206-1444434

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Sat Feb  9 21:29:44 2013
@@ -67,7 +67,8 @@ message CreateRequestProto {
   required uint64 blockSize = 7;
 }
 
-message CreateResponseProto { // void response
+message CreateResponseProto {
+  optional HdfsFileStatusProto fs = 1;
 }
 
 message AppendRequestProto {
@@ -119,6 +120,7 @@ message AddBlockRequestProto {
   required string clientName = 2;
   optional ExtendedBlockProto previous = 3;
   repeated DatanodeInfoProto excludeNodes = 4;
+  optional uint64 fileId = 5 [default = 0];  // default as a bogus id
 }
 
 message AddBlockResponseProto {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Sat Feb  9 21:29:44 2013
@@ -170,6 +170,9 @@ message HdfsFileStatusProto {
   optional uint32 block_replication = 10 [default = 0]; // only 16bits used
   optional uint64 blocksize = 11 [default = 0];
   optional LocatedBlocksProto locations = 12;  // supplied only if asked by client
+
+  // Optional field for fileId
+  optional uint64 fileId = 13 [default = 0]; // default as an invalid id
 } 
 
 /**

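Taken together, the proto changes thread the id end to end: create() now returns an HdfsFileStatusProto carrying fileId, and the client echoes that id back in AddBlockRequestProto, where [default = 0] keeps old clients on the grandfather id. A sketch of how a client-side translator might populate the field, assuming the usual protobuf-generated Java builders (path and client name are hypothetical):

    // 'status' would come from the CreateResponseProto of a prior create().
    AddBlockRequestProto req = AddBlockRequestProto.newBuilder()
        .setSrc("/user/example/data")           // hypothetical path
        .setClientName("DFSClient_example")     // hypothetical client name
        .setFileId(status.getFs().getFileId())  // 0 would mean unknown/legacy
        .build();
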
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Sat Feb  9 21:29:44 2013
@@ -999,17 +999,14 @@
 </property>
 
 <property>
-  <name>dfs.namenode.check.stale.datanode</name>
+  <name>dfs.namenode.avoid.read.stale.datanode</name>
   <value>false</value>
   <description>
-    Indicate whether or not to check "stale" datanodes whose 
+    Indicate whether or not to avoid reading from &quot;stale&quot; datanodes whose
     heartbeat messages have not been received by the namenode 
-    for more than a specified time interval. If this configuration 
-    parameter is set as true, the system will keep track 
-    of the number of stale datanodes. The stale datanodes will be 
+    for more than a specified time interval. Stale datanodes will be
     moved to the end of the node list returned for reading. See
-    dfs.namenode.avoid.write.stale.datanode for details on how this 
-    affects writes. 
+    dfs.namenode.avoid.write.stale.datanode for a similar setting for writes.
   </description>
 </property>
 
@@ -1017,13 +1014,13 @@
   <name>dfs.namenode.avoid.write.stale.datanode</name>
   <value>false</value>
   <description>
-    Indicate whether or not to avoid writing to "stale" datanodes whose 
+    Indicate whether or not to avoid writing to &quot;stale&quot; datanodes whose 
     heartbeat messages have not been received by the namenode 
-    for more than a specified time interval. If this configuration 
-    parameter and dfs.namenode.check.stale.datanode are both set as true, 
-    the writing will avoid using stale datanodes unless a high number 
-    of datanodes are marked as stale. See 
-    dfs.namenode.write.stale.datanode.ratio for details.
+    for more than a specified time interval. Writes will avoid using 
+    stale datanodes unless more than a configured ratio 
+    (dfs.namenode.write.stale.datanode.ratio) of datanodes are marked as 
+    stale. See dfs.namenode.avoid.read.stale.datanode for a similar setting
+    for reads.
   </description>
 </property>
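
The renamed read-side key pairs with the write-side one; both default to false. A minimal sketch of enabling them programmatically (key strings taken verbatim from the descriptions above):

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.setBoolean("dfs.namenode.avoid.read.stale.datanode", true);
    conf.setBoolean("dfs.namenode.avoid.write.stale.datanode", true);
    // Write-side avoidance backs off once more than
    // dfs.namenode.write.stale.datanode.ratio of datanodes look stale.
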
 

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1441206-1444434

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1441206-1444434

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1441206-1444434

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm Sat Feb  9 21:29:44 2013
@@ -109,6 +109,9 @@ WebHDFS REST API
     * {{{Append to a File}<<<APPEND>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append)
 
+    * {{{Concat File(s)}<<<CONCAT>>>}}
+        (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat)
+
   * HTTP DELETE
 
     * {{{Delete a File/Directory}<<<DELETE>>>}}
@@ -299,6 +302,32 @@ Content-Length: 0
    {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append
 
 
+** {Concat File(s)}
+
+  * Submit a HTTP POST request.
+
++---------------------------------
+curl -i -X POST "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CONCAT&sources=<SOURCES>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  This REST API call is available as of Hadoop version 2.0.3.
+  Please note that <SOURCES> is a comma-separated list of absolute paths.
+  (Example: sources=/test/file1,/test/file2,/test/file3)
+
+  See also:
+  {{{Sources}<<<sources>>>}},
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat
+
+
 ** {Open and Read a File}
 
   * Submit a HTTP GET request with automatically following redirects.
@@ -1727,6 +1756,29 @@ var tokenProperties =
   {{{Set Replication Factor}<<<SETREPLICATION>>>}}
 
 
+** {Sources}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<sources>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | The comma-separated absolute paths used for concatenation. |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | \<empty\> |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | A list of comma-separated absolute FileSystem paths without scheme and authority. |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | See the note in {{Delegation}}. |
+*----------------+-------------------------------------------------------------------+
+
+  <<Note>> that sources are absolute FileSystem paths.
+
+
+  See also:
+  {{{Concat File(s)}<<<CONCAT>>>}}
+
+
 ** {Token}
 
 *----------------+-------------------------------------------------------------------+
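
For completeness, the Java-side counterpart of the new CONCAT op is FileSystem.concat, which the new doc sections reference. A minimal sketch using the doc's example source paths (the target path here is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ConcatExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        fs.concat(new Path("/test/target"),            // hypothetical target
            new Path[] { new Path("/test/file1"),      // sources from the
                         new Path("/test/file2"),      // doc's example
                         new Path("/test/file3") });
      }
    }
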

Propchange: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1441206-1444434

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Sat Feb  9 21:29:44 2013
@@ -33,6 +33,7 @@ import java.io.FileInputStream;
 import java.io.FileReader;
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.net.URL;
@@ -639,6 +640,9 @@ public class DFSTestUtil {
    */
   public static byte[] urlGetBytes(URL url) throws IOException {
     URLConnection conn = url.openConnection();
+    HttpURLConnection hc = (HttpURLConnection)conn;
+    
+    assertEquals(HttpURLConnection.HTTP_OK, hc.getResponseCode());
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
     return out.toByteArray();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java Sat Feb  9 21:29:44 2013
@@ -23,22 +23,34 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.SocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 
+import javax.net.SocketFactory;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.ConnectTimeoutException;
+import org.apache.hadoop.net.StandardSocketFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.hamcrest.BaseMatcher;
+import org.hamcrest.Description;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestDFSClientFailover {
   
@@ -92,6 +104,63 @@ public class TestDFSClientFailover {
   }
   
   /**
+   * Test that even a non-idempotent method will properly fail-over if the
+   * first IPC attempt times out trying to connect. Regression test for
+   * HDFS-4404. 
+   */
+  @Test
+  public void testFailoverOnConnectTimeout() throws Exception {
+    conf.setClass(CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
+        InjectingSocketFactory.class, SocketFactory.class);
+    // Set up the InjectingSocketFactory to throw a ConnectTimeoutException
+    // when connecting to the first NN.
+    InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);
+
+    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
+    
+    // Make the second NN the active one.
+    cluster.shutdownNameNode(0);
+    cluster.transitionToActive(1);
+    
+    // Call a non-idempotent method, and ensure the failover of the call proceeds
+    // successfully.
+    IOUtils.closeStream(fs.create(TEST_FILE));
+  }
+  
+  private static class InjectingSocketFactory extends StandardSocketFactory {
+
+    static SocketFactory defaultFactory = SocketFactory.getDefault();
+
+    static int portToInjectOn;
+    
+    @Override
+    public Socket createSocket() throws IOException {
+      Socket spy = Mockito.spy(defaultFactory.createSocket());
+      // Simplify our spying job by not having to also spy on the channel
+      Mockito.doReturn(null).when(spy).getChannel();
+      // Throw a ConnectTimeoutException when connecting to our target "bad"
+      // host.
+      Mockito.doThrow(new ConnectTimeoutException("injected"))
+        .when(spy).connect(
+            Mockito.argThat(new MatchesPort()),
+            Mockito.anyInt());
+      return spy;
+    }
+
+    private class MatchesPort extends BaseMatcher<SocketAddress> {
+      @Override
+      public boolean matches(Object arg0) {
+        return ((InetSocketAddress)arg0).getPort() == portToInjectOn;
+      }
+
+      @Override
+      public void describeTo(Description desc) {
+        desc.appendText("matches port " + portToInjectOn);
+      }
+    }
+  }
+  
+  /**
    * Regression test for HDFS-2683.
    */
   @Test

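The new test works by swapping Hadoop's RPC socket factory for a fault-injecting one, so the very first connect() to the "bad" NN fails with a ConnectTimeoutException and the proxy provider moves on to the other NN. A minimal sketch of that wiring, assuming a running MiniDFSCluster named "cluster" and an InjectingSocketFactory like the one above:

    // Sketch of the fault-injection wiring used by the test (assumes a
    // running MiniDFSCluster "cluster" and the InjectingSocketFactory above).
    Configuration conf = new Configuration();
    conf.setClass(
        CommonConfigurationKeysPublic.HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
        InjectingSocketFactory.class, SocketFactory.class);
    // Every connect() to this port now throws ConnectTimeoutException, which
    // the client-side retry logic treats as safe to fail over on.
    InjectingSocketFactory.portToInjectOn = cluster.getNameNodePort(0);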
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Sat Feb  9 21:29:44 2013
@@ -23,7 +23,10 @@ import static org.junit.Assert.assertFal
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyShort;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -49,13 +52,13 @@ import org.apache.commons.logging.LogFac
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsUtils;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -64,12 +67,14 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
@@ -208,7 +213,7 @@ public class TestDFSClientRetries {
    * Verify that client will correctly give up after the specified number
    * of times trying to add a block
    */
-  @SuppressWarnings("serial")
+  @SuppressWarnings({ "serial", "unchecked" })
   @Test
   public void testNotYetReplicatedErrors() throws IOException
   { 
@@ -235,7 +240,22 @@ public class TestDFSClientRetries {
     when(mockNN.addBlock(anyString(), 
                          anyString(),
                          any(ExtendedBlock.class),
-                         any(DatanodeInfo[].class))).thenAnswer(answer);
+                         any(DatanodeInfo[].class),
+                         anyLong())).thenAnswer(answer);
+    
+    Mockito.doReturn(
+            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+                (short) 777), "owner", "group", new byte[0], new byte[0],
+                1010)).when(mockNN).getFileInfo(anyString());
+    
+    Mockito.doReturn(
+            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+                (short) 777), "owner", "group", new byte[0], new byte[0],
+                1010))
+        .when(mockNN)
+        .create(anyString(), (FsPermission) anyObject(), anyString(),
+            (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
+            anyShort(), anyLong());
 
     final DFSClient client = new DFSClient(null, mockNN, conf, null);
     OutputStream os = client.create("testfile", true);
@@ -369,7 +389,8 @@ public class TestDFSClientRetries {
           return ret2;
         }
       }).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
-          Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
+          Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
+          Mockito.anyLong());
 
       doAnswer(new Answer<Boolean>() {
 
@@ -410,7 +431,8 @@ public class TestDFSClientRetries {
       // Make sure the mock was actually properly injected.
       Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(
           Mockito.anyString(), Mockito.anyString(),
-          Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any());
+          Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
+          Mockito.anyLong());
       Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(
           Mockito.anyString(), Mockito.anyString(),
           Mockito.<ExtendedBlock>any());

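The extra anyLong() in these stubs tracks a signature change: addBlock() now carries the inode id of the file being written, which is what lets the NameNode reject a writer whose path has been renamed and recreated underneath it (see TestINodeFile below). Reconstructed from the mocks here and the call sites later in this patch, the updated RPC has roughly the following shape; the parameter names are assumptions:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    // Approximate shape of the updated addBlock() RPC; parameter names are
    // guesses. The trailing fileId parameter is the addition this patch tests.
    interface AddBlockSketch {
      LocatedBlock addBlock(String src, String clientName,
          ExtendedBlock previous, DatanodeInfo[] excludeNodes,
          long fileId) throws IOException;
    }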
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Sat Feb  9 21:29:44 2013
@@ -619,6 +619,16 @@ public class TestDFSUtil {
     
     assertEquals(1, uris.size());
     assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
+
+    // Make sure that when FS_DEFAULT_NAME_KEY is configured with an IP
+    // address, it is automatically converted to a hostname
+    conf = new HdfsConfiguration();
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
+    uris = DFSUtil.getNameServiceUris(conf);
+    assertEquals(1, uris.size());
+    for (URI uri : uris) {
+      assertFalse(uri.getHost().equals("127.0.0.1"));
+    }
   }
   
   @Test

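The added assertion pins down canonicalization behavior: an IP-literal fs.defaultFS should surface as a hostname-based URI from DFSUtil.getNameServiceUris(). A standalone sketch of the same check (it assumes reverse DNS for the loopback address resolves, as on most developer machines):

    // Sketch mirroring the new assertion; assumes 127.0.0.1 reverse-resolves.
    Configuration conf = new HdfsConfiguration();
    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020");
    for (URI uri : DFSUtil.getNameServiceUris(conf)) {
      System.out.println(uri.getHost());  // expect a hostname, not 127.0.0.1
    }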
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Sat Feb  9 21:29:44 2013
@@ -71,6 +71,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -517,8 +518,8 @@ public class TestFileCreation {
           + "The file has " + locations.locatedBlockCount() + " blocks.");
 
       // add one block to the file
-      LocatedBlock location = client.getNamenode().addBlock(file1.toString(), 
-          client.clientName, null, null);
+      LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
+          client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID);
       System.out.println("testFileCreationError2: "
           + "Added block " + location.getBlock());
 
@@ -568,8 +569,8 @@ public class TestFileCreation {
       final Path f = new Path("/foo.txt");
       createFile(dfs, f, 3);
       try {
-        cluster.getNameNodeRpc().addBlock(f.toString(), 
-            client.clientName, null, null);
+        cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName,
+            null, null, INodeId.GRANDFATHER_INODE_ID);
         fail();
       } catch(IOException ioe) {
         FileSystem.LOG.info("GOOD!", ioe);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileLengthOnClusterRestart.java Sat Feb  9 21:29:44 2013
@@ -43,7 +43,7 @@ public class TestFileLengthOnClusterRest
         .numDataNodes(2).build();
     HdfsDataInputStream in = null;
     try {
-      Path path = new Path(MiniDFSCluster.getBaseDirectory(), "test");
+      Path path = new Path("/tmp/TestFileLengthOnClusterRestart", "test");
       DistributedFileSystem dfs = (DistributedFileSystem) cluster
           .getFileSystem();
       FSDataOutputStream out = dfs.create(path);

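This path change recurs throughout the patch (TestRBWBlockInvalidation, TestLargeBlock, and TestListFilesInDFS below get the same treatment): MiniDFSCluster.getBaseDirectory() names a directory on the local filesystem, so embedding it in an HDFS Path scattered test files under a build-dependent namespace. In sketch form:

    // Before: a *local* build directory name reused inside the DFS namespace.
    Path old = new Path(MiniDFSCluster.getBaseDirectory(), "test");
    // After: an absolute path that lives purely inside HDFS.
    Path fixed = new Path("/tmp/TestFileLengthOnClusterRestart", "test");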
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Sat Feb  9 21:29:44 2013
@@ -88,7 +88,7 @@ public class TestGetBlocks {
   @Test
   public void testReadSelectNonStaleDatanode() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     long staleInterval = 30 * 1000 * 60;
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
         staleInterval);

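The key rename reflects a split of the old all-purpose stale-datanode switch into separate read-side and write-side controls; both new keys appear elsewhere in this patch (TestReplicationPolicy below sets both). A minimal configuration sketch, with illustrative values:

    // The old DFS_NAMENODE_CHECK_STALE_DATANODE_KEY is replaced by two keys.
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);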
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java Sat Feb  9 21:29:44 2013
@@ -183,8 +183,7 @@ public class TestLargeBlock {
     try {
 
       // create a new file in test data directory
-      Path file1 = new Path(System.getProperty("test.build.data") + "/" +
-          Long.toString(blockSize) + ".dat");
+      Path file1 = new Path("/tmp/TestLargeBlock", blockSize + ".dat");
       FSDataOutputStream stm = createFile(fs, file1, 1, blockSize);
       LOG.info("File " + file1 + " created with file size " +
           fileSize +

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java Sat Feb  9 21:29:44 2013
@@ -18,6 +18,10 @@
 package org.apache.hadoop.hdfs;
 
 import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.anyShort;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
@@ -29,14 +33,18 @@ import java.security.PrivilegedException
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -256,6 +264,7 @@ public class TestLease {
     }
   }
 
+  @SuppressWarnings("unchecked")
   @Test
   public void testFactory() throws Exception {
     final String[] groups = new String[]{"supergroup"};
@@ -264,6 +273,20 @@ public class TestLease {
       ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
     }
 
+    Mockito.doReturn(
+        new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+            (short) 777), "owner", "group", new byte[0], new byte[0],
+            1010)).when(mcp).getFileInfo(anyString());
+    Mockito
+        .doReturn(
+            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+                (short) 777), "owner", "group", new byte[0], new byte[0],
+                1010))
+        .when(mcp)
+        .create(anyString(), (FsPermission) anyObject(), anyString(),
+            (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
+            anyShort(), anyLong());
+
     final Configuration conf = new Configuration();
     final DFSClient c1 = createDFSClientAs(ugi[0], conf);
     FSDataOutputStream out1 = createFsOut(c1, "/out1");

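As in TestDFSClientRetries above, the mocked ClientProtocol must now stub create() and getFileInfo() with placeholder HdfsFileStatus objects: the client now reads the status returned by create() (notably its fileId, as TestINodeFile below asserts), so a Mockito mock returning null here would fail inside DFSClient before the behavior under test is ever reached.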
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInDFS.java Sat Feb  9 21:29:44 2013
@@ -38,6 +38,7 @@ public class TestListFilesInDFS extends 
 
   @BeforeClass
   public static void testSetUp() throws Exception {
+    setTestPaths(new Path("/tmp/TestListFilesInDFS"));
     cluster = new MiniDFSCluster.Builder(conf).build();
     fs = cluster.getFileSystem();
     fs.delete(TEST_DIR, true);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java Sat Feb  9 21:29:44 2013
@@ -70,8 +70,8 @@ public class TestQuota {
       throw new DSQuotaExceededException(bytes, bytes);
     } catch(DSQuotaExceededException e) {
       
-      assertEquals("The DiskSpace quota is exceeded: quota=1.0k " +
-          "diskspace consumed=1.0k", e.getMessage());
+      assertEquals("The DiskSpace quota is exceeded: quota = 1024 B = 1 KB"
+          + " but diskspace consumed = 1024 B = 1 KB", e.getMessage());
     }
   }
   

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java Sat Feb  9 21:29:44 2013
@@ -67,7 +67,7 @@ public class TestRBWBlockInvalidation {
     try {
       final FSNamesystem namesystem = cluster.getNamesystem();
       FileSystem fs = cluster.getFileSystem();
-      Path testPath = new Path(MiniDFSCluster.getBaseDirectory(), "foo1");
+      Path testPath = new Path("/tmp/TestRBWBlockInvalidation", "foo1");
       out = fs.create(testPath, (short) 2);
       out.writeBytes("HDFS-3157: " + testPath);
       out.hsync();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Sat Feb  9 21:29:44 2013
@@ -88,9 +88,11 @@ public class TestReplicationPolicy {
         "test.build.data", "build/test/data"), "dfs/");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         new File(baseDir, "name").getPath());
-    // Enable the checking for stale datanodes in the beginning
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
 
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
     DFSTestUtil.formatNameNode(conf);
     namenode = new NameNode(conf);
 
@@ -100,6 +102,8 @@ public class TestReplicationPolicy {
     // construct network topology
     for (int i=0; i < NUM_OF_DATANODES; i++) {
       cluster.add(dataNodes[i]);
+      bm.getDatanodeManager().getHeartbeatManager().addDatanode(
+          dataNodes[i]);
     }
     for (int i=0; i < NUM_OF_DATANODES; i++) {
       dataNodes[i].updateHeartbeat(
@@ -393,11 +397,11 @@ public class TestReplicationPolicy {
       throws Exception {
     try {
       namenode.getNamesystem().getBlockManager().getDatanodeManager()
-        .setAvoidStaleDataNodesForWrite(true);
+        .setNumStaleNodes(NUM_OF_DATANODES);
       testChooseTargetWithMoreThanAvailableNodes();
     } finally {
       namenode.getNamesystem().getBlockManager().getDatanodeManager()
-      .setAvoidStaleDataNodesForWrite(false);
+        .setNumStaleNodes(0);
     }
   }
   
@@ -479,12 +483,12 @@ public class TestReplicationPolicy {
   
   @Test
   public void testChooseTargetWithStaleNodes() throws Exception {
-    // Enable avoidng writing to stale datanodes
-    namenode.getNamesystem().getBlockManager().getDatanodeManager()
-        .setAvoidStaleDataNodesForWrite(true);
     // Set dataNodes[0] as stale
     dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1);
-
+    namenode.getNamesystem().getBlockManager()
+      .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+    assertTrue(namenode.getNamesystem().getBlockManager()
+        .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
     DatanodeDescriptor[] targets;
    // We set datanode[0] as stale, so the policy should choose datanode[1],
    // which is on the same rack as datanode[0] (the writer)
@@ -503,9 +507,9 @@ public class TestReplicationPolicy {
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     
     // reset
-    namenode.getNamesystem().getBlockManager().getDatanodeManager()
-        .setAvoidStaleDataNodesForWrite(false);
     dataNodes[0].setLastUpdate(Time.now());
+    namenode.getNamesystem().getBlockManager()
+      .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
   }
 
   /**
@@ -518,20 +522,20 @@ public class TestReplicationPolicy {
    */
   @Test
   public void testChooseTargetWithHalfStaleNodes() throws Exception {
-    // Enable stale datanodes checking
-    namenode.getNamesystem().getBlockManager().getDatanodeManager()
-        .setAvoidStaleDataNodesForWrite(true);
     // Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale
     for (int i = 0; i < 3; i++) {
       dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1);
     }
+    namenode.getNamesystem().getBlockManager()
+      .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
 
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(filename, 0, dataNodes[0],
         new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
 
-    // We set the datanode[0] as stale, thus should choose datanode[1]
+    // Since we have 6 datanodes total, stale nodes should
+    // not be returned until we ask for more than 3 targets
     targets = replicator.chooseTarget(filename, 1, dataNodes[0],
         new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
@@ -557,18 +561,16 @@ public class TestReplicationPolicy {
     assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
     assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));
 
-    // reset
-    namenode.getNamesystem().getBlockManager().getDatanodeManager()
-        .setAvoidStaleDataNodesForWrite(false);
     for (int i = 0; i < dataNodes.length; i++) {
       dataNodes[i].setLastUpdate(Time.now());
     }
+    namenode.getNamesystem().getBlockManager()
+      .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
   }
 
   @Test
   public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
     conf.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
     String[] hosts = new String[]{"host1", "host2", "host3", 
@@ -598,7 +600,7 @@ public class TestReplicationPolicy {
           .getBlockManager().getDatanodeManager().getNumStaleNodes();
       assertEquals(numStaleNodes, 2);
       assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
-          .getDatanodeManager().isAvoidingStaleDataNodesForWrite());
+          .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
       // Call chooseTarget
       DatanodeDescriptor staleNodeInfo = miniCluster.getNameNode()
           .getNamesystem().getBlockManager().getDatanodeManager()
@@ -627,7 +629,7 @@ public class TestReplicationPolicy {
       // According to our strategy, stale datanodes will be included for writing
       // to avoid hotspots
       assertFalse(miniCluster.getNameNode().getNamesystem().getBlockManager()
-          .getDatanodeManager().isAvoidingStaleDataNodesForWrite());     
+          .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
       // Call chooseTarget
       targets = replicator.chooseTarget(filename, 3,
           staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
@@ -650,7 +652,7 @@ public class TestReplicationPolicy {
           .getBlockManager().getDatanodeManager().getNumStaleNodes();
       assertEquals(numStaleNodes, 2);
       assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
-          .getDatanodeManager().isAvoidingStaleDataNodesForWrite());
+          .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
       // Call chooseTarget
       targets = replicator.chooseTarget(filename, 3,
           staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);

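These tests now consult shouldAvoidStaleDataNodesForWrite() instead of toggling a flag: whether writes avoid stale nodes is derived from the live stale count (updated by heartbeatCheck()) rather than set directly. A simplified sketch of the decision the assertions imply; the parameter names and exact threshold are assumptions, and the real logic lives in DatanodeManager:

    // Simplified sketch of the write-side stale-avoidance decision.
    boolean shouldAvoidStaleDataNodesForWrite(
        int numStaleNodes, int numTotalNodes, float staleRatioLimit) {
      // Avoid stale nodes only while they are a minority; once too much of
      // the cluster is stale, include them again to avoid hotspotting the
      // few fresh nodes (see the "avoid hotspots" comment in the test above).
      return numStaleNodes <= numTotalNodes * staleRatioLimit;
    }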
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java Sat Feb  9 21:29:44 2013
@@ -506,7 +506,11 @@ public abstract class FSImageTestUtil {
       props.load(fis);
       IOUtils.closeStream(fis);
   
-      props.setProperty(key, value);
+      if (value == null || value.isEmpty()) {
+        props.remove(key);
+      } else {
+        props.setProperty(key, value);
+      }
       
       out = new FileOutputStream(versionFile);
       props.store(out, null);

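With this change a caller can delete a key from a VERSION file by passing null or the empty string, instead of only overwriting values, which lets tests simulate a property that was never written. The behavior in isolation, using plain java.util.Properties (the key and value here are illustrative):

    java.util.Properties props = new java.util.Properties();
    props.setProperty("layoutVersion", "-40");    // illustrative key/value
    String value = null;                          // caller requests removal
    if (value == null || value.isEmpty()) {
      props.remove("layoutVersion");              // key vanishes from the file
    } else {
      props.setProperty("layoutVersion", value);
    }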
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Sat Feb  9 21:29:44 2013
@@ -1058,7 +1058,8 @@ public class NNThroughputBenchmark {
     throws IOException {
       ExtendedBlock prevBlock = null;
       for(int jdx = 0; jdx < blocksPerFile; jdx++) {
-        LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName, prevBlock, null);
+        LocatedBlock loc = nameNodeProto.addBlock(fileName, clientName,
+            prevBlock, null, INodeId.GRANDFATHER_INODE_ID);
         prevBlock = loc.getBlock();
         for(DatanodeInfo dnInfo : loc.getLocations()) {
           int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getXferAddr());

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1444439&r1=1444438&r2=1444439&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Sat Feb  9 21:29:44 2013
@@ -24,8 +24,10 @@ import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.EnumSet;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Options;
@@ -39,6 +41,8 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.junit.Test;
 
 public class TestINodeFile {
@@ -376,7 +380,7 @@ public class TestINodeFile {
    * @throws IOException
    */
   @Test
-  public void TestInodeId() throws IOException {
+  public void testInodeId() throws IOException {
 
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
@@ -396,9 +400,14 @@ public class TestINodeFile {
     assertTrue(fs.mkdirs(path));
     assertTrue(fsn.getLastInodeId() == 1002);
 
-    Path filePath = new Path("/test1/file");
-    fs.create(filePath);
+    // Use the NameNode RPC directly to create a file
+    NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
+    HdfsFileStatus fileStatus = nnrpc.create("/test1/file", new FsPermission(
+        (short) 0755), "client",
+        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
+        (short) 1, 128 * 1024 * 1024L);
     assertTrue(fsn.getLastInodeId() == 1003);
+    assertTrue(fileStatus.getFileId() == 1003);
 
     // Rename doesn't increase inode id
     Path renamedPath = new Path("/test2");
@@ -412,4 +421,44 @@ public class TestINodeFile {
     cluster.waitActive();
     assertTrue(fsn.getLastInodeId() == 1003);
   }
+
+  @Test
+  public void testWriteToRenamedFile() throws IOException {
+
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    cluster.waitActive();
+    FileSystem fs = cluster.getFileSystem();
+
+    Path path = new Path("/test1");
+    assertTrue(fs.mkdirs(path));
+
+    int size = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
+    byte[] data = new byte[size];
+
+    // Create one file
+    Path filePath = new Path("/test1/file");
+    FSDataOutputStream fos = fs.create(filePath);
+
+    // Rename /test1 to test2, and recreate /test1/file
+    Path renamedPath = new Path("/test2");
+    fs.rename(path, renamedPath);
+    fs.create(filePath, (short) 1);
+
+    // Adding a new block should fail, since the recreated /test1/file has a
+    // different fileId
+    try {
+      fos.write(data, 0, data.length);
+      // Make sure the addBlock() request reaches the NN immediately
+      fos.hflush();
+
+      fail("Write should fail after rename");
+    } catch (Exception e) {
+      /* Ignore */
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
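The new test exercises the end-to-end effect of threading fileId through addBlock(): after /test1 is renamed to /test2 and /test1/file is recreated, the original stream's pipeline requests refer to an inode that no longer matches the path, so the NameNode rejects them. Conceptually, the server-side check looks something like the sketch below; the method name and message are invented for illustration, and the real check lives in FSNamesystem:

    // Conceptual sketch of the fileId validation behind addBlock().
    void checkFileId(long rpcFileId, long currentInodeId) throws IOException {
      // GRANDFATHER_INODE_ID marks callers that predate inode ids and are
      // exempt from the check (see NNThroughputBenchmark above).
      if (rpcFileId != INodeId.GRANDFATHER_INODE_ID
          && rpcFileId != currentInodeId) {
        throw new IOException("fileId mismatch: path was recreated or renamed");
      }
    }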


