hadoop-hdfs-commits mailing list archives

From: szets...@apache.org
Subject: svn commit: r1337003 [1/2] - in /hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/fuse-dfs/src/ src/main/java/ src/main/java/org/apache/hadoop/hdfs/ src/main/java/org/apache/hadoop/hdfs/security/token/block/ src/main/jav...
Date: Fri, 11 May 2012 02:05:35 GMT
Author: szetszwo
Date: Fri May 11 02:05:31 2012
New Revision: 1337003

URL: http://svn.apache.org/viewvc?rev=1337003&view=rev
Log:
Merge r1335791 through r1337002 from trunk.

Removed:
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
Modified:
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
    hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1335791-1337002

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri May 11 02:05:31 2012
@@ -435,6 +435,22 @@ Release 2.0.0 - UNRELEASED 
     so that INodeFile and INodeFileUnderConstruction do not have to be used in
     block management.  (John George via szetszwo)
 
+    HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
+    logging is enabled. (atm)
+
+    HDFS-3341. Change minimum RPC versions to respective SNAPSHOTs instead of
+    final releases. (todd)
+
+    HDFS-3369. Rename {get|set|add}INode(..) methods in BlockManager and
+    BlocksMap to {get|set|add}BlockCollection(..).  (John George via szetszwo)
+
+    HDFS-3134. harden edit log loader against malformed or malicious input.
+    (Colin Patrick McCabe via eli)
+
+    HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
+
+    HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. (eli)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -621,6 +637,17 @@ Release 2.0.0 - UNRELEASED 
     HDFS-3376. DFSClient fails to make connection to DN if there are many
     unusable cached sockets (todd)
 
+    HDFS-3328. NPE in DataNode.getIpcPort. (eli)
+
+    HDFS-3396. FUSE build fails on Ubuntu 12.04. (Colin Patrick McCabe via eli)
+
+    HDFS-3395. NN doesn't start with HA+security enabled and HTTP address
+    set to 0.0.0.0. (atm)
+
+    HDFS-3385. The last block of INodeFileUnderConstruction is not
+    necessarily a BlockInfoUnderConstruction, so do not cast it in
+    FSNamesystem.recoverLeaseInternal(..).  (szetszwo)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am Fri May 11 02:05:31 2012
@@ -18,4 +18,5 @@ bin_PROGRAMS = fuse_dfs
 fuse_dfs_SOURCES = fuse_dfs.c fuse_options.c fuse_trash.c fuse_stat_struct.c fuse_users.c fuse_init.c fuse_connect.c fuse_impls_access.c fuse_impls_chmod.c  fuse_impls_chown.c  fuse_impls_create.c  fuse_impls_flush.c fuse_impls_getattr.c  fuse_impls_mkdir.c  fuse_impls_mknod.c  fuse_impls_open.c fuse_impls_read.c fuse_impls_release.c fuse_impls_readdir.c fuse_impls_rename.c fuse_impls_rmdir.c fuse_impls_statfs.c fuse_impls_symlink.c fuse_impls_truncate.c fuse_impls_utimens.c  fuse_impls_unlink.c fuse_impls_write.c
 AM_CFLAGS= -Wall -g
 AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_PREFIX)/../../src/main/native -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
-AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm -lm
+AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib64 -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -L$(FUSE_HOME)/lib -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server
+fuse_dfs_LDADD=-lfuse -lhdfs -ljvm -lm
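
A note on the fuse-dfs change above: automake places AM_LDFLAGS before the
object files on the link line, while fuse_dfs_LDADD entries come after them.
Ubuntu 12.04 links with --as-needed by default, which discards any library
that appears before an object referencing it, so keeping -lhdfs, -lfuse,
-ljvm and -lm in AM_LDFLAGS is the likely cause of the HDFS-3396 build
failure; moving them to fuse_dfs_LDADD restores the ordering the linker
expects.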

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1335791-1337002

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Fri May 11 02:05:31 2012
@@ -145,7 +145,7 @@ public class DFSConfigKeys extends Commo
   public static final String  DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY = "dfs.namenode.num.extra.edits.retained";
   public static final int     DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT = 1000000; //1M
   public static final String  DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version";
-  public static final String  DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0";
+  public static final String  DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
 
   public static final String  DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
   public static final int     DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
@@ -273,7 +273,7 @@ public class DFSConfigKeys extends Commo
   public static final int     DFS_DATANODE_IPC_DEFAULT_PORT = 50020;
  public static final String  DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_IPC_DEFAULT_PORT;
   public static final String  DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version";
-  public static final String  DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0";
+  public static final String  DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
 
   public static final String  DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable";
   public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;
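
The two version defaults above track HDFS-3341: development builds identify
themselves as 3.0.0-SNAPSHOT, and a SNAPSHOT version compares as older than
the corresponding release, so a minimum supported version of plain "3.0.0"
would make trunk NameNodes and DataNodes refuse to talk to each other.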

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java Fri May 11 02:05:31 2012
@@ -715,8 +715,11 @@ public class DFSUtil {
   public static String substituteForWildcardAddress(String configuredAddress,
       String defaultHost) throws IOException {
     InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
+    InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
+        + ":0");
     if (sockAddr.getAddress().isAnyLocalAddress()) {
-      if(UserGroupInformation.isSecurityEnabled()) {
+      if (UserGroupInformation.isSecurityEnabled() &&
+          defaultSockAddr.getAddress().isAnyLocalAddress()) {
         throw new IOException("Cannot use a wildcard address with security. " +
                               "Must explicitly set bind address for Kerberos");
       }
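
A minimal sketch of the new substituteForWildcardAddress behavior (HDFS-3395),
assuming security is enabled and the relevant hadoop imports in scope; the
host name and port below are hypothetical:

    // Before: any wildcard configured address threw IOException under
    // security. Now the wildcard host is substituted as long as the default
    // host itself is concrete; only a wildcard default still throws.
    String addr = DFSUtil.substituteForWildcardAddress(
        "0.0.0.0:50070",       // configured (wildcard) address
        "nn1.example.com");    // concrete default host
    // addr == "nn1.example.com:50070"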

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java Fri May 11 02:05:31 2012
@@ -148,7 +148,8 @@ public class BlockTokenIdentifier extend
     userId = WritableUtils.readString(in);
     blockPoolId = WritableUtils.readString(in);
     blockId = WritableUtils.readVLong(in);
-    int length = WritableUtils.readVInt(in);
+    int length = WritableUtils.readVIntInRange(in, 0,
+        AccessMode.class.getEnumConstants().length);
     for (int i = 0; i < length; i++) {
       modes.add(WritableUtils.readEnum(in, AccessMode.class));
     }
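
This hunk is part of the HDFS-3134 hardening: a length decoded from an
untrusted token is now range-checked before it drives a loop. A sketch of the
idea, using the real WritableUtils helper (in is the DataInput being
deserialized from):

    // readVIntInRange throws IOException for out-of-range values, so a forged
    // token claiming, say, Integer.MAX_VALUE access modes is rejected up
    // front instead of driving the read loop with attacker-controlled data.
    int length = WritableUtils.readVIntInRange(in, 0,
        AccessMode.class.getEnumConstants().length);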

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java Fri May 11 02:05:31 2012
@@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.util.Light
 @InterfaceAudience.Private
 public class BlockInfo extends Block implements
     LightWeightGSet.LinkedElement {
-  private BlockCollection inode;
+  private BlockCollection bc;
 
   /** For implementing {@link LightWeightGSet.LinkedElement} interface */
   private LightWeightGSet.LinkedElement nextLinkedElement;
@@ -57,13 +57,13 @@ public class BlockInfo extends Block imp
    */
   public BlockInfo(int replication) {
     this.triplets = new Object[3*replication];
-    this.inode = null;
+    this.bc = null;
   }
   
   public BlockInfo(Block blk, int replication) {
     super(blk);
     this.triplets = new Object[3*replication];
-    this.inode = null;
+    this.bc = null;
   }
 
   /**
@@ -72,16 +72,16 @@ public class BlockInfo extends Block imp
    * @param from BlockInfo to copy from.
    */
   protected BlockInfo(BlockInfo from) {
-    this(from, from.inode.getReplication());
-    this.inode = from.inode;
+    this(from, from.bc.getReplication());
+    this.bc = from.bc;
   }
 
-  public BlockCollection getINode() {
-    return inode;
+  public BlockCollection getBlockCollection() {
+    return bc;
   }
 
-  public void setINode(BlockCollection inode) {
-    this.inode = inode;
+  public void setBlockCollection(BlockCollection bc) {
+    this.bc = bc;
   }
 
   DatanodeDescriptor getDatanode(int index) {
@@ -334,7 +334,7 @@ public class BlockInfo extends Block imp
       BlockUCState s, DatanodeDescriptor[] targets) {
     if(isComplete()) {
       return new BlockInfoUnderConstruction(
-          this, getINode().getReplication(), s, targets);
+          this, getBlockCollection().getReplication(), s, targets);
     }
     // the block is already under construction
     BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java Fri May 11 02:05:31 2012
@@ -140,7 +140,7 @@ public class BlockManager {
   private final long replicationRecheckInterval;
   
   /**
-   * Mapping: Block -> { INode, datanodes, self ref }
+   * Mapping: Block -> { BlockCollection, datanodes, self ref }
    * Updated only in response to client-sent information.
    */
   final BlocksMap blocksMap;
@@ -190,7 +190,7 @@ public class BlockManager {
   public final short minReplication;
   /** Default number of replicas */
   public final int defaultReplication;
-  /** The maximum number of entries returned by getCorruptInodes() */
+  /** The maximum number of corrupt files to return to clients */
   final int maxCorruptFilesReturned;
 
   /** variable to enable check for enough racks */
@@ -382,7 +382,7 @@ public class BlockManager {
                          numReplicas.decommissionedReplicas();
     
     if (block instanceof BlockInfo) {
-      String fileName = ((BlockInfo)block).getINode().getName();
+      String fileName = ((BlockInfo)block).getBlockCollection().getName();
       out.print(fileName + ": ");
     }
     // l: == live, d: == decommissioned, c: == corrupt, e: == excess
@@ -452,17 +452,17 @@ public class BlockManager {
   * Commit the last block of the file and mark it as complete if it
   * meets the minimum replication requirement
    * 
-   * @param fileINode file inode
+   * @param bc block collection
    * @param commitBlock - contains client reported block length and generation
    * @return true if the last block is changed to committed state.
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  public boolean commitOrCompleteLastBlock(MutableBlockCollection fileINode, 
+  public boolean commitOrCompleteLastBlock(MutableBlockCollection bc, 
       Block commitBlock) throws IOException {
     if(commitBlock == null)
       return false; // not committing, this is a block allocation retry
-    BlockInfo lastBlock = fileINode.getLastBlock();
+    BlockInfo lastBlock = bc.getLastBlock();
     if(lastBlock == null)
       return false; // no blocks in file yet
     if(lastBlock.isComplete())
@@ -470,22 +470,22 @@ public class BlockManager {
     
     final boolean b = commitBlock((BlockInfoUnderConstruction)lastBlock, commitBlock);
     if(countNodes(lastBlock).liveReplicas() >= minReplication)
-      completeBlock(fileINode, fileINode.numBlocks()-1, false);
+      completeBlock(bc, bc.numBlocks()-1, false);
     return b;
   }
 
   /**
    * Convert a specified block of the file to a complete block.
-   * @param fileINode file
+   * @param bc file
    * @param blkIndex  block index in the file
    * @throws IOException if the block does not have at least a minimal number
    * of replicas reported from data-nodes.
    */
-  private BlockInfo completeBlock(final MutableBlockCollection fileINode,
+  private BlockInfo completeBlock(final MutableBlockCollection bc,
       final int blkIndex, boolean force) throws IOException {
     if(blkIndex < 0)
       return null;
-    BlockInfo curBlock = fileINode.getBlocks()[blkIndex];
+    BlockInfo curBlock = bc.getBlocks()[blkIndex];
     if(curBlock.isComplete())
       return curBlock;
     BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)curBlock;
@@ -498,7 +498,7 @@ public class BlockManager {
           "Cannot complete block: block has not been COMMITTED by the client");
     BlockInfo completeBlock = ucBlock.convertToCompleteBlock();
     // replace penultimate block in file
-    fileINode.setBlock(blkIndex, completeBlock);
+    bc.setBlock(blkIndex, completeBlock);
     
     // Since safe-mode only counts complete blocks, and we now have
     // one more complete block, we need to adjust the total up, and
@@ -514,12 +514,12 @@ public class BlockManager {
     return blocksMap.replaceBlock(completeBlock);
   }
 
-  private BlockInfo completeBlock(final MutableBlockCollection fileINode,
+  private BlockInfo completeBlock(final MutableBlockCollection bc,
       final BlockInfo block, boolean force) throws IOException {
-    BlockInfo[] fileBlocks = fileINode.getBlocks();
+    BlockInfo[] fileBlocks = bc.getBlocks();
     for(int idx = 0; idx < fileBlocks.length; idx++)
       if(fileBlocks[idx] == block) {
-        return completeBlock(fileINode, idx, force);
+        return completeBlock(bc, idx, force);
       }
     return block;
   }
@@ -529,10 +529,10 @@ public class BlockManager {
    * regardless of whether enough replicas are present. This is necessary
    * when tailing edit logs as a Standby.
    */
-  public BlockInfo forceCompleteBlock(final MutableBlockCollection fileINode,
+  public BlockInfo forceCompleteBlock(final MutableBlockCollection bc,
       final BlockInfoUnderConstruction block) throws IOException {
     block.commitBlock(block);
-    return completeBlock(fileINode, block, true);
+    return completeBlock(bc, block, true);
   }
 
   
@@ -546,14 +546,14 @@ public class BlockManager {
   * The method returns null if there is no partial block at the end.
    * The client is supposed to allocate a new block with the next call.
    *
-   * @param fileINode file
+   * @param bc file
    * @return the last block locations if the block is partial or null otherwise
    */
   public LocatedBlock convertLastBlockToUnderConstruction(
-      MutableBlockCollection fileINode) throws IOException {
-    BlockInfo oldBlock = fileINode.getLastBlock();
+      MutableBlockCollection bc) throws IOException {
+    BlockInfo oldBlock = bc.getLastBlock();
     if(oldBlock == null ||
-        fileINode.getPreferredBlockSize() == oldBlock.getNumBytes())
+        bc.getPreferredBlockSize() == oldBlock.getNumBytes())
       return null;
     assert oldBlock == getStoredBlock(oldBlock) :
       "last block of the file is not in blocksMap";
@@ -561,7 +561,7 @@ public class BlockManager {
     DatanodeDescriptor[] targets = getNodes(oldBlock);
 
     BlockInfoUnderConstruction ucBlock =
-      fileINode.setLastBlock(oldBlock, targets);
+      bc.setLastBlock(oldBlock, targets);
     blocksMap.replaceBlock(ucBlock);
 
     // Remove block from replication queue.
@@ -581,7 +581,7 @@ public class BlockManager {
         // always decrement total blocks
         -1);
 
-    final long fileLength = fileINode.computeContentSummary().getLength();
+    final long fileLength = bc.computeContentSummary().getLength();
     final long pos = fileLength - ucBlock.getNumBytes();
     return createLocatedBlock(ucBlock, pos, AccessMode.WRITE);
   }
@@ -921,8 +921,8 @@ public class BlockManager {
                             " does not exist. ");
     }
 
-    BlockCollection inode = storedBlock.getINode();
-    if (inode == null) {
+    BlockCollection bc = storedBlock.getBlockCollection();
+    if (bc == null) {
       NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " +
                                    "block " + storedBlock +
                                    " could not be marked as corrupt as it" +
@@ -936,7 +936,7 @@ public class BlockManager {
 
     // Add this replica to corruptReplicas Map
     corruptReplicas.addToCorruptReplicasMap(storedBlock, node, reason);
-    if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
+    if (countNodes(storedBlock).liveReplicas() >= bc.getReplication()) {
       // the block is over-replicated so invalidate the replicas immediately
       invalidateBlock(storedBlock, node);
     } else if (namesystem.isPopulatingReplQueues()) {
@@ -1049,7 +1049,7 @@ public class BlockManager {
     int requiredReplication, numEffectiveReplicas;
     List<DatanodeDescriptor> containingNodes, liveReplicaNodes;
     DatanodeDescriptor srcNode;
-    BlockCollection fileINode = null;
+    BlockCollection bc = null;
     int additionalReplRequired;
 
     int scheduledWork = 0;
@@ -1061,15 +1061,15 @@ public class BlockManager {
         for (int priority = 0; priority < blocksToReplicate.size(); priority++) {
           for (Block block : blocksToReplicate.get(priority)) {
             // block should belong to a file
-            fileINode = blocksMap.getINode(block);
+            bc = blocksMap.getBlockCollection(block);
             // abandoned block or block reopened for append
-            if(fileINode == null || fileINode instanceof MutableBlockCollection) {
+            if(bc == null || bc instanceof MutableBlockCollection) {
               neededReplications.remove(block, priority); // remove from neededReplications
               neededReplications.decrementReplicationIndex(priority);
               continue;
             }
 
-            requiredReplication = fileINode.getReplication();
+            requiredReplication = bc.getReplication();
 
             // get a source data-node
             containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1105,7 +1105,7 @@ public class BlockManager {
             } else {
               additionalReplRequired = 1; // Needed on a new rack
             }
-            work.add(new ReplicationWork(block, fileINode, srcNode,
+            work.add(new ReplicationWork(block, bc, srcNode,
                 containingNodes, liveReplicaNodes, additionalReplRequired,
                 priority));
           }
@@ -1127,8 +1127,8 @@ public class BlockManager {
 
       // choose replication targets: NOT HOLDING THE GLOBAL LOCK
       // It is costly to extract the filename for which chooseTargets is called,
-      // so for now we pass in the Inode itself.
-      rw.targets = blockplacement.chooseTarget(rw.fileINode,
+      // so for now we pass in the block collection itself.
+      rw.targets = blockplacement.chooseTarget(rw.bc,
           rw.additionalReplRequired, rw.srcNode, rw.liveReplicaNodes,
           excludedNodes, rw.block.getNumBytes());
     }
@@ -1147,15 +1147,15 @@ public class BlockManager {
           int priority = rw.priority;
           // Recheck since global lock was released
           // block should belong to a file
-          fileINode = blocksMap.getINode(block);
+          bc = blocksMap.getBlockCollection(block);
           // abandoned block or block reopened for append
-          if(fileINode == null || fileINode instanceof MutableBlockCollection) {
+          if(bc == null || bc instanceof MutableBlockCollection) {
             neededReplications.remove(block, priority); // remove from neededReplications
             rw.targets = null;
             neededReplications.decrementReplicationIndex(priority);
             continue;
           }
-          requiredReplication = fileINode.getReplication();
+          requiredReplication = bc.getReplication();
 
           // do not schedule more if enough replicas are already pending
           NumberReplicas numReplicas = countNodes(block);
@@ -1802,8 +1802,7 @@ assert storedBlock.findDatanode(dn) < 0 
       case COMPLETE:
       case COMMITTED:
         if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) {
-          return new BlockToMarkCorrupt(new BlockInfo(iblk, storedBlock
-              .getINode().getReplication()),
+          return new BlockToMarkCorrupt(storedBlock,
               "block is " + ucState + " and reported genstamp " +
               iblk.getGenerationStamp() + " does not match " +
               "genstamp in block map " + storedBlock.getGenerationStamp());
@@ -1823,8 +1822,7 @@ assert storedBlock.findDatanode(dn) < 0 
       if (!storedBlock.isComplete()) {
         return null; // not corrupt
       } else if (storedBlock.getGenerationStamp() != iblk.getGenerationStamp()) {
-        return new BlockToMarkCorrupt(new BlockInfo(iblk, storedBlock
-            .getINode().getReplication()),
+        return new BlockToMarkCorrupt(storedBlock,
             "reported " + reportedState + " replica with genstamp " +
             iblk.getGenerationStamp() + " does not match COMPLETE block's " +
             "genstamp in block map " + storedBlock.getGenerationStamp());
@@ -1916,7 +1914,7 @@ assert storedBlock.findDatanode(dn) < 0 
     int numCurrentReplica = countLiveNodes(storedBlock);
     if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
         && numCurrentReplica >= minReplication) {
-      completeBlock((MutableBlockCollection)storedBlock.getINode(), storedBlock, false);
+      completeBlock((MutableBlockCollection)storedBlock.getBlockCollection(), storedBlock, false);
     } else if (storedBlock.isComplete()) {
       // check whether safe replication is reached for the block
       // only complete blocks are counted towards that.
@@ -1944,7 +1942,7 @@ assert storedBlock.findDatanode(dn) < 0 
     } else {
       storedBlock = block;
     }
-    if (storedBlock == null || storedBlock.getINode() == null) {
+    if (storedBlock == null || storedBlock.getBlockCollection() == null) {
      // If this block does not belong to any file, then we are done.
       NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
           + node + " size " + block.getNumBytes()
@@ -1954,8 +1952,8 @@ assert storedBlock.findDatanode(dn) < 0 
       return block;
     }
     assert storedBlock != null : "Block must be stored by now";
-    BlockCollection fileINode = storedBlock.getINode();
-    assert fileINode != null : "Block must belong to a file";
+    BlockCollection bc = storedBlock.getBlockCollection();
+    assert bc != null : "Block must belong to a file";
 
     // add block to the datanode
     boolean added = node.addBlock(storedBlock);
@@ -1981,7 +1979,7 @@ assert storedBlock.findDatanode(dn) < 0 
 
     if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
         numLiveReplicas >= minReplication) {
-      storedBlock = completeBlock((MutableBlockCollection)fileINode, storedBlock, false);
+      storedBlock = completeBlock((MutableBlockCollection)bc, storedBlock, false);
     } else if (storedBlock.isComplete()) {
       // check whether safe replication is reached for the block
       // only complete blocks are counted towards that
@@ -1992,7 +1990,7 @@ assert storedBlock.findDatanode(dn) < 0 
     }
     
     // if file is under construction, then done for now
-    if (fileINode instanceof MutableBlockCollection) {
+    if (bc instanceof MutableBlockCollection) {
       return storedBlock;
     }
 
@@ -2002,7 +2000,7 @@ assert storedBlock.findDatanode(dn) < 0 
     }
 
     // handle underReplication/overReplication
-    short fileReplication = fileINode.getReplication();
+    short fileReplication = bc.getReplication();
     if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
       neededReplications.remove(storedBlock, numCurrentReplica,
           num.decommissionedReplicas(), fileReplication);
@@ -2129,8 +2127,8 @@ assert storedBlock.findDatanode(dn) < 0 
    * what happened with it.
    */
   private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
-    BlockCollection fileINode = block.getINode();
-    if (fileINode == null) {
+    BlockCollection bc = block.getBlockCollection();
+    if (bc == null) {
       // block does not belong to any file
       addToInvalidates(block);
       return MisReplicationResult.INVALID;
@@ -2141,7 +2139,7 @@ assert storedBlock.findDatanode(dn) < 0 
       return MisReplicationResult.UNDER_CONSTRUCTION;
     }
     // calculate current replication
-    short expectedReplication = fileINode.getReplication();
+    short expectedReplication = bc.getReplication();
     NumberReplicas num = countNodes(block);
     int numCurrentReplica = num.liveReplicas();
     // add to under-replicated queue if need to be
@@ -2258,7 +2256,7 @@ assert storedBlock.findDatanode(dn) < 0 
                               BlockPlacementPolicy replicator) {
     assert namesystem.hasWriteLock();
     // first form a rack to datanodes map and
-    BlockCollection inode = getINode(b);
+    BlockCollection bc = getBlockCollection(b);
     final Map<String, List<DatanodeDescriptor>> rackMap
         = new HashMap<String, List<DatanodeDescriptor>>();
     for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
@@ -2298,7 +2296,7 @@ assert storedBlock.findDatanode(dn) < 0 
               || (addedNode != null && !priSet.contains(addedNode))) ) {
         cur = delNodeHint;
       } else { // regular excessive replica removal
-        cur = replicator.chooseReplicaToDelete(inode, b, replication,
+        cur = replicator.chooseReplicaToDelete(bc, b, replication,
             priSet, remains);
       }
       firstOne = false;
@@ -2379,8 +2377,8 @@ assert storedBlock.findDatanode(dn) < 0 
       // necessary. In that case, put block on a possibly-will-
       // be-replicated list.
       //
-      BlockCollection fileINode = blocksMap.getINode(block);
-      if (fileINode != null) {
+      BlockCollection bc = blocksMap.getBlockCollection(block);
+      if (bc != null) {
         namesystem.decrementSafeBlockCount(block);
         updateNeededReplications(block, -1, 0);
       }
@@ -2611,7 +2609,7 @@ assert storedBlock.findDatanode(dn) < 0 
       NumberReplicas num) {
     int curReplicas = num.liveReplicas();
     int curExpectedReplicas = getReplication(block);
-    BlockCollection fileINode = blocksMap.getINode(block);
+    BlockCollection bc = blocksMap.getBlockCollection(block);
     Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
     StringBuilder nodeList = new StringBuilder();
     while (nodeIter.hasNext()) {
@@ -2624,7 +2622,7 @@ assert storedBlock.findDatanode(dn) < 0 
         + ", corrupt replicas: " + num.corruptReplicas()
         + ", decommissioned replicas: " + num.decommissionedReplicas()
         + ", excess replicas: " + num.excessReplicas()
-        + ", Is Open File: " + (fileINode instanceof MutableBlockCollection)
+        + ", Is Open File: " + (bc instanceof MutableBlockCollection)
         + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
         + srcNode + ", Is current datanode decommissioning: "
         + srcNode.isDecommissionInProgress());
@@ -2639,8 +2637,8 @@ assert storedBlock.findDatanode(dn) < 0 
     final Iterator<? extends Block> it = srcNode.getBlockIterator();
     while(it.hasNext()) {
       final Block block = it.next();
-      BlockCollection fileINode = blocksMap.getINode(block);
-      short expectedReplication = fileINode.getReplication();
+      BlockCollection bc = blocksMap.getBlockCollection(block);
+      short expectedReplication = bc.getReplication();
       NumberReplicas num = countNodes(block);
       int numCurrentReplica = num.liveReplicas();
       if (numCurrentReplica > expectedReplication) {
@@ -2662,9 +2660,9 @@ assert storedBlock.findDatanode(dn) < 0 
     final Iterator<? extends Block> it = srcNode.getBlockIterator();
     while(it.hasNext()) {
       final Block block = it.next();
-      BlockCollection fileINode = blocksMap.getINode(block);
+      BlockCollection bc = blocksMap.getBlockCollection(block);
 
-      if (fileINode != null) {
+      if (bc != null) {
         NumberReplicas num = countNodes(block);
         int curReplicas = num.liveReplicas();
         int curExpectedReplicas = getReplication(block);
@@ -2679,7 +2677,7 @@ assert storedBlock.findDatanode(dn) < 0 
             if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
               decommissionOnlyReplicas++;
             }
-            if (fileINode instanceof MutableBlockCollection) {
+            if (bc instanceof MutableBlockCollection) {
               underReplicatedInOpenFiles++;
             }
           }
@@ -2782,11 +2780,11 @@ assert storedBlock.findDatanode(dn) < 0 
 
   /* get replication factor of a block */
   private int getReplication(Block block) {
-    BlockCollection fileINode = blocksMap.getINode(block);
-    if (fileINode == null) { // block does not belong to any file
+    BlockCollection bc = blocksMap.getBlockCollection(block);
+    if (bc == null) { // block does not belong to any file
       return 0;
     }
-    return fileINode.getReplication();
+    return bc.getReplication();
   }
 
 
@@ -2858,12 +2856,12 @@ assert storedBlock.findDatanode(dn) < 0 
     return this.neededReplications.getCorruptBlockSize();
   }
 
-  public BlockInfo addINode(BlockInfo block, BlockCollection iNode) {
-    return blocksMap.addINode(block, iNode);
+  public BlockInfo addBlockCollection(BlockInfo block, BlockCollection bc) {
+    return blocksMap.addBlockCollection(block, bc);
   }
 
-  public BlockCollection getINode(Block b) {
-    return blocksMap.getINode(b);
+  public BlockCollection getBlockCollection(Block b) {
+    return blocksMap.getBlockCollection(b);
   }
 
   /** @return an iterator of the datanodes. */
@@ -3002,7 +3000,7 @@ assert storedBlock.findDatanode(dn) < 0 
   private static class ReplicationWork {
 
     private Block block;
-    private BlockCollection fileINode;
+    private BlockCollection bc;
 
     private DatanodeDescriptor srcNode;
     private List<DatanodeDescriptor> containingNodes;
@@ -3013,14 +3011,14 @@ assert storedBlock.findDatanode(dn) < 0 
     private int priority;
 
     public ReplicationWork(Block block,
-        BlockCollection fileINode,
+        BlockCollection bc,
         DatanodeDescriptor srcNode,
         List<DatanodeDescriptor> containingNodes,
         List<DatanodeDescriptor> liveReplicaNodes,
         int additionalReplRequired,
         int priority) {
       this.block = block;
-      this.fileINode = fileINode;
+      this.bc = bc;
       this.srcNode = srcNode;
       this.containingNodes = containingNodes;
       this.liveReplicaNodes = liveReplicaNodes;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java Fri May 11 02:05:31 2012
@@ -110,11 +110,11 @@ public abstract class BlockPlacementPoli
    * choose <i>numOfReplicas</i> data nodes for <i>writer</i>
    * If not, return as many as we can.
   * The base implementation extracts the pathname of the file from the
-   * specified srcInode, but this could be a costly operation depending on the
+   * specified srcBC, but this could be a costly operation depending on the
    * file system implementation. Concrete implementations of this class should
    * override this method to avoid this overhead.
    * 
-   * @param srcInode The inode of the file for which chooseTarget is being invoked.
+   * @param srcBC block collection of file for which chooseTarget is invoked.
    * @param numOfReplicas additional number of replicas wanted.
    * @param writer the writer's machine, null if not in the cluster.
    * @param chosenNodes datanodes that have been chosen as targets.
@@ -122,13 +122,13 @@ public abstract class BlockPlacementPoli
    * @return array of DatanodeDescriptor instances chosen as target 
    * and sorted as a pipeline.
    */
-  DatanodeDescriptor[] chooseTarget(BlockCollection srcInode,
+  DatanodeDescriptor[] chooseTarget(BlockCollection srcBC,
                                     int numOfReplicas,
                                     DatanodeDescriptor writer,
                                     List<DatanodeDescriptor> chosenNodes,
                                     HashMap<Node, Node> excludedNodes,
                                     long blocksize) {
-    return chooseTarget(srcInode.getName(), numOfReplicas, writer,
+    return chooseTarget(srcBC.getName(), numOfReplicas, writer,
                         chosenNodes, excludedNodes, blocksize);
   }
 
@@ -149,7 +149,7 @@ public abstract class BlockPlacementPoli
    * Decide whether deleting the specified replica of the block still makes 
    * the block conform to the configured block placement policy.
    * 
-   * @param srcInode The inode of the file to which the block-to-be-deleted belongs
+   * @param srcBC block collection of file to which block-to-be-deleted belongs
    * @param block The block to be deleted
    * @param replicationFactor The required number of replicas for this block
    * @param existingReplicas The replica locations of this block that are present
@@ -158,7 +158,7 @@ public abstract class BlockPlacementPoli
                    listed in the previous parameter.
    * @return the replica that is the best candidate for deletion
    */
-  abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcInode,
+  abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcBC,
                                       Block block, 
                                       short replicationFactor,
                                       Collection<DatanodeDescriptor> existingReplicas,

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Fri May 11 02:05:31 2012
@@ -546,7 +546,7 @@ public class BlockPlacementPolicyDefault
   }
 
   @Override
-  public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
+  public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc,
                                                  Block block,
                                                  short replicationFactor,
                                                  Collection<DatanodeDescriptor> first, 

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java Fri May 11 02:05:31 2012
@@ -25,7 +25,7 @@ import org.apache.hadoop.hdfs.util.Light
 
 /**
  * This class maintains the map from a block to its metadata.
- * block's metadata currently includes INode it belongs to and
+ * block's metadata currently includes the BlockCollection it belongs to and
  * the datanodes that store the block.
  */
 class BlocksMap {
@@ -92,21 +92,21 @@ class BlocksMap {
     blocks = null;
   }
 
-  BlockCollection getINode(Block b) {
+  BlockCollection getBlockCollection(Block b) {
     BlockInfo info = blocks.get(b);
-    return (info != null) ? info.getINode() : null;
+    return (info != null) ? info.getBlockCollection() : null;
   }
 
   /**
-   * Add block b belonging to the specified file inode to the map.
+   * Add block b belonging to the specified block collection to the map.
    */
-  BlockInfo addINode(BlockInfo b, BlockCollection iNode) {
+  BlockInfo addBlockCollection(BlockInfo b, BlockCollection bc) {
     BlockInfo info = blocks.get(b);
     if (info != b) {
       info = b;
       blocks.put(info);
     }
-    info.setINode(iNode);
+    info.setBlockCollection(bc);
     return info;
   }
 
@@ -120,7 +120,7 @@ class BlocksMap {
     if (blockInfo == null)
       return;
 
-    blockInfo.setINode(null);
+    blockInfo.setBlockCollection(null);
     for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) {
       DatanodeDescriptor dn = blockInfo.getDatanode(idx);
       dn.removeBlock(blockInfo); // remove from the list and wipe the location
@@ -168,7 +168,7 @@ class BlocksMap {
     boolean removed = node.removeBlock(info);
 
     if (info.getDatanode(0) == null     // no datanodes left
-              && info.getINode() == null) {  // does not belong to a file
+              && info.getBlockCollection() == null) {  // does not belong to a file
       blocks.remove(b);  // remove block from the map
     }
     return removed;
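
After the HDFS-3369 rename above, callers go through the BlockCollection-named
accessors throughout; a hedged sketch, with block and blockManager assumed to
be in scope:

    // getINode()/addINode()/setINode() are gone; the same lookups now use
    // the BlockCollection API:
    BlockCollection bc = blockManager.getBlockCollection(block);
    if (bc != null && !(bc instanceof MutableBlockCollection)) {
      short expected = bc.getReplication();  // closed file: check replication
    }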

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri May 11 02:05:31 2012
@@ -860,7 +860,7 @@ public class DataNode extends Configured
    */
   public String getDisplayName() {
     // NB: our DatanodeID may not be set yet
-    return hostName + ":" + getIpcPort();
+    return hostName + ":" + getXferPort();
   }
 
   /**
@@ -877,7 +877,6 @@ public class DataNode extends Configured
   /**
    * @return the datanode's IPC port
    */
-  @VisibleForTesting
   public int getIpcPort() {
     return ipcServer.getListenerAddress().getPort();
   }
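
The getDisplayName change is the HDFS-3328 fix: getIpcPort dereferences
ipcServer, which may not have been constructed yet when the display name is
first logged during startup, producing an NPE. The transfer port is available
from the beginning and is the port peers actually connect to, so it appears
to be the safer identifier here.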

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri May 11 02:05:31 2012
@@ -309,7 +309,7 @@ public class FSDirectory implements Clos
         INodeFile newF = (INodeFile)newNode;
         BlockInfo[] blocks = newF.getBlocks();
         for (int i = 0; i < blocks.length; i++) {
-          newF.setBlock(i, getBlockManager().addINode(blocks[i], newF));
+          newF.setBlock(i, getBlockManager().addBlockCollection(blocks[i], newF));
         }
       }
     } finally {
@@ -346,7 +346,7 @@ public class FSDirectory implements Clos
             fileINode.getReplication(),
             BlockUCState.UNDER_CONSTRUCTION,
             targets);
-      getBlockManager().addINode(blockInfo, fileINode);
+      getBlockManager().addBlockCollection(blockInfo, fileINode);
       fileINode.addBlock(blockInfo);
 
       if(NameNode.stateChangeLog.isDebugEnabled()) {
@@ -1127,7 +1127,7 @@ public class FSDirectory implements Clos
 
       int index = 0;
       for (BlockInfo b : newnode.getBlocks()) {
-        BlockInfo info = getBlockManager().addINode(b, newnode);
+        BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
         newnode.setBlock(index, info); // inode refers to the block in BlocksMap
         index++;
       }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java Fri May 11 02:05:31 2012
@@ -601,7 +601,7 @@ public class FSEditLogLoader {
           // OP_ADD operations as each block is allocated.
           newBI = new BlockInfo(newBlock, file.getReplication());
         }
-        fsNamesys.getBlockManager().addINode(newBI, file);
+        fsNamesys.getBlockManager().addBlockCollection(newBI, file);
         file.addBlock(newBI);
         fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
       }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java Fri May 11 02:05:31 2012
@@ -203,6 +203,10 @@ public abstract class FSEditLogOp {
     }
 
     <T extends AddCloseOp> T setBlocks(Block[] blocks) {
+      if (blocks.length > MAX_BLOCKS) {
+        throw new RuntimeException("Can't have more than " + MAX_BLOCKS +
+            " blocks in an AddCloseOp.");
+      }
       this.blocks = blocks;
       return (T)this;
     }
@@ -296,10 +300,18 @@ public abstract class FSEditLogOp {
       }
     }
 
+    public static final int MAX_BLOCKS = 1024 * 1024 * 64;
+    
     private static Block[] readBlocks(
         DataInputStream in,
         int logVersion) throws IOException {
       int numBlocks = in.readInt();
+      if (numBlocks < 0) {
+        throw new IOException("invalid negative number of blocks");
+      } else if (numBlocks > MAX_BLOCKS) {
+        throw new IOException("invalid number of blocks: " + numBlocks +
+            ".  The maximum number of blocks per file is " + MAX_BLOCKS);
+      }
       Block[] blocks = new Block[numBlocks];
       for (int i = 0; i < numBlocks; i++) {
         Block blk = new Block();
@@ -579,6 +591,7 @@ public abstract class FSEditLogOp {
     String trg;
     String[] srcs;
     long timestamp;
+    public static final int MAX_CONCAT_SRC = 1024 * 1024;
 
     private ConcatDeleteOp() {
       super(OP_CONCAT_DELETE);
@@ -594,7 +607,12 @@ public abstract class FSEditLogOp {
     }
 
     ConcatDeleteOp setSources(String[] srcs) {
+      if (srcs.length > MAX_CONCAT_SRC) {
+        throw new RuntimeException("ConcatDeleteOp can have at most " +
+            MAX_CONCAT_SRC + " sources.");
+      }
       this.srcs = srcs;
+
       return this;
     }
 
@@ -624,8 +642,8 @@ public abstract class FSEditLogOp {
       if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
         this.length = in.readInt();
         if (length < 3) { // trg, srcs.., timestamp
-          throw new IOException("Incorrect data format. "
-              + "Concat delete operation.");
+          throw new IOException("Incorrect data format " +
+              "for ConcatDeleteOp.");
         }
       }
       this.trg = FSImageSerialization.readString(in);
@@ -635,6 +653,15 @@ public abstract class FSEditLogOp {
       } else {
         srcSize = this.length - 1 - 1; // trg and timestamp
       }
+      if (srcSize < 0) {
+          throw new IOException("Incorrect data format. "
+              + "ConcatDeleteOp cannot have a negative number of data "
+              + "sources.");
+      } else if (srcSize > MAX_CONCAT_SRC) {
+          throw new IOException("Incorrect data format. "
+              + "ConcatDeleteOp can have at most " + MAX_CONCAT_SRC
+              + " sources, but we tried to have " + srcSize + " sources.");
+      }
       this.srcs = new String [srcSize];
       for(int i=0; i<srcSize;i++) {
         srcs[i]= FSImageSerialization.readString(in);
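
Both FSEditLogOp hunks apply the same HDFS-3134 rule: never let a count read
from the edit log size an allocation or a loop unchecked. The generic shape
of the guard, with MAX_BLOCKS as defined above and in a DataInputStream:

    int numBlocks = in.readInt();
    if (numBlocks < 0 || numBlocks > MAX_BLOCKS) {
      // Without this check a malformed or malicious log entry could request
      // a multi-gigabyte array and kill the NameNode with an OutOfMemoryError.
      throw new IOException("invalid number of blocks: " + numBlocks);
    }
    Block[] blocks = new Block[numBlocks];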

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri May 11 02:05:31 2012
@@ -1783,24 +1783,21 @@ public class FSNamesystem implements Nam
                 "Failed to close file " + src +
                 ". Lease recovery is in progress. Try again later.");
         } else {
-          BlockInfoUnderConstruction lastBlock=pendingFile.getLastBlock();
-          if(lastBlock != null && lastBlock.getBlockUCState() ==
-            BlockUCState.UNDER_RECOVERY) {
-            throw new RecoveryInProgressException(
-              "Recovery in progress, file [" + src + "], " +
-              "lease owner [" + lease.getHolder() + "]");
-            } else {
-              throw new AlreadyBeingCreatedException(
-                "Failed to create file [" + src + "] for [" + holder +
-                "] on client [" + clientMachine +
-                "], because this file is already being created by [" +
-                pendingFile.getClientName() + "] on [" +
-                pendingFile.getClientMachine() + "]");
-            }
-         }
+          final BlockInfo lastBlock = pendingFile.getLastBlock();
+          if (lastBlock != null
+              && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
+            throw new RecoveryInProgressException("Recovery in progress, file ["
+                + src + "], " + "lease owner [" + lease.getHolder() + "]");
+          } else {
+            throw new AlreadyBeingCreatedException("Failed to create file ["
+                + src + "] for [" + holder + "] on client [" + clientMachine
+                + "], because this file is already being created by ["
+                + pendingFile.getClientName() + "] on ["
+                + pendingFile.getClientMachine() + "]");
+          }
+        }
       }
     }
-
   }
 
   /**
@@ -2840,7 +2837,7 @@ public class FSNamesystem implements Nam
       if (storedBlock == null) {
         throw new IOException("Block (=" + lastblock + ") not found");
       }
-      INodeFile iFile = (INodeFile) storedBlock.getINode();
+      INodeFile iFile = (INodeFile) storedBlock.getBlockCollection();
       if (!iFile.isUnderConstruction() || storedBlock.isComplete()) {
         throw new IOException("Unexpected block (=" + lastblock
                               + ") since the file (=" + iFile.getLocalName()
@@ -4135,7 +4132,7 @@ public class FSNamesystem implements Nam
    * Returns whether the given block is one pointed-to by a file.
    */
   private boolean isValidBlock(Block b) {
-    return (blockManager.getINode(b) != null);
+    return (blockManager.getBlockCollection(b) != null);
   }
 
   // Distributed upgrade manager
@@ -4394,7 +4391,7 @@ public class FSNamesystem implements Nam
     }
     
     // check file inode
-    INodeFile file = (INodeFile) storedBlock.getINode();
+    INodeFile file = (INodeFile) storedBlock.getBlockCollection();
     if (file==null || !file.isUnderConstruction()) {
       throw new IOException("The file " + storedBlock + 
           " belonged to does not exist or it is not under construction.");
@@ -4706,7 +4703,7 @@ public class FSNamesystem implements Nam
 
       while (blkIterator.hasNext()) {
         Block blk = blkIterator.next();
-        INode inode = (INodeFile) blockManager.getINode(blk);
+        INode inode = (INodeFile) blockManager.getBlockCollection(blk);
         skip++;
         if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
           String src = FSDirectory.getFullPathName(inode);

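The FSNamesystem hunks above are all the same mechanical rename: stored
blocks no longer point back at an INode but at a more general block
collection, evidently so the block manager no longer depends on namespace
types directly, and namenode code downcasts to INodeFile where file-specific
state is needed. A hedged sketch of the caller-side pattern, with both types
reduced to stand-ins invented for illustration:

    // Stand-ins for org.apache.hadoop.hdfs.server.blockmanagement types.
    interface BlockCollection { }

    class INodeFileStandIn implements BlockCollection {
      boolean isUnderConstruction() { return false; }
    }

    class RenameSketch {
      // Before: INodeFile f = (INodeFile) storedBlock.getINode();
      // After:  the block exposes a BlockCollection, and callers downcast,
      //         mirroring the casts in the FSNamesystem hunks above.
      static INodeFileStandIn asFile(BlockCollection bc) {
        return (INodeFileStandIn) bc;
      }
    }
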
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java Fri May 11 02:05:31 2012
@@ -131,7 +131,7 @@ public class INodeFile extends INode imp
     }
     
     for(BlockInfo bi: newlist) {
-      bi.setINode(this);
+      bi.setBlockCollection(this);
     }
     this.blocks = newlist;
   }
@@ -164,7 +164,7 @@ public class INodeFile extends INode imp
     if(blocks != null && v != null) {
       for (BlockInfo blk : blocks) {
         v.add(blk);
-        blk.setINode(null);
+        blk.setBlockCollection(null);
       }
     }
     blocks = null;

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Fri May 11 02:05:31 2012
@@ -156,7 +156,7 @@ public class INodeFileUnderConstruction 
     BlockInfoUnderConstruction ucBlock =
       lastBlock.convertToBlockUnderConstruction(
           BlockUCState.UNDER_CONSTRUCTION, targets);
-    ucBlock.setINode(this);
+    ucBlock.setBlockCollection(this);
     setBlock(numBlocks()-1, ucBlock);
     return ucBlock;
   }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Fri May 11 02:05:31 2012
@@ -734,7 +734,7 @@ class NamenodeJspHelper {
         this.inode = null;
       } else {
         this.block = new Block(blockId);
-        this.inode = (INodeFile) blockManager.getINode(block);
+        this.inode = (INodeFile) blockManager.getBlockCollection(block);
       }
     }
 

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri May 11 02:05:31 2012
@@ -26,6 +26,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.TreeSet;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -64,9 +66,11 @@ import org.apache.hadoop.util.ToolRunner
 @InterfaceAudience.Private
 public class DFSAdmin extends FsShell {
 
-  static{
+  static {
     HdfsConfiguration.init();
   }
+  
+  private static final Log LOG = LogFactory.getLog(DFSAdmin.class);
 
   /**
    * An abstract class for the execution of a file system command
@@ -1089,6 +1093,7 @@ public class DFSAdmin extends FsShell {
       return exitCode;
     }
 
+    Exception debugException = null;
     exitCode = 0;
     try {
       if ("-report".equals(cmd)) {
@@ -1143,6 +1148,7 @@ public class DFSAdmin extends FsShell {
         printUsage("");
       }
     } catch (IllegalArgumentException arge) {
+      debugException = arge;
       exitCode = -1;
       System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
       printUsage(cmd);
@@ -1151,6 +1157,7 @@ public class DFSAdmin extends FsShell {
      // This is an error returned by the hadoop server. Print
      // out the first line of the error message and ignore the stack trace.
       exitCode = -1;
+      debugException = e;
       try {
         String[] content;
         content = e.getLocalizedMessage().split("\n");
@@ -1159,12 +1166,17 @@ public class DFSAdmin extends FsShell {
       } catch (Exception ex) {
         System.err.println(cmd.substring(1) + ": "
                            + ex.getLocalizedMessage());
+        debugException = ex;
       }
     } catch (Exception e) {
       exitCode = -1;
+      debugException = e;
       System.err.println(cmd.substring(1) + ": "
                          + e.getLocalizedMessage());
-    } 
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Exception encountered:", debugException);
+    }
     return exitCode;
   }
 

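The DFSAdmin change keeps a reference to whichever exception was last caught
and, once command dispatch is finished, logs it with a full stack trace at
DEBUG level, while the user-facing behavior (one-line message on stderr,
exit code -1) is unchanged. A self-contained sketch of the same pattern; the
class and its run() signature are invented for illustration:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class DebugLogSketch {
      private static final Log LOG = LogFactory.getLog(DebugLogSketch.class);

      static int run(Runnable command) {
        Exception debugException = null;
        int exitCode = 0;
        try {
          command.run();
        } catch (IllegalArgumentException e) {
          debugException = e;
          exitCode = -1;
          System.err.println("bad arguments: " + e.getLocalizedMessage());
        } catch (Exception e) {
          debugException = e;
          exitCode = -1;
          System.err.println(e.getLocalizedMessage());
        }
        // Null-safe: commons-logging accepts a null Throwable here, in
        // which case only the message is printed.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Exception encountered:", debugException);
        }
        return exitCode;
      }
    }

To actually see these traces, the DFSAdmin logger has to be raised to DEBUG;
with a stock log4j setup that would be a line such as
log4j.logger.org.apache.hadoop.hdfs.tools.DFSAdmin=DEBUG in log4j.properties.
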
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Fri May 11 02:05:31 2012
@@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1335791-1337002

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1335791-1337002

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1335791-1337002

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1335791-1337002

Propchange: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1335791-1337002

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Fri May 11 02:05:31 2012
@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -706,13 +707,59 @@ public class DFSTestUtil {
         .join(nameservices));
   }
   
+  private static DatanodeID getDatanodeID(String ipAddr) {
+    return new DatanodeID(ipAddr, "localhost",
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+  }
+
+  public static DatanodeID getLocalDatanodeID() {
+    return new DatanodeID("127.0.0.1", "localhost",
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+  }
+
+  public static DatanodeID getLocalDatanodeID(int port) {
+    return new DatanodeID("127.0.0.1", "localhost", "",
+        port, port, port);
+  }
+
   public static DatanodeDescriptor getLocalDatanodeDescriptor() {
-    return new DatanodeDescriptor(
-        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+    return new DatanodeDescriptor(getLocalDatanodeID());
   }
 
   public static DatanodeInfo getLocalDatanodeInfo() {
-    return new DatanodeInfo(
-        new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+    return new DatanodeInfo(getLocalDatanodeID());
+  }
+
+  public static DatanodeInfo getDatanodeInfo(String ipAddr) {
+    return new DatanodeInfo(getDatanodeID(ipAddr));
+  }
+  
+  public static DatanodeInfo getLocalDatanodeInfo(int port) {
+    return new DatanodeInfo(getLocalDatanodeID(port));
+  }
+
+  public static DatanodeInfo getDatanodeInfo(String ipAddr, 
+      String host, int port) {
+    return new DatanodeInfo(new DatanodeID(ipAddr, host, port));
+  }
+
+  public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
+      String hostname, AdminStates adminState) {
+    return new DatanodeInfo(ipAddr, hostname, "storage",
+        DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
+        1, 2, 3, 4, 5, 6, "local", adminState);
+  }
+
+  public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+      String rackLocation) {
+    return getDatanodeDescriptor(ipAddr, DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+        rackLocation);
+  }
+
+  public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+      int port, String rackLocation) {
+    return new DatanodeDescriptor(new DatanodeID(ipAddr, port), rackLocation);
   }
 }

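The new DFSTestUtil factory methods centralize DatanodeID/DatanodeInfo
construction so individual tests stop hard-coding constructor signatures that
keep changing; the remaining test hunks in this commit all switch over to
them. An invented usage sketch, exercising only methods defined in the hunk
above:

    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

    public class DatanodeFactoryUsage {
      public static void main(String[] args) {
        // Local datanode on the default port.
        DatanodeID localId = DFSTestUtil.getLocalDatanodeID();
        // Local datanode bound to an explicit port, e.g. from a mock server.
        DatanodeID portId = DFSTestUtil.getLocalDatanodeID(50020);
        // Info for a non-existent datanode at a given IP.
        DatanodeInfo bogus = DFSTestUtil.getDatanodeInfo("1.2.3.4");
        // Descriptor on a specific rack, for placement-policy tests.
        DatanodeDescriptor dn =
            DFSTestUtil.getDatanodeDescriptor("10.0.0.1", "/rack1");
        System.out.println(localId + " " + portId + " " + bogus + " " + dn);
      }
    }
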
Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Fri May 11 02:05:31 2012
@@ -333,7 +333,7 @@ public class TestDFSClientRetries extend
       LocatedBlock badLocatedBlock = new LocatedBlock(
         goodLocatedBlock.getBlock(),
         new DatanodeInfo[] {
-          new DatanodeInfo(new DatanodeID("255.255.255.255", 234))
+          DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
         },
         goodLocatedBlock.getStartOffset(),
         false);
@@ -627,8 +627,7 @@ public class TestDFSClientRetries extend
     server.start();
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    DatanodeID fakeDnId = new DatanodeID(
-        "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
     
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
     LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java Fri May 11 02:05:31 2012
@@ -31,12 +31,15 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.ipc.RemoteException;
+import org.junit.Assert;
 import org.junit.Test;
 
 /**
@@ -295,4 +298,43 @@ public class TestFileAppend{
       cluster.shutdown();
     }
   }
+
+  /** Test two consecutive appends on a file with a full block. */
+  @Test
+  public void testAppendTwice() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    final FileSystem fs1 = cluster.getFileSystem();
+    final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
+    try {
+  
+      final Path p = new Path("/testAppendTwice/foo");
+      final int len = 1 << 16;
+      final byte[] fileContents = AppendTestUtil.initBuffer(len);
+
+      {
+        // create a new file with a full block.
+        FSDataOutputStream out = fs2.create(p, true, 4096, (short)1, len);
+        out.write(fileContents, 0, len);
+        out.close();
+      }
+  
+      // 1st append does not add any data, so that the last block remains
+      // full and the last block in INodeFileUnderConstruction is a BlockInfo
+      // but not a BlockInfoUnderConstruction.
+      fs2.append(p);
+      
+      // 2nd append should get AlreadyBeingCreatedException
+      fs1.append(p);
+      Assert.fail();
+    } catch(RemoteException re) {
+      AppendTestUtil.LOG.info("Got an exception:", re);
+      Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
+          re.getClassName());
+    } finally {
+      fs2.close();
+      fs1.close();
+      cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Fri May 11 02:05:31 2012
@@ -121,8 +121,7 @@ public class TestGetBlocks extends TestC
       getBlocksWithException(namenode, dataNodes[0], -1);
 
       // get blocks of size BlockSize from a non-existent datanode
-      DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo();
-      info.setIpAddr("1.2.3.4");
+      DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
       getBlocksWithException(namenode, info, 2);
     } finally {
       cluster.shutdown();

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java Fri May 11 02:05:31 2012
@@ -62,7 +62,7 @@ public class TestReplaceDatanodeOnFailur
     final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
     datanodes[0] = new DatanodeInfo[0];
     for(int i = 0; i < infos.length; ) {
-      infos[i] = new DatanodeInfo(new DatanodeID("dn" + i, 100));
+      infos[i] = DFSTestUtil.getLocalDatanodeInfo(50020 + i);
       i++;
       datanodes[i] = new DatanodeInfo[i];
       System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Fri May 11 02:05:31 2012
@@ -131,7 +131,7 @@ public class TestPBHelper {
 
   @Test
   public void testConvertDatanodeID() {
-    DatanodeID dn = new DatanodeID("node", "node", "sid", 1, 2, 3);
+    DatanodeID dn = DFSTestUtil.getLocalDatanodeID();
     DatanodeIDProto dnProto = PBHelper.convert(dn);
     DatanodeID dn2 = PBHelper.convert(dnProto);
     compare(dn, dn2);
@@ -280,10 +280,6 @@ public class TestPBHelper {
     return new ExtendedBlock("bpid", blkid, 100, 2);
   }
   
-  private DatanodeInfo getDNInfo() {
-    return new DatanodeInfo(new DatanodeID("node", "node", "sid", 0, 1, 2));
-  }
-  
   private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
       assertEquals(dn1.getAdminState(), dn2.getAdminState());
       assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
@@ -316,7 +312,9 @@ public class TestPBHelper {
   
   @Test
   public void testConvertRecoveringBlock() {
-    DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
+    DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
     RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
     RecoveringBlockProto bProto = PBHelper.convert(b);
     RecoveringBlock b1 = PBHelper.convert(bProto);
@@ -330,7 +328,9 @@ public class TestPBHelper {
   
   @Test
   public void testConvertBlockRecoveryCommand() {
-    DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
+    DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
 
     List<RecoveringBlock> blks = ImmutableList.of(
       new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
@@ -400,13 +400,11 @@ public class TestPBHelper {
   
   @Test
   public void testConvertLocatedBlock() {
-    DatanodeInfo [] dnInfos = new DatanodeInfo[3];
-    dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 5002, 20000, 10001, 9999,
-        59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS);
-    dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 5002, 20000, 10001, 9999,
-        59, 69, 32, "local", AdminStates.DECOMMISSIONED);
-    dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 5002, 20000, 10001, 9999,
-        59, 69, 32, "local", AdminStates.NORMAL);
+    DatanodeInfo [] dnInfos = {
+        DFSTestUtil.getLocalDatanodeInfo("1.1.1.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
+        DFSTestUtil.getLocalDatanodeInfo("2.2.2.2", "h2", AdminStates.DECOMMISSIONED),
+        DFSTestUtil.getLocalDatanodeInfo("3.3.3.3", "h3", AdminStates.NORMAL)
+    };
     LocatedBlock lb = new LocatedBlock(
         new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
     LocatedBlockProto lbProto = PBHelper.convert(lb);
@@ -424,7 +422,7 @@ public class TestPBHelper {
   
   @Test
   public void testConvertDatanodeRegistration() {
-    DatanodeID dnId = new DatanodeID("host", "host", "xyz", 0, 1, 0);
+    DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
     BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
     ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
         getBlockKey(1), keys);

Modified: hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1337003&r1=1337002&r2=1337003&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/HDFS-3092/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Fri May 11 02:05:31 2012
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -279,8 +280,7 @@ public class TestBlockToken {
     server.start();
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    DatanodeID fakeDnId = new DatanodeID("localhost",
-        "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
 
     ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
     LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);


