Subject: svn commit: r808718 - in /hadoop/hdfs/branches/HDFS-265: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/protocol/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoo...
Date: Fri, 28 Aug 2009 01:13:26 -0000
To: hdfs-commits@hadoop.apache.org
From: shv@apache.org

Author: shv
Date: Fri Aug 28 01:13:24 2009
New Revision: 808718

URL: http://svn.apache.org/viewvc?rev=808718&view=rev
Log:
HDFS-565. Introduce block committing logic during new block allocation
and file close. Contributed by Konstantin Shvachko.
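For orientation, a minimal client-side sketch of the new calling convention follows. Only the ClientProtocol signatures addBlock(String, String, Block) and complete(String, String, Block) come from this change; the wrapper class and loop below are hypothetical (real clients go through DFSClient).

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Hypothetical wrapper; real clients drive this loop from inside DFSClient.
class BlockCommitClientSketch {
  static void writeFile(ClientProtocol namenode, String src, String clientName,
                        int numBlocks) throws IOException {
    Block previous = null;               // nothing to commit before the first block
    for (int i = 0; i < numBlocks; i++) {
      // addBlock() now also commits `previous`: the name-node records the actual
      // length and generation stamp the client wrote before allocating a new block.
      LocatedBlock lb = namenode.addBlock(src, clientName, previous);
      // ... stream data to lb.getLocations(), updating the block's length ...
      previous = lb.getBlock();
    }
    // complete() commits the last block; false means the blocks are not yet
    // minimally replicated and the caller should retry.
    while (!namenode.complete(src, clientName, previous)) {
      // back off briefly and try again (omitted)
    }
  }
}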
Modified:
    hadoop/hdfs/branches/HDFS-265/CHANGES.txt
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfoUnderConstruction.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
    hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
    hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java

Modified: hadoop/hdfs/branches/HDFS-265/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/CHANGES.txt?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-265/CHANGES.txt Fri Aug 28 01:13:24 2009
@@ -26,6 +26,9 @@
     HDFS-544. Add a "rbw" subdir to DataNode data directory. (hairong)
 
+    HDFS-565. Introduce block committing logic during new block allocation
+    and file close. (shv)
+
   IMPROVEMENTS
 
     HDFS-381. Remove blocks from DataNode maps when corresponding file

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/DFSClient.java Fri Aug 28 01:13:24 2009
@@ -2432,6 +2432,8 @@
           } catch (InterruptedException  e) {
           }
         }
+        // update block length
+        block.setNumBytes(offsetInBlock + one.dataPos - one.dataStart);
       }
 
       if (ackQueue.isEmpty()) {  // done receiving all acks
@@ -2442,6 +2444,9 @@
             blockStream.writeInt(0);
             blockStream.flush();
           }
+        } else {
+          // update block length
+          block.setNumBytes(offsetInBlock + one.dataPos - one.dataStart);
         }
         if (LOG.isDebugEnabled()) {
           LOG.debug("DataStreamer block " + block +
@@ -2801,6 +2806,7 @@
         if (!success) {
           LOG.info("Abandoning block " + block);
           namenode.abandonBlock(block, src, clientName);
+          block = null;
 
           // Connection failed.
          // Let's wait a little bit and retry
          retry = true;
@@ -2908,7 +2914,7 @@
      long localstart = System.currentTimeMillis();
      while (true) {
        try {
-          return namenode.addBlock(src, clientName);
+          return namenode.addBlock(src, clientName, block);
        } catch (RemoteException e) {
          IOException ue = e.unwrapRemoteException(FileNotFoundException.class,
@@ -2997,6 +3003,10 @@
      processDatanodeError(true, true);
    }
 
+    Block getBlock() {
+      return block;
+    }
+
    DatanodeInfo[] getNodes() {
      return nodes;
    }
@@ -3364,8 +3374,10 @@
        }
        flushInternal();             // flush all data to Datanodes
+        // get last block before destroying the streamer
+        Block lastBlock = streamer.getBlock();
        closeThreads(false);
-        completeFile();
+        completeFile(lastBlock);
        leasechecker.remove(src);
      } finally {
        closed = true;
@@ -3374,11 +3386,11 @@
 
    // should be called holding (this) lock since setTestFilename() may
    // be called during unit tests
-    private void completeFile() throws IOException {
+    private void completeFile(Block last) throws IOException {
      long localstart = System.currentTimeMillis();
      boolean fileComplete = false;
      while (!fileComplete) {
-        fileComplete = namenode.complete(src, clientName);
+        fileComplete = namenode.complete(src, clientName, last);
        if (!fileComplete) {
          if (!clientRunning ||
              (hdfsTimeout > 0 &&

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Fri Aug 28 01:13:24 2009
@@ -43,9 +43,9 @@
  * Compared to the previous version the following changes have been introduced:
  * (Only the latest change is reflected.
  * The log of historical changes can be retrieved from the svn).
- * 45: add create flag for create command, see Hadoop-5438
+ * 46: add Block parameter to addBlock() and complete()
  */
-  public static final long versionID = 45L;
+  public static final long versionID = 46L;
 
  ///////////////////////////////////////
  // File contents
@@ -85,8 +85,8 @@
   * {@link #rename(String, String)} it until the file is completed
   * or explicitly as a result of lease expiration.
   *
-   * Blocks have a maximum size.  Clients that intend to
-   * create multi-block files must also use {@link #addBlock(String, String)}.
+   * Blocks have a maximum size.  Clients that intend to create
+   * multi-block files must also use {@link #addBlock(String, String, Block)}.
   *
   * @param src path of the file being created.
   * @param masked masked permission.
@@ -177,9 +177,14 @@
   * addBlock() allocates a new block and datanodes the block data
   * should be replicated to.
   *
+   * addBlock() also commits the previous block by reporting
+   * to the name-node the actual generation stamp and the length
+   * of the block that the client has transmitted to data-nodes.
+   *
   * @return LocatedBlock allocated block information.
   */
-  public LocatedBlock addBlock(String src, String clientName) throws IOException;
+  public LocatedBlock addBlock(String src, String clientName,
+                               Block previous) throws IOException;
 
  /**
   * The client is done writing data to the given filename, and would
@@ -187,13 +192,18 @@
   *
   * The function returns whether the file has been closed successfully.
   * If the function returns false, the caller should try again.
+   *
+   * close() also commits the last block of the file by reporting
+   * to the name-node the actual generation stamp and the length
+   * of the block that the client has transmitted to data-nodes.
   *
   * A call to complete() will not return true until all the file's
   * blocks have been replicated the minimum number of times.  Thus,
   * DataNode failures may cause a client to call complete() several
   * times before succeeding.
   */
-  public boolean complete(String src, String clientName) throws IOException;
+  public boolean complete(String src, String clientName,
+                          Block last) throws IOException;
 
  /**
   * The client wants to report corrupted blocks (blocks with specified

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfo.java Fri Aug 28 01:13:24 2009
@@ -85,7 +85,7 @@
    assert index >= 0 && index*3+2 < triplets.length : "Index is out of bound";
    BlockInfo info = (BlockInfo)triplets[index*3+2];
    assert info == null ||
-           BlockInfo.class.getName().equals(info.getClass().getName()) :
+           info.getClass().getName().startsWith(BlockInfo.class.getName()) :
              "BlockInfo is expected at " + index*3;
    return info;
  }
@@ -295,9 +295,18 @@
   *
   * @return BlockInfoUnderConstruction - an under construction block.
   */
-  BlockInfoUnderConstruction convertToBlockUnderConstruction() {
-    assert !isUnderConstruction() : "the block is already under construction";
-    return new BlockInfoUnderConstruction(this, getINode().getReplication());
+  BlockInfoUnderConstruction convertToBlockUnderConstruction(
+      BlockUCState s, DatanodeDescriptor[] targets) {
+    if(!isUnderConstruction()) {
+      return new BlockInfoUnderConstruction(
+          this, getINode().getReplication(), s, targets);
+    }
+    // the block is already under construction
+    BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this;
+    ucBlock.setBlockUCState(s);
+    ucBlock.setLocations(targets);
+    ucBlock.setLastRecoveryTime(0);
+    return ucBlock;
  }
 
  @Override

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfoUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfoUnderConstruction.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfoUnderConstruction.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockInfoUnderConstruction.java Fri Aug 28 01:13:24 2009
@@ -104,13 +104,17 @@
 
  /**
   * Return the state of the block under construction.
-   * @see {@link BlockUCState}
+   * @see BlockUCState
   */
  @Override // BlockInfo
  BlockUCState getBlockUCState() {
    return blockUCState;
  }
 
+  void setBlockUCState(BlockUCState s) {
+    blockUCState = s;
+  }
+
  /**
   * Commit block's length and generation stamp as reported by the client.
   * Set block state to {@link BlockUCState#COMMITTED}.

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Fri Aug 28 01:13:24 2009
@@ -240,9 +240,14 @@
   */
  void commitLastBlock(INodeFileUnderConstruction fileINode,
                       Block commitBlock) throws IOException {
+    if(commitBlock == null)
+      return; // not committing, this is a block allocation retry
    BlockInfoUnderConstruction lastBlock = fileINode.getLastBlock();
    if(lastBlock == null)
-      return;
+      return; // no blocks in file yet
+    assert lastBlock.getNumBytes() <= commitBlock.getNumBytes() :
+      "commitBlock length is less than the stored one " +
+      commitBlock.getNumBytes() + " vs. " + lastBlock.getNumBytes();
    lastBlock.commitBlock(commitBlock);
 
    // complete the penultimate block
@@ -274,17 +279,18 @@
  }
 
  /**
-   * Convert the last block of the file to an under constroction block.
+   * Convert the last block of the file to an under construction block.
   * @param fileINode file
+   * @param targets data-nodes that will form the pipeline for this block
   */
-  void convertLastBlockToUnderConstruction(INodeFile fileINode)
-  throws IOException {
+  void convertLastBlockToUnderConstruction(
+      INodeFileUnderConstruction fileINode,
+      DatanodeDescriptor[] targets) throws IOException {
    BlockInfo oldBlock = fileINode.getLastBlock();
-    if(oldBlock == null || oldBlock.isUnderConstruction())
+    if(oldBlock == null)
      return;
    BlockInfoUnderConstruction ucBlock =
-      oldBlock.convertToBlockUnderConstruction();
-    fileINode.setBlock(fileINode.numBlocks()-1, ucBlock);
+      fileINode.setLastBlock(oldBlock, targets);
    blocksMap.replaceBlock(ucBlock);
  }

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Fri Aug 28 01:13:24 2009
@@ -279,10 +279,6 @@
    INodeFileUnderConstruction fileINode =
                         (INodeFileUnderConstruction)inodes[inodes.length-1];
 
-    // commit the last block and complete the penultimate block
-    // SHV !!! second parameter should be a block reported by client
-    getBlockManager().commitLastBlock(fileINode, fileINode.getLastBlock());
-
    // check quota limits and updated space consumed
    updateCount(inodes, inodes.length-1, 0,
        fileINode.getPreferredBlockSize()*fileINode.getReplication());

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Aug 28 01:13:24 2009
@@ -956,7 +956,6 @@
      // Recreate in-memory lease record.
      //
      INodeFile node = (INodeFile) myFile;
-      blockManager.convertLastBlockToUnderConstruction(node);
      INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
                                      node.getLocalNameBytes(),
                                      node.getReplication(),
@@ -1018,7 +1017,7 @@
    LocatedBlock lb = null;
    synchronized (this) {
      INodeFileUnderConstruction file = (INodeFileUnderConstruction)dir.getFileINode(src);
-      BlockInfoUnderConstruction lastBlock = file.getLastBlock();
+      BlockInfo lastBlock = file.getLastBlock();
      if (lastBlock != null) {
        assert lastBlock == blockManager.getStoredBlock(lastBlock) :
          "last block of the file is not in blocksMap";
@@ -1030,7 +1029,7 @@
          targets[i].removeBlock(lastBlock);
        }
        // convert last block to under-construction and set its locations
-        file.setLastBlock(lastBlock, targets);
+        blockManager.convertLastBlockToUnderConstruction(file, targets);
 
        lb = new LocatedBlock(lastBlock, targets,
                              fileLength-lastBlock.getNumBytes());
@@ -1081,7 +1080,8 @@
   * client to "try again later".
   */
  public LocatedBlock getAdditionalBlock(String src,
-                                         String clientName
+                                         String clientName,
+                                         Block previous
                                         ) throws IOException {
    long fileLength, blockSize;
    int replication;
@@ -1101,6 +1101,9 @@
    INodeFileUnderConstruction pendingFile = checkLease(src, clientName);
 
+    // commit the last block and complete the penultimate block
+    blockManager.commitLastBlock(pendingFile, previous);
+
    //
    // If we fail this, bad things happen!
    //
@@ -1215,15 +1218,18 @@
    COMPLETE_SUCCESS
  }
 
-  public CompleteFileStatus completeFile(String src, String holder) throws IOException {
-    CompleteFileStatus status = completeFileInternal(src, holder);
+  public CompleteFileStatus completeFile(String src,
+                                         String holder,
+                                         Block last) throws IOException {
+    CompleteFileStatus status = completeFileInternal(src, holder, last);
    getEditLog().logSync();
    return status;
  }
-
-  private synchronized CompleteFileStatus completeFileInternal(String src,
-                                                                String holder) throws IOException {
+  private synchronized CompleteFileStatus completeFileInternal(
+    String src,
+    String holder,
+    Block last) throws IOException {
    NameNode.stateChangeLog.debug("DIR* NameSystem.completeFile: " + src + " for " + holder);
    if (isInSafeMode())
      throw new SafeModeException("Cannot complete file " + src, safeMode);
@@ -1244,7 +1250,12 @@
                                   ("from " + pendingFile.getClientMachine()))
                                 );
      return CompleteFileStatus.OPERATION_FAILED;
-    } else if (!checkFileProgress(pendingFile, true)) {
+    }
+
+    // commit the last block and complete the penultimate block
+    blockManager.commitLastBlock(pendingFile, last);
+
+    if (!checkFileProgress(pendingFile, true)) {
      return CompleteFileStatus.STILL_WAITING;
    }
@@ -1578,14 +1589,11 @@
    leaseManager.renewLease(lease);
  }
 
-  private void finalizeINodeFileUnderConstruction(String src,
+  private void finalizeINodeFileUnderConstruction(
+      String src,
      INodeFileUnderConstruction pendingFile) throws IOException {
    leaseManager.removeLease(pendingFile.clientName, src);
 
-    // commit the last block and complete the penultimate block
-    // SHV !!! second parameter should be a block reported by client
-    blockManager.commitLastBlock(pendingFile, pendingFile.getLastBlock());
-
    // The file is no longer pending.
    // Create permanent INode, update blocks
    INodeFile newFile = pendingFile.convertToInodeFile();
@@ -1671,6 +1679,9 @@
      LOG.info("commitBlockSynchronization(" + lastblock + ") successful");
      return;
    }
+
+    // commit the last block and complete the penultimate block
+    blockManager.commitLastBlock(pendingFile, lastblock);
 
    //remove lease, close file
    finalizeINodeFileUnderConstruction(src, pendingFile);

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java Fri Aug 28 01:13:24 2009
@@ -21,6 +21,7 @@
 
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 
 class INodeFileUnderConstruction extends INodeFile {
 
@@ -117,19 +118,18 @@
   * Convert the last block of the file to an under-construction block.
   * Set its locations.
   */
-  void setLastBlock(BlockInfo lastBlock, DatanodeDescriptor[] targets)
+  BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
+                                          DatanodeDescriptor[] targets)
  throws IOException {
    if (blocks == null || blocks.length == 0) {
      throw new IOException("Trying to update non-existant block. " +
          "File is empty.");
    }
-    BlockInfoUnderConstruction ucBlock;
-    if(lastBlock.isUnderConstruction())
-      ucBlock = (BlockInfoUnderConstruction)lastBlock;
-    else
-      ucBlock = new BlockInfoUnderConstruction(lastBlock, getReplication());
-    ucBlock.setLocations(targets);
-    ucBlock.setLastRecoveryTime(0);
-    blocks[blocks.length - 1] = ucBlock;
+    BlockInfoUnderConstruction ucBlock =
+      lastBlock.convertToBlockUnderConstruction(
+          BlockUCState.UNDER_CONSTRUCTION, targets);
+    ucBlock.setINode(this);
+    setBlock(numBlocks()-1, ucBlock);
+    return ucBlock;
  }
 }

Modified: hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Aug 28 01:13:24 2009
@@ -603,11 +603,12 @@
 
  /**
  */
-  public LocatedBlock addBlock(String src,
-                               String clientName) throws IOException {
+  public LocatedBlock addBlock(String src, String clientName,
+                               Block previous) throws IOException {
    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
                         +src+" for "+clientName);
-    LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, clientName);
+    LocatedBlock locatedBlock =
+      namesystem.getAdditionalBlock(src, clientName, previous);
    if (locatedBlock != null)
      myMetrics.numAddBlockOps.inc();
    return locatedBlock;
@@ -626,9 +627,11 @@
  }
 
  /** {@inheritDoc} */
-  public boolean complete(String src, String clientName) throws IOException {
+  public boolean complete(String src, String clientName,
+                          Block last) throws IOException {
    stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName);
-    CompleteFileStatus returnCode = namesystem.completeFile(src, clientName);
+    CompleteFileStatus returnCode =
+      namesystem.completeFile(src, clientName, last);
    if (returnCode == CompleteFileStatus.STILL_WAITING) {
      return false;
    } else if (returnCode == CompleteFileStatus.COMPLETE_SUCCESS) {

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java Fri Aug 28 01:13:24 2009
@@ -136,7 +136,7 @@
      return versionID;
    }
 
-    public LocatedBlock addBlock(String src, String clientName)
+    public LocatedBlock addBlock(String src, String clientName, Block previous)
    throws IOException
    {
      num_calls++;
@@ -167,7 +167,7 @@
 
    public void abandonBlock(Block b, String src, String holder) throws IOException {}
 
-    public boolean complete(String src, String clientName) throws IOException { return false; }
+    public boolean complete(String src, String clientName, Block last) throws IOException { return false; }
 
    public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {}

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java Fri Aug 28 01:13:24 2009
@@ -416,9 +416,9 @@
      System.out.println("testFileCreationError2: "
          + "The file has " + locations.locatedBlockCount() + " blocks.");
 
-      // add another block to the file
+      // add one block to the file
      LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
-          client.clientName);
+          client.clientName, null);
      System.out.println("testFileCreationError2: "
          + "Added block " + location.getBlock());

Modified: hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=808718&r1=808717&r2=808718&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/HDFS-265/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Fri Aug 28 01:13:24 2009
@@ -519,7 +519,8 @@
          .of(CreateFlag.OVERWRITE)), replication, BLOCK_SIZE);
      long end = System.currentTimeMillis();
      for(boolean written = !closeUponCreate; !written;
-        written = nameNode.complete(fileNames[daemonId][inputIdx], clientName));
+        written = nameNode.complete(fileNames[daemonId][inputIdx],
+                                    clientName, null));
      return end-start;
    }
 
@@ -889,8 +890,8 @@
      nameNode.create(fileName, FsPermission.getDefault(), clientName,
          new EnumSetWritable(EnumSet.of(CreateFlag.OVERWRITE)), replication,
          BLOCK_SIZE);
-      addBlocks(fileName, clientName);
-      nameNode.complete(fileName, clientName);
+      Block lastBlock = addBlocks(fileName, clientName);
+      nameNode.complete(fileName, clientName, lastBlock);
    }
    // prepare block reports
    for(int idx=0; idx < nrDatanodes; idx++) {
@@ -898,9 +899,12 @@
    }
  }
 
-  private void addBlocks(String fileName, String clientName) throws IOException {
+  private Block addBlocks(String fileName, String clientName)
+  throws IOException {
+    Block prevBlock = null;
    for(int jdx = 0; jdx < blocksPerFile; jdx++) {
-      LocatedBlock loc = nameNode.addBlock(fileName, clientName);
+      LocatedBlock loc = nameNode.addBlock(fileName, clientName, prevBlock);
+      prevBlock = loc.getBlock();
      for(DatanodeInfo dnInfo : loc.getLocations()) {
        int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
        datanodes[dnIdx].addBlock(loc.getBlock());
@@ -910,6 +914,7 @@
            new String[] {""});
      }
    }
+    return prevBlock;
  }
 
  /**
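As a closing note, the commit rules that BlockManager.commitLastBlock() introduces (skip the commit on a null previous block, skip it when the file has no blocks yet, otherwise adopt the client-reported length and generation stamp) can be modelled in isolation. The stand-alone sketch below is illustrative only; BlockStub and its fields are hypothetical stand-ins for the real BlockInfo classes.

// Stand-alone model of the commit rules added by this patch; not the actual
// BlockManager source.
class BlockCommitRuleSketch {
  static final class BlockStub {
    long numBytes;
    long generationStamp;
    BlockStub(long numBytes, long generationStamp) {
      this.numBytes = numBytes;
      this.generationStamp = generationStamp;
    }
  }

  /** Returns true when the stored last block was actually committed. */
  static boolean commitLastBlock(BlockStub lastBlock, BlockStub commitBlock) {
    if (commitBlock == null)
      return false;   // null previous block: this is a block allocation retry
    if (lastBlock == null)
      return false;   // no blocks in the file yet
    assert lastBlock.numBytes <= commitBlock.numBytes :
        "commitBlock length is less than the stored one";
    // adopt the length and generation stamp the client actually wrote;
    // in the real code the block also moves to the COMMITTED state here
    lastBlock.numBytes = commitBlock.numBytes;
    lastBlock.generationStamp = commitBlock.generationStamp;
    return true;
  }
}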