Return-Path: X-Original-To: apmail-hadoop-hdfs-commits-archive@minotaur.apache.org Delivered-To: apmail-hadoop-hdfs-commits-archive@minotaur.apache.org Received: from mail.apache.org (hermes.apache.org [140.211.11.3]) by minotaur.apache.org (Postfix) with SMTP id B829210A8B for ; Fri, 12 Jul 2013 18:11:45 +0000 (UTC) Received: (qmail 43044 invoked by uid 500); 12 Jul 2013 18:11:44 -0000 Delivered-To: apmail-hadoop-hdfs-commits-archive@hadoop.apache.org Received: (qmail 42790 invoked by uid 500); 12 Jul 2013 18:11:42 -0000 Mailing-List: contact hdfs-commits-help@hadoop.apache.org; run by ezmlm Precedence: bulk List-Help: List-Unsubscribe: List-Post: List-Id: Reply-To: hdfs-dev@hadoop.apache.org Delivered-To: mailing list hdfs-commits@hadoop.apache.org Received: (qmail 42780 invoked by uid 99); 12 Jul 2013 18:11:41 -0000 Received: from athena.apache.org (HELO athena.apache.org) (140.211.11.136) by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 12 Jul 2013 18:11:41 +0000 X-ASF-Spam-Status: No, hits=-2000.0 required=5.0 tests=ALL_TRUSTED X-Spam-Check-By: apache.org Received: from [140.211.11.4] (HELO eris.apache.org) (140.211.11.4) by apache.org (qpsmtpd/0.29) with ESMTP; Fri, 12 Jul 2013 18:11:39 +0000 Received: from eris.apache.org (localhost [127.0.0.1]) by eris.apache.org (Postfix) with ESMTP id 3E3172388980; Fri, 12 Jul 2013 18:11:19 +0000 (UTC) Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Subject: svn commit: r1502643 - in /hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/test/java/org/apache/hadoop/hdfs/ Date: Fri, 12 Jul 2013 18:11:19 -0000 To: hdfs-commits@hadoop.apache.org From: suresh@apache.org X-Mailer: svnmailer-1.0.9 Message-Id: <20130712181119.3E3172388980@eris.apache.org> X-Virus-Checked: Checked by ClamAV on apache.org Author: suresh Date: Fri Jul 12 18:11:18 2013 New Revision: 1502643 URL: http://svn.apache.org/r1502643 Log: 
HDFS-4912. Merge 1502640 from branch-2 Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1502643&r1=1502642&r2=1502643&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original) +++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Jul 12 18:11:18 2013 @@ -191,6 +191,8 @@ Release 2.1.0-beta - 2013-07-02 HDFS-4645. Move from randomly generated block ID to sequentially generated block ID. (Arpit Agarwal via szetszwo) + HDFS-4912. Cleanup FSNamesystem#startFileInternal. (suresh) + OPTIMIZATIONS HDFS-4465. Optimize datanode ReplicasMap and ReplicaInfo. 
(atm) Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1502643&r1=1502642&r2=1502643&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original) +++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Jul 12 18:11:18 2013 @@ -1874,6 +1874,7 @@ public class FSNamesystem implements Nam if (!DFSUtil.isValidName(src)) { throw new InvalidPathException(src); } + blockManager.verifyReplication(src, replication, clientMachine); boolean skipSync = false; final HdfsFileStatus stat; @@ -1885,6 +1886,8 @@ public class FSNamesystem implements Nam + "): " + blockSize + " < " + minBlockSize); } byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); + boolean create = flag.contains(CreateFlag.CREATE); + boolean overwrite = flag.contains(CreateFlag.OVERWRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); @@ -1892,8 +1895,8 @@ public class FSNamesystem implements Nam throw new SafeModeException("Cannot create file" + src, safeMode); } src = FSDirectory.resolvePath(src, pathComponents, dir); - startFileInternal(pc, src, permissions, holder, clientMachine, flag, - createParent, replication, blockSize); + startFileInternal(pc, src, permissions, holder, clientMachine, + create, overwrite, createParent, replication, blockSize); stat = dir.getFileInfo(src, false); } catch (StandbyException se) { skipSync = true; @@ -1911,25 +1914,18 @@ public class FSNamesystem implements Nam } /** - * Create new or 
open an existing file for append.

+ * Create a new file or overwrite an existing file.
* - * In case of opening the file for append, the method returns the last - * block of the file if this is a partial block, which can still be used - * for writing more data. The client uses the returned block locations - * to form the data pipeline for this block.
- * The method returns null if the last block is full or if this is a - * new file. The client then allocates a new block with the next call - * using {@link NameNode#addBlock()}.

- * - * For description of parameters and exceptions thrown see + * Once the file is created, the client then allocates a new block with the next + * call using {@link NameNode#addBlock()}. + *

+ * For description of parameters and exceptions thrown see * {@link ClientProtocol#create()} - * - * @return the last block locations if the block is partial or null otherwise */ - private LocatedBlock startFileInternal(FSPermissionChecker pc, String src, + private void startFileInternal(FSPermissionChecker pc, String src, PermissionStatus permissions, String holder, String clientMachine, - EnumSet flag, boolean createParent, short replication, - long blockSize) throws SafeModeException, FileAlreadyExistsException, + boolean create, boolean overwrite, boolean createParent, + short replication, long blockSize) throws FileAlreadyExistsException, AccessControlException, UnresolvedLinkException, FileNotFoundException, ParentNotDirectoryException, IOException { assert hasWriteLock(); @@ -1941,11 +1937,8 @@ public class FSNamesystem implements Nam + "; already exists as a directory."); } final INodeFile myFile = INodeFile.valueOf(inode, src, true); - - boolean overwrite = flag.contains(CreateFlag.OVERWRITE); - boolean append = flag.contains(CreateFlag.APPEND); if (isPermissionEnabled) { - if (append || (overwrite && myFile != null)) { + if (overwrite && myFile != null) { checkPathAccess(pc, src, FsAction.WRITE); } else { checkAncestorAccess(pc, src, FsAction.WRITE); @@ -1957,65 +1950,95 @@ public class FSNamesystem implements Nam } try { - blockManager.verifyReplication(src, replication, clientMachine); - boolean create = flag.contains(CreateFlag.CREATE); - if (myFile == null) { if (!create) { - throw new FileNotFoundException("failed to overwrite or append to non-existent file " + throw new FileNotFoundException("failed to overwrite non-existent file " + src + " on client " + clientMachine); } } else { - // File exists - must be one of append or overwrite if (overwrite) { - delete(src, true); + delete(src, true); // File exists - delete if overwrite } else { - // Opening an existing file for write - may need to recover lease. 
+ // If lease soft limit time is expired, recover the lease recoverLeaseInternal(myFile, src, holder, clientMachine, false); - - if (!append) { - throw new FileAlreadyExistsException("failed to create file " + src - + " on client " + clientMachine - + " because the file exists"); - } + throw new FileAlreadyExistsException("failed to create file " + src + + " on client " + clientMachine + " because the file exists"); } } + checkFsObjectLimit(); final DatanodeDescriptor clientNode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine); - if (append && myFile != null) { - final INodeFile f = INodeFile.valueOf(myFile, src); - return prepareFileForWrite(src, f, holder, clientMachine, clientNode, - true, iip.getLatestSnapshot()); - } else { - // Now we can add the name to the filesystem. This file has no - // blocks associated with it. - // - checkFsObjectLimit(); - - // increment global generation stamp - INodeFileUnderConstruction newNode = dir.addFile(src, permissions, - replication, blockSize, holder, clientMachine, clientNode); - if (newNode == null) { - throw new IOException("DIR* NameSystem.startFile: " + - "Unable to add file to namespace."); - } - leaseManager.addLease(newNode.getClientName(), src); + INodeFileUnderConstruction newNode = dir.addFile(src, permissions, + replication, blockSize, holder, clientMachine, clientNode); + if (newNode == null) { + throw new IOException("DIR* NameSystem.startFile: " + + "Unable to add file to namespace."); + } + leaseManager.addLease(newNode.getClientName(), src); - // record file record in log, record new generation stamp - getEditLog().logOpenFile(src, newNode); - if (NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: " - +"add "+src+" to namespace for "+holder); - } + // record file record in log, record new generation stamp + getEditLog().logOpenFile(src, newNode); + if (NameNode.stateChangeLog.isDebugEnabled()) { + NameNode.stateChangeLog.debug("DIR* 
NameSystem.startFile: " + +"add "+src+" to namespace for "+holder); } } catch (IOException ie) { NameNode.stateChangeLog.warn("DIR* NameSystem.startFile: " +ie.getMessage()); throw ie; } - return null; + } + + /** + * Append to an existing file. + *

+ * + * The method returns the last block of the file if this is a partial block, + * which can still be used for writing more data. The client uses the returned + * block locations to form the data pipeline for this block.
+ * The method returns null if the last block is full. The client then + * allocates a new block with the next call using {@link NameNode#addBlock()}. + *

+ * + * For description of parameters and exceptions thrown see + * {@link ClientProtocol#append(String, String)} + * + * @return the last block locations if the block is partial or null otherwise + */ + private LocatedBlock appendFileInternal(FSPermissionChecker pc, String src, + String holder, String clientMachine) throws AccessControlException, + UnresolvedLinkException, FileNotFoundException, IOException { + assert hasWriteLock(); + // Verify that the destination does not exist as a directory already. + final INodesInPath iip = dir.getINodesInPath4Write(src); + final INode inode = iip.getLastINode(); + if (inode != null && inode.isDirectory()) { + throw new FileAlreadyExistsException("Cannot append to directory " + src + + "; already exists as a directory."); + } + if (isPermissionEnabled) { + checkPathAccess(pc, src, FsAction.WRITE); + } + + try { + if (inode == null) { + throw new FileNotFoundException("failed to append to non-existent file " + + src + " on client " + clientMachine); + } + final INodeFile myFile = INodeFile.valueOf(inode, src, true); + // Opening an existing file for write - may need to recover lease. + recoverLeaseInternal(myFile, src, holder, clientMachine, false); + + final DatanodeDescriptor clientNode = + blockManager.getDatanodeManager().getDatanodeByHost(clientMachine); + return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode, + true, iip.getLatestSnapshot()); + } catch (IOException ie) { + NameNode.stateChangeLog.warn("DIR* NameSystem.append: " +ie.getMessage()); + throw ie; + } } /** @@ -2206,14 +2229,6 @@ public class FSNamesystem implements Nam "Append is not enabled on this NameNode. 
Use the " + DFS_SUPPORT_APPEND_KEY + " configuration option to enable it."); } - if (NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src - + ", holder=" + holder - + ", clientMachine=" + clientMachine); - } - if (!DFSUtil.isValidName(src)) { - throw new InvalidPathException(src); - } LocatedBlock lb = null; FSPermissionChecker pc = getPermissionChecker(); @@ -2226,9 +2241,7 @@ public class FSNamesystem implements Nam throw new SafeModeException("Cannot append to file" + src, safeMode); } src = FSDirectory.resolvePath(src, pathComponents, dir); - lb = startFileInternal(pc, src, null, holder, clientMachine, - EnumSet.of(CreateFlag.APPEND), - false, blockManager.maxReplication, 0); + lb = appendFileInternal(pc, src, holder, clientMachine); } catch (StandbyException se) { skipSync = true; throw se; Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1502643&r1=1502642&r2=1502643&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original) +++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Fri Jul 12 18:11:18 2013 @@ -60,6 +60,7 @@ import org.apache.hadoop.fs.ParentNotDir import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import 
org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -502,7 +503,7 @@ public class TestFileCreation { DistributedFileSystem dfs = null; try { cluster.waitActive(); - dfs = (DistributedFileSystem)cluster.getFileSystem(); + dfs = cluster.getFileSystem(); DFSClient client = dfs.dfs; // create a new file. @@ -562,7 +563,7 @@ public class TestFileCreation { DistributedFileSystem dfs = null; try { cluster.waitActive(); - dfs = (DistributedFileSystem)cluster.getFileSystem(); + dfs = cluster.getFileSystem(); DFSClient client = dfs.dfs; // create a new file. @@ -703,7 +704,7 @@ public class TestFileCreation { stm4.close(); // verify that new block is associated with this file - DFSClient client = ((DistributedFileSystem)fs).dfs; + DFSClient client = fs.dfs; LocatedBlocks locations = client.getNamenode().getBlockLocations( file1.toString(), 0, Long.MAX_VALUE); System.out.println("locations = " + locations.locatedBlockCount()); @@ -951,7 +952,7 @@ public class TestFileCreation { DistributedFileSystem dfs = null; try { cluster.waitActive(); - dfs = (DistributedFileSystem)cluster.getFileSystem(); + dfs = cluster.getFileSystem(); // create a new file. final String f = DIR + "foo"; @@ -1012,7 +1013,7 @@ public class TestFileCreation { DistributedFileSystem dfs = null; try { cluster.waitActive(); - dfs = (DistributedFileSystem)cluster.getFileSystem(); + dfs = cluster.getFileSystem(); // create a new file. final String f = DIR + "foofs"; @@ -1044,7 +1045,7 @@ public class TestFileCreation { DistributedFileSystem dfs = null; try { cluster.waitActive(); - dfs = (DistributedFileSystem)cluster.getFileSystem(); + dfs = cluster.getFileSystem(); // create a new file. 
final String f = DIR + "testFsCloseAfterClusterShutdown"; @@ -1177,7 +1178,7 @@ public class TestFileCreation { DistributedFileSystem dfs = null; try { cluster.waitActive(); - dfs = (DistributedFileSystem)cluster.getFileSystem(); + dfs = cluster.getFileSystem(); DFSClient client = dfs.dfs; final Path f = new Path("/testFileIdMismatch.txt"); Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1502643&r1=1502642&r2=1502643&view=diff ============================================================================== --- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original) +++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Fri Jul 12 18:11:18 2013 @@ -130,7 +130,7 @@ public class TestLeaseRecovery2 { size = AppendTestUtil.nextInt(FILE_SIZE); filepath = createFile("/immediateRecoverLease-longlease", size, false); - // test recoverLese from a different client + // test recoverLease from a different client recoverLease(filepath, null); verifyFile(dfs, filepath, actual, size);