From: wheat9@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Subject: hadoop git commit: HDFS-7468. Moving verify* functions to corresponding classes. Contributed by Li Lu.
Date: Thu, 4 Dec 2014 22:10:09 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 258623ff8 -> 26d8dec75


HDFS-7468. Moving verify* functions to corresponding classes. Contributed by Li Lu.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26d8dec7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26d8dec7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26d8dec7

Branch: refs/heads/trunk
Commit: 26d8dec756da1d9bd3df3b41a4dd5d8ff03bc5b2
Parents: 258623f
Author: Haohui Mai
Authored: Thu Dec 4 14:09:45 2014 -0800
Committer: Haohui Mai
Committed: Thu Dec 4 14:09:45 2014 -0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../hdfs/server/namenode/FSDirRenameOp.java   | 54 +++++++++++++--
 .../hdfs/server/namenode/FSDirSnapshotOp.java | 20 +++++-
 .../hdfs/server/namenode/FSDirectory.java     | 72 ++------------------
 4 files changed, 78 insertions(+), 71 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26d8dec7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2775285..4432024 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -427,6 +427,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7458. Add description to the nfs ports in core-site.xml used by nfs
     test to avoid confusion (Yongjun Zhang via brandonli)
 
+    HDFS-7468. Moving verify* functions to corresponding classes.
+    (Li Lu via wheat9)
+
   OPTIMIZATIONS
 
   BUG FIXES
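
Every hunk below applies the same refactoring pattern: a verify* instance
method on FSDirectory becomes a static method on the operation class that
owns the operation (FSDirRenameOp, FSDirSnapshotOp), with the FSDirectory
passed in as an explicit parameter. A toy, self-contained sketch of that
pattern (all names here are hypothetical, not Hadoop APIs):

class Dir {
  private boolean skipChecks;
  // Narrow accessor added so the op class can read formerly-private state,
  // mirroring the FSDirectory.shouldSkipQuotaChecks() added in this commit.
  boolean shouldSkipChecks() { return skipChecks; }
}

class RenameOp {
  // Static, op-scoped verification; the directory is an explicit argument.
  static void verifyForRename(Dir dir) {
    if (dir.shouldSkipChecks()) {
      return; // same early-exit shape as verifyQuotaForRename below
    }
    // ... op-specific checks ...
  }
}
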
http://git-wip-us.apache.org/repos/asf/hadoop/blob/26d8dec7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index f371f05..08241c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
@@ -73,6 +74,51 @@ class FSDirRenameOp {
   }
 
   /**
+   * Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
+   * dstInodes[dstInodes.length-1]
+   */
+  static void verifyQuotaForRename(FSDirectory fsd,
+      INode[] src, INode[] dst)
+      throws QuotaExceededException {
+    if (!fsd.getFSNamesystem().isImageLoaded() || fsd.shouldSkipQuotaChecks()) {
+      // Do not check quota if edits log is still being processed
+      return;
+    }
+    int i = 0;
+    while(src[i] == dst[i]) { i++; }
+    // src[i - 1] is the last common ancestor.
+
+    final Quota.Counts delta = src[src.length - 1].computeQuotaUsage();
+
+    // Reduce the required quota by dst that is being removed
+    final int dstIndex = dst.length - 1;
+    if (dst[dstIndex] != null) {
+      delta.subtract(dst[dstIndex].computeQuotaUsage());
+    }
+    FSDirectory.verifyQuota(dst, dstIndex, delta.get(Quota.NAMESPACE),
+        delta.get(Quota.DISKSPACE), src[i - 1]);
+  }
+
+  /**
+   * Checks file system limits (max component length and max directory items)
+   * during a rename operation.
+   */
+  static void verifyFsLimitsForRename(FSDirectory fsd,
+      INodesInPath srcIIP,
+      INodesInPath dstIIP)
+      throws FSLimitException.PathComponentTooLongException,
+             FSLimitException.MaxDirectoryItemsExceededException {
+    byte[] dstChildName = dstIIP.getLastLocalName();
+    INode[] dstInodes = dstIIP.getINodes();
+    int pos = dstInodes.length - 1;
+    fsd.verifyMaxComponentLength(dstChildName, dstInodes, pos);
+    // Do not enforce max directory items if renaming within same directory.
+    if (srcIIP.getINode(-2) != dstIIP.getINode(-2)) {
+      fsd.verifyMaxDirItems(dstInodes, pos);
+    }
+  }
+
+  /**
    * Change a path name
    *
    * @param fsd FSDirectory
@@ -129,8 +175,8 @@ class FSDirRenameOp {
     fsd.ezManager.checkMoveValidity(srcIIP, dstIIP, src);
 
     // Ensure dst has quota to accommodate rename
-    fsd.verifyFsLimitsForRename(srcIIP, dstIIP);
-    fsd.verifyQuotaForRename(srcIIP.getINodes(), dstIIP.getINodes());
+    verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
+    verifyQuotaForRename(fsd, srcIIP.getINodes(), dstIIP.getINodes());
 
     RenameOperation tx = new RenameOperation(fsd, src, dst, srcIIP, dstIIP);
 
@@ -310,8 +356,8 @@ class FSDirRenameOp {
     }
 
     // Ensure dst has quota to accommodate rename
-    fsd.verifyFsLimitsForRename(srcIIP, dstIIP);
-    fsd.verifyQuotaForRename(srcIIP.getINodes(), dstIIP.getINodes());
+    verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
+    verifyQuotaForRename(fsd, srcIIP.getINodes(), dstIIP.getINodes());
 
     RenameOperation tx = new RenameOperation(fsd, src, dst, srcIIP, dstIIP);
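
For reference, the core of the quota computation verifyQuotaForRename performs:
walk the two inode arrays in lockstep to find the last common ancestor, charge
the full usage of the moved subtree, and credit back whatever an overwritten
destination inode was already consuming. A self-contained sketch (the Node type
and nsUsage field are toys standing in for INode and Quota.Counts, and only the
namespace dimension is modeled):

final class RenameQuotaSketch {
  static final class Node {
    final long nsUsage;  // namespace usage of the subtree rooted here
    Node(long nsUsage) { this.nsUsage = nsUsage; }
  }

  // Returns the namespace delta that dst's ancestors below the last common
  // ancestor must absorb when src's last inode moves onto dst's last slot.
  static long nsDelta(Node[] src, Node[] dst) {
    int i = 0;
    while (src[i] == dst[i]) { i++; }  // identity compare, as in the patch
    // src[i - 1] is the last common ancestor; usage above it is unchanged,
    // which is why the real code passes it to FSDirectory.verifyQuota.
    long delta = src[src.length - 1].nsUsage;
    Node replaced = dst[dst.length - 1];
    if (replaced != null) {
      delta -= replaced.nsUsage;       // an overwritten dst frees its usage
    }
    return delta;
  }

  public static void main(String[] args) {
    Node root = new Node(10), a = new Node(4), b = new Node(3), f = new Node(2);
    // rename /a/f to /b/f where /b/f does not exist yet
    System.out.println(nsDelta(new Node[]{root, a, f},
                               new Node[]{root, b, null}));  // prints 2
  }
}
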
http://git-wip-us.apache.org/repos/asf/hadoop/blob/26d8dec7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
index aa751a7..bfd7019 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -32,6 +35,19 @@ import java.io.IOException;
 import java.util.List;
 
 class FSDirSnapshotOp {
+  /** Verify if the snapshot name is legal. */
+  static void verifySnapshotName(FSDirectory fsd, String snapshotName,
+                                 String path)
+      throws FSLimitException.PathComponentTooLongException {
+    if (snapshotName.contains(Path.SEPARATOR)) {
+      throw new HadoopIllegalArgumentException(
+          "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"");
+    }
+    final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
+    fsd.verifyINodeName(bytes);
+    fsd.verifyMaxComponentLength(bytes, path, 0);
+  }
+
   /** Allow snapshot on a directory. */
   static void allowSnapshot(FSDirectory fsd, SnapshotManager snapshotManager,
                             String path) throws IOException {
@@ -82,7 +98,7 @@ class FSDirSnapshotOp {
           snapshotName);
       }
     }
-    fsd.verifySnapshotName(snapshotName, snapshotRoot);
+    verifySnapshotName(fsd, snapshotName, snapshotRoot);
     fsd.writeLock();
     try {
       snapshotPath = snapshotManager.createSnapshot(snapshotRoot, snapshotName);
@@ -103,7 +119,7 @@ class FSDirSnapshotOp {
       FSPermissionChecker pc = fsd.getPermissionChecker();
       fsd.checkOwner(pc, path);
     }
-    fsd.verifySnapshotName(snapshotNewName, path);
+    verifySnapshotName(fsd, snapshotNewName, path);
    fsd.writeLock();
     try {
       snapshotManager.renameSnapshot(path, snapshotOldName, snapshotNewName);
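
The relocated verifySnapshotName composes three checks visible in this diff: no
path separator in the name, no collision with the reserved ".snapshot"
component (verifyINodeName), and the configured component-length limit
(verifyMaxComponentLength, where 0 disables the check). A standalone
approximation, with the Hadoop constants inlined and Hadoop exception types
replaced by standard ones:

import java.nio.charset.StandardCharsets;

final class SnapshotNameSketch {
  static final String SEPARATOR = "/";                // Path.SEPARATOR
  static final String DOT_SNAPSHOT_DIR = ".snapshot"; // reserved component

  static void verifySnapshotName(String snapshotName, int maxComponentLength) {
    if (snapshotName.contains(SEPARATOR)) {
      throw new IllegalArgumentException(
          "Snapshot name cannot contain \"" + SEPARATOR + "\"");
    }
    // verifyINodeName: the name must not shadow the reserved directory
    if (DOT_SNAPSHOT_DIR.equals(snapshotName)) {
      throw new IllegalArgumentException(
          "\"" + DOT_SNAPSHOT_DIR + "\" is a reserved name");
    }
    // verifyMaxComponentLength: a limit of 0 means the check is disabled
    byte[] bytes = snapshotName.getBytes(StandardCharsets.UTF_8);
    if (maxComponentLength != 0 && bytes.length > maxComponentLength) {
      throw new IllegalArgumentException("snapshot name exceeds "
          + maxComponentLength + " bytes");
    }
  }
}
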
http://git-wip-us.apache.org/repos/asf/hadoop/blob/26d8dec7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index ffc2653..950c9fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -291,6 +291,10 @@ public class FSDirectory implements Closeable {
     }
   }
 
+  boolean shouldSkipQuotaChecks() {
+    return skipQuotaCheck;
+  }
+
   /** Enable quota verification */
   void enableQuotaChecks() {
     skipQuotaCheck = false;
@@ -1095,7 +1099,7 @@ public class FSDirectory implements Closeable {
    *          Pass null if a node is not being moved.
    * @throws QuotaExceededException if quota limit is exceeded.
    */
-  private static void verifyQuota(INode[] inodes, int pos, long nsDelta,
+  static void verifyQuota(INode[] inodes, int pos, long nsDelta,
       long dsDelta, INode commonAncestor) throws QuotaExceededException {
     if (nsDelta <= 0 && dsDelta <= 0) {
       // if quota is being freed or not being consumed
       return;
     }
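
Only verifyQuota's widened visibility and its early return are visible in this
message; the body that follows is cut off. As a rough sketch of what such a
check has to do (the loop and the Dir type below are assumptions, not the
actual Hadoop code): walk dst's ancestors from the insertion point up to, but
not past, the common ancestor, and fail if any quota-carrying directory cannot
absorb the delta.

final class VerifyQuotaSketch {
  static final class Dir {
    final long nsQuota;  // -1 means no namespace limit on this directory
    final long nsUsed;
    Dir(long nsQuota, long nsUsed) { this.nsQuota = nsQuota; this.nsUsed = nsUsed; }
  }

  static void verifyQuota(Dir[] inodes, int pos, long nsDelta, Dir commonAncestor) {
    if (nsDelta <= 0) {
      return;  // quota being freed or not consumed: nothing to check
    }
    // Stop at the common ancestor: usage at or above it is unchanged by the move.
    for (int i = pos - 1; i >= 0 && inodes[i] != commonAncestor; i--) {
      Dir d = inodes[i];
      if (d.nsQuota >= 0 && d.nsUsed + nsDelta > d.nsQuota) {
        throw new IllegalStateException(
            "namespace quota exceeded at path depth " + i);
      }
    }
  }
}
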
@@ -1120,69 +1124,7 @@
       }
     }
   }
-
-  /**
-   * Verify quota for rename operation where srcInodes[srcInodes.length-1] moves
-   * dstInodes[dstInodes.length-1]
-   *
-   * @param src directory from where node is being moved.
-   * @param dst directory to where node is moved to.
-   * @throws QuotaExceededException if quota limit is exceeded.
-   */
-  void verifyQuotaForRename(INode[] src, INode[] dst)
-      throws QuotaExceededException {
-    if (!namesystem.isImageLoaded() || skipQuotaCheck) {
-      // Do not check quota if edits log is still being processed
-      return;
-    }
-    int i = 0;
-    while(src[i] == dst[i]) { i++; }
-    // src[i - 1] is the last common ancestor.
-    final Quota.Counts delta = src[src.length - 1].computeQuotaUsage();
-
-    // Reduce the required quota by dst that is being removed
-    final int dstIndex = dst.length - 1;
-    if (dst[dstIndex] != null) {
-      delta.subtract(dst[dstIndex].computeQuotaUsage());
-    }
-    verifyQuota(dst, dstIndex, delta.get(Quota.NAMESPACE),
-        delta.get(Quota.DISKSPACE), src[i - 1]);
-  }
-
-  /**
-   * Checks file system limits (max component length and max directory items)
-   * during a rename operation.
-   *
-   * @param srcIIP INodesInPath containing every inode in the rename source
-   * @param dstIIP INodesInPath containing every inode in the rename destination
-   * @throws PathComponentTooLongException child's name is too long.
-   * @throws MaxDirectoryItemsExceededException too many children.
-   */
-  void verifyFsLimitsForRename(INodesInPath srcIIP, INodesInPath dstIIP)
-      throws PathComponentTooLongException, MaxDirectoryItemsExceededException {
-    byte[] dstChildName = dstIIP.getLastLocalName();
-    INode[] dstInodes = dstIIP.getINodes();
-    int pos = dstInodes.length - 1;
-    verifyMaxComponentLength(dstChildName, dstInodes, pos);
-    // Do not enforce max directory items if renaming within same directory.
-    if (srcIIP.getINode(-2) != dstIIP.getINode(-2)) {
-      verifyMaxDirItems(dstInodes, pos);
-    }
-  }
-
-  /** Verify if the snapshot name is legal. */
-  void verifySnapshotName(String snapshotName, String path)
-      throws PathComponentTooLongException {
-    if (snapshotName.contains(Path.SEPARATOR)) {
-      throw new HadoopIllegalArgumentException(
-          "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"");
-    }
-    final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
-    verifyINodeName(bytes);
-    verifyMaxComponentLength(bytes, path, 0);
-  }
-
+
   /** Verify if the inode name is legal. */
   void verifyINodeName(byte[] childName) throws HadoopIllegalArgumentException {
     if (Arrays.equals(HdfsConstants.DOT_SNAPSHOT_DIR_BYTES, childName)) {
@@ -1202,7 +1144,7 @@
    * @param pos int position of new child in path
    * @throws PathComponentTooLongException child's name is too long.
    */
-  private void verifyMaxComponentLength(byte[] childName, Object parentPath,
+  void verifyMaxComponentLength(byte[] childName, Object parentPath,
       int pos) throws PathComponentTooLongException {
     if (maxComponentLength == 0) {
       return;
     }
@@ -1230,7 +1172,7 @@
    * @param pos int position of new child in pathComponents
    * @throws MaxDirectoryItemsExceededException too many children.
    */
-  private void verifyMaxDirItems(INode[] pathComponents, int pos)
+  void verifyMaxDirItems(INode[] pathComponents, int pos)
       throws MaxDirectoryItemsExceededException {
     final INodeDirectory parent = pathComponents[pos-1].asDirectory();
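
The message is truncated inside verifyMaxDirItems, but the two file-system
limit checks these now package-visible helpers enforce can be sketched
standalone. Only the maxComponentLength == 0 guard is visible above; the
actual comparisons below are assumptions about the shape of the remainder.

final class FsLimitsSketch {
  // Mirrors the visible guard: a maxComponentLength of 0 disables the check.
  static void verifyMaxComponentLength(byte[] childName, int maxComponentLength) {
    if (maxComponentLength == 0) {
      return;
    }
    if (childName.length > maxComponentLength) {
      throw new IllegalArgumentException("path component of " + childName.length
          + " bytes exceeds limit of " + maxComponentLength);
    }
  }

  // Assumed shape: adding one child to the parent directory must not push its
  // item count past maxDirItems (0 or negative meaning "no limit").
  static void verifyMaxDirItems(int parentChildCount, int maxDirItems) {
    if (maxDirItems > 0 && parentChildCount + 1 > maxDirItems) {
      throw new IllegalStateException("directory item limit of "
          + maxDirItems + " exceeded");
    }
  }
}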