From: kihwal@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-10744. Internally optimize path component resolution. Contributed by Daryn Sharp.
Date: Fri, 2 Sep 2016 16:32:23 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5a779cce8 -> 3dac73080


HDFS-10744. Internally optimize path component resolution. Contributed by Daryn Sharp.
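In short, the patch moves reserved-path component handling out of every caller and into FSDirectory#resolvePath itself: the getPathComponentsForReservedPath helper is deleted and resolvePath loses its byte[][] parameter. A minimal sketch of the call-site pattern repeated throughout the diff below; the wrapper class and method names are illustrative, not part of the patch, and the HDFS namenode classes are assumed visible:

import java.io.IOException;

// Sketch only, not committed code: the before/after shape of the call
// sites this patch rewrites across the FSDir*Op classes.
class ResolvePathCallSites {
  // Before: each caller pre-computed components for /.reserved paths and
  // threaded them through resolvePath(). (The helper is deleted by this
  // patch, so this no longer compiles against the new tree.)
  static String before(FSDirectory fsd, FSPermissionChecker pc, String src)
      throws IOException {
    byte[][] pathComponents =
        FSDirectory.getPathComponentsForReservedPath(src);
    return fsd.resolvePath(pc, src, pathComponents);
  }

  // After: resolvePath() derives the components internally, so callers
  // shrink to a single line.
  static String after(FSDirectory fsd, FSPermissionChecker pc, String src)
      throws IOException {
    return fsd.resolvePath(pc, src);
  }
}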
(cherry picked from commit 1ef8d7a638df5150b8426755af034839d5f88ca2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3dac7308
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3dac7308
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3dac7308

Branch: refs/heads/branch-2.8
Commit: 3dac73080a7cab3652512d56510235fdef157fa7
Parents: 5a779cc
Author: Kihwal Lee <kihwal@apache.org>
Authored: Fri Sep 2 11:31:48 2016 -0500
Committer: Kihwal Lee <kihwal@apache.org>
Committed: Fri Sep 2 11:31:48 2016 -0500

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |  18 ++--
 .../hdfs/server/namenode/FSDirAppendOp.java     |   4 +-
 .../hdfs/server/namenode/FSDirAttrOp.java       |  22 ++--
 .../hdfs/server/namenode/FSDirDeleteOp.java     |   5 +-
 .../server/namenode/FSDirEncryptionZoneOp.java  |   8 +-
 .../hdfs/server/namenode/FSDirMkdirOp.java      |   3 +-
 .../hdfs/server/namenode/FSDirRenameOp.java     |  12 +--
 .../server/namenode/FSDirStatAndListingOp.java  |  27 ++---
 .../hdfs/server/namenode/FSDirSymlinkOp.java    |   3 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |   4 +-
 .../hdfs/server/namenode/FSDirWriteFileOp.java  |  15 +--
 .../hdfs/server/namenode/FSDirXAttrOp.java      |  13 +--
 .../hdfs/server/namenode/FSDirectory.java       | 100 +++++++++----------
 .../hdfs/server/namenode/FSNamesystem.java      |  16 +--
 .../hdfs/server/namenode/TestINodeFile.java     |  51 ++++------
 15 files changed, 115 insertions(+), 186 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 0c572b5..296bed2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -39,8 +39,7 @@ class FSDirAclOp {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -65,8 +64,7 @@ class FSDirAclOp {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -90,8 +88,7 @@ class FSDirAclOp {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -115,8 +112,7 @@ class FSDirAclOp {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -135,9 +131,8 @@ class FSDirAclOp {
       throws IOException {
     String src = srcArg;
     checkAclsConfigFlag(fsd);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip;
     fsd.writeLock();
     try {
@@ -155,8 +150,7 @@ class FSDirAclOp {
       FSDirectory fsd, String src) throws IOException {
     checkAclsConfigFlag(fsd);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     String srcs = FSDirectory.normalizePath(src);
     fsd.readLock();
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
index e5b1392..f96cf69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
@@ -82,14 +82,12 @@ final class FSDirAppendOp {
       final boolean logRetryCache) throws IOException {
     assert fsn.hasWriteLock();

-    final byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final LocatedBlock lb;
     final FSDirectory fsd = fsn.getFSDirectory();
     final String src;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       final INodesInPath iip = fsd.getINodesInPath4Write(src);
       // Verify that the destination does not exist as a directory already
       final INode inode = iip.getLastINode();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index ef618e9..e56d131 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -57,11 +57,10 @@ public class FSDirAttrOp {
       throw new InvalidPathException(src);
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       unprotectedSetPermission(fsd, src, permission);
@@ -79,11 +78,10 @@ public class FSDirAttrOp {
       throw new InvalidPathException(src);
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       fsd.checkOwner(pc, iip);
       if (!pc.isSuperUser()) {
@@ -106,12 +104,11 @@ public class FSDirAttrOp {
       FSDirectory fsd, String src, long mtime, long atime)
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);

     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       // Write access is required to set access and modification times
       if (fsd.isPermissionEnabled()) {
@@ -139,10 +136,9 @@ public class FSDirAttrOp {
     bm.verifyReplication(src, replication, null);
     final boolean isFile;
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       final INodesInPath iip = fsd.getINodesInPath4Write(src);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
@@ -187,11 +183,10 @@ public class FSDirAttrOp {
           DFS_STORAGE_POLICY_ENABLED_KEY));
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = FSDirectory.resolvePath(src, pathComponents, fsd);
+      src = FSDirectory.resolvePath(src, fsd);
       iip = fsd.getINodesInPath4Write(src);

       if (fsd.isPermissionEnabled()) {
@@ -214,11 +209,9 @@ public class FSDirAttrOp {
   static BlockStoragePolicy getStoragePolicy(FSDirectory fsd, BlockManager bm,
       String path) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(path);
     fsd.readLock();
     try {
-      path = fsd.resolvePath(pc, path, pathComponents);
+      path = fsd.resolvePath(pc, path);
       final INodesInPath iip = fsd.getINodesInPath(path, false);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.READ);
@@ -237,10 +230,9 @@ public class FSDirAttrOp {
   static long getPreferredBlockSize(FSDirectory fsd, String src)
       throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       final INodesInPath iip = fsd.getINodesInPath(src, false);
       if (fsd.isPermissionEnabled()) {
         fsd.checkTraverse(pc, iip);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 006fbc2..38951e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -97,9 +97,8 @@ class FSDirDeleteOp {
       throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath4Write(src, false);
     if (!recursive && fsd.isNonEmptyDirectory(iip)) {
       throw new PathIsNotEmptyDirectoryException(src + " is non empty");
@@ -109,7 +108,7 @@ class FSDirDeleteOp {
           FsAction.ALL, true);
     }
     if (recursive && fsd.isNonEmptyDirectory(iip)) {
-      checkProtectedDescendants(fsd, fsd.normalizePath(src));
+      checkProtectedDescendants(fsd, src);
     }

     return deleteInternal(fsn, src, iip, logRetryCache);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index f3e4d46..ba9e9d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -146,8 +146,6 @@ final class FSDirEncryptionZoneOp {
   static HdfsFileStatus createEncryptionZone(final FSDirectory fsd,
       final String srcArg, final FSPermissionChecker pc, final String cipher,
       final String keyName, final boolean logRetryCache) throws IOException {
-    final byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final CipherSuite suite = CipherSuite.convert(cipher);
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     final String src;
@@ -157,7 +155,7 @@ final class FSDirEncryptionZoneOp {

     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       final XAttr ezXAttr = fsd.ezManager.createEncryptionZone(src, suite,
           version, keyName);
       xAttrs.add(ezXAttr);
@@ -180,14 +178,12 @@ final class FSDirEncryptionZoneOp {
   static Map.Entry<INodesInPath, EncryptionZone> getEZForPath(
       final FSDirectory fsd, final String srcArg, final FSPermissionChecker pc)
       throws IOException {
-    final byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final String src;
     final INodesInPath iip;
     final EncryptionZone ret;
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       iip = fsd.getINodesInPath(src, true);
       if (iip.getLastINode() == null) {
         throw new FileNotFoundException("Path not found: " + iip.getPath());
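A side effect worth noting: the FSDirDeleteOp hunk above stops wrapping src in fsd.normalizePath(), presumably because the new resolvePath round-trips every path through its byte[][] components and hands back an already-normalized string. A self-contained, JDK-only sketch of that round-trip; the two helpers approximate, as an assumption, what INode.getPathComponents and DFSUtil.byteArray2PathString do, and they simplify away HDFS's empty root component:

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

// Splitting a path into byte[] components and joining them back yields a
// normalized path: no doubled or trailing slashes survive the trip.
public class PathRoundTrip {
  static byte[][] toComponents(String path) {
    List<byte[]> parts = new ArrayList<>();
    for (String s : path.split("/")) {
      if (!s.isEmpty()) {          // drops "" produced by "//" and a trailing "/"
        parts.add(s.getBytes(StandardCharsets.UTF_8));
      }
    }
    return parts.toArray(new byte[parts.size()][]);
  }

  static String toPath(byte[][] components) {
    if (components.length == 0) {
      return "/";                  // root
    }
    StringBuilder sb = new StringBuilder();
    for (byte[] c : components) {
      sb.append('/').append(new String(c, StandardCharsets.UTF_8));
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(toPath(toComponents("/a//b/c/")));  // prints /a/b/c
  }
}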
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
index c74faca..1141422 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java
@@ -50,10 +50,9 @@ class FSDirMkdirOp {
       throw new InvalidPathException(src);
     }
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       INodesInPath iip = fsd.getINodesInPath4Write(src);
       if (fsd.isPermissionEnabled()) {
         fsd.checkTraverse(pc, iip);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 210a060..9dbf555 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -64,11 +64,9 @@ class FSDirRenameOp {
     }

     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
     HdfsFileStatus resultingStat = null;
-    src = fsd.resolvePath(pc, src, srcComponents);
-    dst = fsd.resolvePath(pc, dst, dstComponents);
+    src = fsd.resolvePath(pc, src);
+    dst = fsd.resolvePath(pc, dst);
     @SuppressWarnings("deprecation")
     final boolean status = renameTo(fsd, pc, src, dst, logRetryCache);
     if (status) {
@@ -239,11 +237,9 @@ class FSDirRenameOp {
     }

     final FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] srcComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    byte[][] dstComponents = FSDirectory.getPathComponentsForReservedPath(dst);
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-    src = fsd.resolvePath(pc, src, srcComponents);
-    dst = fsd.resolvePath(pc, dst, dstComponents);
+    src = fsd.resolvePath(pc, src);
+    dst = fsd.resolvePath(pc, dst);
     renameTo(fsd, pc, src, dst, collectedBlocks, logRetryCache, options);
     INodesInPath dstIIP = fsd.getINodesInPath(dst, false);
     HdfsFileStatus resultingStat = fsd.getAuditFileInfo(dstIIP);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 33af25c..e54db70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -50,24 +50,20 @@ import static org.apache.hadoop.util.Time.now;
 class FSDirStatAndListingOp {
   static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
       byte[] startAfter, boolean needLocation) throws IOException {
-    byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final String startAfterString = DFSUtil.bytes2String(startAfter);
     String src = null;

     if (fsd.isPermissionEnabled()) {
       FSPermissionChecker pc = fsd.getPermissionChecker();
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
     } else {
-      src = FSDirectory.resolvePath(srcArg, pathComponents, fsd);
+      src = FSDirectory.resolvePath(srcArg, fsd);
     }

     // Get file name when startAfter is an INodePath
     if (FSDirectory.isReservedName(startAfterString)) {
-      byte[][] startAfterComponents = FSDirectory
-          .getPathComponentsForReservedPath(startAfterString);
       try {
-        String tmp = FSDirectory.resolvePath(src, startAfterComponents, fsd);
+        String tmp = FSDirectory.resolvePath(startAfterString, fsd);
         byte[][] regularPath = INode.getPathComponents(tmp);
         startAfter = regularPath[regularPath.length - 1];
       } catch (IOException e) {
@@ -108,14 +104,13 @@ class FSDirStatAndListingOp {
     if (!DFSUtil.isValidName(src)) {
       throw new InvalidPathException("Invalid file name: " + src);
     }
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     if (fsd.isPermissionEnabled()) {
       FSPermissionChecker pc = fsd.getPermissionChecker();
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
       fsd.checkPermission(pc, iip, false, null, null, null, null, false);
     } else {
-      src = FSDirectory.resolvePath(src, pathComponents, fsd);
+      src = FSDirectory.resolvePath(srcArg, fsd);
     }
     return getFileInfo(fsd, src, FSDirectory.isReservedRawName(srcArg),
         resolveLink);
@@ -126,8 +121,7 @@ class FSDirStatAndListingOp {
    */
   static boolean isFileClosed(FSDirectory fsd, String src) throws IOException {
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, true);
     if (fsd.isPermissionEnabled()) {
       fsd.checkTraverse(pc, iip);
@@ -137,9 +131,8 @@ class FSDirStatAndListingOp {

   static ContentSummary getContentSummary(
       FSDirectory fsd, String src) throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, false);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPermission(pc, iip, false, null, null, null,
@@ -162,11 +155,10 @@ class FSDirStatAndListingOp {
           "Negative length is not supported. File: " + src);
     CacheManager cm = fsd.getFSNamesystem().getCacheManager();
     BlockManager bm = fsd.getBlockManager();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     boolean isReservedName = FSDirectory.isReservedRawName(src);
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       final INodesInPath iip = fsd.getINodesInPath(src, true);
       final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
       if (fsd.isPermissionEnabled()) {
@@ -613,12 +605,11 @@ class FSDirStatAndListingOp {

   static QuotaUsage getQuotaUsage(
       FSDirectory fsd, String src) throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = fsd.getPermissionChecker();
     final INodesInPath iip;
     fsd.readLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath(src, false);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPermission(pc, iip, false, null, null, null,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
index 44a171a..e78c7b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -53,11 +53,10 @@ class FSDirSymlinkOp {
     }

     FSPermissionChecker pc = fsn.getPermissionChecker();
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(link);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      link = fsd.resolvePath(pc, link, pathComponents);
+      link = fsd.resolvePath(pc, link);
       iip = fsd.getINodesInPath4Write(link, false);
       if (!createParent) {
         fsd.verifyParentDir(iip, link);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 6d37530..164538f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -70,15 +70,13 @@ final class FSDirTruncateOp {
     assert fsn.hasWriteLock();

     FSDirectory fsd = fsn.getFSDirectory();
-    byte[][] pathComponents = FSDirectory
-        .getPathComponentsForReservedPath(srcArg);
     final String src;
     final INodesInPath iip;
     final boolean onBlockBoundary;
     Block truncateBlock = null;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, srcArg, pathComponents);
+      src = fsd.resolvePath(pc, srcArg);
       iip = fsd.getINodesInPath4Write(src, true);
       if (fsd.isPermissionEnabled()) {
         fsd.checkPathAccess(pc, iip, FsAction.WRITE);
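One behavioral nuance rides along in the getListingInt hunk above: when startAfter is itself a reserved inode path, the old code resolved src against startAfter's components, while the new code resolves startAfterString directly. That reads like a latent bug being fixed by the refactor. A sketch of the corrected lookup, extracted into a hypothetical helper; the three statements are taken from the hunk:

// Sketch: derive the listing cursor when startAfter is an INodePath.
// The helper name is illustrative; the calls mirror the diff above.
static byte[] lastComponentOf(FSDirectory fsd, String startAfterString)
    throws IOException {
  // Resolve /.reserved/.inodes/<id> to its regular path, then keep only
  // the final component as the listing cursor.
  String tmp = FSDirectory.resolvePath(startAfterString, fsd);
  byte[][] regularPath = INode.getPathComponents(tmp);
  return regularPath[regularPath.length - 1];
}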
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 69a4150..3534634 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -117,8 +117,7 @@ class FSDirWriteFileOp {
   static void abandonBlock(
       FSDirectory fsd, FSPermissionChecker pc, ExtendedBlock b, long fileId,
       String src, String holder) throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);

     final INode inode;
     final INodesInPath iip;
@@ -179,8 +178,7 @@ class FSDirWriteFileOp {
     final byte storagePolicyID;
     String clientMachine;

-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsn.dir.resolvePath(pc, src, pathComponents);
+    src = fsn.dir.resolvePath(pc, src);
     FileState fileState = analyzeFileState(fsn, src, fileId, clientName,
         previous, onRetryBlock);
     if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
@@ -342,8 +340,7 @@ class FSDirWriteFileOp {
     boolean isRawPath = FSDirectory.isReservedRawName(src);
     FSDirectory fsd = fsn.getFSDirectory();

-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip = fsd.getINodesInPath4Write(src);

     // Verify that the destination does not exist as a directory already.
@@ -452,9 +449,8 @@ class FSDirWriteFileOp {
       FSPermissionChecker pc, String src,
       CryptoProtocolVersion[] supportedVersions)
       throws IOException {
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSDirectory fsd = fsn.getFSDirectory();
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     INodesInPath iip = fsd.getINodesInPath4Write(src);
     // Nothing to do if the path is not within an EZ
     final EncryptionZone zone = FSDirEncryptionZoneOp.getEZForPath(fsd, iip);
@@ -700,8 +696,7 @@ class FSDirWriteFileOp {
           src + " for " + holder);
     }
     checkBlock(fsn, last);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsn.dir.resolvePath(pc, src, pathComponents);
+    src = fsn.dir.resolvePath(pc, src);
     boolean success = completeFileInternal(fsn, src, holder,
         ExtendedBlock.getLocalBlock(last), fileId);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 92686c5..668e9e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -67,8 +67,7 @@ class FSDirXAttrOp {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(
         pc, xAttr, FSDirectory.isReservedRawName(src));
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     xAttrs.add(xAttr);
     INodesInPath iip;
@@ -95,8 +94,7 @@ class FSDirXAttrOp {
     if (!getAll) {
       XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs, isRawPath);
     }
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, true);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPathAccess(pc, iip, FsAction.READ);
@@ -136,8 +134,7 @@ class FSDirXAttrOp {
     FSDirXAttrOp.checkXAttrsConfigFlag(fsd);
     final FSPermissionChecker pc = fsd.getPermissionChecker();
     final boolean isRawPath = FSDirectory.isReservedRawName(src);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
-    src = fsd.resolvePath(pc, src, pathComponents);
+    src = fsd.resolvePath(pc, src);
     final INodesInPath iip = fsd.getINodesInPath(src, true);
     if (fsd.isPermissionEnabled()) {
       /* To access xattr names, you need EXECUTE in the owning directory. */
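The write-file and xattr hunks above all evaluate FSDirectory.isReservedRawName on the caller's original path before calling resolvePath, and that ordering matters: resolution strips the /.reserved/raw prefix, so checking afterwards would always report a non-raw path. A condensed sketch of the idiom; the enclosing method is hypothetical, the two calls are the real ones from the diff:

// Sketch: capture raw-ness from the original path, then resolve.
static String resolveKeepingRawFlag(FSDirectory fsd, FSPermissionChecker pc,
    String src) throws IOException {
  final boolean isRawPath = FSDirectory.isReservedRawName(src); // before
  src = fsd.resolvePath(pc, src);                               // strips raw
  // ...isRawPath still reflects what the client actually asked for...
  return src;
}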
@@ -164,15 +161,13 @@ class FSDirXAttrOp {
     FSPermissionChecker pc = fsd.getPermissionChecker();
     XAttrPermissionFilter.checkPermissionForApi(
         pc, xAttr, FSDirectory.isReservedRawName(src));
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
-        src);

     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
     xAttrs.add(xAttr);
     INodesInPath iip;
     fsd.writeLock();
     try {
-      src = fsd.resolvePath(pc, src, pathComponents);
+      src = fsd.resolvePath(pc, src);
       iip = fsd.getINodesInPath4Write(src);
       checkXAttrChangeAccess(fsd, iip, xAttr, pc);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 9446e3f..c8acdfb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -121,6 +121,8 @@ public class FSDirectory implements Closeable {
   public final static String DOT_INODES_STRING = ".inodes";
   public final static byte[] DOT_INODES =
       DFSUtil.string2Bytes(DOT_INODES_STRING);
+  private final static byte[] DOT_DOT =
+      DFSUtil.string2Bytes("..");
   public final static HdfsFileStatus DOT_RESERVED_STATUS =
       new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770),
@@ -477,7 +479,6 @@ public class FSDirectory implements Closeable {
    *
    * @param pc The permission checker used when resolving path.
    * @param path The path to resolve.
-   * @param pathComponents path components corresponding to the path
    * @return if the path indicates an inode, return path after replacing up to
    *         <inodeid> with the corresponding path of the inode, else the path
    *         in {@code src} as is. If the path refers to a path in the "raw"
@@ -485,12 +486,12 @@ public class FSDirectory implements Closeable {
    * @throws FileNotFoundException
    * @throws AccessControlException
    */
-  String resolvePath(FSPermissionChecker pc, String path, byte[][] pathComponents)
+  String resolvePath(FSPermissionChecker pc, String path)
       throws FileNotFoundException, AccessControlException {
     if (isReservedRawName(path) && isPermissionEnabled) {
       pc.checkSuperuserPrivilege();
     }
-    return resolvePath(path, pathComponents, this);
+    return resolvePath(path, this);
   }

@@ -1249,13 +1250,6 @@ public class FSDirectory implements Closeable {
     return components.toArray(new byte[components.size()][]);
   }

-  /**
-   * @return path components for reserved path, else null.
-   */
-  static byte[][] getPathComponentsForReservedPath(String src) {
-    return !isReservedName(src) ? null : INode.getPathComponents(src);
-  }
-
   /** Check if a given inode name is reserved */
   public static boolean isReservedName(INode inode) {
     return CHECK_RESERVED_FILE_NAMES
@@ -1281,6 +1275,12 @@ public class FSDirectory implements Closeable {
         Path.SEPARATOR + DOT_INODES_STRING);
   }

+  static boolean isReservedName(byte[][] components) {
+    return (components.length > 2) &&
+        Arrays.equals(INodeDirectory.ROOT_NAME, components[0]) &&
+        Arrays.equals(DOT_RESERVED, components[1]);
+  }
+
   /**
    * Resolve a /.reserved/... path to a non-reserved path.
    *
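Taken together, the FSDirectory hunks above split resolution into a thin instance method, which keeps the superuser gate for /.reserved/raw, over a static component-based core guarded by the new isReservedName(byte[][]) overload. A standalone, JDK-only illustration of that guard; the constants stand in for INodeDirectory.ROOT_NAME (assumed here to be the empty root component) and FSDirectory.DOT_RESERVED:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

// A reserved path is /<root>/.reserved/<something>: components[0] is the
// empty root name and components[1] must be ".reserved".
public class ReservedCheck {
  static final byte[] ROOT_NAME = new byte[0];
  static final byte[] DOT_RESERVED =
      ".reserved".getBytes(StandardCharsets.UTF_8);

  static boolean isReservedName(byte[][] components) {
    return components.length > 2
        && Arrays.equals(ROOT_NAME, components[0])
        && Arrays.equals(DOT_RESERVED, components[1]);
  }

  public static void main(String[] args) {
    byte[][] reserved = {ROOT_NAME, DOT_RESERVED,
        ".inodes".getBytes(StandardCharsets.UTF_8)};
    byte[][] plain = {ROOT_NAME, "a".getBytes(StandardCharsets.UTF_8),
        "b".getBytes(StandardCharsets.UTF_8)};
    System.out.println(isReservedName(reserved)); // true
    System.out.println(isReservedName(plain));    // false
  }
}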
@@ -1300,7 +1300,6 @@ public class FSDirectory implements Closeable {
    * unencrypted file).
    *
    * @param src path that is being processed
-   * @param pathComponents path components corresponding to the path
    * @param fsd FSDirectory
    * @return if the path indicates an inode, return path after replacing up to
    *         <inodeid> with the corresponding path of the inode, else the path
@@ -1308,45 +1307,37 @@ public class FSDirectory implements Closeable {
    *         in {@code src} as is. If the path refers to a path in the "raw"
    *         directory, return the non-raw pathname.
    * @throws FileNotFoundException if inodeid is invalid
    */
-  static String resolvePath(String src, byte[][] pathComponents,
+  static String resolvePath(String src,
       FSDirectory fsd) throws FileNotFoundException {
-    final int nComponents = (pathComponents == null) ?
-        0 : pathComponents.length;
-    if (nComponents <= 2) {
-      return src;
-    }
-    if (!Arrays.equals(DOT_RESERVED, pathComponents[1])) {
+    byte[][] pathComponents = INode.getPathComponents(src);
+    final int nComponents = pathComponents.length;
+    if (!isReservedName(pathComponents)) {
       /* This is not a /.reserved/ path so do nothing. */
-      return src;
-    }
-
-    if (Arrays.equals(DOT_INODES, pathComponents[2])) {
+    } else if (Arrays.equals(DOT_INODES, pathComponents[2])) {
       /* It's a /.reserved/.inodes path. */
       if (nComponents > 3) {
-        return resolveDotInodesPath(src, pathComponents, fsd);
-      } else {
-        return src;
+        pathComponents = resolveDotInodesPath(pathComponents, fsd);
       }
     } else if (Arrays.equals(RAW, pathComponents[2])) {
       /* It's /.reserved/raw so strip off the /.reserved/raw prefix. */
       if (nComponents == 3) {
-        return Path.SEPARATOR;
+        pathComponents = new byte[][]{INodeDirectory.ROOT_NAME};
       } else {
         if (nComponents == 4
             && Arrays.equals(DOT_RESERVED, pathComponents[3])) {
           /* It's /.reserved/raw/.reserved so don't strip */
-          return src;
         } else {
-          return constructRemainingPath("", pathComponents, 3);
+          pathComponents = constructRemainingPath(
+              new byte[][]{INodeDirectory.ROOT_NAME}, pathComponents, 3);
         }
       }
-    } else {
-      /* It's some sort of /.reserved/ path. Ignore it. */
-      return src;
     }
+    // this double conversion will be unnecessary when resolving returns
+    // INodesInPath (needs components byte[][])
+    return DFSUtil.byteArray2PathString(pathComponents);
   }

-  private static String resolveDotInodesPath(String src,
+  private static byte[][] resolveDotInodesPath(
       byte[][] pathComponents, FSDirectory fsd)
       throws FileNotFoundException {
     final String inodeId = DFSUtil.bytes2String(pathComponents[3]);
@@ -1354,48 +1345,47 @@ public class FSDirectory implements Closeable {
     try {
       id = Long.parseLong(inodeId);
     } catch (NumberFormatException e) {
-      throw new FileNotFoundException("Invalid inode path: " + src);
+      throw new FileNotFoundException("Invalid inode path: " +
+          DFSUtil.byteArray2PathString(pathComponents));
     }
     if (id == INodeId.ROOT_INODE_ID && pathComponents.length == 4) {
-      return Path.SEPARATOR;
+      return new byte[][]{INodeDirectory.ROOT_NAME};
     }
     INode inode = fsd.getInode(id);
     if (inode == null) {
       throw new FileNotFoundException(
-          "File for given inode path does not exist: " + src);
+          "File for given inode path does not exist: " +
+              DFSUtil.byteArray2PathString(pathComponents));
     }
-
+
     // Handle single ".." for NFS lookup support.
     if ((pathComponents.length > 4)
-        && DFSUtil.bytes2String(pathComponents[4]).equals("..")) {
+        && Arrays.equals(pathComponents[4], DOT_DOT)) {
       INode parent = inode.getParent();
       if (parent == null || parent.getId() == INodeId.ROOT_INODE_ID) {
         // inode is root, or its parent is root.
-        return Path.SEPARATOR;
-      } else {
-        return parent.getFullPathName();
+        return new byte[][]{INodeDirectory.ROOT_NAME};
       }
+      return parent.getPathComponents();
     }
-
-    String path = "";
-    if (id != INodeId.ROOT_INODE_ID) {
-      path = inode.getFullPathName();
-    }
-    return constructRemainingPath(path, pathComponents, 4);
+    return constructRemainingPath(
+        inode.getPathComponents(), pathComponents, 4);
   }

-  private static String constructRemainingPath(String pathPrefix,
-      byte[][] pathComponents, int startAt) {
-
-    StringBuilder path = new StringBuilder(pathPrefix);
-    for (int i = startAt; i < pathComponents.length; i++) {
-      path.append(Path.SEPARATOR).append(
-          DFSUtil.bytes2String(pathComponents[i]));
+  private static byte[][] constructRemainingPath(byte[][] components,
+      byte[][] extraComponents, int startAt) {
+    int remainder = extraComponents.length - startAt;
+    if (remainder > 0) {
+      // grow the array and copy in the remaining components
+      int pos = components.length;
+      components = Arrays.copyOf(components, pos + remainder);
+      System.arraycopy(extraComponents, startAt, components, pos, remainder);
     }
     if (NameNode.LOG.isDebugEnabled()) {
-      NameNode.LOG.debug("Resolved path is " + path);
+      NameNode.LOG.debug(
+          "Resolved path is " + DFSUtil.byteArray2PathString(components));
     }
-    return path.toString();
+    return components;
   }

   INode getINode4DotSnapshot(String src) throws UnresolvedLinkException {
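The payoff of the rewrite is visible in constructRemainingPath above: instead of appending strings, it splices byte[][] arrays with one Arrays.copyOf and one System.arraycopy. A runnable, JDK-only sketch of the same splice, replaying the testInodePath case further down where inode 1 lives at /a/b/c and /.reserved/.inodes/1/d/e/f should resolve to /a/b/c/d/e/f; the empty first component mirrors (as an assumption) HDFS's empty root name:

import java.util.Arrays;

// Grow the resolved prefix and append the caller's remaining components;
// no string building happens on the resolution hot path.
public class ComponentSplice {
  static byte[][] constructRemainingPath(byte[][] components,
      byte[][] extraComponents, int startAt) {
    int remainder = extraComponents.length - startAt;
    if (remainder > 0) {
      int pos = components.length;
      components = Arrays.copyOf(components, pos + remainder);
      System.arraycopy(extraComponents, startAt, components, pos, remainder);
    }
    return components;
  }

  public static void main(String[] args) {
    // prefix = components of /a/b/c (inode 1's path), extra = the request
    byte[][] prefix = {"".getBytes(), "a".getBytes(), "b".getBytes(),
        "c".getBytes()};
    byte[][] extra = {"".getBytes(), ".reserved".getBytes(),
        ".inodes".getBytes(), "1".getBytes(), "d".getBytes(), "e".getBytes(),
        "f".getBytes()};
    byte[][] out = constructRemainingPath(prefix, extra, 4);
    StringBuilder sb = new StringBuilder();
    for (int i = 1; i < out.length; i++) {  // skip the empty root component
      sb.append('/').append(new String(out[i]));
    }
    System.out.println(sb);                 // prints /a/b/c/d/e/f
  }
}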
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 4a00dd0..7496007 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1812,8 +1812,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       logAuditEvent(true, "open", srcArg);

     if (!isInSafeMode() && res.updateAccessTime()) {
-      byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(
-          srcArg);
       String src = srcArg;
       writeLock();
       final long now = now();
@@ -1837,7 +1835,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
          * HDFS-7463. A better fix is to change the edit log of SetTime to
          * use inode id instead of a path.
          */
-        src = dir.resolvePath(pc, srcArg, pathComponents);
+        src = dir.resolvePath(pc, srcArg);
         final INodesInPath iip = dir.getINodesInPath(src, true);
         INode inode = iip.getLastINode();
         boolean updateAccessTime = inode != null &&
@@ -2315,12 +2313,11 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     boolean skipSync = false;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot recover the lease of " + src);
-      src = dir.resolvePath(pc, src, pathComponents);
+      src = dir.resolvePath(pc, src);
       final INodesInPath iip = dir.getINodesInPath4Write(src);
       final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
       if (!inode.isUnderConstruction()) {
@@ -2566,14 +2563,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     final byte storagePolicyID;
     final List<DatanodeStorageInfo> chosen;
     checkOperation(OperationCategory.READ);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = getPermissionChecker();
     readLock();
     try {
       checkOperation(OperationCategory.READ);
       //check safe mode
       checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
-      src = dir.resolvePath(pc, src, pathComponents);
+      src = dir.resolvePath(pc, src);

       //check lease
       final INode inode;
@@ -3125,7 +3121,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       throws IOException {
     NameNode.stateChangeLog.info("BLOCK* fsync: " + src + " for " + clientName);
     checkOperation(OperationCategory.WRITE);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     FSPermissionChecker pc = getPermissionChecker();
     waitForLoadingFSImage();
@@ -3133,7 +3128,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot fsync file " + src);
-      src = dir.resolvePath(pc, src, pathComponents);
+      src = dir.resolvePath(pc, src);
       final INode inode;
       if (fileId == HdfsConstants.GRANDFATHER_INODE_ID) {
         // Older clients may not have given us an inode ID to work with.
@@ -7468,11 +7463,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   void checkAccess(String src, FsAction mode) throws IOException {
     checkOperation(OperationCategory.READ);
-    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = FSDirectory.resolvePath(src, dir);
       final INodesInPath iip = dir.getINodesInPath(src, true);
       INode inode = iip.getLastINode();
       if (inode == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3dac7308/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index b5bb5d6..10bcb5c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -856,9 +856,9 @@ public class TestINodeFile {
     byte[][] actual = FSDirectory.getPathComponents(inode);
     DFSTestUtil.checkComponentsEquals(expected, actual);
   }
-
+
   /**
-   * Tests for {@link FSDirectory#resolvePath(String, byte[][], FSDirectory)}
+   * Tests for {@link FSDirectory#resolvePath(String, FSDirectory)}
    */
   @Test
   public void testInodePath() throws IOException {
@@ -868,54 +868,47 @@ public class TestINodeFile {
     // For an any inode look up return inode corresponding to "c" from /a/b/c
     FSDirectory fsd = Mockito.mock(FSDirectory.class);
     Mockito.doReturn(inode).when(fsd).getInode(Mockito.anyLong());
-
-    // Null components
-    assertEquals("/test", FSDirectory.resolvePath("/test", null, fsd));
-
+    // Tests for FSDirectory#resolvePath()
     // Non inode regular path
-    byte[][] components = INode.getPathComponents(path);
-    String resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    String resolvedPath = FSDirectory.resolvePath(path, fsd);
     assertEquals(path, resolvedPath);
-
+
     // Inode path with no trailing separator
-    components = INode.getPathComponents("/.reserved/.inodes/1");
-    resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    String testPath = "/.reserved/.inodes/1";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals(path, resolvedPath);
-
+
     // Inode path with trailing separator
-    components = INode.getPathComponents("/.reserved/.inodes/1/");
+    testPath = "/.reserved/.inodes/1/";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals(path, resolvedPath);
-
+
     // Inode relative path
-    components = INode.getPathComponents("/.reserved/.inodes/1/d/e/f");
-    resolvedPath = FSDirectory.resolvePath(path, components, fsd);
+    testPath = "/.reserved/.inodes/1/d/e/f";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals("/a/b/c/d/e/f", resolvedPath);
-
+
     // A path with just .inodes returns the path as is
-    String testPath = "/.reserved/.inodes";
-    components = INode.getPathComponents(testPath);
-    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    testPath = "/.reserved/.inodes";
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals(testPath, resolvedPath);
-
+
     // Root inode path
     testPath = "/.reserved/.inodes/" + INodeId.ROOT_INODE_ID;
-    components = INode.getPathComponents(testPath);
-    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals("/", resolvedPath);
-
+
     // An invalid inode path should remain unresolved
     testPath = "/.invalid/.inodes/1";
-    components = INode.getPathComponents(testPath);
-    resolvedPath = FSDirectory.resolvePath(testPath, components, fsd);
+    resolvedPath = FSDirectory.resolvePath(testPath, fsd);
     assertEquals(testPath, resolvedPath);
-
+
     // Test path with nonexistent(deleted or wrong id) inode
     Mockito.doReturn(null).when(fsd).getInode(Mockito.anyLong());
     testPath = "/.reserved/.inodes/1234";
-    components = INode.getPathComponents(testPath);
     try {
-      String realPath = FSDirectory.resolvePath(testPath, components, fsd);
+      String realPath = FSDirectory.resolvePath(testPath, fsd);
       fail("Path should not be resolved:" + realPath);
     } catch (IOException e) {
       assertTrue(e instanceof FileNotFoundException);