From: wangda@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Date: Fri, 18 Sep 2015 18:13:33 -0000
Message-Id: <2e4098c58efc40d091d80f71cd6c4987@git.apache.org>
Subject: [13/34] hadoop git commit: HDFS-5802. NameNode does not check for inode type before traversing down a path. (Xiao Chen via Yongjun Zhang)

HDFS-5802. NameNode does not check for inode type before traversing down a
path. (Xiao Chen via Yongjun Zhang)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ff6faf9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ff6faf9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ff6faf9

Branch: refs/heads/YARN-1197
Commit: 2ff6faf954eb0f1ab2b339d589edb30040087669
Parents: 92c1af1
Author: Yongjun Zhang
Authored: Thu Sep 17 22:56:14 2015 -0700
Committer: Yongjun Zhang
Committed: Fri Sep 18 07:17:30 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../server/namenode/FSPermissionChecker.java  | 25 +++++++++++-
 .../apache/hadoop/hdfs/TestDFSPermission.java | 42 +++++++++++++++++++-
 3 files changed, 67 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ff6faf9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4912f50..f9837f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -932,6 +932,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9022. Move NameNode.getAddress() and NameNode.getUri() to
     hadoop-hdfs-client. (Mingliang Liu via wheat9)
 
+    HDFS-5802. NameNode does not check for inode type before traversing down a
+    path. (Xiao Chen via Yongjun Zhang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
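For context on the user-visible effect: when a non-final path component resolves to a regular file, the traverse permission check fails (a plain file normally carries no execute bit), and before this patch the resulting AccessControlException did not say which component was the problem. A minimal client-side sketch of the scenario, assuming fs.defaultFS points at a running HDFS, that a regular file already exists at the hypothetical path /existing_file, and that the caller is not the superuser:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NonDirAncestorDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // /existing_file is assumed to be a regular file, not a directory.
    Path p = new Path("/existing_file/non_existing_name");
    // exists() swallows only FileNotFoundException, so an
    // AccessControlException raised during path traversal propagates.
    // With this patch its message should also name the blocking ancestor,
    // e.g. "... (Ancestor /existing_file is not a directory)."
    System.out.println(fs.exists(p));
  }
}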
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ff6faf9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 041ce0b..9edcda1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -192,6 +192,25 @@ class FSPermissionChecker implements AccessControlEnforcer {
         ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
   }
 
+  /**
+   * Check whether exception e is due to an ancestor inode's not being a
+   * directory.
+   */
+  private void checkAncestorType(INode[] inodes, int ancestorIndex,
+      AccessControlException e) throws AccessControlException {
+    for (int i = 0; i <= ancestorIndex; i++) {
+      if (inodes[i] == null) {
+        break;
+      }
+      if (!inodes[i].isDirectory()) {
+        throw new AccessControlException(
+            e.getMessage() + " (Ancestor " + inodes[i].getFullPathName()
+                + " is not a directory).");
+      }
+    }
+    throw e;
+  }
+
   @Override
   public void checkPermission(String fsOwner, String supergroup,
       UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
@@ -202,7 +221,11 @@ class FSPermissionChecker implements AccessControlEnforcer {
       throws AccessControlException {
     for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
         ancestorIndex--);
-    checkTraverse(inodeAttrs, path, ancestorIndex);
+    try {
+      checkTraverse(inodeAttrs, path, ancestorIndex);
+    } catch (AccessControlException e) {
+      checkAncestorType(inodes, ancestorIndex, e);
+    }
 
     final INodeAttributes last = inodeAttrs[inodeAttrs.length - 1];
     if (parentAccess != null && parentAccess.implies(FsAction.WRITE)
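The ordering in checkAncestorType is what makes the message precise: inodes[] holds the resolved path components left to right, a null entry means everything deeper simply does not exist yet (so the original exception is rethrown untouched), and the first non-directory entry is the component that blocked traversal. A self-contained sketch of that decision logic, with a hypothetical Node class standing in for the NameNode's INode:

public class AncestorCheckSketch {
  // Hypothetical minimal stand-in for the NameNode's INode.
  static class Node {
    final String fullPathName;
    final boolean isDirectory;
    Node(String fullPathName, boolean isDirectory) {
      this.fullPathName = fullPathName;
      this.isDirectory = isDirectory;
    }
  }

  // Mirrors the patch's checkAncestorType: rethrows e unchanged unless some
  // ancestor inode turns out to be a file, in which case the message is
  // enriched with that ancestor's path.
  static void checkAncestorType(Node[] inodes, int ancestorIndex, Exception e)
      throws Exception {
    for (int i = 0; i <= ancestorIndex; i++) {
      if (inodes[i] == null) {
        break; // components from here on do not exist yet
      }
      if (!inodes[i].isDirectory) {
        throw new Exception(e.getMessage() + " (Ancestor "
            + inodes[i].fullPathName + " is not a directory).");
      }
    }
    throw e; // no non-directory ancestor: original exception stands
  }

  public static void main(String[] args) {
    Node[] inodes = {
        new Node("/", true),
        new Node("/p4", true),
        new Node("/p4/file", false), // a regular file mid-path
        null                         // /p4/file/nonexisting
    };
    try {
      checkAncestorType(inodes, 2, new Exception("Permission denied"));
    } catch (Exception ex) {
      // Prints: Permission denied (Ancestor /p4/file is not a directory).
      System.out.println(ex.getMessage());
    }
  }
}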
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ff6faf9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index 23ce916..80b2eb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -510,8 +511,45 @@ public class TestDFSPermission {
     }
   }
 
-  /* Check if namenode performs permission checking correctly
-   * for the given user for operations mkdir, open, setReplication,
+  @Test
+  public void testPermissionMessageOnNonDirAncestor()
+      throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p4 = new Path("/p4");
+    rootFs.mkdirs(p4);
+    rootFs.setOwner(p4, USER1_NAME, GROUP1_NAME);
+
+    final Path fpath = new Path("/p4/file");
+    DataOutputStream out = rootFs.create(fpath);
+    out.writeBytes("dhruba: " + fpath);
+    out.close();
+    rootFs.setOwner(fpath, USER1_NAME, GROUP1_NAME);
+    assertTrue(rootFs.exists(fpath));
+
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+
+    final Path nfpath = new Path("/p4/file/nonexisting");
+    assertFalse(rootFs.exists(nfpath));
+
+    try {
+      fs.exists(nfpath);
+      fail("The exists call should have failed.");
+    } catch (AccessControlException e) {
+      assertTrue("Permission denied messages must carry the file path",
+          e.getMessage().contains(fpath.getName()));
+      assertTrue("Permission denied messages must specify that existing_file "
+          + "is not a directory, when checked on /existing_file/non_existing_name",
+          e.getMessage().contains("is not a directory"));
+    }
+  }
+
+  /* Check if namenode performs permission checking correctly
+   * for the given user for operations mkdir, open, setReplication,
    * getFileInfo, isDirectory, exists, getContentLength, list, rename,
    * and delete */
   private void testPermissionCheckingPerUser(UserGroupInformation ugi,
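A side note on the doAs block in the test above: FileSystem.get(conf) inside run() executes under USER1's credentials, and the FileSystem cache keys instances by UGI, so the handle stored in fs is permission-checked by the NameNode as USER1 rather than as the superuser running the test JVM. A condensed, self-contained sketch of that pattern, with hypothetical "user1"/"group1" names standing in for the test's USER1_NAME and GROUP1_NAME:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsFileSystemSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    // Hypothetical test identity, built in essentially the same way
    // TestDFSPermission builds its USER1 UGI.
    UserGroupInformation user1 = UserGroupInformation
        .createUserForTesting("user1", new String[] { "group1" });
    // Everything inside run() executes as user1; later RPCs made through
    // this FileSystem are evaluated against that identity.
    FileSystem fsAsUser1 =
        user1.doAs(new PrivilegedExceptionAction<FileSystem>() {
          @Override
          public FileSystem run() throws Exception {
            return FileSystem.get(conf);
          }
        });
    System.out.println("FileSystem obtained as " + user1.getShortUserName());
    fsAsUser1.close();
  }
}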