From: kihwal@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-12907. Allow read-only access to reserved raw for non-superusers. Contributed by Rushabh S Shah.
Date: Thu, 14 Dec 2017 21:04:37 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 22bc62058 -> 08a50da95


HDFS-12907. Allow read-only access to reserved raw for non-superusers. Contributed by Rushabh S Shah.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08a50da9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08a50da9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08a50da9

Branch: refs/heads/branch-2
Commit: 08a50da95fd8509d87c6a3e5b46862b8eb6f7318
Parents: 22bc620
Author: Kihwal Lee
Authored: Thu Dec 14 15:03:47 2017 -0600
Committer: Kihwal Lee
Committed: Thu Dec 14 15:03:47 2017 -0600

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirectory.java       |  9 ++-
 .../server/namenode/XAttrPermissionFilter.java  | 10 +--
 .../hadoop/hdfs/TestReservedRawPaths.java       | 22 ++----
 .../hdfs/server/namenode/FSXAttrBaseTest.java   | 80 ++++++++++++++++----
 4 files changed, 86 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08a50da9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index ace9513..939445a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -621,7 +621,14 @@ public class FSDirectory implements Closeable {
     byte[][] components = INode.getPathComponents(src);
     boolean isRaw = isReservedRawName(components);
     if (isPermissionEnabled && pc != null && isRaw) {
-      pc.checkSuperuserPrivilege();
+      switch(dirOp) {
+        case READ_LINK:
+        case READ:
+          break;
+        default:
+          pc.checkSuperuserPrivilege();
+          break;
+      }
     }
     components = resolveComponents(components, this);
     INodesInPath iip = INodesInPath.resolve(rootDir, components, isRaw);
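For illustration, a minimal sketch (not part of the commit) of the behavior this FSDirectory hunk enables: READ and READ_LINK directory operations on a /.reserved/raw path no longer require superuser privilege, so a plain user with ordinary HDFS read permission can stat a file through the raw prefix. The path below is hypothetical; write-type operations on raw paths still go through checkSuperuserPrivilege().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RawReadSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Before this change, resolving any /.reserved/raw path threw
    // AccessControlException for non-superusers; read-type resolution
    // now falls through to the normal permission checks.
    FileStatus st = fs.getFileStatus(
        new Path("/.reserved/raw/user/alice/part-0")); // hypothetical path
    System.out.println(st.getPath() + " len=" + st.getLen());
  }
}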
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08a50da9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
index de448f6..e1bf027 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
@@ -54,7 +54,8 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_
  * attributes that sometimes need to be exposed. Like SYSTEM namespace
  * attributes they are not visible to the user except when getXAttr/getXAttrs
  * is called on a file or directory in the /.reserved/raw HDFS directory
- * hierarchy. These attributes can only be accessed by the superuser.
+ * hierarchy. These attributes can only be accessed by users who have
+ * read access.
  *
  */
 @InterfaceAudience.Private
@@ -68,8 +69,7 @@ public class XAttrPermissionFilter {
         (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) {
       return;
     }
-    if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
-        isRawPath && isSuperUser) {
+    if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && isRawPath) {
       return;
     }
     if (XAttrHelper.getPrefixedName(xAttr).
@@ -112,15 +112,13 @@ public class XAttrPermissionFilter {
       } else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED &&
           isSuperUser) {
         filteredXAttrs.add(xAttr);
-      } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
-          isSuperUser && isRawPath) {
+      } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW && isRawPath) {
         filteredXAttrs.add(xAttr);
       } else if (XAttrHelper.getPrefixedName(xAttr).
           equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
         filteredXAttrs.add(xAttr);
       }
     }
-
     return filteredXAttrs;
   }
 }
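With the isSuperUser condition dropped from both checks above, a user who passes the normal HDFS permission checks on a /.reserved/raw path can now read raw.* extended attributes. A minimal sketch under that assumption; the path is hypothetical, and the encryption-info attribute is just one example of a raw.* xattr:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RawXAttrSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path raw = new Path("/.reserved/raw/user/alice/part-0"); // hypothetical
    // Succeeds for a non-superuser as long as the ordinary checks pass:
    // execute on the ancestor directories and read on the file itself.
    byte[] value = fs.getXAttr(raw, "raw.hdfs.crypto.file.encryption.info");
    System.out.println(value == null ? "no value" : value.length + " bytes");
  }
}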
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08a50da9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
index 3f57dcf..12b86cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
@@ -247,7 +247,7 @@ public class TestReservedRawPaths {
   }
 
   @Test(timeout = 120000)
-  public void testAdminAccessOnly() throws Exception {
+  public void testUserReadAccessOnly() throws Exception {
     final Path zone = new Path("zone");
     final Path slashZone = new Path("/", zone);
     fs.mkdirs(slashZone);
@@ -275,34 +275,26 @@ public class TestReservedRawPaths {
       }
     });
 
-    /* Test failure of getFileStatus in reserved/raw as non admin */
+    /* Test success of getFileStatus in reserved/raw as non admin since
+     * read is allowed. */
     final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
     DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws Exception {
         final DistributedFileSystem fs = cluster.getFileSystem();
-        try {
-          fs.getFileStatus(ezRawEncFile);
-          fail("access to /.reserved/raw is superuser-only operation");
-        } catch (AccessControlException e) {
-          assertExceptionContains("Superuser privilege is required", e);
-        }
+        fs.getFileStatus(ezRawEncFile);
         return null;
       }
     });
 
-    /* Test failure of listStatus in reserved/raw as non admin */
+    /* Test success of listStatus in reserved/raw as non admin since read is
+     * allowed. */
     user.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws Exception {
         final DistributedFileSystem fs = cluster.getFileSystem();
-        try {
-          fs.listStatus(ezRawEncFile);
-          fail("access to /.reserved/raw is superuser-only operation");
-        } catch (AccessControlException e) {
-          assertExceptionContains("Superuser privilege is required", e);
-        }
+        fs.listStatus(ezRawEncFile);
         return null;
       }
     });
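These tests rely on the UserGroupInformation doAs pattern; the FileSystem has to be obtained inside run() so that RPCs authenticate as the target user rather than the test's login user. A condensed sketch of that pattern (the user name and path are hypothetical, and DoAsSketch/statAsUser are illustrative helpers, not part of the commit):

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
  static FileStatus statAsUser(final Configuration conf, final Path p,
      String user) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
    return ugi.doAs(new PrivilegedExceptionAction<FileStatus>() {
      @Override
      public FileStatus run() throws Exception {
        // Created inside run(), so this FileSystem is bound to "user",
        // not to the caller's credentials.
        FileSystem fs = FileSystem.get(conf);
        // After HDFS-12907 this succeeds for a non-superuser with plain
        // read access, even when p is under /.reserved/raw.
        return fs.getFileStatus(p);
      }
    });
  }
}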
http://git-wip-us.apache.org/repos/asf/hadoop/blob/08a50da9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index c526484..87a975d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -32,6 +33,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
@@ -1081,7 +1083,8 @@ public class FSXAttrBaseTest {
 
     {
       /*
-       * Test that non-root can not do getXAttr in the "raw.*" namespace
+       * Test that users who don't have read access
+       * cannot do getXAttr in the "raw.*" namespace.
        */
       fs.setXAttr(rawPath, raw1, value1);
       user.doAs(new PrivilegedExceptionAction<Object>() {
@@ -1089,7 +1092,7 @@ public class FSXAttrBaseTest {
         public Object run() throws Exception {
           final FileSystem userFs = dfsCluster.getFileSystem();
           try {
-            // non-raw path
+            // raw path
             userFs.getXAttr(rawPath, raw1);
             fail("getXAttr should have thrown");
           } catch (AccessControlException e) {
@@ -1097,7 +1100,7 @@ public class FSXAttrBaseTest {
           }
 
           try {
-            // raw path
+            // non-raw path
             userFs.getXAttr(path, raw1);
             fail("getXAttr should have thrown");
           } catch (AccessControlException e) {
@@ -1105,25 +1108,76 @@ public class FSXAttrBaseTest {
           }
 
           /*
-           * Test that only root can see raw.* xattrs returned from listXAttr
-           * and non-root can't do listXAttrs on /.reserved/raw.
-           */
+           * Test that only users who have parent directory execute access
+           * can see raw.* xattrs returned from listXAttr.
+           */
           // non-raw path
           final List<String> xattrNames = userFs.listXAttrs(path);
           assertTrue(xattrNames.size() == 0);
 
-          try {
-            // raw path
-            userFs.listXAttrs(rawPath);
-            fail("listXAttrs on raw path should have thrown");
-          } catch (AccessControlException e) {
-            // ignore
-          }
+          // raw path
+          List<String> rawXattrs = userFs.listXAttrs(rawPath);
+          assertTrue(rawXattrs.size() == 1);
+          assertTrue(rawXattrs.get(0).equals(raw1));
           return null;
         }
       });
       fs.removeXAttr(rawPath, raw1);
     }
+
+    {
+      /*
+       * Tests that users who have read access are able to do getXAttr.
+       */
+      final Path parentPath = new Path("/foo");
+      fs.mkdirs(parentPath);
+      fs.setOwner(parentPath, "user", "mygroup");
+      // Set only execute permission for others on parent directory so that
+      // any user can traverse down the directory.
+      fs.setPermission(parentPath, new FsPermission("701"));
+      final Path childPath = new Path("/foo/bar");
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final DistributedFileSystem dfs = dfsCluster.getFileSystem();
+          DFSTestUtil.createFile(dfs, childPath, 1024, (short) 1, 0xFEED);
+          dfs.setPermission(childPath, new FsPermission("740"));
+          return null;
+        }
+      });
+      final Path rawChildPath =
+          new Path("/.reserved/raw" + childPath.toString());
+      fs.setXAttr(new Path("/.reserved/raw/foo/bar"), raw1, value1);
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final DistributedFileSystem dfs = dfsCluster.getFileSystem();
+          // Make sure the user has access to the raw xattr.
+          byte[] xattr = dfs.getXAttr(rawChildPath, raw1);
+          assertEquals(Arrays.toString(value1), Arrays.toString(xattr));
+          return null;
+        }
+      });
+
+      final UserGroupInformation fakeUser = UserGroupInformation
+          .createUserForTesting("fakeUser", new String[] {"fakeGroup"});
+      fakeUser.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final DistributedFileSystem dfs = dfsCluster.getFileSystem();
+          try {
+            // Make sure a user who doesn't have read access to the file
+            // can't access the raw xattr.
+            dfs.getXAttr(path, raw1);
+            fail("should have thrown AccessControlException");
+          } catch (AccessControlException ace) {
+            // expected
+          }
+          return null;
+        }
+      });
+      // fs.removeXAttr(rawPath, raw1);
+    }
   }
 
   /**

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org