hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From weic...@apache.org
Subject [hadoop] branch branch-3.1 updated: HADOOP-16032. Distcp It should clear sub directory ACL before applying new ACL on.
Date Tue, 01 Oct 2019 23:47:27 GMT
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new a400f39  HADOOP-16032. Distcp It should clear sub directory ACL before applying new ACL on.
a400f39 is described below

commit a400f396a6eeca9e06b501f0dd03797e8c6fda48
Author: Ranith Sardar <ranithsardar.90@gmail.com>
AuthorDate: Thu Feb 7 21:49:18 2019 +0000

    HADOOP-16032. Distcp It should clear sub directory ACL before applying new ACL on.
    
    Contributed by Ranith Sardar.
    
    (cherry picked from commit 546c5d70efebb828389f609a89b123c4ee51f867)
    (cherry picked from commit c5eca3f7ee095d6a261eb411ad97aba654d67d13)
    
     Conflicts:
    	hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
---
 .../org/apache/hadoop/tools/util/DistCpUtils.java  |  1 +
 .../apache/hadoop/tools/util/TestDistCpUtils.java  | 88 +++++++++++++++++++++-
 2 files changed, 88 insertions(+), 1 deletion(-)

diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index 496913f0..7a5842b 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -211,6 +211,7 @@ public class DistCpUtils {
       List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
       List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
       if (!srcAcl.equals(targetAcl)) {
+        targetFS.removeAcl(path);
         targetFS.setAcl(path, srcAcl);
       }
       // setAcl doesn't preserve sticky bit, so also call setPermission if needed.
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
index 2f610ab..8ca8d08 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
@@ -25,8 +25,10 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.tools.ECAdmin;
@@ -41,12 +43,26 @@ import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import com.google.common.collect.Lists;
+
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.EnumSet;
+import java.util.List;
 import java.util.Random;
 import java.util.Stack;
 
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.FsAction.ALL;
+import static org.apache.hadoop.fs.permission.FsAction.EXECUTE;
+import static org.apache.hadoop.fs.permission.FsAction.READ;
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+import static org.apache.hadoop.fs.permission.FsAction.READ_WRITE;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -62,6 +78,7 @@ public class TestDistCpUtils {
   
   @BeforeClass
   public static void create() throws IOException {
+    config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(config)
         .numDataNodes(2)
         .format(true)
@@ -182,7 +199,76 @@ public class TestDistCpUtils {
     Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
     Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
   }
-  
+
+  @Test
+  public void testPreserveAclsforDefaultACL() throws IOException {
+    FileSystem fs = FileSystem.get(config);
+
+    EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.ACL,
+        FileAttribute.PERMISSION, FileAttribute.XATTR, FileAttribute.GROUP,
+        FileAttribute.USER, FileAttribute.REPLICATION, FileAttribute.XATTR,
+        FileAttribute.TIMES);
+
+    Path dest = new Path("/tmpdest");
+    Path src = new Path("/testsrc");
+
+    fs.mkdirs(src);
+    fs.mkdirs(dest);
+
+    List<AclEntry> acls = Lists.newArrayList(
+        aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
+        aclEntry(ACCESS, USER, READ_WRITE), aclEntry(ACCESS, GROUP, READ),
+        aclEntry(ACCESS, OTHER, READ), aclEntry(ACCESS, USER, "bar", ALL));
+    final List<AclEntry> acls1 = Lists.newArrayList(aclEntry(ACCESS, USER, ALL),
+        aclEntry(ACCESS, USER, "user1", ALL),
+        aclEntry(ACCESS, GROUP, READ_EXECUTE),
+        aclEntry(ACCESS, OTHER, EXECUTE));
+
+    fs.setPermission(src, fullPerm);
+    fs.setOwner(src, "somebody", "somebody-group");
+    fs.setTimes(src, 0, 0);
+    fs.setReplication(src, (short) 1);
+    fs.setAcl(src, acls);
+
+    fs.setPermission(dest, noPerm);
+    fs.setOwner(dest, "nobody", "nobody-group");
+    fs.setTimes(dest, 100, 100);
+    fs.setReplication(dest, (short) 2);
+    fs.setAcl(dest, acls1);
+
+    List<AclEntry> en1 = fs.getAclStatus(src).getEntries();
+    List<AclEntry> dd2 = fs.getAclStatus(dest).getEntries();
+
+    Assert.assertNotEquals(en1, dd2);
+
+    CopyListingFileStatus srcStatus = new CopyListingFileStatus(
+        fs.getFileStatus(src));
+
+    en1 = srcStatus.getAclEntries();
+
+    DistCpUtils.preserve(fs, dest, srcStatus, attributes, false);
+
+    CopyListingFileStatus dstStatus = new CopyListingFileStatus(
+        fs.getFileStatus(dest));
+
+    dd2 = dstStatus.getAclEntries();
+    en1 = srcStatus.getAclEntries();
+
+    // FileStatus.equals only compares path field, must explicitly compare all
+    // fields
+    Assert.assertEquals("getPermission", srcStatus.getPermission(),
+        dstStatus.getPermission());
+    Assert.assertEquals("Owner", srcStatus.getOwner(), dstStatus.getOwner());
+    Assert.assertEquals("Group", srcStatus.getGroup(), dstStatus.getGroup());
+    Assert.assertEquals("AccessTime", srcStatus.getAccessTime(),
+        dstStatus.getAccessTime());
+    Assert.assertEquals("ModificationTime", srcStatus.getModificationTime(),
+        dstStatus.getModificationTime());
+    Assert.assertEquals("Replication", srcStatus.getReplication(),
+        dstStatus.getReplication());
+    Assert.assertArrayEquals(en1.toArray(), dd2.toArray());
+  }
+
   @Test
   public void testPreserveNothingOnDirectory() throws IOException {
     FileSystem fs = FileSystem.get(config);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


Mime
View raw message