From: yliu@apache.org
To: common-commits@hadoop.apache.org
Reply-To: common-dev@hadoop.apache.org
Mailing-List: contact common-commits-help@hadoop.apache.org; run by ezmlm
Subject: hadoop git commit: HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
Date: Tue, 25 Aug 2015 08:21:29 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/trunk af7876787 -> eee0d4563


HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eee0d456
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eee0d456
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eee0d456

Branch: refs/heads/trunk
Commit: eee0d4563c62647cfaaed6605ee713aaf69add78
Parents: af78767
Author: yliu
Authored: Tue Aug 25 16:16:09 2015 +0800
Committer: yliu
Committed: Tue Aug 25 16:16:09 2015 +0800

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../org/apache/hadoop/hdfs/XAttrHelper.java     |  13 +-
 .../BlockStoragePolicySuite.java                |   5 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java      |  29 ++--
 .../hdfs/server/namenode/FSDirectory.java       |  60 ++++---
 .../server/namenode/FSImageFormatPBINode.java   |   6 +-
 .../hdfs/server/namenode/INodeDirectory.java    |  11 +-
 .../server/namenode/SerialNumberManager.java    |  44 ------
 .../hdfs/server/namenode/SerialNumberMap.java   |  79 ++++++++++
 .../hdfs/server/namenode/XAttrFeature.java      |  78 +++++++++-
 .../hdfs/server/namenode/XAttrFormat.java       | 155 +++++++++++++++++++
 .../server/namenode/XAttrPermissionFilter.java  |   6 +-
 .../hdfs/server/namenode/XAttrStorage.java      |  62 +++-----
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |   6 +-
 .../src/main/resources/hdfs-default.xml         |   4 +-
 .../hdfs/server/namenode/TestStartup.java       |  27 +---
 .../hdfs/server/namenode/TestXAttrFeature.java  | 107 +++++++++++++
 18 files changed, 502 insertions(+), 193 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7aadcc6..2c47b50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -833,6 +833,8 @@ Release 2.8.0 - UNRELEASED
     ReplicaUnderConstruction as a separate class and replicas as an array.
     (jing9)
 
+    HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9b14168..e6802a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -318,6 +318,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
   public static final String DFS_NAMENODE_MAX_XATTR_SIZE_KEY = "dfs.namenode.fs-limits.max-xattr-size";
   public static final int DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT = 16384;
+  public static final int DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT = 32768;
 
   //Following keys have no defaults

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
index 5cafb3c..2655c40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
@@ -130,7 +130,7 @@ public class XAttrHelper {
     }
     Map<String, byte[]> xAttrMap = Maps.newHashMap();
     for (XAttr xAttr : xAttrs) {
-      String name = getPrefixName(xAttr);
+      String name = getPrefixedName(xAttr);
       byte[] value = xAttr.getValue();
       if (value == null) {
         value = new byte[0];
@@ -144,13 +144,16 @@ public class XAttrHelper {
   /**
    * Get name with prefix from XAttr
    */
-  public static String getPrefixName(XAttr xAttr) {
+  public static String getPrefixedName(XAttr xAttr) {
     if (xAttr == null) {
       return null;
     }
-    
-    String namespace = xAttr.getNameSpace().toString();
-    return StringUtils.toLowerCase(namespace) + "." + xAttr.getName();
+
+    return getPrefixedName(xAttr.getNameSpace(), xAttr.getName());
+  }
+
+  public static String getPrefixedName(XAttr.NameSpace ns, String name) {
+    return StringUtils.toLowerCase(ns.toString()) + "." + name;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index 7e6c2b2..fa9d22c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -141,8 +141,7 @@ public class BlockStoragePolicySuite {
     return XAttrHelper.buildXAttr(name, new byte[]{policyId});
   }
 
-  public static boolean isStoragePolicyXAttr(XAttr xattr) {
-    return xattr != null && xattr.getNameSpace() == XAttrNS
-        && xattr.getName().equals(STORAGE_POLICY_XATTR_NAME);
+  public static String getStoragePolicyXAttrPrefixedName() {
+    return XAttrHelper.getPrefixedName(XAttrNS, STORAGE_POLICY_XATTR_NAME);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 53b9b41..e88fa49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -272,7 +272,7 @@ class FSDirXAttrOp {
     final boolean isFile = inode.isFile();
 
     for (XAttr xattr : newXAttrs) {
-      final String xaName = XAttrHelper.getPrefixName(xattr);
+      final String xaName = XAttrHelper.getPrefixedName(xattr);
 
       /*
        * If we're adding the encryption zone xattr, then add src to the list
@@ -368,30 +368,22 @@ class FSDirXAttrOp {
     return xAttrs;
   }
 
-  static List<XAttr> getXAttrs(FSDirectory fsd, INode inode, int snapshotId)
-      throws IOException {
+  static XAttr getXAttrByPrefixedName(FSDirectory fsd, INode inode,
+      int snapshotId, String prefixedName) throws IOException {
     fsd.readLock();
     try {
-      return XAttrStorage.readINodeXAttrs(inode, snapshotId);
+      return XAttrStorage.readINodeXAttrByPrefixedName(inode, snapshotId,
+          prefixedName);
     } finally {
       fsd.readUnlock();
     }
   }
 
-  static XAttr unprotectedGetXAttrByName(
-      INode inode, int snapshotId, String xAttrName)
+  static XAttr unprotectedGetXAttrByPrefixedName(
+      INode inode, int snapshotId, String prefixedName)
       throws IOException {
-    List<XAttr> xAttrs = XAttrStorage.readINodeXAttrs(inode, snapshotId);
-    if (xAttrs == null) {
-      return null;
-    }
-    for (XAttr x : xAttrs) {
-      if (XAttrHelper.getPrefixName(x)
-          .equals(xAttrName)) {
-        return x;
-      }
-    }
-    return null;
+    return XAttrStorage.readINodeXAttrByPrefixedName(inode, snapshotId,
+        prefixedName);
   }
 
   private static void checkXAttrChangeAccess(
@@ -418,9 +410,6 @@ class FSDirXAttrOp {
    * the configured limit. Setting a limit of zero disables this check.
    */
   private static void checkXAttrSize(FSDirectory fsd, XAttr xAttr) {
-    if (fsd.getXattrMaxSize() == 0) {
-      return;
-    }
     int size = xAttr.getName().getBytes(Charsets.UTF_8).length;
    if (xAttr.getValue() != null) {
      size += xAttr.getValue().length;
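
A note on the change above: with the zero-means-unlimited early return removed,
checkXAttrSize now applies to every xattr unconditionally. For illustration
only (a standalone sketch, not code from this patch; the class name and limit
constant here are invented), the size accounting works like this:

    import java.nio.charset.StandardCharsets;

    // Sketch of the same accounting checkXAttrSize performs: the UTF-8
    // byte length of the name plus the raw value length must stay within
    // the configured dfs.namenode.fs-limits.max-xattr-size.
    class XAttrSizeCheckDemo {
      static final int LIMIT = 16384; // the default limit

      static void check(String name, byte[] value) {
        int size = name.getBytes(StandardCharsets.UTF_8).length;
        if (value != null) {
          size += value.length;
        }
        if (size > LIMIT) {
          throw new IllegalArgumentException(
              "The XAttr is too big: combined size of name and value is "
              + size + ", limit is " + LIMIT);
        }
      }
    }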

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index c498636..409174a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.protobuf.InvalidProtocolBufferException;
+
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -234,11 +235,14 @@ public class FSDirectory implements Closeable {
     this.xattrMaxSize = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
-    Preconditions.checkArgument(xattrMaxSize >= 0,
-        "Cannot set a negative value for the maximum size of an xattr (%s).",
-        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY);
-    final String unlimited = xattrMaxSize == 0 ? " (unlimited)" : "";
-    LOG.info("Maximum size of an xattr: " + xattrMaxSize + unlimited);
+    Preconditions.checkArgument(xattrMaxSize > 0,
+        "The maximum size of an xattr should be > 0: (%s).",
+        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY);
+    Preconditions.checkArgument(xattrMaxSize <=
+        DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT,
+        "The maximum size of an xattr should be <= maximum size"
+        + " hard limit " + DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_HARD_LIMIT
+        + ": (%s).", DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY);
 
     this.accessTimePrecision = conf.getLong(
         DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
@@ -930,22 +934,19 @@ public class FSDirectory implements Closeable {
     if (!inode.isSymlink()) {
       final XAttrFeature xaf = inode.getXAttrFeature();
       if (xaf != null) {
-        final List<XAttr> xattrs = xaf.getXAttrs();
-        for (XAttr xattr : xattrs) {
-          final String xaName = XAttrHelper.getPrefixName(xattr);
-          if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) {
-            try {
-              final HdfsProtos.ZoneEncryptionInfoProto ezProto =
-                  HdfsProtos.ZoneEncryptionInfoProto.parseFrom(
-                      xattr.getValue());
-              ezManager.unprotectedAddEncryptionZone(inode.getId(),
-                  PBHelper.convert(ezProto.getSuite()),
-                  PBHelper.convert(ezProto.getCryptoProtocolVersion()),
-                  ezProto.getKeyName());
-            } catch (InvalidProtocolBufferException e) {
-              NameNode.LOG.warn("Error parsing protocol buffer of " +
-                  "EZ XAttr " + xattr.getName());
-            }
+        XAttr xattr = xaf.getXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE);
+        if (xattr != null) {
+          try {
+            final HdfsProtos.ZoneEncryptionInfoProto ezProto =
+                HdfsProtos.ZoneEncryptionInfoProto.parseFrom(
+                    xattr.getValue());
+            ezManager.unprotectedAddEncryptionZone(inode.getId(),
+                PBHelper.convert(ezProto.getSuite()),
+                PBHelper.convert(ezProto.getCryptoProtocolVersion()),
+                ezProto.getKeyName());
+          } catch (InvalidProtocolBufferException e) {
+            NameNode.LOG.warn("Error parsing protocol buffer of " +
+                "EZ XAttr " + xattr.getName());
           }
         }
       }
@@ -1121,9 +1122,8 @@ public class FSDirectory implements Closeable {
     final CipherSuite suite = encryptionZone.getSuite();
     final String keyName = encryptionZone.getKeyName();
 
-    XAttr fileXAttr = FSDirXAttrOp.unprotectedGetXAttrByName(inode,
-        snapshotId,
-        CRYPTO_XATTR_FILE_ENCRYPTION_INFO);
+    XAttr fileXAttr = FSDirXAttrOp.unprotectedGetXAttrByPrefixedName(inode,
+        snapshotId, CRYPTO_XATTR_FILE_ENCRYPTION_INFO);
 
     if (fileXAttr == null) {
       NameNode.LOG.warn("Could not find encryption XAttr for file " +
@@ -1481,13 +1481,11 @@ public class FSDirectory implements Closeable {
       FSPermissionChecker pc, INode inode, int snapshotId)
       throws IOException {
     if (pc.isSuperUser()) {
-      for (XAttr xattr : FSDirXAttrOp.getXAttrs(this, inode, snapshotId)) {
-        if (XAttrHelper.getPrefixName(xattr).
-            equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
-          throw new AccessControlException(
-              "Access is denied for " + pc.getUser() + " since the superuser "
-              + "is not allowed to perform this operation.");
-        }
+      if (FSDirXAttrOp.getXAttrByPrefixedName(this, inode, snapshotId,
+          SECURITY_XATTR_UNREADABLE_BY_SUPERUSER) != null) {
+        throw new AccessControlException(
+            "Access is denied for " + pc.getUser() + " since the superuser "
+            + "is not allowed to perform this operation.");
      }
    }
  }
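
FSDirectory now resolves single attributes by their prefixed name (the
lower-cased "namespace.name" form) instead of scanning full lists, which is
what makes the packed representation later in this diff profitable. A small
round-trip sketch of the prefixed-name convention, using only the XAttrHelper
methods shown above (the demo class itself is illustrative):

    import org.apache.hadoop.fs.XAttr;
    import org.apache.hadoop.hdfs.XAttrHelper;

    class PrefixedNameDemo {
      public static void main(String[] args) {
        // buildXAttr splits "user.a1" into namespace USER and name "a1".
        XAttr attr = XAttrHelper.buildXAttr("user.a1", new byte[]{1, 2, 3});
        // getPrefixedName reassembles the lower-cased "namespace.name" form.
        System.out.println(XAttrHelper.getPrefixedName(attr)); // user.a1
      }
    }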

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index 3a9c64e..ac88919 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -131,9 +131,9 @@ public final class FSImageFormatPBINode {
       return b.build();
     }
 
-    public static ImmutableList<XAttr> loadXAttrs(
+    public static List<XAttr> loadXAttrs(
         XAttrFeatureProto proto, final String[] stringTable) {
-      ImmutableList.Builder<XAttr> b = ImmutableList.builder();
+      List<XAttr> b = new ArrayList<>();
       for (XAttrCompactProto xAttrCompactProto : proto.getXAttrsList()) {
         int v = xAttrCompactProto.getName();
         int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
@@ -149,7 +149,7 @@ public final class FSImageFormatPBINode {
             .setName(name).setValue(value).build());
       }
 
-      return b.build();
+      return b;
     }
 
     public static ImmutableList<QuotaByStorageTypeEntry> loadQuotaByStorageTypeEntries(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 5c437c8..5c33c02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
@@ -118,12 +117,10 @@ public class INodeDirectory extends INodeWithAdditionalFields
   @Override
   public byte getLocalStoragePolicyID() {
     XAttrFeature f = getXAttrFeature();
-    ImmutableList<XAttr> xattrs = f == null ? ImmutableList.<XAttr> of() : f
-        .getXAttrs();
-    for (XAttr xattr : xattrs) {
-      if (BlockStoragePolicySuite.isStoragePolicyXAttr(xattr)) {
-        return (xattr.getValue())[0];
-      }
+    XAttr xattr = f == null ? null : f.getXAttr(
+        BlockStoragePolicySuite.getStoragePolicyXAttrPrefixedName());
+    if (xattr != null) {
+      return (xattr.getValue())[0];
     }
     return BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java
index 6d7af82..eb497ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberManager.java
@@ -17,11 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.atomic.AtomicInteger;
-
-
 /**
  * Manage name-to-serial-number maps for users and groups.
  */
 class SerialNumberManager {
   /** This is the only instance of {@link SerialNumberManager}.*/
@@ -41,43 +36,4 @@ class SerialNumberManager {
     getUserSerialNumber(null);
     getGroupSerialNumber(null);
   }
-
-  private static class SerialNumberMap<T> {
-    private final AtomicInteger max = new AtomicInteger(1);
-    private final ConcurrentMap<T, Integer> t2i = new ConcurrentHashMap<T, Integer>();
-    private final ConcurrentMap<Integer, T> i2t = new ConcurrentHashMap<Integer, T>();
-
-    int get(T t) {
-      if (t == null) {
-        return 0;
-      }
-      Integer sn = t2i.get(t);
-      if (sn == null) {
-        sn = max.getAndIncrement();
-        Integer old = t2i.putIfAbsent(t, sn);
-        if (old != null) {
-          return old;
-        }
-        i2t.put(sn, t);
-      }
-      return sn;
-    }
-
-    T get(int i) {
-      if (i == 0) {
-        return null;
-      }
-      T t = i2t.get(i);
-      if (t == null) {
-        throw new IllegalStateException("!i2t.containsKey(" + i
-            + "), this=" + this);
-      }
-      return t;
-    }
-
-    @Override
-    public String toString() {
-      return "max=" + max + ",\n t2i=" + t2i + ",\n i2t=" + i2t;
-    }
-  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberMap.java
new file mode 100644
index 0000000..2965837
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SerialNumberMap.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Map object to serial number.
+ * <p/>
+ * It returns the serial number of an object; if the object does not yet
+ * exist in the map, a new serial number (incremented by 1) is allocated
+ * and mapped to the object. The mapped object can also be looked up by
+ * its serial number.
+ * <p/>
+ * The map is thread-safe.
+ */
+@InterfaceAudience.Private
+public class SerialNumberMap<T> {
+  private final AtomicInteger max = new AtomicInteger(1);
+  private final ConcurrentMap<T, Integer> t2i =
+      new ConcurrentHashMap<T, Integer>();
+  private final ConcurrentMap<Integer, T> i2t =
+      new ConcurrentHashMap<Integer, T>();
+
+  public int get(T t) {
+    if (t == null) {
+      return 0;
+    }
+    Integer sn = t2i.get(t);
+    if (sn == null) {
+      sn = max.getAndIncrement();
+      if (sn < 0) {
+        throw new IllegalStateException("Too many elements!");
+      }
+      Integer old = t2i.putIfAbsent(t, sn);
+      if (old != null) {
+        return old;
+      }
+      i2t.put(sn, t);
+    }
+    return sn;
+  }
+
+  public T get(int i) {
+    if (i == 0) {
+      return null;
+    }
+    T t = i2t.get(i);
+    if (t == null) {
+      throw new IllegalStateException("!i2t.containsKey(" + i
+          + "), this=" + this);
+    }
+    return t;
+  }
+
+  @Override
+  public String toString() {
+    return "max=" + max + ",\n t2i=" + t2i + ",\n i2t=" + i2t;
+  }
+}
\ No newline at end of file
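
SerialNumberMap is what lets the packed format store a small integer per
attribute name instead of a full string; XAttrStorage (later in this diff)
wraps one instance for xattr names via getNameSerialNumber/getName. A short
usage sketch (the demo class is illustrative, not part of the patch):

    // Intern a name once and refer to it by its serial number afterwards.
    class SerialNumberMapDemo {
      public static void main(String[] args) {
        SerialNumberMap<String> names = new SerialNumberMap<>();
        int id = names.get("user.a1");          // first call allocates an id
        assert id == names.get("user.a1");      // repeated calls are stable
        assert "user.a1".equals(names.get(id)); // reverse lookup by id
        assert names.get((String) null) == 0;   // 0 is reserved for null
      }
    }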

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java
index 2e12578..a7f0878 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.XAttrHelper;
 
 import com.google.common.collect.ImmutableList;
 
@@ -28,16 +31,75 @@ import com.google.common.collect.ImmutableList;
  */
 @InterfaceAudience.Private
 public class XAttrFeature implements INode.Feature {
-  public static final ImmutableList<XAttr> EMPTY_ENTRY_LIST =
-      ImmutableList.of();
+  static final int PACK_THRESHOLD = 1024;
+
+  /** The packed bytes for small size XAttrs. */
+  private byte[] attrs;
 
-  private final ImmutableList<XAttr> xAttrs;
+  /**
+   * List to store large size XAttrs.
+   * Typically XAttr values are small, so this list is usually null.
+   */
+  private ImmutableList<XAttr> xAttrs;
+
+  public XAttrFeature(List<XAttr> xAttrs) {
+    if (xAttrs != null && !xAttrs.isEmpty()) {
+      List<XAttr> toPack = new ArrayList<XAttr>();
+      ImmutableList.Builder<XAttr> b = null;
+      for (XAttr attr : xAttrs) {
+        if (attr.getValue() == null ||
+            attr.getValue().length <= PACK_THRESHOLD) {
+          toPack.add(attr);
+        } else {
+          if (b == null) {
+            b = ImmutableList.builder();
+          }
+          b.add(attr);
+        }
+      }
+      this.attrs = XAttrFormat.toBytes(toPack);
+      if (b != null) {
+        this.xAttrs = b.build();
+      }
+    }
+  }
 
-  public XAttrFeature(ImmutableList<XAttr> xAttrs) {
-    this.xAttrs = xAttrs;
+  /**
+   * Get the XAttrs.
+   * @return the XAttrs
+   */
+  public List<XAttr> getXAttrs() {
+    if (xAttrs == null) {
+      return XAttrFormat.toXAttrs(attrs);
+    } else {
+      if (attrs == null) {
+        return xAttrs;
+      } else {
+        List<XAttr> result = new ArrayList<>();
+        result.addAll(XAttrFormat.toXAttrs(attrs));
+        result.addAll(xAttrs);
+        return result;
+      }
+    }
   }
 
-  public ImmutableList<XAttr> getXAttrs() {
-    return xAttrs;
+  /**
+   * Get XAttr by name with prefix.
+   * @param prefixedName xAttr name with prefix
+   * @return the XAttr
+   */
+  public XAttr getXAttr(String prefixedName) {
+    XAttr attr = XAttrFormat.getXAttr(attrs, prefixedName);
+    if (attr == null && xAttrs != null) {
+      XAttr toFind = XAttrHelper.buildXAttr(prefixedName);
+      for (XAttr a : xAttrs) {
+        if (a.equalsIgnoreValue(toFind)) {
+          attr = a;
+          break;
+        }
+      }
+    }
+    return attr;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
new file mode 100644
index 0000000..6167dac
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.hdfs.XAttrHelper;
+
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Ints;
+
+/**
+ * Class to pack XAttrs into byte[].
+ * For each XAttr:
+ *   The first 4 bytes represent the XAttr namespace and name:
+ *     [0:3) - the XAttr namespace
+ *     [3:32) - the name of the entry, which is an ID that points to a
+ *              string in the SerialNumberMap
+ *   The following two bytes represent the length of the XAttr value
+ *   The remaining bytes are the XAttr value
+ */
+class XAttrFormat {
+  private static final int XATTR_NAMESPACE_MASK = (1 << 3) - 1;
+  private static final int XATTR_NAMESPACE_OFFSET = 29;
+  private static final int XATTR_NAME_MASK = (1 << 29) - 1;
+  private static final int XATTR_NAME_ID_MAX = 1 << 29;
+  private static final int XATTR_VALUE_LEN_MAX = 1 << 16;
+  private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
+      XAttr.NameSpace.values();
+
+  /**
+   * Unpack byte[] to XAttrs.
+   *
+   * @param attrs the packed bytes of XAttrs
+   * @return XAttrs list
+   */
+  static List<XAttr> toXAttrs(byte[] attrs) {
+    List<XAttr> xAttrs = new ArrayList<>();
+    if (attrs == null || attrs.length == 0) {
+      return xAttrs;
+    }
+    for (int i = 0; i < attrs.length;) {
+      XAttr.Builder builder = new XAttr.Builder();
+      // big-endian
+      int v = Ints.fromBytes(attrs[i++], attrs[i++], attrs[i++], attrs[i++]);
+      int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
+      int nid = v & XATTR_NAME_MASK;
+      builder.setNameSpace(XATTR_NAMESPACE_VALUES[ns]);
+      builder.setName(XAttrStorage.getName(nid));
+      // mask each byte to avoid sign extension when reading the length
+      int vlen = ((0xff & attrs[i++]) << 8) | (0xff & attrs[i++]);
+      if (vlen > 0) {
+        byte[] value = new byte[vlen];
+        System.arraycopy(attrs, i, value, 0, vlen);
+        builder.setValue(value);
+        i += vlen;
+      }
+      xAttrs.add(builder.build());
+    }
+    return xAttrs;
+  }
+
+  /**
+   * Get XAttr by name with prefix.
+   * Will unpack the byte[] until it finds the specific XAttr.
+   *
+   * @param attrs the packed bytes of XAttrs
+   * @param prefixedName the XAttr name with prefix
+   * @return the XAttr
+   */
+  static XAttr getXAttr(byte[] attrs, String prefixedName) {
+    if (prefixedName == null || attrs == null) {
+      return null;
+    }
+
+    XAttr xAttr = XAttrHelper.buildXAttr(prefixedName);
+    for (int i = 0; i < attrs.length;) {
+      // big-endian
+      int v = Ints.fromBytes(attrs[i++], attrs[i++], attrs[i++], attrs[i++]);
+      int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
+      int nid = v & XATTR_NAME_MASK;
+      XAttr.NameSpace namespace = XATTR_NAMESPACE_VALUES[ns];
+      String name = XAttrStorage.getName(nid);
+      // mask each byte to avoid sign extension when reading the length
+      int vlen = ((0xff & attrs[i++]) << 8) | (0xff & attrs[i++]);
+      if (xAttr.getNameSpace() == namespace &&
+          xAttr.getName().equals(name)) {
+        if (vlen > 0) {
+          byte[] value = new byte[vlen];
+          System.arraycopy(attrs, i, value, 0, vlen);
+          return new XAttr.Builder().setNameSpace(namespace).
+              setName(name).setValue(value).build();
+        }
+        return xAttr;
+      }
+      i += vlen;
+    }
+    return null;
+  }
+
+  /**
+   * Pack the XAttrs to byte[].
+   *
+   * @param xAttrs the XAttrs
+   * @return the packed bytes
+   */
+  static byte[] toBytes(List<XAttr> xAttrs) {
+    if (xAttrs == null || xAttrs.isEmpty()) {
+      return null;
+    }
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    try {
+      for (XAttr a : xAttrs) {
+        int nsOrd = a.getNameSpace().ordinal();
+        Preconditions.checkArgument(nsOrd < 8, "Too many namespaces.");
+        int nid = XAttrStorage.getNameSerialNumber(a.getName());
+        Preconditions.checkArgument(nid < XATTR_NAME_ID_MAX,
+            "Too large serial number of the xattr name");
+
+        // big-endian
+        int v = ((nsOrd & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET)
+            | (nid & XATTR_NAME_MASK);
+        out.write(Ints.toByteArray(v));
+        int vlen = a.getValue() == null ? 0 : a.getValue().length;
+        Preconditions.checkArgument(vlen < XATTR_VALUE_LEN_MAX,
+            "The length of xAttr values is too long.");
+        out.write((byte) (vlen >> 8));
+        out.write((byte) (vlen));
+        if (vlen > 0) {
+          out.write(a.getValue());
+        }
+      }
+    } catch (IOException e) {
+      // ByteArrayOutputStream does not actually throw IOException
+    }
+    return out.toByteArray();
+  }
+}
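
To make the packed layout concrete, here is a self-contained sketch of the
header encoding described in the class comment above: a big-endian int
carrying 3 bits of namespace ordinal above 29 bits of name ID, followed by a
2-byte value length and the value bytes. It restates the XAttrFormat constants
with JDK-only code, so the names here are illustrative:

    import java.nio.ByteBuffer;

    class XAttrPackingDemo {
      static final int NS_MASK = (1 << 3) - 1;    // 3 bits for the namespace
      static final int NS_OFFSET = 29;            // namespace in bits 29..31
      static final int NAME_MASK = (1 << 29) - 1; // 29 bits for the name id

      public static void main(String[] args) {
        int nsOrdinal = 4;    // a namespace ordinal, e.g. RAW
        int nameId = 12345;   // a serial number from SerialNumberMap
        byte[] value = {1, 2, 3};

        // Pack: 4-byte header, 2-byte big-endian length, then the value.
        int header = ((nsOrdinal & NS_MASK) << NS_OFFSET)
            | (nameId & NAME_MASK);
        ByteBuffer buf = ByteBuffer.allocate(4 + 2 + value.length);
        buf.putInt(header).putShort((short) value.length).put(value);

        // Unpack and verify that both header fields round-trip.
        buf.flip();
        int v = buf.getInt();
        assert ((v >> NS_OFFSET) & NS_MASK) == nsOrdinal;
        assert (v & NAME_MASK) == nameId;
        assert (buf.getShort() & 0xffff) == value.length; // unsigned length
      }
    }

Per the PACK_THRESHOLD in XAttrFeature, only attributes whose values are at
most 1024 bytes are packed this way; larger values stay in a regular
ImmutableList.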

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
index 95f943d..de448f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
@@ -72,7 +72,7 @@ public class XAttrPermissionFilter {
         isRawPath && isSuperUser) {
       return;
     }
-    if (XAttrHelper.getPrefixName(xAttr).
+    if (XAttrHelper.getPrefixedName(xAttr).
         equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
       if (xAttr.getValue() != null) {
         throw new AccessControlException("Attempt to set a value for '" +
@@ -82,7 +82,7 @@ public class XAttrPermissionFilter {
       return;
     }
     throw new AccessControlException("User doesn't have permission for xattr: "
-        + XAttrHelper.getPrefixName(xAttr));
+        + XAttrHelper.getPrefixedName(xAttr));
   }
 
   static void checkPermissionForApi(FSPermissionChecker pc,
@@ -115,7 +115,7 @@ public class XAttrPermissionFilter {
     } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
         isSuperUser && isRawPath) {
       filteredXAttrs.add(xAttr);
-    } else if (XAttrHelper.getPrefixName(xAttr).
+    } else if (XAttrHelper.getPrefixedName(xAttr).
        equals(SECURITY_XATTR_UNREADABLE_BY_SUPERUSER)) {
      filteredXAttrs.add(xAttr);
    }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
index e38648d..65a4ada 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
@@ -18,12 +18,9 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.util.ArrayList;
 import java.util.List;
-import java.util.Map;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -34,24 +31,32 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 @InterfaceAudience.Private
 public class XAttrStorage {
 
-  private static final Map<String, String> internedNames = Maps.newHashMap();
+  private static final SerialNumberMap<String> NAME_MAP =
+      new SerialNumberMap<>();
+
+  public static int getNameSerialNumber(String name) {
+    return NAME_MAP.get(name);
+  }
+
+  public static String getName(int n) {
+    return NAME_MAP.get(n);
+  }
 
   /**
-   * Reads the existing extended attributes of an inode. If the
-   * inode does not have an XAttr, then this method
-   * returns an empty list.
+   * Reads the extended attribute of an inode by name with prefix.
    * <p/>
-   * Must be called while holding the FSDirectory read lock.
    *
    * @param inode INode to read
    * @param snapshotId
-   * @return List<XAttr> XAttr list.
+   * @param prefixedName xAttr name with prefix
+   * @return the xAttr
    */
-  public static List<XAttr> readINodeXAttrs(INode inode, int snapshotId) {
+  public static XAttr readINodeXAttrByPrefixedName(INode inode,
+      int snapshotId, String prefixedName) {
     XAttrFeature f = inode.getXAttrFeature(snapshotId);
-    return f == null ? ImmutableList.<XAttr> of() : f.getXAttrs();
+    return f == null ? null : f.getXAttr(prefixedName);
   }
-  
+
   /**
    * Reads the existing extended attributes of an inode.
   * <p/>
@@ -62,7 +67,7 @@ public class XAttrStorage {
    */
   public static List<XAttr> readINodeXAttrs(INodeAttributes inodeAttr) {
     XAttrFeature f = inodeAttr.getXAttrFeature();
-    return f == null ? ImmutableList.<XAttr> of() : f.getXAttrs();
+    return f == null ? new ArrayList<XAttr>(0) : f.getXAttrs();
   }
 
   /**
@@ -76,33 +81,12 @@ public class XAttrStorage {
    */
   public static void updateINodeXAttrs(INode inode,
       List<XAttr> xAttrs, int snapshotId) throws QuotaExceededException {
-    if (xAttrs == null || xAttrs.isEmpty()) {
-      if (inode.getXAttrFeature() != null) {
-        inode.removeXAttrFeature(snapshotId);
-      }
-      return;
-    }
-    // Dedupe the xAttr name and save them into a new interned list
-    List<XAttr> internedXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
-    for (XAttr xAttr : xAttrs) {
-      final String name = xAttr.getName();
-      String internedName = internedNames.get(name);
-      if (internedName == null) {
-        internedName = name;
-        internedNames.put(internedName, internedName);
-      }
-      XAttr internedXAttr = new XAttr.Builder()
-          .setName(internedName)
-          .setNameSpace(xAttr.getNameSpace())
-          .setValue(xAttr.getValue())
-          .build();
-      internedXAttrs.add(internedXAttr);
-    }
-    // Save the list of interned xattrs
-    ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(internedXAttrs);
     if (inode.getXAttrFeature() != null) {
       inode.removeXAttrFeature(snapshotId);
     }
-    inode.addXAttrFeature(new XAttrFeature(newXAttrs), snapshotId);
+    if (xAttrs == null || xAttrs.isEmpty()) {
+      return;
+    }
+    inode.addXAttrFeature(new XAttrFeature(xAttrs), snapshotId);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 252b0f7..0de89cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -312,8 +312,8 @@ public class JsonUtil {
     }
 
     final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("name", XAttrHelper.getPrefixName(xAttr));
-    m.put("value", xAttr.getValue() != null ? 
+    m.put("name", XAttrHelper.getPrefixedName(xAttr));
+    m.put("value", xAttr.getValue() != null ?
         XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
     return m;
   }
@@ -345,7 +345,7 @@ public class JsonUtil {
       throws IOException {
     final List<String> names = Lists.newArrayListWithCapacity(xAttrs.size());
     for (XAttr xAttr : xAttrs) {
-      names.add(XAttrHelper.getPrefixName(xAttr));
+      names.add(XAttrHelper.getPrefixedName(xAttr));
     }
     ObjectMapper mapper = new ObjectMapper();
     String ret = mapper.writeValueAsString(names);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 06a60d4..65ad8b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2205,7 +2205,9 @@
 <property>
   <name>dfs.namenode.fs-limits.max-xattr-size</name>
   <value>16384</value>
   <description>
-    The maximum combined size of the name and value of an extended attribute in bytes.
+    The maximum combined size of the name and value of an extended attribute
+    in bytes. It should be larger than 0, and less than or equal to the
+    maximum size hard limit, which is 32768.
  </description>
</property>
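
Deployments that relied on setting this key to 0 for an unlimited xattr size
must now pick an explicit value: the NameNode refuses to start outside the
(0, 32768] range. A minimal sketch of a programmatic override (the demo class
is illustrative; the key and hard limit come from this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    class XAttrLimitConfigDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Any value in (0, 32768] passes the new Preconditions checks in
        // the FSDirectory constructor; 0 and negatives now fail fast.
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 32768);
      }
    }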

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 4d3cb75..20dda37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -655,7 +655,7 @@ public class TestStartup {
       fail("Expected exception with negative xattr size");
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(
-          "Cannot set a negative value for the maximum size of an xattr", e);
+          "The maximum size of an xattr should be > 0", e);
     } finally {
       conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
           DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
@@ -679,31 +679,6 @@ public class TestStartup {
         cluster.shutdown();
       }
     }
-
-    try {
-      // Set up a logger to check log message
-      final LogVerificationAppender appender = new LogVerificationAppender();
-      final Logger logger = Logger.getRootLogger();
-      logger.addAppender(appender);
-      int count = appender.countLinesWithMessage(
-          "Maximum size of an xattr: 0 (unlimited)");
-      assertEquals("Expected no messages about unlimited xattr size", 0, count);
-
-      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, 0);
-      cluster =
-          new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
-
-      count = appender.countLinesWithMessage(
-          "Maximum size of an xattr: 0 (unlimited)");
-      // happens twice because we format then run
-      assertEquals("Expected unlimited xattr size", 2, count);
-    } finally {
-      conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY,
-          DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_DEFAULT);
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eee0d456/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
new file mode 100644
index 0000000..fcb157e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestXAttrFeature.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.hdfs.XAttrHelper;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class TestXAttrFeature {
+
+  static final String name1 = "system.a1";
+  static final byte[] value1 = {0x31, 0x32, 0x33};
+  static final String name2 = "security.a2";
+  static final byte[] value2 = {0x37, 0x38, 0x39};
+  static final String name3 = "trusted.a3";
+  static final String name4 = "user.a4";
+  static final byte[] value4 = {0x01, 0x02, 0x03};
+  static final String name5 = "user.a5";
+  static final byte[] value5 = randomBytes(2000);
+  static final String name6 = "user.a6";
+  static final byte[] value6 = randomBytes(1800);
+  static final String name7 = "raw.a7";
+  static final byte[] value7 = {0x011, 0x012, 0x013};
+  static final String name8 = "user.a8";
+
+  static byte[] randomBytes(int len) {
+    Random rand = new Random();
+    byte[] bytes = new byte[len];
+    rand.nextBytes(bytes);
+    return bytes;
+  }
+
+  @Test
+  public void testXAttrFeature() throws Exception {
+    List<XAttr> xAttrs = new ArrayList<>();
+    XAttrFeature feature = new XAttrFeature(xAttrs);
+
+    // no XAttrs in the feature
+    assertTrue(feature.getXAttrs().isEmpty());
+
+    // one XAttr in the feature
+    XAttr a1 = XAttrHelper.buildXAttr(name1, value1);
+    xAttrs.add(a1);
+    feature = new XAttrFeature(xAttrs);
+
+    XAttr r1 = feature.getXAttr(name1);
+    assertTrue(a1.equals(r1));
+    assertEquals(feature.getXAttrs().size(), 1);
+
+    // more XAttrs in the feature
+    XAttr a2 = XAttrHelper.buildXAttr(name2, value2);
+    XAttr a3 = XAttrHelper.buildXAttr(name3);
+    XAttr a4 = XAttrHelper.buildXAttr(name4, value4);
+    XAttr a5 = XAttrHelper.buildXAttr(name5, value5);
+    XAttr a6 = XAttrHelper.buildXAttr(name6, value6);
+    XAttr a7 = XAttrHelper.buildXAttr(name7, value7);
+    xAttrs.add(a2);
+    xAttrs.add(a3);
+    xAttrs.add(a4);
+    xAttrs.add(a5);
+    xAttrs.add(a6);
+    xAttrs.add(a7);
+    feature = new XAttrFeature(xAttrs);
+
+    XAttr r2 = feature.getXAttr(name2);
+    assertTrue(a2.equals(r2));
+    XAttr r3 = feature.getXAttr(name3);
+    assertTrue(a3.equals(r3));
+    XAttr r4 = feature.getXAttr(name4);
+    assertTrue(a4.equals(r4));
+    XAttr r5 = feature.getXAttr(name5);
+    assertTrue(a5.equals(r5));
+    XAttr r6 = feature.getXAttr(name6);
+    assertTrue(a6.equals(r6));
+    XAttr r7 = feature.getXAttr(name7);
+    assertTrue(a7.equals(r7));
+    List<XAttr> rs = feature.getXAttrs();
+    assertEquals(rs.size(), xAttrs.size());
+    for (int i = 0; i < rs.size(); i++) {
+      assertTrue(xAttrs.contains(rs.get(i)));
+    }
+
+    // get a non-existent XAttr from the feature
+    XAttr r8 = feature.getXAttr(name8);
+    assertTrue(r8 == null);
+  }
+}