From: weichiu@apache.org
To: common-commits@hadoop.apache.org
Date: Tue, 01 Oct 2019 23:56:57 +0000
Subject: [hadoop] branch branch-3.1 updated: HDFS-14192. Track missing DFS operations in Statistics and StorageStatistics. Contributed by Ayush Saxena.

This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 2a3eb04  HDFS-14192. Track missing DFS operations in Statistics and StorageStatistics. Contributed by Ayush Saxena.
2a3eb04 is described below

commit 2a3eb0499680f48276c3ca22d6888759c51a21f1
Author: Inigo Goiri
AuthorDate: Wed Jan 16 10:14:22 2019 -0800

    HDFS-14192. Track missing DFS operations in Statistics and StorageStatistics. Contributed by Ayush Saxena.

    (cherry picked from commit f048512bb89f4d1edbb54360622adc61ffacbde3)
    (cherry picked from commit e59ced9c60a7007551ee8a9f83ce8e266e4dbae1)

     Conflicts:
        hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
---
 .../apache/hadoop/hdfs/DFSOpsCountStatistics.java  |  13 +++
 .../apache/hadoop/hdfs/DistributedFileSystem.java  |  26 +++++
 .../hadoop/hdfs/TestDistributedFileSystem.java     | 108 +++++++++++++++++++++
 3 files changed, 147 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
index b9852ba..56d2613 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
@@ -41,12 +41,15 @@ public class DFSOpsCountStatistics extends StorageStatistics {

   /** This is for counting distributed file system operations. */
   public enum OpType {
+    ADD_CACHE_DIRECTIVE("op_add_cache_directive"),
+    ADD_CACHE_POOL("op_add_cache_pool"),
     ADD_EC_POLICY("op_add_ec_policy"),
     ALLOW_SNAPSHOT("op_allow_snapshot"),
     APPEND(CommonStatisticNames.OP_APPEND),
     CONCAT("op_concat"),
     COPY_FROM_LOCAL_FILE(CommonStatisticNames.OP_COPY_FROM_LOCAL_FILE),
     CREATE(CommonStatisticNames.OP_CREATE),
+    CREATE_ENCRYPTION_ZONE("op_create_encryption_zone"),
     CREATE_NON_RECURSIVE(CommonStatisticNames.OP_CREATE_NON_RECURSIVE),
     CREATE_SNAPSHOT("op_create_snapshot"),
     CREATE_SYM_LINK("op_create_symlink"),
@@ -61,6 +64,7 @@ public class DFSOpsCountStatistics extends StorageStatistics {
     GET_EC_CODECS("op_get_ec_codecs"),
     GET_EC_POLICY("op_get_ec_policy"),
     GET_EC_POLICIES("op_get_ec_policies"),
+    GET_ENCRYPTION_ZONE("op_get_encryption_zone"),
     GET_FILE_BLOCK_LOCATIONS("op_get_file_block_locations"),
     GET_FILE_CHECKSUM(CommonStatisticNames.OP_GET_FILE_CHECKSUM),
     GET_FILE_LINK_STATUS("op_get_file_link_status"),
@@ -72,8 +76,13 @@ public class DFSOpsCountStatistics extends StorageStatistics {
     GET_STORAGE_POLICY("op_get_storage_policy"),
     GET_TRASH_ROOT("op_get_trash_root"),
     GET_XATTR("op_get_xattr"),
+    LIST_CACHE_DIRECTIVE("op_list_cache_directive"),
+    LIST_CACHE_POOL("op_list_cache_pool"),
+    LIST_ENCRYPTION_ZONE("op_list_encryption_zone"),
     LIST_LOCATED_STATUS(CommonStatisticNames.OP_LIST_LOCATED_STATUS),
     LIST_STATUS(CommonStatisticNames.OP_LIST_STATUS),
+    MODIFY_CACHE_POOL("op_modify_cache_pool"),
+    MODIFY_CACHE_DIRECTIVE("op_modify_cache_directive"),
     MKDIRS(CommonStatisticNames.OP_MKDIRS),
     MODIFY_ACL_ENTRIES(CommonStatisticNames.OP_MODIFY_ACL_ENTRIES),
     OPEN(CommonStatisticNames.OP_OPEN),
@@ -81,6 +90,8 @@ public class DFSOpsCountStatistics extends StorageStatistics {
     PRIMITIVE_MKDIR("op_primitive_mkdir"),
     REMOVE_ACL(CommonStatisticNames.OP_REMOVE_ACL),
     REMOVE_ACL_ENTRIES(CommonStatisticNames.OP_REMOVE_ACL_ENTRIES),
+    REMOVE_CACHE_DIRECTIVE("op_remove_cache_directive"),
+    REMOVE_CACHE_POOL("op_remove_cache_pool"),
     REMOVE_DEFAULT_ACL(CommonStatisticNames.OP_REMOVE_DEFAULT_ACL),
     REMOVE_EC_POLICY("op_remove_ec_policy"),
     REMOVE_XATTR("op_remove_xattr"),
@@ -91,6 +102,8 @@ public class DFSOpsCountStatistics extends StorageStatistics {
     SET_EC_POLICY("op_set_ec_policy"),
     SET_OWNER(CommonStatisticNames.OP_SET_OWNER),
     SET_PERMISSION(CommonStatisticNames.OP_SET_PERMISSION),
+    SET_QUOTA_BYTSTORAGEYPE("op_set_quota_bystoragetype"),
+    SET_QUOTA_USAGE("op_set_quota_usage"),
     SET_REPLICATION("op_set_replication"),
     SET_STORAGE_POLICY("op_set_storagePolicy"),
     SET_TIMES(CommonStatisticNames.OP_SET_TIMES),
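The op_* keys above are published through the generic org.apache.hadoop.fs.StorageStatistics API, so the new counters can be read back from any client process without HDFS-specific plumbing. A rough sketch (the DumpDfsOpCounters helper class is hypothetical, not part of this change):

    import java.util.Iterator;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.StorageStatistics;
    import org.apache.hadoop.fs.StorageStatistics.LongStatistic;

    public class DumpDfsOpCounters {
      // Walks every StorageStatistics instance registered in this JVM and
      // prints its counters, including the op_* entries defined above.
      public static void dump() {
        for (StorageStatistics ss : FileSystem.getGlobalStorageStatistics()) {
          Iterator<LongStatistic> it = ss.getLongStatistics();
          while (it.hasNext()) {
            LongStatistic stat = it.next();
            System.out.println(ss.getName() + ": " + stat.getName()
                + " = " + stat.getValue());
          }
        }
      }
    }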
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 11ff0c2..d4b6bd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -997,6 +997,8 @@ public class DistributedFileSystem extends FileSystem
    */
   public void setQuota(Path src, final long namespaceQuota,
       final long storagespaceQuota) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SET_QUOTA_USAGE);
     Path absF = fixRelativePart(src);
     new FileSystemLinkResolver<Void>() {
       @Override
@@ -1025,6 +1027,8 @@ public class DistributedFileSystem extends FileSystem
   public void setQuotaByStorageType(Path src, final StorageType type,
       final long quota)
       throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SET_QUOTA_BYTSTORAGEYPE);
     Path absF = fixRelativePart(src);
     new FileSystemLinkResolver<Void>() {
       @Override
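Each instrumented method follows the same two-line pattern: bump the per-FileSystem read or write counter, then bump the operation-specific counter keyed by OpType. Seen from a client, a quota change is now counted in both places; a minimal usage sketch, with the path and limits purely illustrative:

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class QuotaCounterSketch {
      // After these calls, op_set_quota_usage and op_set_quota_bystoragetype
      // are each one higher, and the write-op counter has grown by two.
      static void applyQuotas(DistributedFileSystem dfs) throws IOException {
        Path dir = new Path("/data");
        dfs.setQuota(dir, 100, 1000);                          // namespace + space quota
        dfs.setQuotaByStorageType(dir, StorageType.SSD, 2000); // per-storage-type quota
      }
    }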
@@ -2220,6 +2224,8 @@ public class DistributedFileSystem extends FileSystem
    */
   public long addCacheDirective(
       CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.ADD_CACHE_DIRECTIVE);
     Preconditions.checkNotNull(info.getPath());
     Path path = new Path(getPathName(fixRelativePart(info.getPath()))).
         makeQualified(getUri(), getWorkingDirectory());
@@ -2247,6 +2253,8 @@ public class DistributedFileSystem extends FileSystem
    */
   public void modifyCacheDirective(
       CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.MODIFY_CACHE_DIRECTIVE);
     if (info.getPath() != null) {
       info = new CacheDirectiveInfo.Builder(info).
           setPath(new Path(getPathName(fixRelativePart(info.getPath()))).
@@ -2263,6 +2271,8 @@ public class DistributedFileSystem extends FileSystem
    */
   public void removeCacheDirective(long id)
       throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.REMOVE_CACHE_DIRECTIVE);
     dfs.removeCacheDirective(id);
   }

@@ -2275,6 +2285,8 @@ public class DistributedFileSystem extends FileSystem
    */
   public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
       CacheDirectiveInfo filter) throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.LIST_CACHE_DIRECTIVE);
     if (filter == null) {
       filter = new CacheDirectiveInfo.Builder().build();
     }
@@ -2315,6 +2327,8 @@ public class DistributedFileSystem extends FileSystem
    * If the request could not be completed.
    */
   public void addCachePool(CachePoolInfo info) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.ADD_CACHE_POOL);
     CachePoolInfo.validate(info);
     dfs.addCachePool(info);
   }
@@ -2328,6 +2342,8 @@ public class DistributedFileSystem extends FileSystem
    * If the request could not be completed.
    */
   public void modifyCachePool(CachePoolInfo info) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.MODIFY_CACHE_POOL);
     CachePoolInfo.validate(info);
     dfs.modifyCachePool(info);
   }
@@ -2341,6 +2357,8 @@ public class DistributedFileSystem extends FileSystem
    * if the cache pool did not exist, or could not be removed.
    */
   public void removeCachePool(String poolName) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.REMOVE_CACHE_POOL);
     CachePoolInfo.validateName(poolName);
     dfs.removeCachePool(poolName);
   }
@@ -2354,6 +2372,8 @@ public class DistributedFileSystem extends FileSystem
    * If there was an error listing cache pools.
    */
   public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.LIST_CACHE_POOL);
     return dfs.listCachePools();
   }
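The cache-management calls get the same treatment, with the list operations counted as reads and everything else as writes. A sketch of a round trip that touches most of the new counters exactly once (the pool name and path are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

    public class CacheCounterSketch {
      // Each call below increments one op_* counter in addition to the
      // plain read/write counters tracked per FileSystem instance.
      static void exercise(DistributedFileSystem dfs) throws IOException {
        dfs.addCachePool(new CachePoolInfo("pool1"));             // op_add_cache_pool
        long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
            .setPath(new Path("/hot")).setPool("pool1").build()); // op_add_cache_directive
        dfs.listCacheDirectives(null);                            // op_list_cache_directive
        dfs.removeCacheDirective(id);                             // op_remove_cache_directive
        dfs.removeCachePool("pool1");                             // op_remove_cache_pool
      }
    }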
@@ -2495,6 +2515,8 @@ public class DistributedFileSystem extends FileSystem
   /* HDFS only */
   public void createEncryptionZone(final Path path, final String keyName)
       throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.CREATE_ENCRYPTION_ZONE);
     Path absF = fixRelativePart(path);
     new FileSystemLinkResolver<Void>() {
       @Override
@@ -2522,6 +2544,8 @@ public class DistributedFileSystem extends FileSystem
   /* HDFS only */
   public EncryptionZone getEZForPath(final Path path)
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_ENCRYPTION_ZONE);
     Preconditions.checkNotNull(path);
     Path absF = fixRelativePart(path);
     return new FileSystemLinkResolver<EncryptionZone>() {
@@ -2549,6 +2573,8 @@ public class DistributedFileSystem extends FileSystem
   /* HDFS only */
   public RemoteIterator<EncryptionZone> listEncryptionZones()
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.LIST_ENCRYPTION_ZONE);
     return dfs.listEncryptionZones();
   }
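The encryption-zone calls are instrumented the same way, counted as reads or writes to match their RPC semantics. A small sketch, assuming an encryption key named "key1" already exists in the configured KeyProvider:

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EzCounterSketch {
      // Each call increments its new op_* counter plus the matching
      // read-op or write-op counter; the zone path is illustrative.
      static void exercise(DistributedFileSystem dfs) throws IOException {
        Path zone = new Path("/secure");
        dfs.mkdirs(zone);
        dfs.createEncryptionZone(zone, "key1"); // op_create_encryption_zone (write)
        dfs.getEZForPath(zone);                 // op_get_encryption_zone (read)
        dfs.listEncryptionZones();              // op_list_encryption_zone (read)
      }
    }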
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index e189589..25cc817 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -37,6 +37,7 @@ import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.SocketTimeoutException;
 import java.net.URI;
+import java.security.NoSuchAlgorithmException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -85,6 +86,8 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -801,6 +804,111 @@ public class TestDistributedFileSystem {
   }

   @Test
+  public void testStatistics2() throws IOException, NoSuchAlgorithmException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    File tmpDir = GenericTestUtils.getTestDir(UUID.randomUUID().toString());
+    final Path jksPath = new Path(tmpDir.toString(), "test.jks");
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
+
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      Path dir = new Path("/testStat");
+      dfs.mkdirs(dir);
+      int readOps = 0;
+      int writeOps = 0;
+      FileSystem.clearStatistics();
+
+      // Quota Commands.
+      long opCount = getOpStatistics(OpType.SET_QUOTA_USAGE);
+      dfs.setQuota(dir, 100, 1000);
+      checkStatistics(dfs, readOps, ++writeOps, 0);
+      checkOpStatistics(OpType.SET_QUOTA_USAGE, opCount + 1);
+
+      opCount = getOpStatistics(OpType.SET_QUOTA_BYTSTORAGEYPE);
+      dfs.setQuotaByStorageType(dir, StorageType.DEFAULT, 2000);
+      checkStatistics(dfs, readOps, ++writeOps, 0);
+      checkOpStatistics(OpType.SET_QUOTA_BYTSTORAGEYPE, opCount + 1);
+
+      opCount = getOpStatistics(OpType.GET_QUOTA_USAGE);
+      dfs.getQuotaUsage(dir);
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.GET_QUOTA_USAGE, opCount + 1);
+
+      // Cache Commands.
+      CachePoolInfo cacheInfo =
+          new CachePoolInfo("pool1").setMode(new FsPermission((short) 0));
+
+      opCount = getOpStatistics(OpType.ADD_CACHE_POOL);
+      dfs.addCachePool(cacheInfo);
+      checkStatistics(dfs, readOps, ++writeOps, 0);
+      checkOpStatistics(OpType.ADD_CACHE_POOL, opCount + 1);
+
+      CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
+          .setPath(new Path(".")).setPool("pool1").build();
+
+      opCount = getOpStatistics(OpType.ADD_CACHE_DIRECTIVE);
+      long id = dfs.addCacheDirective(directive);
+      checkStatistics(dfs, readOps, ++writeOps, 0);
+      checkOpStatistics(OpType.ADD_CACHE_DIRECTIVE, opCount + 1);
+
+      opCount = getOpStatistics(OpType.LIST_CACHE_DIRECTIVE);
+      dfs.listCacheDirectives(null);
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.LIST_CACHE_DIRECTIVE, opCount + 1);
+
+      opCount = getOpStatistics(OpType.MODIFY_CACHE_DIRECTIVE);
+      dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id)
+          .setReplication((short) 2).build());
+      checkStatistics(dfs, readOps, ++writeOps, 0);
+      checkOpStatistics(OpType.MODIFY_CACHE_DIRECTIVE, opCount + 1);
+
+      opCount = getOpStatistics(OpType.REMOVE_CACHE_DIRECTIVE);
+      dfs.removeCacheDirective(id);
+      checkStatistics(dfs, readOps, ++writeOps, 0);
+      checkOpStatistics(OpType.REMOVE_CACHE_DIRECTIVE, opCount + 1);
+
+      opCount = getOpStatistics(OpType.MODIFY_CACHE_POOL);
+      dfs.modifyCachePool(cacheInfo);
+      checkStatistics(dfs, readOps, ++writeOps, 0);
+      checkOpStatistics(OpType.MODIFY_CACHE_POOL, opCount + 1);
+
+      opCount = getOpStatistics(OpType.LIST_CACHE_POOL);
+      dfs.listCachePools();
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.LIST_CACHE_POOL, opCount + 1);
+
+      opCount = getOpStatistics(OpType.REMOVE_CACHE_POOL);
+      dfs.removeCachePool(cacheInfo.getPoolName());
+      checkStatistics(dfs, readOps, ++writeOps, 0);
+      checkOpStatistics(OpType.REMOVE_CACHE_POOL, opCount + 1);
+
+      // Crypto Commands.
+      final KeyProvider provider =
+          cluster.getNameNode().getNamesystem().getProvider();
+      final KeyProvider.Options options = KeyProvider.options(conf);
+      provider.createKey("key", options);
+      provider.flush();
+
+      opCount = getOpStatistics(OpType.CREATE_ENCRYPTION_ZONE);
+      dfs.createEncryptionZone(dir, "key");
+      checkStatistics(dfs, readOps, ++writeOps, 0);
+      checkOpStatistics(OpType.CREATE_ENCRYPTION_ZONE, opCount + 1);
+
+      opCount = getOpStatistics(OpType.LIST_ENCRYPTION_ZONE);
+      dfs.listEncryptionZones();
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.LIST_ENCRYPTION_ZONE, opCount + 1);
+
+      opCount = getOpStatistics(OpType.GET_ENCRYPTION_ZONE);
+      dfs.getEZForPath(dir);
+      checkStatistics(dfs, ++readOps, writeOps, 0);
+      checkOpStatistics(OpType.GET_ENCRYPTION_ZONE, opCount + 1);
+    }
+  }
+
+  @Test
   public void testECStatistics() throws IOException {
     try (MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(getTestConfiguration()).build()) {