From: kihwal@apache.org
To: common-commits@hadoop.apache.org
Subject: hadoop git commit: HDFS-10940. Reduce performance penalty of block caching when not used. Contributed by Daryn Sharp.
Date: Mon, 3 Oct 2016 16:41:40 +0000 (UTC)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ebfbce3b5 -> 03b797a6a


HDFS-10940. Reduce performance penalty of block caching when not used. Contributed by Daryn Sharp.
(cherry picked from commit 744208431f7365bf054e6b773b86af2583001e1d)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03b797a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03b797a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03b797a6

Branch: refs/heads/branch-2
Commit: 03b797a6ac2b1f99c6baebf4729bae22aa267692
Parents: ebfbce3
Author: Kihwal Lee
Authored: Mon Oct 3 11:41:19 2016 -0500
Committer: Kihwal Lee
Committed: Mon Oct 3 11:41:19 2016 -0500

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/BlockManager.java |  9 ++++++++-
 .../hadoop/hdfs/server/namenode/CacheManager.java | 12 +++++++++++-
 .../server/namenode/FSDirStatAndListingOp.java    | 18 +-----------------
 .../hdfs/server/namenode/TestCacheDirectives.java | 10 ++++++++++
 4 files changed, 30 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03b797a6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ad0a827..2b9f2d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -94,6 +94,7 @@ import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
+import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1051,9 +1052,15 @@ public class BlockManager implements BlockStatsMXBean {
           fileSizeExcludeBlocksUnderConstruction, mode);
       isComplete = true;
     }
-    return new LocatedBlocks(
+    LocatedBlocks locations = new LocatedBlocks(
         fileSizeExcludeBlocksUnderConstruction,
         isFileUnderConstruction, locatedblocks, lastlb, isComplete, feInfo);
+    // Set caching information for the located blocks.
+    CacheManager cm = namesystem.getCacheManager();
+    if (cm != null) {
+      cm.setCachedLocations(locations);
+    }
+    return locations;
   }
 }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03b797a6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index 366dd9b..24bf751 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -902,7 +903,16 @@ public final class CacheManager {
     return new BatchedListEntries(results, false);
   }
 
-  public void setCachedLocations(LocatedBlock block) {
+  public void setCachedLocations(LocatedBlocks locations) {
+    // don't attempt lookups if there are no cached blocks
+    if (cachedBlocks.size() > 0) {
+      for (LocatedBlock lb : locations.getLocatedBlocks()) {
+        setCachedLocations(lb);
+      }
+    }
+  }
+
+  private void setCachedLocations(LocatedBlock block) {
     CachedBlock cachedBlock =
         new CachedBlock(block.getBlock().getBlockId(),
             (short)0, false);


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03b797a6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index dfaacc6..6c7a92f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -154,7 +153,6 @@ class FSDirStatAndListingOp {
         "Negative offset is not supported. File: " + src);
     Preconditions.checkArgument(length >= 0,
         "Negative length is not supported. File: " + src);
-    CacheManager cm = fsd.getFSNamesystem().getCacheManager();
     BlockManager bm = fsd.getBlockManager();
     fsd.readLock();
     try {
@@ -186,11 +184,6 @@ class FSDirStatAndListingOp {
           inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset,
           length, needBlockToken, iip.isSnapshot(), feInfo);
 
-      // Set caching information for the located blocks.
-      for (LocatedBlock lb : blocks.getLocatedBlocks()) {
-        cm.setCachedLocations(lb);
-      }
-
       final long now = now();
       boolean updateAccessTime = fsd.isAccessTimeSupported()
           && !iip.isSnapshot()
@@ -454,7 +447,7 @@ class FSDirStatAndListingOp {
         node.asDirectory().getChildrenNum(snapshot) : 0;
 
     INodeAttributes nodeAttrs = fsd.getAttributes(iip);
-    HdfsFileStatus status = createFileStatus(
+    return createFileStatus(
         size,
         node.isDirectory(),
         replication,
@@ -471,15 +464,6 @@ class FSDirStatAndListingOp {
         feInfo,
         storagePolicy,
         loc);
-
-    // Set caching information for the located blocks.
-    if (loc != null) {
-      CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
-      for (LocatedBlock lb: loc.getLocatedBlocks()) {
-        cacheManager.setCachedLocations(lb);
-      }
-    }
-    return status;
   }
 
   private static HdfsFileStatus createFileStatus(long length, boolean isdir,


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03b797a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index 5dd192f..a9988b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@@ -89,6 +90,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 import com.google.common.base.Supplier;
 
@@ -1473,4 +1475,12 @@ public class TestCacheDirectives {
       DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, false);
     }
   }
+
+  @Test
+  public void testNoLookupsWhenNotUsed() throws Exception {
+    CacheManager cm = cluster.getNamesystem().getCacheManager();
+    LocatedBlocks locations = Mockito.mock(LocatedBlocks.class);
+    cm.setCachedLocations(locations);
+    Mockito.verifyZeroInteractions(locations);
+  }
 }