Date: Mon, 21 Jun 2021 07:27:59 +0000
From: vjasani@apache.org
To: commits@hbase.apache.org
Reply-To: dev@hbase.apache.org
Subject: [hbase] branch branch-2 updated: HBASE-25698 Fixing IllegalReferenceCountException when using TinyLfuBlockCache (#3215)

This is an automated email from the ASF dual-hosted git repository.
vjasani pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git

The following commit(s) were added to refs/heads/branch-2 by this push:
     new 41a6832  HBASE-25698 Fixing IllegalReferenceCountException when using TinyLfuBlockCache (#3215)
41a6832 is described below

commit 41a68325c28e3c38f1a46fae79559c526743c30d
Author: Viraj Jasani
AuthorDate: Mon Jun 21 11:50:33 2021 +0530

    HBASE-25698 Fixing IllegalReferenceCountException when using TinyLfuBlockCache (#3215)

    Signed-off-by: Andrew Purtell
    Signed-off-by: Anoop Sam John
    Signed-off-by: Michael Stack
---
 .../hadoop/hbase/io/hfile/TinyLfuBlockCache.java | 46 ++++++++++++--
 .../apache/hadoop/hbase/io/hfile/TestHFile.java  | 74 +++++++++++++++++++++-
 2 files changed, 112 insertions(+), 8 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java
index a0dc30c..6f914f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java
@@ -158,7 +158,13 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache {
   @Override
   public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics) {
-    Cacheable value = cache.getIfPresent(cacheKey);
+    Cacheable value = cache.asMap().computeIfPresent(cacheKey, (blockCacheKey, cacheable) -> {
+      // It will be referenced by the RPC path, so increase the refCnt here. NOTICE: the retain
+      // must happen inside this block; if we retain outside map#computeIfPresent, evictBlock may
+      // remove and release the block first, and retaining a block with refCnt=0 is disallowed.
+      cacheable.retain();
+      return cacheable;
+    });
     if (value == null) {
       if (repeat) {
         return null;
       }
@@ -169,9 +175,6 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache {
     if (victimCache != null) {
       value = victimCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
       if ((value != null) && caching) {
-        if ((value instanceof HFileBlock) && ((HFileBlock) value).isSharedMem()) {
-          value = HFileBlock.deepCloneOnHeap((HFileBlock) value);
-        }
         cacheBlock(cacheKey, value);
       }
     }
@@ -192,17 +195,48 @@ public final class TinyLfuBlockCache implements FirstLevelBlockCache {
       // If there are a lot of blocks that are too big this can make the logs too noisy (2% logged)
       if (stats.failInsert() % 50 == 0) {
         LOG.warn(String.format(
-            "Trying to cache too large a block %s @ %,d is %,d which is larger than %,d",
-            key.getHfileName(), key.getOffset(), value.heapSize(), DEFAULT_MAX_BLOCK_SIZE));
+          "Trying to cache too large a block %s @ %,d is %,d which is larger than %,d",
+          key.getHfileName(), key.getOffset(), value.heapSize(), DEFAULT_MAX_BLOCK_SIZE));
       }
     } else {
+      value = asReferencedHeapBlock(value);
       cache.put(key, value);
     }
   }
 
+  /**
+   * The block cached in TinyLfuBlockCache will always be a heap block: on the one hand, heap
+   * access is faster than off-heap access, so the small index or meta blocks cached in
+   * CombinedBlockCache benefit a lot; on the other hand, the TinyLfuBlockCache size is always
+   * calculated from the total heap size, so caching an off-heap block in TinyLfuBlockCache would
+   * throw that accounting off. Here we clone the block into a heap block if it is an off-heap
+   * block, and otherwise just use the original block.
+   * The key point is to maintain the refCnt of the block (HBASE-22127):
+   * 1. if we cache the cloned heap block, its refCnt is a brand-new one, which is easy to handle;
+   * 2. if we cache the original heap block, we know it is not tracked in ByteBuffAllocator's
+   *    reservoir, so once both the RPC path and TinyLfuBlockCache release the block it can be
+   *    garbage collected by the JVM, hence the retain here.
+   *
+   * @param buf the original block
+   * @return a block backed by heap memory
+   */
+  private Cacheable asReferencedHeapBlock(Cacheable buf) {
+    if (buf instanceof HFileBlock) {
+      HFileBlock blk = ((HFileBlock) buf);
+      if (blk.isSharedMem()) {
+        return HFileBlock.deepCloneOnHeap(blk);
+      }
+    }
+    // The block will be referenced by this TinyLfuBlockCache, so increase its refCnt here.
+    return buf.retain();
+  }
+
   @Override
   public boolean evictBlock(BlockCacheKey cacheKey) {
     Cacheable value = cache.asMap().remove(cacheKey);
+    if (value != null) {
+      value.release();
+    }
     return (value != null);
   }
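The key change above is that the RPC-path retain() now happens inside Caffeine's asMap().computeIfPresent(), so it is atomic with the lookup: a concurrent evictBlock() (asMap().remove() followed by release()) can no longer run between the lookup and the retain and leave the caller holding a block whose refCnt already dropped to 0, which is what surfaced as the IllegalReferenceCountException in the subject. Below is a minimal, standalone sketch of that retain-inside-computeIfPresent pattern. It is not HBase code: RefCountedBlock and RetainInsideComputeIfPresent are hypothetical names standing in for Cacheable/HFileBlock and the cache, and the only assumption is that the Caffeine library is on the classpath.

import java.util.concurrent.atomic.AtomicInteger;

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

/** Hypothetical stand-in for Cacheable/HFileBlock; it only models the refCnt rules. */
final class RefCountedBlock {
  private final AtomicInteger refCnt = new AtomicInteger(1); // creator holds the first reference

  RefCountedBlock retain() {
    refCnt.updateAndGet(c -> {
      if (c == 0) {
        // Retaining an already-released block is the illegal state the fix avoids.
        throw new IllegalStateException("refCnt=0, block already released");
      }
      return c + 1;
    });
    return this;
  }

  void release() {
    refCnt.decrementAndGet(); // at 0 the backing buffer could be recycled
  }

  int refCnt() {
    return refCnt.get();
  }
}

public class RetainInsideComputeIfPresent {
  public static void main(String[] args) {
    Cache<String, RefCountedBlock> cache = Caffeine.newBuilder().maximumSize(1_000).build();
    // The cache takes over the block's initial reference (refCnt=1).
    cache.put("block-0", new RefCountedBlock());

    // Read path: the retain happens atomically with the lookup, so a concurrent eviction
    // (asMap().remove() followed by release()) cannot run between the lookup and the retain.
    RefCountedBlock value = cache.asMap().computeIfPresent("block-0", (key, block) -> {
      block.retain(); // the caller (the RPC path in HBase) now holds its own reference
      return block;
    });
    if (value != null) {
      try {
        System.out.println("refCnt after lookup: " + value.refCnt()); // 2: cache + caller
      } finally {
        value.release(); // caller is done; the cache still holds its reference
      }
    }

    // Eviction path mirrors evictBlock(): remove from the map, then drop the cache's reference.
    RefCountedBlock evicted = cache.asMap().remove("block-0");
    if (evicted != null) {
      evicted.release(); // refCnt drops to 0; the backing buffer could now be recycled
    }
  }
}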
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
index 2a65a74..6253dd1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MAX_BUFFER_COUNT_KEY;
 import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MIN_ALLOCATE_SIZE_KEY;
+import static org.apache.hadoop.hbase.io.hfile.BlockCacheFactory.BLOCKCACHE_POLICY_KEY;
 import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -205,10 +206,11 @@ public class TestHFile {
     lru.shutdown();
   }
 
-  private BlockCache initCombinedBlockCache() {
+  private BlockCache initCombinedBlockCache(final String l1CachePolicy) {
     Configuration that = HBaseConfiguration.create(conf);
     that.setFloat(BUCKET_CACHE_SIZE_KEY, 32); // 32MB for bucket cache.
     that.set(BUCKET_CACHE_IOENGINE_KEY, "offheap");
+    that.set(BLOCKCACHE_POLICY_KEY, l1CachePolicy);
     BlockCache bc = BlockCacheFactory.createBlockCache(that);
     Assert.assertNotNull(bc);
     Assert.assertTrue(bc instanceof CombinedBlockCache);
@@ -225,7 +227,7 @@ public class TestHFile {
     fillByteBuffAllocator(alloc, bufCount);
     Path storeFilePath = writeStoreFile();
     // Open the file reader with CombinedBlockCache
-    BlockCache combined = initCombinedBlockCache();
+    BlockCache combined = initCombinedBlockCache("LRU");
     conf.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, true);
     CacheConfig cacheConfig = new CacheConfig(conf, null, combined, alloc);
     HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf);
@@ -881,4 +883,72 @@ public class TestHFile {
       writer.close();
     }
   }
+
+  /**
+   * Test case for CombinedBlockCache with TinyLfu as L1 cache
+   */
+  @Test
+  public void testReaderWithTinyLfuCombinedBlockCache() throws Exception {
+    testReaderCombinedCache("TinyLfu");
+  }
+
+  /**
+   * Test case for CombinedBlockCache with AdaptiveLRU as L1 cache
+   */
+  @Test
+  public void testReaderWithAdaptiveLruCombinedBlockCache() throws Exception {
+    testReaderCombinedCache("AdaptiveLRU");
+  }
+
+  /**
+   * Test case for CombinedBlockCache with LRU as L1 cache
+   */
+  @Test
+  public void testReaderWithLruCombinedBlockCache() throws Exception {
+    testReaderCombinedCache("LRU");
+  }
+
+  private void testReaderCombinedCache(final String l1CachePolicy) throws Exception {
+    int bufCount = 1024;
+    int blockSize = 64 * 1024;
+    ByteBuffAllocator alloc = initAllocator(true, bufCount, blockSize, 0);
+    fillByteBuffAllocator(alloc, bufCount);
+    Path storeFilePath = writeStoreFile();
+    // Open the file reader with CombinedBlockCache
+    BlockCache combined = initCombinedBlockCache(l1CachePolicy);
+    conf.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, true);
+    CacheConfig cacheConfig = new CacheConfig(conf, null, combined, alloc);
+    HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf);
+    long offset = 0;
+    Cacheable cachedBlock = null;
+    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
+      BlockCacheKey key = new BlockCacheKey(storeFilePath.getName(), offset);
+      HFileBlock block = reader.readBlock(offset, -1, true, true, false, true, null, null);
+      offset += block.getOnDiskSizeWithHeader();
+      // Read the cached block.
+      cachedBlock = combined.getBlock(key, false, false, true);
+      try {
+        Assert.assertNotNull(cachedBlock);
+        Assert.assertTrue(cachedBlock instanceof HFileBlock);
+        HFileBlock hfb = (HFileBlock) cachedBlock;
+        // Data block will be cached in BucketCache, so it should be an off-heap block.
+        if (hfb.getBlockType().isData()) {
+          Assert.assertTrue(hfb.isSharedMem());
+        } else if (!l1CachePolicy.equals("TinyLfu")) {
+          Assert.assertFalse(hfb.isSharedMem());
+        }
+      } finally {
+        cachedBlock.release();
+      }
+      block.release(); // return the ByteBuffer back to the allocator.
+    }
+    reader.close();
+    combined.shutdown();
+    if (cachedBlock != null) {
+      Assert.assertEquals(0, cachedBlock.refCnt());
+    }
+    Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
+    alloc.clean();
+  }
 }
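The new tests exercise this path by building a CombinedBlockCache with each supported L1 policy (LRU, AdaptiveLRU, TinyLfu) in front of an off-heap BucketCache. For reference, a standalone sketch of the equivalent cache construction follows. It is only a sketch: the class name CombinedCacheSetupSketch is illustrative, it assumes hbase-server (which bundles Caffeine for the TinyLfu policy) is on the classpath plus enough direct memory for the off-heap bucket cache, and the sizes simply mirror the test's values rather than any recommended production settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;

public class CombinedCacheSetupSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // L1 policy under test; the new tests also pass "LRU" and "AdaptiveLRU" here.
    conf.set(BlockCacheFactory.BLOCKCACHE_POLICY_KEY, "TinyLfu");
    // An off-heap L2 (BucketCache) is what makes BlockCacheFactory return a CombinedBlockCache:
    // data blocks land in the bucket cache, while index/bloom blocks stay in the on-heap L1.
    conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
    conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 32); // 32 MB, same as the test
    BlockCache cache = BlockCacheFactory.createBlockCache(conf);
    System.out.println("Created " + cache.getClass().getSimpleName()); // expect CombinedBlockCache
  }
}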