kudu-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ale...@apache.org
Subject [kudu] 02/02: [util] added cache with FIFO eviction policy
Date Wed, 20 Mar 2019 19:08:54 GMT
This is an automated email from the ASF dual-hosted git repository.

alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git

commit 39f4637559ac29791cc385308bd63d77a5f18e56
Author: Alexey Serbin <alexey@apache.org>
AuthorDate: Fri Mar 15 22:43:37 2019 -0700

    [util] added cache with FIFO eviction policy
    
    Updated cache implementation in util/cache.{h,cc} to accommodate
    the notion of a cache with FIFO eviction policy. Such a cache is
    useful for backing a TTL cache, i.e. a cache where an entry's life
    cycle depends purely on the time when the entry was first inserted
    into the cache.
    
    Corresponding unit test is added as well.
    
    Change-Id: If06fcf6c9860d0f384816414205f8791cbe53480
    Reviewed-on: http://gerrit.cloudera.org:8080/12777
    Tested-by: Alexey Serbin <aserbin@cloudera.com>
    Reviewed-by: Adar Dembo <adar@cloudera.com>
---
 src/kudu/cfile/block_cache.cc  |  14 +++-
 src/kudu/codegen/code_cache.cc |   4 +-
 src/kudu/util/cache-bench.cc   |   3 +-
 src/kudu/util/cache-test.cc    | 159 +++++++++++++++++++++++++++++++++++------
 src/kudu/util/cache.cc         | 108 ++++++++++++++++++++--------
 src/kudu/util/cache.h          |  53 +++++++++++---
 src/kudu/util/file_cache.cc    |   2 +-
 7 files changed, 273 insertions(+), 70 deletions(-)

diff --git a/src/kudu/cfile/block_cache.cc b/src/kudu/cfile/block_cache.cc
index ce11c2f..32587c4 100644
--- a/src/kudu/cfile/block_cache.cc
+++ b/src/kudu/cfile/block_cache.cc
@@ -70,8 +70,18 @@ namespace cfile {
 namespace {
 
 Cache* CreateCache(int64_t capacity) {
-  auto mem_type = BlockCache::GetConfiguredCacheMemoryTypeOrDie();
-  return NewLRUCache(mem_type, capacity, "block_cache");
+  const auto mem_type = BlockCache::GetConfiguredCacheMemoryTypeOrDie();
+  switch (mem_type) {
+    case Cache::MemoryType::DRAM:
+      return NewCache<Cache::EvictionPolicy::LRU, Cache::MemoryType::DRAM>(
+          capacity, "block_cache");
+    case Cache::MemoryType::NVM:
+      return NewCache<Cache::EvictionPolicy::LRU, Cache::MemoryType::NVM>(
+          capacity, "block_cache");
+    default:
+      LOG(FATAL) << "unsupported LRU cache memory type: " << mem_type;
+      return nullptr;
+  }
 }
 
 // Validates the block cache capacity won't permit the cache to grow large enough
diff --git a/src/kudu/codegen/code_cache.cc b/src/kudu/codegen/code_cache.cc
index 8dafebe..1f07e5d 100644
--- a/src/kudu/codegen/code_cache.cc
+++ b/src/kudu/codegen/code_cache.cc
@@ -60,8 +60,8 @@ class CodeCache::EvictionCallback : public Cache::EvictionCallback {
 };
 
 CodeCache::CodeCache(size_t capacity)
-  : cache_(NewLRUCache(Cache::MemoryType::DRAM, capacity, "code_cache")) {
-  eviction_callback_.reset(new EvictionCallback());
+    : cache_(NewCache(capacity, "code_cache")) {
+  eviction_callback_.reset(new EvictionCallback);
 }
 
 CodeCache::~CodeCache() {}
diff --git a/src/kudu/util/cache-bench.cc b/src/kudu/util/cache-bench.cc
index a73800d..4a8e2e5 100644
--- a/src/kudu/util/cache-bench.cc
+++ b/src/kudu/util/cache-bench.cc
@@ -96,8 +96,7 @@ class CacheBench : public KuduTest,
  public:
   void SetUp() override {
     KuduTest::SetUp();
-
-    cache_.reset(NewLRUCache(Cache::MemoryType::DRAM, kCacheCapacity, "test-cache"));
+    cache_.reset(NewCache(kCacheCapacity, "test-cache"));
   }
 
   // Run queries against the cache until '*done' becomes true.
diff --git a/src/kudu/util/cache-test.cc b/src/kudu/util/cache-test.cc
index c9d40e4..6869296 100644
--- a/src/kudu/util/cache-test.cc
+++ b/src/kudu/util/cache-test.cc
@@ -16,6 +16,7 @@
 #include <gtest/gtest.h>
 
 #include "kudu/gutil/ref_counted.h"
+#include "kudu/gutil/strings/substitute.h"
 #include "kudu/util/block_cache_metrics.h"
 #include "kudu/util/cache_metrics.h"
 #include "kudu/util/coding.h"
@@ -34,10 +35,12 @@ DECLARE_string(nvm_cache_path);
 
 DECLARE_double(cache_memtracker_approximation_ratio);
 
+using std::make_tuple;
 using std::tuple;
 using std::shared_ptr;
 using std::unique_ptr;
 using std::vector;
+using strings::Substitute;
 
 namespace kudu {
 
@@ -52,9 +55,9 @@ static int DecodeInt(const Slice& k) {
   return DecodeFixed32(k.data());
 }
 
-// Cache composition type: some test scenarios assume cache is single-sharded
-// to keep the logic simpler.
-enum class CacheComposition {
+// Cache sharding policy affects the composition of the cache. Some test
+// scenarios assume the cache is single-sharded to keep the logic simpler.
+enum class ShardingPolicy {
   MultiShard,
   SingleShard,
 };
@@ -101,7 +104,8 @@ class CacheBaseTest : public KuduTest,
 
  protected:
   void SetupWithParameters(Cache::MemoryType mem_type,
-                           CacheComposition cache_composition) {
+                           Cache::EvictionPolicy eviction_policy,
+                           ShardingPolicy sharding_policy) {
     // Disable approximate tracking of cache memory since we make specific
     // assertions on the MemTracker in this test.
     FLAGS_cache_memtracker_approximation_ratio = 0;
@@ -109,7 +113,7 @@ class CacheBaseTest : public KuduTest,
     // Using single shard makes the logic of scenarios simple for capacity-
     // and eviction-related behavior.
     FLAGS_cache_force_single_shard =
-        (cache_composition == CacheComposition::SingleShard);
+        (sharding_policy == ShardingPolicy::SingleShard);
 
 #if defined(HAVE_LIB_VMEM)
     if (google::GetCommandLineFlagInfoOrDie("nvm_cache_path").is_default) {
@@ -118,8 +122,38 @@ class CacheBaseTest : public KuduTest,
     }
 #endif // #if defined(HAVE_LIB_VMEM)
 
-    cache_.reset(NewLRUCache(mem_type, cache_size(), "cache_test"));
-    MemTracker::FindTracker("cache_test-sharded_lru_cache", &mem_tracker_);
+    switch (eviction_policy) {
+      case Cache::EvictionPolicy::FIFO:
+        if (mem_type != Cache::MemoryType::DRAM) {
+          FAIL() << "FIFO cache can only be of DRAM type";
+        }
+        cache_.reset(NewCache<Cache::EvictionPolicy::FIFO,
+                              Cache::MemoryType::DRAM>(cache_size(),
+                                                       "cache_test"));
+        MemTracker::FindTracker("cache_test-sharded_fifo_cache", &mem_tracker_);
+        break;
+      case Cache::EvictionPolicy::LRU:
+        switch (mem_type) {
+          case Cache::MemoryType::DRAM:
+            cache_.reset(NewCache<Cache::EvictionPolicy::LRU,
+                                  Cache::MemoryType::DRAM>(cache_size(),
+                                                           "cache_test"));
+            break;
+          case Cache::MemoryType::NVM:
+            cache_.reset(NewCache<Cache::EvictionPolicy::LRU,
+                                  Cache::MemoryType::NVM>(cache_size(),
+                                                          "cache_test"));
+            break;
+          default:
+            FAIL() << mem_type << ": unrecognized cache memory type";
+            break;
+        }
+        MemTracker::FindTracker("cache_test-sharded_lru_cache", &mem_tracker_);
+        break;
+      default:
+        FAIL() << "unrecognized cache eviction policy";
+        break;
+    }
 
     // Since nvm cache does not have memtracker due to the use of
     // tcmalloc for this we only check for it in the DRAM case.
@@ -144,7 +178,8 @@ class CacheBaseTest : public KuduTest,
 class CacheTest :
     public CacheBaseTest,
     public ::testing::WithParamInterface<tuple<Cache::MemoryType,
-                                               CacheComposition>> {
+                                               Cache::EvictionPolicy,
+                                               ShardingPolicy>> {
  public:
   CacheTest()
       : CacheBaseTest(14 * 1024 * 1024) {
@@ -153,23 +188,41 @@ class CacheTest :
   void SetUp() override {
     const auto& param = GetParam();
     SetupWithParameters(std::get<0>(param),
-                        std::get<1>(param));
+                        std::get<1>(param),
+                        std::get<2>(param));
   }
 };
 
 #if defined(HAVE_LIB_VMEM)
 INSTANTIATE_TEST_CASE_P(
     CacheTypes, CacheTest,
-    ::testing::Combine(::testing::Values(Cache::MemoryType::DRAM,
-                                         Cache::MemoryType::NVM),
-                       ::testing::Values(CacheComposition::MultiShard,
-                                         CacheComposition::SingleShard)));
+    ::testing::Values(
+        make_tuple(Cache::MemoryType::DRAM,
+                   Cache::EvictionPolicy::FIFO,
+                   ShardingPolicy::MultiShard),
+        make_tuple(Cache::MemoryType::DRAM,
+                   Cache::EvictionPolicy::FIFO,
+                   ShardingPolicy::SingleShard),
+        make_tuple(Cache::MemoryType::DRAM,
+                   Cache::EvictionPolicy::LRU,
+                   ShardingPolicy::MultiShard),
+        make_tuple(Cache::MemoryType::DRAM,
+                   Cache::EvictionPolicy::LRU,
+                   ShardingPolicy::SingleShard),
+        make_tuple(Cache::MemoryType::NVM,
+                   Cache::EvictionPolicy::LRU,
+                   ShardingPolicy::MultiShard),
+        make_tuple(Cache::MemoryType::NVM,
+                   Cache::EvictionPolicy::LRU,
+                   ShardingPolicy::SingleShard)));
 #else
 INSTANTIATE_TEST_CASE_P(
     CacheTypes, CacheTest,
     ::testing::Combine(::testing::Values(Cache::MemoryType::DRAM),
-                       ::testing::Values(CacheComposition::MultiShard,
-                                         CacheComposition::SingleShard)));
+                       ::testing::Values(Cache::EvictionPolicy::FIFO,
+                                         Cache::EvictionPolicy::LRU),
+                       ::testing::Values(ShardingPolicy::MultiShard,
+                                         ShardingPolicy::SingleShard)));
 #endif // #if defined(HAVE_LIB_VMEM) ... #else ...
 
 TEST_P(CacheTest, TrackMemory) {
@@ -276,11 +329,74 @@ TEST_P(CacheTest, HeavyEntries) {
   ASSERT_LE(cached_weight, cache_size() + cache_size() / 10);
 }
 
-// This class is dedicated for scenarios specific for LRUCache.
+// This class is dedicated for scenarios specific for FIFOCache.
+// The scenarios use a single-shard cache for simpler logic.
+class FIFOCacheTest : public CacheBaseTest {
+ public:
+  FIFOCacheTest()
+      : CacheBaseTest(10 * 1024) {
+  }
+
+  void SetUp() override {
+    SetupWithParameters(Cache::MemoryType::DRAM,
+                        Cache::EvictionPolicy::FIFO,
+                        ShardingPolicy::SingleShard);
+  }
+};
+
+// Verify the eviction behavior of a FIFO cache.
+TEST_F(FIFOCacheTest, EvictionPolicy) {
+  static constexpr int kNumElems = 20;
+  const int size_per_elem = cache_size() / kNumElems;
+  // First data chunk: fill the cache up to the capacity.
+  int idx = 0;
+  do {
+    Insert(idx, idx, size_per_elem);
+    // Keep looking up the very first entry: this is to make sure lookups
+    // do not affect the recency criteria of the eviction policy for FIFO cache.
+    Lookup(0);
+    ++idx;
+  } while (evicted_keys_.empty());
+  ASSERT_GT(idx, 1);
+
+  // Make sure the earliest inserted entry was evicted.
+  ASSERT_EQ(-1, Lookup(0));
+
+  // Verify that the 'empirical' capacity matches the expected capacity
+  // (it's a single-shard cache).
+  const int capacity = idx - 1;
+  ASSERT_EQ(kNumElems, capacity);
+
+  // Second data chunk: add (capacity / 2) more elements.
+  for (int i = 1; i < capacity / 2; ++i) {
+    // Earlier inserted elements should be gone one-by-one as new elements are
+    // inserted, and lookups should not affect the recency criteria of the FIFO
+    // eviction policy.
+    ASSERT_EQ(i, Lookup(i));
+    Insert(capacity + i, capacity + i, size_per_elem);
+    ASSERT_EQ(capacity + i, Lookup(capacity + i));
+    ASSERT_EQ(-1, Lookup(i));
+  }
+  ASSERT_EQ(capacity / 2, evicted_keys_.size());
+
+  // Early inserted elements from the first chunk should be evicted
+  // to accommodate the elements from the second chunk.
+  for (int i = 0; i < capacity / 2; ++i) {
+    SCOPED_TRACE(Substitute("early inserted elements: index $0", i));
+    ASSERT_EQ(-1, Lookup(i));
+  }
+  // The later inserted elements from the first chunk should be still
+  // in the cache.
+  for (int i = capacity / 2; i < capacity; ++i) {
+    SCOPED_TRACE(Substitute("late inserted elements: index $0", i));
+    ASSERT_EQ(i, Lookup(i));
+  }
+}
+
 class LRUCacheTest :
     public CacheBaseTest,
     public ::testing::WithParamInterface<tuple<Cache::MemoryType,
-                                               CacheComposition>> {
+                                               ShardingPolicy>> {
  public:
   LRUCacheTest()
       : CacheBaseTest(14 * 1024 * 1024) {
@@ -289,6 +405,7 @@ class LRUCacheTest :
   void SetUp() override {
     const auto& param = GetParam();
     SetupWithParameters(std::get<0>(param),
+                        Cache::EvictionPolicy::LRU,
                         std::get<1>(param));
   }
 };
@@ -298,14 +415,14 @@ INSTANTIATE_TEST_CASE_P(
     CacheTypes, LRUCacheTest,
     ::testing::Combine(::testing::Values(Cache::MemoryType::DRAM,
                                          Cache::MemoryType::NVM),
-                       ::testing::Values(CacheComposition::MultiShard,
-                                         CacheComposition::SingleShard)));
+                       ::testing::Values(ShardingPolicy::MultiShard,
+                                         ShardingPolicy::SingleShard)));
 #else
 INSTANTIATE_TEST_CASE_P(
     CacheTypes, LRUCacheTest,
     ::testing::Combine(::testing::Values(Cache::MemoryType::DRAM),
-                       ::testing::Values(CacheComposition::MultiShard,
-                                         CacheComposition::SingleShard)));
+                       ::testing::Values(ShardingPolicy::MultiShard,
+                                         ShardingPolicy::SingleShard)));
 #endif // #if defined(HAVE_LIB_VMEM) ... #else ...
 
 TEST_P(LRUCacheTest, EvictionPolicy) {
diff --git a/src/kudu/util/cache.cc b/src/kudu/util/cache.cc
index ef8a9d7..eb01289 100644
--- a/src/kudu/util/cache.cc
+++ b/src/kudu/util/cache.cc
@@ -64,11 +64,11 @@ namespace {
 
 typedef simple_spinlock MutexType;
 
-// Recency list cache implementation (LRU, etc.)
+// Recency list cache implementations (FIFO, LRU, etc.)
 
 // Recency list handle. An entry is a variable length heap-allocated structure.
 // Entries are kept in a circular doubly linked list ordered by some recency
-// criterion (e.g., access time for LRU policy).
+// criterion (e.g., access time for LRU policy, insertion time for FIFO policy).
 struct RLHandle {
   Cache::EvictionCallback* eviction_callback;
   RLHandle* next_hash;
@@ -188,7 +188,21 @@ class HandleTable {
   }
 };
 
+string ToString(Cache::EvictionPolicy p) {
+  switch (p) {
+    case Cache::EvictionPolicy::FIFO:
+      return "fifo";
+    case Cache::EvictionPolicy::LRU:
+      return "lru";
+    default:
+      LOG(FATAL) << "unexpected cache eviction policy: " << static_cast<int>(p);
+      break;
+  }
+  return "unknown";
+}
+
 // A single shard of sharded cache.
+template<Cache::EvictionPolicy policy>
 class CacheShard {
  public:
   explicit CacheShard(MemTracker* tracker);
@@ -211,6 +225,8 @@ class CacheShard {
  private:
   void RL_Remove(RLHandle* e);
   void RL_Append(RLHandle* e);
+  // Update the recency list after a lookup operation.
+  void RL_UpdateAfterLookup(RLHandle* e);
   // Just reduce the reference count by 1.
   // Return true if last reference
   bool Unref(RLHandle* e);
@@ -264,7 +280,8 @@ class CacheShard {
   CacheMetrics* metrics_;
 };
 
-CacheShard::CacheShard(MemTracker* tracker)
+template<Cache::EvictionPolicy policy>
+CacheShard<policy>::CacheShard(MemTracker* tracker)
     : usage_(0),
       mem_tracker_(tracker),
       metrics_(nullptr) {
@@ -273,7 +290,8 @@ CacheShard::CacheShard(MemTracker* tracker)
   rl_.prev = &rl_;
 }
 
-CacheShard::~CacheShard() {
+template<Cache::EvictionPolicy policy>
+CacheShard<policy>::~CacheShard() {
   for (RLHandle* e = rl_.next; e != &rl_; ) {
     RLHandle* next = e->next;
     DCHECK_EQ(e->refs.load(std::memory_order_relaxed), 1)
@@ -286,12 +304,14 @@ CacheShard::~CacheShard() {
   mem_tracker_->Consume(deferred_consumption_);
 }
 
-bool CacheShard::Unref(RLHandle* e) {
+template<Cache::EvictionPolicy policy>
+bool CacheShard<policy>::Unref(RLHandle* e) {
   DCHECK_GT(e->refs.load(std::memory_order_relaxed), 0);
   return e->refs.fetch_sub(1) == 1;
 }
 
-void CacheShard::FreeEntry(RLHandle* e) {
+template<Cache::EvictionPolicy policy>
+void CacheShard<policy>::FreeEntry(RLHandle* e) {
   DCHECK_EQ(e->refs.load(std::memory_order_relaxed), 0);
   if (e->eviction_callback) {
     e->eviction_callback->EvictedEntry(e->key(), e->value());
@@ -304,7 +324,8 @@ void CacheShard::FreeEntry(RLHandle* e) {
   delete [] e;
 }
 
-void CacheShard::UpdateMemTracker(int64_t delta) {
+template<Cache::EvictionPolicy policy>
+void CacheShard<policy>::UpdateMemTracker(int64_t delta) {
   int64_t old_deferred = deferred_consumption_.fetch_add(delta);
   int64_t new_deferred = old_deferred + delta;
 
@@ -315,7 +336,8 @@ void CacheShard::UpdateMemTracker(int64_t delta) {
   }
 }
 
-void CacheShard::UpdateMetricsLookup(bool was_hit, bool caching) {
+template<Cache::EvictionPolicy policy>
+void CacheShard<policy>::UpdateMetricsLookup(bool was_hit, bool caching) {
   if (PREDICT_TRUE(metrics_)) {
     metrics_->lookups->Increment();
     if (was_hit) {
@@ -334,13 +356,15 @@ void CacheShard::UpdateMetricsLookup(bool was_hit, bool caching) {
   }
 }
 
-void CacheShard::RL_Remove(RLHandle* e) {
+template<Cache::EvictionPolicy policy>
+void CacheShard<policy>::RL_Remove(RLHandle* e) {
   e->next->prev = e->prev;
   e->prev->next = e->next;
   usage_ -= e->charge;
 }
 
-void CacheShard::RL_Append(RLHandle* e) {
+template<Cache::EvictionPolicy policy>
+void CacheShard<policy>::RL_Append(RLHandle* e) {
   // Make "e" newest entry by inserting just before rl_.
   e->next = &rl_;
   e->prev = rl_.prev;
@@ -349,17 +373,27 @@ void CacheShard::RL_Append(RLHandle* e) {
   usage_ += e->charge;
 }
 
-Cache::Handle* CacheShard::Lookup(const Slice& key,
-                                  uint32_t hash,
-                                  bool caching) {
+template<>
+void CacheShard<Cache::EvictionPolicy::FIFO>::RL_UpdateAfterLookup(RLHandle* /* e */) {
+}
+
+template<>
+void CacheShard<Cache::EvictionPolicy::LRU>::RL_UpdateAfterLookup(RLHandle* e) {
+  RL_Remove(e);
+  RL_Append(e);
+}
+
+template<Cache::EvictionPolicy policy>
+Cache::Handle* CacheShard<policy>::Lookup(const Slice& key,
+                                          uint32_t hash,
+                                          bool caching) {
   RLHandle* e;
   {
     std::lock_guard<MutexType> l(mutex_);
     e = table_.Lookup(key, hash);
     if (e != nullptr) {
       e->refs.fetch_add(1, std::memory_order_relaxed);
-      RL_Remove(e);
-      RL_Append(e);
+      RL_UpdateAfterLookup(e);
     }
   }
 
@@ -369,7 +403,8 @@ Cache::Handle* CacheShard::Lookup(const Slice& key,
   return reinterpret_cast<Cache::Handle*>(e);
 }
 
-void CacheShard::Release(Cache::Handle* handle) {
+template<Cache::EvictionPolicy policy>
+void CacheShard<policy>::Release(Cache::Handle* handle) {
   RLHandle* e = reinterpret_cast<RLHandle*>(handle);
   bool last_reference = Unref(e);
   if (last_reference) {
@@ -377,7 +412,8 @@ void CacheShard::Release(Cache::Handle* handle) {
   }
 }
 
-Cache::Handle* CacheShard::Insert(
+template<Cache::EvictionPolicy policy>
+Cache::Handle* CacheShard<policy>::Insert(
     RLHandle* handle,
     Cache::EvictionCallback* eviction_callback) {
   // Set the remaining RLHandle members which were not already allocated during
@@ -428,7 +464,8 @@ Cache::Handle* CacheShard::Insert(
   return reinterpret_cast<Cache::Handle*>(handle);
 }
 
-void CacheShard::Erase(const Slice& key, uint32_t hash) {
+template<Cache::EvictionPolicy policy>
+void CacheShard<policy>::Erase(const Slice& key, uint32_t hash) {
   RLHandle* e;
   bool last_reference = false;
   {
@@ -455,11 +492,12 @@ int DetermineShardBits() {
   return bits;
 }
 
+template<Cache::EvictionPolicy policy>
 class ShardedCache : public Cache {
  private:
   shared_ptr<MemTracker> mem_tracker_;
   unique_ptr<CacheMetrics> metrics_;
-  vector<CacheShard*> shards_;
+  vector<CacheShard<policy>*> shards_;
 
   // Number of bits of hash used to determine the shard.
   const int shard_bits_;
@@ -486,12 +524,13 @@ class ShardedCache : public Cache {
     // 1. We reuse its MemTracker if one already exists, and
     // 2. It is directly parented to the root MemTracker.
     mem_tracker_ = MemTracker::FindOrCreateGlobalTracker(
-        -1, strings::Substitute("$0-sharded_lru_cache", id));
+        -1, strings::Substitute("$0-sharded_$1_cache", id, ToString(policy)));
 
     int num_shards = 1 << shard_bits_;
     const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
     for (int s = 0; s < num_shards; s++) {
-      unique_ptr<CacheShard> shard(new CacheShard(mem_tracker_.get()));
+      unique_ptr<CacheShard<policy>> shard(
+          new CacheShard<policy>(mem_tracker_.get()));
       shard->SetCapacity(per_shard);
       shards_.push_back(shard.release());
     }
@@ -568,18 +607,25 @@ class ShardedCache : public Cache {
 
 }  // end anonymous namespace
 
-Cache* NewLRUCache(Cache::MemoryType mem_type, size_t capacity, const string& id) {
-  switch (mem_type) {
-    case Cache::MemoryType::DRAM:
-      return new ShardedCache(capacity, id);
+template<>
+Cache* NewCache<Cache::EvictionPolicy::FIFO,
+                Cache::MemoryType::DRAM>(size_t capacity, const std::string& id) {
+  return new ShardedCache<Cache::EvictionPolicy::FIFO>(capacity, id);
+}
+
+template<>
+Cache* NewCache<Cache::EvictionPolicy::LRU,
+                Cache::MemoryType::DRAM>(size_t capacity, const std::string& id) {
+  return new ShardedCache<Cache::EvictionPolicy::LRU>(capacity, id);
+}
+
 #if defined(HAVE_LIB_VMEM)
-    case Cache::MemoryType::NVM:
-      return NewLRUNvmCache(capacity, id);
-#endif
-    default:
-      LOG(FATAL) << "unsupported memory type for LRU cache: " << mem_type;
-  }
+template<>
+Cache* NewCache<Cache::EvictionPolicy::LRU,
+                Cache::MemoryType::NVM>(size_t capacity, const std::string& id) {
+  return NewLRUNvmCache(capacity, id);
 }
+#endif
 
 std::ostream& operator<<(std::ostream& os, Cache::MemoryType mem_type) {
   switch (mem_type) {
diff --git a/src/kudu/util/cache.h b/src/kudu/util/cache.h
index 83e3684..7ba1ae5 100644
--- a/src/kudu/util/cache.h
+++ b/src/kudu/util/cache.h
@@ -12,11 +12,10 @@
 //
 // This is taken from LevelDB and evolved to fit the kudu codebase.
 //
-// TODO: this is pretty lock-heavy. Would be good to sub out something
+// TODO(unknown): this is pretty lock-heavy. Would be good to sub out something
 // a little more concurrent.
 
-#ifndef KUDU_UTIL_CACHE_H_
-#define KUDU_UTIL_CACHE_H_
+#pragma once
 
 #include <cstddef>
 #include <cstdint>
@@ -40,6 +39,17 @@ class Cache {
     NVM,
   };
 
+  // Supported eviction policies for the cache. Eviction policy determines what
+  // items to evict if the cache is at capacity when trying to accommodate
+  // an extra item.
+  enum class EvictionPolicy {
+    // The earliest added items are evicted (a.k.a. queue).
+    FIFO,
+
+    // The least-recently-used items are evicted.
+    LRU,
+  };
+
   // Callback interface which is called when an entry is evicted from the
   // cache.
   class EvictionCallback {
@@ -208,14 +218,35 @@ class Cache {
   DISALLOW_COPY_AND_ASSIGN(Cache);
 };
 
-// Create a new cache with a fixed size capacity. This implementation
-// of Cache uses a least-recently-used eviction policy.
-Cache* NewLRUCache(Cache::MemoryType mem_type,
-                   size_t capacity,
-                   const std::string& id);
+// A template helper function to instantiate a cache of particular
+// 'eviction_policy' flavor, backed by the given storage 'mem_type',
+// where 'capacity' specifies the capacity of the result cache,
+// and 'id' specifies its identifier.
+template<Cache::EvictionPolicy eviction_policy = Cache::EvictionPolicy::LRU,
+         Cache::MemoryType mem_type = Cache::MemoryType::DRAM>
+Cache* NewCache(size_t capacity, const std::string& id);
+
+// Create a new FIFO cache with a fixed size capacity. This implementation
+// of Cache uses the first-in-first-out eviction policy and is stored in DRAM.
+template<>
+Cache* NewCache<Cache::EvictionPolicy::FIFO,
+                Cache::MemoryType::DRAM>(size_t capacity, const std::string& id);
+
+// Create a new LRU cache with a fixed size capacity. This implementation
+// of Cache uses the least-recently-used eviction policy and is stored in DRAM.
+template<>
+Cache* NewCache<Cache::EvictionPolicy::LRU,
+                Cache::MemoryType::DRAM>(size_t capacity, const std::string& id);
+
+#if defined(HAVE_LIB_VMEM)
+// Create a new LRU cache with a fixed size capacity. This implementation
+// of Cache uses the least-recently-used eviction policy and is stored in NVM.
+template<>
+Cache* NewCache<Cache::EvictionPolicy::LRU,
+                Cache::MemoryType::NVM>(size_t capacity, const std::string& id);
+#endif
 
+// A helper method to output cache memory type into ostream.
 std::ostream& operator<<(std::ostream& os, Cache::MemoryType mem_type);
 
-}  // namespace kudu
-
-#endif
+} // namespace kudu
diff --git a/src/kudu/util/file_cache.cc b/src/kudu/util/file_cache.cc
index 3ff22b0..b9294d0 100644
--- a/src/kudu/util/file_cache.cc
+++ b/src/kudu/util/file_cache.cc
@@ -459,7 +459,7 @@ FileCache<FileType>::FileCache(const string& cache_name,
     : env_(env),
       cache_name_(cache_name),
       eviction_cb_(new EvictionCallback<FileType>()),
-      cache_(NewLRUCache(Cache::MemoryType::DRAM, max_open_files, cache_name)),
+      cache_(NewCache(max_open_files, cache_name)),
       running_(1) {
   if (entity) {
     unique_ptr<FileCacheMetrics> metrics(new FileCacheMetrics(entity));


Mime
View raw message