From: davisp@apache.org
To: commits@couchdb.apache.org
Date: Thu, 13 Jul 2017 17:59:46 +0000
Subject: [couchdb] 15/24: FIXUP: Don't manually track cache size

This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch optimize-ddoc-cache
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 8ed1d3f172427988aa9c313fa489bdd05116659d
Author: Paul J. Davis
AuthorDate: Thu Jul 6 16:37:23 2017 -0500

    FIXUP: Don't manually track cache size

    I'm not quite sure why I started doing this in the first place. I
    think it was because I was worried that ets:info/2 might be
    expensive, but I can get away with calling it only once here, so
    it's no big deal any more.
---
 src/ddoc_cache/src/ddoc_cache_lru.erl            | 14 ++++-----
 src/ddoc_cache/test/ddoc_cache_coverage_test.erl |  6 ++--
 src/ddoc_cache/test/ddoc_cache_lru_test.erl      | 38 ++++++++++++++++++++++--
 3 files changed, 45 insertions(+), 13 deletions(-)

diff --git a/src/ddoc_cache/src/ddoc_cache_lru.erl b/src/ddoc_cache/src/ddoc_cache_lru.erl
index 35173f4..2764959 100644
--- a/src/ddoc_cache/src/ddoc_cache_lru.erl
+++ b/src/ddoc_cache/src/ddoc_cache_lru.erl
@@ -42,7 +42,6 @@
 -record(st, {
     pids, % pid -> key
     dbs, % dbname -> docid -> key -> pid
-    size,
     evictor
 }).

@@ -93,7 +92,6 @@ init(_) ->
     {ok, #st{
         pids = Pids,
         dbs = Dbs,
-        size = 0,
         evictor = Evictor
     }}.
@@ -109,20 +107,20 @@ terminate(_Reason, St) ->
 handle_call({start, Key, Default}, _From, St) ->
     #st{
         pids = Pids,
-        dbs = Dbs,
-        size = CurSize
+        dbs = Dbs
     } = St,
     case ets:lookup(?CACHE, Key) of
         [] ->
             MaxSize = config:get_integer("ddoc_cache", "max_size", 1000),
+            CurSize = ets:info(?CACHE, size),
             case trim(St, CurSize, max(0, MaxSize)) of
-                {ok, N} ->
+                ok ->
                     true = ets:insert_new(?CACHE, #entry{key = Key}),
                     {ok, Pid} = ddoc_cache_entry:start_link(Key, Default),
                     true = ets:update_element(?CACHE, Key, {#entry.pid, Pid}),
                     ok = khash:put(Pids, Pid, Key),
                     store_key(Dbs, Key, Pid),
-                    {reply, {ok, Pid}, St#st{size = CurSize - N + 1}};
+                    {reply, {ok, Pid}, St};
                 full ->
                     ?EVENT(full, Key),
                     {reply, full, St}
@@ -251,7 +249,7 @@ trim(_, _, 0) ->
     full;

 trim(_St, CurSize, MaxSize) when CurSize < MaxSize ->
-    {ok, 0};
+    ok;

 trim(St, CurSize, MaxSize) when CurSize >= MaxSize ->
     case ets:first(?LRU) of
@@ -259,7 +257,7 @@ trim(St, CurSize, MaxSize) when CurSize >= MaxSize ->
             full;
         {_Ts, Key, Pid} ->
             remove_entry(St, Key, Pid),
-            {ok, 1}
+            trim(St, CurSize - 1, MaxSize)
     end.

diff --git a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
index 395f560..91182ca 100644
--- a/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_coverage_test.erl
@@ -33,7 +33,7 @@ coverage_test_() ->

 restart_lru() ->
     send_bad_messages(ddoc_cache_lru),
-    ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c, d})),
+    ?assertEqual(ok, ddoc_cache_lru:terminate(bang, {st, a, b, c})),
     ?assertEqual({ok, foo}, ddoc_cache_lru:code_change(1, foo, [])).

@@ -47,7 +47,7 @@ restart_evictor() ->
     meck:new(ddoc_cache_ev, [passthrough]),
     try
         State = sys:get_state(ddoc_cache_lru),
-        Evictor = element(5, State),
+        Evictor = element(4, State),
         Ref = erlang:monitor(process, Evictor),
         exit(Evictor, shutdown),
         receive
@@ -57,7 +57,7 @@ restart_evictor() ->
         end,
         meck:wait(ddoc_cache_ev, event, [evictor_died, '_'], 1000),
         NewState = sys:get_state(ddoc_cache_lru),
-        NewEvictor = element(5, NewState),
+        NewEvictor = element(4, NewState),
         ?assertNotEqual(Evictor, NewEvictor)
     after
         meck:unload()

diff --git a/src/ddoc_cache/test/ddoc_cache_lru_test.erl b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
index 77b39cd..dd77828 100644
--- a/src/ddoc_cache/test/ddoc_cache_lru_test.erl
+++ b/src/ddoc_cache/test/ddoc_cache_lru_test.erl
@@ -63,7 +63,8 @@ check_lru_test_() ->
             fun check_multi_start/1,
             fun check_multi_open/1,
             fun check_capped_size/1,
-            fun check_full_cache/1
+            fun check_full_cache/1,
+            fun check_cache_refill/1
         ]}
     }.

@@ -88,7 +89,7 @@ check_multi_start(_) ->
     ?assert(is_process_alive(Opener)),
     Opener ! go,
     receive {'DOWN', OpenerRef, _, _, _} -> ok end,
-    lists:foreach(fun({CPid, Ref}) ->
+    lists:foreach(fun({_, Ref}) ->
         receive
             {'DOWN', Ref, _, _, normal} -> ok
         end
@@ -159,3 +160,36 @@ check_full_cache(_) ->
         meck:wait(I - 5, ddoc_cache_ev, event, [full, '_'], 1000),
         ?assertEqual(5, ets:info(?CACHE, size))
     end, lists:seq(6, 20)).
+
+
+check_cache_refill({DbName, _}) ->
+    ddoc_cache_tutil:clear(),
+    meck:reset(ddoc_cache_ev),
+
+    InitDDoc = fun(I) ->
+        NumBin = list_to_binary(integer_to_list(I)),
+        DDocId = <<"_design/", NumBin/binary>>,
+        Doc = #doc{id = DDocId, body = {[]}},
+        {ok, _} = fabric:update_doc(DbName, Doc, [?ADMIN_CTX]),
+        {ok, _} = ddoc_cache:open_doc(DbName, DDocId),
+        {ddoc_cache_entry_ddocid, {DbName, DDocId}}
+    end,
+
+    lists:foreach(fun(I) ->
+        Key = InitDDoc(I),
+        couch_log:error("STARTED? ~p", [Key]),
+        meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
+        ?assert(ets:info(?CACHE, size) > 0)
+    end, lists:seq(1, 5)),
+
+    ShardName = element(2, hd(mem3:shards(DbName))),
+    {ok, _} = ddoc_cache_lru:handle_db_event(ShardName, deleted, foo),
+    meck:wait(ddoc_cache_ev, event, [evicted, DbName], 1000),
+    meck:wait(10, ddoc_cache_ev, event, [removed, '_'], 1000),
+    ?assertEqual(0, ets:info(?CACHE, size)),
+
+    lists:foreach(fun(I) ->
+        Key = InitDDoc(I),
+        meck:wait(ddoc_cache_ev, event, [started, Key], 1000),
+        ?assert(ets:info(?CACHE, size) > 0)
+    end, lists:seq(6, 10)).
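The crux of the change is that the LRU no longer carries its own size counter in #st{}; it asks ETS for the table size at the one point where it matters. Below is a minimal, standalone sketch of that call, with a made-up module and table name (not part of the patch); it can be compiled and run in an Erlang shell as ets_size_sketch:demo().

    %% Minimal sketch (not from the patch): ets already tracks how many
    %% entries a table holds, so a hand-maintained counter is redundant.
    %% Module and table names here are invented for illustration.
    -module(ets_size_sketch).
    -export([demo/0]).

    demo() ->
        Cache = ets:new(sketch_cache, [set, public]),
        true = ets:insert(Cache, {key_a, val_a}),
        true = ets:insert(Cache, {key_b, val_b}),
        %% Same call the patch adds in handle_call/3:
        %%     CurSize = ets:info(?CACHE, size)
        CurSize = ets:info(Cache, size),
        2 = CurSize,
        ets:delete(Cache),
        ok.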
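For readability, here is roughly how trim/3 reads once the hunks above are applied. This is a reconstruction from the diff, not the file itself; in particular the '$end_of_table' clause sits outside the shown context and is assumed from how ets:first/1 behaves on an empty table.

    %% Sketch of post-patch trim/3, reassembled from the hunks above.
    trim(_, _, 0) ->
        full;
    trim(_St, CurSize, MaxSize) when CurSize < MaxSize ->
        %% Under the limit: nothing to evict.
        ok;
    trim(St, CurSize, MaxSize) when CurSize >= MaxSize ->
        case ets:first(?LRU) of
            '$end_of_table' ->
                %% Assumed clause (not visible in the diff context):
                %% nothing left to evict, so the cache stays full.
                full;
            {_Ts, Key, Pid} ->
                %% Evict the oldest entry, then keep trimming until there
                %% is room for the new one.
                remove_entry(St, Key, Pid),
                trim(St, CurSize - 1, MaxSize)
        end.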