From: davisp@apache.org
To: "commits@couchdb.apache.org"
Date: Thu, 24 May 2018 21:55:33 +0000
Subject: [couchdb] 04/10: [04/N] Clustered Purge: Update eunit tests

This is an automated email from the ASF dual-hosted git repository.

davisp pushed a commit to branch COUCHDB-3326-clustered-purge-pr4-implementation
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit b1a68831610487c212ca730a86209753af625e57
Author: Paul J. Davis
AuthorDate: Tue Apr 24 12:26:01 2018 -0500

    [04/N] Clustered Purge: Update eunit tests

    This commit updates all of the existing eunit tests to work with the
    new single-node purge APIs.
    TODO: Split this into two commits: new tests and updated tests

    COUCHDB-3326

    Co-authored-by: Mayya Sharipova
    Co-authored-by: jiangphcn
---
 src/couch/src/couch_bt_engine.erl                  |   4 +-
 src/couch/test/couch_db_purge_seqs_tests.erl       | 217 +++++++++
 src/couch/test/couch_db_purge_upgrade_tests.erl    | 200 ++++++++
 src/couch/test/fixtures/db_with_1_purge_req.couch  | Bin 0 -> 12470 bytes
 src/couch/test/fixtures/db_with_2_purge_req.couch  | Bin 0 -> 16566 bytes
 src/couch/test/fixtures/db_without_purge_req.couch | Bin 0 -> 61644 bytes
 src/couch_pse_tests/src/cpse_test_compaction.erl   | 143 +++++-
 .../src/cpse_test_fold_purge_infos.erl             | 166 +++++++
 .../src/cpse_test_get_set_props.erl                |   3 +-
 .../src/cpse_test_purge_bad_checkpoints.erl        | 121 +++++
 src/couch_pse_tests/src/cpse_test_purge_docs.erl   | 506 +++++++++++++++++----
 src/couch_pse_tests/src/cpse_util.erl              | 143 +++++-
 12 files changed, 1385 insertions(+), 118 deletions(-)

diff --git a/src/couch/src/couch_bt_engine.erl b/src/couch/src/couch_bt_engine.erl
index c6e0069..04747a7 100644
--- a/src/couch/src/couch_bt_engine.erl
+++ b/src/couch/src/couch_bt_engine.erl
@@ -917,7 +917,7 @@ upgrade_purge_info(Fd, Header) ->
         {reduce, fun ?MODULE:purge_tree_reduce/2}
     ]),
     {ok, PurgeTree2} = couch_btree:add(PurgeTree, Infos),
-    {ok, PurgeTreeSt} = couch_btree:get_state(PurgeTree2),
+    PurgeTreeSt = couch_btree:get_state(PurgeTree2),

     {ok, PurgeSeqTree} = couch_btree:open(nil, Fd, [
         {split, fun ?MODULE:purge_seq_tree_split/1},
@@ -925,7 +925,7 @@ upgrade_purge_info(Fd, Header) ->
         {reduce, fun ?MODULE:purge_tree_reduce/2}
     ]),
     {ok, PurgeSeqTree2} = couch_btree:add(PurgeSeqTree, Infos),
-    {ok, PurgeSeqTreeSt} = couch_btree:get_state(PurgeSeqTree2),
+    PurgeSeqTreeSt = couch_btree:get_state(PurgeSeqTree2),

     couch_bt_engine_header:set(Header, [
         {purge_tree_state, PurgeTreeSt},
diff --git a/src/couch/test/couch_db_purge_seqs_tests.erl b/src/couch/test/couch_db_purge_seqs_tests.erl
new file mode 100644
index 0000000..5b253cb
--- /dev/null
+++ b/src/couch/test/couch_db_purge_seqs_tests.erl
@@ -0,0 +1,217 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_db_purge_seqs_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+setup() ->
+    DbName = ?tempdb(),
+    {ok, _Db} = create_db(DbName),
+    DbName.
+
+teardown(DbName) ->
+    delete_db(DbName),
+    ok.
+
+couch_db_purge_seqs_test_() ->
+    {
+        "Couch_db purge_seqs",
+        [
+            {
+                setup,
+                fun test_util:start_couch/0, fun test_util:stop_couch/1,
+                [couch_db_purge_seqs()]
+            }
+        ]
+    }.
+
+
+couch_db_purge_seqs() ->
+    {
+        foreach,
+        fun setup/0, fun teardown/1,
+        [
+            fun test_update_seq_bounce/1,
+            fun test_update_seq_inc_on_complete_purge/1,
+            fun test_purge_seq_bounce/1,
+            fun test_fold_purge_infos/1,
+            fun test_purge_seq/1
+        ]
+    }.
+ +test_update_seq_bounce(DbName) -> + ?_test( + begin + {ok, Db} = couch_db:open_int(DbName, []), + Doc1 = {[{<<"_id">>, <<"foo1">>}, {<<"vsn">>, 1.1}]}, + Doc2 = {[{<<"_id">>, <<"foo2">>}, {<<"vsn">>, 1.2}]}, + {ok, Rev} = save_doc(Db, Doc1), + {ok, _Rev2} = save_doc(Db, Doc2), + couch_db:ensure_full_commit(Db), + + {ok, Db2} = couch_db:reopen(Db), + ?assertEqual(2, couch_db_engine:get_doc_count(Db2)), + ?assertEqual(2, couch_db_engine:get_update_seq(Db2)), + + UUID = couch_uuids:new(), + {ok, [{ok, PRevs}]} = couch_db:purge_docs( + Db2, [{UUID, <<"foo1">>, [Rev]}] + ), + + ?assertEqual([Rev], PRevs), + + {ok, Db3} = couch_db:reopen(Db2), + {ok, _PIdsRevs} = couch_db:fold_purge_infos( + Db3, 0, fun fold_fun/2, [], []), + ?assertEqual(3, couch_db_engine:get_update_seq(Db3)) + end). + + +test_update_seq_inc_on_complete_purge(DbName) -> + ?_test( + begin + {ok, Db} = couch_db:open_int(DbName, []), + Doc1 = {[{<<"_id">>, <<"foo1">>}, {<<"vsn">>, 1.1}]}, + Doc2 = {[{<<"_id">>, <<"foo2">>}, {<<"vsn">>, 1.2}]}, + {ok, Rev} = save_doc(Db, Doc1), + {ok, _Rev2} = save_doc(Db, Doc2), + couch_db:ensure_full_commit(Db), + + {ok, Db2} = couch_db:reopen(Db), + ?assertEqual(2, couch_db_engine:get_doc_count(Db2)), + ?assertEqual(2, couch_db_engine:get_update_seq(Db2)), + + UUID = couch_uuids:new(), + {ok, [{ok, PRevs}]} = couch_db:purge_docs( + Db2, [{UUID, <<"invalid">>, [Rev]}] + ), + + {ok, Db3} = couch_db:reopen(Db2), + ?assertEqual(3, couch_db_engine:get_update_seq(Db3)), + + ?assertEqual([], PRevs), + + UUID2 = couch_uuids:new(), + {ok, [{ok, PRevs2}]} = couch_db:purge_docs( + Db3, [{UUID2, <<"foo1">>, [Rev]}] + ), + + ?assertEqual([Rev], PRevs2), + + {ok, Db4} = couch_db:reopen(Db3), + ?assertEqual(4, couch_db_engine:get_update_seq(Db4)) + end). + + +test_purge_seq_bounce(DbName) -> + ?_test( + begin + {ok, Db} = couch_db:open_int(DbName, []), + Doc1 = {[{<<"_id">>, <<"foo1">>}, {<<"vsn">>, 1.1}]}, + Doc2 = {[{<<"_id">>, <<"foo2">>}, {<<"vsn">>, 1.2}]}, + {ok, Rev} = save_doc(Db, Doc1), + {ok, _Rev2} = save_doc(Db, Doc2), + couch_db:ensure_full_commit(Db), + + {ok, Db2} = couch_db:reopen(Db), + ?assertEqual(2, couch_db_engine:get_doc_count(Db2)), + ?assertEqual(0, couch_db_engine:get_purge_seq(Db2)), + + UUID = couch_uuids:new(), + {ok, [{ok, PRevs}]} = couch_db:purge_docs( + Db2, [{UUID, <<"foo1">>, [Rev]}] + ), + + ?assertEqual([Rev], PRevs), + + {ok, Db3} = couch_db:reopen(Db2), + {ok, _PIdsRevs} = couch_db:fold_purge_infos( + Db3, 0, fun fold_fun/2, [], []), + ?assertEqual(1, couch_db_engine:get_purge_seq(Db3)) + end). 
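For reference while reviewing, here is a minimal sketch of the single-node purge round-trip these tests exercise, roughly as it would be run from a shell on a node (not part of this patch; Db and Rev are placeholders for an open database handle and a previously saved revision):

    %% A purge request is a {UUID, DocId, Revs} triple; the reply lists the
    %% revisions that were actually removed for each request.
    PurgeInfos = [{couch_uuids:new(), <<"foo1">>, [Rev]}],
    {ok, [{ok, PurgedRevs}]} = couch_db:purge_docs(Db, PurgeInfos),
    %% Reopen to observe the bumped update_seq/purge_seq and the stored
    %% purge info, just as the assertions above do.
    {ok, Db2} = couch_db:reopen(Db),
    couch_db_engine:get_purge_seq(Db2).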
+ + +test_fold_purge_infos(DbName) -> + ?_test( + begin + {ok, Db} = couch_db:open_int(DbName, []), + Doc1 = {[{<<"_id">>, <<"foo1">>}, {<<"vsn">>, 1.1}]}, + Doc2 = {[{<<"_id">>, <<"foo2">>}, {<<"vsn">>, 1.2}]}, + {ok, Rev} = save_doc(Db, Doc1), + {ok, Rev2} = save_doc(Db, Doc2), + couch_db:ensure_full_commit(Db), + + {ok, Db2} = couch_db:reopen(Db), + ?assertEqual(2, couch_db_engine:get_doc_count(Db2)), + ?assertEqual(0, couch_db_engine:get_del_doc_count(Db2)), + ?assertEqual(2, couch_db_engine:get_update_seq(Db2)), + ?assertEqual(0, couch_db_engine:get_purge_seq(Db2)), + + UUID = couch_uuids:new(), UUID2 = couch_uuids:new(), + {ok, [{ok, PRevs}, {ok, PRevs2}]} = couch_db:purge_docs( + Db2, [{UUID, <<"foo1">>, [Rev]}, {UUID2, <<"foo2">>, [Rev2]}] + ), + + ?assertEqual([Rev], PRevs), + ?assertEqual([Rev2], PRevs2), + + {ok, Db3} = couch_db:reopen(Db2), + {ok, PIdsRevs} = couch_db:fold_purge_infos( + Db3, 0, fun fold_fun/2, [], []), + ?assertEqual(0, couch_db_engine:get_doc_count(Db3)), + ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)), + ?assertEqual(3, couch_db_engine:get_update_seq(Db3)), + ?assertEqual(2, couch_db_engine:get_purge_seq(Db3)), + ?assertEqual([{<<"foo2">>, [Rev2]}, {<<"foo1">>, [Rev]}], PIdsRevs) + end). + + +test_purge_seq(DbName) -> + ?_test( + begin + {ok, Db} = couch_db:open_int(DbName, []), + Doc1 = {[{<<"_id">>, <<"foo1">>}, {<<"vsn">>, 1.1}]}, + Doc2 = {[{<<"_id">>, <<"foo2">>}, {<<"vsn">>, 1.2}]}, + {ok, Rev} = save_doc(Db, Doc1), + {ok, _Rev2} = save_doc(Db, Doc2), + couch_db:ensure_full_commit(Db), + + {ok, Db2} = couch_db:reopen(Db), + ?assertEqual(2, couch_db_engine:get_doc_count(Db2)), + UUID = couch_uuids:new(), + {ok, [{ok, PRevs}]} = couch_db:purge_docs( + Db2, [{UUID, <<"foo1">>, [Rev]}] + ), + + ?assertEqual([Rev], PRevs), + ?assertEqual(0, couch_db_engine:get_purge_seq(Db2)), + + {ok, Db3} = couch_db:reopen(Db2), + ?assertEqual(1, couch_db_engine:get_purge_seq(Db3)) + end). + + +create_db(DbName) -> + couch_db:create(DbName, [?ADMIN_CTX, overwrite]). + +delete_db(DbName) -> + couch_server:delete(DbName, [?ADMIN_CTX]). + +save_doc(Db, Json) -> + Doc = couch_doc:from_json_obj(Json), + couch_db:update_doc(Db, Doc, []). + +fold_fun({_PSeq, _UUID, Id, Revs}, Acc) -> + {ok, [{Id, Revs} | Acc]}. diff --git a/src/couch/test/couch_db_purge_upgrade_tests.erl b/src/couch/test/couch_db_purge_upgrade_tests.erl new file mode 100644 index 0000000..db82bb7 --- /dev/null +++ b/src/couch/test/couch_db_purge_upgrade_tests.erl @@ -0,0 +1,200 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(couch_db_purge_upgrade_tests). + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + +-define(USER, "couch_db_purge_upgrade_admin"). +-define(PASS, "pass"). +-define(AUTH, {basic_auth, {?USER, ?PASS}}). +-define(CONTENT_JSON, {"Content-Type", "application/json"}). 
+ + +setup() -> + DbDir = config:get("couchdb", "database_dir"), + DbFileNames = [ + "db_without_purge_req", + "db_with_1_purge_req", + "db_with_2_purge_req" + ], + lists:map(fun(DbFileName) -> + write_db_doc(list_to_binary(DbFileName)), + OldDbFilePath = filename:join([?FIXTURESDIR, DbFileName ++ ".couch"]), + NewDbFileName = DbFileName ++ ".1525663363.couch", + NewDbFilePath = filename:join( + [DbDir, "shards/00000000-ffffffff/", NewDbFileName] + ), + ok = filelib:ensure_dir(NewDbFilePath), + file:delete(NewDbFilePath), + file:copy(OldDbFilePath, NewDbFilePath), + NewDbFilePath + end, DbFileNames). + + +teardown(Files) -> + lists:foreach(fun(File) -> file:delete(File) end, Files). + + +purge_upgrade_test_() -> + { + "Purge Upgrade tests", + { + setup, + fun chttpd_test_util:start_couch/0, + fun chttpd_test_util:stop_couch/1, + { + foreach, + fun setup/0, fun teardown/1, + [ + fun should_upgrade_legacy_db_without_purge_req/1, + fun should_upgrade_legacy_db_with_1_purge_req/1, + fun should_upgrade_legacy_db_with_N_purge_req/1 + ] + } + } + }. + + +should_upgrade_legacy_db_without_purge_req(_Files) -> + ?_test(begin + config:set("cluster", "q", "1", false), + DbName = <<"db_without_purge_req">>, + DbUrl = db_url(DbName), + + % 3 docs in legacy database before upgrade + % and added 2 new doc to database + {ok, _, _, ResultBody1} = create_doc(DbUrl, "doc4"), + {ok, _, _, _ResultBody} = create_doc(DbUrl, "doc5"), + {Json1} = ?JSON_DECODE(ResultBody1), + {ok, _, _, ResultBody2} = test_request:get(DbUrl), + {Json2} = ?JSON_DECODE(ResultBody2), + Rev4 = couch_util:get_value(<<"rev">>, Json1, undefined), + IdsRevsEJson = {[{<<"doc4">>, [Rev4]}]}, + ?assert(5 =:= couch_util:get_value(<<"doc_count">>, Json2)), + + IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), + {ok, Code, _, _ResultBody3} = test_request:post(DbUrl ++ "/_purge/", + [?CONTENT_JSON], IdsRevs), + ?assert(Code =:= 201), + + {ok, _, _, ResultBody4} = test_request:get(DbUrl), + {Json4} = ?JSON_DECODE(ResultBody4), + ?assert(4 =:= couch_util:get_value(<<"doc_count">>, Json4)) + end). + + +should_upgrade_legacy_db_with_1_purge_req(_Files) -> + ?_test(begin + config:set("cluster", "q", "1", false), + DbName = <<"db_with_1_purge_req">>, + DbUrl = db_url(DbName), + + % 3 docs in legacy database and 1 of them were purged before upgrade + % and adding 2 new docs to database + {ok, _, _, ResultBody1} = create_doc(DbUrl, "doc4"), + {ok, _, _, _ResultBody} = create_doc(DbUrl, "doc5"), + {Json1} = ?JSON_DECODE(ResultBody1), + {ok, _, _, ResultBody2} = test_request:get(DbUrl), + {Json2} = ?JSON_DECODE(ResultBody2), + Rev4 = couch_util:get_value(<<"rev">>, Json1, undefined), + IdsRevsEJson = {[{<<"doc4">>, [Rev4]}]}, + ?assert(4 =:= couch_util:get_value(<<"doc_count">>, Json2)), + + IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), + {ok, Code, _, _ResultBody3} = test_request:post(DbUrl ++ "/_purge/", + [?CONTENT_JSON], IdsRevs), + ?assert(Code =:= 201), + + {ok, _, _, ResultBody4} = test_request:get(DbUrl), + {Json4} = ?JSON_DECODE(ResultBody4), + ?assert(3 =:= couch_util:get_value(<<"doc_count">>, Json4)), + PurgeSeq = couch_util:get_value(<<"purge_seq">>, Json4), + [SeqNumber | _Rest] = binary:split(PurgeSeq, <<"-">>, [global]), + ?assert(<<"2">> =:= SeqNumber) + end). 
+ + +should_upgrade_legacy_db_with_N_purge_req(_Files) -> + ?_test(begin + config:set("cluster", "q", "1", false), + DbName = <<"db_with_2_purge_req">>, + DbUrl = db_url(DbName), + + % 3 docs in legacy database and 2 of them were purged before upgrade + % and adding 2 new doc to database + {ok, _, _, ResultBody1} = create_doc(DbUrl, "doc4"), + {ok, _, _, _ResultBody} = create_doc(DbUrl, "doc5"), + {Json1} = ?JSON_DECODE(ResultBody1), + {ok, _, _, ResultBody2} = test_request:get(DbUrl), + {Json2} = ?JSON_DECODE(ResultBody2), + Rev4 = couch_util:get_value(<<"rev">>, Json1, undefined), + IdsRevsEJson = {[{<<"doc4">>, [Rev4]}]}, + ?assert(3 =:= couch_util:get_value(<<"doc_count">>, Json2)), + + IdsRevs = binary_to_list(?JSON_ENCODE(IdsRevsEJson)), + {ok, Code, _, _ResultBody3} = test_request:post(DbUrl ++ "/_purge/", + [?CONTENT_JSON], IdsRevs), + ?assert(Code =:= 201), + + {ok, _, _, ResultBody4} = test_request:get(DbUrl), + {Json4} = ?JSON_DECODE(ResultBody4), + ?assert(2 =:= couch_util:get_value(<<"doc_count">>, Json4)), + PurgeSeq = couch_util:get_value(<<"purge_seq">>, Json4), + [SeqNumber | _Rest] = binary:split(PurgeSeq, <<"-">>, [global]), + ?assert(<<"3">> =:= SeqNumber) + end). + + +db_url(DbName) -> + Addr = config:get("httpd", "bind_address", "127.0.0.1"), + Port = integer_to_list(mochiweb_socket_server:get(chttpd, port)), + "http://" ++ Addr ++ ":" ++ Port ++ "/" ++ ?b2l(DbName). + +create_doc(Url, Id) -> + test_request:put(Url ++ "/" ++ Id, + [?CONTENT_JSON], "{\"mr\": \"rockoartischocko\"}"). + +write_db_doc(Id) -> + DbName = ?l2b(config:get("mem3", "shards_db", "_dbs")), + Doc = couch_doc:from_json_obj({[ + {<<"_id">>, Id}, + {<<"shard_suffix">>, ".1525663363"}, + {<<"changelog">>, + [[<<"add">>, <<"00000000-ffffffff">>, <<"nonode@nohost">>]] + }, + {<<"by_node">>, {[{<<"nonode@nohost">>, [<<"00000000-ffffffff">>]}]}}, + {<<"by_range">>, {[{<<"00000000-ffffffff">>, [<<"nonode@nohost">>]}]}} + ]}), + write_db_doc(DbName, Doc, true). + +write_db_doc(DbName, #doc{id=Id, body=Body} = Doc, ShouldMutate) -> + {ok, Db} = couch_db:open(DbName, [?ADMIN_CTX]), + try couch_db:open_doc(Db, Id, [ejson_body]) of + {ok, #doc{body = Body}} -> + % the doc is already in the desired state, we're done here + ok; + {not_found, _} when ShouldMutate -> + try couch_db:update_doc(Db, Doc, []) of + {ok, _} -> + ok + catch conflict -> + % check to see if this was a replication race or a different edit + write_db_doc(DbName, Doc, false) + end; + _ -> + % the doc already exists in a different state + conflict + after + couch_db:close(Db) + end. 
diff --git a/src/couch/test/fixtures/db_with_1_purge_req.couch b/src/couch/test/fixtures/db_with_1_purge_req.couch new file mode 100644 index 0000000..b0d39c9 Binary files /dev/null and b/src/couch/test/fixtures/db_with_1_purge_req.couch differ diff --git a/src/couch/test/fixtures/db_with_2_purge_req.couch b/src/couch/test/fixtures/db_with_2_purge_req.couch new file mode 100644 index 0000000..ee4e11b Binary files /dev/null and b/src/couch/test/fixtures/db_with_2_purge_req.couch differ diff --git a/src/couch/test/fixtures/db_without_purge_req.couch b/src/couch/test/fixtures/db_without_purge_req.couch new file mode 100644 index 0000000..814feb8 Binary files /dev/null and b/src/couch/test/fixtures/db_without_purge_req.couch differ diff --git a/src/couch_pse_tests/src/cpse_test_compaction.erl b/src/couch_pse_tests/src/cpse_test_compaction.erl index 11bf106..d006111 100644 --- a/src/couch_pse_tests/src/cpse_test_compaction.erl +++ b/src/couch_pse_tests/src/cpse_test_compaction.erl @@ -97,10 +97,8 @@ cpse_compact_with_everything(Db1) -> BarRev = cpse_util:prev_rev(BarFDI), Actions3 = [ - {batch, [ - {purge, {<<"foo">>, FooRev#rev_info.rev}}, - {purge, {<<"bar">>, BarRev#rev_info.rev}} - ]} + {purge, {<<"foo">>, FooRev#rev_info.rev}}, + {purge, {<<"bar">>, BarRev#rev_info.rev}} ], {ok, Db4} = cpse_util:apply_actions(Db3, Actions3), @@ -110,10 +108,9 @@ cpse_compact_with_everything(Db1) -> {<<"foo">>, [FooRev#rev_info.rev]} ], - ?assertEqual( - PurgedIdRevs, - lists:sort(couch_db_engine:get_last_purged(Db4)) - ), + {ok, PIdRevs4} = couch_db_engine:fold_purge_infos( + Db4, 0, fun fold_fun/2, [], []), + ?assertEqual(PurgedIdRevs, PIdRevs4), {ok, Db5} = try [Att0, Att1, Att2, Att3, Att4] = cpse_util:prep_atts(Db4, [ @@ -181,6 +178,132 @@ cpse_recompact_updates(Db1) -> ?assertEqual(nodiff, Diff). +cpse_purge_during_compact(Db1) -> + Actions1 = lists:map(fun(Seq) -> + {create, {docid(Seq), {[{<<"int">>, Seq}]}}} + end, lists:seq(1, 1000)), + Actions2 = [ + {create, {<<"foo">>, {[]}}}, + {create, {<<"bar">>, {[]}}}, + {create, {<<"baz">>, {[]}}} + ], + {ok, Db2} = cpse_util:apply_batch(Db1, Actions1 ++ Actions2), + Actions3 = [ + {conflict, {<<"bar">>, {[{<<"vsn">>, 2}]}}} + ], + {ok, Db3} = cpse_util:apply_actions(Db2, Actions3), + + {ok, Pid} = couch_db:start_compact(Db3), + catch erlang:suspend_process(Pid), + + [BarFDI, BazFDI] = couch_db_engine:open_docs(Db3, [<<"bar">>, <<"baz">>]), + BarRev = cpse_util:prev_rev(BarFDI), + BazRev = cpse_util:prev_rev(BazFDI), + Actions4 = [ + {purge, {<<"bar">>, BarRev#rev_info.rev}}, + {purge, {<<"baz">>, BazRev#rev_info.rev}} + ], + + {ok, Db4} = cpse_util:apply_actions(Db3, Actions4), + Term1 = cpse_util:db_as_term(Db4), + + catch erlang:resume_process(Pid), + cpse_util:compact(Db4), + + {ok, Db5} = couch_db:reopen(Db4), + Term2 = cpse_util:db_as_term(Db5), + + Diff = cpse_util:term_diff(Term1, Term2), + ?assertEqual(nodiff, Diff). 
+ + +cpse_multiple_purge_during_compact(Db1) -> + Actions1 = lists:map(fun(Seq) -> + {create, {docid(Seq), {[{<<"int">>, Seq}]}}} + end, lists:seq(1, 1000)), + Actions2 = [ + {create, {<<"foo">>, {[]}}}, + {create, {<<"bar">>, {[]}}}, + {create, {<<"baz">>, {[]}}} + ], + {ok, Db2} = cpse_util:apply_batch(Db1, Actions1 ++ Actions2), + + Actions3 = [ + {conflict, {<<"bar">>, {[{<<"vsn">>, 2}]}}} + ], + {ok, Db3} = cpse_util:apply_actions(Db2, Actions3), + + + {ok, Pid} = couch_db:start_compact(Db3), + catch erlang:suspend_process(Pid), + + [BarFDI, BazFDI] = couch_db_engine:open_docs(Db3, [<<"bar">>, <<"baz">>]), + BarRev = cpse_util:prev_rev(BarFDI), + Actions4 = [ + {purge, {<<"bar">>, BarRev#rev_info.rev}} + ], + {ok, Db4} = cpse_util:apply_actions(Db3, Actions4), + + BazRev = cpse_util:prev_rev(BazFDI), + Actions5 = [ + {purge, {<<"baz">>, BazRev#rev_info.rev}} + ], + + {ok, Db5} = cpse_util:apply_actions(Db4, Actions5), + Term1 = cpse_util:db_as_term(Db5), + + catch erlang:resume_process(Pid), + cpse_util:compact(Db5), + + {ok, Db6} = couch_db:reopen(Db5), + Term2 = cpse_util:db_as_term(Db6), + + Diff = cpse_util:term_diff(Term1, Term2), + ?assertEqual(nodiff, Diff). + + +cpse_compact_purged_docs_limit(Db1) -> + NumDocs = 1200, + {RActions, RIds} = lists:foldl(fun(Id, {CActions, CIds}) -> + Id1 = docid(Id), + Action = {create, {Id1, {[{<<"int">>, Id}]}}}, + {[Action| CActions], [Id1| CIds]} + end, {[], []}, lists:seq(1, NumDocs)), + Ids = lists:reverse(RIds), + {ok, Db2} = cpse_util:apply_batch(Db1, lists:reverse(RActions)), + + FDIs = couch_db_engine:open_docs(Db2, Ids), + RActions2 = lists:foldl(fun(FDI, CActions) -> + Id = FDI#full_doc_info.id, + PrevRev = cpse_util:prev_rev(FDI), + Rev = PrevRev#rev_info.rev, + [{purge, {Id, Rev}}| CActions] + end, [], FDIs), + {ok, Db3} = cpse_util:apply_batch(Db2, lists:reverse(RActions2)), + + % check that before compaction all NumDocs of purge_requests + % are in purge_tree, + % even if NumDocs=1200 is greater than purged_docs_limit=1000 + {ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos( + Db3, 0, fun fold_fun/2, [], []), + ?assertEqual(1, couch_db_engine:get_oldest_purge_seq(Db3)), + ?assertEqual(NumDocs, length(PurgedIdRevs)), + + % compact db + cpse_util:compact(Db3), + {ok, Db4} = couch_db:reopen(Db3), + + % check that after compaction only purged_docs_limit purge_requests + % are in purge_tree + PurgedDocsLimit = couch_db_engine:get_purge_infos_limit(Db4), + OldestPSeq = couch_db_engine:get_oldest_purge_seq(Db4), + {ok, PurgedIdRevs2} = couch_db_engine:fold_purge_infos( + Db4, OldestPSeq - 1, fun fold_fun/2, [], []), + ExpectedOldestPSeq = NumDocs - PurgedDocsLimit + 1, + ?assertEqual(ExpectedOldestPSeq, OldestPSeq), + ?assertEqual(PurgedDocsLimit, length(PurgedIdRevs2)). + + docid(I) -> Str = io_lib:format("~4..0b", [I]), iolist_to_binary(Str). @@ -189,3 +312,7 @@ docid(I) -> local_docid(I) -> Str = io_lib:format("_local/~4..0b", [I]), iolist_to_binary(Str). + + +fold_fun({_PSeq, _UUID, Id, Revs}, Acc) -> + {ok, [{Id, Revs} | Acc]}. diff --git a/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl b/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl new file mode 100644 index 0000000..42bc536 --- /dev/null +++ b/src/couch_pse_tests/src/cpse_test_fold_purge_infos.erl @@ -0,0 +1,166 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(cpse_test_fold_purge_infos). +-compile(export_all). + + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + + +-define(NUM_DOCS, 100). + + +setup_each() -> + {ok, Db} = cpse_util:create_db(), + Db. + + +teardown_each(Db) -> + ok = couch_server:delete(couch_db:name(Db), []). + + +cpse_empty_purged_docs(Db) -> + ?assertEqual({ok, []}, couch_db_engine:fold_purge_infos( + Db, 0, fun fold_fun/2, [], [])). + + +cpse_all_purged_docs(Db1) -> + {RActions, RIds} = lists:foldl(fun(Id, {CActions, CIds}) -> + Id1 = docid(Id), + Action = {create, {Id1, {[{<<"int">>, Id}]}}}, + {[Action| CActions], [Id1| CIds]} + end, {[], []}, lists:seq(1, ?NUM_DOCS)), + Actions = lists:reverse(RActions), + Ids = lists:reverse(RIds), + {ok, Db2} = cpse_util:apply_batch(Db1, Actions), + + FDIs = couch_db_engine:open_docs(Db2, Ids), + {RevActions2, RevIdRevs} = lists:foldl(fun(FDI, {CActions, CIdRevs}) -> + Id = FDI#full_doc_info.id, + PrevRev = cpse_util:prev_rev(FDI), + Rev = PrevRev#rev_info.rev, + Action = {purge, {Id, Rev}}, + {[Action| CActions], [{Id, [Rev]}| CIdRevs]} + end, {[], []}, FDIs), + {Actions2, IdsRevs} = {lists:reverse(RevActions2), lists:reverse(RevIdRevs)}, + + {ok, Db3} = cpse_util:apply_batch(Db2, Actions2), + {ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos( + Db3, 0, fun fold_fun/2, [], []), + ?assertEqual(IdsRevs, lists:reverse(PurgedIdRevs)). + + +cpse_start_seq(Db1) -> + Actions1 = [ + {create, {docid(1), {[{<<"int">>, 1}]}}}, + {create, {docid(2), {[{<<"int">>, 2}]}}}, + {create, {docid(3), {[{<<"int">>, 3}]}}}, + {create, {docid(4), {[{<<"int">>, 4}]}}}, + {create, {docid(5), {[{<<"int">>, 5}]}}} + ], + Ids = [docid(1), docid(2), docid(3), docid(4), docid(5)], + {ok, Db2} = cpse_util:apply_actions(Db1, Actions1), + + FDIs = couch_db_engine:open_docs(Db2, Ids), + {RActions2, RIdRevs} = lists:foldl(fun(FDI, {CActions, CIdRevs}) -> + Id = FDI#full_doc_info.id, + PrevRev = cpse_util:prev_rev(FDI), + Rev = PrevRev#rev_info.rev, + Action = {purge, {Id, Rev}}, + {[Action| CActions], [{Id, [Rev]}| CIdRevs]} + end, {[], []}, FDIs), + {ok, Db3} = cpse_util:apply_actions(Db2, lists:reverse(RActions2)), + + StartSeq = 3, + StartSeqIdRevs = lists:nthtail(StartSeq, lists:reverse(RIdRevs)), + {ok, PurgedIdRevs} = couch_db_engine:fold_purge_infos( + Db3, StartSeq, fun fold_fun/2, [], []), + ?assertEqual(StartSeqIdRevs, lists:reverse(PurgedIdRevs)). 
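Because fold_purge_infos/5 takes a start purge sequence, a consumer can pick up only the purge infos recorded after its last checkpoint. A minimal sketch of that usage (Db and StartSeq are placeholders; the fold fun mirrors the fold_fun/2 defined at the end of this module):

    %% Collect every purge info with a purge sequence greater than StartSeq.
    %% The accumulator is built newest first, so reverse it afterwards to get
    %% the infos in purge-sequence order.
    FoldFun = fun({_PSeq, _UUID, Id, Revs}, Acc) ->
        {ok, [{Id, Revs} | Acc]}
    end,
    {ok, Infos} = couch_db_engine:fold_purge_infos(
        Db, StartSeq, FoldFun, [], []),
    lists:reverse(Infos).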
+ + +cpse_id_rev_repeated(Db1) -> + Actions1 = [ + {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}}, + {conflict, {<<"foo">>, {[{<<"vsn">>, 2}]}}} + ], + {ok, Db2} = cpse_util:apply_actions(Db1, Actions1), + + [FDI1] = couch_db_engine:open_docs(Db2, [<<"foo">>]), + PrevRev1 = cpse_util:prev_rev(FDI1), + Rev1 = PrevRev1#rev_info.rev, + Actions2 = [ + {purge, {<<"foo">>, Rev1}} + ], + + {ok, Db3} = cpse_util:apply_actions(Db2, Actions2), + {ok, PurgedIdRevs1} = couch_db_engine:fold_purge_infos( + Db3, 0, fun fold_fun/2, [], []), + ExpectedPurgedIdRevs1 = [ + {<<"foo">>, [Rev1]} + ], + + ?assertEqual(ExpectedPurgedIdRevs1, lists:reverse(PurgedIdRevs1)), + ?assertEqual(1, couch_db_engine:get_purge_seq(Db3)), + + % purge the same Id,Rev when the doc still exists + {ok, Db4} = cpse_util:apply_actions(Db3, Actions2), + {ok, PurgedIdRevs2} = couch_db_engine:fold_purge_infos( + Db4, 0, fun fold_fun/2, [], []), + ExpectedPurgedIdRevs2 = [ + {<<"foo">>, [Rev1]}, + {<<"foo">>, [Rev1]} + ], + ?assertEqual(ExpectedPurgedIdRevs2, lists:reverse(PurgedIdRevs2)), + ?assertEqual(2, couch_db_engine:get_purge_seq(Db4)), + + [FDI2] = couch_db_engine:open_docs(Db4, [<<"foo">>]), + PrevRev2 = cpse_util:prev_rev(FDI2), + Rev2 = PrevRev2#rev_info.rev, + Actions3 = [ + {purge, {<<"foo">>, Rev2}} + ], + {ok, Db5} = cpse_util:apply_actions(Db4, Actions3), + + {ok, PurgedIdRevs3} = couch_db_engine:fold_purge_infos( + Db5, 0, fun fold_fun/2, [], []), + ExpectedPurgedIdRevs3 = [ + {<<"foo">>, [Rev1]}, + {<<"foo">>, [Rev1]}, + {<<"foo">>, [Rev2]} + ], + ?assertEqual(ExpectedPurgedIdRevs3, lists:reverse(PurgedIdRevs3)), + ?assertEqual(3, couch_db_engine:get_purge_seq(Db5)), + + % purge the same Id,Rev when the doc was completely purged + {ok, Db6} = cpse_util:apply_actions(Db5, Actions3), + + {ok, PurgedIdRevs4} = couch_db_engine:fold_purge_infos( + Db6, 0, fun fold_fun/2, [], []), + ExpectedPurgedIdRevs4 = [ + {<<"foo">>, [Rev1]}, + {<<"foo">>, [Rev1]}, + {<<"foo">>, [Rev2]}, + {<<"foo">>, [Rev2]} + ], + ?assertEqual(ExpectedPurgedIdRevs4, lists:reverse(PurgedIdRevs4)), + ?assertEqual(4, couch_db_engine:get_purge_seq(Db6)). + + +fold_fun({_PSeq, _UUID, Id, Revs}, Acc) -> + {ok, [{Id, Revs} | Acc]}. + + +docid(I) -> + Str = io_lib:format("~4..0b", [I]), + iolist_to_binary(Str). diff --git a/src/couch_pse_tests/src/cpse_test_get_set_props.erl b/src/couch_pse_tests/src/cpse_test_get_set_props.erl index 97f164b..1f86844 100644 --- a/src/couch_pse_tests/src/cpse_test_get_set_props.erl +++ b/src/couch_pse_tests/src/cpse_test_get_set_props.erl @@ -37,7 +37,8 @@ cpse_default_props(DbName) -> ?assertEqual(true, is_integer(couch_db_engine:get_disk_version(Db))), ?assertEqual(0, couch_db_engine:get_update_seq(Db)), ?assertEqual(0, couch_db_engine:get_purge_seq(Db)), - ?assertEqual([], couch_db_engine:get_last_purged(Db)), + ?assertEqual(true, is_integer(couch_db_engine:get_purge_infos_limit(Db))), + ?assertEqual(true, couch_db_engine:get_purge_infos_limit(Db) > 0), ?assertEqual([], couch_db_engine:get_security(Db)), ?assertEqual(1000, couch_db_engine:get_revs_limit(Db)), ?assertMatch(<<_:32/binary>>, couch_db_engine:get_uuid(Db)), diff --git a/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl b/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl new file mode 100644 index 0000000..52d6870 --- /dev/null +++ b/src/couch_pse_tests/src/cpse_test_purge_bad_checkpoints.erl @@ -0,0 +1,121 @@ +% Licensed under the Apache License, Version 2.0 (the "License"); you may not +% use this file except in compliance with the License. 
You may obtain a copy of +% the License at +% +% http://www.apache.org/licenses/LICENSE-2.0 +% +% Unless required by applicable law or agreed to in writing, software +% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +% License for the specific language governing permissions and limitations under +% the License. + +-module(cpse_test_purge_bad_checkpoints). +-compile(export_all). +-compile(nowarn_export_all). + + +-include_lib("couch/include/couch_eunit.hrl"). +-include_lib("couch/include/couch_db.hrl"). + + +setup_each() -> + {ok, Db1} = cpse_util:create_db(), + {ok, Revs} = cpse_util:save_docs(couch_db:name(Db1), [ + {[{'_id', foo0}, {vsn, 0}]}, + {[{'_id', foo1}, {vsn, 1}]}, + {[{'_id', foo2}, {vsn, 2}]}, + {[{'_id', foo3}, {vsn, 3}]}, + {[{'_id', foo4}, {vsn, 4}]}, + {[{'_id', foo5}, {vsn, 5}]}, + {[{'_id', foo6}, {vsn, 6}]}, + {[{'_id', foo7}, {vsn, 7}]}, + {[{'_id', foo8}, {vsn, 8}]}, + {[{'_id', foo9}, {vsn, 9}]} + ]), + PInfos = lists:map(fun(Idx) -> + DocId = iolist_to_binary(["foo", $0 + Idx]), + Rev = lists:nth(Idx + 1, Revs), + {cpse_util:uuid(), DocId, [Rev]} + end, lists:seq(0, 9)), + {ok, _} = cpse_util:purge(couch_db:name(Db1), PInfos), + {ok, Db2} = couch_db:reopen(Db1), + Db2. + + +teardown_each(Db) -> + ok = couch_server:delete(couch_db:name(Db), []). + + +cpse_bad_purge_seq(Db1) -> + Db2 = save_local_doc(Db1, <<"foo">>, ?MODULE, valid_fun), + ?assertEqual(0, couch_db:get_minimum_purge_seq(Db2)), + + ok = couch_db:set_purge_infos_limit(Db2, 5), + {ok, Db3} = couch_db:reopen(Db2), + ?assertEqual(1, couch_db:get_minimum_purge_seq(Db3)). + + +cpse_bad_verify_mod(Db1) -> + Db2 = save_local_doc(Db1, 2, [invalid_module], valid_fun), + ?assertEqual(0, couch_db:get_minimum_purge_seq(Db2)), + + ok = couch_db:set_purge_infos_limit(Db2, 5), + {ok, Db3} = couch_db:reopen(Db2), + ?assertEqual(2, couch_db:get_minimum_purge_seq(Db3)). + + +cpse_bad_verify_fun(Db1) -> + Db2 = save_local_doc(Db1, 2, ?MODULE, [invalid_function]), + ?assertEqual(0, couch_db:get_minimum_purge_seq(Db2)), + + ok = couch_db:set_purge_infos_limit(Db2, 5), + {ok, Db3} = couch_db:reopen(Db2), + ?assertEqual(2, couch_db:get_minimum_purge_seq(Db3)). + + +cpse_verify_fun_throws(Db1) -> + Db2 = save_local_doc(Db1, 2, ?MODULE, throw_fun), + ?assertEqual(0, couch_db:get_minimum_purge_seq(Db2)), + + ok = couch_db:set_purge_infos_limit(Db2, 5), + {ok, Db3} = couch_db:reopen(Db2), + ?assertEqual(2, couch_db:get_minimum_purge_seq(Db3)). + + +cpse_verify_non_boolean(Db1) -> + Db2 = save_local_doc(Db1, 2, ?MODULE, non_bool_fun), + ?assertEqual(0, couch_db:get_minimum_purge_seq(Db2)), + + ok = couch_db:set_purge_infos_limit(Db2, 5), + {ok, Db3} = couch_db:reopen(Db2), + ?assertEqual(2, couch_db:get_minimum_purge_seq(Db3)). + + +save_local_doc(Db1, PurgeSeq, Mod, Fun) -> + {Mega, Secs, _} = os:timestamp(), + NowSecs = Mega * 1000000 + Secs, + Doc = couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE({[ + {<<"_id">>, <<"_local/purge-test-stuff">>}, + {<<"purge_seq">>, PurgeSeq}, + {<<"timestamp_utc">>, NowSecs}, + {<<"verify_module">>, Mod}, + {<<"verify_function">>, Fun}, + {<<"verify_options">>, {[{<<"signature">>, <<"stuff">>}]}}, + {<<"type">>, <<"test">>} + ]}))), + {ok, _} = couch_db:update_doc(Db1, Doc, []), + {ok, Db2} = couch_db:reopen(Db1), + Db2. + + +valid_fun(_Options) -> + true. + + +throw_fun(_Options) -> + throw(failed). + + +not_bool(_Options) -> + ok. 
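The checkpoints being corrupted above are _local documents that purge clients write and that couch_db:get_minimum_purge_seq/1 inspects. As implied by save_local_doc/4, a well-formed checkpoint body looks roughly like this (field values are illustrative, not taken from the patch):

    {[
        {<<"_id">>, <<"_local/purge-test-stuff">>},
        {<<"purge_seq">>, 2},
        {<<"timestamp_utc">>, 1527200000},
        {<<"verify_module">>, <<"cpse_test_purge_bad_checkpoints">>},
        {<<"verify_function">>, <<"valid_fun">>},
        {<<"verify_options">>, {[{<<"signature">>, <<"stuff">>}]}},
        {<<"type">>, <<"test">>}
    ]}

Each test above corrupts one of these fields and asserts how get_minimum_purge_seq/1 reacts.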
diff --git a/src/couch_pse_tests/src/cpse_test_purge_docs.erl b/src/couch_pse_tests/src/cpse_test_purge_docs.erl index 4352268..bebd234 100644 --- a/src/couch_pse_tests/src/cpse_test_purge_docs.erl +++ b/src/couch_pse_tests/src/cpse_test_purge_docs.erl @@ -18,142 +18,446 @@ -include_lib("couch/include/couch_db.hrl"). +-define(REV_DEPTH, 100). + + setup_each() -> {ok, Db} = cpse_util:create_db(), - Db. + couch_db:name(Db). -teardown_each(Db) -> - ok = couch_server:delete(couch_db:name(Db), []). +teardown_each(DbName) -> + ok = couch_server:delete(DbName, []). -cpse_purge_simple(Db1) -> - Actions1 = [ - {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}} - ], - {ok, Db2} = cpse_util:apply_actions(Db1, Actions1), +cpse_purge_simple(DbName) -> + {ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}), - ?assertEqual(1, couch_db_engine:get_doc_count(Db2)), - ?assertEqual(0, couch_db_engine:get_del_doc_count(Db2)), - ?assertEqual(1, couch_db_engine:get_update_seq(Db2)), - ?assertEqual(0, couch_db_engine:get_purge_seq(Db2)), - ?assertEqual([], couch_db_engine:get_last_purged(Db2)), + cpse_util:assert_db_props(DbName, [ + {doc_count, 1}, + {del_doc_count, 0}, + {update_seq, 1}, + {purge_seq, 0}, + {purge_infos, []} + ]), - [FDI] = couch_db_engine:open_docs(Db2, [<<"foo">>]), - PrevRev = cpse_util:prev_rev(FDI), - Rev = PrevRev#rev_info.rev, - - Actions2 = [ - {purge, {<<"foo">>, Rev}} + PurgeInfos = [ + {cpse_util:uuid(), <<"foo1">>, [Rev]} ], - {ok, Db3} = cpse_util:apply_actions(Db2, Actions2), - ?assertEqual(0, couch_db_engine:get_doc_count(Db3)), - ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)), - ?assertEqual(2, couch_db_engine:get_update_seq(Db3)), - ?assertEqual(1, couch_db_engine:get_purge_seq(Db3)), - ?assertEqual([{<<"foo">>, [Rev]}], couch_db_engine:get_last_purged(Db3)). + {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos), + ?assertEqual([Rev], PRevs), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 0}, + {del_doc_count, 0}, + {update_seq, 2}, + {purge_seq, 1}, + {purge_infos, PurgeInfos} + ]). -cpse_purge_conflicts(Db1) -> - Actions1 = [ - {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}}, - {conflict, {<<"foo">>, {[{<<"vsn">>, 2}]}}} +cpse_purge_simple_info_check(DbName) -> + {ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}), + PurgeInfos = [ + {cpse_util:uuid(), <<"foo1">>, [Rev]} ], - {ok, Db2} = cpse_util:apply_actions(Db1, Actions1), + {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos), + ?assertEqual([Rev], PRevs), - ?assertEqual(1, couch_db_engine:get_doc_count(Db2)), - ?assertEqual(0, couch_db_engine:get_del_doc_count(Db2)), - ?assertEqual(2, couch_db_engine:get_update_seq(Db2)), - ?assertEqual(0, couch_db_engine:get_purge_seq(Db2)), - ?assertEqual([], couch_db_engine:get_last_purged(Db2)), + {ok, AllInfos} = couch_util:with_db(DbName, fun(Db) -> + couch_db_engine:fold_purge_infos(Db, 0, fun fold_all_infos/2, [], []) + end), - [FDI1] = couch_db_engine:open_docs(Db2, [<<"foo">>]), - PrevRev1 = cpse_util:prev_rev(FDI1), - Rev1 = PrevRev1#rev_info.rev, + ?assertMatch([{1, <<_/binary>>, <<"foo1">>, [Rev]}], AllInfos). - Actions2 = [ - {purge, {<<"foo">>, Rev1}} + +cpse_purge_empty_db(DbName) -> + PurgeInfos = [ + {cpse_util:uuid(), <<"foo">>, [{0, <<0>>}]} + ], + + {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos), + ?assertEqual([], PRevs), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 0}, + {del_doc_count, 0}, + {update_seq, 1}, + {changes, 0}, + {purge_seq, 1}, + {purge_infos, PurgeInfos} + ]). 
+ + +cpse_purge_single_docid(DbName) -> + {ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [ + {[{'_id', foo1}, {vsn, 1}]}, + {[{'_id', foo2}, {vsn, 2}]} + ]), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 2}, + {del_doc_count, 0}, + {update_seq, 2}, + {changes, 2}, + {purge_seq, 0}, + {purge_infos, []} + ]), + + PurgeInfos = [ + {cpse_util:uuid(), <<"foo1">>, [Rev1]} + ], + {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos), + ?assertEqual([Rev1], PRevs), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 1}, + {del_doc_count, 0}, + {update_seq, 3}, + {changes, 1}, + {purge_seq, 1}, + {purge_infos, PurgeInfos} + ]). + + +cpse_purge_multiple_docids(DbName) -> + {ok, [Rev1, Rev2]} = cpse_util:save_docs(DbName, [ + {[{'_id', foo1}, {vsn, 1.1}]}, + {[{'_id', foo2}, {vsn, 1.2}]} + ]), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 2}, + {del_doc_count, 0}, + {update_seq, 2}, + {changes, 2}, + {purge_seq, 0}, + {purge_infos, []} + ]), + + PurgeInfos = [ + {cpse_util:uuid(), <<"foo1">>, [Rev1]}, + {cpse_util:uuid(), <<"foo2">>, [Rev2]} ], - {ok, Db3} = cpse_util:apply_actions(Db2, Actions2), - ?assertEqual(1, couch_db_engine:get_doc_count(Db3)), - ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)), - ?assertEqual(4, couch_db_engine:get_update_seq(Db3)), - ?assertEqual(1, couch_db_engine:get_purge_seq(Db3)), - ?assertEqual([{<<"foo">>, [Rev1]}], couch_db_engine:get_last_purged(Db3)), + {ok, [{ok, PRevs1}, {ok, PRevs2}]} = cpse_util:purge(DbName, PurgeInfos), + + ?assertEqual([Rev1], PRevs1), + ?assertEqual([Rev2], PRevs2), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 0}, + {del_doc_count, 0}, + {update_seq, 3}, + {changes, 0}, + {purge_seq, 2}, + {purge_infos, PurgeInfos} + ]). + + +cpse_purge_no_docids(DbName) -> + {ok, [_Rev1, _Rev2]} = cpse_util:save_docs(DbName, [ + {[{'_id', foo1}, {vsn, 1}]}, + {[{'_id', foo2}, {vsn, 2}]} + ]), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 2}, + {del_doc_count, 0}, + {update_seq, 2}, + {changes, 2}, + {purge_seq, 0}, + {purge_infos, []} + ]), + + {ok, []} = cpse_util:purge(DbName, []), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 2}, + {del_doc_count, 0}, + {update_seq, 2}, + {changes, 2}, + {purge_seq, 0}, + {purge_infos, []} + ]). + + +cpse_purge_rev_path(DbName) -> + {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, 1}]}), + Update = {[ + {<<"_id">>, <<"foo">>}, + {<<"_rev">>, couch_doc:rev_to_str(Rev1)}, + {<<"_deleted">>, true}, + {<<"vsn">>, 2} + ]}, + {ok, Rev2} = cpse_util:save_doc(DbName, Update), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 0}, + {del_doc_count, 1}, + {update_seq, 2}, + {changes, 1}, + {purge_seq, 0}, + {purge_infos, []} + ]), + + PurgeInfos = [ + {cpse_util:uuid(), <<"foo">>, [Rev2]} + ], - [FDI2] = couch_db_engine:open_docs(Db3, [<<"foo">>]), - PrevRev2 = cpse_util:prev_rev(FDI2), - Rev2 = PrevRev2#rev_info.rev, + {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos), + ?assertEqual([Rev2], PRevs), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 0}, + {del_doc_count, 0}, + {update_seq, 3}, + {changes, 0}, + {purge_seq, 1}, + {purge_infos, PurgeInfos} + ]). 
+ + +cpse_purge_deep_revision_path(DbName) -> + {ok, InitRev} = cpse_util:save_doc(DbName, {[{'_id', bar}, {vsn, 0}]}), + LastRev = lists:foldl(fun(Count, PrevRev) -> + Update = {[ + {'_id', bar}, + {'_rev', couch_doc:rev_to_str(PrevRev)}, + {vsn, Count} + ]}, + {ok, NewRev} = cpse_util:save_doc(DbName, Update), + NewRev + end, InitRev, lists:seq(1, ?REV_DEPTH)), + + PurgeInfos = [ + {cpse_util:uuid(), <<"bar">>, [LastRev]} + ], - Actions3 = [ - {purge, {<<"foo">>, Rev2}} + {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos), + ?assertEqual([LastRev], PRevs), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 0}, + {del_doc_count, 0}, + {update_seq, ?REV_DEPTH + 2}, + {changes, 0}, + {purge_seq, 1}, + {purge_infos, PurgeInfos} + ]). + + +cpse_purge_partial_revs(DbName) -> + {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, <<"1.1">>}]}), + Update = {[ + {'_id', foo}, + {'_rev', couch_doc:rev_to_str({1, [crypto:hash(md5, <<"1.2">>)]})}, + {vsn, <<"1.2">>} + ]}, + {ok, [_Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]), + + PurgeInfos = [ + {cpse_util:uuid(), <<"foo">>, [Rev1]} ], - {ok, Db4} = cpse_util:apply_actions(Db3, Actions3), - ?assertEqual(0, couch_db_engine:get_doc_count(Db4)), - ?assertEqual(0, couch_db_engine:get_del_doc_count(Db4)), - ?assertEqual(5, couch_db_engine:get_update_seq(Db4)), - ?assertEqual(2, couch_db_engine:get_purge_seq(Db4)), - ?assertEqual([{<<"foo">>, [Rev2]}], couch_db_engine:get_last_purged(Db4)). + {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos), + ?assertEqual([Rev1], PRevs), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 1}, + {del_doc_count, 0}, + {update_seq, 3}, + {changes, 1}, + {purge_seq, 1}, + {purge_infos, PurgeInfos} + ]). + + +cpse_purge_missing_docid(DbName) -> + {ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [ + {[{'_id', foo1}, {vsn, 1}]}, + {[{'_id', foo2}, {vsn, 2}]} + ]), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 2}, + {del_doc_count, 0}, + {update_seq, 2}, + {changes, 2}, + {purge_seq, 0}, + {purge_infos, []} + ]), + + PurgeInfos = [ + {cpse_util:uuid(), <<"baz">>, [Rev1]} + ], + {ok, [{ok, []}]} = cpse_util:purge(DbName, PurgeInfos), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 2}, + {del_doc_count, 0}, + {update_seq, 3}, + {changes, 2}, + {purge_seq, 1}, + {purge_infos, PurgeInfos} + ]). + + +cpse_purge_duplicate_docids(DbName) -> + {ok, [Rev1, _Rev2]} = cpse_util:save_docs(DbName, [ + {[{'_id', foo1}, {vsn, 1}]}, + {[{'_id', foo2}, {vsn, 2}]} + ]), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 2}, + {del_doc_count, 0}, + {update_seq, 2}, + {purge_seq, 0}, + {changes, 2}, + {purge_infos, []} + ]), + + PurgeInfos = [ + {cpse_util:uuid(), <<"foo1">>, [Rev1]}, + {cpse_util:uuid(), <<"foo1">>, [Rev1]} + ], -cpse_add_delete_purge(Db1) -> - Actions1 = [ - {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}}, - {delete, {<<"foo">>, {[{<<"vsn">>, 2}]}}} + {ok, Resp} = cpse_util:purge(DbName, PurgeInfos), + ?assertEqual([{ok, [Rev1]}, {ok, []}], Resp), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 1}, + {del_doc_count, 0}, + {update_seq, 3}, + {purge_seq, 2}, + {changes, 1}, + {purge_infos, PurgeInfos} + ]). 
+ + +cpse_purge_internal_revision(DbName) -> + {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, 1}]}), + Update = {[ + {'_id', foo}, + {'_rev', couch_doc:rev_to_str(Rev1)}, + {vsn, 2} + ]}, + {ok, _Rev2} = cpse_util:save_doc(DbName, Update), + + PurgeInfos = [ + {cpse_util:uuid(), <<"foo">>, [Rev1]} ], - {ok, Db2} = cpse_util:apply_actions(Db1, Actions1), + {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos), + ?assertEqual([], PRevs), - ?assertEqual(0, couch_db_engine:get_doc_count(Db2)), - ?assertEqual(1, couch_db_engine:get_del_doc_count(Db2)), - ?assertEqual(2, couch_db_engine:get_update_seq(Db2)), - ?assertEqual(0, couch_db_engine:get_purge_seq(Db2)), - ?assertEqual([], couch_db_engine:get_last_purged(Db2)), + cpse_util:assert_db_props(DbName, [ + {doc_count, 1}, + {del_doc_count, 0}, + {update_seq, 3}, + {changes, 1}, + {purge_seq, 1}, + {purge_infos, PurgeInfos} + ]). - [FDI] = couch_db_engine:open_docs(Db2, [<<"foo">>]), - PrevRev = cpse_util:prev_rev(FDI), - Rev = PrevRev#rev_info.rev, - Actions2 = [ - {purge, {<<"foo">>, Rev}} - ], - {ok, Db3} = cpse_util:apply_actions(Db2, Actions2), +cpse_purge_missing_revision(DbName) -> + {ok, [_Rev1, Rev2]} = cpse_util:save_docs(DbName, [ + {[{'_id', foo1}, {vsn, 1}]}, + {[{'_id', foo2}, {vsn, 2}]} + ]), - ?assertEqual(0, couch_db_engine:get_doc_count(Db3)), - ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)), - ?assertEqual(3, couch_db_engine:get_update_seq(Db3)), - ?assertEqual(1, couch_db_engine:get_purge_seq(Db3)), - ?assertEqual([{<<"foo">>, [Rev]}], couch_db_engine:get_last_purged(Db3)). + PurgeInfos = [ + {cpse_util:uuid(), <<"foo1">>, [Rev2]} + ], + {ok, [{ok, PRevs}]} = cpse_util:purge(DbName, PurgeInfos), + ?assertEqual([], PRevs), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 2}, + {del_doc_count, 0}, + {update_seq, 3}, + {changes, 2}, + {purge_seq, 1}, + {purge_infos, PurgeInfos} + ]). + + +cpse_purge_repeated_revisions(DbName) -> + {ok, Rev1} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, <<"1.1">>}]}), + Update = {[ + {'_id', foo}, + {'_rev', couch_doc:rev_to_str({1, [crypto:hash(md5, <<"1.2">>)]})}, + {vsn, <<"1.2">>} + ]}, + {ok, [Rev2]} = cpse_util:save_docs(DbName, [Update], [replicated_changes]), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 1}, + {del_doc_count, 0}, + {update_seq, 2}, + {changes, 1}, + {purge_seq, 0}, + {purge_infos, []} + ]), + + PurgeInfos1 = [ + {cpse_util:uuid(), <<"foo">>, [Rev1]}, + {cpse_util:uuid(), <<"foo">>, [Rev1, Rev2]} + ], -cpse_add_two_purge_one(Db1) -> - Actions1 = [ - {create, {<<"foo">>, {[{<<"vsn">>, 1}]}}}, - {create, {<<"bar">>, {[]}}} + {ok, [{ok, PRevs1}, {ok, PRevs2}]} = cpse_util:purge(DbName, PurgeInfos1), + ?assertEqual([Rev1], PRevs1), + ?assertEqual([Rev2], PRevs2), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 0}, + {del_doc_count, 0}, + {update_seq, 3}, + {changes, 0}, + {purge_seq, 2}, + {purge_infos, PurgeInfos1} + ]). 
+ + +cpse_purge_repeated_uuid(DbName) -> + {ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo1}, {vsn, 1.1}]}), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 1}, + {del_doc_count, 0}, + {update_seq, 1}, + {changes, 1}, + {purge_seq, 0}, + {purge_infos, []} + ]), + + PurgeInfos = [ + {cpse_util:uuid(), <<"foo1">>, [Rev]} ], - {ok, Db2} = cpse_util:apply_actions(Db1, Actions1), + {ok, [{ok, PRevs1}]} = cpse_util:purge(DbName, PurgeInfos), + ?assertEqual([Rev], PRevs1), - ?assertEqual(2, couch_db_engine:get_doc_count(Db2)), - ?assertEqual(0, couch_db_engine:get_del_doc_count(Db2)), - ?assertEqual(2, couch_db_engine:get_update_seq(Db2)), - ?assertEqual(0, couch_db_engine:get_purge_seq(Db2)), - ?assertEqual([], couch_db_engine:get_last_purged(Db2)), + % Attempting to purge a repeated UUID is an error + ?assertThrow({badreq, _}, cpse_util:purge(DbName, PurgeInfos)), - [FDI] = couch_db_engine:open_docs(Db2, [<<"foo">>]), - PrevRev = cpse_util:prev_rev(FDI), - Rev = PrevRev#rev_info.rev, + % Although we can replicate it in + {ok, []} = cpse_util:purge(DbName, PurgeInfos, [replicated_changes]), + + cpse_util:assert_db_props(DbName, [ + {doc_count, 0}, + {del_doc_count, 0}, + {update_seq, 2}, + {changes, 0}, + {purge_seq, 1}, + {purge_infos, PurgeInfos} + ]). - Actions2 = [ - {purge, {<<"foo">>, Rev}} - ], - {ok, Db3} = cpse_util:apply_actions(Db2, Actions2), - ?assertEqual(1, couch_db_engine:get_doc_count(Db3)), - ?assertEqual(0, couch_db_engine:get_del_doc_count(Db3)), - ?assertEqual(3, couch_db_engine:get_update_seq(Db3)), - ?assertEqual(1, couch_db_engine:get_purge_seq(Db3)), - ?assertEqual([{<<"foo">>, [Rev]}], couch_db_engine:get_last_purged(Db3)). +fold_all_infos(Info, Acc) -> + {ok, [Info | Acc]}. diff --git a/src/couch_pse_tests/src/cpse_util.erl b/src/couch_pse_tests/src/cpse_util.erl index 8492c99..3f3f41f 100644 --- a/src/couch_pse_tests/src/cpse_util.erl +++ b/src/couch_pse_tests/src/cpse_util.erl @@ -25,7 +25,10 @@ cpse_test_attachments, cpse_test_fold_docs, cpse_test_fold_changes, + cpse_test_fold_purge_infos, cpse_test_purge_docs, + cpse_test_purge_replication, + cpse_test_purge_bad_checkpoints, cpse_test_compaction, cpse_test_ref_counting ]). @@ -116,6 +119,123 @@ shutdown_db(Db) -> end). +save_doc(DbName, Json) -> + {ok, [Rev]} = save_docs(DbName, [Json], []), + {ok, Rev}. + + +save_docs(DbName, JsonDocs) -> + save_docs(DbName, JsonDocs, []). + + +save_docs(DbName, JsonDocs, Options) -> + Docs = lists:map(fun(JDoc) -> + couch_doc:from_json_obj(?JSON_DECODE(?JSON_ENCODE(JDoc))) + end, JsonDocs), + Opts = [full_commit | Options], + {ok, Db} = couch_db:open_int(DbName, []), + try + case lists:member(replicated_changes, Options) of + true -> + {ok, []} = couch_db:update_docs( + Db, Docs, Opts, replicated_changes), + {ok, lists:map(fun(Doc) -> + {Pos, [RevId | _]} = Doc#doc.revs, + {Pos, RevId} + end, Docs)}; + false -> + {ok, Resp} = couch_db:update_docs(Db, Docs, Opts), + {ok, [Rev || {ok, Rev} <- Resp]} + end + after + couch_db:close(Db) + end. + + +open_doc(DbName, DocId0) -> + DocId = ?JSON_DECODE(?JSON_ENCODE(DocId0)), + {ok, Db} = couch_db:open_int(DbName, []), + try + couch_db:get_doc_info(Db, DocId) + after + couch_db:close(Db) + end. + + +purge(DbName, PurgeInfos) -> + purge(DbName, PurgeInfos, []). 
+ + +purge(DbName, PurgeInfos0, Options) when is_list(PurgeInfos0) -> + PurgeInfos = lists:map(fun({UUID, DocIdJson, Revs}) -> + {UUID, ?JSON_DECODE(?JSON_ENCODE(DocIdJson)), Revs} + end, PurgeInfos0), + {ok, Db} = couch_db:open_int(DbName, []), + try + couch_db:purge_docs(Db, PurgeInfos, Options) + after + couch_db:close(Db) + end. + + +uuid() -> + couch_uuids:random(). + + +assert_db_props(DbName, Props) when is_binary(DbName) -> + {ok, Db} = couch_db:open_int(DbName, []), + try + assert_db_props(Db, Props) + after + couch_db:close(Db) + end; + +assert_db_props(Db, Props) -> + assert_each_prop(Db, Props). + + +assert_each_prop(_Db, []) -> + ok; +assert_each_prop(Db, [{doc_count, Expect} | Rest]) -> + {ok, DocCount} = couch_db:get_doc_count(Db), + ?assertEqual(Expect, DocCount), + assert_each_prop(Db, Rest); +assert_each_prop(Db, [{del_doc_count, Expect} | Rest]) -> + {ok, DelDocCount} = couch_db:get_del_doc_count(Db), + ?assertEqual(Expect, DelDocCount), + assert_each_prop(Db, Rest); +assert_each_prop(Db, [{update_seq, Expect} | Rest]) -> + UpdateSeq = couch_db:get_update_seq(Db), + ?assertEqual(Expect, UpdateSeq), + assert_each_prop(Db, Rest); +assert_each_prop(Db, [{changes, Expect} | Rest]) -> + {ok, NumChanges} = couch_db:fold_changes(Db, 0, fun aep_changes/2, 0, []), + ?assertEqual(Expect, NumChanges), + assert_each_prop(Db, Rest); +assert_each_prop(Db, [{purge_seq, Expect} | Rest]) -> + {ok, PurgeSeq} = couch_db:get_purge_seq(Db), + ?assertEqual(Expect, PurgeSeq), + assert_each_prop(Db, Rest); +assert_each_prop(Db, [{purge_infos, Expect} | Rest]) -> + {ok, PurgeInfos} = couch_db:fold_purge_infos(Db, 0, fun aep_fold/2, [], []), + ?assertEqual(Expect, lists:reverse(PurgeInfos)), + assert_each_prop(Db, Rest). + + +aep_changes(_A, Acc) -> + {ok, Acc + 1}. + + +aep_fold({_PSeq, UUID, Id, Revs}, Acc) -> + {ok, [{UUID, Id, Revs} | Acc]}. + + +apply_actions(DbName, Actions) when is_binary(DbName) -> + {ok, Db0} = couch_db:open_int(DbName, [?ADMIN_CTX]), + {ok, Db1} = apply_actions(Db0, Actions), + couch_db:close(Db1), + ok; + apply_actions(Db, []) -> {ok, Db}; @@ -161,7 +281,7 @@ apply_batch(Db, Actions) -> {ok, Db2} = couch_db:reopen(Db1), if PurgeInfos == [] -> ok; true -> - {ok, _, _} = couch_db:purge_docs(Db2, PurgeInfos) + {ok, _} = couch_db:purge_docs(Db2, PurgeInfos) end, couch_db:reopen(Db2). @@ -203,7 +323,7 @@ gen_write(Db, {create, {DocId, Body, Atts}}) -> gen_write(_Db, {purge, {DocId, PrevRevs0, _}}) -> PrevRevs = if is_list(PrevRevs0) -> PrevRevs0; true -> [PrevRevs0] end, - {purge, {DocId, PrevRevs}}; + {purge, {couch_uuids:random(), DocId, PrevRevs}}; gen_write(Db, {Action, {DocId, Body, Atts}}) -> #full_doc_info{} = PrevFDI = couch_db:get_full_doc_info(Db, DocId), @@ -304,7 +424,8 @@ db_as_term(Db) -> {props, db_props_as_term(Db)}, {docs, db_docs_as_term(Db)}, {local_docs, db_local_docs_as_term(Db)}, - {changes, db_changes_as_term(Db)} + {changes, db_changes_as_term(Db)}, + {purged_docs, db_purged_docs_as_term(Db)} ]. @@ -315,7 +436,7 @@ db_props_as_term(Db) -> get_disk_version, get_update_seq, get_purge_seq, - get_last_purged, + get_purge_infos_limit, get_security, get_revs_limit, get_uuid, @@ -348,6 +469,16 @@ db_changes_as_term(Db) -> end, Changes)). +db_purged_docs_as_term(Db) -> + InitPSeq = couch_db_engine:get_oldest_purge_seq(Db) - 1, + FoldFun = fun({PSeq, UUID, Id, Revs}, Acc) -> + {ok, [{PSeq, UUID, Id, Revs} | Acc]} + end, + {ok, PDocs} = couch_db_engine:fold_purge_infos( + Db, InitPSeq, FoldFun, [], []), + lists:reverse(PDocs). 
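Taken together, the new cpse_util helpers give engine tests a compact idiom: save by database name, purge by {UUID, Id, Revs}, then assert the resulting database properties. A sketch of a typical test body built from them (a hedged example only, assuming DbName names an existing test db):

    %% Save one doc, purge its only revision, then check the final state.
    {ok, Rev} = cpse_util:save_doc(DbName, {[{'_id', foo}, {vsn, 1}]}),
    PurgeInfos = [{cpse_util:uuid(), <<"foo">>, [Rev]}],
    {ok, [{ok, [Rev]}]} = cpse_util:purge(DbName, PurgeInfos),
    cpse_util:assert_db_props(DbName, [
        {doc_count, 0},
        {purge_seq, 1},
        {purge_infos, PurgeInfos}
    ]).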
+
+
 fdi_to_term(Db, FDI) ->
     #full_doc_info{
         id = DocId,
@@ -476,8 +607,8 @@ compact(Db) ->
             ok;
         {'DOWN', Ref, _, _, Reason} ->
             erlang:error({compactor_died, Reason})
-    after ?COMPACTOR_TIMEOUT ->
-        erlang:error(compactor_timed_out)
+    after ?COMPACTOR_TIMEOUT ->
+        erlang:error(compactor_timed_out)
     end,
     test_util:wait(fun() ->