From: davisp@apache.org
To: commits@couchdb.apache.org
Date: Wed, 15 Mar 2017 22:11:25 -0000
Subject: [1/2] couch commit: updated refs/heads/COUCHDB-3326-clustered-purge to 7a28094

Repository: couchdb-couch
Updated Branches:
  refs/heads/COUCHDB-3326-clustered-purge [created] 7a2809477


http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/7a280947/src/test_engine_util.erl
----------------------------------------------------------------------
diff --git a/src/test_engine_util.erl b/src/test_engine_util.erl
index 33048d3..4d06cee 100644
--- a/src/test_engine_util.erl
+++ b/src/test_engine_util.erl
@@ -24,6 +24,7 @@
     test_engine_attachments,
     test_engine_fold_docs,
     test_engine_fold_changes,
+    test_engine_fold_purged_docs,
     test_engine_purge_docs,
     test_engine_compaction,
     test_engine_ref_counting
@@ -129,28 +130,34 @@ apply_action(Engine, St, Action) ->
     apply_batch(Engine, St, [Action]).


+apply_batch(Engine, St, [{purge, {Id, Revs}}]) ->
+    UpdateSeq = Engine:get(St, update_seq) + 1,
+    case gen_write(Engine, St, {purge, {Id, Revs}}, UpdateSeq) of
+        {_, _, purged_before} ->
+            St;
+        {Pair, _, {Id, PRevs}} ->
+            UUID = couch_uuids:new(),
+            {ok, NewSt} = Engine:purge_doc_revs(St, [Pair], [{UUID, Id, PRevs}]),
+            NewSt
+    end;
+
 apply_batch(Engine, St, Actions) ->
     UpdateSeq = Engine:get_update_seq(St) + 1,
-    AccIn = {UpdateSeq, [], [], []},
+    AccIn = {UpdateSeq, [], []},
     AccOut = lists:foldl(fun(Action, Acc) ->
-        {SeqAcc, DocAcc, LDocAcc, PurgeAcc} = Acc,
+        {SeqAcc, DocAcc, LDocAcc} = Acc,
         case Action of
             {_, {<<"_local/", _/binary>>, _}} ->
                 LDoc = gen_local_write(Engine, St, Action),
-                {SeqAcc, DocAcc, [LDoc | LDocAcc], PurgeAcc};
+                {SeqAcc, DocAcc, [LDoc | LDocAcc]};
             _ ->
-                case gen_write(Engine, St, Action, SeqAcc) of
-                    {_OldFDI, _NewFDI} = Pair ->
-                        {SeqAcc + 1, [Pair | DocAcc], LDocAcc, PurgeAcc};
-                    {Pair, NewSeqAcc, NewPurgeInfo} ->
-                        NewPurgeAcc = [NewPurgeInfo | PurgeAcc],
-                        {NewSeqAcc, [Pair | DocAcc], LDocAcc, NewPurgeAcc}
-                end
+                {OldFDI, NewFDI} = gen_write(Engine, St, Action, SeqAcc),
+                {SeqAcc + 1, [{OldFDI, NewFDI} | DocAcc], LDocAcc}
         end
     end, AccIn, Actions),
-    {_, Docs0, LDocs, PurgeIdRevs} = AccOut,
+    {_, Docs0, LDocs} = AccOut,
     Docs = lists:reverse(Docs0),
-    {ok, NewSt} = Engine:write_doc_infos(St, Docs, LDocs, PurgeIdRevs),
+    {ok, NewSt} = Engine:write_doc_infos(St, Docs, LDocs),
     NewSt.

@@ -218,39 +225,70 @@ gen_write(Engine, St, {create, {DocId, Body, Atts0}}, UpdateSeq) ->
     }};
 
 
 gen_write(Engine, St, {purge, {DocId, PrevRevs0, _}}, UpdateSeq) ->
-    [#full_doc_info{} = PrevFDI] = Engine:open_docs(St, [DocId]),
-    PrevRevs = if is_list(PrevRevs0) -> PrevRevs0; true -> [PrevRevs0] end,
-
-    #full_doc_info{
-        rev_tree = PrevTree
-    } = PrevFDI,
-
-    {NewTree, RemRevs} = couch_key_tree:remove_leafs(PrevTree, PrevRevs),
-    RemovedAll = lists:sort(RemRevs) == lists:sort(PrevRevs),
-    if RemovedAll -> ok; true ->
-        % If we didn't purge all the requested revisions
-        % then its a bug in the test.
-        erlang:error({invalid_purge_test_revs, PrevRevs})
-    end,
+    case Engine:open_docs(St, [DocId]) of
+        [not_found] ->
+            % Check if this doc has been purged before
+            FoldFun = fun({_PSeq, _UUID, Id, _Revs}, Acc) ->
+                case Id of
+                    DocId -> true;
+                    _ -> Acc
+                end
+            end,
+            {ok, IsPurgedBefore} = Engine:fold_purged_docs(St, 0, FoldFun, false, []),
+            case IsPurgedBefore of
+                true -> {{}, UpdateSeq, purged_before};
+                false -> erlang:error({invalid_purge_test_id, DocId})
+            end;
+        [#full_doc_info{} = PrevFDI] ->
+            PrevRevs = if is_list(PrevRevs0) -> PrevRevs0; true -> [PrevRevs0] end,
+
+            #full_doc_info{
+                rev_tree = PrevTree
+            } = PrevFDI,
+
+            {NewTree, RemRevs0} = couch_key_tree:remove_leafs(PrevTree, PrevRevs),
+            {RemRevs, NotRemRevs} = lists:partition(fun(R) ->
+                lists:member(R, RemRevs0) end, PrevRevs),
+
+            if NotRemRevs == [] -> ok; true ->
+                % Check if these Revs have been purged before
+                FoldFun = fun({_PSeq, _UUID, Id, Revs}, Acc) ->
+                    case Id of
+                        DocId -> Acc ++ Revs;
+                        _ -> Acc
+                    end
+                end,
+                {ok, PurgedRevs} = Engine:fold_purged_docs(St, 0, FoldFun, [], []),
+                case lists:subtract(PrevRevs, PurgedRevs) of [] -> ok; _ ->
+                    % If we didn't purge all the requested revisions
+                    % and they haven't been purged before,
+                    % then it's a bug in the test.
+                        erlang:error({invalid_purge_test_revs, PrevRevs})
+                end
+            end,
+
+            case {RemRevs, NewTree} of
+                {[], _} ->
+                    {{PrevFDI, PrevFDI}, UpdateSeq, purged_before};
+                {_, []} ->
+                    % We've completely purged the document
+                    {{PrevFDI, not_found}, UpdateSeq, {DocId, RemRevs}};
+                _ ->
+                    % We have to relabel the update_seq of all
+                    % leaves. See couch_db_updater for details.
+                    {NewNewTree, NewUpdateSeq} = couch_key_tree:mapfold(fun
+                        (_RevId, Leaf, leaf, InnerSeqAcc) ->
+                            {Leaf#leaf{seq = InnerSeqAcc}, InnerSeqAcc + 1};
+                        (_RevId, Value, _Type, InnerSeqAcc) ->
+                            {Value, InnerSeqAcc}
+                    end, UpdateSeq, NewTree),
+                    NewFDI = PrevFDI#full_doc_info{
+                        update_seq = NewUpdateSeq - 1,
+                        rev_tree = NewNewTree
+                    },
+                    {{PrevFDI, NewFDI}, NewUpdateSeq, {DocId, RemRevs}}
-    case NewTree of
-        [] ->
-            % We've completely purged the document
-            {{PrevFDI, not_found}, UpdateSeq, {DocId, RemRevs}};
-        _ ->
-            % We have to relabel the update_seq of all
-            % leaves. See couch_db_updater for details.
-            {NewNewTree, NewUpdateSeq} = couch_key_tree:mapfold(fun
-                (_RevId, Leaf, leaf, InnerSeqAcc) ->
-                    {Leaf#leaf{seq = InnerSeqAcc}, InnerSeqAcc + 1};
-                (_RevId, Value, _Type, InnerSeqAcc) ->
-                    {Value, InnerSeqAcc}
-            end, UpdateSeq, NewTree),
-            NewFDI = PrevFDI#full_doc_info{
-                update_seq = NewUpdateSeq - 1,
-                rev_tree = NewNewTree
-            },
-            {{PrevFDI, NewFDI}, NewUpdateSeq, {DocId, RemRevs}}
+            end
     end;
 
 
 gen_write(Engine, St, {Action, {DocId, Body, Atts0}}, UpdateSeq) ->

@@ -403,7 +441,8 @@ db_as_term(Engine, St) ->
         {props, db_props_as_term(Engine, St)},
         {docs, db_docs_as_term(Engine, St)},
         {local_docs, db_local_docs_as_term(Engine, St)},
-        {changes, db_changes_as_term(Engine, St)}
+        {changes, db_changes_as_term(Engine, St)},
+        {purged_docs, db_purged_docs_as_term(Engine, St)}
     ].

@@ -414,6 +453,7 @@ db_props_as_term(Engine, St) ->
         get_disk_version,
         get_update_seq,
         get_purge_seq,
+        get_purged_docs_limit,
         get_last_purged,
         get_security,
         get_revs_limit,

@@ -447,6 +487,15 @@ db_changes_as_term(Engine, St) ->
     end, Changes)).
 
 
+db_purged_docs_as_term(Engine, St) ->
+    StartPSeq = Engine:get(St, oldest_purge_seq) - 1,
+    FoldFun = fun({PSeq, UUID, Id, Revs}, Acc) ->
+        [{PSeq, UUID, Id, Revs} | Acc]
+    end,
+    {ok, PDocs} = Engine:fold_purged_docs(St, StartPSeq, FoldFun, [], []),
+    PDocs.
+
+
 fdi_to_term(Engine, St, FDI) ->
     #full_doc_info{
         id = DocId,

@@ -573,11 +622,27 @@ compact(Engine, St1, DbPath) ->
         {'$gen_cast', {compact_done, Engine, Term0}} ->
             Term0;
         {'DOWN', Ref, _, _, Reason} ->
-            erlang:error({compactor_died, Reason})
+            erlang:error({compactor_died, Reason});
+        {'$gen_call', {Pid, Ref2}, get_disposable_purge_seq} ->
+            % assuming no client exists (no internal replications or indexes)
+            PSeq = Engine:get(St2, purge_seq),
+            OldestPSeq = Engine:get(St2, oldest_purge_seq),
+            PDocsLimit = Engine:get(St2, purged_docs_limit),
+            ExpectedDispPSeq = PSeq - PDocsLimit,
+            DisposablePSeq = if ExpectedDispPSeq > 0 -> ExpectedDispPSeq;
+                true -> OldestPSeq - 1 end,
+            Pid ! {Ref2, {ok, DisposablePSeq}},
+            receive
+                {'$gen_cast', {compact_done, Engine, Term0}} ->
+                    Term0;
+                {'DOWN', Ref, _, _, Reason} ->
+                    erlang:error({compactor_died, Reason})
+            after 10000 ->
+                erlang:error(compactor_timed_out)
+            end
     after 10000 ->
         erlang:error(compactor_timed_out)
     end,
-
     {ok, St2, DbName, Pid, Term}.
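
A minimal usage sketch of the new engine callback exercised above, for readers following along (illustrative only, not part of the patch): it assumes the Engine/St calling convention and the {PSeq, UUID, Id, Revs} record shape shown in test_engine_util.erl, and the helper name purged_since/3 is made up for this example.

%% Illustrative sketch (not part of the patch): collect every purge record
%% newer than StartPSeq, oldest first, using the Engine:fold_purged_docs/5
%% convention from test_engine_util.erl above.
purged_since(Engine, St, StartPSeq) ->
    FoldFun = fun({PSeq, _UUID, Id, Revs}, Acc) ->
        [{PSeq, Id, Revs} | Acc]
    end,
    {ok, Folded} = Engine:fold_purged_docs(St, StartPSeq, FoldFun, [], []),
    lists:reverse(Folded).
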
http://git-wip-us.apache.org/repos/asf/couchdb-couch/blob/7a280947/test/couch_db_purge_docs_tests.erl
----------------------------------------------------------------------
diff --git a/test/couch_db_purge_docs_tests.erl b/test/couch_db_purge_docs_tests.erl
new file mode 100644
index 0000000..58d869f
--- /dev/null
+++ b/test/couch_db_purge_docs_tests.erl
@@ -0,0 +1,348 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_db_purge_docs_tests).
+
+-include_lib("couch/include/couch_eunit.hrl").
+-include_lib("couch/include/couch_db.hrl").
+
+
+setup() ->
+    DbName = ?tempdb(),
+    {ok, _Db} = create_db(DbName),
+    DbName.
+
+teardown(DbName) ->
+    delete_db(DbName),
+    ok.
+
+couch_db_purge_docs_test_() ->
+    {
+        "Couch_db purge_docs",
+        [
+            {
+                setup,
+                fun test_util:start_couch/0, fun test_util:stop_couch/1,
+                [couch_db_purge_docs()]
+            },
+            purge_with_replication()
+        ]
+    }.
+
+
+couch_db_purge_docs() ->
+    {
+        foreach,
+        fun setup/0, fun teardown/1,
+        [
+            fun purge_simple/1,
+            fun add_delete_purge/1,
+            fun add_two_purge_one/1,
+            fun purge_id_not_exist/1,
+            fun purge_non_leaf_rev/1,
+            fun purge_conflicts/1,
+            fun purge_deep_tree/1
+        ]
+    }.
+
+
+purge_simple(DbName) ->
+    ?_test(
+        begin
+            {ok, Db} = couch_db:open_int(DbName, []),
+            DocId = <<"foo">>,
+            {ok, Rev} = save_doc(Db, {[{<<"_id">>, DocId}, {<<"vsn">>, 1}]}),
+            couch_db:ensure_full_commit(Db),
+
+            {ok, Db2} = couch_db:reopen(Db),
+            ?assertEqual(1, couch_db_engine:get(Db2, doc_count)),
+            ?assertEqual(0, couch_db_engine:get(Db2, del_doc_count)),
+            ?assertEqual(1, couch_db_engine:get(Db2, update_seq)),
+            ?assertEqual(0, couch_db_engine:get(Db2, purge_seq)),
+            ?assertEqual(nil, couch_db_engine:get(Db2, purge_tree_state)),
+
+            UUID = couch_uuids:new(),
+            {ok, {PurgeSeq, [{ok, PRevs}]}} = couch_db:purge_docs(Db2,
+                [{UUID, DocId, [Rev]}]),
+            ?assertEqual([Rev], PRevs),
+            ?assertEqual(1, PurgeSeq),
+
+            {ok, Db3} = couch_db:reopen(Db2),
+            {ok, PIdsRevs} = couch_db:fold_purged_docs(Db3, 0, fun fold_fun/2, [], []),
+            ?assertEqual(0, couch_db_engine:get(Db3, doc_count)),
+            ?assertEqual(0, couch_db_engine:get(Db3, del_doc_count)),
+            ?assertEqual(2, couch_db_engine:get(Db3, update_seq)),
+            ?assertEqual(1, couch_db_engine:get(Db3, purge_seq)),
+            ?assertEqual([{DocId, [Rev]}], PIdsRevs)
+        end).
+
+
+add_delete_purge(DbName) ->
+    ?_test(
+        begin
+            {ok, Db0} = couch_db:open_int(DbName, []),
+            DocId = <<"foo">>,
+            {ok, Rev} = save_doc(Db0, {[{<<"_id">>, DocId}, {<<"vsn">>, 1}]}),
+            couch_db:ensure_full_commit(Db0),
+            {ok, Db1} = couch_db:reopen(Db0),
+
+            {ok, Rev2} = save_doc(Db1, {[{<<"_id">>, DocId}, {<<"vsn">>, 2},
+                {<<"_rev">>, couch_doc:rev_to_str(Rev)}, {<<"_deleted">>, true}]}),
+            couch_db:ensure_full_commit(Db1),
+
+            {ok, Db2} = couch_db:reopen(Db1),
+            {ok, PIdsRevs1} = couch_db:fold_purged_docs(Db2, 0, fun fold_fun/2, [], []),
+            ?assertEqual(0, couch_db_engine:get(Db2, doc_count)),
+            ?assertEqual(1, couch_db_engine:get(Db2, del_doc_count)),
+            ?assertEqual(2, couch_db_engine:get(Db2, update_seq)),
+            ?assertEqual(0, couch_db_engine:get(Db2, purge_seq)),
+            ?assertEqual([], PIdsRevs1),
+
+            UUID = couch_uuids:new(),
+            {ok, {PurgeSeq, [{ok, PRevs}]}} = couch_db:purge_docs(Db2,
+                [{UUID, DocId, [Rev2]}]),
+            ?assertEqual([Rev2], PRevs),
+            ?assertEqual(1, PurgeSeq),
+
+            {ok, Db3} = couch_db:reopen(Db2),
+            {ok, PIdsRevs2} = couch_db:fold_purged_docs(Db3, 0, fun fold_fun/2, [], []),
+            ?assertEqual(0, couch_db_engine:get(Db3, doc_count)),
+            ?assertEqual(0, couch_db_engine:get(Db3, del_doc_count)),
+            ?assertEqual(3, couch_db_engine:get(Db3, update_seq)),
+            ?assertEqual(1, couch_db_engine:get(Db3, purge_seq)),
+            ?assertEqual([{DocId, [Rev2]}], PIdsRevs2)
+        end).
+
+
+add_two_purge_one(DbName) ->
+    ?_test(
+        begin
+            {ok, Db} = couch_db:open_int(DbName, []),
+            {ok, Rev} = save_doc(Db, {[{<<"_id">>, <<"foo">>}, {<<"vsn">>, 1}]}),
+            {ok, _Rev2} = save_doc(Db, {[{<<"_id">>, <<"bar">>}]}),
+            couch_db:ensure_full_commit(Db),
+
+            {ok, Db2} = couch_db:reopen(Db),
+            ?assertEqual(2, couch_db_engine:get(Db2, doc_count)),
+            ?assertEqual(0, couch_db_engine:get(Db2, del_doc_count)),
+            ?assertEqual(2, couch_db_engine:get(Db2, update_seq)),
+            ?assertEqual(0, couch_db_engine:get(Db2, purge_seq)),
+
+            UUID = couch_uuids:new(),
+            {ok, {PurgeSeq, [{ok, PRevs}]}} = couch_db:purge_docs(Db2,
+                [{UUID, <<"foo">>, [Rev]}]),
+            ?assertEqual([Rev], PRevs),
+            ?assertEqual(1, PurgeSeq),
+
+            {ok, Db3} = couch_db:reopen(Db2),
+            {ok, PIdsRevs} = couch_db:fold_purged_docs(Db3, 0, fun fold_fun/2, [], []),
+            ?assertEqual(1, couch_db_engine:get(Db3, doc_count)),
+            ?assertEqual(0, couch_db_engine:get(Db3, del_doc_count)),
+            ?assertEqual(3, couch_db_engine:get(Db3, update_seq)),
+            ?assertEqual(1, couch_db_engine:get(Db3, purge_seq)),
+            ?assertEqual([{<<"foo">>, [Rev]}], PIdsRevs)
+        end).
+
+
+purge_id_not_exist(DbName) ->
+    ?_test(
+        begin
+            {ok, Db} = couch_db:open_int(DbName, []),
+            UUID = couch_uuids:new(),
+            {ok, {PurgeSeq, [{ok, PRevs}]}} = couch_db:purge_docs(Db,
+                [{UUID, <<"foo">>, [{0, <<0>>}]}]),
+
+            ?assertEqual([], PRevs),
+            ?assertEqual(0, PurgeSeq),
+
+            {ok, Db2} = couch_db:reopen(Db),
+            {ok, PIdsRevs} = couch_db:fold_purged_docs(Db2, 0, fun fold_fun/2, [], []),
+            ?assertEqual(0, couch_db_engine:get(Db2, doc_count)),
+            ?assertEqual(0, couch_db_engine:get(Db2, del_doc_count)),
+            ?assertEqual(0, couch_db_engine:get(Db2, update_seq)),
+            ?assertEqual(0, couch_db_engine:get(Db2, purge_seq)),
+            ?assertEqual([], PIdsRevs)
+        end).
+
+
+purge_non_leaf_rev(DbName) ->
+    ?_test(
+        begin
+            {ok, Db} = couch_db:open_int(DbName, []),
+            {ok, Rev} = save_doc(Db, {[{<<"_id">>, <<"foo">>}, {<<"vsn">>, 1}]}),
+            couch_db:ensure_full_commit(Db),
+            {ok, Db2} = couch_db:reopen(Db),
+
+            {ok, _Rev2} = save_doc(Db2, {[{<<"_id">>, <<"foo">>}, {<<"vsn">>, 2},
+                {<<"_rev">>, couch_doc:rev_to_str(Rev)}]}),
+            couch_db:ensure_full_commit(Db2),
+            {ok, Db3} = couch_db:reopen(Db2),
+
+            UUID = couch_uuids:new(),
+            {ok, {PurgeSeq, [{ok, PRevs}]}} = couch_db:purge_docs(Db3,
+                [{UUID, <<"foo">>, [Rev]}]),
+            ?assertEqual([], PRevs),
+            ?assertEqual(0, PurgeSeq),
+
+            {ok, Db4} = couch_db:reopen(Db3),
+            {ok, PIdsRevs} = couch_db:fold_purged_docs(Db4, 0, fun fold_fun/2, [], []),
+            ?assertEqual(1, couch_db_engine:get(Db4, doc_count)),
+            ?assertEqual(2, couch_db_engine:get(Db4, update_seq)),
+            ?assertEqual(0, couch_db_engine:get(Db4, purge_seq)),
+            ?assertEqual([], PIdsRevs)
+        end).
+
+
+purge_conflicts(DbName) ->
+    ?_test(
+        begin
+            {ok, Db} = couch_db:open_int(DbName, []),
+            {ok, Rev} = save_doc(Db, {[{<<"_id">>, <<"foo">>}, {<<"vsn">>, <<"v1.1">>}]}),
+            couch_db:ensure_full_commit(Db),
+            {ok, Db2} = couch_db:reopen(Db),
+
+            % create a conflict
+            DocConflict = #doc{
+                id = <<"foo">>,
+                revs = {1, [couch_crypto:hash(md5, <<"v1.2">>)]},
+                body = {[{<<"vsn">>, <<"v1.2">>}]}
+            },
+            {ok, _} = couch_db:update_doc(Db2, DocConflict, [], replicated_changes),
+            couch_db:ensure_full_commit(Db2),
+            {ok, Db3} = couch_db:reopen(Db2),
+
+            UUID = couch_uuids:new(),
+            {ok, {PurgeSeq, [{ok, PRevs}]}} = couch_db:purge_docs(Db3,
+                [{UUID, <<"foo">>, [Rev]}]),
+            ?assertEqual([Rev], PRevs),
+            ?assertEqual(1, PurgeSeq),
+
+            {ok, Db4} = couch_db:reopen(Db3),
+            {ok, PIdsRevs} = couch_db:fold_purged_docs(Db4, 0, fun fold_fun/2, [], []),
+            % still has one doc
+            ?assertEqual(1, couch_db_engine:get(Db4, doc_count)),
+            ?assertEqual(0, couch_db_engine:get(Db4, del_doc_count)),
+            ?assertEqual(3, couch_db_engine:get(Db4, update_seq)),
+            ?assertEqual(1, couch_db_engine:get(Db4, purge_seq)),
+            ?assertEqual([{<<"foo">>, [Rev]}], PIdsRevs)
+        end).
+
+
+purge_deep_tree(DbName) ->
+    ?_test(
+        begin
+            NRevs = 300,
+            {ok, Db0} = couch_db:open_int(DbName, []),
+            {ok, InitRev} = save_doc(Db0, {[{<<"_id">>, <<"bar">>}, {<<"vsn">>, 0}]}),
+            ok = couch_db:close(Db0),
+            LastRev = lists:foldl(fun(V, PrevRev) ->
+                {ok, Db} = couch_db:open_int(DbName, []),
+                {ok, Rev} = save_doc(Db,
+                    {[{<<"_id">>, <<"bar">>},
+                    {<<"vsn">>, V},
+                    {<<"_rev">>, couch_doc:rev_to_str(PrevRev)}]}
+                ),
+                ok = couch_db:close(Db),
+                Rev
+            end, InitRev, lists:seq(2, NRevs)),
+            {ok, Db1} = couch_db:open_int(DbName, []),
+
+            % purge doc
+            UUID = couch_uuids:new(),
+            {ok, {PurgeSeq, [{ok, PRevs}]}} = couch_db:purge_docs(Db1,
+                [{UUID, <<"bar">>, [LastRev]}]),
+            ?assertEqual([LastRev], PRevs),
+            ?assertEqual(1, PurgeSeq),
+
+            {ok, Db2} = couch_db:reopen(Db1),
+            % no docs left
+            ?assertEqual(0, couch_db_engine:get(Db2, doc_count)),
+            ?assertEqual(0, couch_db_engine:get(Db2, del_doc_count)),
+            ?assertEqual(1, couch_db_engine:get(Db2, purge_seq)),
+            ?assertEqual(NRevs + 1, couch_db_engine:get(Db2, update_seq))
+        end).
+
+
+purge_with_replication() ->
+    ?_test(
+        begin
+            Ctx = test_util:start_couch([couch_replicator]),
+            Source = ?tempdb(),
+            {ok, SourceDb} = create_db(Source),
+            Target = ?tempdb(),
+            {ok, _Db} = create_db(Target),
+
+            % create Doc and do replication to Target
+            {ok, Rev} = save_doc(SourceDb,
+                {[{<<"_id">>, <<"foo">>}, {<<"vsn">>, 1}]}),
+            couch_db:ensure_full_commit(SourceDb),
+            {ok, SourceDb2} = couch_db:reopen(SourceDb),
+            RepObject = {[
+                {<<"source">>, Source},
+                {<<"target">>, Target}
+            ]},
+            {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
+            {ok, TargetDb} = couch_db:open_int(Target, []),
+            {ok, Doc} = couch_db:get_doc_info(TargetDb, <<"foo">>),
+
+            % purge Doc on Source and do replication to Target
+            % assert purges don't get replicated to Target
+            UUID = couch_uuids:new(),
+            {ok, _} = couch_db:purge_docs(SourceDb2, [{UUID, <<"foo">>, [Rev]}]),
+            {ok, SourceDb3} = couch_db:reopen(SourceDb2),
+            {ok, _} = couch_replicator:replicate(RepObject, ?ADMIN_USER),
+            {ok, TargetDb2} = couch_db:open_int(Target, []),
+            {ok, Doc2} = couch_db:get_doc_info(TargetDb2, <<"foo">>),
+            [Rev2] = Doc2#doc_info.revs,
+            ?assertEqual(Rev, Rev2#rev_info.rev),
+            ?assertEqual(Doc, Doc2),
+            ?assertEqual(0, couch_db_engine:get(SourceDb3, doc_count)),
+            ?assertEqual(1, couch_db_engine:get(SourceDb3, purge_seq)),
+            ?assertEqual(1, couch_db_engine:get(TargetDb2, doc_count)),
+            ?assertEqual(0, couch_db_engine:get(TargetDb2, purge_seq)),
+
+            % replicate from Target to Source
+            % assert that Doc reappears on Source
+            RepObject2 = {[
+                {<<"source">>, Target},
+                {<<"target">>, Source}
+            ]},
+            {ok, _} = couch_replicator:replicate(RepObject2, ?ADMIN_USER),
+            {ok, SourceDb4} = couch_db:reopen(SourceDb3),
+            {ok, Doc3} = couch_db:get_doc_info(SourceDb4, <<"foo">>),
+            [Rev3] = Doc3#doc_info.revs,
+            ?assertEqual(Rev, Rev3#rev_info.rev),
+            ?assertEqual(1, couch_db_engine:get(SourceDb4, doc_count)),
+            ?assertEqual(1, couch_db_engine:get(SourceDb4, purge_seq)),
+
+            delete_db(Source),
+            delete_db(Target),
+            ok = application:stop(couch_replicator),
+            ok = test_util:stop_couch(Ctx)
+        end).
+
+
+create_db(DbName) ->
+    couch_db:create(DbName, [?ADMIN_CTX, overwrite]).
+
+delete_db(DbName) ->
+    couch_server:delete(DbName, [?ADMIN_CTX]).
+
+save_doc(Db, Json) ->
+    Doc = couch_doc:from_json_obj(Json),
+    couch_db:update_doc(Db, Doc, []).
+
+fold_fun({_PSeq, _UUID, Id, Revs}, Acc) ->
+    [{Id, Revs} | Acc].
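
For reference, a condensed sketch of the couch_db-level purge API that the tests above exercise (illustrative only, not part of the patch): the purge_docs/2 and fold_purged_docs/5 call shapes are taken from the test cases, and the helper name purge_and_list/3 is made up for this example.

%% Illustrative sketch (not part of the patch): purge one revision of DocId
%% from Db, then read back the recorded {Id, Revs} purge pairs, using the
%% same call shapes as the tests above.
purge_and_list(Db, DocId, Rev) ->
    UUID = couch_uuids:new(),
    {ok, {PurgeSeq, [{ok, PRevs}]}} =
        couch_db:purge_docs(Db, [{UUID, DocId, [Rev]}]),
    {ok, Db2} = couch_db:reopen(Db),
    FoldFun = fun({_PSeq, _UUID, Id, Revs}, Acc) -> [{Id, Revs} | Acc] end,
    {ok, PIdsRevs} = couch_db:fold_purged_docs(Db2, 0, FoldFun, [], []),
    {PurgeSeq, PRevs, PIdsRevs}.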