couchdb-commits mailing list archives

From beno...@apache.org
Subject [6/8] create couch_replicator application.
Date Mon, 05 Dec 2011 09:33:30 GMT
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ad526790/src/couch_replicator/test/002-replication-compact.t
----------------------------------------------------------------------
diff --git a/src/couch_replicator/test/002-replication-compact.t b/src/couch_replicator/test/002-replication-compact.t
new file mode 100755
index 0000000..c8b265e
--- /dev/null
+++ b/src/couch_replicator/test/002-replication-compact.t
@@ -0,0 +1,486 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Verify that compacting databases that are being used as the source or
+% target of a replication doesn't affect the replication and that the
+% replication doesn't hold their reference counters forever.
+
+-define(b2l(B), binary_to_list(B)).
+
+-record(user_ctx, {
+    name = null,
+    roles = [],
+    handler
+}).
+
+-record(db, {
+    main_pid = nil,
+    update_pid = nil,
+    compactor_pid = nil,
+    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
+    fd,
+    updater_fd,
+    fd_ref_counter,
+    header = nil,
+    committed_update_seq,
+    fulldocinfo_by_id_btree,
+    docinfo_by_seq_btree,
+    local_docs_btree,
+    update_seq,
+    name,
+    filepath,
+    validate_doc_funs = [],
+    security = [],
+    security_ptr = nil,
+    user_ctx = #user_ctx{},
+    waiting_delayed_commit = nil,
+    revs_limit = 1000,
+    fsync_options = [],
+    options = [],
+    compression
+}).
+
+-record(rep, {
+    id,
+    source,
+    target,
+    options,
+    user_ctx,
+    doc_id
+}).
+
+
+source_db_name() -> <<"couch_test_rep_db_a">>.
+target_db_name() -> <<"couch_test_rep_db_b">>.
+
+
+main(_) ->
+    test_util:init_code_path(),
+
+    etap:plan(376),
+    case (catch test()) of
+        ok ->
+            etap:end_tests();
+        Other ->
+            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+            etap:bail(Other)
+    end,
+    ok.
+
+
+test() ->
+    couch_server_sup:start_link(test_util:config_files()),
+    ibrowse:start(),
+
+    Pairs = [
+        {source_db_name(), target_db_name()},
+        {{remote, source_db_name()}, target_db_name()},
+        {source_db_name(), {remote, target_db_name()}},
+        {{remote, source_db_name()}, {remote, (target_db_name())}}
+    ],
+
+    lists:foreach(
+        fun({Source, Target}) ->
+            {ok, SourceDb} = create_db(source_db_name()),
+            etap:is(couch_db:is_idle(SourceDb), true,
+                "Source database is idle before starting replication"),
+
+            {ok, TargetDb} = create_db(target_db_name()),
+            etap:is(couch_db:is_idle(TargetDb), true,
+                "Target database is idle before starting replication"),
+
+            {ok, RepPid, RepId} = replicate(Source, Target),
+            check_active_tasks(RepPid, RepId, Source, Target),
+            {ok, DocsWritten} = populate_and_compact_test(
+                RepPid, SourceDb, TargetDb),
+
+            wait_target_in_sync(DocsWritten, TargetDb),
+            check_active_tasks(RepPid, RepId, Source, Target),
+            cancel_replication(RepId, RepPid),
+            compare_dbs(SourceDb, TargetDb),
+
+            delete_db(SourceDb),
+            delete_db(TargetDb),
+            couch_server_sup:stop(),
+            ok = timer:sleep(1000),
+            couch_server_sup:start_link(test_util:config_files())
+        end,
+        Pairs),
+
+    couch_server_sup:stop(),
+    ok.
+
+
+populate_and_compact_test(RepPid, SourceDb0, TargetDb0) ->
+    etap:is(is_process_alive(RepPid), true, "Replication process is alive"),
+    check_db_alive("source", SourceDb0),
+    check_db_alive("target", TargetDb0),
+
+    Writer = spawn_writer(SourceDb0),
+
+    lists:foldl(
+        fun(_, {SourceDb, TargetDb, DocCount}) ->
+            pause_writer(Writer),
+
+            compact_db("source", SourceDb),
+            etap:is(is_process_alive(RepPid), true,
+                "Replication process is alive after source database compaction"),
+            check_db_alive("source", SourceDb),
+            check_ref_counter("source", SourceDb),
+
+            compact_db("target", TargetDb),
+            etap:is(is_process_alive(RepPid), true,
+                "Replication process is alive after target database compaction"),
+            check_db_alive("target", TargetDb),
+            check_ref_counter("target", TargetDb),
+
+            {ok, SourceDb2} = reopen_db(SourceDb),
+            {ok, TargetDb2} = reopen_db(TargetDb),
+
+            resume_writer(Writer),
+            wait_writer(Writer, DocCount),
+
+            compact_db("source", SourceDb2),
+            etap:is(is_process_alive(RepPid), true,
+                "Replication process is alive after source database compaction"),
+            check_db_alive("source", SourceDb2),
+            pause_writer(Writer),
+            check_ref_counter("source", SourceDb2),
+            resume_writer(Writer),
+
+            compact_db("target", TargetDb2),
+            etap:is(is_process_alive(RepPid), true,
+                "Replication process is alive after target database compaction"),
+            check_db_alive("target", TargetDb2),
+            pause_writer(Writer),
+            check_ref_counter("target", TargetDb2),
+            resume_writer(Writer),
+
+            {ok, SourceDb3} = reopen_db(SourceDb2),
+            {ok, TargetDb3} = reopen_db(TargetDb2),
+            {SourceDb3, TargetDb3, DocCount + 50}
+        end,
+        {SourceDb0, TargetDb0, 50}, lists:seq(1, 5)),
+
+    DocsWritten = stop_writer(Writer),
+    {ok, DocsWritten}.
+
+
+check_db_alive(Type, #db{main_pid = Pid}) ->
+    etap:is(is_process_alive(Pid), true,
+        "Local " ++ Type ++ " database main pid is alive").
+
+
+compact_db(Type, #db{name = Name}) ->
+    {ok, Db} = couch_db:open_int(Name, []),
+    {ok, CompactPid} = couch_db:start_compact(Db),
+    MonRef = erlang:monitor(process, CompactPid),
+    receive
+    {'DOWN', MonRef, process, CompactPid, normal} ->
+        ok;
+    {'DOWN', MonRef, process, CompactPid, Reason} ->
+        etap:bail("Error compacting " ++ Type ++ " database " ++ ?b2l(Name) ++
+            ": " ++ couch_util:to_list(Reason))
+    after 30000 ->
+        etap:bail("Compaction for " ++ Type ++ " database " ++ ?b2l(Name) ++
+            " didn't finish")
+    end,
+    ok = couch_db:close(Db).
+
+
+check_ref_counter(Type, #db{name = Name, fd_ref_counter = OldRefCounter}) ->
+    MonRef = erlang:monitor(process, OldRefCounter),
+    receive
+    {'DOWN', MonRef, process, OldRefCounter, _} ->
+        etap:diag("Old " ++ Type ++ " database ref counter terminated")
+    after 30000 ->
+        etap:bail("Old " ++ Type ++ " database ref counter didn't terminate")
+    end,
+    {ok, #db{fd_ref_counter = NewRefCounter} = Db} = couch_db:open_int(Name, []),
+    ok = couch_db:close(Db),
+    etap:isnt(
+        NewRefCounter, OldRefCounter, Type ++ " database has new ref counter").
+
+
+reopen_db(#db{name = Name}) ->
+    {ok, Db} = couch_db:open_int(Name, []),
+    ok = couch_db:close(Db),
+    {ok, Db}.
+
+
+wait_target_in_sync(DocCount, #db{name = TargetName}) ->
+    wait_target_in_sync_loop(DocCount, TargetName, 300).
+
+
+wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
+    etap:bail("Could not get source and target databases in sync");
+wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
+    {ok, Target} = couch_db:open_int(TargetName, []),
+    {ok, TargetInfo} = couch_db:get_db_info(Target),
+    ok = couch_db:close(Target),
+    TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
+    case TargetDocCount == DocCount of
+    true ->
+        etap:diag("Source and target databases are in sync");
+    false ->
+        ok = timer:sleep(100),
+        wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
+    end.
+
+
+compare_dbs(#db{name = SourceName}, #db{name = TargetName}) ->
+    {ok, SourceDb} = couch_db:open_int(SourceName, []),
+    {ok, TargetDb} = couch_db:open_int(TargetName, []),
+    Fun = fun(FullDocInfo, _, Acc) ->
+        {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
+        {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
+        DocId = couch_util:get_value(<<"_id">>, Props),
+        DocTarget = case couch_db:open_doc(TargetDb, DocId) of
+        {ok, DocT} ->
+            DocT;
+        Error ->
+            etap:bail("Error opening document '" ++ ?b2l(DocId) ++
+                "' from target: " ++ couch_util:to_list(Error))
+        end,
+        DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
+        case DocTargetJson of
+        DocJson ->
+            ok;
+        _ ->
+            etap:bail("Content from document '" ++ ?b2l(DocId) ++
+                "' differs in target database")
+        end,
+        {ok, Acc}
+    end,
+    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+    etap:diag("Target database has the same documents as the source database"),
+    ok = couch_db:close(SourceDb),
+    ok = couch_db:close(TargetDb).
+
+
+check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
+    Source = case Src of
+    {remote, NameSrc} ->
+        <<(db_url(NameSrc))/binary, $/>>;
+    _ ->
+        Src
+    end,
+    Target = case Tgt of
+    {remote, NameTgt} ->
+        <<(db_url(NameTgt))/binary, $/>>;
+    _ ->
+        Tgt
+    end,
+    FullRepId = list_to_binary(BaseId ++ Ext),
+    Pid = list_to_binary(pid_to_list(RepPid)),
+    [RepTask] = couch_task_status:all(),
+    etap:is(couch_util:get_value(pid, RepTask), Pid,
+        "_active_tasks entry has correct pid property"),
+    etap:is(couch_util:get_value(replication_id, RepTask), FullRepId,
+        "_active_tasks entry has right replication id"),
+    etap:is(couch_util:get_value(continuous, RepTask), true,
+        "_active_tasks entry has continuous property set to true"),
+    etap:is(couch_util:get_value(source, RepTask), Source,
+        "_active_tasks entry has correct source property"),
+    etap:is(couch_util:get_value(target, RepTask), Target,
+        "_active_tasks entry has correct target property"),
+    etap:is(is_integer(couch_util:get_value(docs_read, RepTask)), true,
+        "_active_tasks entry has integer docs_read property"),
+    etap:is(is_integer(couch_util:get_value(docs_written, RepTask)), true,
+        "_active_tasks entry has integer docs_written property"),
+    etap:is(is_integer(couch_util:get_value(doc_write_failures, RepTask)), true,
+        "_active_tasks entry has integer doc_write_failures property"),
+    etap:is(is_integer(couch_util:get_value(revisions_checked, RepTask)), true,
+        "_active_tasks entry has integer revisions_checked property"),
+    etap:is(is_integer(couch_util:get_value(missing_revisions_found, RepTask)), true,
+        "_active_tasks entry has integer missing_revisions_found property"),
+    etap:is(is_integer(couch_util:get_value(checkpointed_source_seq, RepTask)), true,
+        "_active_tasks entry has integer checkpointed_source_seq property"),
+    etap:is(is_integer(couch_util:get_value(source_seq, RepTask)), true,
+        "_active_tasks entry has integer source_seq property"),
+    Progress = couch_util:get_value(progress, RepTask),
+    etap:is(is_integer(Progress), true,
+        "_active_tasks entry has an integer progress property"),
+    etap:is(Progress =< 100, true, "Progress is not greater than 100%").
+
+
+wait_writer(Pid, NumDocs) ->
+    case get_writer_num_docs_written(Pid) of
+    N when N >= NumDocs ->
+        ok;
+    _ ->
+        wait_writer(Pid, NumDocs)
+    end.
+
+
+spawn_writer(Db) ->
+    Parent = self(),
+    Pid = spawn(fun() -> writer_loop(Db, Parent, 0) end),
+    etap:diag("Started source database writer"),
+    Pid.
+
+
+pause_writer(Pid) ->
+    Ref = make_ref(),
+    Pid ! {pause, Ref},
+    receive
+    {paused, Ref} ->
+        ok
+    after 30000 ->
+        etap:bail("Failed to pause source database writer")
+    end.
+
+
+resume_writer(Pid) ->
+    Ref = make_ref(),
+    Pid ! {continue, Ref},
+    receive
+    {ok, Ref} ->
+        ok
+    after 30000 ->
+        etap:bail("Failed to unpause source database writer")
+    end.
+
+
+get_writer_num_docs_written(Pid) ->
+    Ref = make_ref(),
+    Pid ! {get_count, Ref},
+    receive
+    {count, Ref, Count} ->
+        Count
+    after 30000 ->
+        etap:bail("Timeout getting number of documents written from "
+            "source database writer")
+    end.
+
+
+stop_writer(Pid) ->
+    Ref = make_ref(),
+    Pid ! {stop, Ref},
+    receive
+    {stopped, Ref, DocsWritten} ->
+        MonRef = erlang:monitor(process, Pid),
+        receive
+        {'DOWN', MonRef, process, Pid, _Reason} ->
+            etap:diag("Stopped source database writer"),
+            DocsWritten
+        after 30000 ->
+            etap:bail("Timeout stopping source database writer")
+        end
+    after 30000 ->
+        etap:bail("Timeout stopping source database writer")
+    end.
+
+
+writer_loop(#db{name = DbName}, Parent, Counter) ->
+    maybe_pause(Parent, Counter),
+    Doc = couch_doc:from_json_obj({[
+        {<<"_id">>, list_to_binary(integer_to_list(Counter + 1))},
+        {<<"value">>, Counter + 1},
+        {<<"_attachments">>, {[
+            {<<"icon1.png">>, {[
+                {<<"data">>, base64:encode(att_data())},
+                {<<"content_type">>, <<"image/png">>}
+            ]}},
+            {<<"icon2.png">>, {[
+                {<<"data">>, base64:encode(iolist_to_binary(
+                    [att_data(), att_data()]))},
+                {<<"content_type">>, <<"image/png">>}
+            ]}}
+        ]}}
+    ]}),
+    maybe_pause(Parent, Counter),
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, _} = couch_db:update_doc(Db, Doc, []),
+    ok = couch_db:close(Db),
+    receive
+    {get_count, Ref} ->
+        Parent ! {count, Ref, Counter + 1},
+        writer_loop(Db, Parent, Counter + 1);
+    {stop, Ref} ->
+        Parent ! {stopped, Ref, Counter + 1}
+    after 0 ->
+        ok = timer:sleep(500),
+        writer_loop(Db, Parent, Counter + 1)
+    end.
+
+
+maybe_pause(Parent, Counter) ->
+    receive
+    {get_count, Ref} ->
+        Parent ! {count, Ref, Counter};
+    {pause, Ref} ->
+        Parent ! {paused, Ref},
+        receive {continue, Ref2} -> Parent ! {ok, Ref2} end
+    after 0 ->
+        ok
+    end.
+
+
+db_url(DbName) ->
+    iolist_to_binary([
+        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+        "/", DbName
+    ]).
+
+
+create_db(DbName) ->
+    {ok, Db} = couch_db:create(
+        DbName,
+        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]),
+    couch_db:close(Db),
+    {ok, Db}.
+
+
+delete_db(#db{name = DbName, main_pid = Pid}) ->
+    ok = couch_server:delete(
+        DbName, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]),
+    MonRef = erlang:monitor(process, Pid),
+    receive
+    {'DOWN', MonRef, process, Pid, _Reason} ->
+        ok
+    after 30000 ->
+        etap:bail("Timeout deleting database")
+    end.
+
+
+replicate({remote, Db}, Target) ->
+    replicate(db_url(Db), Target);
+
+replicate(Source, {remote, Db}) ->
+    replicate(Source, db_url(Db));
+
+replicate(Source, Target) ->
+    RepObject = {[
+        {<<"source">>, Source},
+        {<<"target">>, Target},
+        {<<"continuous">>, true}
+    ]},
+    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+    {ok, Pid} = couch_replicator:async_replicate(Rep),
+    {ok, Pid, Rep#rep.id}.
+
+
+cancel_replication(RepId, RepPid) ->
+    {ok, _} = couch_replicator:cancel_replication(RepId),
+    etap:is(is_process_alive(RepPid), false,
+        "Replication process is no longer alive after cancel").
+
+
+att_data() ->
+    {ok, Data} = file:read_file(
+        test_util:source_file("share/www/image/logo.png")),
+    Data.

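The writer process above is coordinated entirely through ref-tagged
messages: every pause/resume/count request carries a fresh make_ref(),
so a stale reply from an earlier request can never satisfy the current
receive, and the "after 0" clause lets the worker poll its mailbox
between units of work. A minimal, self-contained sketch of that
handshake, assuming only stock Erlang/OTP (pause_demo and its message
shapes are illustrative, not part of the test):

    -module(pause_demo).
    -export([start/0, pause/1, resume/1, count/1]).

    start() ->
        spawn(fun() -> loop(0) end).

    %% Worker: handle any pending control message first; otherwise do one
    %% unit of work. A paused worker blocks until told to continue.
    loop(N) ->
        receive
        {pause, Ref, From} ->
            From ! {paused, Ref},
            receive {continue, Ref2, From2} -> From2 ! {ok, Ref2} end,
            loop(N);
        {get_count, Ref, From} ->
            From ! {count, Ref, N},
            loop(N)
        after 0 ->
            ok = timer:sleep(100),   % simulated unit of work
            loop(N + 1)
        end.

    %% Controller: each request uses a fresh ref and matches the reply
    %% on that same ref.
    pause(Pid) ->
        Ref = make_ref(),
        Pid ! {pause, Ref, self()},
        receive {paused, Ref} -> ok after 5000 -> {error, timeout} end.

    resume(Pid) ->
        Ref = make_ref(),
        Pid ! {continue, Ref, self()},
        receive {ok, Ref} -> ok after 5000 -> {error, timeout} end.

    count(Pid) ->
        Ref = make_ref(),
        Pid ! {get_count, Ref, self()},
        receive {count, Ref, N} -> N after 5000 -> {error, timeout} end.
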
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ad526790/src/couch_replicator/test/003-replication-large-atts.t
----------------------------------------------------------------------
diff --git a/src/couch_replicator/test/003-replication-large-atts.t b/src/couch_replicator/test/003-replication-large-atts.t
new file mode 100755
index 0000000..5386179
--- /dev/null
+++ b/src/couch_replicator/test/003-replication-large-atts.t
@@ -0,0 +1,267 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Test replication of large attachments. Verify that both source and
+% target have the same attachment data and metadata.
+
+-define(b2l(Bin), binary_to_list(Bin)).
+
+-record(user_ctx, {
+    name = null,
+    roles = [],
+    handler
+}).
+
+-record(doc, {
+    id = <<"">>,
+    revs = {0, []},
+    body = {[]},
+    atts = [],
+    deleted = false,
+    meta = []
+}).
+
+-record(att, {
+    name,
+    type,
+    att_len,
+    disk_len,
+    md5= <<>>,
+    revpos=0,
+    data,
+    encoding=identity
+}).
+
+
+source_db_name() -> <<"couch_test_rep_db_a">>.
+target_db_name() -> <<"couch_test_rep_db_b">>.
+
+
+main(_) ->
+    test_util:init_code_path(),
+
+    etap:plan(1192),
+    case (catch test()) of
+        ok ->
+            etap:end_tests();
+        Other ->
+            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+            etap:bail(Other)
+    end,
+    ok.
+
+
+test() ->
+    couch_server_sup:start_link(test_util:config_files()),
+    ibrowse:start(),
+    crypto:start(),
+    couch_config:set("attachments", "compressible_types", "text/*", false),
+
+    Pairs = [
+        {source_db_name(), target_db_name()},
+        {{remote, source_db_name()}, target_db_name()},
+        {source_db_name(), {remote, target_db_name()}},
+        {{remote, source_db_name()}, {remote, (target_db_name())}}
+    ],
+
+    {ok, SourceDb} = create_db(source_db_name()),
+    etap:diag("Populating source database"),
+    populate_db(SourceDb, 11),
+    ok = couch_db:close(SourceDb),
+
+    lists:foreach(
+        fun({Source, Target}) ->
+            etap:diag("Creating target database"),
+            {ok, TargetDb} = create_db(target_db_name()),
+
+            ok = couch_db:close(TargetDb),
+            etap:diag("Triggering replication"),
+            replicate(Source, Target),
+            etap:diag("Replication finished, comparing source and target databases"),
+            compare_dbs(SourceDb, TargetDb),
+
+            etap:diag("Deleting target database"),
+            delete_db(TargetDb),
+            ok = timer:sleep(1000)
+        end,
+        Pairs),
+
+    delete_db(SourceDb),
+    couch_server_sup:stop(),
+    ok.
+
+
+populate_db(Db, DocCount) ->
+    Docs = lists:foldl(
+        fun(DocIdCounter, Acc) ->
+            Doc = #doc{
+                id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]),
+                body = {[]},
+                atts = [
+                    att(<<"att1">>, 2 * 1024 * 1024, <<"text/plain">>),
+                    att(<<"att2">>, round(6.6 * 1024 * 1024), <<"app/binary">>)
+                ]
+            },
+            [Doc | Acc]
+        end,
+        [], lists:seq(1, DocCount)),
+    {ok, _} = couch_db:update_docs(Db, Docs, []).
+
+
+att(Name, Size, Type) ->
+    #att{
+        name = Name,
+        type = Type,
+        att_len = Size,
+        data = fun(Count) -> crypto:rand_bytes(Count) end
+    }.
+
+
+compare_dbs(Source, Target) ->
+    {ok, SourceDb} = couch_db:open_int(couch_db:name(Source), []),
+    {ok, TargetDb} = couch_db:open_int(couch_db:name(Target), []),
+
+    Fun = fun(FullDocInfo, _, Acc) ->
+        {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo),
+        Id = DocSource#doc.id,
+
+        etap:diag("Verifying document " ++ ?b2l(Id)),
+
+        {ok, DocTarget} = couch_db:open_doc(TargetDb, Id),
+        etap:is(DocTarget#doc.body, DocSource#doc.body,
+            "Same body in source and target databases"),
+
+        #doc{atts = SourceAtts} = DocSource,
+        #doc{atts = TargetAtts} = DocTarget,
+        etap:is(
+            lists:sort([N || #att{name = N} <- SourceAtts]),
+            lists:sort([N || #att{name = N} <- TargetAtts]),
+            "Document has same number (and names) of attachments in "
+            "source and target databases"),
+
+        lists:foreach(
+            fun(#att{name = AttName} = Att) ->
+                etap:diag("Verifying attachment " ++ ?b2l(AttName)),
+
+                {ok, AttTarget} = find_att(TargetAtts, AttName),
+                SourceMd5 = att_md5(Att),
+                TargetMd5 = att_md5(AttTarget),
+                case AttName of
+                <<"att1">> ->
+                    etap:is(Att#att.encoding, gzip,
+                        "Attachment is gzip encoded in source database"),
+                    etap:is(AttTarget#att.encoding, gzip,
+                        "Attachment is gzip encoded in target database"),
+                    DecSourceMd5 = att_decoded_md5(Att),
+                    DecTargetMd5 = att_decoded_md5(AttTarget),
+                    etap:is(DecTargetMd5, DecSourceMd5,
+                        "Same identity content in source and target databases");
+                _ ->
+                    etap:is(Att#att.encoding, identity,
+                        "Attachment is not encoded in source database"),
+                    etap:is(AttTarget#att.encoding, identity,
+                        "Attachment is not encoded in target database")
+                end,
+                etap:is(TargetMd5, SourceMd5,
+                    "Same content in source and target databases"),
+                etap:is(is_integer(Att#att.disk_len), true,
+                    "#att.disk_len is an integer in source database"),
+                etap:is(is_integer(Att#att.att_len), true,
+                    "#att.att_len is an integer in source database"),
+                etap:is(is_integer(AttTarget#att.disk_len), true,
+                    "#att.disk_len is an integer in target database"),
+                etap:is(is_integer(AttTarget#att.att_len), true,
+                    "#att.att_len is an integer in target database"),
+                etap:is(Att#att.disk_len, AttTarget#att.disk_len,
+                    "Same identity length in source and target databases"),
+                etap:is(Att#att.att_len, AttTarget#att.att_len,
+                    "Same encoded length in source and target databases"),
+                etap:is(Att#att.type, AttTarget#att.type,
+                    "Same type in source and target databases"),
+                etap:is(Att#att.md5, SourceMd5, "Correct MD5 in source database"),
+                etap:is(AttTarget#att.md5, SourceMd5, "Correct MD5 in target database")
+            end,
+            SourceAtts),
+
+        {ok, Acc}
+    end,
+
+    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+    ok = couch_db:close(SourceDb),
+    ok = couch_db:close(TargetDb).
+
+
+find_att([], _Name) ->
+    nil;
+find_att([#att{name = Name} = Att | _], Name) ->
+    {ok, Att};
+find_att([_ | Rest], Name) ->
+    find_att(Rest, Name).
+
+
+att_md5(Att) ->
+    Md50 = couch_doc:att_foldl(
+        Att,
+        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+        couch_util:md5_init()),
+    couch_util:md5_final(Md50).
+
+att_decoded_md5(Att) ->
+    Md50 = couch_doc:att_foldl_decode(
+        Att,
+        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+        couch_util:md5_init()),
+    couch_util:md5_final(Md50).
+
+
+db_url(DbName) ->
+    iolist_to_binary([
+        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+        "/", DbName
+    ]).
+
+
+create_db(DbName) ->
+    couch_db:create(
+        DbName,
+        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
+
+
+delete_db(Db) ->
+    ok = couch_server:delete(
+        couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
+
+
+replicate({remote, Db}, Target) ->
+    replicate(db_url(Db), Target);
+
+replicate(Source, {remote, Db}) ->
+    replicate(Source, db_url(Db));
+
+replicate(Source, Target) ->
+    RepObject = {[
+        {<<"source">>, Source},
+        {<<"target">>, Target}
+    ]},
+    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+    {ok, Pid} = couch_replicator:async_replicate(Rep),
+    MonRef = erlang:monitor(process, Pid),
+    receive
+    {'DOWN', MonRef, process, Pid, Reason} ->
+        etap:is(Reason, normal, "Replication finished successfully")
+    after 300000 ->
+        etap:bail("Timeout waiting for replication to finish")
+    end.

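A detail worth noting in compare_dbs above: att_md5/1 and
att_decoded_md5/1 fold each attachment chunk by chunk through
couch_util's incremental MD5 (init/update/final), so the multi-megabyte
attachment bodies are never materialized whole. The same shape using
only OTP's crypto module, as a sketch (assumes an OTP release that
provides crypto:hash_init/1; the module name is illustrative):

    -module(stream_md5).
    -export([md5_of_chunks/1]).

    %% Fold a list of binary chunks through an incremental MD5 context;
    %% equivalent to hashing the concatenation without building it.
    md5_of_chunks(Chunks) ->
        Ctx = lists:foldl(
            fun(Chunk, Acc) -> crypto:hash_update(Acc, Chunk) end,
            crypto:hash_init(md5),
            Chunks),
        crypto:hash_final(Ctx).

For example, stream_md5:md5_of_chunks([<<"foo">>, <<"bar">>]) yields
the same digest as crypto:hash(md5, <<"foobar">>).
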
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ad526790/src/couch_replicator/test/004-replication-many-leaves.t
----------------------------------------------------------------------
diff --git a/src/couch_replicator/test/004-replication-many-leaves.t b/src/couch_replicator/test/004-replication-many-leaves.t
new file mode 100755
index 0000000..52d2023
--- /dev/null
+++ b/src/couch_replicator/test/004-replication-many-leaves.t
@@ -0,0 +1,216 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Test replication of documents with many leaf revisions.
+% Motivated by COUCHDB-1340 and other similar issues where a document
+% GET with a very long ?open_revs revision list fails because web
+% servers limit the length of the HTTP request path.
+
+-record(user_ctx, {
+    name = null,
+    roles = [],
+    handler
+}).
+
+-record(doc, {
+    id = <<"">>,
+    revs = {0, []},
+    body = {[]},
+    atts = [],
+    deleted = false,
+    meta = []
+}).
+
+-define(b2l(B), binary_to_list(B)).
+-define(l2b(L), list_to_binary(L)).
+-define(i2l(I), integer_to_list(I)).
+
+
+source_db_name() -> <<"couch_test_rep_db_a">>.
+target_db_name() -> <<"couch_test_rep_db_b">>.
+
+doc_ids() ->
+    [<<"doc1">>, <<"doc2">>, <<"doc3">>].
+
+doc_num_conflicts(<<"doc1">>) -> 100;
+doc_num_conflicts(<<"doc2">>) -> 200;
+doc_num_conflicts(<<"doc3">>) -> 550.
+
+
+main(_) ->
+    test_util:init_code_path(),
+
+    etap:plan(16),
+    case (catch test()) of
+        ok ->
+            etap:end_tests();
+        Other ->
+            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+            etap:bail(Other)
+    end,
+    ok.
+
+
+test() ->
+    couch_server_sup:start_link(test_util:config_files()),
+    ibrowse:start(),
+    crypto:start(),
+
+    Pairs = [
+        {source_db_name(), target_db_name()},
+        {{remote, source_db_name()}, target_db_name()},
+        {source_db_name(), {remote, target_db_name()}},
+        {{remote, source_db_name()}, {remote, (target_db_name())}}
+    ],
+
+    {ok, SourceDb} = create_db(source_db_name()),
+    etap:diag("Populating source database"),
+    {ok, DocRevs} = populate_db(SourceDb),
+    ok = couch_db:close(SourceDb),
+
+    lists:foreach(
+        fun({Source, Target}) ->
+            etap:diag("Creating target database"),
+            {ok, TargetDb} = create_db(target_db_name()),
+
+            ok = couch_db:close(TargetDb),
+            etap:diag("Triggering replication"),
+            replicate(Source, Target),
+            etap:diag("Replication finished, comparing source and target databases"),
+            {ok, TargetDb2} = couch_db:open_int(target_db_name(), []),
+            verify_target(TargetDb2, DocRevs),
+            ok = couch_db:close(TargetDb2),
+
+            etap:diag("Deleting target database"),
+            delete_db(TargetDb),
+            ok = timer:sleep(1000)
+        end,
+        Pairs),
+
+    delete_db(SourceDb),
+    couch_server_sup:stop(),
+    ok.
+
+
+populate_db(Db) ->
+    DocRevsDict = lists:foldl(
+        fun(DocId, Acc) ->
+            Value = <<"0">>,
+            Doc = #doc{
+                id = DocId,
+                body = {[ {<<"value">>, Value} ]}
+            },
+            {ok, Rev} = couch_db:update_doc(Db, Doc, []),
+            {ok, RevsDict} = add_doc_siblings(Db, DocId, doc_num_conflicts(DocId)),
+            RevsDict2 = dict:store(Rev, Value, RevsDict),
+            dict:store(DocId, RevsDict2, Acc)
+        end,
+        dict:new(), doc_ids()),
+    {ok, dict:to_list(DocRevsDict)}.
+
+
+add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 ->
+    add_doc_siblings(Db, DocId, NumLeaves, [], dict:new()).
+
+
+add_doc_siblings(Db, _DocId, 0, AccDocs, RevsDict) ->
+    {ok, []} = couch_db:update_docs(Db, AccDocs, [], replicated_changes),
+    {ok, RevsDict};
+
+add_doc_siblings(Db, DocId, NumLeaves, AccDocs, RevsDict) ->
+    Value = list_to_binary(integer_to_list(NumLeaves)),
+    Rev = couch_util:md5(Value),
+    RevsDict2 = dict:store({1, Rev}, Value, RevsDict),
+    Doc = #doc{
+        id = DocId,
+        revs = {1, [Rev]},
+        body = {[ {<<"value">>, Value} ]}
+    },
+    add_doc_siblings(Db, DocId, NumLeaves - 1, [Doc | AccDocs], RevsDict2).
+
+
+verify_target(_TargetDb, []) ->
+    ok;
+
+verify_target(TargetDb, [{DocId, RevsDict} | Rest]) ->
+    {ok, Lookups} = couch_db:open_doc_revs(
+        TargetDb,
+        DocId,
+        [R || {R, _} <- dict:to_list(RevsDict)],
+        [ejson_body]),
+    Docs = [Doc || {ok, Doc} <- Lookups],
+    Total = doc_num_conflicts(DocId) + 1,
+    etap:is(
+        length(Docs),
+        Total,
+        "Target has " ++ ?i2l(Total) ++ " leaf revisions of document " ++ ?b2l(DocId)),
+    etap:diag("Verifying all revisions of document " ++ ?b2l(DocId)),
+    lists:foreach(
+        fun(#doc{revs = {Pos, [RevId]}, body = {Body}}) ->
+            Rev = {Pos, RevId},
+            {ok, Value} = dict:find(Rev, RevsDict),
+            case couch_util:get_value(<<"value">>, Body) of
+            Value ->
+                ok;
+            Other ->
+                etap:bail("Wrong value for revision " ++
+                    ?b2l(couch_doc:rev_to_str(Rev)) ++ " of document " ++
+                    ?b2l(DocId) ++ ". Expected `" ++ couch_util:to_list(Value) ++
+                    "`, got `" ++ couch_util:to_list(Other) ++ "`")
+            end
+        end,
+        Docs),
+    verify_target(TargetDb, Rest).
+
+
+db_url(DbName) ->
+    iolist_to_binary([
+        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+        "/", DbName
+    ]).
+
+
+create_db(DbName) ->
+    couch_db:create(
+        DbName,
+        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
+
+
+delete_db(Db) ->
+    ok = couch_server:delete(
+        couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
+
+
+replicate({remote, Db}, Target) ->
+    replicate(db_url(Db), Target);
+
+replicate(Source, {remote, Db}) ->
+    replicate(Source, db_url(Db));
+
+replicate(Source, Target) ->
+    RepObject = {[
+        {<<"source">>, Source},
+        {<<"target">>, Target}
+    ]},
+    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+    {ok, Pid} = couch_replicator:async_replicate(Rep),
+    MonRef = erlang:monitor(process, Pid),
+    receive
+    {'DOWN', MonRef, process, Pid, Reason} ->
+        etap:is(Reason, normal, "Replication finished successfully")
+    after 300000 ->
+        etap:bail("Timeout waiting for replication to finish")
+    end.

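Some back-of-the-envelope arithmetic shows why the COUCHDB-1340 case
matters here: each leaf revision serializes as "<pos>-<32 hex digits>",
quoted and comma-separated inside ?open_revs=[...], so the 550 leaves
of doc3 alone push a single GET request line to roughly 20 KB, well
past the request-line limits (often around 8 KB) that many web servers
enforce. A sketch of that estimate (illustrative module name;
%-escaping overhead ignored):

    -module(openrevs_len).
    -export([url_len/1]).

    %% Rough length of a GET request line fetching N leaf revisions via
    %% ?open_revs=[...]: about "1-" ++ 32 hex digits, plus two quotes
    %% and a comma, per revision.
    url_len(N) ->
        PerRev = 2 + 32 + 2 + 1,
        length("GET /db/doc?open_revs=[] HTTP/1.1") + N * PerRev.

openrevs_len:url_len(550) comes to about 20,000 bytes, which is why
URL length has to be bounded on the replicator side (compare the
?MAX_URL_LEN guard in couch_api_wrap's atts_since_arg further below).
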
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ad526790/src/couchdb/Makefile.am
----------------------------------------------------------------------
diff --git a/src/couchdb/Makefile.am b/src/couchdb/Makefile.am
index cca91ee..f208693 100644
--- a/src/couchdb/Makefile.am
+++ b/src/couchdb/Makefile.am
@@ -17,7 +17,7 @@ couchlibdir = $(localerlanglibdir)/couch-$(version)
 couchincludedir = $(couchlibdir)/include
 couchebindir = $(couchlibdir)/ebin
 
-couchinclude_DATA = couch_api_wrap.hrl couch_db.hrl couch_js_functions.hrl couch_replicator.hrl
+couchinclude_DATA = couch_db.hrl couch_js_functions.hrl
 couchebin_DATA = $(compiled_files)
 
 # dist_devdoc_DATA = $(doc_base) $(doc_modules)
@@ -28,8 +28,6 @@ CLEANFILES = $(compiled_files) $(doc_base)
 
 source_files = \
     couch.erl \
-    couch_api_wrap.erl \
-    couch_api_wrap_httpc.erl \
     couch_app.erl \
     couch_auth_cache.erl \
     couch_btree.erl \
@@ -48,7 +46,6 @@ source_files = \
     couch_external_manager.erl \
     couch_external_server.erl \
     couch_file.erl \
-    couch_httpc_pool.erl \
     couch_httpd.erl \
     couch_httpd_db.erl \
     couch_httpd_auth.erl \
@@ -56,7 +53,6 @@ source_files = \
     couch_httpd_external.erl \
     couch_httpd_misc_handlers.erl \
     couch_httpd_proxy.erl \
-    couch_httpd_replicator.erl \
     couch_httpd_rewrite.erl \
     couch_httpd_stats_handlers.erl \
     couch_httpd_vhost.erl \
@@ -68,12 +64,6 @@ source_files = \
     couch_primary_sup.erl \
     couch_query_servers.erl \
     couch_ref_counter.erl \
-    couch_rep_sup.erl \
-    couch_replication_manager.erl \
-    couch_replication_notifier.erl \
-    couch_replicator.erl \
-    couch_replicator_worker.erl \
-    couch_replicator_utils.erl \
     couch_secondary_sup.erl \
     couch_server.erl \
     couch_server_sup.erl \
@@ -87,13 +77,11 @@ source_files = \
     couch_work_queue.erl \
     json_stream_parse.erl
 
-EXTRA_DIST = $(source_files) couch_api_wrap.hrl couch_db.hrl couch_js_functions.hrl couch_replicator.hrl
+EXTRA_DIST = $(source_files) couch_db.hrl couch_js_functions.hrl 
 
 compiled_files = \
     couch.app \
     couch.beam \
-    couch_api_wrap.beam \
-    couch_api_wrap_httpc.beam \
     couch_app.beam \
     couch_auth_cache.beam \
     couch_btree.beam \
@@ -112,7 +100,6 @@ compiled_files = \
     couch_external_manager.beam \
     couch_external_server.beam \
     couch_file.beam \
-    couch_httpc_pool.beam \
     couch_httpd.beam \
     couch_httpd_db.beam \
     couch_httpd_auth.beam \
@@ -120,7 +107,6 @@ compiled_files = \
     couch_httpd_proxy.beam \
     couch_httpd_external.beam \
     couch_httpd_misc_handlers.beam \
-    couch_httpd_replicator.beam \
     couch_httpd_rewrite.beam \
     couch_httpd_stats_handlers.beam \
     couch_httpd_vhost.beam \
@@ -132,12 +118,6 @@ compiled_files = \
     couch_primary_sup.beam \
     couch_query_servers.beam \
     couch_ref_counter.beam \
-    couch_rep_sup.beam \
-    couch_replication_manager.beam \
-    couch_replication_notifier.beam \
-    couch_replicator.beam \
-    couch_replicator_worker.beam \
-    couch_replicator_utils.beam \
     couch_secondary_sup.beam \
     couch_server.beam \
     couch_server_sup.beam \
@@ -207,6 +187,6 @@ endif
 
 # $(ERL) -noshell -run edoc_run files [\"$<\"]
 
-%.beam: %.erl couch_api_wrap.hrl couch_db.hrl couch_js_functions.hrl couch_replicator.hrl
+%.beam: %.erl couch_db.hrl couch_js_functions.hrl
 	$(ERLC) $(ERLC_FLAGS) ${TEST} $<;
 

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ad526790/src/couchdb/couch.app.tpl.in
----------------------------------------------------------------------
diff --git a/src/couchdb/couch.app.tpl.in b/src/couchdb/couch.app.tpl.in
index b8486f9..2d75b91 100644
--- a/src/couchdb/couch.app.tpl.in
+++ b/src/couchdb/couch.app.tpl.in
@@ -11,7 +11,6 @@
         couch_log,
         couch_primary_services,
         couch_query_servers,
-        couch_rep_sup,
         couch_secondary_services,
         couch_server,
         couch_server_sup,

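For context on the hunk above: the registered section of an OTP .app
resource file lists the names an application registers at run time, so
dropping couch_rep_sup here mirrors moving that supervisor into the new
couch_replicator application. A minimal sketch of such a resource file
(toy values, not couch's actual .app contents):

    {application, myapp, [
        {description, "example application"},
        {vsn, "1.0.0"},
        {modules, [myapp_app, myapp_sup]},
        {registered, [myapp_sup]},
        {applications, [kernel, stdlib]},
        {mod, {myapp_app, []}}
    ]}.

Tools like systools use the registered list to detect name clashes
between applications before a release boots.
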
http://git-wip-us.apache.org/repos/asf/couchdb/blob/ad526790/src/couchdb/couch_api_wrap.erl
----------------------------------------------------------------------
diff --git a/src/couchdb/couch_api_wrap.erl b/src/couchdb/couch_api_wrap.erl
deleted file mode 100644
index 2c57008..0000000
--- a/src/couchdb/couch_api_wrap.erl
+++ /dev/null
@@ -1,775 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_api_wrap).
-
-% This module wraps the native erlang API, and allows for performing
-% operations on a remote vs. local databases via the same API.
-%
-% Notes:
-% Many options and APIs aren't supported here yet; they are added as needed.
-
--include("couch_db.hrl").
--include("couch_api_wrap.hrl").
-
--export([
-    db_open/2,
-    db_open/3,
-    db_close/1,
-    get_db_info/1,
-    update_doc/3,
-    update_doc/4,
-    update_docs/3,
-    update_docs/4,
-    ensure_full_commit/1,
-    get_missing_revs/2,
-    open_doc/3,
-    open_doc_revs/6,
-    changes_since/5,
-    db_uri/1
-    ]).
-
--import(couch_api_wrap_httpc, [
-    send_req/3
-    ]).
-
--import(couch_util, [
-    encode_doc_id/1,
-    get_value/2,
-    get_value/3
-    ]).
-
-
-db_uri(#httpdb{url = Url}) ->
-    couch_util:url_strip_password(Url);
-
-db_uri(#db{name = Name}) ->
-    db_uri(Name);
-
-db_uri(DbName) ->
-    ?b2l(DbName).
-
-
-db_open(Db, Options) ->
-    db_open(Db, Options, false).
-
-db_open(#httpdb{} = Db1, _Options, Create) ->
-    {ok, Db} = couch_api_wrap_httpc:setup(Db1),
-    case Create of
-    false ->
-        ok;
-    true ->
-        send_req(Db, [{method, put}], fun(_, _, _) -> ok end)
-    end,
-    send_req(Db, [{method, head}],
-        fun(200, _, _) ->
-            {ok, Db};
-        (401, _, _) ->
-            throw({unauthorized, ?l2b(db_uri(Db))});
-        (_, _, _) ->
-            throw({db_not_found, ?l2b(db_uri(Db))})
-        end);
-db_open(DbName, Options, Create) ->
-    try
-        case Create of
-        false ->
-            ok;
-        true ->
-            ok = couch_httpd:verify_is_server_admin(
-                get_value(user_ctx, Options)),
-            couch_db:create(DbName, Options)
-        end,
-        case couch_db:open(DbName, Options) of
-        {not_found, _Reason} ->
-            throw({db_not_found, DbName});
-        {ok, _Db} = Success ->
-            Success
-        end
-    catch
-    throw:{unauthorized, _} ->
-        throw({unauthorized, DbName})
-    end.
-
-db_close(#httpdb{httpc_pool = Pool}) ->
-    unlink(Pool),
-    ok = couch_httpc_pool:stop(Pool);
-db_close(DbName) ->
-    catch couch_db:close(DbName).
-
-
-get_db_info(#httpdb{} = Db) ->
-    send_req(Db, [],
-        fun(200, _, {Props}) ->
-            {ok, Props}
-        end);
-get_db_info(#db{name = DbName, user_ctx = UserCtx}) ->
-    {ok, Db} = couch_db:open(DbName, [{user_ctx, UserCtx}]),
-    {ok, Info} = couch_db:get_db_info(Db),
-    couch_db:close(Db),
-    {ok, [{couch_util:to_binary(K), V} || {K, V} <- Info]}.
-
-
-ensure_full_commit(#httpdb{} = Db) ->
-    send_req(
-        Db,
-        [{method, post}, {path, "_ensure_full_commit"},
-            {headers, [{"Content-Type", "application/json"}]}],
-        fun(201, _, {Props}) ->
-            {ok, get_value(<<"instance_start_time">>, Props)};
-        (_, _, {Props}) ->
-            {error, get_value(<<"error">>, Props)}
-        end);
-ensure_full_commit(Db) ->
-    couch_db:ensure_full_commit(Db).
-
-
-get_missing_revs(#httpdb{} = Db, IdRevs) ->
-    JsonBody = {[{Id, couch_doc:revs_to_strs(Revs)} || {Id, Revs} <- IdRevs]},
-    send_req(
-        Db,
-        [{method, post}, {path, "_revs_diff"}, {body, ?JSON_ENCODE(JsonBody)}],
-        fun(200, _, {Props}) ->
-            ConvertToNativeFun = fun({Id, {Result}}) ->
-                MissingRevs = couch_doc:parse_revs(
-                    get_value(<<"missing">>, Result)
-                ),
-                PossibleAncestors = couch_doc:parse_revs(
-                    get_value(<<"possible_ancestors">>, Result, [])
-                ),
-                {Id, MissingRevs, PossibleAncestors}
-            end,
-            {ok, lists:map(ConvertToNativeFun, Props)}
-        end);
-get_missing_revs(Db, IdRevs) ->
-    couch_db:get_missing_revs(Db, IdRevs).
-
-
-
-open_doc_revs(#httpdb{} = HttpDb, Id, Revs, Options, Fun, Acc) ->
-    Path = encode_doc_id(Id),
-    QArgs = options_to_query_args(
-        HttpDb, Path, [revs, {open_revs, Revs} | Options]),
-    Self = self(),
-    Streamer = spawn_link(fun() ->
-            send_req(
-                HttpDb,
-                [{path, Path}, {qs, QArgs},
-                    {ibrowse_options, [{stream_to, {self(), once}}]},
-                    {headers, [{"Accept", "multipart/mixed"}]}],
-                fun(200, Headers, StreamDataFun) ->
-                    remote_open_doc_revs_streamer_start(Self),
-                    {<<"--">>, _, _} = couch_httpd:parse_multipart_request(
-                        get_value("Content-Type", Headers),
-                        StreamDataFun,
-                        fun mp_parse_mixed/1)
-                end),
-            unlink(Self)
-        end),
-    receive
-    {started_open_doc_revs, Ref} ->
-        receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc)
-    end;
-open_doc_revs(Db, Id, Revs, Options, Fun, Acc) ->
-    {ok, Results} = couch_db:open_doc_revs(Db, Id, Revs, Options),
-    {ok, lists:foldl(fun(R, A) -> {_, A2} = Fun(R, A), A2 end, Acc, Results)}.
-
-
-open_doc(#httpdb{} = Db, Id, Options) ->
-    send_req(
-        Db,
-        [{path, encode_doc_id(Id)}, {qs, options_to_query_args(Options, [])}],
-        fun(200, _, Body) ->
-            {ok, couch_doc:from_json_obj(Body)};
-        (_, _, {Props}) ->
-            {error, get_value(<<"error">>, Props)}
-        end);
-open_doc(Db, Id, Options) ->
-    case couch_db:open_doc(Db, Id, Options) of
-    {ok, _} = Ok ->
-        Ok;
-    {not_found, _Reason} ->
-        {error, <<"not_found">>}
-    end.
-
-
-update_doc(Db, Doc, Options) ->
-    update_doc(Db, Doc, Options, interactive_edit).
-
-update_doc(#httpdb{} = HttpDb, #doc{id = DocId} = Doc, Options, Type) ->
-    QArgs = case Type of
-    replicated_changes ->
-        [{"new_edits", "false"}];
-    _ ->
-        []
-    end ++ options_to_query_args(Options, []),
-    Boundary = couch_uuids:random(),
-    JsonBytes = ?JSON_ENCODE(
-        couch_doc:to_json_obj(
-          Doc, [revs, attachments, follows, att_encoding_info | Options])),
-    {ContentType, Len} = couch_doc:len_doc_to_multi_part_stream(Boundary,
-        JsonBytes, Doc#doc.atts, true),
-    Headers = case lists:member(delay_commit, Options) of
-    true ->
-        [{"X-Couch-Full-Commit", "false"}];
-    false ->
-        []
-    end ++ [{"Content-Type", ?b2l(ContentType)}, {"Content-Length", Len}],
-    Body = {fun stream_doc/1, {JsonBytes, Doc#doc.atts, Boundary, Len}},
-    send_req(
-        HttpDb,
-        [{method, put}, {path, encode_doc_id(DocId)},
-            {qs, QArgs}, {headers, Headers}, {body, Body}],
-        fun(Code, _, {Props}) when Code =:= 200 orelse Code =:= 201 ->
-                {ok, couch_doc:parse_rev(get_value(<<"rev">>, Props))};
-            (409, _, _) ->
-                throw(conflict);
-            (Code, _, {Props}) ->
-                case {Code, get_value(<<"error">>, Props)} of
-                {401, <<"unauthorized">>} ->
-                    throw({unauthorized, get_value(<<"reason">>, Props)});
-                {403, <<"forbidden">>} ->
-                    throw({forbidden, get_value(<<"reason">>, Props)});
-                {412, <<"missing_stub">>} ->
-                    throw({missing_stub, get_value(<<"reason">>, Props)});
-                {_, Error} ->
-                    {error, Error}
-                end
-        end);
-update_doc(Db, Doc, Options, Type) ->
-    couch_db:update_doc(Db, Doc, Options, Type).
-
-
-update_docs(Db, DocList, Options) ->
-    update_docs(Db, DocList, Options, interactive_edit).
-
-update_docs(_Db, [], _Options, _UpdateType) ->
-    {ok, []};
-update_docs(#httpdb{} = HttpDb, DocList, Options, UpdateType) ->
-    FullCommit = atom_to_list(not lists:member(delay_commit, Options)),
-    Prefix = case UpdateType of
-    replicated_changes ->
-        <<"{\"new_edits\":false,\"docs\":[">>;
-    interactive_edit ->
-        <<"{\"docs\":[">>
-    end,
-    Suffix = <<"]}">>,
-    % Note: nginx and other servers don't like PUT/POST requests without
-    % a Content-Length header, so we can't do a chunked transfer encoding
-    % and JSON encode each doc only before sending it through the socket.
-    {Docs, Len} = lists:mapfoldl(
-        fun(#doc{} = Doc, Acc) ->
-            Json = ?JSON_ENCODE(couch_doc:to_json_obj(Doc, [revs, attachments])),
-            {Json, Acc + iolist_size(Json)};
-        (Doc, Acc) ->
-            {Doc, Acc + iolist_size(Doc)}
-        end,
-        byte_size(Prefix) + byte_size(Suffix) + length(DocList) - 1,
-        DocList),
-    BodyFun = fun(eof) ->
-            eof;
-        ([]) ->
-            {ok, Suffix, eof};
-        ([prefix | Rest]) ->
-            {ok, Prefix, Rest};
-        ([Doc]) ->
-            {ok, Doc, []};
-        ([Doc | RestDocs]) ->
-            {ok, [Doc, ","], RestDocs}
-    end,
-    Headers = [
-        {"Content-Length", Len},
-        {"Content-Type", "application/json"},
-        {"X-Couch-Full-Commit", FullCommit}
-    ],
-    send_req(
-        HttpDb,
-        [{method, post}, {path, "_bulk_docs"},
-            {body, {BodyFun, [prefix | Docs]}}, {headers, Headers}],
-        fun(201, _, Results) when is_list(Results) ->
-                {ok, bulk_results_to_errors(DocList, Results, remote)};
-           (417, _, Results) when is_list(Results) ->
-                {ok, bulk_results_to_errors(DocList, Results, remote)}
-        end);
-update_docs(Db, DocList, Options, UpdateType) ->
-    Result = couch_db:update_docs(Db, DocList, Options, UpdateType),
-    {ok, bulk_results_to_errors(DocList, Result, UpdateType)}.
-
-
-changes_since(#httpdb{headers = Headers1} = HttpDb, Style, StartSeq,
-    UserFun, Options) ->
-    BaseQArgs = case get_value(continuous, Options, false) of
-    false ->
-        [{"feed", "normal"}];
-    true ->
-        [{"feed", "continuous"}, {"heartbeat", "10000"}]
-    end ++ [
-        {"style", atom_to_list(Style)}, {"since", couch_util:to_list(StartSeq)}
-    ],
-    DocIds = get_value(doc_ids, Options),
-    {QArgs, Method, Body, Headers} = case DocIds of
-    undefined ->
-        QArgs1 = maybe_add_changes_filter_q_args(BaseQArgs, Options),
-        {QArgs1, get, [], Headers1};
-    _ when is_list(DocIds) ->
-        Headers2 = [{"Content-Type", "application/json"} | Headers1],
-        JsonDocIds = ?JSON_ENCODE({[{<<"doc_ids">>, DocIds}]}),
-        {[{"filter", "_doc_ids"} | BaseQArgs], post, JsonDocIds, Headers2}
-    end,
-    send_req(
-        HttpDb,
-        [{method, Method}, {path, "_changes"}, {qs, QArgs},
-            {headers, Headers}, {body, Body},
-            {ibrowse_options, [{stream_to, {self(), once}}]}],
-        fun(200, _, DataStreamFun) ->
-                parse_changes_feed(Options, UserFun, DataStreamFun);
-            (405, _, _) when is_list(DocIds) ->
-                % CouchDB versions < 1.1.0 don't have the builtin _changes feed
-                % filter "_doc_ids", nor do they support POST
-                send_req(HttpDb, [{method, get}, {path, "_changes"},
-                    {qs, BaseQArgs}, {headers, Headers1},
-                    {ibrowse_options, [{stream_to, {self(), once}}]}],
-                    fun(200, _, DataStreamFun2) ->
-                        UserFun2 = fun(#doc_info{id = Id} = DocInfo) ->
-                            case lists:member(Id, DocIds) of
-                            true ->
-                                UserFun(DocInfo);
-                            false ->
-                                ok
-                            end
-                        end,
-                        parse_changes_feed(Options, UserFun2, DataStreamFun2)
-                    end)
-        end);
-changes_since(Db, Style, StartSeq, UserFun, Options) ->
-    Filter = case get_value(doc_ids, Options) of
-    undefined ->
-        ?b2l(get_value(filter, Options, <<>>));
-    _DocIds ->
-        "_doc_ids"
-    end,
-    Args = #changes_args{
-        style = Style,
-        since = StartSeq,
-        filter = Filter,
-        feed = case get_value(continuous, Options, false) of
-            true ->
-                "continuous";
-            false ->
-                "normal"
-        end,
-        timeout = infinity
-    },
-    QueryParams = get_value(query_params, Options, {[]}),
-    Req = changes_json_req(Db, Filter, QueryParams, Options),
-    ChangesFeedFun = couch_changes:handle_changes(Args, {json_req, Req}, Db),
-    ChangesFeedFun(fun({change, Change, _}, _) ->
-            UserFun(json_to_doc_info(Change));
-        (_, _) ->
-            ok
-    end).
-
-
-% internal functions
-
-maybe_add_changes_filter_q_args(BaseQS, Options) ->
-    case get_value(filter, Options) of
-    undefined ->
-        BaseQS;
-    FilterName ->
-        {Params} = get_value(query_params, Options, {[]}),
-        [{"filter", ?b2l(FilterName)} | lists:foldl(
-            fun({K, V}, QSAcc) ->
-                Ks = couch_util:to_list(K),
-                case lists:keymember(Ks, 1, QSAcc) of
-                true ->
-                    QSAcc;
-                false ->
-                    [{Ks, couch_util:to_list(V)} | QSAcc]
-                end
-            end,
-            BaseQS, Params)]
-    end.
-
-parse_changes_feed(Options, UserFun, DataStreamFun) ->
-    case get_value(continuous, Options, false) of
-    true ->
-        continuous_changes(DataStreamFun, UserFun);
-    false ->
-        EventFun = fun(Ev) ->
-            changes_ev1(Ev, fun(DocInfo, _) -> UserFun(DocInfo) end, [])
-        end,
-        json_stream_parse:events(DataStreamFun, EventFun)
-    end.
-
-changes_json_req(_Db, "", _QueryParams, _Options) ->
-    {[]};
-changes_json_req(_Db, "_doc_ids", _QueryParams, Options) ->
-    {[{<<"doc_ids">>, get_value(doc_ids, Options)}]};
-changes_json_req(Db, FilterName, {QueryParams}, _Options) ->
-    {ok, Info} = couch_db:get_db_info(Db),
-    % simulate a request to db_name/_changes
-    {[
-        {<<"info">>, {Info}},
-        {<<"id">>, null},
-        {<<"method">>, 'GET'},
-        {<<"path">>, [couch_db:name(Db), <<"_changes">>]},
-        {<<"query">>, {[{<<"filter">>, FilterName} | QueryParams]}},
-        {<<"headers">>, []},
-        {<<"body">>, []},
-        {<<"peer">>, <<"replicator">>},
-        {<<"form">>, []},
-        {<<"cookie">>, []},
-        {<<"userCtx">>, couch_util:json_user_ctx(Db)}
-    ]}.
-
-
-options_to_query_args(HttpDb, Path, Options) ->
-    case lists:keytake(atts_since, 1, Options) of
-    false ->
-        options_to_query_args(Options, []);
-    {value, {atts_since, []}, Options2} ->
-        options_to_query_args(Options2, []);
-    {value, {atts_since, PAs}, Options2} ->
-        QueryArgs1 = options_to_query_args(Options2, []),
-        FullUrl = couch_api_wrap_httpc:full_url(
-            HttpDb, [{path, Path}, {qs, QueryArgs1}]),
-        RevList = atts_since_arg(
-            length("GET " ++ FullUrl ++ " HTTP/1.1\r\n") +
-            length("&atts_since=") + 6,  % +6 = % encoded [ and ]
-            PAs, []),
-        [{"atts_since", ?JSON_ENCODE(RevList)} | QueryArgs1]
-    end.
-
-
-options_to_query_args([], Acc) ->
-    lists:reverse(Acc);
-options_to_query_args([ejson_body | Rest], Acc) ->
-    options_to_query_args(Rest, Acc);
-options_to_query_args([delay_commit | Rest], Acc) ->
-    options_to_query_args(Rest, Acc);
-options_to_query_args([revs | Rest], Acc) ->
-    options_to_query_args(Rest, [{"revs", "true"} | Acc]);
-options_to_query_args([{open_revs, all} | Rest], Acc) ->
-    options_to_query_args(Rest, [{"open_revs", "all"} | Acc]);
-options_to_query_args([{open_revs, Revs} | Rest], Acc) ->
-    JsonRevs = ?b2l(?JSON_ENCODE(couch_doc:revs_to_strs(Revs))),
-    options_to_query_args(Rest, [{"open_revs", JsonRevs} | Acc]).
-
-
--define(MAX_URL_LEN, 7000).
-
-atts_since_arg(_UrlLen, [], Acc) ->
-    lists:reverse(Acc);
-atts_since_arg(UrlLen, [PA | Rest], Acc) ->
-    RevStr = couch_doc:rev_to_str(PA),
-    NewUrlLen = case Rest of
-    [] ->
-        % plus 2 double quotes (% encoded)
-        UrlLen + size(RevStr) + 6;
-    _ ->
-        % plus 2 double quotes and a comma (% encoded)
-        UrlLen + size(RevStr) + 9
-    end,
-    case NewUrlLen >= ?MAX_URL_LEN of
-    true ->
-        lists:reverse(Acc);
-    false ->
-        atts_since_arg(NewUrlLen, Rest, [RevStr | Acc])
-    end.
-
-
-% TODO: A less verbose, more elegant and automatic restart strategy for
-%       the exported open_doc_revs/6 function. The restart should be
-%       transparent to the caller like any other Couch API function exported
-%       by this module.
-receive_docs_loop(Streamer, Fun, Id, Revs, Ref, Acc) ->
-    try
-        % Left only for debugging purposes via an interactive or remote shell
-        erlang:put(open_doc_revs, {Id, Revs, Ref, Streamer}),
-        receive_docs(Streamer, Fun, Ref, Acc)
-    catch
-    error:{restart_open_doc_revs, NewRef} ->
-        receive_docs_loop(Streamer, Fun, Id, Revs, NewRef, Acc)
-    end.
-
-receive_docs(Streamer, UserFun, Ref, UserAcc) ->
-    Streamer ! {get_headers, Ref, self()},
-    receive
-    {started_open_doc_revs, NewRef} ->
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {headers, Ref, Headers} ->
-        case get_value("content-type", Headers) of
-        {"multipart/related", _} = ContentType ->
-            case doc_from_multi_part_stream(
-                ContentType,
-                fun() -> receive_doc_data(Streamer, Ref) end,
-                Ref) of
-            {ok, Doc, Parser} ->
-                case UserFun({ok, Doc}, UserAcc) of
-                {ok, UserAcc2} ->
-                    ok;
-                {skip, UserAcc2} ->
-                    couch_doc:abort_multi_part_stream(Parser)
-                end,
-                receive_docs(Streamer, UserFun, Ref, UserAcc2)
-            end;
-        {"application/json", []} ->
-            Doc = couch_doc:from_json_obj(
-                    ?JSON_DECODE(receive_all(Streamer, Ref, []))),
-            {_, UserAcc2} = UserFun({ok, Doc}, UserAcc),
-            receive_docs(Streamer, UserFun, Ref, UserAcc2);
-        {"application/json", [{"error","true"}]} ->
-            {ErrorProps} = ?JSON_DECODE(receive_all(Streamer, Ref, [])),
-            Rev = get_value(<<"missing">>, ErrorProps),
-            Result = {{not_found, missing}, couch_doc:parse_rev(Rev)},
-            {_, UserAcc2} = UserFun(Result, UserAcc),
-            receive_docs(Streamer, UserFun, Ref, UserAcc2)
-        end;
-    {done, Ref} ->
-        {ok, UserAcc}
-    end.
-
-
-restart_remote_open_doc_revs(Ref, NewRef) ->
-    receive
-    {body_bytes, Ref, _} ->
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {body_done, Ref} ->
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {done, Ref} ->
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {headers, Ref, _} ->
-        restart_remote_open_doc_revs(Ref, NewRef)
-    after 0 ->
-        erlang:error({restart_open_doc_revs, NewRef})
-    end.
-
-
-remote_open_doc_revs_streamer_start(Parent) ->
-    receive
-    {get_headers, _Ref, Parent} ->
-        remote_open_doc_revs_streamer_start(Parent);
-    {next_bytes, _Ref, Parent} ->
-        remote_open_doc_revs_streamer_start(Parent)
-    after 0 ->
-        Parent ! {started_open_doc_revs, make_ref()}
-    end.
-
-
-receive_all(Streamer, Ref, Acc) ->
-    Streamer ! {next_bytes, Ref, self()},
-    receive
-    {started_open_doc_revs, NewRef} ->
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {body_bytes, Ref, Bytes} ->
-        receive_all(Streamer, Ref, [Bytes | Acc]);
-    {body_done, Ref} ->
-        lists:reverse(Acc)
-    end.
-
-
-mp_parse_mixed(eof) ->
-    receive {get_headers, Ref, From} ->
-        From ! {done, Ref}
-    end;
-mp_parse_mixed({headers, H}) ->
-    receive {get_headers, Ref, From} ->
-        From ! {headers, Ref, H}
-    end,
-    fun mp_parse_mixed/1;
-mp_parse_mixed({body, Bytes}) ->
-    receive {next_bytes, Ref, From} ->
-        From ! {body_bytes, Ref, Bytes}
-    end,
-    fun mp_parse_mixed/1;
-mp_parse_mixed(body_end) ->
-    receive {next_bytes, Ref, From} ->
-        From ! {body_done, Ref};
-    {get_headers, Ref, From} ->
-        self() ! {get_headers, Ref, From}
-    end,
-    fun mp_parse_mixed/1.
-
-
-receive_doc_data(Streamer, Ref) ->
-    Streamer ! {next_bytes, Ref, self()},
-    receive
-    {body_bytes, Ref, Bytes} ->
-        {Bytes, fun() -> receive_doc_data(Streamer, Ref) end};
-    {body_done, Ref} ->
-        {<<>>, fun() -> receive_doc_data(Streamer, Ref) end}
-    end.
-
-doc_from_multi_part_stream(ContentType, DataFun, Ref) ->
-    Self = self(),
-    Parser = spawn_link(fun() ->
-        {<<"--">>, _, _} = couch_httpd:parse_multipart_request(
-            ContentType, DataFun,
-            fun(Next) -> couch_doc:mp_parse_doc(Next, []) end),
-        unlink(Self)
-        end),
-    Parser ! {get_doc_bytes, Ref, self()},
-    receive
-    {started_open_doc_revs, NewRef} ->
-        unlink(Parser),
-        exit(Parser, kill),
-        restart_remote_open_doc_revs(Ref, NewRef);
-    {doc_bytes, Ref, DocBytes} ->
-        Doc = couch_doc:from_json_obj(?JSON_DECODE(DocBytes)),
-        ReadAttachmentDataFun = fun() ->
-            Parser ! {get_bytes, Ref, self()},
-            receive
-            {started_open_doc_revs, NewRef} ->
-                unlink(Parser),
-                exit(Parser, kill),
-                receive {bytes, Ref, _} -> ok after 0 -> ok end,
-                restart_remote_open_doc_revs(Ref, NewRef);
-            {bytes, Ref, Bytes} ->
-                Bytes
-            end
-        end,
-        Atts2 = lists:map(
-            fun(#att{data = follows} = A) ->
-                A#att{data = ReadAttachmentDataFun};
-            (A) ->
-                A
-            end, Doc#doc.atts),
-        {ok, Doc#doc{atts = Atts2}, Parser}
-    end.
-
-
-changes_ev1(object_start, UserFun, UserAcc) ->
-    fun(Ev) -> changes_ev2(Ev, UserFun, UserAcc) end.
-
-changes_ev2({key, <<"results">>}, UserFun, UserAcc) ->
-    fun(Ev) -> changes_ev3(Ev, UserFun, UserAcc) end;
-changes_ev2(_, UserFun, UserAcc) ->
-    fun(Ev) -> changes_ev2(Ev, UserFun, UserAcc) end.
-
-changes_ev3(array_start, UserFun, UserAcc) ->
-    fun(Ev) -> changes_ev_loop(Ev, UserFun, UserAcc) end.
-
-changes_ev_loop(object_start, UserFun, UserAcc) ->
-    fun(Ev) ->
-        json_stream_parse:collect_object(Ev,
-            fun(Obj) ->
-                UserAcc2 = UserFun(json_to_doc_info(Obj), UserAcc),
-                fun(Ev2) -> changes_ev_loop(Ev2, UserFun, UserAcc2) end
-            end)
-    end;
-changes_ev_loop(array_end, _UserFun, _UserAcc) ->
-    fun(_Ev) -> changes_ev_done() end.
-
-changes_ev_done() ->
-    fun(_Ev) -> changes_ev_done() end.
-
-continuous_changes(DataFun, UserFun) ->
-    {DataFun2, _, Rest} = json_stream_parse:events(
-        DataFun,
-        fun(Ev) -> parse_changes_line(Ev, UserFun) end),
-    continuous_changes(fun() -> {Rest, DataFun2} end, UserFun).
-
-parse_changes_line(object_start, UserFun) ->
-    fun(Ev) ->
-        json_stream_parse:collect_object(Ev,
-            fun(Obj) -> UserFun(json_to_doc_info(Obj)) end)
-    end.
-
-json_to_doc_info({Props}) ->
-    RevsInfo = lists:map(
-        fun({Change}) ->
-            Rev = couch_doc:parse_rev(get_value(<<"rev">>, Change)),
-            Del = (true =:= get_value(<<"deleted">>, Change)),
-            #rev_info{rev=Rev, deleted=Del}
-        end, get_value(<<"changes">>, Props)),
-    #doc_info{
-        id = get_value(<<"id">>, Props),
-        high_seq = get_value(<<"seq">>, Props),
-        revs = RevsInfo
-    }.
-
-
-bulk_results_to_errors(Docs, {ok, Results}, interactive_edit) ->
-    lists:reverse(lists:foldl(
-        fun({_, {ok, _}}, Acc) ->
-            Acc;
-        ({#doc{id = Id, revs = {Pos, [RevId | _]}}, Error}, Acc) ->
-            {_, Error, Reason} = couch_httpd:error_info(Error),
-            [ {[{id, Id}, {rev, rev_to_str({Pos, RevId})},
-                {error, Error}, {reason, Reason}]} | Acc ]
-        end,
-        [], lists:zip(Docs, Results)));
-
-bulk_results_to_errors(Docs, {ok, Results}, replicated_changes) ->
-    bulk_results_to_errors(Docs, {aborted, Results}, interactive_edit);
-
-bulk_results_to_errors(_Docs, {aborted, Results}, interactive_edit) ->
-    lists:map(
-        fun({{Id, Rev}, Err}) ->
-            {_, Error, Reason} = couch_httpd:error_info(Err),
-            {[{id, Id}, {rev, rev_to_str(Rev)}, {error, Error}, {reason, Reason}]}
-        end,
-        Results);
-
-bulk_results_to_errors(_Docs, Results, remote) ->
-    lists:reverse(lists:foldl(
-        fun({Props}, Acc) ->
-            case get_value(<<"error">>, Props, get_value(error, Props)) of
-            undefined ->
-                Acc;
-            Error ->
-                Id = get_value(<<"id">>, Props, get_value(id, Props)),
-                Rev = get_value(<<"rev">>, Props, get_value(rev, Props)),
-                Reason = get_value(<<"reason">>, Props, get_value(reason, Props)),
-                [ {[{id, Id}, {rev, rev_to_str(Rev)},
-                    {error, Error}, {reason, Reason}]} | Acc ]
-            end
-        end,
-        [], Results)).
-
-
-rev_to_str({_Pos, _Id} = Rev) ->
-    couch_doc:rev_to_str(Rev);
-rev_to_str(Rev) ->
-    Rev.
-
-
-stream_doc({JsonBytes, Atts, Boundary, Len}) ->
-    case erlang:erase({doc_streamer, Boundary}) of
-    Pid when is_pid(Pid) ->
-        unlink(Pid),
-        exit(Pid, kill);
-    _ ->
-        ok
-    end,
-    Self = self(),
-    DocStreamer = spawn_link(fun() ->
-        couch_doc:doc_to_multi_part_stream(
-            Boundary, JsonBytes, Atts,
-            fun(Data) ->
-                receive {get_data, Ref, From} ->
-                    From ! {data, Ref, Data}
-                end
-            end, true),
-        unlink(Self)
-    end),
-    erlang:put({doc_streamer, Boundary}, DocStreamer),
-    {ok, <<>>, {Len, Boundary}};
-stream_doc({0, Id}) ->
-    erlang:erase({doc_streamer, Id}),
-    eof;
-stream_doc({LenLeft, Id}) when LenLeft > 0 ->
-    Ref = make_ref(),
-    erlang:get({doc_streamer, Id}) ! {get_data, Ref, self()},
-    receive {data, Ref, Data} ->
-        {ok, Data, {LenLeft - iolist_size(Data), Id}}
-    end.
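
atts_since_arg/3 above trims the atts_since revision list so the final GET
URL stays under ?MAX_URL_LEN (7000 bytes), budgeting 3 bytes for every
percent-encoded quote or comma. A standalone sketch of the same
accumulate-until-limit pattern, runnable in an Erlang shell; the module and
function names are made up for illustration:

    %% atts_since_sketch.erl -- illustrative only; mirrors atts_since_arg/3.
    -module(atts_since_sketch).
    -export([cap/2]).

    %% Keep revision strings while the projected URL length stays under Max.
    %% Per item: 2 percent-encoded double quotes (%22, 3 bytes each) = 6,
    %% plus a percent-encoded comma (%2C, 3 bytes) when more items follow = 9.
    cap(Max, Revs) when is_integer(Max) ->
        cap(Max, 0, Revs, []).

    cap(_Max, _Len, [], Acc) ->
        lists:reverse(Acc);
    cap(Max, Len, [Rev | Rest], Acc) ->
        Overhead = case Rest of [] -> 6; _ -> 9 end,
        NewLen = Len + byte_size(Rev) + Overhead,
        case NewLen >= Max of
        true ->
            lists:reverse(Acc);
        false ->
            cap(Max, NewLen, Rest, [Rev | Acc])
        end.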

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ad526790/src/couchdb/couch_api_wrap.hrl
----------------------------------------------------------------------
diff --git a/src/couchdb/couch_api_wrap.hrl b/src/couchdb/couch_api_wrap.hrl
deleted file mode 100644
index 1a6f27a..0000000
--- a/src/couchdb/couch_api_wrap.hrl
+++ /dev/null
@@ -1,36 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
-
-
--record(httpdb, {
-    url,
-    oauth = nil,
-    headers = [
-        {"Accept", "application/json"},
-        {"User-Agent", "CouchDB/" ++ couch_server:get_version()}
-    ],
-    timeout,            % milliseconds
-    ibrowse_options = [],
-    retries = 10,
-    wait = 250,         % milliseconds
-    httpc_pool = nil,
-    http_connections
-}).
-
--record(oauth, {
-    consumer_key,
-    token,
-    token_secret,
-    consumer_secret,
-    signature_method
-}).
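
An #httpdb is normally filled in from a replication document's endpoint
definition. A hypothetical construction relying only on the defaults above
(the URL, timeout, and connection count are made up; couch_api_wrap_httpc:setup/1,
defined in the next file, attaches the connection pool):

    %% Illustrative only; assumes couch_api_wrap.hrl is included for #httpdb.
    Db0 = #httpdb{
        url = "http://example.org:5984/somedb/",
        timeout = 30000,          % milliseconds; set from config in practice
        http_connections = 20
    },
    {ok, Db} = couch_api_wrap_httpc:setup(Db0).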

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ad526790/src/couchdb/couch_api_wrap_httpc.erl
----------------------------------------------------------------------
diff --git a/src/couchdb/couch_api_wrap_httpc.erl b/src/couchdb/couch_api_wrap_httpc.erl
deleted file mode 100644
index d05eec7..0000000
--- a/src/couchdb/couch_api_wrap_httpc.erl
+++ /dev/null
@@ -1,286 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_api_wrap_httpc).
-
--include("couch_db.hrl").
--include("couch_api_wrap.hrl").
--include("../ibrowse/ibrowse.hrl").
-
--export([setup/1]).
--export([send_req/3]).
--export([full_url/2]).
-
--import(couch_util, [
-    get_value/2,
-    get_value/3
-]).
-
--define(replace(L, K, V), lists:keystore(K, 1, L, {K, V})).
--define(MAX_WAIT, 5 * 60 * 1000).
-
-
-setup(#httpdb{httpc_pool = nil, url = Url, http_connections = MaxConns} = Db) ->
-    {ok, Pid} = couch_httpc_pool:start_link(Url, [{max_connections, MaxConns}]),
-    {ok, Db#httpdb{httpc_pool = Pid}}.
-
-
-send_req(HttpDb, Params1, Callback) ->
-    Params2 = ?replace(Params1, qs,
-        [{K, ?b2l(iolist_to_binary(V))} || {K, V} <- get_value(qs, Params1, [])]),
-    Params = ?replace(Params2, ibrowse_options,
-        lists:keysort(1, get_value(ibrowse_options, Params2, []))),
-    {Worker, Response} = send_ibrowse_req(HttpDb, Params),
-    process_response(Response, Worker, HttpDb, Params, Callback).
-
-
-send_ibrowse_req(#httpdb{headers = BaseHeaders} = HttpDb, Params) ->
-    Method = get_value(method, Params, get),
-    UserHeaders = lists:keysort(1, get_value(headers, Params, [])),
-    Headers1 = lists:ukeymerge(1, UserHeaders, BaseHeaders),
-    Headers2 = oauth_header(HttpDb, Params) ++ Headers1,
-    Url = full_url(HttpDb, Params),
-    Body = get_value(body, Params, []),
-    case get_value(path, Params) of
-    "_changes" ->
-        {ok, Worker} = ibrowse:spawn_link_worker_process(Url);
-    _ ->
-        {ok, Worker} = couch_httpc_pool:get_worker(HttpDb#httpdb.httpc_pool)
-    end,
-    IbrowseOptions = [
-        {response_format, binary}, {inactivity_timeout, HttpDb#httpdb.timeout} |
-        lists:ukeymerge(1, get_value(ibrowse_options, Params, []),
-            HttpDb#httpdb.ibrowse_options)
-    ],
-    Response = ibrowse:send_req_direct(
-        Worker, Url, Headers2, Method, Body, IbrowseOptions, infinity),
-    {Worker, Response}.
-
-
-process_response({error, sel_conn_closed}, _Worker, HttpDb, Params, Callback) ->
-    send_req(HttpDb, Params, Callback);
-
-process_response({error, {'EXIT', {normal, _}}}, _Worker, HttpDb, Params, Cb) ->
-    % ibrowse worker terminated because remote peer closed the socket
-    % -> not an error
-    send_req(HttpDb, Params, Cb);
-
-process_response({ibrowse_req_id, ReqId}, Worker, HttpDb, Params, Callback) ->
-    process_stream_response(ReqId, Worker, HttpDb, Params, Callback);
-
-process_response({ok, Code, Headers, Body}, Worker, HttpDb, Params, Callback) ->
-    release_worker(Worker, HttpDb),
-    case list_to_integer(Code) of
-    Ok when Ok =:= 200 ; Ok =:= 201 ; (Ok >= 400 andalso Ok < 500) ->
-        EJson = case Body of
-        <<>> ->
-            null;
-        Json ->
-            ?JSON_DECODE(Json)
-        end,
-        Callback(Ok, Headers, EJson);
-    R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
-        do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
-    Error ->
-        maybe_retry({code, Error}, Worker, HttpDb, Params, Callback)
-    end;
-
-process_response(Error, Worker, HttpDb, Params, Callback) ->
-    maybe_retry(Error, Worker, HttpDb, Params, Callback).
-
-
-process_stream_response(ReqId, Worker, HttpDb, Params, Callback) ->
-    receive
-    {ibrowse_async_headers, ReqId, Code, Headers} ->
-        case list_to_integer(Code) of
-        Ok when Ok =:= 200 ; Ok =:= 201 ; (Ok >= 400 andalso Ok < 500) ->
-            StreamDataFun = fun() ->
-                stream_data_self(HttpDb, Params, Worker, ReqId, Callback)
-            end,
-            ibrowse:stream_next(ReqId),
-            try
-                Ret = Callback(Ok, Headers, StreamDataFun),
-                release_worker(Worker, HttpDb),
-                clean_mailbox_req(ReqId),
-                Ret
-            catch throw:{maybe_retry_req, Err} ->
-                clean_mailbox_req(ReqId),
-                maybe_retry(Err, Worker, HttpDb, Params, Callback)
-            end;
-        R when R =:= 301 ; R =:= 302 ; R =:= 303 ->
-            do_redirect(Worker, R, Headers, HttpDb, Params, Callback);
-        Error ->
-            report_error(Worker, HttpDb, Params, {code, Error})
-        end;
-    {ibrowse_async_response, ReqId, {error, _} = Error} ->
-        maybe_retry(Error, Worker, HttpDb, Params, Callback)
-    after HttpDb#httpdb.timeout + 500 ->
-        % Note: ibrowse should always reply with timeouts, but this doesn't
-        % seem to be always true when there's a very high rate of requests
-        % and many open connections.
-        maybe_retry(timeout, Worker, HttpDb, Params, Callback)
-    end.
-
-
-clean_mailbox_req(ReqId) ->
-    receive
-    {ibrowse_async_response, ReqId, _} ->
-        clean_mailbox_req(ReqId);
-    {ibrowse_async_response_end, ReqId} ->
-        clean_mailbox_req(ReqId)
-    after 0 ->
-        ok
-    end.
-
-
-release_worker(Worker, #httpdb{httpc_pool = Pool}) ->
-    ok = couch_httpc_pool:release_worker(Pool, Worker).
-
-
-maybe_retry(Error, Worker, #httpdb{retries = 0} = HttpDb, Params, _Cb) ->
-    report_error(Worker, HttpDb, Params, {error, Error});
-
-maybe_retry(Error, Worker, #httpdb{retries = Retries, wait = Wait} = HttpDb,
-    Params, Cb) ->
-    release_worker(Worker, HttpDb),
-    Method = string:to_upper(atom_to_list(get_value(method, Params, get))),
-    Url = couch_util:url_strip_password(full_url(HttpDb, Params)),
-    ?LOG_INFO("Retrying ~s request to ~s in ~p seconds due to error ~s",
-        [Method, Url, Wait / 1000, error_cause(Error)]),
-    ok = timer:sleep(Wait),
-    Wait2 = erlang:min(Wait * 2, ?MAX_WAIT),
-    send_req(HttpDb#httpdb{retries = Retries - 1, wait = Wait2}, Params, Cb).
-
-
-report_error(Worker, HttpDb, Params, Error) ->
-    Method = string:to_upper(atom_to_list(get_value(method, Params, get))),
-    Url = couch_util:url_strip_password(full_url(HttpDb, Params)),
-    do_report_error(Url, Method, Error),
-    release_worker(Worker, HttpDb),
-    exit({http_request_failed, Method, Url, Error}).
-
-
-do_report_error(Url, Method, {code, Code}) ->
-    ?LOG_ERROR("Replicator, request ~s to ~p failed. The received "
-        "HTTP error code is ~p", [Method, Url, Code]);
-
-do_report_error(FullUrl, Method, Error) ->
-    ?LOG_ERROR("Replicator, request ~s to ~p failed due to error ~s",
-        [Method, FullUrl, error_cause(Error)]).
-
-
-error_cause({error, Cause}) ->
-    lists:flatten(io_lib:format("~p", [Cause]));
-error_cause(Cause) ->
-    lists:flatten(io_lib:format("~p", [Cause])).
-
-
-stream_data_self(#httpdb{timeout = T} = HttpDb, Params, Worker, ReqId, Cb) ->
-    receive
-    {ibrowse_async_response, ReqId, {error, Error}} ->
-        throw({maybe_retry_req, Error});
-    {ibrowse_async_response, ReqId, <<>>} ->
-        ibrowse:stream_next(ReqId),
-        stream_data_self(HttpDb, Params, Worker, ReqId, Cb);
-    {ibrowse_async_response, ReqId, Data} ->
-        ibrowse:stream_next(ReqId),
-        {Data, fun() -> stream_data_self(HttpDb, Params, Worker, ReqId, Cb) end};
-    {ibrowse_async_response_end, ReqId} ->
-        {<<>>, fun() -> throw({maybe_retry_req, more_data_expected}) end}
-    after T + 500 ->
-        % Note: ibrowse should always reply with timeouts, but this doesn't
-        % seem to be always true when there's a very high rate of requests
-        % and many open connections.
-        throw({maybe_retry_req, timeout})
-    end.
-
-
-full_url(#httpdb{url = BaseUrl}, Params) ->
-    Path = get_value(path, Params, []),
-    QueryArgs = get_value(qs, Params, []),
-    BaseUrl ++ Path ++ query_args_to_string(QueryArgs, []).
-
-
-query_args_to_string([], []) ->
-    "";
-query_args_to_string([], Acc) ->
-    "?" ++ string:join(lists:reverse(Acc), "&");
-query_args_to_string([{K, V} | Rest], Acc) ->
-    query_args_to_string(Rest, [K ++ "=" ++ couch_httpd:quote(V) | Acc]).
-
-
-oauth_header(#httpdb{oauth = nil}, _ConnParams) ->
-    [];
-oauth_header(#httpdb{url = BaseUrl, oauth = OAuth}, ConnParams) ->
-    Consumer = {
-        OAuth#oauth.consumer_key,
-        OAuth#oauth.consumer_secret,
-        OAuth#oauth.signature_method
-    },
-    Method = case get_value(method, ConnParams, get) of
-    get -> "GET";
-    post -> "POST";
-    put -> "PUT";
-    head -> "HEAD"
-    end,
-    QSL = get_value(qs, ConnParams, []),
-    OAuthParams = oauth:signed_params(Method,
-        BaseUrl ++ get_value(path, ConnParams, []),
-        QSL, Consumer, OAuth#oauth.token, OAuth#oauth.token_secret) -- QSL,
-    [{"Authorization",
-        "OAuth " ++ oauth_uri:params_to_header_string(OAuthParams)}].
-
-
-do_redirect(Worker, Code, Headers, #httpdb{url = Url} = HttpDb, Params, Cb) ->
-    release_worker(Worker, HttpDb),
-    RedirectUrl = redirect_url(Headers, Url),
-    {HttpDb2, Params2} = after_redirect(RedirectUrl, Code, HttpDb, Params),
-    send_req(HttpDb2, Params2, Cb).
-
-
-redirect_url(RespHeaders, OrigUrl) ->
-    MochiHeaders = mochiweb_headers:make(RespHeaders),
-    RedUrl = mochiweb_headers:get_value("Location", MochiHeaders),
-    #url{
-        host = Host,
-        host_type = HostType,
-        port = Port,
-        path = Path,  % includes query string
-        protocol = Proto
-    } = ibrowse_lib:parse_url(RedUrl),
-    #url{
-        username = User,
-        password = Passwd
-    } = ibrowse_lib:parse_url(OrigUrl),
-    Creds = case is_list(User) andalso is_list(Passwd) of
-    true ->
-        User ++ ":" ++ Passwd ++ "@";
-    false ->
-        []
-    end,
-    HostPart = case HostType of
-    ipv6_address ->
-        "[" ++ Host ++ "]";
-    _ ->
-        Host
-    end,
-    atom_to_list(Proto) ++ "://" ++ Creds ++ HostPart ++ ":" ++
-        integer_to_list(Port) ++ Path.
-
-after_redirect(RedirectUrl, 303, HttpDb, Params) ->
-    after_redirect(RedirectUrl, HttpDb, ?replace(Params, method, get));
-after_redirect(RedirectUrl, _Code, HttpDb, Params) ->
-    after_redirect(RedirectUrl, HttpDb, Params).
-
-after_redirect(RedirectUrl, HttpDb, Params) ->
-    Params2 = lists:keydelete(path, 1, lists:keydelete(qs, 1, Params)),
-    {HttpDb#httpdb{url = RedirectUrl}, Params2}.
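
send_req/3 above hides worker pooling, retries, and redirects behind one
callback: on failure, maybe_retry/5 doubles the wait between attempts
(250 ms by default, capped at ?MAX_WAIT) until #httpdb.retries is exhausted,
so with the defaults the sleep schedule is 250, 500, ..., 128000 ms. A
hypothetical caller fetching the database info document (the path and
callback clause are illustrative; the real callers live in couch_api_wrap.erl):

    %% Illustrative only: GET the database root through the wrapper. On a
    %% 200 the callback receives the decoded EJson body; transient errors
    %% are retried internally before this function ever sees them.
    get_db_info(#httpdb{} = Db) ->
        couch_api_wrap_httpc:send_req(Db, [{method, get}, {path, ""}],
            fun(200, _Headers, {Props}) ->
                {ok, Props}
            end).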

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ad526790/src/couchdb/couch_httpc_pool.erl
----------------------------------------------------------------------
diff --git a/src/couchdb/couch_httpc_pool.erl b/src/couchdb/couch_httpc_pool.erl
deleted file mode 100644
index f6b7c26..0000000
--- a/src/couchdb/couch_httpc_pool.erl
+++ /dev/null
@@ -1,138 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpc_pool).
--behaviour(gen_server).
-
-% public API
--export([start_link/2, stop/1]).
--export([get_worker/1, release_worker/2]).
-
-% gen_server API
--export([init/1, handle_call/3, handle_info/2, handle_cast/2]).
--export([code_change/3, terminate/2]).
-
--include("couch_db.hrl").
-
--import(couch_util, [
-    get_value/2,
-    get_value/3
-]).
-
--record(state, {
-    url,
-    limit,                  % max # of workers allowed
-    free = [],              % free workers (connections)
-    busy = [],              % busy workers (connections)
-    waiting = queue:new()   % blocked clients waiting for a worker
-}).
-
-
-start_link(Url, Options) ->
-    gen_server:start_link(?MODULE, {Url, Options}, []).
-
-
-stop(Pool) ->
-    ok = gen_server:call(Pool, stop, infinity).
-
-
-get_worker(Pool) ->
-    {ok, _Worker} = gen_server:call(Pool, get_worker, infinity).
-
-
-release_worker(Pool, Worker) ->
-    ok = gen_server:cast(Pool, {release_worker, Worker}).
-
-
-init({Url, Options}) ->
-    process_flag(trap_exit, true),
-    State = #state{
-        url = Url,
-        limit = get_value(max_connections, Options)
-    },
-    {ok, State}.
-
-
-handle_call(get_worker, From, #state{waiting = Waiting} = State) ->
-    #state{url = Url, limit = Limit, busy = Busy, free = Free} = State,
-    case length(Busy) >= Limit of
-    true ->
-        {noreply, State#state{waiting = queue:in(From, Waiting)}};
-    false ->
-        case Free of
-        [] ->
-            {ok, Worker} = ibrowse:spawn_link_worker_process(Url),
-            Free2 = Free;
-        [Worker | Free2] ->
-            ok
-        end,
-        NewState = State#state{free = Free2, busy = [Worker | Busy]},
-        {reply, {ok, Worker}, NewState}
-    end;
-
-handle_call(stop, _From, State) ->
-    {stop, normal, ok, State}.
-
-
-handle_cast({release_worker, Worker}, #state{waiting = Waiting} = State) ->
-    case is_process_alive(Worker) andalso
-        lists:member(Worker, State#state.busy) of
-    true ->
-        case queue:out(Waiting) of
-        {empty, Waiting2} ->
-            Busy2 = State#state.busy -- [Worker],
-            Free2 = [Worker | State#state.free];
-        {{value, From}, Waiting2} ->
-            gen_server:reply(From, {ok, Worker}),
-            Busy2 = State#state.busy,
-            Free2 = State#state.free
-        end,
-        NewState = State#state{
-            busy = Busy2,
-            free = Free2,
-            waiting = Waiting2
-        },
-        {noreply, NewState};
-    false ->
-        {noreply, State}
-    end.
-
-
-handle_info({'EXIT', Pid, _Reason}, #state{busy = Busy, free = Free} = State) ->
-    case Free -- [Pid] of
-    Free ->
-        case Busy -- [Pid] of
-        Busy ->
-            {noreply, State};
-        Busy2 ->
-            case queue:out(State#state.waiting) of
-            {empty, _} ->
-                {noreply, State#state{busy = Busy2}};
-            {{value, From}, Waiting2} ->
-                {ok, Worker} = ibrowse:spawn_link_worker_process(State#state.url),
-                gen_server:reply(From, {ok, Worker}),
-                {noreply, State#state{busy = [Worker | Busy2], waiting = Waiting2}}
-            end
-        end;
-    Free2 ->
-        {noreply, State#state{free = Free2}}
-    end.
-
-
-code_change(_OldVsn, State, _Extra) ->
-    {ok, State}.
-
-
-terminate(_Reason, State) ->
-    lists:foreach(fun ibrowse_http_client:stop/1, State#state.free),
-    lists:foreach(fun ibrowse_http_client:stop/1, State#state.busy).
-
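
The pool's public surface is just the five exported functions; a minimal,
hypothetical checkout/checkin round trip (the URL and limit are made up):

    %% Illustrative only. get_worker/1 blocks (the caller is queued) once
    %% `limit' workers are busy; release_worker/2 hands a freed worker to
    %% the longest-waiting caller instead of returning it to the free list.
    {ok, Pool} = couch_httpc_pool:start_link("http://example.org:5984/",
        [{max_connections, 10}]),
    {ok, Worker} = couch_httpc_pool:get_worker(Pool),
    %% ... issue requests with ibrowse:send_req_direct/7 using Worker ...
    ok = couch_httpc_pool:release_worker(Pool, Worker),
    ok = couch_httpc_pool:stop(Pool).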

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ad526790/src/couchdb/couch_httpd_replicator.erl
----------------------------------------------------------------------
diff --git a/src/couchdb/couch_httpd_replicator.erl b/src/couchdb/couch_httpd_replicator.erl
deleted file mode 100644
index fb1e350..0000000
--- a/src/couchdb/couch_httpd_replicator.erl
+++ /dev/null
@@ -1,66 +0,0 @@
-% Licensed under the Apache License, Version 2.0 (the "License"); you may not
-% use this file except in compliance with the License. You may obtain a copy of
-% the License at
-%
-%   http://www.apache.org/licenses/LICENSE-2.0
-%
-% Unless required by applicable law or agreed to in writing, software
-% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-% License for the specific language governing permissions and limitations under
-% the License.
-
--module(couch_httpd_replicator).
-
--include("couch_db.hrl").
-
--import(couch_httpd, [
-    send_json/2,
-    send_json/3,
-    send_method_not_allowed/2
-]).
-
--import(couch_util, [
-    to_binary/1
-]).
-
--export([handle_req/1]).
-
-
-handle_req(#httpd{method = 'POST', user_ctx = UserCtx} = Req) ->
-    couch_httpd:validate_ctype(Req, "application/json"),
-    RepDoc = {Props} = couch_httpd:json_body_obj(Req),
-    validate_rep_props(Props),
-    {ok, Rep} = couch_replicator_utils:parse_rep_doc(RepDoc, UserCtx),
-    case couch_replicator:replicate(Rep) of
-    {error, {Error, Reason}} ->
-        send_json(
-            Req, 404,
-            {[{error, to_binary(Error)}, {reason, to_binary(Reason)}]});
-    {error, not_found} ->
-        % Tried to cancel a replication that didn't exist.
-        send_json(Req, 404, {[{error, <<"not found">>}]});
-    {error, Reason} ->
-        send_json(Req, 500, {[{error, to_binary(Reason)}]});
-    {ok, {cancelled, RepId}} ->
-        send_json(Req, 200, {[{ok, true}, {<<"_local_id">>, RepId}]});
-    {ok, {continuous, RepId}} ->
-        send_json(Req, 202, {[{ok, true}, {<<"_local_id">>, RepId}]});
-    {ok, {HistoryResults}} ->
-        send_json(Req, {[{ok, true} | HistoryResults]})
-    end;
-
-handle_req(Req) ->
-    send_method_not_allowed(Req, "POST").
-
-validate_rep_props([]) ->
-    ok;
-validate_rep_props([{<<"query_params">>, {Params}}|Rest]) ->
-    lists:foreach(fun
-        ({_,V}) when is_binary(V) -> ok;
-        ({K,_}) -> throw({bad_request,
-            <<K/binary," value must be a string.">>})
-        end, Params),
-    validate_rep_props(Rest);
-validate_rep_props([_|Rest]) ->
-    validate_rep_props(Rest).
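
handle_req/1 above serves POSTs to the replication endpoint; a hypothetical
client-side trigger via ibrowse, assuming the default URL handler mapping
for _replicate and the usual ?JSON_ENCODE macro from couch_db.hrl (the
endpoint URL and document fields are illustrative):

    %% Illustrative only: start a continuous replication over HTTP; the
    %% handler above answers 202 with the replication's _local_id.
    Body = ?JSON_ENCODE({[
        {<<"source">>, <<"http://example.org:5984/src">>},
        {<<"target">>, <<"dst">>},
        {<<"continuous">>, true}
    ]}),
    {ok, "202", _Headers, _Resp} = ibrowse:send_req(
        "http://127.0.0.1:5984/_replicate",
        [{"Content-Type", "application/json"}], post, Body).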

http://git-wip-us.apache.org/repos/asf/couchdb/blob/ad526790/src/couchdb/couch_js_functions.hrl
----------------------------------------------------------------------
diff --git a/src/couchdb/couch_js_functions.hrl b/src/couchdb/couch_js_functions.hrl
index 1949904..1c1dee1 100644
--- a/src/couchdb/couch_js_functions.hrl
+++ b/src/couchdb/couch_js_functions.hrl
@@ -104,144 +104,3 @@
         }
     }
 ">>).
-
-
--define(REP_DB_DOC_VALIDATE_FUN, <<"
-    function(newDoc, oldDoc, userCtx) {
-        function reportError(error_msg) {
-            log('Error writing document `' + newDoc._id +
-                '\\' to the replicator database: ' + error_msg);
-            throw({forbidden: error_msg});
-        }
-
-        function validateEndpoint(endpoint, fieldName) {
-            if ((typeof endpoint !== 'string') &&
-                ((typeof endpoint !== 'object') || (endpoint === null))) {
-
-                reportError('The `' + fieldName + '\\' property must exist' +
-                    ' and be either a string or an object.');
-            }
-
-            if (typeof endpoint === 'object') {
-                if ((typeof endpoint.url !== 'string') || !endpoint.url) {
-                    reportError('The url property must exist in the `' +
-                        fieldName + '\\' field and must be a non-empty string.');
-                }
-
-                if ((typeof endpoint.auth !== 'undefined') &&
-                    ((typeof endpoint.auth !== 'object') ||
-                        endpoint.auth === null)) {
-
-                    reportError('`' + fieldName +
-                        '.auth\\' must be a non-null object.');
-                }
-
-                if ((typeof endpoint.headers !== 'undefined') &&
-                    ((typeof endpoint.headers !== 'object') ||
-                        endpoint.headers === null)) {
-
-                    reportError('`' + fieldName +
-                        '.headers\\' must be a non-null object.');
-                }
-            }
-        }
-
-        var isReplicator = (userCtx.roles.indexOf('_replicator') >= 0);
-        var isAdmin = (userCtx.roles.indexOf('_admin') >= 0);
-
-        if (oldDoc && !newDoc._deleted && !isReplicator &&
-            (oldDoc._replication_state === 'triggered')) {
-            reportError('Only the replicator can edit replication documents ' +
-                'that are in the triggered state.');
-        }
-
-        if (!newDoc._deleted) {
-            validateEndpoint(newDoc.source, 'source');
-            validateEndpoint(newDoc.target, 'target');
-
-            if ((typeof newDoc.create_target !== 'undefined') &&
-                (typeof newDoc.create_target !== 'boolean')) {
-
-                reportError('The `create_target\\' field must be a boolean.');
-            }
-
-            if ((typeof newDoc.continuous !== 'undefined') &&
-                (typeof newDoc.continuous !== 'boolean')) {
-
-                reportError('The `continuous\\' field must be a boolean.');
-            }
-
-            if ((typeof newDoc.doc_ids !== 'undefined') &&
-                !isArray(newDoc.doc_ids)) {
-
-                reportError('The `doc_ids\\' field must be an array of strings.');
-            }
-
-            if ((typeof newDoc.filter !== 'undefined') &&
-                ((typeof newDoc.filter !== 'string') || !newDoc.filter)) {
-
-                reportError('The `filter\\' field must be a non-empty string.');
-            }
-
-            if ((typeof newDoc.query_params !== 'undefined') &&
-                ((typeof newDoc.query_params !== 'object') ||
-                    newDoc.query_params === null)) {
-
-                reportError('The `query_params\\' field must be an object.');
-            }
-
-            if (newDoc.user_ctx) {
-                var user_ctx = newDoc.user_ctx;
-
-                if ((typeof user_ctx !== 'object') || (user_ctx === null)) {
-                    reportError('The `user_ctx\\' property must be a ' +
-                        'non-null object.');
-                }
-
-                if (!(user_ctx.name === null ||
-                    (typeof user_ctx.name === 'undefined') ||
-                    ((typeof user_ctx.name === 'string') &&
-                        user_ctx.name.length > 0))) {
-
-                    reportError('The `user_ctx.name\\' property must be a ' +
-                        'non-empty string or null.');
-                }
-
-                if (!isAdmin && (user_ctx.name !== userCtx.name)) {
-                    reportError('The given `user_ctx.name\\' is not valid');
-                }
-
-                if (user_ctx.roles && !isArray(user_ctx.roles)) {
-                    reportError('The `user_ctx.roles\\' property must be ' +
-                        'an array of strings.');
-                }
-
-                if (!isAdmin && user_ctx.roles) {
-                    for (var i = 0; i < user_ctx.roles.length; i++) {
-                        var role = user_ctx.roles[i];
-
-                        if (typeof role !== 'string' || role.length === 0) {
-                            reportError('Roles must be non-empty strings.');
-                        }
-                        if (userCtx.roles.indexOf(role) === -1) {
-                            reportError('Invalid role (`' + role +
-                                '\\') in the `user_ctx\\'');
-                        }
-                    }
-                }
-            } else {
-                if (!isAdmin) {
-                    reportError('The `user_ctx\\' property is missing (it is ' +
-                       'optional for admins only).');
-                }
-            }
-        } else {
-            if (!isAdmin) {
-                if (!oldDoc.user_ctx || (oldDoc.user_ctx.name !== userCtx.name)) {
-                    reportError('Replication documents can only be deleted by ' +
-                        'admins or by the users who created them.');
-                }
-            }
-        }
-    }
-">>).

