couchdb-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From beno...@apache.org
Subject [36/57] [abbrv] [partial] initial move to rebar compilation
Date Tue, 07 Jan 2014 00:36:56 GMT
http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couch_replicator/test/03-replication-compact.t
----------------------------------------------------------------------
diff --git a/apps/couch_replicator/test/03-replication-compact.t b/apps/couch_replicator/test/03-replication-compact.t
new file mode 100755
index 0000000..7c4d38c
--- /dev/null
+++ b/apps/couch_replicator/test/03-replication-compact.t
@@ -0,0 +1,488 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Verify that compacting databases that are being used as the source or
+% target of a replication doesn't affect the replication and that the
+% replication doesn't hold their reference counters forever.
+
+-define(b2l(B), binary_to_list(B)).
+
+-record(user_ctx, {
+    name = null,
+    roles = [],
+    handler
+}).
+
+-record(db, {
+    main_pid = nil,
+    update_pid = nil,
+    compactor_pid = nil,
+    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
+    fd,
+    updater_fd,
+    fd_ref_counter,
+    header = nil,
+    committed_update_seq,
+    fulldocinfo_by_id_btree,
+    docinfo_by_seq_btree,
+    local_docs_btree,
+    update_seq,
+    name,
+    filepath,
+    validate_doc_funs = [],
+    security = [],
+    security_ptr = nil,
+    user_ctx = #user_ctx{},
+    waiting_delayed_commit = nil,
+    revs_limit = 1000,
+    fsync_options = [],
+    options = [],
+    compression,
+    before_doc_update,
+    after_doc_read
+}).
+
+-record(rep, {
+    id,
+    source,
+    target,
+    options,
+    user_ctx,
+    doc_id
+}).
+
+
+% Names of the fixture databases used by every local/remote pairing below.
+source_db_name() -> <<"couch_test_rep_db_a">>.
+target_db_name() -> <<"couch_test_rep_db_b">>.
+
+
+% escript entry point: declare the etap plan, run the suite, and bail out
+% with a diagnostic if test/0 dies abnormally.
+main(_) ->
+    test_util:init_code_path(),
+
+    % The plan count must match the number of assertions exactly.
+    etap:plan(376),
+    case (catch test()) of
+        ok ->
+            etap:end_tests();
+        Other ->
+            % catch converts any throw/error/exit from test() into a term.
+            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+            etap:bail(Other)
+    end,
+    ok.
+
+
+% Run the compaction-during-replication scenario once for each of the four
+% local/remote combinations of source and target endpoints.
+test() ->
+    couch_server_sup:start_link(test_util:config_files()),
+    ibrowse:start(),
+
+    % {Source, Target} endpoint specs; {remote, Name} is replicated over HTTP.
+    Pairs = [
+        {source_db_name(), target_db_name()},
+        {{remote, source_db_name()}, target_db_name()},
+        {source_db_name(), {remote, target_db_name()}},
+        {{remote, source_db_name()}, {remote, (target_db_name())}}
+    ],
+
+    lists:foreach(
+        fun({Source, Target}) ->
+            {ok, SourceDb} = create_db(source_db_name()),
+            etap:is(couch_db:is_idle(SourceDb), true,
+                "Source database is idle before starting replication"),
+
+            {ok, TargetDb} = create_db(target_db_name()),
+            etap:is(couch_db:is_idle(TargetDb), true,
+                "Target database is idle before starting replication"),
+
+            {ok, RepPid, RepId} = replicate(Source, Target),
+            check_active_tasks(RepPid, RepId, Source, Target),
+            {ok, DocsWritten} = populate_and_compact_test(
+                RepPid, SourceDb, TargetDb),
+
+            wait_target_in_sync(DocsWritten, TargetDb),
+            check_active_tasks(RepPid, RepId, Source, Target),
+            cancel_replication(RepId, RepPid),
+            compare_dbs(SourceDb, TargetDb),
+
+            delete_db(SourceDb),
+            delete_db(TargetDb),
+            % Restart the server so each pair starts from a clean state.
+            couch_server_sup:stop(),
+            ok = timer:sleep(1000),
+            couch_server_sup:start_link(test_util:config_files())
+        end,
+        Pairs),
+
+    couch_server_sup:stop(),
+    ok.
+
+
+% Write documents continuously while repeatedly compacting both databases,
+% asserting after every compaction that the replication process and the
+% database main processes survive and that the old fd ref counter is
+% released. Runs 5 rounds; the writer adds 50 docs per round.
+% Returns {ok, TotalDocsWritten}.
+populate_and_compact_test(RepPid, SourceDb0, TargetDb0) ->
+    etap:is(is_process_alive(RepPid), true, "Replication process is alive"),
+    check_db_alive("source", SourceDb0),
+    check_db_alive("target", TargetDb0),
+
+    Writer = spawn_writer(SourceDb0),
+
+    lists:foldl(
+        fun(_, {SourceDb, TargetDb, DocCount}) ->
+            % First compaction pass happens with the writer paused.
+            pause_writer(Writer),
+
+            compact_db("source", SourceDb),
+            etap:is(is_process_alive(RepPid), true,
+                "Replication process is alive after source database compaction"),
+            check_db_alive("source", SourceDb),
+            check_ref_counter("source", SourceDb),
+
+            compact_db("target", TargetDb),
+            etap:is(is_process_alive(RepPid), true,
+                "Replication process is alive after target database compaction"),
+            check_db_alive("target", TargetDb),
+            check_ref_counter("target", TargetDb),
+
+            % Pick up fresh #db records after compaction swapped the files.
+            {ok, SourceDb2} = reopen_db(SourceDb),
+            {ok, TargetDb2} = reopen_db(TargetDb),
+
+            resume_writer(Writer),
+            wait_writer(Writer, DocCount),
+
+            % Second pass: compact while the writer is actively writing.
+            compact_db("source", SourceDb2),
+            etap:is(is_process_alive(RepPid), true,
+                "Replication process is alive after source database compaction"),
+            check_db_alive("source", SourceDb2),
+            pause_writer(Writer),
+            check_ref_counter("source", SourceDb2),
+            resume_writer(Writer),
+
+            compact_db("target", TargetDb2),
+            etap:is(is_process_alive(RepPid), true,
+                "Replication process is alive after target database compaction"),
+            check_db_alive("target", TargetDb2),
+            pause_writer(Writer),
+            check_ref_counter("target", TargetDb2),
+            resume_writer(Writer),
+
+            {ok, SourceDb3} = reopen_db(SourceDb2),
+            {ok, TargetDb3} = reopen_db(TargetDb2),
+            {SourceDb3, TargetDb3, DocCount + 50}
+        end,
+        {SourceDb0, TargetDb0, 50}, lists:seq(1, 5)),
+
+    DocsWritten = stop_writer(Writer),
+    {ok, DocsWritten}.
+
+
+% Assert that the database's main server process is still running.
+check_db_alive(Type, #db{main_pid = Pid}) ->
+    etap:is(is_process_alive(Pid), true,
+        "Local " ++ Type ++ " database main pid is alive").
+
+
+% Trigger a compaction of the named database and block (via a monitor on
+% the compactor pid) until it finishes; bail if it errors or takes > 30 s.
+compact_db(Type, #db{name = Name}) ->
+    {ok, Db} = couch_db:open_int(Name, []),
+    {ok, CompactPid} = couch_db:start_compact(Db),
+    MonRef = erlang:monitor(process, CompactPid),
+    receive
+    {'DOWN', MonRef, process, CompactPid, normal} ->
+        ok;
+    {'DOWN', MonRef, process, CompactPid, Reason} ->
+        etap:bail("Error compacting " ++ Type ++ " database " ++ ?b2l(Name) ++
+            ": " ++ couch_util:to_list(Reason))
+    after 30000 ->
+        etap:bail("Compaction for " ++ Type ++ " database " ++ ?b2l(Name) ++
+            " didn't finish")
+    end,
+    ok = couch_db:close(Db).
+
+
+% After a compaction, the pre-compaction fd ref counter process must die
+% (i.e. the replication does not pin the old file open forever) and a
+% freshly opened handle must carry a different ref counter.
+check_ref_counter(Type, #db{name = Name, fd_ref_counter = OldRefCounter}) ->
+    MonRef = erlang:monitor(process, OldRefCounter),
+    receive
+    {'DOWN', MonRef, process, OldRefCounter, _} ->
+        etap:diag("Old " ++ Type ++ " database ref counter terminated")
+    after 30000 ->
+        etap:bail("Old " ++ Type ++ " database ref counter didn't terminate")
+    end,
+    {ok, #db{fd_ref_counter = NewRefCounter} = Db} = couch_db:open_int(Name, []),
+    ok = couch_db:close(Db),
+    etap:isnt(
+        NewRefCounter, OldRefCounter, Type ++ " database has new ref counter").
+
+
+% Open a fresh #db record for Name and close it again; the returned record
+% is used afterwards only for its snapshot fields (name, pids,
+% fd_ref_counter), not as a live handle.
+reopen_db(#db{name = Name}) ->
+    {ok, Db} = couch_db:open_int(Name, []),
+    ok = couch_db:close(Db),
+    {ok, Db}.
+
+
+% Wait (up to 300 attempts x 100 ms) until the target holds DocCount docs.
+wait_target_in_sync(DocCount, #db{name = TargetName}) ->
+    wait_target_in_sync_loop(DocCount, TargetName, 300).
+
+
+% Poll the target database's doc_count until it equals DocCount, sleeping
+% 100 ms between attempts; bail once RetriesLeft is exhausted.
+wait_target_in_sync_loop(_DocCount, _TargetName, 0) ->
+    etap:bail("Could not get source and target databases in sync");
+wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft) ->
+    {ok, Target} = couch_db:open_int(TargetName, []),
+    {ok, TargetInfo} = couch_db:get_db_info(Target),
+    ok = couch_db:close(Target),
+    TargetDocCount = couch_util:get_value(doc_count, TargetInfo),
+    % Both counts are integers, so use exact equality (=:=) rather than the
+    % arithmetic-coercing == operator.
+    case TargetDocCount =:= DocCount of
+    true ->
+        etap:diag("Source and target databases are in sync");
+    false ->
+        ok = timer:sleep(100),
+        wait_target_in_sync_loop(DocCount, TargetName, RetriesLeft - 1)
+    end.
+
+
+% Enumerate every document in the source database and assert that the
+% target holds a byte-identical JSON representation (attachments included);
+% bail on the first missing or differing document.
+compare_dbs(#db{name = SourceName}, #db{name = TargetName}) ->
+    {ok, SourceDb} = couch_db:open_int(SourceName, []),
+    {ok, TargetDb} = couch_db:open_int(TargetName, []),
+    Fun = fun(FullDocInfo, _, Acc) ->
+        {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
+        {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
+        DocId = couch_util:get_value(<<"_id">>, Props),
+        DocTarget = case couch_db:open_doc(TargetDb, DocId) of
+        {ok, DocT} ->
+            DocT;
+        Error ->
+            % bail aborts the script, so the bad binding never propagates.
+            etap:bail("Error opening document '" ++ ?b2l(DocId) ++
+                "' from target: " ++ couch_util:to_list(Error))
+        end,
+        DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
+        case DocTargetJson of
+        DocJson ->
+            ok;
+        _ ->
+            etap:bail("Content from document '" ++ ?b2l(DocId) ++
+                "' differs in target database")
+        end,
+        {ok, Acc}
+    end,
+    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+    etap:diag("Target database has the same documents as the source database"),
+    ok = couch_db:close(SourceDb),
+    ok = couch_db:close(TargetDb).
+
+
+% Assert that the single entry in couch_task_status:all() describes this
+% replication: correct pid, replication id, endpoints, and integer-valued
+% progress/statistics properties.
+check_active_tasks(RepPid, {BaseId, Ext} = _RepId, Src, Tgt) ->
+    % Remote endpoints appear in the task as their URL with a trailing "/".
+    Source = case Src of
+    {remote, NameSrc} ->
+        <<(db_url(NameSrc))/binary, $/>>;
+    _ ->
+        Src
+    end,
+    Target = case Tgt of
+    {remote, NameTgt} ->
+        <<(db_url(NameTgt))/binary, $/>>;
+    _ ->
+        Tgt
+    end,
+    FullRepId = list_to_binary(BaseId ++ Ext),
+    Pid = list_to_binary(pid_to_list(RepPid)),
+    [RepTask] = couch_task_status:all(),
+    etap:is(couch_util:get_value(pid, RepTask), Pid,
+        "_active_tasks entry has correct pid property"),
+    etap:is(couch_util:get_value(replication_id, RepTask), FullRepId,
+        "_active_tasks entry has right replication id"),
+    etap:is(couch_util:get_value(continuous, RepTask), true,
+        "_active_tasks entry has continuous property set to true"),
+    etap:is(couch_util:get_value(source, RepTask), Source,
+        "_active_tasks entry has correct source property"),
+    etap:is(couch_util:get_value(target, RepTask), Target,
+        "_active_tasks entry has correct target property"),
+    etap:is(is_integer(couch_util:get_value(docs_read, RepTask)), true,
+        "_active_tasks entry has integer docs_read property"),
+    etap:is(is_integer(couch_util:get_value(docs_written, RepTask)), true,
+        "_active_tasks entry has integer docs_written property"),
+    etap:is(is_integer(couch_util:get_value(doc_write_failures, RepTask)), true,
+        "_active_tasks entry has integer doc_write_failures property"),
+    etap:is(is_integer(couch_util:get_value(revisions_checked, RepTask)), true,
+        "_active_tasks entry has integer revisions_checked property"),
+    etap:is(is_integer(couch_util:get_value(missing_revisions_found, RepTask)), true,
+        "_active_tasks entry has integer missing_revisions_found property"),
+    etap:is(is_integer(couch_util:get_value(checkpointed_source_seq, RepTask)), true,
+        "_active_tasks entry has integer checkpointed_source_seq property"),
+    etap:is(is_integer(couch_util:get_value(source_seq, RepTask)), true,
+        "_active_tasks entry has integer source_seq property"),
+    Progress = couch_util:get_value(progress, RepTask),
+    etap:is(is_integer(Progress), true,
+        "_active_tasks entry has an integer progress property"),
+    etap:is(Progress =< 100, true, "Progress is not greater than 100%").
+
+
+% Block until the writer reports at least NumDocs documents written.
+% get_writer_num_docs_written/1 blocks on a receive, so this loop does
+% not busy-spin.
+wait_writer(Pid, NumDocs) ->
+    case get_writer_num_docs_written(Pid) of
+    N when N >= NumDocs ->
+        ok;
+    _ ->
+        wait_writer(Pid, NumDocs)
+    end.
+
+
+% Start the background writer process that continuously adds documents
+% (with attachments) to the source database. Returns the writer's pid.
+spawn_writer(Db) ->
+    Parent = self(),
+    Pid = spawn(fun() -> writer_loop(Db, Parent, 0) end),
+    etap:diag("Started source database writer"),
+    Pid.
+
+
+% Ask the writer to pause and wait for its acknowledgement (30 s limit).
+pause_writer(Pid) ->
+    Ref = make_ref(),
+    Pid ! {pause, Ref},
+    receive
+    {paused, Ref} ->
+        ok
+    after 30000 ->
+        etap:bail("Failed to pause source database writer")
+    end.
+
+
+% Ask a paused writer to continue and wait for its acknowledgement.
+resume_writer(Pid) ->
+    Ref = make_ref(),
+    Pid ! {continue, Ref},
+    receive
+    {ok, Ref} ->
+        ok
+    after 30000 ->
+        etap:bail("Failed to unpause source database writer")
+    end.
+
+
+% Synchronously ask the writer how many documents it has written so far.
+get_writer_num_docs_written(Pid) ->
+    Ref = make_ref(),
+    Pid ! {get_count, Ref},
+    receive
+    {count, Ref, Count} ->
+        Count
+    after 30000 ->
+        etap:bail("Timeout getting number of documents written from "
+            "source database writer")
+    end.
+
+
+% Stop the writer, wait for its final document count and for the process
+% to actually terminate (verified via a monitor). Returns the count.
+stop_writer(Pid) ->
+    Ref = make_ref(),
+    Pid ! {stop, Ref},
+    receive
+    {stopped, Ref, DocsWritten} ->
+        MonRef = erlang:monitor(process, Pid),
+        receive
+        {'DOWN', MonRef, process, Pid, _Reason} ->
+            etap:diag("Stopped source database writer"),
+            DocsWritten
+        after 30000 ->
+            etap:bail("Timeout stopping source database writer")
+        end
+    after 30000 ->
+        etap:bail("Timeout stopping source database writer")
+    end.
+
+
+% Writer process body: each iteration writes one document (with two PNG
+% attachments) under a numeric id, then checks the mailbox for control
+% messages. The recursive calls pass the freshly opened Db record; only
+% its name field is read by the next iteration's head.
+writer_loop(#db{name = DbName}, Parent, Counter) ->
+    % Honour pause/count requests both before building and before saving.
+    maybe_pause(Parent, Counter),
+    Doc = couch_doc:from_json_obj({[
+        {<<"_id">>, list_to_binary(integer_to_list(Counter + 1))},
+        {<<"value">>, Counter + 1},
+        {<<"_attachments">>, {[
+            {<<"icon1.png">>, {[
+                {<<"data">>, base64:encode(att_data())},
+                {<<"content_type">>, <<"image/png">>}
+            ]}},
+            {<<"icon2.png">>, {[
+                {<<"data">>, base64:encode(iolist_to_binary(
+                    [att_data(), att_data()]))},
+                {<<"content_type">>, <<"image/png">>}
+            ]}}
+        ]}}
+    ]}),
+    maybe_pause(Parent, Counter),
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, _} = couch_db:update_doc(Db, Doc, []),
+    ok = couch_db:close(Db),
+    receive
+    {get_count, Ref} ->
+        Parent ! {count, Ref, Counter + 1},
+        writer_loop(Db, Parent, Counter + 1);
+    {stop, Ref} ->
+        Parent ! {stopped, Ref, Counter + 1}
+    after 0 ->
+        % Nothing pending: throttle to roughly two writes per second.
+        ok = timer:sleep(500),
+        writer_loop(Db, Parent, Counter + 1)
+    end.
+
+
+% Non-blocking check for control messages: answer a count request, or, on
+% a pause request, acknowledge and block until the matching continue
+% arrives. Falls through immediately when the mailbox is empty.
+maybe_pause(Parent, Counter) ->
+    receive
+    {get_count, Ref} ->
+        Parent ! {count, Ref, Counter};
+    {pause, Ref} ->
+        Parent ! {paused, Ref},
+        receive {continue, Ref2} -> Parent ! {ok, Ref2} end
+    after 0 ->
+        ok
+    end.
+
+
+% Build the HTTP URL for DbName from the running httpd's bind address and
+% actual listening port.
+db_url(DbName) ->
+    iolist_to_binary([
+        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+        "/", DbName
+    ]).
+
+
+% Create (or overwrite) DbName as an admin; the handle is closed but the
+% #db record is still returned for its snapshot fields.
+create_db(DbName) ->
+    {ok, Db} = couch_db:create(
+        DbName,
+        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]),
+    couch_db:close(Db),
+    {ok, Db}.
+
+
+% Delete the database as an admin and wait for its main process to exit.
+delete_db(#db{name = DbName, main_pid = Pid}) ->
+    ok = couch_server:delete(
+        DbName, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]),
+    MonRef = erlang:monitor(process, Pid),
+    receive
+    {'DOWN', MonRef, process, Pid, _Reason} ->
+        ok
+    after 30000 ->
+        etap:bail("Timeout deleting database")
+    end.
+
+
+% Start a continuous replication between the two endpoints; {remote, Name}
+% endpoints are first rewritten to their HTTP URLs. Returns the replication
+% process pid and its id.
+replicate({remote, Db}, Target) ->
+    replicate(db_url(Db), Target);
+
+replicate(Source, {remote, Db}) ->
+    replicate(Source, db_url(Db));
+
+replicate(Source, Target) ->
+    RepObject = {[
+        {<<"source">>, Source},
+        {<<"target">>, Target},
+        {<<"continuous">>, true}
+    ]},
+    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+    {ok, Pid} = couch_replicator:async_replicate(Rep),
+    {ok, Pid, Rep#rep.id}.
+
+
+% Cancel the replication and assert its process has terminated.
+cancel_replication(RepId, RepPid) ->
+    {ok, _} = couch_replicator:cancel_replication(RepId),
+    etap:is(is_process_alive(RepPid), false,
+        "Replication process is no longer alive after cancel").
+
+
+% Attachment payload used by the writer: the bundled CouchDB logo PNG.
+att_data() ->
+    {ok, Data} = file:read_file(
+        test_util:source_file("share/www/image/logo.png")),
+    Data.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couch_replicator/test/04-replication-large-atts.t
----------------------------------------------------------------------
diff --git a/apps/couch_replicator/test/04-replication-large-atts.t b/apps/couch_replicator/test/04-replication-large-atts.t
new file mode 100755
index 0000000..a7063c7
--- /dev/null
+++ b/apps/couch_replicator/test/04-replication-large-atts.t
@@ -0,0 +1,267 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Test replication of large attachments. Verify that both source and
+% target have the same attachment data and metadata.
+
+-define(b2l(Bin), binary_to_list(Bin)).
+
+-record(user_ctx, {
+    name = null,
+    roles = [],
+    handler
+}).
+
+-record(doc, {
+    id = <<"">>,
+    revs = {0, []},
+    body = {[]},
+    atts = [],
+    deleted = false,
+    meta = []
+}).
+
+-record(att, {
+    name,
+    type,
+    att_len,
+    disk_len,
+    md5= <<>>,
+    revpos=0,
+    data,
+    encoding=identity
+}).
+
+
+% Names of the fixture databases used by every local/remote pairing below.
+source_db_name() -> <<"couch_test_rep_db_a">>.
+target_db_name() -> <<"couch_test_rep_db_b">>.
+
+
+% escript entry point: declare the etap plan, run the suite, and bail out
+% with a diagnostic if test/0 dies abnormally.
+main(_) ->
+    test_util:init_code_path(),
+
+    etap:plan(1192),
+    case (catch test()) of
+        ok ->
+            etap:end_tests();
+        Other ->
+            % catch converts any throw/error/exit from test() into a term.
+            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+            etap:bail(Other)
+    end,
+    ok.
+
+
+% Replicate a source database holding large attachments to a fresh target
+% for each local/remote endpoint combination, comparing the databases
+% after every run. The source is populated once and reused.
+test() ->
+    couch_server_sup:start_link(test_util:config_files()),
+    application:start(ibrowse),
+    application:start(crypto),
+    % Make text/* attachments gzip-compressible so the encoding checks in
+    % compare_dbs exercise both encoded and identity attachments.
+    couch_config:set("attachments", "compressible_types", "text/*", false),
+
+    Pairs = [
+        {source_db_name(), target_db_name()},
+        {{remote, source_db_name()}, target_db_name()},
+        {source_db_name(), {remote, target_db_name()}},
+        {{remote, source_db_name()}, {remote, (target_db_name())}}
+    ],
+
+    {ok, SourceDb} = create_db(source_db_name()),
+    etap:diag("Populating source database"),
+    populate_db(SourceDb, 11),
+    ok = couch_db:close(SourceDb),
+
+    lists:foreach(
+        fun({Source, Target}) ->
+            etap:diag("Creating target database"),
+            {ok, TargetDb} = create_db(target_db_name()),
+
+            ok = couch_db:close(TargetDb),
+            etap:diag("Triggering replication"),
+            replicate(Source, Target),
+            etap:diag("Replication finished, comparing source and target databases"),
+            compare_dbs(SourceDb, TargetDb),
+
+            etap:diag("Deleting target database"),
+            delete_db(TargetDb),
+            ok = timer:sleep(1000)
+        end,
+        Pairs),
+
+    delete_db(SourceDb),
+    couch_server_sup:stop(),
+    ok.
+
+
+% Write DocCount documents, each carrying one 2 MiB text/plain attachment
+% (compressible) and one ~6.6 MiB binary attachment (not compressible).
+populate_db(Db, DocCount) ->
+    Docs = lists:foldl(
+        fun(DocIdCounter, Acc) ->
+            Doc = #doc{
+                id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]),
+                body = {[]},
+                atts = [
+                    att(<<"att1">>, 2 * 1024 * 1024, <<"text/plain">>),
+                    att(<<"att2">>, round(6.6 * 1024 * 1024), <<"app/binary">>)
+                ]
+            },
+            [Doc | Acc]
+        end,
+        [], lists:seq(1, DocCount)),
+    {ok, _} = couch_db:update_docs(Db, Docs, []).
+
+
+% Build an #att record of Size random bytes; the data fun is invoked by
+% the writer in chunks of the requested Count.
+att(Name, Size, Type) ->
+    #att{
+        name = Name,
+        type = Type,
+        att_len = Size,
+        data = fun(Count) -> crypto:rand_bytes(Count) end
+    }.
+
+
+% Walk every document of the source database and verify the target holds
+% an equivalent copy: same body, same attachment names, same MD5s,
+% lengths, types and (for att1) the same gzip encoding.
+compare_dbs(Source, Target) ->
+    {ok, SourceDb} = couch_db:open_int(couch_db:name(Source), []),
+    {ok, TargetDb} = couch_db:open_int(couch_db:name(Target), []),
+
+    Fun = fun(FullDocInfo, _, Acc) ->
+        {ok, DocSource} = couch_db:open_doc(SourceDb, FullDocInfo),
+        Id = DocSource#doc.id,
+
+        etap:diag("Verifying document " ++ ?b2l(Id)),
+
+        {ok, DocTarget} = couch_db:open_doc(TargetDb, Id),
+        etap:is(DocTarget#doc.body, DocSource#doc.body,
+            "Same body in source and target databases"),
+
+        #doc{atts = SourceAtts} = DocSource,
+        #doc{atts = TargetAtts} = DocTarget,
+        etap:is(
+            lists:sort([N || #att{name = N} <- SourceAtts]),
+            lists:sort([N || #att{name = N} <- TargetAtts]),
+            "Document has same number (and names) of attachments in "
+            "source and target databases"),
+
+        lists:foreach(
+            fun(#att{name = AttName} = Att) ->
+                etap:diag("Verifying attachment " ++ ?b2l(AttName)),
+
+                {ok, AttTarget} = find_att(TargetAtts, AttName),
+                SourceMd5 = att_md5(Att),
+                TargetMd5 = att_md5(AttTarget),
+                case AttName of
+                <<"att1">> ->
+                    % att1 is text/plain, configured compressible in test/0,
+                    % so it must be gzip-encoded on both sides.
+                    etap:is(Att#att.encoding, gzip,
+                        "Attachment is gzip encoded in source database"),
+                    etap:is(AttTarget#att.encoding, gzip,
+                        "Attachment is gzip encoded in target database"),
+                    DecSourceMd5 = att_decoded_md5(Att),
+                    DecTargetMd5 = att_decoded_md5(AttTarget),
+                    etap:is(DecTargetMd5, DecSourceMd5,
+                        "Same identity content in source and target databases");
+                _ ->
+                    etap:is(Att#att.encoding, identity,
+                        "Attachment is not encoded in source database"),
+                    etap:is(AttTarget#att.encoding, identity,
+                        "Attachment is not encoded in target database")
+                end,
+                etap:is(TargetMd5, SourceMd5,
+                    "Same content in source and target databases"),
+                etap:is(is_integer(Att#att.disk_len), true,
+                    "#att.disk_len is an integer in source database"),
+                etap:is(is_integer(Att#att.att_len), true,
+                    "#att.att_len is an integer in source database"),
+                etap:is(is_integer(AttTarget#att.disk_len), true,
+                    "#att.disk_len is an integer in target database"),
+                etap:is(is_integer(AttTarget#att.att_len), true,
+                    "#att.att_len is an integer in target database"),
+                etap:is(Att#att.disk_len, AttTarget#att.disk_len,
+                    "Same identity length in source and target databases"),
+                etap:is(Att#att.att_len, AttTarget#att.att_len,
+                    "Same encoded length in source and target databases"),
+                etap:is(Att#att.type, AttTarget#att.type,
+                    "Same type in source and target databases"),
+                etap:is(Att#att.md5, SourceMd5, "Correct MD5 in source database"),
+                etap:is(AttTarget#att.md5, SourceMd5, "Correct MD5 in target database")
+            end,
+            SourceAtts),
+
+        {ok, Acc}
+    end,
+
+    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+    ok = couch_db:close(SourceDb),
+    ok = couch_db:close(TargetDb).
+
+
+% Locate the attachment record carrying Name; {ok, Att} on a hit, nil when
+% the list holds no attachment by that name.
+find_att(Atts, Name) ->
+    case lists:keyfind(Name, #att.name, Atts) of
+        false ->
+            nil;
+        #att{} = Found ->
+            {ok, Found}
+    end.
+
+
+% MD5 digest of the attachment's stored (possibly encoded) bytes,
+% computed incrementally over its chunks.
+att_md5(Att) ->
+    Md50 = couch_doc:att_foldl(
+        Att,
+        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+        couch_util:md5_init()),
+    couch_util:md5_final(Md50).
+
+% MD5 digest of the attachment's decoded (identity) bytes.
+att_decoded_md5(Att) ->
+    Md50 = couch_doc:att_foldl_decode(
+        Att,
+        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+        couch_util:md5_init()),
+    couch_util:md5_final(Md50).
+
+
+% Build the HTTP URL for DbName from the running httpd's bind address and
+% actual listening port.
+db_url(DbName) ->
+    iolist_to_binary([
+        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+        "/", DbName
+    ]).
+
+
+% Create (or overwrite) DbName as an admin; returns {ok, Db}.
+create_db(DbName) ->
+    couch_db:create(
+        DbName,
+        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
+
+
+% Delete the database behind the handle, acting as an admin.
+delete_db(Db) ->
+    ok = couch_server:delete(
+        couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
+
+
+% Run a one-shot (non-continuous) replication between the two endpoints
+% and block until the replication process exits, asserting it finished
+% normally. {remote, Name} endpoints are rewritten to their HTTP URLs.
+replicate({remote, Db}, Target) ->
+    replicate(db_url(Db), Target);
+
+replicate(Source, {remote, Db}) ->
+    replicate(Source, db_url(Db));
+
+replicate(Source, Target) ->
+    RepObject = {[
+        {<<"source">>, Source},
+        {<<"target">>, Target}
+    ]},
+    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+    {ok, Pid} = couch_replicator:async_replicate(Rep),
+    MonRef = erlang:monitor(process, Pid),
+    receive
+    {'DOWN', MonRef, process, Pid, Reason} ->
+        etap:is(Reason, normal, "Replication finished successfully")
+    after 300000 ->
+        % Large attachments can take a while; allow up to five minutes.
+        etap:bail("Timeout waiting for replication to finish")
+    end.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couch_replicator/test/05-replication-many-leaves.t
----------------------------------------------------------------------
diff --git a/apps/couch_replicator/test/05-replication-many-leaves.t b/apps/couch_replicator/test/05-replication-many-leaves.t
new file mode 100755
index 0000000..212ee99
--- /dev/null
+++ b/apps/couch_replicator/test/05-replication-many-leaves.t
@@ -0,0 +1,294 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Test replication of documents with many leaf revisions.
+% Motivated by COUCHDB-1340 and other similar issues where a document
+% GET with a too long ?open_revs revision list doesn't work due to
+% maximum web server limits for the HTTP request path.
+
+-record(user_ctx, {
+    name = null,
+    roles = [],
+    handler
+}).
+
+-record(doc, {
+    id = <<"">>,
+    revs = {0, []},
+    body = {[]},
+    atts = [],
+    deleted = false,
+    meta = []
+}).
+
+-record(att, {
+    name,
+    type,
+    att_len,
+    disk_len,
+    md5= <<>>,
+    revpos=0,
+    data,
+    encoding=identity
+}).
+
+-define(b2l(B), binary_to_list(B)).
+-define(l2b(L), list_to_binary(L)).
+-define(i2l(I), integer_to_list(I)).
+
+
+% Names of the fixture databases used by every local/remote pairing below.
+source_db_name() -> <<"couch_test_rep_db_a">>.
+target_db_name() -> <<"couch_test_rep_db_b">>.
+
+% Ids of the documents that receive many conflicting leaf revisions.
+doc_ids() ->
+    [<<"doc1">>, <<"doc2">>, <<"doc3">>].
+
+% Number of sibling (conflict) revisions to create per document.
+doc_num_conflicts(<<"doc1">>) -> 10;
+doc_num_conflicts(<<"doc2">>) -> 100;
+% a number > MaxURLlength (7000) / length(DocRevisionString)
+doc_num_conflicts(<<"doc3">>) -> 210.
+
+
+% escript entry point: declare the etap plan, run the suite, and bail out
+% with a diagnostic if test/0 dies abnormally.
+main(_) ->
+    test_util:init_code_path(),
+
+    etap:plan(56),
+    case (catch test()) of
+        ok ->
+            etap:end_tests();
+        Other ->
+            % catch converts any throw/error/exit from test() into a term.
+            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+            etap:bail(Other)
+    end,
+    ok.
+
+
+% For each endpoint combination: populate a source with heavily conflicted
+% documents, replicate, verify all leaf revisions arrived, then add
+% attachments to every leaf and replicate/verify again (COUCHDB-1340).
+test() ->
+    couch_server_sup:start_link(test_util:config_files()),
+    ibrowse:start(),
+    crypto:start(),
+    % Long revision lists make slow requests; raise the connection timeout.
+    couch_config:set("replicator", "connection_timeout", "90000", false),
+
+    Pairs = [
+        {source_db_name(), target_db_name()},
+        {{remote, source_db_name()}, target_db_name()},
+        {source_db_name(), {remote, target_db_name()}},
+        {{remote, source_db_name()}, {remote, (target_db_name())}}
+    ],
+
+    lists:foreach(
+        fun({Source, Target}) ->
+            {ok, SourceDb} = create_db(source_db_name()),
+            etap:diag("Populating source database"),
+            {ok, DocRevs} = populate_db(SourceDb),
+            ok = couch_db:close(SourceDb),
+            etap:diag("Creating target database"),
+            {ok, TargetDb} = create_db(target_db_name()),
+
+            ok = couch_db:close(TargetDb),
+            etap:diag("Triggering replication"),
+            replicate(Source, Target),
+            etap:diag("Replication finished, comparing source and target databases"),
+            {ok, SourceDb2} = couch_db:open_int(source_db_name(), []),
+            {ok, TargetDb2} = couch_db:open_int(target_db_name(), []),
+            verify_target(SourceDb2, TargetDb2, DocRevs),
+            ok = couch_db:close(SourceDb2),
+            ok = couch_db:close(TargetDb2),
+
+            % Second pass: attach files to every leaf and replicate again.
+            {ok, SourceDb3} = couch_db:open_int(source_db_name(), []),
+            {ok, DocRevs2} = add_attachments(SourceDb3, DocRevs, 2),
+            ok = couch_db:close(SourceDb3),
+            etap:diag("Triggering replication again"),
+            replicate(Source, Target),
+            etap:diag("Replication finished, comparing source and target databases"),
+            {ok, SourceDb4} = couch_db:open_int(source_db_name(), []),
+            {ok, TargetDb4} = couch_db:open_int(target_db_name(), []),
+            verify_target(SourceDb4, TargetDb4, DocRevs2),
+            ok = couch_db:close(SourceDb4),
+            ok = couch_db:close(TargetDb4),
+
+            etap:diag("Deleting source and target databases"),
+            delete_db(TargetDb),
+            delete_db(SourceDb),
+            ok = timer:sleep(1000)
+        end,
+        Pairs),
+
+    couch_server_sup:stop(),
+    ok.
+
+
+% Create each document in doc_ids/0 with one normal revision plus
+% doc_num_conflicts(DocId) conflicting siblings. Returns
+% {ok, [{DocId, [Rev]}]} listing every leaf revision per document.
+populate_db(Db) ->
+    DocRevsDict = lists:foldl(
+        fun(DocId, Acc) ->
+            Value = <<"0">>,
+            Doc = #doc{
+                id = DocId,
+                body = {[ {<<"value">>, Value} ]}
+            },
+            {ok, Rev} = couch_db:update_doc(Db, Doc, []),
+            {ok, DocRevs} = add_doc_siblings(Db, DocId, doc_num_conflicts(DocId)),
+            dict:store(DocId, [Rev | DocRevs], Acc)
+        end,
+        dict:new(), doc_ids()),
+    {ok, dict:to_list(DocRevsDict)}.
+
+
+% Create NumLeaves conflicting sibling revisions of DocId (NumLeaves > 0).
+add_doc_siblings(Db, DocId, NumLeaves) when NumLeaves > 0 ->
+    add_doc_siblings(Db, DocId, NumLeaves, [], []).
+
+
+% Accumulate NumLeaves sibling docs, each with a deterministic rev derived
+% from the MD5 of its value, then write them all in one replicated_changes
+% batch (which preserves the supplied revisions instead of assigning new
+% ones). Returns {ok, Revs}.
+add_doc_siblings(Db, _DocId, 0, AccDocs, AccRevs) ->
+    {ok, []} = couch_db:update_docs(Db, AccDocs, [], replicated_changes),
+    {ok, AccRevs};
+
+add_doc_siblings(Db, DocId, NumLeaves, AccDocs, AccRevs) ->
+    Value = list_to_binary(integer_to_list(NumLeaves)),
+    Rev = couch_util:md5(Value),
+    Doc = #doc{
+        id = DocId,
+        revs = {1, [Rev]},
+        body = {[ {<<"value">>, Value} ]}
+    },
+    add_doc_siblings(Db, DocId, NumLeaves - 1, [Doc | AccDocs], [{1, Rev} | AccRevs]).
+
+
+% For each {DocId, RevList} pair, open all requested revisions on both
+% databases, assert the target holds every expected leaf, and compare the
+% JSON (with attachments) of each revision pair; bail on any mismatch.
+verify_target(_SourceDb, _TargetDb, []) ->
+    ok;
+
+verify_target(SourceDb, TargetDb, [{DocId, RevList} | Rest]) ->
+    {ok, Lookups} = couch_db:open_doc_revs(
+        TargetDb,
+        DocId,
+        RevList,
+        [conflicts, deleted_conflicts]),
+    Docs = [Doc || {ok, Doc} <- Lookups],
+    {ok, SourceLookups} = couch_db:open_doc_revs(
+        SourceDb,
+        DocId,
+        RevList,
+        [conflicts, deleted_conflicts]),
+    SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
+    % One initial revision plus doc_num_conflicts(DocId) siblings.
+    Total = doc_num_conflicts(DocId) + 1,
+    etap:is(
+        length(Docs),
+        Total,
+        "Target has " ++ ?i2l(Total) ++ " leaf revisions of document " ++ ?b2l(DocId)),
+    etap:diag("Verifying all revisions of document " ++ ?b2l(DocId)),
+    lists:foreach(
+        % The fun head also asserts id and revs match pairwise (same Id,
+        % same Revs bound on both sides).
+        fun({#doc{id = Id, revs = Revs} = TgtDoc, #doc{id = Id, revs = Revs} = SrcDoc}) ->
+            SourceJson = couch_doc:to_json_obj(SrcDoc, [attachments]),
+            TargetJson = couch_doc:to_json_obj(TgtDoc, [attachments]),
+            case TargetJson of
+            SourceJson ->
+                ok;
+            _ ->
+                {Pos, [Rev | _]} = Revs,
+                etap:bail("Wrong value for revision " ++
+                    ?b2l(couch_doc:rev_to_str({Pos, Rev})) ++
+                    " of document " ++ ?b2l(DocId))
+            end
+        end,
+        lists:zip(Docs, SourceDocs)),
+    verify_target(SourceDb, TargetDb, Rest).
+
+
+add_attachments(Source, DocIdRevs, NumAtts) ->
+    add_attachments(Source, DocIdRevs, NumAtts, []).
+
+add_attachments(_SourceDb, [], _NumAtts, Acc) ->
+    {ok, Acc};
+
+add_attachments(SourceDb, [{DocId, RevList} | Rest], NumAtts, IdRevsAcc) ->
+    {ok, SourceLookups} = couch_db:open_doc_revs(
+        SourceDb,
+        DocId,
+        RevList,
+        []),
+    SourceDocs = [Doc || {ok, Doc} <- SourceLookups],
+    Total = doc_num_conflicts(DocId) + 1,
+    etap:is(
+        length(SourceDocs),
+        Total,
+        "Source still has " ++ ?i2l(Total) ++
+            " leaf revisions of document " ++ ?b2l(DocId)),
+    etap:diag("Adding " ++ ?i2l(NumAtts) ++
+        " attachments to each revision of the document " ++ ?b2l(DocId)),
+    NewDocs = lists:foldl(
+        fun(#doc{atts = Atts, revs = {Pos, [Rev | _]}} = Doc, Acc) ->
+            NewAtts = lists:foldl(
+                fun(I, AttAcc) ->
+                    AttData = crypto:rand_bytes(100),
+                    NewAtt = #att{
+                        name = iolist_to_binary(
+                            ["att_", ?i2l(I), "_", couch_doc:rev_to_str({Pos, Rev})]),
+                        type = <<"application/foobar">>,
+                        att_len = byte_size(AttData),
+                        data = AttData
+                    },
+                    [NewAtt | AttAcc]
+                end,
+                [], lists:seq(1, NumAtts)),
+            [Doc#doc{atts = Atts ++ NewAtts} | Acc]
+        end,
+        [], SourceDocs),
+    {ok, UpdateResults} = couch_db:update_docs(SourceDb, NewDocs, []),
+    NewRevs = [R || {ok, R} <- UpdateResults],
+    etap:is(
+        length(NewRevs),
+        length(NewDocs),
+        "Document revisions updated with " ++ ?i2l(NumAtts) ++ " attachments"),
+    add_attachments(SourceDb, Rest, NumAtts, [{DocId, NewRevs} | IdRevsAcc]).
+
+
+db_url(DbName) ->
+    iolist_to_binary([
+        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+        "/", DbName
+    ]).
+
+
+create_db(DbName) ->
+    couch_db:create(
+        DbName,
+        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
+
+
+delete_db(Db) ->
+    ok = couch_server:delete(
+        couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
+
+
+replicate({remote, Db}, Target) ->
+    replicate(db_url(Db), Target);
+
+replicate(Source, {remote, Db}) ->
+    replicate(Source, db_url(Db));
+
+replicate(Source, Target) ->
+    RepObject = {[
+        {<<"source">>, Source},
+        {<<"target">>, Target}
+    ]},
+    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+    {ok, Pid} = couch_replicator:async_replicate(Rep),
+    MonRef = erlang:monitor(process, Pid),
+    receive
+    {'DOWN', MonRef, process, Pid, Reason} ->
+        etap:is(Reason, normal, "Replication finished successfully")
+    after 900000 ->
+        etap:bail("Timeout waiting for replication to finish")
+    end.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couch_replicator/test/06-doc-missing-stubs.t
----------------------------------------------------------------------
diff --git a/apps/couch_replicator/test/06-doc-missing-stubs.t b/apps/couch_replicator/test/06-doc-missing-stubs.t
new file mode 100755
index 0000000..e17efc9
--- /dev/null
+++ b/apps/couch_replicator/test/06-doc-missing-stubs.t
@@ -0,0 +1,304 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Test replication of documents with attachments to a target database
+% whose revs limit is lower than the source's, so that older revisions
+% (and their attachment stubs) have been pruned on the target.
+% Motivated by COUCHDB-1365.
+
+-record(user_ctx, {
+    name = null,
+    roles = [],
+    handler
+}).
+
+-record(doc, {
+    id = <<"">>,
+    revs = {0, []},
+    body = {[]},
+    atts = [],
+    deleted = false,
+    meta = []
+}).
+
+-record(att, {
+    name,
+    type,
+    att_len,
+    disk_len,
+    md5= <<>>,
+    revpos=0,
+    data,
+    encoding=identity
+}).
+
+-define(b2l(B), binary_to_list(B)).
+
+source_db_name() -> <<"couch_test_rep_db_a">>.
+target_db_name() -> <<"couch_test_rep_db_b">>.
+
+target_revs_limit() -> 3.
+
+
+main(_) ->
+    test_util:init_code_path(),
+
+    etap:plan(128),
+    case (catch test()) of
+        ok ->
+            etap:end_tests();
+        Other ->
+            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+            etap:bail(Other)
+    end,
+    ok.
+
+
+% Test motivated by COUCHDB-1365.
+test() ->
+    couch_server_sup:start_link(test_util:config_files()),
+    ibrowse:start(),
+
+    Pairs = [
+        {source_db_name(), target_db_name()},
+        {{remote, source_db_name()}, target_db_name()},
+        {source_db_name(), {remote, target_db_name()}},
+        {{remote, source_db_name()}, {remote, (target_db_name())}}
+    ],
+
+    lists:foreach(
+        fun({Source, Target}) ->
+            {ok, SourceDb} = create_db(source_db_name()),
+            etap:diag("Populating source database"),
+            populate_db(SourceDb),
+            ok = couch_db:close(SourceDb),
+
+            etap:diag("Creating target database"),
+            {ok, TargetDb} = create_db(target_db_name()),
+            ok = couch_db:set_revs_limit(TargetDb, target_revs_limit()),
+            ok = couch_db:close(TargetDb),
+
+            etap:diag("Triggering replication"),
+            replicate(Source, Target),
+            etap:diag("Replication finished, comparing source and target databases"),
+            compare_dbs(SourceDb, TargetDb),
+
+            etap:diag("Updating source database docs"),
+            update_db_docs(couch_db:name(SourceDb), target_revs_limit() + 2),
+
+            etap:diag("Triggering replication again"),
+            replicate(Source, Target),
+            etap:diag("Replication finished, comparing source and target databases"),
+            compare_dbs(SourceDb, TargetDb),
+
+            etap:diag("Deleting databases"),
+            delete_db(TargetDb),
+            delete_db(SourceDb),
+            ok = timer:sleep(1000)
+        end,
+        Pairs),
+
+    couch_server_sup:stop(),
+    ok.
+
+
+populate_db(Db) ->
+    AttData = crypto:rand_bytes(6000),
+    Doc1 = #doc{
+        id = <<"doc1">>,
+        atts = [
+            #att{
+                name = <<"doc1_att1">>,
+                type = <<"application/foobar">>,
+                att_len = byte_size(AttData),
+                data = AttData
+            }
+        ]
+    },
+    {ok, _} = couch_db:update_doc(Db, Doc1, []).
+
+
+update_db_docs(DbName, Times) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, _, _} = couch_db:enum_docs(
+        Db,
+        fun(FDI, _, Acc) -> db_fold_fun(FDI, Acc) end,
+        {DbName, Times},
+        []),
+    ok = couch_db:close(Db).
+
+
+db_fold_fun(FullDocInfo, {DbName, Times}) ->
+    {ok, Db} = couch_db:open_int(DbName, []),
+    {ok, Doc} = couch_db:open_doc(Db, FullDocInfo),
+    lists:foldl(
+        fun(_, {Pos, RevId}) ->
+            {ok, Db2} = couch_db:reopen(Db),
+            NewDocVersion = Doc#doc{
+                revs = {Pos, [RevId]},
+                body = {[{<<"value">>, base64:encode(crypto:rand_bytes(100))}]}
+            },
+            {ok, NewRev} = couch_db:update_doc(Db2, NewDocVersion, []),
+            NewRev
+        end,
+        {element(1, Doc#doc.revs), hd(element(2, Doc#doc.revs))},
+        lists:seq(1, Times)),
+    ok = couch_db:close(Db),
+    {ok, {DbName, Times}}.
+
+
+compare_dbs(Source, Target) ->
+    {ok, SourceDb} = couch_db:open_int(couch_db:name(Source), []),
+    {ok, TargetDb} = couch_db:open_int(couch_db:name(Target), []),
+
+    Fun = fun(FullDocInfo, _, Acc) ->
+        {ok, DocSource} = couch_db:open_doc(
+            SourceDb, FullDocInfo, [conflicts, deleted_conflicts]),
+        Id = DocSource#doc.id,
+
+        etap:diag("Verifying document " ++ ?b2l(Id)),
+
+        {ok, DocTarget} = couch_db:open_doc(
+            TargetDb, Id, [conflicts, deleted_conflicts]),
+        etap:is(DocTarget#doc.body, DocSource#doc.body,
+            "Same body in source and target databases"),
+
+        etap:is(
+            couch_doc:to_json_obj(DocTarget, []),
+            couch_doc:to_json_obj(DocSource, []),
+            "Same doc body in source and target databases"),
+
+        #doc{atts = SourceAtts} = DocSource,
+        #doc{atts = TargetAtts} = DocTarget,
+        etap:is(
+            lists:sort([N || #att{name = N} <- SourceAtts]),
+            lists:sort([N || #att{name = N} <- TargetAtts]),
+            "Document has same number (and names) of attachments in "
+            "source and target databases"),
+
+        lists:foreach(
+            fun(#att{name = AttName} = Att) ->
+                etap:diag("Verifying attachment " ++ ?b2l(AttName)),
+
+                {ok, AttTarget} = find_att(TargetAtts, AttName),
+                SourceMd5 = att_md5(Att),
+                TargetMd5 = att_md5(AttTarget),
+                case AttName of
+                <<"att1">> ->
+                    etap:is(Att#att.encoding, gzip,
+                        "Attachment is gzip encoded in source database"),
+                    etap:is(AttTarget#att.encoding, gzip,
+                        "Attachment is gzip encoded in target database"),
+                    DecSourceMd5 = att_decoded_md5(Att),
+                    DecTargetMd5 = att_decoded_md5(AttTarget),
+                    etap:is(DecTargetMd5, DecSourceMd5,
+                        "Same identity content in source and target databases");
+                _ ->
+                    etap:is(Att#att.encoding, identity,
+                        "Attachment is not encoded in source database"),
+                    etap:is(AttTarget#att.encoding, identity,
+                        "Attachment is not encoded in target database")
+                end,
+                etap:is(TargetMd5, SourceMd5,
+                    "Same content in source and target databases"),
+                etap:is(is_integer(Att#att.disk_len), true,
+                    "#att.disk_len is an integer in source database"),
+                etap:is(is_integer(Att#att.att_len), true,
+                    "#att.att_len is an integer in source database"),
+                etap:is(is_integer(AttTarget#att.disk_len), true,
+                    "#att.disk_len is an integer in target database"),
+                etap:is(is_integer(AttTarget#att.att_len), true,
+                    "#att.att_len is an integer in target database"),
+                etap:is(Att#att.disk_len, AttTarget#att.disk_len,
+                    "Same identity length in source and target databases"),
+                etap:is(Att#att.att_len, AttTarget#att.att_len,
+                    "Same encoded length in source and target databases"),
+                etap:is(Att#att.type, AttTarget#att.type,
+                    "Same type in source and target databases"),
+                etap:is(Att#att.md5, SourceMd5, "Correct MD5 in source database"),
+                etap:is(AttTarget#att.md5, SourceMd5, "Correct MD5 in target database")
+            end,
+            SourceAtts),
+
+        {ok, Acc}
+    end,
+
+    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+    ok = couch_db:close(SourceDb),
+    ok = couch_db:close(TargetDb).
+
+
+find_att([], _Name) ->
+    nil;
+find_att([#att{name = Name} = Att | _], Name) ->
+    {ok, Att};
+find_att([_ | Rest], Name) ->
+    find_att(Rest, Name).
+
+
+att_md5(Att) ->
+    Md50 = couch_doc:att_foldl(
+        Att,
+        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+        couch_util:md5_init()),
+    couch_util:md5_final(Md50).
+
+att_decoded_md5(Att) ->
+    Md50 = couch_doc:att_foldl_decode(
+        Att,
+        fun(Chunk, Acc) -> couch_util:md5_update(Acc, Chunk) end,
+        couch_util:md5_init()),
+    couch_util:md5_final(Md50).
+
+
+db_url(DbName) ->
+    iolist_to_binary([
+        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+        "/", DbName
+    ]).
+
+
+create_db(DbName) ->
+    couch_db:create(
+        DbName,
+        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]).
+
+
+delete_db(Db) ->
+    ok = couch_server:delete(
+        couch_db:name(Db), [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]).
+
+
+replicate({remote, Db}, Target) ->
+    replicate(db_url(Db), Target);
+
+replicate(Source, {remote, Db}) ->
+    replicate(Source, db_url(Db));
+
+replicate(Source, Target) ->
+    RepObject = {[
+        {<<"source">>, Source},
+        {<<"target">>, Target}
+    ]},
+    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+    {ok, Pid} = couch_replicator:async_replicate(Rep),
+    MonRef = erlang:monitor(process, Pid),
+    receive
+    {'DOWN', MonRef, process, Pid, Reason} ->
+        etap:is(Reason, normal, "Replication finished successfully")
+    after 300000 ->
+        etap:bail("Timeout waiting for replication to finish")
+    end.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couch_replicator/test/07-use-checkpoints.t
----------------------------------------------------------------------
diff --git a/apps/couch_replicator/test/07-use-checkpoints.t b/apps/couch_replicator/test/07-use-checkpoints.t
new file mode 100755
index 0000000..cefc1a7
--- /dev/null
+++ b/apps/couch_replicator/test/07-use-checkpoints.t
@@ -0,0 +1,256 @@
+#!/usr/bin/env escript
+%% -*- erlang -*-
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Verify that replication produces a target database identical to the
+% source both when checkpoints are enabled (use_checkpoints = true) and
+% when they are disabled (use_checkpoints = false).
+
+-define(b2l(B), binary_to_list(B)).
+
+-record(user_ctx, {
+    name = null,
+    roles = [],
+    handler
+}).
+
+-record(doc, {
+    id = <<"">>,
+    revs = {0, []},
+    body = {[]},
+    atts = [],
+    deleted = false,
+    meta = []
+}).
+
+-record(db, {
+    main_pid = nil,
+    update_pid = nil,
+    compactor_pid = nil,
+    instance_start_time, % number of microsecs since jan 1 1970 as a binary string
+    fd,
+    updater_fd,
+    fd_ref_counter,
+    header = nil,
+    committed_update_seq,
+    fulldocinfo_by_id_btree,
+    docinfo_by_seq_btree,
+    local_docs_btree,
+    update_seq,
+    name,
+    filepath,
+    validate_doc_funs = [],
+    security = [],
+    security_ptr = nil,
+    user_ctx = #user_ctx{},
+    waiting_delayed_commit = nil,
+    revs_limit = 1000,
+    fsync_options = [],
+    options = [],
+    compression,
+    before_doc_update,
+    after_doc_read
+}).
+
+-record(rep, {
+    id,
+    source,
+    target,
+    options,
+    user_ctx,
+    doc_id
+}).
+
+
+source_db_name() -> <<"couch_test_rep_db_a">>.
+target_db_name() -> <<"couch_test_rep_db_b">>.
+
+
+main(_) ->
+    test_util:init_code_path(),
+
+    etap:plan(16),
+    case (catch test()) of
+        ok ->
+            etap:end_tests();
+        Other ->
+            etap:diag(io_lib:format("Test died abnormally: ~p", [Other])),
+            etap:bail(Other)
+    end,
+    ok.
+
+
+test() ->
+    couch_server_sup:start_link(test_util:config_files()),
+    ibrowse:start(),
+
+    test_use_checkpoints(false),
+    test_use_checkpoints(true),
+
+    couch_server_sup:stop(),
+    ok.
+
+
+test_use_checkpoints(UseCheckpoints) ->
+    Pairs = [
+        {source_db_name(), target_db_name()},
+        {{remote, source_db_name()}, target_db_name()},
+        {source_db_name(), {remote, target_db_name()}},
+        {{remote, source_db_name()}, {remote, (target_db_name())}}
+    ],
+
+    ListenerFun = case UseCheckpoints of
+    false ->
+        fun({finished, _, {CheckpointHistory}}) ->
+            etap:is(CheckpointHistory,
+            [{<<"use_checkpoints">>,false}],
+            "No checkpoints found");
+        (_) ->
+            ok
+        end;
+    true ->
+        fun({finished, _, {CheckpointHistory}}) ->
+            SessionId = lists:keyfind(
+                <<"session_id">>, 1, CheckpointHistory),
+            etap:isnt(SessionId, false, "There's a checkpoint");
+        (_) ->
+            ok
+        end
+    end,
+    {ok, Listener} = couch_replicator_notifier:start_link(ListenerFun),
+
+    lists:foreach(
+        fun({Source, Target}) ->
+            {ok, SourceDb} = create_db(source_db_name()),
+            etap:diag("Populating source database"),
+            populate_db(SourceDb, 100),
+            ok = couch_db:close(SourceDb),
+
+            etap:diag("Creating target database"),
+            {ok, TargetDb} = create_db(target_db_name()),
+            ok = couch_db:close(TargetDb),
+
+            etap:diag("Setup replicator notifier listener"),
+
+            etap:diag("Triggering replication"),
+            replicate(Source, Target, UseCheckpoints),
+
+            etap:diag("Replication finished, comparing source and target databases"),
+            compare_dbs(SourceDb, TargetDb),
+
+            etap:diag("Deleting databases"),
+            delete_db(TargetDb),
+            delete_db(SourceDb),
+
+            ok = timer:sleep(1000)
+        end,
+        Pairs),
+
+    couch_replicator_notifier:stop(Listener).
+
+
+populate_db(Db, DocCount) ->
+    Docs = lists:foldl(
+        fun(DocIdCounter, Acc) ->
+            Id = iolist_to_binary(["doc", integer_to_list(DocIdCounter)]),
+            Value = iolist_to_binary(["val", integer_to_list(DocIdCounter)]),
+            Doc = #doc{
+                id = Id,
+                body = {[ {<<"value">>, Value} ]}
+            },
+            [Doc | Acc]
+        end,
+        [], lists:seq(1, DocCount)),
+    {ok, _} = couch_db:update_docs(Db, Docs, []).
+
+
+compare_dbs(#db{name = SourceName}, #db{name = TargetName}) ->
+    {ok, SourceDb} = couch_db:open_int(SourceName, []),
+    {ok, TargetDb} = couch_db:open_int(TargetName, []),
+    Fun = fun(FullDocInfo, _, Acc) ->
+        {ok, Doc} = couch_db:open_doc(SourceDb, FullDocInfo),
+        {Props} = DocJson = couch_doc:to_json_obj(Doc, [attachments]),
+        DocId = couch_util:get_value(<<"_id">>, Props),
+        DocTarget = case couch_db:open_doc(TargetDb, DocId) of
+        {ok, DocT} ->
+            DocT;
+        Error ->
+            etap:bail("Error opening document '" ++ ?b2l(DocId) ++
+                "' from target: " ++ couch_util:to_list(Error))
+        end,
+        DocTargetJson = couch_doc:to_json_obj(DocTarget, [attachments]),
+        case DocTargetJson of
+        DocJson ->
+            ok;
+        _ ->
+            etap:bail("Content from document '" ++ ?b2l(DocId) ++
+                "' differs in target database")
+        end,
+        {ok, Acc}
+    end,
+    {ok, _, _} = couch_db:enum_docs(SourceDb, Fun, [], []),
+    etap:diag("Target database has the same documents as the source database"),
+    ok = couch_db:close(SourceDb),
+    ok = couch_db:close(TargetDb).
+
+
+db_url(DbName) ->
+    iolist_to_binary([
+        "http://", couch_config:get("httpd", "bind_address", "127.0.0.1"),
+        ":", integer_to_list(mochiweb_socket_server:get(couch_httpd, port)),
+        "/", DbName
+    ]).
+
+
+create_db(DbName) ->
+    {ok, Db} = couch_db:create(
+        DbName,
+        [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}, overwrite]),
+    couch_db:close(Db),
+    {ok, Db}.
+
+
+delete_db(#db{name = DbName, main_pid = Pid}) ->
+    ok = couch_server:delete(
+        DbName, [{user_ctx, #user_ctx{roles = [<<"_admin">>]}}]),
+    MonRef = erlang:monitor(process, Pid),
+    receive
+    {'DOWN', MonRef, process, Pid, _Reason} ->
+        ok
+    after 30000 ->
+        etap:bail("Timeout deleting database")
+    end.
+
+
+replicate({remote, Db}, Target, UseCheckpoints) ->
+    replicate(db_url(Db), Target, UseCheckpoints);
+
+replicate(Source, {remote, Db}, UseCheckpoints) ->
+    replicate(Source, db_url(Db), UseCheckpoints);
+
+replicate(Source, Target, UseCheckpoints) ->
+    RepObject = {[
+        {<<"source">>, Source},
+        {<<"target">>, Target},
+        {<<"use_checkpoints">>, UseCheckpoints}
+    ]},
+    {ok, Rep} = couch_replicator_utils:parse_rep_doc(
+        RepObject, #user_ctx{roles = [<<"_admin">>]}),
+    {ok, Pid} = couch_replicator:async_replicate(Rep),
+    MonRef = erlang:monitor(process, Pid),
+    receive
+    {'DOWN', MonRef, process, Pid, Reason} ->
+        etap:is(Reason, normal, "Replication finished successfully")
+    after 300000 ->
+        etap:bail("Timeout waiting for replication to finish")
+    end.

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/Makefile.am
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/Makefile.am b/apps/couchjs-node/Makefile.am
new file mode 100644
index 0000000..2144970
--- /dev/null
+++ b/apps/couchjs-node/Makefile.am
@@ -0,0 +1,24 @@
+## Licensed under the Apache License, Version 2.0 (the "License"); you may not
+## use this file except in compliance with the License. You may obtain a copy of
+## the License at
+##
+##   http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+## WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+## License for the specific language governing permissions and limitations under
+## the License.
+
+EXTRA_DIST = \
+  cli.js \
+  console.js \
+  couchdb.js \
+  couchjs.js \
+  inspector.js \
+  package.json \
+  README.md \
+  sandbox.js \
+  stream.js \
+  test/experiment.js \
+  xml.js

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/README.md
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/README.md b/apps/couchjs-node/README.md
new file mode 100644
index 0000000..551c3df
--- /dev/null
+++ b/apps/couchjs-node/README.md
@@ -0,0 +1,32 @@
+# CouchJS
+
+## Drop-in replacement JavaScript engine for Apache CouchDB
+
+CouchJS is a command-line Node.js program. It is 100% compatible with Apache CouchDB's built-in JavaScript system.
+
+By using CouchJS, you will get 100% CouchDB compatibility (the test suite completely passes) but your JavaScript environment is V8, or Node.js.
+
+See share/doc/src/experimental.rst for installation instructions.
+
+## Idea
+
+JavaScript is decoupled from the CouchDB core. To do JavaScript stuff, CouchDB runs a normal Unix subprocess, `couchjs`. This subprocess is just a read-eval-print loop on standard I/O. CouchDB passes `couchjs` a file name, and *that file* contains the view server implementation.
+
+This tool duplicates the "REPL" look and feel of `couchjs` and supports the exact same view server implementation.
+
+## Security
+
+I have no idea. I would not trust it for production use.
+
+## Log
+
+If you create a file, `/tmp/couchjs.log` then *couchjs* will output debugging messages there.
+
+## License
+
+Apache 2.0
+
+See the `LICENSE` file at the root of the repository for the full Apache 2.0 license text.
+
+[tap]: https://github.com/isaacs/node-tap
+[def]: https://github.com/iriscouch/defaultable

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/cli.js
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/cli.js b/apps/couchjs-node/cli.js
new file mode 100755
index 0000000..5447dd5
--- /dev/null
+++ b/apps/couchjs-node/cli.js
@@ -0,0 +1,89 @@
+#!/usr/bin/env node
+
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var fs = require('fs');
+var Fiber = require('fibers');
+var optimist = require('optimist');
+
+var couchjs = require('./couchjs');
+var package_json = require('./package.json');
+var LineStream = require('./stream');
+var inspector = require('./inspector');
+var log = require('./console').log;
+
+
+var opts = optimist.boolean(['h', 'V', 'H'])
+                   .describe({ 'h': 'display a short help message and exit',
+                             'V': 'display version information and exit',
+                             'H': 'enable couchjs cURL bindings (not implemented)'
+                             })
+                   .usage('$0 <path to main.js>');
+
+
+function main() {
+
+  'use strict';
+
+  var main_js = opts.argv._[0];
+
+  if (!main_js) {
+    return console.error(opts.help());
+  }
+
+  log('couchjs/%s %s: %s', package_json.version, process.pid, main_js);
+
+  if (process.env.COUCHJS_DEBUG_PORT) {
+    inspector(+process.env.COUCHJS_DEBUG_PORT);
+  }
+
+  fs.readFile(main_js, 'utf8', function(er, body) {
+    if (er) {
+      throw er;
+    }
+
+    var stdin = new LineStream.v2();
+
+    stdin.on('readable', function() {
+      var buf = stdin.read();
+
+      if (buf) {
+        couchjs.stdin(buf);
+      }
+    });
+
+    stdin.on('end', function() {
+      log('Terminate; connection to parent closed');
+      process.exit(0);
+    });
+
+    process.stdin.setEncoding('utf8');
+    process.stdin.pipe(stdin);
+
+    var main_func = Function(['print', 'readline', 'evalcx', 'gc', 'quit'], body);
+
+    log('Call main');
+
+    new Fiber(function() {
+      main_func(couchjs.print, couchjs.readline, couchjs.evalcx, couchjs.gc);
+    }).run();
+  });
+
+  process.on('uncaughtException', function(er) {
+    log('Error:\n%s', er.stack);
+  });
+}
+
+if (require.main === module) {
+  main();
+}

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/console.js
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/console.js b/apps/couchjs-node/console.js
new file mode 100644
index 0000000..18e7241
--- /dev/null
+++ b/apps/couchjs-node/console.js
@@ -0,0 +1,66 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var fs = require('fs');
+var util = require('util');
+
+var noop = function() {};
+
+module.exports = {};
+module.exports.log = noop;
+module.exports.debug = noop;
+module.exports.info = noop;
+module.exports.warn = noop;
+module.exports.error = noop;
+
+var LOG_PATH = '/tmp/couchjs.log';
+var stat = null;
+var LOG = null;
+
+try {
+  stat = fs.statSync(LOG_PATH);
+} catch(er) {}
+
+if (stat) {
+  LOG = fs.createWriteStream(LOG_PATH, {
+    'flags':'a'
+  });
+
+  var log = function () {
+    var str = util.format.apply(this, arguments);
+    LOG.write(str + '\n');
+  };
+
+  var on_err = function (er) {
+    module.exports.error('Uncaught error:\n%s', er.stack || er.message || JSON.stringify(er));
+
+    if (er.stack) {
+      er = ['fatal', 'unknown_error', er.stack];
+    }
+
+    process.stdout.write(JSON.stringify(er) + '\n');
+    process.exit(1);
+  };
+
+  module.exports.log = log;
+  module.exports.debug = log;
+  module.exports.info = log;
+  module.exports.warn = log;
+  module.exports.error = log;
+
+  process.on('exit', function() {
+    module.exports.log('Exit %d', process.pid);
+  });
+
+  process.on('uncaughtException', on_err);
+}
+

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/couchdb.js
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/couchdb.js b/apps/couchjs-node/couchdb.js
new file mode 100755
index 0000000..d420540
--- /dev/null
+++ b/apps/couchjs-node/couchdb.js
@@ -0,0 +1,29 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+
+var http = require('http');
+
+function handler(req, res) {
+  res.writeHead(200);
+  res.end('Hello: ' + req.url + '\n');
+}
+
+if (require.main === module) {
+  var http = require('http');
+  var server = http.createServer(handler);
+
+  server.listen(3000);
+  console.log('Listening on :3000');
+}
+
+module.exports = handler;

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/couchjs.js
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/couchjs.js b/apps/couchjs-node/couchjs.js
new file mode 100644
index 0000000..ccc1aa9
--- /dev/null
+++ b/apps/couchjs-node/couchjs.js
@@ -0,0 +1,133 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+
+
+var vm = require('vm');
+var Fiber = require('fibers');
+var util = require('util');
+
+var XML = require('./xml');
+var log = require('./console').log;
+
// Shared stdin state: `queue` holds lines not yet consumed by readline();
// `waiting` is the fiber currently blocked inside readline(), if any.
var INPUT = {
  'queue':[],
  'waiting':null
};
+
+
// Emit one protocol line to stdout for the CouchDB parent process, mirroring
// it to the debug log.  If the line parses as JSON of the form ["log", msg],
// the message is also logged; non-JSON lines are passed through silently.
function print(line) {
  log('STDOUT %s: %s', process.pid, line);
  process.stdout.write(line + '\n');

  var parsed;
  try {
    parsed = JSON.parse(line);
  } catch(er) {
    return;
  }

  if (parsed[0] === 'log') {
    log('LOG: %s', parsed[1]);
  }
}
+
// Receive one line from stdin: hand it directly to a fiber blocked in
// readline(), or queue it for the next readline() call.
function stdin (line) {
  log('STDIN %s: %s', process.pid, line.trim());

  var waiter = INPUT.waiting;
  if (waiter) {
    waiter.run(line);
  } else {
    INPUT.queue.push(line);
  }
}
+
// Return the next input line, blocking the current fiber if none is
// buffered yet.  Must be called from inside a Fiber.
function readline () {
  var line = INPUT.queue.shift();

  if (line) {
    return line;
  }

  // Nothing buffered: register ourselves as the waiter and suspend this
  // fiber.  stdin() will call INPUT.waiting.run(line), which makes
  // Fiber.yield() return that line here.
  INPUT.waiting = Fiber.current;
  line = Fiber.yield();
  INPUT.waiting = null;

  return line;
}
+
+
// Compile `source` (typically a CouchDB view function such as
// "function(doc) { emit(doc._id, 1) }") inside `sandbox` and return the
// resulting value.  An empty source, or a compile error, returns the
// sandbox itself.  Mutates sandbox by installing an XML shim if absent.
function evalcx (source, sandbox) {
  sandbox = sandbox || {};

  if (source === '') {
    return sandbox;
  }

  // Drop trailing semicolons so the parenthesized wrapper stays valid.
  var trimmed = source.replace(/;+$/, '');

  sandbox.XML = sandbox.XML || XML;
  var wrapped = '(' + trimmed + ')';

  try {
    // Random pseudo-filename so stack traces distinguish compiled units.
    var scriptId = Math.floor(Math.random() * 1000*1000);
    var compiled = vm.createScript(wrapped, '_couchdb:' + scriptId + '.js');
    return compiled.runInNewContext(sandbox);
  } catch (er) {
    log('Error making code: %s', er.stack);
    return sandbox;
  }
}
+
// Terminate the process, mirroring SpiderMonkey's quit() builtin.  A falsy
// or negative code is normalized to a positive exit status (0 becomes 1).
function quit(code) {
  var status = code || 1;
  if (status < 0) {
    status = -status;
  }

  process.exit(status);
}
+
+function gc() { }
+
+
+function toSource() {
+  if (typeof this === 'function') {
+    return '' + this;
+  }
+
+  if (this instanceof Error) {
+    return this.stack;
+  }
+
+  return util.inspect(this);
+}
+
// Provide SpiderMonkey-style toSource() on errors and functions.  The two
// toString assignments are effectively no-ops: Error.prototype.toString and
// Function.prototype.toString always exist natively, so the `||` keeps the
// built-in implementation.
Error.prototype.toSource = Error.prototype.toSource || toSource;
Error.prototype.toString = Error.prototype.toString || toSource;
Function.prototype.toSource = Function.prototype.toSource || toSource;
Function.prototype.toString = Function.prototype.toString || toSource;

// Public API of this module.
module.exports = {
  'print': print,
  'readline': readline,
  'stdin': stdin,
  'evalcx': evalcx,
  'quit': quit,
  'gc': gc
};
+

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/inspector.js
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/inspector.js b/apps/couchjs-node/inspector.js
new file mode 100755
index 0000000..48182e5
--- /dev/null
+++ b/apps/couchjs-node/inspector.js
@@ -0,0 +1,101 @@
+#!/usr/bin/env node
+
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+
+
+var cp = require('child_process');
+var log = require('./console').log;
+
// Tie an inspector child process to this one: mirror its output to the
// log, exit when it exits, and SIGTERM it when this process exits.
function watchInspector(child) {

  child.stdout.on('data', function(chunk) {
    log('Inspector STDOUT: %s', chunk);
  });

  child.stderr.on('data', function(chunk) {
    log('Inspector STDERR: %s', chunk);
  });

  child.on('exit', function(code, signal) {
    log('Inspector exited %d signal=%j', code, signal);
    process.exit(code);
  });

  process.on('exit', function() {
    log('Kill inspector upon exit: %d', child.pid);
    process.kill(child.pid, 'SIGTERM');
  });
}
+
// Spawn a copy of this script as an inspector child process, then enable
// the V8 debugger on the current process.
//   debugPort: TCP port for the V8 debug protocol; the web UI uses port+1.
// Throws if debugPort is missing or not a number.
function start (debugPort) {

  if (!debugPort || typeof debugPort !== 'number') {
    throw new Error('Need a listen debugPort');
  }

  var webPort = debugPort + 1;
  // Re-execute this same file; main() below handles the child role.
  var cmd = __filename;
  var args = [debugPort, webPort];
  var opts = {
    'cwd': __dirname,
    'stdio': 'pipe',
    'detached': false
  };

  log('Start inspector: %s %j %j', cmd, args, opts);

  var inspector = cp.spawn(cmd, args, opts);

  watchInspector(inspector);

  log('Enable remote debug pid=%d port=%d', process.pid, debugPort);

  process.debugPort = debugPort;
  // SIGUSR1 makes the V8 runtime start its debug listener on
  // process.debugPort.
  process.kill(process.pid, 'SIGUSR1');
}
+
// Child-process entry point: run a node-inspector DebugServer bridging the
// V8 debug port to a web UI port.  Arguments are supplied by start():
//   argv[2] = debugPort, argv[3] = webPort.
// Throws if either port is missing or zero.
function main() {
  var debugPort = +process.argv[2];
  var webPort = +process.argv[3];

  if (!debugPort || !webPort) {
    throw new Error('Bad arguments: need debugPort and webPort');
  }

  console.log('Start inspector debugPort=%j webPort=%j', debugPort, webPort);

  // node-inspector is a bundled project dependency (see package.json).
  var DebugServer = require('node-inspector/lib/debug-server');
  var server = new DebugServer();

  server.on('close', function() {
    console.log('Server closed');
    process.exit(0);
  });

  server.start({
    'webPort':webPort,
    'debugPort':debugPort
  });

  // Keep the child alive on unexpected errors; just report them.
  process.on('uncaughtException', function(er) {
    console.log('Error:\n%s', er.stack);
  });
}
+
// Callers require() this module and invoke start(debugPort).
module.exports = start;

// When spawned directly (see start()), run in the inspector-child role.
if (require.main === module) {
  main();
}

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/package.json
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/package.json b/apps/couchjs-node/package.json
new file mode 100644
index 0000000..445f3b9
--- /dev/null
+++ b/apps/couchjs-node/package.json
@@ -0,0 +1,40 @@
+{
+  "name": "couchjs",
+  "description": "Drop-in replacement for CouchDB JavaScript view server",
+  "keywords": [ "couchdb", "couchjs" ],
+  "version": "0.3.2",
+  "author": "Jason Smith <jhs@iriscouch.com> (http://www.iriscouch.com)",
+  "repository": {
+    "type":"git",
+    "url":"https://github.com/iriscouch/couchjs"
+  },
+  "engines": {
+    "node": ">= 0.8"
+  },
+  "main": "./couchjs.js",
+  "bin": {
+    "couchjs-node":"./cli.js"
+  },
+  "couchdb": {
+    "main":"./couchdb.js",
+    "vhosts": [
+      "127.0.0.1.xip.io"
+    ]
+  },
+  "bundledDependencies": [
+    "node-inspector"
+  ],
+  "dependencies": {
+    "optimist": "~0.3.4",
+    "async"   : "~0.2.5",
+    "mkdirp"  : "~0.3.4",
+    "fibers"  : "~1.0.0",
+    "request" : "~2.9.203",
+    "pushover": "~1.2.1",
+    "defaultable": "~0.7.2",
+    "node-inspector": "git://github.com/iriscouch/node-inspector#couchjs"
+  },
+  "devDependencies": {
+    "tap": "~0.2.5"
+  }
+}

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/sandbox.js
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/sandbox.js b/apps/couchjs-node/sandbox.js
new file mode 100644
index 0000000..5c18ef4
--- /dev/null
+++ b/apps/couchjs-node/sandbox.js
@@ -0,0 +1,47 @@
// from https://github.com/KlausTrainer/sandbox.js
// Run `src` — the source text of a zero-argument function — inside a frozen
// vm context built from `ctx`, returning the function's result.  When ctx
// supplies a `require`, it is replaced by a wrapper that only permits module
// names listed in `whitelist`.
exports.runInSandbox = function(src, ctx, whitelist) {
  var vm = require('vm');
  var sandbox;

  if (ctx && ctx.require) {

    whitelist = whitelist || [];

    var insecureRequire = ctx.require;
    var module = require('module');
    var oldModulePrototype = module.prototype;

    // Wrapper that enforces the whitelist and restores Module.prototype on
    // first use.  NOTE(review): if the sandboxed code never calls require,
    // the replacement prototype below is never restored — verify this is
    // acceptable to callers.
    var secureRequire = function(moduleName) {

      if (whitelist.indexOf(moduleName) === -1) {
        module.prototype = oldModulePrototype;

        throw new Error("'" + moduleName + "' is not whitelisted");

      } else {
        var requiredModule = insecureRequire(moduleName);

        module.prototype = oldModulePrototype;

        return requiredModule;
      }
    };

    // Swap in a minimal Module.prototype so freshly loaded modules also get
    // the secured require.
    module.prototype = {
      require: secureRequire,
      load: module.prototype.load,
      _compile: module.prototype._compile
    };

    module._cache = {};

    ctx.require = secureRequire;
    sandbox = Object.freeze(vm.createContext(ctx));
    // NOTE(review): vm.createContext(ctx) contextifies ctx itself, and it
    // was just frozen — in sloppy mode this assignment silently fails, so
    // ctx.require likely remains secureRequire.  Confirm intent.
    ctx.require = insecureRequire;

  } else {
    sandbox = Object.freeze(vm.createContext(ctx || {}));
  }

  return vm.createScript('(function() {"use strict"; return (' + src + ')()}())').runInContext(sandbox);
};

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/stream.js
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/stream.js b/apps/couchjs-node/stream.js
new file mode 100644
index 0000000..ebffc30
--- /dev/null
+++ b/apps/couchjs-node/stream.js
@@ -0,0 +1,115 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// Text line stream
+
+var stream = require('stream');
+var util = require('util');
+
+
// Legacy (streams1-style) line splitter: buffers written text and emits one
// 'data' event per complete line.  Superseded by LineStream2 below; method
// implementations and the util.inherits(LineStream, stream) call appear
// later in this file.
function LineStream() {

  var self = this;
  // require('stream') is itself the old Stream constructor.
  stream.call(self);

  self.readable = true;
  self.writable = true;

  // Partial line awaiting its terminating "\n".
  self.buffer = '';
  // NOTE(review): `downstream` is never referenced elsewhere in this file.
  self.downstream = null;

  // When something pipes into us, forward its 'end' event to our consumers.
  self.on('pipe', function(upstream) {
    upstream.on('end', function(data, encoding) {
      self.emit('end', data, encoding);
    });
  });
}
+
+function LineStream2() {
+
+  if (!(this instanceof LineStream2)) {
+    return new LineStream2();
+  }
+
+  stream.Transform.call(this);
+  this.setEncoding('utf8');
+}
+
+util.inherits(LineStream2, stream.Transform);
+
+LineStream2.prototype._transform = function(message, encoding, done) {
+  var self = this;
+
+  message = message.toString(encoding);
+  var lines = message.split(/\n/);
+
+  // If the data ends in "\n" this will be ""; otherwise the final partial line.
+  var remainder = lines.pop();
+  if (remainder) {
+    this.unshift(remainder);
+  }
+
+  lines.forEach(function(line) {
+    self.push(line);
+  });
+
+  done();
+};
+
+util.inherits(LineStream, stream);
+
+
+
// Accept a string chunk, split completed lines out of the internal buffer,
// and emit one 'data' event per line.  Non-string input goes to error().
LineStream.prototype.write = function(data) {
  var self = this;
  var chunk = data || '';

  if (typeof chunk !== 'string') {
    return self.error(new Error('Data was not a string: ' + util.inspect(chunk)));
  }

  self.buffer += chunk;
  var pieces = self.buffer.split(/\n/);

  // The last piece is "" when the buffer ended in "\n"; otherwise it is
  // the final partial line, kept for the next write.
  self.buffer = pieces.pop();

  pieces.forEach(function(piece) {
    self.emit('data', piece);
  });
};
+
+
// End the writable side of the stream.
LineStream.prototype.end = function(data) {
  // Mark the stream as ending and refuse further writes.
  this.is_ending = true;
  this.writable = false;

  // Always call write, even with no data, so any buffered text is
  // processed on the way out.
  this.write(data);
};
+
+
// Enter the error state: mark the stream dead and emit 'error'.
// Returns false so write() can surface failure through its return value.
LineStream.prototype.error = function(er) {
  this.readable = false;
  this.writable = false;
  this.emit('error', er);

  return false;
};
+
+
// Default export is the legacy LineStream; the Transform-based rewrite is
// exposed as .v2.
module.exports = LineStream;
module.exports.v2 = LineStream2;
+

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/test/experiment.js
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/test/experiment.js b/apps/couchjs-node/test/experiment.js
new file mode 100644
index 0000000..9197ec6
--- /dev/null
+++ b/apps/couchjs-node/test/experiment.js
@@ -0,0 +1,125 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+var vm = require('vm');
+var util = require('util');
+
// Experiment selector; note stuff() overwrites this to 'vm' on every call.
var STATE = 'wait';

// Enable the V8 debugger on this process (SIGUSR1 makes the runtime listen
// on process.debugPort), then kick off the experiment loop after 1s.
function main() {
  process.debugPort = 5859;
  process.kill(process.pid, 'SIGUSR1');

  setTimeout(function() {
    stuff(0);
  }, 1000);
}
+
// Dispatch on STATE to pick an experiment.  NOTE(review): STATE is
// unconditionally set to 'vm' just below, so only the test_vm() branch is
// reachable once this runs; the other branches appear to be manual toggles
// for experimentation.
function stuff(count) {

  console.log('Doing stuff: %d', count);
  STATE = 'vm';
  console.log('More stuff: %d', count);

  if (STATE === 'done') {
    console.log('Done');
  } else if (STATE === 'code') {
    setTimeout(code, 1000);
  } else if(STATE === 'eval') {
    test_eval();
  } else if(STATE === 'vm') {
    test_vm();
  } else if(STATE === 'wait') {
    // Poll again in 1s with an incremented counter.
    setTimeout(function() {
      stuff(count+1);
    }, 1000);
  } else {
    throw new Error('Unknown state: ' + STATE);
  }
}
+
// Experiment: build a function from source text with the Function
// constructor and hit `debugger` statements both outside and inside it.
function code() {
  var code = [
    'var foo = "in the code"',
    'console.log("This is some code")',
    'debugger',
    'console.log("foo = " + foo)'
    ].join('\n');

  // Function([], body) compiles `code` into a callable with no arguments.
  var runner = Function([], code);

  console.log('Run runner in 1s');

  setTimeout(run_runner, 1000)

  function run_runner() {
    console.log('About to run runner');
    debugger;
    runner();
    console.log('Runner done');
  }
}
+
// Experiment: run source text through eval() after a delay, with `debugger`
// statements before and inside the evaluated code.
function test_eval() {
  console.log('Test eval in 1s');
  setTimeout(run_eval, 1000);

  var code = [
    'var foo = "in eval"',
    'console.log("This is eval")',
    'debugger',
    'console.log("foo = " + foo)'
    ].join('\n');

  function run_eval() {
    console.log('Run eval now');
    debugger;
    // Direct eval: `code` sees and can create locals in this scope.
    eval(code);
  }
}
+
// Experiment: execute a self-rescheduling countdown script inside a vm
// context that exposes only console and setTimeout.  Returns the script's
// completion value (the result of its last evaluated statement).
function test_vm() {
  console.log('Test vm');

  var code = [
    'var i = 10',
    'setTimeout(hello, 1000)',
    '',
    'function hello() {',
    '  debugger',
    '  console.log("Hello: " + i)',
    '  if(--i)',
    '    setTimeout(hello, 1000)',
    '}'
  ].join('\n');

  console.log('Run vm now');
  var filename = '_couchdb:code.js';

  // Expose only a whitelisted subset of globals to the sandboxed code.
  var sandbox = {};
  var ok = ['console', 'setTimeout'];

  ok.forEach(function(key) {
    sandbox[key] = global[key];
  });

  // Dead code removed: the original also called vm.createContext(sandbox)
  // and bound it to an unused `ctx` — runInNewContext() below contextifies
  // `sandbox` itself, so that call had no observable effect.
  var script = vm.createScript(code, filename);

  var r = script.runInNewContext(sandbox);
  console.log('Result:\n%s', util.inspect(r, false, 10));
  return r;
}
+
// Run the experiments only when executed directly (not when require()d).
if (require.main === module) {
  main();
}

http://git-wip-us.apache.org/repos/asf/couchdb/blob/add91738/apps/couchjs-node/xml.js
----------------------------------------------------------------------
diff --git a/apps/couchjs-node/xml.js b/apps/couchjs-node/xml.js
new file mode 100644
index 0000000..b64d221
--- /dev/null
+++ b/apps/couchjs-node/xml.js
@@ -0,0 +1,22 @@
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+
// Minimal placeholder for the E4X-style XML object that SpiderMonkey-based
// couchjs exposed to view code; only toXMLString() is implemented.
function XML () {
  this.foo = 'bar';
}

// Return a fixed, well-formed XML snippet (placeholder output).
XML.prototype.toXMLString = function toXMLString() {
  return '<xml>\n  <title>test</title>\n</xml>';
};

module.exports = XML;


Mime
View raw message