couchdb-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cml...@apache.org
Subject svn commit: r642432 [14/16] - in /incubator/couchdb/trunk: ./ bin/ build-contrib/ etc/ etc/conf/ etc/default/ etc/init/ etc/launchd/ etc/logrotate.d/ share/ share/server/ share/www/ share/www/browse/ share/www/image/ share/www/script/ share/www/style/ ...
Date Fri, 28 Mar 2008 23:32:30 GMT
Added: incubator/couchdb/trunk/src/couchdb/cjson.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/couchdb/cjson.erl?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/couchdb/cjson.erl (added)
+++ incubator/couchdb/trunk/src/couchdb/cjson.erl Fri Mar 28 16:32:19 2008
@@ -0,0 +1,567 @@
+%% @author Bob Ippolito <bob@mochimedia.com>
+%% @copyright 2006 Mochi Media, Inc.
+%%
+%% Permission is hereby granted, free of charge, to any person
+%% obtaining a copy of this software and associated documentation
+%% files (the "Software"), to deal in the Software without restriction,
+%% including without limitation the rights to use, copy, modify, merge,
+%% publish, distribute, sublicense, and/or sell copies of the Software,
+%% and to permit persons to whom the Software is furnished to do
+%% so, subject to the following conditions:
+%%
+%% The above copyright notice and this permission notice shall be included
+%% in all copies or substantial portions of the Software.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+%% EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+%% MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+%% IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+%% CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+%% TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+%% SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+%% @doc Yet another JSON (RFC 4627) library for Erlang.
+
+-module(cjson).
+-author('bob@mochimedia.com').
+-export([encoder/1, encode/1]).
+-export([decoder/1, decode/1]).
+-export([test/0]).
+
+%
+% NOTE: This file was originally mochijson.erl and has been adapted for
+% use with CouchDB.
+%
+% The changes are:
+%   {array, [...]}
+% is now
+%   {...}
+% and:
+%   {struct, [...]}
+% is now
+%   {obj, [...]}
+%
+
+% This is a macro to placate syntax highlighters..
+-define(Q, $\").
+-define(ADV_COL(S, N), S#decoder{column=N+S#decoder.column}).
+-define(INC_COL(S), S#decoder{column=1+S#decoder.column}).
+-define(INC_LINE(S), S#decoder{column=1, line=1+S#decoder.line}).
+
+%% @type iolist() = [char() | binary() | iolist()]
+%% @type iodata() = iolist() | binary()
+%% @type json_string() = atom | string() | binary()
+%% @type json_number() = integer() | float()
+%% @type json_array() = {json_term()}
+%% @type json_object() = {struct, [{json_string(), json_term()}]}
+%% @type json_term() = json_string() | json_number() | json_array() |
+%%                     json_object()
+%% @type encoding() = utf8 | unicode
+%% @type encoder_option() = {input_encoding, encoding()} |
+%%                          {handler, function()}
+%% @type decoder_option() = {input_encoding, encoding()} |
+%%                          {object_hook, function()}
+
+-record(encoder, {input_encoding=utf8,
+          handler=null}).
+
+-record(decoder, {input_encoding=utf8,
+          object_hook=null,
+          line=1,
+          column=1,
+          state=null}).
+
+%% @spec encoder([encoder_option()]) -> function()
+%% @doc Create an encoder/1 with the given options.
+%% Returns a closure over the option-derived #encoder{} state
+%% (input encoding and fallback handler).
+encoder(Options) ->
+    State = parse_encoder_options(Options, #encoder{}),
+    fun (O) -> json_encode(O, State) end.
+
+%% @spec encode(json_term()) -> iolist()
+%% @doc Encode the given term as JSON to an iolist.
+%% Convenience wrapper using the default #encoder{} (utf8, no handler).
+encode(Any) ->
+    json_encode(Any, #encoder{}).
+
+%% @spec decoder([decoder_option()]) -> function()
+%% @doc Create a decoder/1 with the given options.
+%% Returns a closure over the option-derived #decoder{} state
+%% (input encoding and object hook).
+decoder(Options) ->
+    State = parse_decoder_options(Options, #decoder{}),
+    fun (O) -> json_decode(O, State) end.
+
+%% @spec decode(iolist()) -> json_term()
+%% @doc Decode the given iolist to Erlang terms.
+%% Convenience wrapper using the default #decoder{} (utf8, no hook).
+decode(S) ->
+    json_decode(S, #decoder{}).
+
+%% @doc Run the built-in round-trip test vectors (see test_all/0).
+test() ->
+    test_all().
+
+%% Internal API
+
+%% Fold the option proplist into an #encoder{} record.
+parse_encoder_options([], State) ->
+    State;
+parse_encoder_options([{input_encoding, Encoding} | Rest], State) ->
+    parse_encoder_options(Rest, State#encoder{input_encoding=Encoding});
+parse_encoder_options([{handler, Handler} | Rest], State) ->
+    parse_encoder_options(Rest, State#encoder{handler=Handler}).
+
+%% Fold the option proplist into a #decoder{} record.
+parse_decoder_options([], State) ->
+    State;
+parse_decoder_options([{input_encoding, Encoding} | Rest], State) ->
+    parse_decoder_options(Rest, State#decoder{input_encoding=Encoding});
+parse_decoder_options([{object_hook, Hook} | Rest], State) ->
+    parse_decoder_options(Rest, State#decoder{object_hook=Hook}).
+
+
+%% Format a float compactly by post-processing float_to_list/1 output:
+%% strip redundant trailing mantissa zeros and a zero exponent.
+format_float(F) ->
+    format_float1(lists:reverse(float_to_list(F)), []).
+
+%% Works on the reversed character list.  The first clause matches a
+%% reversed "e+00"/"e-00" tail and drops the exponent entirely; the
+%% second keeps a nonzero exponent (sign included) and strips the
+%% mantissa's trailing zeros.
+format_float1([$0, $0, _, $e | Rest], []) ->
+    strip_zeros(Rest, []);
+format_float1([Sign, $e | Rest], Acc) ->
+    strip_zeros(Rest, [$e, Sign | Acc]);
+format_float1([C | Rest], Acc) ->
+    format_float1(Rest, [C | Acc]).
+
+%% Drop trailing zeros from the (still reversed) mantissa, but keep the
+%% final "x.0" zero so the result remains a valid float literal.
+strip_zeros(L=[$0, $. | _], Acc) ->
+    lists:reverse(L, Acc);
+strip_zeros([$0 | Rest], Acc) ->
+    strip_zeros(Rest, Acc);
+strip_zeros(L, Acc) ->
+    lists:reverse(L, Acc).
+
+%% Dispatch on the Erlang term's type and emit the corresponding JSON
+%% text as an iolist.  {obj, Props} is an object, any other tuple is an
+%% array (CouchDB convention); unknown terms go to the user handler if
+%% one was configured, otherwise we exit.
+json_encode(true, _State) ->
+    "true";
+json_encode(false, _State) ->
+    "false";
+json_encode(null, _State) ->
+    "null";
+json_encode(I, _State) when is_integer(I) ->
+    integer_to_list(I);
+json_encode(F, _State) when is_float(F) ->
+    format_float(F);
+json_encode(L, State) when is_list(L); is_binary(L); is_atom(L) ->
+    json_encode_string(L, State);
+json_encode({obj, Props}, State) when is_list(Props) ->
+    json_encode_proplist(Props, State);
+json_encode(Array, State) when is_tuple(Array) ->
+    json_encode_array(Array, State);
+json_encode(Bad, #encoder{handler=null}) ->
+    exit({json_encode, {bad_term, Bad}});
+json_encode(Bad, State=#encoder{handler=Handler}) ->
+    % Let the user handler convert the term, then retry encoding.
+    json_encode(Handler(Bad), State).
+
+%% Encode a tuple as a JSON array.  The fold prepends ",Elem" per
+%% element onto a reversed accumulator seeded with "["; the leading
+%% comma is then dropped and "]" appended by the final reverse.
+json_encode_array({}, _State) ->
+    "[]";
+json_encode_array(Tuple, State) ->
+    F = fun (O, Acc) ->
+        [$,, json_encode(O, State) | Acc]
+    end,
+    [$, | Acc1] = lists:foldl(F, "[", tuple_to_list(Tuple)),
+    lists:reverse([$\] | Acc1]).
+
+%% Encode an {obj, Props} proplist as a JSON object.  Keys may be
+%% atoms, integers, lists, or binaries; all are rendered as JSON
+%% strings.  Uses the same reversed-accumulator trick as the array
+%% encoder above.
+json_encode_proplist([], _State) ->
+    "{}";
+json_encode_proplist(Props, State) ->
+    F = fun ({K, V}, Acc) ->
+        KS = case K of
+             K when is_atom(K) ->
+                 json_encode_string_utf8(atom_to_list(K), [?Q]);
+             K when is_integer(K) ->
+                 json_encode_string(integer_to_list(K), State);
+             K when is_list(K); is_binary(K) ->
+                 json_encode_string(K, State)
+             end,
+        VS = json_encode(V, State),
+        [$,, VS, $:, KS | Acc]
+    end,
+    [$, | Acc1] = lists:foldl(F, "{", Props),
+    lists:reverse([$\} | Acc1]).
+
+%% Encode a string-like term as a quoted JSON string.  Atoms and
+%% binaries are assumed to hold UTF-8 and are expanded to unicode code
+%% points first; plain lists are routed by the configured input
+%% encoding.  The [?Q] accumulator seeds the opening quote.
+json_encode_string(A, _State) when is_atom(A) ->
+    json_encode_string_unicode(xmerl_ucs:from_utf8(atom_to_list(A)), [?Q]);
+json_encode_string(B, _State) when is_binary(B) ->
+    json_encode_string_unicode(xmerl_ucs:from_utf8(B), [?Q]);
+json_encode_string(S, #encoder{input_encoding=utf8}) ->
+    json_encode_string_utf8(S, [?Q]);
+json_encode_string(S, #encoder{input_encoding=unicode}) ->
+    json_encode_string_unicode(S, [?Q]).
+
+%% Encode the body of a string given as UTF-8 bytes.  On the first byte
+%% >= 16#7f the remaining input is expanded to unicode code points and
+%% handed to the unicode encoder; ASCII characters are escaped per the
+%% JSON grammar ('/' is escaped too, which is optional but valid).
+json_encode_string_utf8([], Acc) ->
+    lists:reverse([$\" | Acc]);
+json_encode_string_utf8(All=[C | Cs], Acc) ->
+    case C of
+    C when C >= 16#7f ->
+        json_encode_string_unicode(xmerl_ucs:from_utf8(All), Acc);
+    _ ->
+        Acc1 = case C of
+               ?Q ->
+               [?Q, $\\ | Acc];
+               $/ ->
+               [$/, $\\ | Acc];
+               $\\ ->
+               [$\\, $\\ | Acc];
+               $\b ->
+               [$b, $\\ | Acc];
+               $\f ->
+               [$f, $\\ | Acc];
+               $\n ->
+               [$n, $\\ | Acc];
+               $\r ->
+               [$r, $\\ | Acc];
+               $\t ->
+               [$t, $\\ | Acc];
+               C when C >= 0, C < $\s ->
+               % remaining control characters use \uXXXX form
+               [unihex(C) | Acc];
+               C when C >= $\s ->
+               [C | Acc];
+               _ ->
+               exit({json_encode, {bad_char, C}})
+           end,
+        json_encode_string_utf8(Cs, Acc1)
+    end.
+
+%% Encode the body of a string given as unicode code points.  Control
+%% characters and anything >= 16#7f are emitted as \uXXXX (surrogate
+%% pairs above the BMP, see unihex/1); printable ASCII passes through.
+json_encode_string_unicode([], Acc) ->
+    lists:reverse([$\" | Acc]);
+json_encode_string_unicode([C | Cs], Acc) ->
+    Acc1 = case C of
+           ?Q ->
+           [?Q, $\\ | Acc];
+           $/ ->
+           [$/, $\\ | Acc];
+           $\\ ->
+           [$\\, $\\ | Acc];
+           $\b ->
+           [$b, $\\ | Acc];
+           $\f ->
+           [$f, $\\ | Acc];
+           $\n ->
+           [$n, $\\ | Acc];
+           $\r ->
+           [$r, $\\ | Acc];
+           $\t ->
+           [$t, $\\ | Acc];
+           C when C >= 0, C < $\s; C >= 16#7f, C =< 16#10FFFF ->
+           [unihex(C) | Acc];
+           C when C < 16#7f ->
+           [C | Acc];
+           _ ->
+           exit({json_encode, {bad_char, C}})
+       end,
+    json_encode_string_unicode(Cs, Acc1).
+
+%% Hex digit character -> value 0..15 (accepts 0-9, a-f, A-F).
+dehex(C) when C >= $0, C =< $9 ->
+    C - $0;
+dehex(C) when C >= $a, C =< $f ->
+    C - $a + 10;
+dehex(C) when C >= $A, C =< $F ->
+    C - $A + 10.
+
+%% Value 0..15 -> lowercase hex digit character.
+hexdigit(C) when C >= 0, C =< 9 ->
+    C + $0;
+hexdigit(C) when C =< 15 ->
+    C + $a - 10.
+
+%% Render a code point as a JSON \uXXXX escape.  BMP code points take
+%% one escape; code points above 16#FFFF are split into a UTF-16
+%% surrogate pair and rendered as two escapes.
+unihex(C) when C < 16#10000 ->
+    <<D3:4, D2:4, D1:4, D0:4>> = <<C:16>>,
+    Digits = [hexdigit(D) || D <- [D3, D2, D1, D0]],
+    [$\\, $u | Digits];
+unihex(C) when C =< 16#10FFFF ->
+    N = C - 16#10000,
+    S1 = 16#d800 bor ((N bsr 10) band 16#3ff),
+    S2 = 16#dc00 bor (N band 16#3ff),
+    [unihex(S1), unihex(S2)].
+
+%% Decoder entry point: wrap a binary in a list so tokenize/2 sees an
+%% iolist, decode exactly one top-level value, then assert that only
+%% trailing whitespace remains ({eof, [], _} in trim state).
+json_decode(B, S) when is_binary(B) ->
+    json_decode([B], S);
+json_decode(L, S) ->
+    {Res, L1, S1} = decode1(L, S),
+    {eof, [], _} = tokenize(L1, S1#decoder{state=trim}),
+    Res.
+
+%% Decode a single JSON value starting at the next token.
+decode1(L, S=#decoder{state=null}) ->
+    case tokenize(L, S#decoder{state=any}) of
+    {{const, C}, L1, S1} ->
+        {C, L1, S1};
+    {start_array, L1, S1} ->
+        decode_array(L1, S1#decoder{state=any}, []);
+    {start_object, L1, S1} ->
+        decode_object(L1, S1#decoder{state=key}, [])
+    end.
+
+%% Apply the user-supplied object_hook (if any) to a finished object.
+make_object(V, #decoder{object_hook=null}) ->
+    V;
+make_object(V, #decoder{object_hook=Hook}) ->
+    Hook(V).
+
+%% Parse the members of a JSON object ('{' already consumed).  The
+%% decoder state alternates between 'key' (expect a string key or '}')
+%% and 'comma' (expect ',' or '}').  Members accumulate reversed in Acc
+%% and are reversed when the object closes.
+decode_object(L, S=#decoder{state=key}, Acc) ->
+    case tokenize(L, S) of
+    {end_object, Rest, S1} ->
+        V = make_object({obj, lists:reverse(Acc)}, S1),
+        {V, Rest, S1#decoder{state=null}};
+    {{const, K}, Rest, S1} when is_list(K) ->
+        {colon, L2, S2} = tokenize(Rest, S1),
+        {V, L3, S3} = decode1(L2, S2#decoder{state=null}),
+        decode_object(L3, S3#decoder{state=comma}, [{K, V} | Acc])
+    end;
+decode_object(L, S=#decoder{state=comma}, Acc) ->
+    case tokenize(L, S) of
+    {end_object, Rest, S1} ->
+        V = make_object({obj, lists:reverse(Acc)}, S1),
+        {V, Rest, S1#decoder{state=null}};
+    {comma, Rest, S1} ->
+        decode_object(Rest, S1#decoder{state=key}, Acc)
+    end.
+
+%% Parse the elements of a JSON array ('[' already consumed), returning
+%% them as a tuple (the CouchDB array representation).  State 'any'
+%% expects an element or ']'; 'comma' expects ',' or ']'.
+decode_array(L, S=#decoder{state=any}, Acc) ->
+    case tokenize(L, S) of
+    {end_array, Rest, S1} ->
+        {list_to_tuple(lists:reverse(Acc)), Rest, S1#decoder{state=null}};
+    {start_array, Rest, S1} ->
+        {Array, Rest1, S2} = decode_array(Rest, S1#decoder{state=any}, []),
+        decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
+    {start_object, Rest, S1} ->
+        {Array, Rest1, S2} = decode_object(Rest, S1#decoder{state=key}, []),
+        decode_array(Rest1, S2#decoder{state=comma}, [Array | Acc]);
+    {{const, Const}, Rest, S1} ->
+        decode_array(Rest, S1#decoder{state=comma}, [Const | Acc])
+    end;
+decode_array(L, S=#decoder{state=comma}, Acc) ->
+    case tokenize(L, S) of
+    {end_array, Rest, S1} ->
+        {list_to_tuple(lists:reverse(Acc)), Rest, S1#decoder{state=null}};
+    {comma, Rest, S1} ->
+        decode_array(Rest, S1#decoder{state=any}, Acc)
+    end.
+
+%% Tokenize the body of a JSON string (the opening quote has already
+%% been consumed).  Decoded characters accumulate reversed in Acc;
+%% returns {String, Rest, State1} when the closing quote is reached.
+%% Input that is not yet a flat unicode list is first flattened and
+%% expanded from UTF-8.
+tokenize_string(IoList=[C | _], S=#decoder{input_encoding=utf8}, Acc)
+  when is_list(C); is_binary(C); C >= 16#7f ->
+    List = xmerl_ucs:from_utf8(list_to_binary(lists:flatten(IoList))),
+    tokenize_string(List, S#decoder{input_encoding=unicode}, Acc);
+tokenize_string("\"" ++ Rest, S, Acc) ->
+    {lists:reverse(Acc), Rest, ?INC_COL(S)};
+tokenize_string("\\\"" ++ Rest, S, Acc) ->
+    tokenize_string(Rest, ?ADV_COL(S, 2), [$\" | Acc]);
+tokenize_string("\\\\" ++ Rest, S, Acc) ->
+    tokenize_string(Rest, ?ADV_COL(S, 2), [$\\ | Acc]);
+tokenize_string("\\/" ++ Rest, S, Acc) ->
+    tokenize_string(Rest, ?ADV_COL(S, 2), [$/ | Acc]);
+tokenize_string("\\b" ++ Rest, S, Acc) ->
+    tokenize_string(Rest, ?ADV_COL(S, 2), [$\b | Acc]);
+tokenize_string("\\f" ++ Rest, S, Acc) ->
+    % Fixed: the "\f" escape must decode to form feed ($\f); the
+    % previous code pushed a backslash ($\\) instead.
+    tokenize_string(Rest, ?ADV_COL(S, 2), [$\f | Acc]);
+tokenize_string("\\n" ++ Rest, S, Acc) ->
+    tokenize_string(Rest, ?ADV_COL(S, 2), [$\n | Acc]);
+tokenize_string("\\r" ++ Rest, S, Acc) ->
+    tokenize_string(Rest, ?ADV_COL(S, 2), [$\r | Acc]);
+tokenize_string("\\t" ++ Rest, S, Acc) ->
+    tokenize_string(Rest, ?ADV_COL(S, 2), [$\t | Acc]);
+tokenize_string([$\\, $u, C3, C2, C1, C0 | Rest], S, Acc) ->
+    % coalesce UTF-16 surrogate pair?
+    C = dehex(C0) bor
+    (dehex(C1) bsl 4) bor
+    (dehex(C2) bsl 8) bor
+    (dehex(C3) bsl 12),
+    tokenize_string(Rest, ?ADV_COL(S, 6), [C | Acc]);
+tokenize_string([C | Rest], S, Acc) when C >= $\s; C < 16#10FFFF ->
+    % NOTE(review): this guard uses ';' (OR), so any C < 16#10FFFF
+    % passes, including unescaped control characters below $\s.
+    % Upstream mochijson has the same guard; confirm whether stricter
+    % RFC 4627 validation is wanted before tightening it.
+    tokenize_string(Rest, ?ADV_COL(S, 1), [C | Acc]).
+
+%% Number lexer written as a small state machine; Mode is one of
+%% sign, int, int1, frac, frac1, esign, eint, eint1.  Digits accumulate
+%% reversed in Acc.  Plain integers yield {int, Digits}; anything with
+%% a fraction or exponent yields {float, Digits}.  Non-unicode input is
+%% flattened and expanded from UTF-8 first.
+tokenize_number(IoList=[C | _], Mode, S=#decoder{input_encoding=utf8}, Acc)
+  when is_list(C); is_binary(C); C >= 16#7f ->
+    List = xmerl_ucs:from_utf8(list_to_binary(lists:flatten(IoList))),
+    tokenize_number(List, Mode, S#decoder{input_encoding=unicode}, Acc);
+tokenize_number([$- | Rest], sign, S, []) ->
+    tokenize_number(Rest, int, ?INC_COL(S), [$-]);
+tokenize_number(Rest, sign, S, []) ->
+    tokenize_number(Rest, int, S, []);
+tokenize_number([$0 | Rest], int, S, Acc) ->
+    % a leading zero may not be followed by more integer digits
+    tokenize_number(Rest, frac, ?INC_COL(S), [$0 | Acc]);
+tokenize_number([C | Rest], int, S, Acc) when C >= $1, C =< $9 ->
+    tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
+tokenize_number([C | Rest], int1, S, Acc) when C >= $0, C =< $9 ->
+    tokenize_number(Rest, int1, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, int1, S, Acc) ->
+    tokenize_number(Rest, frac, S, Acc);
+tokenize_number([$., C | Rest], frac, S, Acc) when C >= $0, C =< $9 ->
+    tokenize_number(Rest, frac1, ?ADV_COL(S, 2), [C, $. | Acc]);
+tokenize_number([E | Rest], frac, S, Acc) when E == $e; E == $E ->
+    % inject ".0" so list_to_float/1 accepts forms like "1e6"
+    tokenize_number(Rest, esign, ?INC_COL(S), [$e, $0, $. | Acc]);
+tokenize_number(Rest, frac, S, Acc) ->
+    {{int, lists:reverse(Acc)}, Rest, S};
+tokenize_number([C | Rest], frac1, S, Acc) when C >= $0, C =< $9 ->
+    tokenize_number(Rest, frac1, ?INC_COL(S), [C | Acc]);
+tokenize_number([E | Rest], frac1, S, Acc) when E == $e; E == $E ->
+    tokenize_number(Rest, esign, ?INC_COL(S), [$e | Acc]);
+tokenize_number(Rest, frac1, S, Acc) ->
+    {{float, lists:reverse(Acc)}, Rest, S};
+tokenize_number([C | Rest], esign, S, Acc) when C == $-; C == $+ ->
+    tokenize_number(Rest, eint, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, esign, S, Acc) ->
+    tokenize_number(Rest, eint, S, Acc);
+tokenize_number([C | Rest], eint, S, Acc) when C >= $0, C =< $9 ->
+    tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
+tokenize_number([C | Rest], eint1, S, Acc) when C >= $0, C =< $9 ->
+    tokenize_number(Rest, eint1, ?INC_COL(S), [C | Acc]);
+tokenize_number(Rest, eint1, S, Acc) ->
+    {{float, lists:reverse(Acc)}, Rest, S}.
+
+%% Lexer: return {Token, RemainingInput, State1}.  Nested iolist parts
+%% are flattened lazily (lists spliced in, binaries expanded from
+%% UTF-8); line/column counters in #decoder{} are kept for all input.
+tokenize([], S=#decoder{state=trim}) ->
+    {eof, [], S};
+tokenize([L | Rest], S) when is_list(L) ->
+    tokenize(L ++ Rest, S);
+tokenize([B | Rest], S) when is_binary(B) ->
+    tokenize(xmerl_ucs:from_utf8(B) ++ Rest, S);
+tokenize("\r\n" ++ Rest, S) ->
+    tokenize(Rest, ?INC_LINE(S));
+tokenize("\n" ++ Rest, S) ->
+    tokenize(Rest, ?INC_LINE(S));
+tokenize([C | Rest], S) when C == $\s; C == $\t ->
+    tokenize(Rest, ?INC_COL(S));
+tokenize("{" ++ Rest, S) ->
+    {start_object, Rest, ?INC_COL(S)};
+tokenize("}" ++ Rest, S) ->
+    {end_object, Rest, ?INC_COL(S)};
+tokenize("[" ++ Rest, S) ->
+    {start_array, Rest, ?INC_COL(S)};
+tokenize("]" ++ Rest, S) ->
+    {end_array, Rest, ?INC_COL(S)};
+tokenize("," ++ Rest, S) ->
+    {comma, Rest, ?INC_COL(S)};
+tokenize(":" ++ Rest, S) ->
+    {colon, Rest, ?INC_COL(S)};
+tokenize("null" ++ Rest, S) ->
+    {{const, null}, Rest, ?ADV_COL(S, 4)};
+tokenize("true" ++ Rest, S) ->
+    {{const, true}, Rest, ?ADV_COL(S, 4)};
+tokenize("false" ++ Rest, S) ->
+    {{const, false}, Rest, ?ADV_COL(S, 5)};
+tokenize("\"" ++ Rest, S) ->
+    % decoded strings are re-encoded to UTF-8 byte lists for the caller
+    {String, Rest1, S1} = tokenize_string(Rest, ?INC_COL(S), []),
+    {{const, xmerl_ucs:to_utf8(String)}, Rest1, S1};
+tokenize(L=[C | _], S) when C >= $0, C =< $9; C == $- ->
+    case tokenize_number(L, sign, S, []) of
+    {{int, Int}, Rest, S1} ->
+        {{const, list_to_integer(Int)}, Rest, S1};
+    {{float, Float}, Rest, S1} ->
+        {{const, list_to_float(Float)}, Rest, S1}
+    end.
+
+%% testing constructs borrowed from the Yaws JSON implementation.
+
+%% Create an object from a list of Key/Value pairs.
+
+%% Build an empty {obj, Props} object.
+obj_new() ->
+    {obj, []}.
+
+%% True when every property key is a list (string); used to validate
+%% objects built in the tests.
+is_obj({obj, Props}) ->
+    F = fun ({K, _}) when is_list(K) ->
+        true;
+        (_) ->
+        false
+    end,
+    lists:all(F, Props).
+
+%% Wrap Props as an object, exiting if any key is not a string.
+obj_from_list(Props) ->
+    Obj = {obj, Props},
+    case is_obj(Obj) of
+        true -> Obj;
+        false -> exit(json_bad_object)
+    end.
+
+%% Test for equivalence of Erlang terms.
+%% Due to arbitrary order of construction, equivalent objects might
+%% compare unequal as erlang terms, so we need to carefully recurse
+%% through aggregates (tuples and objects).
+
+equiv({obj, Props1}, {obj, Props2}) ->
+    equiv_object(Props1, Props2);
+equiv(T1, T2) when is_tuple(T1), is_tuple(T2) ->
+    equiv_list(tuple_to_list(T1), tuple_to_list(T2));
+equiv(N1, N2) when is_number(N1), is_number(N2) -> N1 == N2;
+equiv(S1, S2) when is_list(S1), is_list(S2) -> S1 == S2;
+equiv(true, true) -> true;
+equiv(false, false) -> true;
+equiv(null, null) -> true.
+
+%% Object representation and traversal order is unknown.
+%% Use the sledgehammer and sort property lists.
+
+%% Compare two objects pairwise after keysort; lists:zip/2 exits if the
+%% objects have different sizes, which also fails the comparison.
+equiv_object(Props1, Props2) ->
+    L1 = lists:keysort(1, Props1),
+    L2 = lists:keysort(1, Props2),
+    Pairs = lists:zip(L1, L2),
+    true = lists:all(fun({{K1, V1}, {K2, V2}}) ->
+    equiv(K1, K2) and equiv(V1, V2)
+    end, Pairs).
+
+%% Recursively compare tuple elements for equivalence.
+
+equiv_list([], []) ->
+    true;
+equiv_list([V1 | L1], [V2 | L2]) ->
+    case equiv(V1, V2) of
+    true ->
+        equiv_list(L1, L2);
+    false ->
+        false
+    end.
+
+%% Run every {Term, Json} test vector through decode and encode+decode
+%% round trips.
+test_all() ->
+    test_one(e2j_test_vec(utf8), 1).
+
+%% Assert each vector decodes to the expected term and that encoding
+%% the term round-trips; N is the 1-based vector index for reporting.
+test_one([], N) ->
+    io:format("~p tests passed~n", [N-1]),
+    ok;
+test_one([{E, J} | Rest], N) ->
+    io:format("[~p] ~p ~p~n", [N, E, J]),
+    true = equiv(E, decode(J)),
+    true = equiv(E, decode(encode(E))),
+    test_one(Rest, 1+N).
+
+%% Test vectors: pairs of {ErlangTerm, JsonText}.  The unicode set is
+%% not run by test_all/0 (which only uses the utf8 set).
+e2j_test_vec(unicode) ->
+    [
+     {"foo" ++ [500] ++ "bar", [$", $f, $o, $o, 500, $b, $a, $r, $"]}
+    ];
+e2j_test_vec(utf8) ->
+    [
+    {1, "1"},
+    {3.1416, "3.14160"}, % text representation may truncate, trail zeroes
+    {-1, "-1"},
+    {-3.1416, "-3.14160"},
+    {12.0e10, "1.20000e+11"},
+    {1.234E+10, "1.23400e+10"},
+    {-1.234E-10, "-1.23400e-10"},
+    {10.0, "1.0e+01"},
+    {123.456, "1.23456E+2"},
+    {10.0, "1e1"},
+    {"foo", "\"foo\""},
+    {"foo" ++ [5] ++ "bar", "\"foo\\u0005bar\""},
+    {"", "\"\""},
+    {[], "\"\""},
+    {"\n\n\n", "\"\\n\\n\\n\""},
+    {obj_new(), "{}"},
+    {obj_from_list([{"foo", "bar"}]), "{\"foo\":\"bar\"}"},
+    {obj_from_list([{"foo", "bar"}, {"baz", 123}]),
+     "{\"foo\":\"bar\",\"baz\":123}"},
+    {{}, "[]"},
+    {{{}}, "[[]]"},
+    {{1, "foo"}, "[1,\"foo\"]"},
+
+    % json array in a json object
+    {obj_from_list([{"foo", {123}}]),
+     "{\"foo\":[123]}"},
+
+    % json object in a json object
+    {obj_from_list([{"foo", obj_from_list([{"bar", true}])}]),
+     "{\"foo\":{\"bar\":true}}"},
+
+    % fold evaluation order
+    {obj_from_list([{"foo", {}},
+                     {"bar", obj_from_list([{"baz", true}])},
+                     {"alice", "bob"}]),
+     "{\"foo\":[],\"bar\":{\"baz\":true},\"alice\":\"bob\"}"},
+
+    % json object in a json array
+    {{-123, "foo", obj_from_list([{"bar", {}}]), null},
+     "[-123,\"foo\",{\"bar\":[]},null]"}
+    ].

Added: incubator/couchdb/trunk/src/couchdb/couch.app.tpl.in
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/couchdb/couch.app.tpl.in?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/couchdb/couch.app.tpl.in (added)
+++ incubator/couchdb/trunk/src/couchdb/couch.app.tpl.in Fri Mar 28 16:32:19 2008
@@ -0,0 +1,29 @@
+%% OTP application resource template for CouchDB; @package_name@ and
+%% @version@ are substituted at build time.
+{application,couch,
+             [{description,"@package_name@"},
+              {vsn,"@version@"},
+              {modules,[couch_btree,
+                        cjson,
+                        couch_db,
+                        couch_doc,
+                        couch_query_servers,
+                        couch_file,
+                        couch_server,
+                        couch_server_sup,
+                        couch_stream,
+                        couch_key_tree,
+                        couch_view,
+                        couch_util,
+                        mod_couch,
+                        couch_event_sup,
+                        couch_db_update_notifier,
+                        couch_ft_query,
+                        couch_log,
+                        couch_rep]},
+              {registered,[couch_server,
+                           couch_server_sup,
+                           couch_util,
+                           couch_view,
+                           couch_query_servers,
+                           couch_ft_query]},
+              {applications,[kernel,stdlib,xmerl,couch_inets]},
+              {mod,{couch_server,[]}}]}.

Added: incubator/couchdb/trunk/src/couchdb/couch_btree.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/couchdb/couch_btree.erl?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/couchdb/couch_btree.erl (added)
+++ incubator/couchdb/trunk/src/couchdb/couch_btree.erl Fri Mar 28 16:32:19 2008
@@ -0,0 +1,590 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_btree).
+
+-export([open/2, open/3, query_modify/4, add_remove/3, foldl/3, foldl/4]).
+-export([foldr/3, foldr/4, fold/4, fold/5, row_count/1]).
+-export([lookup/2, get_state/1, test/1, test/0]).
+
+-define(CHUNK_THRESHOLD, 16#fff).
+
+-record(btree,
+    {fd,
+    root,
+    extract_kv = fun({Key, Value}) -> {Key, Value} end,
+    assemble_kv =  fun(Key, Value) -> {Key, Value} end,
+    less = fun(A, B) -> A < B end
+    }).
+
+%% Apply the btree's user-supplied key/value extraction fun.
+extract(#btree{extract_kv=Extract}, Value) ->
+    Extract(Value).
+
+%% Apply the btree's user-supplied key/value assembly fun.
+assemble(#btree{assemble_kv=Assemble}, Key, Value) ->
+    Assemble(Key, Value).
+
+%% Compare two keys using the btree's ordering fun.
+less(#btree{less=Less}, A, B) ->
+    Less(A, B).
+
+% pass in 'nil' for State if a new Btree.
+open(State, Fd) ->
+    {ok, #btree{root=State, fd=Fd}}.
+    
+%% Fold btree options into the #btree{} record: {split, F} sets the
+%% key/value extractor, {join, F} the assembler, {less, F} the ordering.
+set_options(Bt, []) ->
+    Bt;
+set_options(Bt, [{split, Extract}|Rest]) ->
+    set_options(Bt#btree{extract_kv=Extract}, Rest);
+set_options(Bt, [{join, Assemble}|Rest]) ->
+    set_options(Bt#btree{assemble_kv=Assemble}, Rest);
+set_options(Bt, [{less, Less}|Rest]) ->
+    set_options(Bt#btree{less=Less}, Rest).
+
+%% As open/2 but with an option list (see set_options/2).
+open(State, Fd, Options) ->
+    {ok, set_options(#btree{root=State, fd=Fd}, Options)}.
+
+%% Return the serializable root pointer; pass it back to open/2,3 later.
+get_state(#btree{root=Root}) ->
+    Root.
+
+%% Total number of key/value pairs; the count is stored in the root
+%% pointer, so this is O(1).
+row_count(#btree{root=nil}) ->
+    0;
+row_count(#btree{root={_RootPointer, Count}}) ->
+    Count.
+
+%% Fold over all key/values in forward key order.
+foldl(Bt, Fun, Acc) ->
+    fold(Bt, fwd, Fun, Acc).
+
+%% Forward fold starting at Key.
+foldl(Bt, Key, Fun, Acc) ->
+    fold(Bt, Key, fwd, Fun, Acc).
+
+%% Fold over all key/values in reverse key order.
+foldr(Bt, Fun, Acc) ->
+    fold(Bt, rev, Fun, Acc).
+
+%% Reverse fold starting at Key.
+foldr(Bt, Key, Fun, Acc) ->
+    fold(Bt, Key, rev, Fun, Acc).
+
+% wraps a 2 arity function with the proper 3 arity function
+convert_fun_arity(Fun) when is_function(Fun, 2) ->
+    fun(KV, _Offset, AccIn) -> Fun(KV, AccIn) end;
+convert_fun_arity(Fun) when is_function(Fun, 3) ->
+    Fun.    % Already arity 3
+
+%% Stream the whole tree (no start key) in direction Dir (fwd | rev).
+fold(Bt, Dir, Fun, Acc) ->
+    {_ContinueFlag, Acc2} = stream_node(Bt, 0, Bt#btree.root, nil, Dir, convert_fun_arity(Fun), Acc),
+    {ok, Acc2}.
+
+%% Stream the tree starting at Key in direction Dir.
+fold(Bt, Key, Dir, Fun, Acc) ->
+    {_ContinueFlag, Acc2} = stream_node(Bt, 0, Bt#btree.root, Key, Dir, convert_fun_arity(Fun), Acc),
+    {ok, Acc2}.
+
+%% Insert and remove in one pass with no lookups (see query_modify/4).
+add_remove(Bt, InsertKeyValues, RemoveKeys) ->
+    {Result, [], Bt2} = query_modify(Bt, [], InsertKeyValues, RemoveKeys),
+    {Result, Bt2}.
+
+%% Perform lookups, inserts and removes in a single tree traversal.
+%% All actions are tagged ({fetch|remove|insert, Key, Value}) and
+%% sorted by key, with op_order/1 breaking ties between different ops
+%% on the same key, then applied by modify_node/4.  Returns the fetch
+%% results and the btree with its new root.
+query_modify(Bt, LookupKeys, InsertValues, RemoveKeys) ->
+    #btree{root=Root} = Bt,
+    InsertActions = lists:map(
+        fun(KeyValue) ->
+            {Key, Value} = extract(Bt, KeyValue),
+            {insert, Key, Value}
+        end, InsertValues),
+    RemoveActions = [{remove, Key, nil} || Key <- RemoveKeys],
+    FetchActions = [{fetch, Key, nil} || Key <- LookupKeys],
+    SortFun =
+        fun({OpA, A, _}, {OpB, B, _}) ->
+            case less(Bt, A, B) of
+            true -> true;
+            false ->
+                case less(Bt, B, A) of
+                true -> false;
+                false ->
+                    % A and B are equal, sort by op.
+                    op_order(OpA) < op_order(OpB)
+                end
+            end
+        end,
+    Actions = lists:sort(SortFun, lists:append([InsertActions, RemoveActions, FetchActions])),
+    {ok, KeyPointers, QueryResults, Bt2} = modify_node(Bt, Root, Actions, []),
+    {ok, NewRoot, Bt3} = complete_root(Bt2, KeyPointers),
+    {ok, QueryResults, Bt3#btree{root=NewRoot}}.
+
+% for ordering different operatations with the same key.
+% fetch < remove < insert
+op_order(fetch) -> 1;
+op_order(remove) -> 2;
+op_order(insert) -> 3.
+
+%% Look up many keys at once.  Keys are sorted for an efficient single
+%% traversal, then the results are re-ordered back to the caller's
+%% original key order via a dict.
+lookup(#btree{root=Root, less=Less}=Bt, Keys) ->
+    SortedKeys = lists:sort(Less, Keys),
+    {ok, SortedResults} = lookup(Bt, Root, SortedKeys),
+    % We want to return the results in the same order as the keys were input
+    % but we may have changed the order when we sorted. So we need to put the
+    % order back into the results.
+    KeyDict = dict:from_list(SortedResults),
+    [dict:fetch(Key, KeyDict) || Key <- Keys].
+
+%% Look up sorted Keys beneath one node pointer.  nil means an empty
+%% tree, so every key is not_found; otherwise dispatch on node type.
+lookup(_Bt, nil, Keys) ->
+    {ok, [{Key, not_found} || Key <- Keys]};
+lookup(Bt, {Pointer, _Count}, Keys) ->
+    {NodeType, NodeList} = get_node(Bt, Pointer),
+    case NodeType of
+    kp_node ->
+        lookup_kpnode(Bt, NodeList, Keys, []);
+    kv_node ->
+        lookup_kvnode(Bt, NodeList, Keys, [])
+    end.
+
+
+%% Walk a key-pointer (interior) node with a sorted key list.  Keys
+%% falling past the last pointer are not_found; otherwise each child
+%% receives the slice of keys that are =< its separator key.
+lookup_kpnode(_Bt, [], Keys, Output) ->
+    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
+
+lookup_kpnode(_Bt, _KPs, [], Output) ->
+    {ok, lists:reverse(Output)};
+
+lookup_kpnode(Bt, [{Key, PointerInfo} | RestKPs], LookupKeys, Output) ->
+    % Split the Keys into two lists, queries of values less
+    % than equals, and greater than the current key
+    SplitFun = fun(LookupKey) -> not less(Bt, Key, LookupKey) end,
+    case lists:splitwith(SplitFun, LookupKeys) of
+    {[], GreaterQueries} ->
+        lookup_kpnode(Bt, RestKPs, GreaterQueries, Output);
+    {LessEqQueries, GreaterQueries} ->
+        {ok, Results} = lookup(Bt, PointerInfo, LessEqQueries),
+        lookup_kpnode(Bt, RestKPs, GreaterQueries, lists:reverse(Results, Output))
+    end.
+
+
+
+%% Walk a key-value (leaf) node with a sorted key list, merging the two
+%% sorted sequences: emit not_found when the lookup key sorts before
+%% the current entry, advance the node when it sorts after, and emit
+%% the assembled value on equality.
+lookup_kvnode(_Bt, _KVs, [], Output) ->
+    {ok, lists:reverse(Output)};
+lookup_kvnode(_Bt, [], Keys, Output) ->
+    % keys not found
+    {ok, lists:reverse(Output, [{Key, not_found} || Key <- Keys])};
+lookup_kvnode(Bt, [{Key, Value} | RestKVs], [LookupKey | RestLookupKeys], Output) ->
+    case less(Bt, LookupKey, Key) of
+    true ->
+        lookup_kvnode(Bt, [{Key, Value} | RestKVs], RestLookupKeys, [{LookupKey, not_found} | Output]);
+    false ->
+        case less(Bt, Key, LookupKey) of
+        true ->
+            % LookupKey is greater than Key
+            lookup_kvnode(Bt, RestKVs, [LookupKey | RestLookupKeys], Output);
+        false ->
+            % LookupKey is equal to Key
+            lookup_kvnode(Bt, RestKVs, RestLookupKeys, [{LookupKey, {ok, assemble(Bt, LookupKey, Value)}} | Output])
+        end
+    end.
+
+
+%% Collapse a list of key-pointers into a single root: an empty list is
+%% an empty tree, one pointer is the root itself, and more than one is
+%% written out as kp_node layers until a single pointer remains.
+complete_root(Bt, []) ->
+    {ok, nil, Bt};
+complete_root(Bt, [{_Key, PointerInfo}])->
+    {ok, PointerInfo, Bt};
+complete_root(Bt, KPs) ->
+    {ok, ResultKeyPointers, Bt2} = write_node(Bt, kp_node, KPs),
+    complete_root(Bt2, ResultKeyPointers).
+
+%%%%%%%%%%%%% The chunkify function sucks! %%%%%%%%%%%%% 
+% It is inaccurate as it does not account for compression when blocks are
+% written. Plus with the "case size(term_to_binary(InList)) of" code it's
+% probably really inefficient.
+
+%% Split a node's element list into chunks of roughly equal serialized
+%% size once the whole list exceeds ?CHUNK_THRESHOLD; small lists are
+%% returned as a single chunk.
+chunkify(_Bt, []) ->
+    [];
+chunkify(Bt, InList) ->
+    case size(term_to_binary(InList)) of
+    Size when Size > ?CHUNK_THRESHOLD ->
+        NumberOfChunksLikely = ((Size div ?CHUNK_THRESHOLD) + 1),
+        ChunkThreshold = Size div NumberOfChunksLikely,
+        chunkify(Bt, InList, ChunkThreshold, [], 0, []);
+    _Else ->
+        [InList]
+    end.
+
+%% Accumulate elements into OutList until adding one would cross the
+%% per-chunk threshold, then start a new chunk.  Both the current chunk
+%% and the chunk list are built reversed and reversed on completion.
+chunkify(_Bt, [], _ChunkThreshold, [], 0, OutputChunks) ->
+    lists:reverse(OutputChunks);
+chunkify(_Bt, [], _ChunkThreshold, OutList, _OutListSize, OutputChunks) ->
+    lists:reverse([lists:reverse(OutList) | OutputChunks]);
+chunkify(Bt, [InElement | RestInList], ChunkThreshold, OutList, OutListSize, OutputChunks) ->
+    case size(term_to_binary(InElement)) of
+    Size when (Size + OutListSize) > ChunkThreshold ->
+        chunkify(Bt, RestInList, ChunkThreshold, [], 0, [lists:reverse([InElement | OutList]) | OutputChunks]);
+    Size ->
+        chunkify(Bt, RestInList, ChunkThreshold, [InElement | OutList], OutListSize + Size, OutputChunks)
+    end.
+
+%% Apply a sorted action list to the subtree at RootPointerInfo and
+%% return the replacement key-pointers plus accumulated fetch results.
+%% A nil pointer is treated as an empty leaf.
+modify_node(Bt, RootPointerInfo, Actions, QueryOutput) ->
+    case RootPointerInfo of
+    nil ->
+        NodeType = kv_node,
+        NodeList = [];
+    {Pointer, _count} ->
+        {NodeType, NodeList} = get_node(Bt, Pointer)
+    end,
+    case NodeType of
+    kp_node ->
+        {ok, NewNodeList, QueryOutput2, Bt2} = modify_kpnode(Bt, NodeList, Actions, [], QueryOutput);
+    kv_node ->
+        {ok, NewNodeList, QueryOutput2, Bt2} = modify_kvnode(Bt, NodeList, Actions, [], QueryOutput)
+    end,
+    case NewNodeList of
+    [] ->  % no nodes remain
+        {ok, [], QueryOutput2, Bt2};
+    NodeList ->  % nothing changed
+        % NodeList is already bound above, so this clause is an
+        % equality match: the node is unchanged and the existing
+        % on-disk pointer can be reused without rewriting.
+        {LastKey, _LastValue} = lists:last(NodeList),
+        {ok, [{LastKey, RootPointerInfo}], QueryOutput2, Bt2};
+    _Else2 ->
+        {ok, ResultList, Bt3} = write_node(Bt2, NodeType, NewNodeList),
+        {ok, ResultList, QueryOutput2, Bt3}
+    end.
+
+
+%% Number of key/values reachable from a node list: leaf entries count
+%% one each; interior entries carry their subtree counts.
+count(kv_node, NodeList) ->
+    length(NodeList);
+count(kp_node, NodeList) ->
+    lists:foldl( fun({_Key, {_Pointer, Count}}, AccCount) ->
+            Count + AccCount
+        end,
+        0, NodeList).
+
+
+%% Read a {NodeType, NodeList} term from the file at NodePos.
+get_node(#btree{fd = Fd}, NodePos) ->
+    {ok, {NodeType, NodeList}} = couch_file:pread_term(Fd, NodePos),
+    case NodeType of
+    kp_node ->
+        % Node pointers always point backward on disk.
+        % Validating this prevents infinite loops should
+        % a disk corruption occur.
+        [throw({error, disk_corruption})
+            || {_Key, {SubNodePos, _Count}}
+            <- NodeList, SubNodePos >= NodePos];
+    kv_node ->
+        ok
+    end,
+    {NodeType, NodeList}.
+
+write_node(Bt, NodeType, NodeList) ->
+    % split up nodes into smaller sizes
+    NodeListList = chunkify(Bt, NodeList),
+    % now write out each chunk and return the KeyPointer pairs for those nodes
+    ResultList = [
+        begin
+            {ok, Pointer} = couch_file:append_term(Bt#btree.fd, {NodeType, ANodeList}),
+            {LastKey, _} = lists:last(ANodeList),
+            {LastKey, {Pointer, count(NodeType, ANodeList)}}
+        end
+    ||
+        ANodeList <- NodeListList
+    ],
+    {ok, ResultList, Bt}.
+
+% modify_kpnode/5: route each action to the child subtree whose last key
+% is >= the action key.  KPs are consumed left to right; ResultNode
+% accumulates finished key/pointer entries in reverse.
+modify_kpnode(Bt, KPs, [], ResultNode, QueryOutput) ->
+    % processed all queries for the current tree
+    {ok, lists:reverse(ResultNode, KPs), QueryOutput, Bt};
+
+% Ran out of children but actions remain: they all sort after the last
+% child's key, so apply them to that last child's subtree.
+modify_kpnode(Bt, [], Actions, [{_Key, PointerInfo} | ResultNode], QueryOutput) ->
+    {ok, ChildKPs, QueryOutput2, Bt2} = modify_node(Bt, PointerInfo, Actions, QueryOutput),
+    {ok, lists:reverse(ResultNode, ChildKPs), QueryOutput2, Bt2};
+
+modify_kpnode(Bt, [{Key,PointerInfo} | RestKPs], Actions, ResultNode, QueryOutput) ->
+    % Split the actions into two lists, queries of values less
+    % than equals, and greater than the current key
+    SplitFun = fun({_ActionType, ActionKey, _ActionValue}) ->
+            not less(Bt, Key, ActionKey)
+        end,
+    case lists:splitwith(SplitFun, Actions) of
+    {[], GreaterQueries} ->
+        % nothing belongs to this child; keep it untouched and move on
+        modify_kpnode(Bt, RestKPs, GreaterQueries, [{Key, PointerInfo} | ResultNode], QueryOutput);
+    {LessEqQueries, GreaterQueries} ->
+        % recurse into this child with its share of the actions
+        {ok, ChildKPs, QueryOutput2, Bt2} = modify_node(Bt, PointerInfo, LessEqQueries, QueryOutput),
+        modify_kpnode(Bt2, RestKPs, GreaterQueries, lists:reverse(ChildKPs, ResultNode), QueryOutput2)
+    end.
+
+% modify_kvnode/5: merge sorted Actions (insert | remove | fetch) into a
+% sorted key/value leaf list.  fetch results are prepended to
+% QueryOutput; ResultNode accumulates output pairs in reverse.
+modify_kvnode(Bt, KVs, [], ResultNode, QueryOutput) ->
+    {ok, lists:reverse(ResultNode, KVs), QueryOutput, Bt};
+% No remaining KVs: every remaining action key is past the end of the node.
+modify_kvnode(Bt, [], [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) ->
+    case ActionType of
+    insert ->
+        modify_kvnode(Bt, [], RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+    remove ->
+        % just drop the action
+        modify_kvnode(Bt, [], RestActions, ResultNode, QueryOutput);
+    fetch ->
+        % the key/value must not exist in the tree
+        modify_kvnode(Bt, [], RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
+    end;
+modify_kvnode(Bt, [{Key, Value} | RestKVs], [{ActionType, ActionKey, ActionValue} | RestActions], ResultNode, QueryOutput) ->
+    case less(Bt, ActionKey, Key) of
+    true ->
+        case ActionType of
+        insert ->
+            % ActionKey is less than the Key, so insert
+            modify_kvnode(Bt, [{Key, Value} | RestKVs], RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+        remove ->
+            % ActionKey is less than the Key, just drop the action
+            modify_kvnode(Bt, [{Key, Value} | RestKVs], RestActions, ResultNode, QueryOutput);
+        fetch ->
+            % ActionKey is less than the Key, the key/value must not exist in the tree
+            modify_kvnode(Bt, [{Key, Value} | RestKVs], RestActions, ResultNode, [{not_found, {ActionKey, nil}} | QueryOutput])
+        end;
+    false ->
+        case less(Bt, Key, ActionKey) of
+        true ->
+            % ActionKey is greater than Key
+            modify_kvnode(Bt, RestKVs, [{ActionType, ActionKey, ActionValue} | RestActions], [{Key, Value} | ResultNode], QueryOutput);
+        false ->
+            % InsertKey is equal to Key
+            case ActionType of
+            insert ->
+                % ActionKey equals Key: replace the existing value with
+                % the new one (the old pair is dropped from RestKVs' head)
+                modify_kvnode(Bt, RestKVs, RestActions, [{ActionKey, ActionValue} | ResultNode], QueryOutput);
+            remove ->
+                modify_kvnode(Bt, RestKVs, RestActions, ResultNode, QueryOutput);
+            fetch ->
+                % ActionKey is equal to the Key, insert into the QueryOuput, but re-process the node
+                % since an identical action key can follow it.
+                modify_kvnode(Bt, [{Key, Value} | RestKVs], RestActions, ResultNode, [{ok, assemble(Bt, Key, Value)} | QueryOutput])
+            end
+        end
+    end.
+
+% adjust_dir/2: orient a node list for the traversal direction.
+% Forward traversal uses disk order; reverse traversal flips it.
+adjust_dir(fwd, List) -> List;
+adjust_dir(rev, List) -> lists:reverse(List).
+
+% stream_node/7: stream the subtree at PointerInfo through Fun, starting
+% at StartKey (nil means stream from the beginning).  Offset is the
+% running count of kvs preceding this subtree in traversal order.
+stream_node(Bt, Offset, PointerInfo, nil, Dir, Fun, Acc) ->
+    stream_node(Bt, Offset, PointerInfo, Dir, Fun, Acc);
+stream_node(_Bt, _Offset, nil, _StartKey, _Dir, _Fun, Acc) ->
+    {ok, Acc};
+stream_node(Bt, Offset, {Pointer, _Count}, StartKey, Dir, Fun, Acc) ->
+    {NodeType, NodeList} = get_node(Bt, Pointer),
+    case NodeType of
+    kp_node ->
+        stream_kp_node(Bt, Offset, adjust_dir(Dir, NodeList), StartKey, Dir, Fun, Acc);
+    kv_node ->
+        stream_kv_node(Bt, Offset, adjust_dir(Dir, NodeList), StartKey, Dir, Fun, Acc)
+    end.
+
+% stream_node/6: as stream_node/7 but with no start key -- stream the
+% whole subtree in direction Dir.
+stream_node(_Bt, _Offset, nil, _Dir, _Fun, Acc) ->
+    {ok, Acc};
+stream_node(Bt, Offset, {Pointer, _Count}, Dir, Fun, Acc) ->
+    {NodeType, NodeList} = get_node(Bt, Pointer),
+    case NodeType of
+    kp_node ->
+        stream_kp_node(Bt, Offset, adjust_dir(Dir, NodeList), Dir, Fun, Acc);
+    kv_node ->
+        stream_kv_node(Bt, Offset, adjust_dir(Dir, NodeList), Dir, Fun, Acc)
+    end.
+
+% stream_kp_node/6: stream every child subtree in order, advancing the
+% offset by each child's element count; stop early if Fun says {stop, _}.
+stream_kp_node(_Bt, _Offset, [], _Dir, _Fun, Acc) ->
+    {ok, Acc};
+stream_kp_node(Bt, Offset, [{_Key, {Pointer, Count}} | Rest], Dir, Fun, Acc) ->
+    case stream_node(Bt, Offset, {Pointer, Count}, Dir, Fun, Acc) of
+    {ok, Acc2} ->
+        stream_kp_node(Bt, Offset + Count, Rest, Dir, Fun, Acc2);
+    {stop, Acc2} ->
+        {stop, Acc2}
+    end.
+
+% drop_nodes/4: skip leading child pointers whose last key sorts before
+% StartKey, accumulating their element counts into Offset.  Returns the
+% adjusted offset and the remaining key/pointer list.
+drop_nodes(_Bt, Offset, _StartKey, []) ->
+    {Offset, []};
+drop_nodes(Bt, Offset, StartKey, [{NodeKey, {Pointer, Count}} | RestKPs]) ->
+    case less(Bt, NodeKey, StartKey) of
+    true -> drop_nodes(Bt, Offset + Count, StartKey, RestKPs);
+    false -> {Offset, [{NodeKey, {Pointer, Count}} | RestKPs]}
+    end.
+
+% stream_kp_node/7: stream children starting at StartKey.  The first
+% child that can contain StartKey is streamed with the key still set;
+% subsequent children are streamed whole via stream_kp_node/6.
+stream_kp_node(Bt, Offset, KPs, StartKey, Dir, Fun, Acc) ->
+    {NewOffset, NodesToStream} =
+    case Dir of
+    fwd ->
+        % drop all nodes sorting before the key
+        drop_nodes(Bt, Offset, StartKey, KPs);
+    rev ->
+        % keep all nodes sorting before the key, AND the first node to sort after
+        RevKPs = lists:reverse(KPs),
+        case lists:splitwith(fun({Key, _Pointer}) -> less(Bt, Key, StartKey) end, RevKPs) of
+        {_RevBefore, []} ->
+            % everything sorts before it
+            {Offset, KPs};
+        {RevBefore, [FirstAfter | Drop]} ->
+            % Drop are the children entirely past the key; count their
+            % elements into the offset so positions stay correct.
+            {Offset + count(kp_node, Drop), [FirstAfter | lists:reverse(RevBefore)]}
+        end
+    end,
+    case NodesToStream of
+    [] ->
+        {ok, Acc};
+    [{_Key, PointerInfo} | Rest] ->
+        % only the first node needs the StartKey; the rest stream whole
+        case stream_node(Bt, NewOffset, PointerInfo, StartKey, Dir, Fun, Acc) of
+        {ok, Acc2} ->
+            stream_kp_node(Bt, NewOffset, Rest, Dir, Fun, Acc2);
+        {stop, Acc2} ->
+            {stop, Acc2}
+        end
+    end.
+
+% stream_kv_node/6: feed each assembled key/value (with its ordinal
+% Offset) to Fun until the list ends or Fun returns {stop, Acc}.
+stream_kv_node(_Bt, _Offset, [], _Dir, _Fun, Acc) ->
+    {ok, Acc};
+stream_kv_node(Bt, Offset, [{K, V} | RestKVs], Dir, Fun, Acc) ->
+    case Fun(assemble(Bt, K, V), Offset, Acc) of
+    {ok, Acc2} ->
+        stream_kv_node(Bt, Offset + 1, RestKVs, Dir, Fun, Acc2);
+    {stop, Acc2} ->
+        {stop, Acc2}
+    end.
+
+% stream_kv_node/7: as /6, but first drop leading kvs that sort strictly
+% before StartKey (after StartKey for reverse traversal), adjusting the
+% offset by the number skipped.
+stream_kv_node(Bt, Offset, KVs, StartKey, Dir, Fun, Acc) ->
+    DropFun =
+    case Dir of
+    fwd ->
+        fun({Key, _}) -> less(Bt, Key, StartKey) end;
+    rev ->
+        fun({Key, _}) -> less(Bt, StartKey, Key) end
+    end,
+    % drop all nodes preceding the key
+    GTEKVs = lists:dropwhile(DropFun, KVs),
+    LenSkipped = length(KVs) - length(GTEKVs),
+    stream_kv_node(Bt, Offset + LenSkipped, GTEKVs, Dir, Fun, Acc).
+
+
+
+
+% test/0: run the btree round-trip test with 1000 random pairs.
+test()->
+    test(1000).
+
+% test/1: exercise the btree with N random float key/value pairs in
+% three insertion orders: random, ascending, and descending.
+% NOTE(review): uses the `random` module, deprecated in modern OTP in
+% favour of `rand` -- fine for this era of code.
+test(N) ->
+    KeyValues = [{random:uniform(), random:uniform()} || _Seq <- lists:seq(1, N)],
+    test_btree(KeyValues),              % randomly distributed
+    Sorted = lists:sort(KeyValues),
+    test_btree(Sorted),                 % sorted regular
+    test_btree(lists:reverse(Sorted)).  % sorted reverse
+
+
+% test_btree/1: full add/remove/lookup round trip against a scratch file
+% named "foo".  Each Btree<N> binding is a new immutable tree state;
+% every step asserts via pattern match and crashes on mismatch.
+test_btree(KeyValues) ->
+    {ok, Fd} = couch_file:open("foo", [create,overwrite]),
+    {ok, Btree} = open(nil, Fd),
+
+    % first dump in all the values in one go
+    {ok, Btree10} = add_remove(Btree, KeyValues, []),
+
+    ok = test_keys(Btree10, KeyValues),
+
+    % remove everything
+    {ok, Btree20} = test_remove(Btree10, KeyValues),
+
+    % make sure its empty
+    {ok, false} = foldl(Btree20, fun(_X, _Acc) ->
+            {ok, true} % change Acc to 'true'
+        end,
+        false),
+
+    % add everything back one at a time.
+    {ok, Btree30} = test_add(Btree20, KeyValues),
+
+    ok = test_keys(Btree30, KeyValues),
+
+    KeyValuesRev = lists:reverse(KeyValues),
+
+    % remove everything, in reverse order
+    {ok, Btree40} = test_remove(Btree30, KeyValuesRev),
+
+    % make sure its empty
+    {ok, false} = foldl(Btree40, fun(_X, _Acc) ->
+            {ok, true} % change Acc to 'true'
+        end,
+        false),
+
+    % split the pairs into two interleaved halves
+    {A, B} = every_other(KeyValues),
+
+    % add everything back
+    {ok, Btree50} = test_add(Btree40,KeyValues),
+
+    ok = test_keys(Btree50, KeyValues),
+
+    % remove half the values
+    {ok, Btree60} = test_remove(Btree50, A),
+
+    % verify the remaining
+    ok = test_keys(Btree60, B),
+
+    % add A back
+    {ok, Btree70} = test_add(Btree60, A),
+
+    % verify
+    ok = test_keys(Btree70, KeyValues),
+
+    % remove B
+    {ok, Btree80} = test_remove(Btree70, B),
+
+    % verify the remaining
+    ok = test_keys(Btree80, A),
+
+    ok = couch_file:close(Fd).
+
+
+
+
+% every_other/1: split List into {A, B} where A receives the 1st, 3rd,
+% 5th... elements and B receives the 2nd, 4th, 6th...
+every_other(List) ->
+    split_alternating(List, [], [], take_a).
+
+% Internal: walk the list, alternating which accumulator receives the
+% head; both accumulators are built in reverse and flipped at the end.
+split_alternating([], AccA, AccB, _Next) ->
+    {lists:reverse(AccA), lists:reverse(AccB)};
+split_alternating([H | T], AccA, AccB, take_a) ->
+    split_alternating(T, [H | AccA], AccB, take_b);
+split_alternating([H | T], AccA, AccB, take_b) ->
+    split_alternating(T, AccA, [H | AccB], take_a).
+
+% test_keys/2: assert the btree enumerates exactly the pairs in List,
+% both forward (sorted) and backward (reverse sorted), then check each
+% pair individually via test_lookup/2.
+test_keys(Btree, List) ->
+    FoldFun =
+    fun(Element, [HAcc|TAcc]) ->
+            Element = HAcc, % must match
+            {ok, TAcc}
+        end,
+    Sorted = lists:sort(List),
+    {ok, []} = foldl(Btree, FoldFun, Sorted),
+    {ok, []} = foldr(Btree, FoldFun, lists:reverse(Sorted)),
+
+    test_lookup(Btree, List).
+
+% Makes sure each key value pair is found in the btree
+% (by direct lookup and by starting a fold at the key in each direction).
+test_lookup(_Btree, []) ->
+    ok;
+test_lookup(Btree, [{Key, Value} | Rest]) ->
+    [{ok,{Key, Value}}] = lookup(Btree, [Key]),
+    {ok, []} = foldl(Btree, Key, fun({KeyIn, ValueIn}, []) ->
+            KeyIn = Key,
+            ValueIn = Value,
+            {stop, []} % stop after verifying the first streamed pair
+        end,
+        []),
+    {ok, []} = foldr(Btree, Key, fun({KeyIn, ValueIn}, []) ->
+            KeyIn = Key,
+            ValueIn = Value,
+            {stop, []}
+        end,
+        []),
+    test_lookup(Btree, Rest).
+
+% removes each key one at a time from the btree
+test_remove(Btree, []) ->
+    {ok, Btree};
+test_remove(Btree, [{Key, _Value} | Rest]) ->
+    {ok, Btree2} = add_remove(Btree,[], [Key]),
+    test_remove(Btree2, Rest).
+
+% adds each key one at a time from the btree
+test_add(Btree, []) ->
+    {ok, Btree};
+test_add(Btree, [KeyValue | Rest]) ->
+    {ok, Btree2} = add_remove(Btree, [KeyValue], []),
+    test_add(Btree2, Rest).

Added: incubator/couchdb/trunk/src/couchdb/couch_db.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/couchdb/couch_db.erl?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/couchdb/couch_db.erl (added)
+++ incubator/couchdb/trunk/src/couchdb/couch_db.erl Fri Mar 28 16:32:19 2008
@@ -0,0 +1,757 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_db).
+-behaviour(gen_server).
+
+-export([open/2,create/2,create/3,get_doc_info/2]).
+-export([save_docs/2, save_docs/3, get_db_info/1, update_doc/3, update_docs/2, update_docs/3]).
+-export([delete_doc/3,open_doc/2,open_doc/3,close/1,enum_docs_since/4,enum_docs_since/5]).
+-export([enum_docs/4,enum_docs/5, open_doc_revs/4, get_missing_revs/2]).
+-export([start_update_loop/1]).
+-export([init/1,terminate/2,handle_call/3,handle_cast/2,code_change/3,handle_info/2]).
+
+-include("couch_db.hrl").
+
+% On-disk database header, persisted via couch_file:write_header/3.
+-record(db_header,
+    {write_version = 0,                 % header format version
+     last_update_seq = 0,               % highest update sequence written
+     summary_stream_state = nil,        % couch_stream state for doc summaries
+     docinfo_by_Id_btree_state = nil,   % root of the by-id btree
+     docinfo_by_seq_btree_state = nil,  % root of the by-seq btree
+     local_docs_btree_state = nil,      % root of the _local docs btree
+     doc_count=0,                       % live (non-deleted) docs
+     doc_del_count=0                    % deleted docs
+    }).
+
+% In-memory handle for an open database; passed between the main
+% gen_server process and the update-loop process.
+-record(db,
+    {main_pid,              % gen_server process serving reads
+    update_pid,             % serialized writer process (update_loop)
+    fd,                     % couch_file fd
+    header = #db_header{},  % last header written to disk
+    summary_stream,         % couch_stream for doc summaries
+    docinfo_by_Id_btree,
+    docinfo_by_seq_btree,
+    local_docs_btree,
+    last_update_seq,
+    doc_count,
+    doc_del_count,
+    name                    % database name (used for update notifications)
+    }).
+
+% start_link/3: open the database file and start the gen_server.
+% The fd is unlinked from the caller afterwards because init/1 links it
+% to the new server process instead.
+start_link(DbName, Filepath, Options) ->
+    case couch_file:open(Filepath, Options) of
+    {ok, Fd} ->
+         Result = gen_server:start_link(couch_db, {DbName, Fd, Options}, []),
+         unlink(Fd),
+         Result;
+    {error, enoent} ->
+        % couldn't find file
+        {error, not_found};
+    Else ->
+        Else
+    end.
+
+%%% Interface functions %%%
+
+% create/2: create a database whose name is its file path.
+create(Filepath, Options) ->
+    create(Filepath, Filepath, Options).
+
+create(DbName, Filepath, Options) when is_list(Options) ->
+    start_link(DbName, Filepath, [create | Options]).
+
+% open/2: open an existing database file.
+open(DbName, Filepath) ->
+    start_link(DbName, Filepath, []).
+
+% delete_doc/3: delete Id by writing a deletion stub for each given
+% revision.  Returns {ok, Result} for the first revision only.
+delete_doc(MainPid, Id, Revisions) ->
+    DeletedDocs = [#doc{id=Id, revs=[Rev], deleted=true} || Rev <- Revisions],
+    {ok, [Result]} = update_docs(MainPid, DeletedDocs, [new_edits]),
+    {ok, Result}.
+
+open_doc(MainPid, IdOrDocInfo) ->
+    open_doc(MainPid, IdOrDocInfo, []).
+
+% open_doc/3: open a doc by id (or doc info).  A deleted doc is only
+% returned when the caller passed the 'deleted' option.
+open_doc(MainPid, Id, Options) ->
+    case open_doc_int(get_db(MainPid), Id, Options) of
+    {ok, #doc{deleted=true}=Doc} ->
+        case lists:member(deleted, Options) of
+        true ->
+            {ok, Doc};
+        false ->
+            {not_found, deleted}
+        end;
+    Else ->
+        Else
+    end.
+
+% open_doc_revs/4: open specific revisions (or 'all' leaf revisions).
+open_doc_revs(MainPid, Id, Revs, Options) ->
+    open_doc_revs_int(get_db(MainPid), Id, Revs, Options).
+
+% get_missing_revs/2: for each {Id, Revs}, report which of Revs are not
+% present in the doc's revision tree (all of them if the doc is absent).
+get_missing_revs(MainPid, IdRevsList) ->
+    Ids = [Id1 || {Id1, _Revs} <- IdRevsList],
+    FullDocInfoResults = get_full_doc_infos(MainPid, Ids),
+    Results = lists:zipwith(
+        fun({Id, Revs}, FullDocInfoResult) ->
+            case FullDocInfoResult of
+            {ok, #full_doc_info{rev_tree=RevisionTree}} ->
+                {Id, couch_key_tree:find_missing(RevisionTree, Revs)};
+            not_found ->
+                {Id, Revs}
+            end
+        end,
+        IdRevsList, FullDocInfoResults),
+    {ok, Results}.
+
+% get_doc_info/2: like get_full_doc_info/2 but collapses the full info
+% into a #doc_info{} summary.  Returns {ok, DocInfo} | not_found.
+get_doc_info(Db, Id) ->
+    case get_full_doc_info(Db, Id) of
+    {ok, DocInfo} ->
+        {ok, couch_doc:to_doc_info(DocInfo)};
+    Else ->
+        Else
+    end.
+    
+%   returns {ok, DocInfo} or not_found
+get_full_doc_info(Db, Id) ->
+    [Result] = get_full_doc_infos(Db, [Id]),
+    Result.
+
+
+% get_full_doc_infos/2: batched by-id btree lookup; accepts either the
+% server pid or an already-fetched #db{} snapshot.
+get_full_doc_infos(MainPid, Ids) when is_pid(MainPid) ->
+    get_full_doc_infos(get_db(MainPid), Ids);
+get_full_doc_infos(#db{}=Db, Ids) ->
+    couch_btree:lookup(Db#db.docinfo_by_Id_btree, Ids).
+
+% get_db_info/1: basic database statistics as a proplist.
+get_db_info(MainPid) when is_pid(MainPid) ->
+    get_db_info(get_db(MainPid));
+get_db_info(#db{doc_count=Count, doc_del_count=DelCount, last_update_seq=SeqNum}) ->
+    InfoList = [
+        {doc_count, Count},
+        {doc_del_count, DelCount},
+        {last_update_seq, SeqNum}
+        ],
+    {ok, InfoList}.
+
+% update_doc/3: single-doc convenience wrapper around update_docs/3.
+update_doc(MainPid, Doc, Options) ->
+    {ok, [NewRev]} = update_docs(MainPid, [Doc], Options),
+    {ok, NewRev}.
+
+update_docs(MainPid, Docs) ->
+    update_docs(MainPid, Docs, []).
+    
+% group_alike_docs groups the sorted documents into sublist buckets, by id.
+% ([DocA, DocA, DocB, DocC], []) -> [[DocA, DocA], [DocB], [DocC]]
+% Within a bucket, docs end up in reverse of their sorted order.
+group_alike_docs(Docs) ->
+    Sorted = lists:sort(fun(#doc{id=A},#doc{id=B})-> A < B end, Docs),
+    group_alike_docs(Sorted, []).
+
+group_alike_docs([], Buckets) ->
+    lists:reverse(Buckets);
+group_alike_docs([Doc|Rest], []) ->
+    group_alike_docs(Rest, [[Doc]]);
+group_alike_docs([Doc|Rest], [Bucket|RestBuckets]) ->
+    [#doc{id=BucketId}|_] = Bucket,
+    case Doc#doc.id == BucketId of
+    true ->
+        % add to existing bucket
+        group_alike_docs(Rest, [[Doc|Bucket]|RestBuckets]);
+    false ->
+        % add to new bucket
+       group_alike_docs(Rest, [[Doc]|[Bucket|RestBuckets]])
+    end.
+    
+
+% prepare_doc_for_new_edit/4: validate that an edited doc descends from
+% a current leaf revision and extend its revs list with the full on-disk
+% revision path.  Throws 'conflict' when the edit does not apply.
+prepare_doc_for_new_edit(Db, #doc{id=Id,revs=[NewRev|PrevRevs]}=Doc, OldFullDocInfo, LeafRevsDict) ->
+    case PrevRevs of
+    [PrevRev|_] ->
+        case dict:find(PrevRev, LeafRevsDict) of
+        {ok, {Deleted, Sp, DiskRevs}} ->
+            case couch_doc:has_stubs(Doc) of
+            true ->
+                % fill in attachment stubs from the stored revision
+                DiskDoc = make_doc(Db, Id, Deleted, Sp, DiskRevs),
+                Doc2 = couch_doc:merge_stubs(Doc, DiskDoc),
+                Doc2#doc{revs=[NewRev|DiskRevs]};
+            false ->
+                Doc#doc{revs=[NewRev|DiskRevs]}
+            end;
+        error ->
+            % parent revision is not a current leaf
+            throw(conflict)
+        end;
+    [] ->
+        % new doc, and we have existing revs.
+        OldDocInfo = couch_doc:to_doc_info(OldFullDocInfo),
+        if OldDocInfo#doc_info.deleted ->
+            % existing doc is a deletion
+            % allow this new doc to be a later revision.
+            {_Deleted, _Sp, Revs} = dict:fetch(OldDocInfo#doc_info.rev, LeafRevsDict),
+            Doc#doc{revs=[NewRev|Revs]};
+        true ->
+            throw(conflict)
+        end
+    end.
+
+% update_docs/3: assign new revision ids, validate each doc against its
+% stored revision tree, flush attachments, then hand the buckets to the
+% server process for a serialized commit.  Returns the new rev ids in
+% input order, or throws whatever the server returned on failure.
+update_docs(MainPid, Docs, Options) ->
+    Docs2 = lists:map(
+        fun(#doc{id=Id,revs=Revs}=Doc) ->
+            case Id of
+            ?LOCAL_DOC_PREFIX ++ _ ->
+                % local docs use a simple incrementing integer rev
+                Rev = case Revs of [] -> 0; [Rev0|_] -> list_to_integer(Rev0) end,
+                Doc#doc{revs=[integer_to_list(Rev + 1)]};
+            _ ->
+                % normal docs get a fresh random revision id
+                Doc#doc{revs=[integer_to_list(couch_util:rand32()) | Revs]}
+            end
+        end, Docs),
+    DocBuckets = group_alike_docs(Docs2),
+    Ids = [Id || [#doc{id=Id}|_] <- DocBuckets],
+    Db = get_db(MainPid),
+
+    % first things first, lookup the doc by id and get the most recent
+
+    ExistingDocs = get_full_doc_infos(Db, Ids),
+
+    DocBuckets2 = lists:zipwith(
+        fun(Bucket, not_found) ->
+            % no existing revs, make sure no old revision is specified.
+            [throw(conflict) || #doc{revs=[_NewRev, _OldRev | _]} <- Bucket],
+            Bucket;
+        (Bucket, {ok, #full_doc_info{rev_tree=OldRevTree}=OldFullDocInfo}) ->
+            Leafs = couch_key_tree:get_all_leafs(OldRevTree),
+            LeafRevsDict = dict:from_list([{Rev, {Deleted, Sp, Revs}} || {Rev, {Deleted, Sp}, Revs} <- Leafs]),
+            [prepare_doc_for_new_edit(Db, Doc, OldFullDocInfo, LeafRevsDict) || Doc <- Bucket]
+        end,
+        DocBuckets, ExistingDocs),
+
+    % flush unwritten binaries to disk.
+    DocBuckets3 = [[doc_flush_binaries(Doc, Db#db.fd) || Doc <- Bucket] || Bucket <- DocBuckets2],
+
+    case gen_server:call(MainPid, {update_docs, DocBuckets3, Options}) of
+    ok ->
+        % return back the new rev ids, in the same order input.
+        {ok, [NewRev || #doc{revs=[NewRev|_]} <- Docs2]};
+    Else->
+        throw(Else)
+    end.
+
+save_docs(MainPid, Docs) ->
+    save_docs(MainPid, Docs, []).
+
+% save_docs/3: like update_docs/3 but without assigning new revision
+% ids or validating against the stored rev tree (used for replication-
+% style writes where revs are supplied by the caller).
+save_docs(MainPid, Docs, Options) ->
+    % flush unwritten binaries to disk.
+    Db = get_db(MainPid),
+    DocBuckets = group_alike_docs(Docs),
+    DocBuckets2 = [[doc_flush_binaries(Doc, Db#db.fd) || Doc <- Bucket] || Bucket <- DocBuckets],
+    ok = gen_server:call(MainPid, {update_docs, DocBuckets2, Options}).
+
+
+% doc_flush_binaries/2: ensure every attachment of Doc is written into
+% the stream of Fd, copying data from other fds or in-memory binaries as
+% needed.  Returns the doc with all attachments rewritten as
+% {Fd, StreamPointer, Len} triples.
+doc_flush_binaries(Doc, Fd) ->
+    % calc size of binaries to write out
+    Bins = Doc#doc.attachments,
+    PreAllocSize = lists:foldl(
+        fun(BinValue, SizeAcc) ->
+            case BinValue of
+            {_Key, {_Type, {Fd0, _StreamPointer, _Len}}} when Fd0 == Fd ->
+                % already written to our file, nothing to write
+                SizeAcc;
+            {_Key, {_Type, {_OtherFd, _StreamPointer, Len}}} ->
+                % written to a different file
+                SizeAcc + Len;
+            {_Key, {_Type, Bin}} when is_binary(Bin) ->
+                SizeAcc + size(Bin)
+            end
+        end,
+        0, Bins),
+
+    {ok, OutputStream} = couch_stream:open(Fd),
+    ok = couch_stream:ensure_buffer(OutputStream, PreAllocSize),
+
+    NewBins = lists:map(
+        fun({Key, {Type, BinValue}}) ->
+            NewBinValue =
+            case BinValue of
+            {Fd0, StreamPointer, Len} when Fd0 == Fd ->
+                % already written to our file, nothing to write
+                {Fd, StreamPointer, Len};
+            {OtherFd, StreamPointer, Len} ->
+                % written to a different file (or a closed file
+                % instance, which will cause an error)
+                % copy chunk by chunk, remembering the pointer of the
+                % first chunk written as the new stream pointer.
+                {ok, {NewStreamPointer, Len}, _EndSp} =
+                couch_stream:foldl(OtherFd, StreamPointer, Len,
+                    fun(Bin, {BeginPointer, SizeAcc}) ->
+                        {ok, Pointer} = couch_stream:write(OutputStream, Bin),
+                        case SizeAcc of
+                        0 -> % this was the first write, record the pointer
+                            {ok, {Pointer, size(Bin)}};
+                        _ ->
+                            {ok, {BeginPointer, SizeAcc  + size(Bin)}}
+                        end
+                    end,
+                    {{0,0}, 0}),
+                {Fd, NewStreamPointer, Len};
+            Bin when is_binary(Bin), size(Bin) > 0 ->
+                {ok, StreamPointer} = couch_stream:write(OutputStream, Bin),
+                {Fd, StreamPointer, size(Bin)}
+            end,
+            {Key, {Type, NewBinValue}}
+        end, Bins),
+
+    {ok, _FinalPos} = couch_stream:close(OutputStream),
+
+    Doc#doc{attachments = NewBins}.
+
+% enum_docs_since/5: fold doc infos in update-sequence order, starting
+% after SinceSeq.
+enum_docs_since(MainPid, SinceSeq, Direction, InFun, Ctx) ->
+    Db = get_db(MainPid),
+    couch_btree:fold(Db#db.docinfo_by_seq_btree, SinceSeq + 1, Direction, InFun, Ctx).
+
+enum_docs_since(MainPid, SinceSeq, InFun, Acc) ->
+    enum_docs_since(MainPid, SinceSeq, fwd, InFun, Acc).
+
+% enum_docs/5: fold full doc infos in id order, starting at StartId.
+enum_docs(MainPid, StartId, Direction, InFun, InAcc) ->
+    Db = get_db(MainPid),
+    couch_btree:fold(Db#db.docinfo_by_Id_btree, StartId, Direction, InFun, InAcc).
+
+enum_docs(MainPid, StartId, InFun, Ctx) ->
+    enum_docs(MainPid, StartId, fwd, InFun, Ctx).
+
+% close/1: shut the server down and wait (via monitor) until the process
+% is actually gone before returning.
+close(MainPid) ->
+    Ref = erlang:monitor(process, MainPid),
+    unlink(MainPid),
+    exit(MainPid, normal),
+    receive
+    {'DOWN', Ref, process, MainPid, _Reason} ->
+        ok
+    end.
+
+
+% server functions
+
+% gen_server init: tie the fd's lifetime to this process, then either
+% write a fresh header (create) or read the existing one, and build the
+% #db{} state.  <<$g,$m,$k,0>> is the file's header magic/prefix.
+init({DbName, Fd, Options}) ->
+    link(Fd),
+    case lists:member(create, Options) of
+    true ->
+        % create a new header and writes it to the file
+        Header =  #db_header{},
+        ok = couch_file:write_header(Fd, <<$g, $m, $k, 0>>, Header),
+        ok = couch_file:sync(Fd),
+        init_main(DbName, Fd, Header);
+    false ->
+        {ok, Header} = couch_file:read_header(Fd, <<$g, $m, $k, 0>>),
+        init_main(DbName, Fd, Header)
+    end.
+
+% Split/join callbacks for the by-seq btree: flatten a #doc_info{} into
+% a compact {Seq, Tuple} pair for storage, and rebuild it on read.
+btree_by_seq_split(DocInfo) ->
+    #doc_info{
+        id = Id,
+        rev = Rev,
+        update_seq = Seq,
+        summary_pointer = Sp,
+        conflict_revs = Conflicts,
+        deleted_conflict_revs = DelConflicts,
+        deleted = Deleted} = DocInfo,
+    {Seq,{Id, Rev, Sp, Conflicts, DelConflicts, Deleted}}.
+
+btree_by_seq_join(Seq,{Id, Rev, Sp, Conflicts, DelConflicts, Deleted}) ->
+    #doc_info{
+        id = Id,
+        rev = Rev,
+        update_seq = Seq,
+        summary_pointer = Sp,
+        conflict_revs = Conflicts,
+        deleted_conflict_revs = DelConflicts,
+        deleted = Deleted}.
+
+% Split/join callbacks for the by-id btree: store only the update seq
+% and the revision tree under the doc id key.
+btree_by_name_split(#full_doc_info{id=Id, update_seq=Seq, rev_tree=Tree}) ->
+    {Id, {Seq, Tree}}.
+
+btree_by_name_join(Id, {Seq, Tree}) ->
+    #full_doc_info{id=Id, update_seq=Seq, rev_tree=Tree}.
+    
+
+% init_main/3: build the live #db{} state from a header -- open the
+% summary stream and the three btrees, then spawn the dedicated writer
+% process (update_loop) linked to this server.
+init_main(DbName, Fd, Header) ->
+    {ok, SummaryStream} = couch_stream:open(Header#db_header.summary_stream_state, Fd),
+    ok = couch_stream:set_min_buffer(SummaryStream, 10000),
+    {ok, IdBtree} = couch_btree:open(Header#db_header.docinfo_by_Id_btree_state, Fd,
+        [{split, fun(V) -> btree_by_name_split(V) end},
+        {join, fun(K,V) -> btree_by_name_join(K,V) end}] ),
+    {ok, SeqBtree} = couch_btree:open(Header#db_header.docinfo_by_seq_btree_state, Fd,
+            [{split, fun(V) -> btree_by_seq_split(V) end},
+            {join, fun(K,V) -> btree_by_seq_join(K,V) end}] ),
+    {ok, LocalDocsBtree} = couch_btree:open(Header#db_header.local_docs_btree_state, Fd),
+
+    Db = #db{
+        main_pid=self(),
+        fd=Fd,
+        header=Header,
+        summary_stream = SummaryStream,
+        docinfo_by_Id_btree = IdBtree,
+        docinfo_by_seq_btree = SeqBtree,
+        local_docs_btree = LocalDocsBtree,
+        last_update_seq = Header#db_header.last_update_seq,
+        doc_count = Header#db_header.doc_count,
+        doc_del_count = Header#db_header.doc_del_count,
+        name = DbName
+        },
+
+    UpdatePid = spawn_link(couch_db, start_update_loop, [Db]),
+
+    {ok, Db#db{update_pid=UpdatePid}}.
+
+% gen_server terminate: stop the writer loop, then close the file.
+terminate(_Reason, Db) ->
+    Db#db.update_pid ! close,
+    couch_file:close(Db#db.fd).
+
+% Writes are forwarded to the update process; the reply is deferred
+% (noreply) and sent later by update_loop via gen_server:reply/2.
+handle_call({update_docs, DocActions, Options}, From, #db{update_pid=Updater}=Db) ->
+    Updater ! {From, update_docs, DocActions, Options},
+    {noreply, Db};
+handle_call(get_db, _From, Db) ->
+    {reply, {ok, Db}, Db};
+% the update process installs the post-commit #db{} state here
+handle_call({db_updated, NewDb}, _From, _OldDb) ->
+    {reply, ok, NewDb}.
+
+
+handle_cast(foo, Main) ->
+    {noreply, Main}.
+
+%%% Internal function %%%
+
+start_update_loop(Db) ->
+    update_loop(Db#db{update_pid=self()}).
+
+% update_loop/1: the single-writer loop.  Serializes all commits; on
+% success it pushes the new #db{} back to the main server, replies to
+% the original caller, and fires an update notification.  The old-style
+% `catch` turns a thrown 'conflict' from update_docs_int into the bare
+% atom matched below; any other error crashes this (linked) process.
+update_loop(Db) ->
+    receive
+    {OrigFrom, update_docs, DocActions, Options} ->
+        case (catch update_docs_int(Db, DocActions, Options)) of
+        {ok, Db2} ->
+            ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
+            gen_server:reply(OrigFrom, ok),
+            couch_db_update_notifier:notify({updated, Db2#db.name}),
+            update_loop(Db2);
+        conflict ->
+            gen_server:reply(OrigFrom, conflict),
+            update_loop(Db);
+        Error ->
+            exit(Error) % we crashed
+        end;
+    close ->
+        % terminate loop
+        exit(normal)
+    end.
+
+% get_db/1: fetch the current #db{} snapshot from the main server.
+get_db(MainPid) ->
+    {ok, Db} = gen_server:call(MainPid, get_db),
+    Db.
+
+% open_doc_revs_int/4: open the requested revisions (or all leaves) of
+% Id.  Returns {ok, Results} where each result is {ok, #doc{}} or
+% {{not_found, missing}, Rev}.
+open_doc_revs_int(Db, Id, Revs, Options) ->
+    case get_full_doc_info(Db, Id) of
+    {ok, #full_doc_info{rev_tree=RevTree}} ->
+        {FoundRevs, MissingRevs} =
+        case Revs of
+        all ->
+            {couch_key_tree:get_all_leafs(RevTree), []};
+        _ ->
+            case lists:member(latest, Options) of
+            true ->
+                % resolve each requested rev to its latest leaf descendant
+                couch_key_tree:get_key_leafs(RevTree, Revs);
+            false ->
+                couch_key_tree:get(RevTree, Revs)
+            end
+        end,
+        FoundResults =
+        lists:map(fun({Rev, Value, FoundRevPath}) ->
+            case Value of
+            0 ->
+                % we have the rev in our list but know nothing about it
+                {{not_found, missing}, Rev};
+            {IsDeleted, SummaryPtr} ->
+                {ok, make_doc(Db, Id, IsDeleted, SummaryPtr, FoundRevPath)}
+            end
+        end, FoundRevs),
+        Results = FoundResults ++ [{{not_found, missing}, MissingRev} || MissingRev <- MissingRevs],
+        {ok, Results};
+    not_found when Revs == all ->
+        {ok, []};
+    not_found ->
+        {ok, [{{not_found, missing}, Rev} || Rev <- Revs]}
+    end.
+
+% open_doc_int/3: open a doc given either a local-doc id, a #doc_info{},
+% a #full_doc_info{}, or a plain id (which is looked up first).
+open_doc_int(Db, ?LOCAL_DOC_PREFIX ++ _ = Id, _Options) ->
+    case couch_btree:lookup(Db#db.local_docs_btree, [Id]) of
+    [{ok, {_, {Rev, BodyData}}}] ->
+        {ok, #doc{id=Id, revs=[integer_to_list(Rev)], body=BodyData}};
+    [not_found] ->
+        {not_found, missing}
+    end;
+open_doc_int(Db, #doc_info{id=Id,rev=Rev,deleted=IsDeleted,summary_pointer=Sp}=DocInfo, Options) ->
+    Doc = make_doc(Db, Id, IsDeleted, Sp, [Rev]),
+    {ok, Doc#doc{meta=doc_meta_info(DocInfo, [], Options)}};
+open_doc_int(Db, #full_doc_info{id=Id,rev_tree=RevTree}=FullDocInfo, Options) ->
+    % winning revision plus its full ancestry path from the rev tree
+    #doc_info{deleted=IsDeleted,rev=Rev, summary_pointer=Sp} = DocInfo =
+        couch_doc:to_doc_info(FullDocInfo),
+    {[{_Rev,_Value, Revs}], []} = couch_key_tree:get(RevTree, [Rev]),
+    Doc = make_doc(Db, Id, IsDeleted, Sp, Revs),
+    {ok, Doc#doc{meta=doc_meta_info(DocInfo, RevTree, Options)}};
+open_doc_int(Db, Id, Options) ->
+    case get_full_doc_info(Db, Id) of
+    {ok, FullDocInfo} ->
+        open_doc_int(Db, FullDocInfo, Options);
+    not_found ->
+        throw({not_found, missing})
+    end.
+
+% doc_meta_info/3: build the doc's meta proplist from the requested
+% Options: revs_info (revision history), conflicts, deleted_conflicts.
+doc_meta_info(DocInfo, RevTree, Options) ->
+    case lists:member(revs_info, Options) of
+    false -> [];
+    true ->
+        {[RevPath],[]} =
+            couch_key_tree:get_full_key_paths(RevTree, [DocInfo#doc_info.rev]),
+        [{revs_info, [{Rev, Deleted} || {Rev, {Deleted, _Sp0}} <- RevPath]}]
+    end ++
+    case lists:member(conflicts, Options) of
+    false -> [];
+    true ->
+        case DocInfo#doc_info.conflict_revs of
+        [] -> [];
+        _ -> [{conflicts, DocInfo#doc_info.conflict_revs}]
+        end
+    end ++
+    case lists:member(deleted_conflicts, Options) of
+    false -> [];
+    true ->
+        case DocInfo#doc_info.deleted_conflict_revs of
+        [] -> [];
+        _ -> [{deleted_conflicts, DocInfo#doc_info.deleted_conflict_revs}]
+        end
+    end.
+
+% rev tree functions
+
+% doc_to_tree/1: turn a doc's revision path (newest first) into a
+% single-branch rev tree with the doc itself at the leaf and empty
+% placeholder values at the interior (ancestor) nodes.
+doc_to_tree(Doc) ->
+    doc_to_tree(Doc, lists:reverse(Doc#doc.revs)).
+
+doc_to_tree(Doc, [RevId]) ->
+    [{RevId, Doc, []}];
+doc_to_tree(Doc, [RevId | Rest]) ->
+    [{RevId, [], doc_to_tree(Doc, Rest)}].
+
+% make_doc/5: materialize a #doc{} from its stored summary.  A nil
+% summary pointer yields an empty body (e.g. an unfetched stub);
+% attachment entries are rewired to reference this db's fd.
+make_doc(Db, Id, Deleted, SummaryPointer, RevisionPath) ->
+    {BodyData, BinValues} =
+    case SummaryPointer of
+    nil ->
+        {[], []};
+    _ ->
+        {ok, {BodyData0, BinValues0}} = couch_stream:read_term(Db#db.summary_stream, SummaryPointer),
+        {BodyData0, [{Name, {Type, {Db#db.fd, Sp, Len}}} || {Name, {Type, Sp, Len}} <- BinValues0]}
+    end,
+    #doc{
+        id = Id,
+        revs = RevisionPath,
+        body = BodyData,
+        attachments = BinValues,
+        deleted = Deleted
+        }.
+
+% flush_trees/3: walk each rev tree and replace any in-memory #doc{}
+% leaf values with {IsDeleted, SummaryPointer} by writing the summary
+% (body + attachment pointers) into the db's summary stream.
+flush_trees(_Db, [], AccFlushedTrees) ->
+    {ok, lists:reverse(AccFlushedTrees)};
+flush_trees(Db, [Unflushed | RestUnflushed], AccFlushed) ->
+       Flushed = couch_key_tree:map(
+        fun(_Rev, Value) ->
+            case Value of
+            #doc{attachments=Atts,deleted=IsDeleted}=Doc ->
+                % this node value is actually an unwritten document summary,
+                % write to disk.
+
+                % convert bins, removing the FD.
+                % All bins should have been flushed to disk already.
+                Bins = [{BinName, {BinType, BinSp, BinLen}} || {BinName, {BinType, {_Fd, BinSp, BinLen}}} <- Atts],
+                {ok, NewSummaryPointer} = couch_stream:write_term(Db#db.summary_stream, {Doc#doc.body, Bins}),
+                {IsDeleted, NewSummaryPointer};
+            _ ->
+                % already flushed (or a placeholder); keep as-is
+                Value
+            end
+        end, Unflushed),
+    flush_trees(Db, RestUnflushed, [Flushed | AccFlushed]).
+
+% Merge each group of new docs into its corresponding old revision tree.
+% DocsList and OldTrees are parallel lists; returns {ok, MergedTrees} in
+% the original order. May throw conflict when NoConflicts is true.
+merge_rev_trees(_NoConflicts, [], [], AccNewTrees) ->
+    {ok, lists:reverse(AccNewTrees)};
+merge_rev_trees(NoConflicts, [NewDocs | RestDocsList],
+        [OldTree | RestOldTrees], AccNewTrees) ->
+    % first fold all updates for this id into a single update tree
+    UpdatesRevTree = lists:foldl(
+        fun(NewDoc, AccTree) ->
+            couch_key_tree:merge(AccTree, doc_to_tree(NewDoc))
+        end,
+        [], NewDocs),
+    NewRevTree = couch_key_tree:merge(OldTree, UpdatesRevTree),
+    % NOTE(review): this guard only fires when OldTree == [] (a brand-new
+    % doc). In that case OldConflicts is 0 and NewConflicts is >= 1, so
+    % no_conflicts would reject every newly created document. Presumably
+    % `OldTree /= []` was intended -- confirm against callers.
+    if NoConflicts andalso OldTree == [] ->
+        OldConflicts = couch_key_tree:count_leafs(OldTree),
+        NewConflicts = couch_key_tree:count_leafs(NewRevTree),
+        if NewConflicts > OldConflicts ->
+            throw(conflict);
+        true -> ok
+        end;
+    true -> ok
+    end,
+    merge_rev_trees(NoConflicts, RestDocsList, RestOldTrees, [NewRevTree | AccNewTrees]).
+
+% Build the index entries for freshly merged rev trees. Ids and RevTrees
+% are parallel lists; each id is assigned the next update sequence number
+% and counted as either a live or a deleted document. Returns the final
+% sequence, the live/deleted counts, and the by-id (#full_doc_info) and
+% by-seq (#doc_info) entries (both accumulated newest-first).
+new_index_entries([], [], Seq, DocCount, DelCount, AccById, AccBySeq) ->
+    {ok, Seq, DocCount, DelCount, AccById, AccBySeq};
+new_index_entries([Id|RestIds], [RevTree|RestTrees], Seq0, DocCount, DelCount, AccById, AccBySeq) ->
+    Seq = Seq0 + 1,
+    FullDocInfo = #full_doc_info{id=Id, update_seq=Seq, rev_tree=RevTree},
+    % to_doc_info picks the winning leaf; its deleted flag drives the counts
+    #doc_info{deleted=Deleted} = DocInfo = couch_doc:to_doc_info(FullDocInfo),
+    {DocCount2, DelCount2} =
+    if Deleted -> {DocCount, DelCount + 1};
+    true -> {DocCount + 1, DelCount} 
+    end,
+    new_index_entries(RestIds, RestTrees, Seq, DocCount2, DelCount2, [FullDocInfo|AccById], [DocInfo|AccBySeq]).
+
+% Core write path. DocsList is a list of per-id doc groups. Local
+% ("_local/") docs are split off and written via update_local_docs/2;
+% the rest are merged into their revision trees, flushed to the summary
+% stream, and indexed by id and by sequence. Db-wide doc counts are
+% adjusted by the delta between old and new live/deleted states.
+% Options: no_conflicts (may throw conflict), delay_commit (skip the
+% fsync + header write). Returns {ok, Db'}.
+update_docs_int(Db, DocsList, Options) ->
+    #db{
+        docinfo_by_Id_btree = DocInfoByIdBTree,
+        docinfo_by_seq_btree = DocInfoBySeqBTree,
+        last_update_seq = LastSeq,
+        doc_count = FullDocCount,
+        doc_del_count = FullDelCount
+        } = Db,
+
+    % separate out the NonRep documents from the rest of the documents
+    {DocsList2, NonRepDocs} = lists:foldl(
+        fun([#doc{id=Id}=Doc | Rest]=Docs, {DocsListAcc, NonRepDocsAcc}) ->
+            case Id of
+            ?LOCAL_DOC_PREFIX ++ _ when Rest==[] ->
+                % when saving NR (non rep) documents, you can only save a single rev
+                {DocsListAcc, [Doc | NonRepDocsAcc]};
+            Id->
+                {[Docs | DocsListAcc], NonRepDocsAcc}
+            end
+        end, {[], []}, DocsList),
+    
+    % Ids is parallel to DocsList2 (and to the tree lists derived below)
+    Ids = [Id || [#doc{id=Id}|_] <- DocsList2], 
+    
+    % lookup up the existing documents, if they exist.
+    OldDocLookups = couch_btree:lookup(DocInfoByIdBTree, Ids),
+    OldDocTrees = lists:map(
+        fun({ok, #full_doc_info{rev_tree=OldRevTree}}) ->
+            OldRevTree;
+        (not_found) ->
+            []
+        end,
+        OldDocLookups),
+    
+    % count how many of the ids being updated already exist, split into
+    % live vs deleted, so the db-wide counters can be adjusted below
+    {OldCount, OldDelCount} = lists:foldl(
+        fun({ok, FullDocInfo}, {OldCountAcc, OldDelCountAcc}) ->
+            case couch_doc:to_doc_info(FullDocInfo) of
+            #doc_info{deleted=false} ->
+                {OldCountAcc + 1, OldDelCountAcc};
+            _ ->
+                {OldCountAcc , OldDelCountAcc + 1}
+            end;
+        (not_found, Acc) ->
+            Acc
+        end, {0, 0}, OldDocLookups),
+    
+    % Merge the new docs into the revision trees.
+    NoConflicts = lists:member(no_conflicts, Options),
+    {ok, NewRevTrees} = merge_rev_trees(NoConflicts, DocsList2, OldDocTrees, []),
+    
+    % old by-seq entries are superseded; remove them when adding the new ones
+    RemoveSeqs = [ OldSeq || {ok, #full_doc_info{update_seq=OldSeq}} <- OldDocLookups],
+    
+    % All regular documents are now ready to write.
+    
+    % Try to write the local documents first, a conflict might be generated
+    {ok, Db2}  = update_local_docs(Db, NonRepDocs),
+    
+    % Write out the documents summaries (they are stored in the nodes of the rev trees)
+    {ok, FlushedRevTrees} = flush_trees(Db2, NewRevTrees, []),
+    
+    {ok, NewSeq, NewDocsCount, NewDelCount, InfoById, InfoBySeq} =
+        new_index_entries(Ids, FlushedRevTrees, LastSeq, 0, 0, [], []),
+
+    % and the indexes to the documents
+    {ok, DocInfoBySeqBTree2} = couch_btree:add_remove(DocInfoBySeqBTree, InfoBySeq, RemoveSeqs),
+    {ok, DocInfoByIdBTree2} = couch_btree:add_remove(DocInfoByIdBTree, InfoById, []),
+
+    Db3 = Db2#db{
+        docinfo_by_Id_btree = DocInfoByIdBTree2,
+        docinfo_by_seq_btree = DocInfoBySeqBTree2,
+        last_update_seq = NewSeq,
+        doc_count = FullDocCount + NewDocsCount - OldCount,
+        doc_del_count = FullDelCount + NewDelCount - OldDelCount
+        },
+
+    % commit now unless the caller asked to batch commits
+    case lists:member(delay_commit, Options) of
+    true ->
+        {ok, Db3};
+    false ->
+        commit_outstanding(Db3)
+    end.
+
+% Write non-replicated ("_local/") documents. Local docs use a simple
+% integer revision scheme: each update must be based on the current rev
+% (stored rev == incoming rev - 1, or 0 when absent/new), otherwise a
+% conflict is thrown. Deletes remove the entry outright; updates store
+% {Id, {NewRev, Body}} in the local-docs btree.
+update_local_docs(#db{local_docs_btree=Btree}=Db, Docs) ->
+    Ids = [Id || #doc{id=Id} <- Docs],
+    OldDocLookups = couch_btree:lookup(Btree, Ids),
+    BtreeEntries = lists:zipwith(
+        fun(#doc{id=Id,deleted=Delete,revs=Revs,body=Body}, OldDocLookup) ->
+            % incoming rev is an integer encoded as a string; the update
+            % must be based on the previous revision number
+            BasedOnRev =
+            case Revs of
+                [] -> 0;
+                [RevStr|_] -> list_to_integer(RevStr) - 1
+            end,
+            OldRev =
+            case OldDocLookup of
+                {ok, {_, {OldRev0, _}}} -> OldRev0;
+                not_found -> 0
+            end,
+            case OldRev == BasedOnRev of
+            true ->
+                case Delete of
+                    false -> {update, {Id, {OldRev+1, Body}}};
+                    true  -> {remove, Id}
+                end;
+            false ->
+                throw(conflict)
+            end
+            
+        end, Docs, OldDocLookups),
+
+    BtreeIdsRemove = [Id || {remove, Id} <- BtreeEntries],
+    BtreeIdsUpdate = [ByIdDocInfo || {update, ByIdDocInfo} <- BtreeEntries],
+
+    {ok, Btree2} =
+        couch_btree:add_remove(Btree, BtreeIdsUpdate, BtreeIdsRemove),
+
+    {ok, Db#db{local_docs_btree = Btree2}}.
+
+
+
+% Durably commit all outstanding writes: fsync the data, write a new
+% file header capturing the current stream/btree states and counters,
+% then fsync again so the header itself is on disk. The double sync
+% ensures the data a header points at is persisted before the header.
+commit_outstanding(#db{fd=Fd, header=Header} = Db) ->
+    ok = couch_file:sync(Fd), % commit outstanding data
+    Header2 = Header#db_header{
+        last_update_seq = Db#db.last_update_seq,
+        summary_stream_state = couch_stream:get_state(Db#db.summary_stream),
+        docinfo_by_seq_btree_state = couch_btree:get_state(Db#db.docinfo_by_seq_btree),
+        docinfo_by_Id_btree_state = couch_btree:get_state(Db#db.docinfo_by_Id_btree),
+        local_docs_btree_state = couch_btree:get_state(Db#db.local_docs_btree),
+        doc_count = Db#db.doc_count,
+        doc_del_count = Db#db.doc_del_count
+        },
+    % <<$g,$m,$k,0>> is the header's magic/signature prefix
+    ok = couch_file:write_header(Fd, <<$g, $m, $k, 0>>, Header2),
+    ok = couch_file:sync(Fd), % commit header to disk
+    Db2 = Db#db{
+        header = Header2
+        },
+    {ok, Db2}.
+
+
+% Boilerplate OTP callbacks: no state migration on code upgrade, and
+% unexpected messages are ignored.
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+handle_info(_Info, State) ->
+    {noreply, State}.
+    
+    

Added: incubator/couchdb/trunk/src/couchdb/couch_db.hrl
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/couchdb/couch_db.hrl?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/couchdb/couch_db.hrl (added)
+++ incubator/couchdb/trunk/src/couchdb/couch_db.hrl Fri Mar 28 16:32:19 2008
@@ -0,0 +1,56 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+% Id prefix for non-replicated ("local") documents.
+-define(LOCAL_DOC_PREFIX, "_local/").
+% Design-document id prefix, without and with the trailing slash.
+-define(DESIGN_DOC_PREFIX0, "_design").
+-define(DESIGN_DOC_PREFIX, "_design/").
+
+% Content type assumed when an attachment does not declare one.
+-define(DEFAULT_ATTACHMENT_CONTENT_TYPE, "application/octet-stream").
+
+% Compact per-document index entry: the winning revision (as chosen by
+% couch_doc:to_doc_info/1), its on-disk summary location, and any
+% conflicting leaf revisions.
+-record(doc_info,
+    {
+    id = "",
+    rev = "",
+    update_seq = 0,
+    summary_pointer = nil, % nil, or a pointer into the summary stream
+    conflict_revs = [], % undeleted conflicting leaf revisions
+    deleted_conflict_revs = [], % deleted conflicting leaf revisions
+    deleted = false
+    }).
+
+% Full index entry as stored in the by-id btree: the complete revision
+% tree for a document plus its latest update sequence.
+-record(full_doc_info,
+    {id = "",
+    update_seq = 0,
+    rev_tree = []
+    }).
+
+% In-memory document representation.
+-record(doc,
+    {
+    id = "",
+    revs = [], % in the form [{RevId, IsAvailable}, ...]
+
+    % the json body object.
+    body = {obj, []},
+
+    % each attachment contains:
+    %    {data, Type, <<binary>>}
+    % or:
+    %    {pointer, Type, {FileHandle, StreamPointer, Length}}
+    attachments = [],
+
+    deleted = false,
+
+    % key/value tuple of meta information, provided when using special options:
+    % couch_db:open_doc(Db, Id, Options).
+    meta = []
+    }).
+    

Added: incubator/couchdb/trunk/src/couchdb/couch_db_update_notifier.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/couchdb/couch_db_update_notifier.erl?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/couchdb/couch_db_update_notifier.erl (added)
+++ incubator/couchdb/trunk/src/couchdb/couch_db_update_notifier.erl Fri Mar 28 16:32:19 2008
@@ -0,0 +1,66 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+%
+% This causes an OS process to be spawned, and it is notified every time a
+% database is updated.
+%
+% The notifications are in the form of the database name sent as a line of
+% text to the OS process's stdout.
+%
+
+-module(couch_db_update_notifier).
+
+-behaviour(gen_event).
+
+-export([start_link/1, notify/1]).
+-export([init/1, terminate/2, handle_event/2, handle_call/2, handle_info/2, code_change/3,stop/1]).
+
+% NOTE(review): this macro expands to a bare catch clause and does not
+% appear to be used in this module -- confirm it is referenced elsewhere.
+-define(ERR_HANDLE, {Port, {exit_status, Status}} -> {stop, {unknown_error, Status}, {unknown_error, Status}, Port}).
+
+% Attach a new notifier handler to the couch_db_update event manager,
+% supervised via couch_event_sup. Exec is either an external command
+% string or a fun / {Fun, Acc} callback (see init/1).
+start_link(Exec) ->
+    couch_event_sup:start_link(couch_db_update, {couch_db_update_notifier, make_ref()}, Exec).
+
+% Broadcast a db event to all registered handlers.
+notify(Event) ->
+    gen_event:notify(couch_db_update, Event).
+
+% Detach the handler owned by the given supervisor pid.
+stop(Pid) ->
+    couch_event_sup:stop(Pid).
+
+% gen_event init: a string argument is treated as an executable and
+% spawned as a port; anything else (a fun or {Fun, Acc}) is kept as the
+% handler state directly and invoked per event in handle_event/2.
+init(Exec) when is_list(Exec) -> % an exe
+    Port = open_port({spawn, Exec}, [stream, exit_status, hide]),
+    {ok, Port};
+init(Else) ->
+    {ok, Else}.
+
+terminate(_Reason, _Port) ->
+    ok.
+
+% Events are dispatched on the shape of the handler state installed by
+% init/1: an arity-1 fun, a {Fun, Acc} pair (Acc is threaded through),
+% or a port, in which case the event is encoded as JSON and written as a
+% line to the external process.
+handle_event(Event, Fun) when is_function(Fun, 1) ->
+    Fun(Event),
+    {ok, Fun};
+handle_event(Event, {Fun, FunAcc}) ->
+    FunAcc2 = Fun(Event, FunAcc),
+    {ok, {Fun, FunAcc2}};
+handle_event({EventAtom, DbName}, Port) ->
+    Obj = {obj, [{type, atom_to_list(EventAtom)}, {db, DbName}]},
+    true = port_command(Port, cjson:encode(Obj) ++ "\n"),
+    {ok, Port}.
+
+handle_call(_Request, State) ->
+    {ok, ok, State}.
+
+% If the port's external process exits, remove this handler.
+handle_info({'EXIT', _, _Reason}, _Port) ->
+    remove_handler.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.

Added: incubator/couchdb/trunk/src/couchdb/couch_doc.erl
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/couchdb/couch_doc.erl?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/couchdb/couch_doc.erl (added)
+++ incubator/couchdb/trunk/src/couchdb/couch_doc.erl Fri Mar 28 16:32:19 2008
@@ -0,0 +1,199 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License.  You may obtain a copy of
+% the License at
+%
+%   http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(couch_doc).
+
+-export([get_view_functions/1, is_special_doc/1,to_doc_info/1]).
+-export([bin_foldl/3,bin_size/1,bin_to_binary/1]).
+-export([from_json_obj/1,to_json_obj/2,has_stubs/1, merge_stubs/2]).
+
+-include("couch_db.hrl").
+
+% Render a #doc as a cjson {obj, Props} term. The result always has
+% "_id"; "_rev" when any revision exists; the body fields (or
+% "_deleted":true for a deleted doc); then optional sections driven by
+% Options (revs) and by the doc's meta entries (revs_info, conflicts,
+% deleted_conflicts); and finally "_attachments" -- inlined base64 data
+% when the attachments option is set, otherwise stubs with lengths.
+to_json_obj(#doc{id=Id,deleted=Del,body=Body,revs=Revs,meta=Meta}=Doc,Options)->
+    {obj, [{"_id", Id}] ++
+        case Revs of
+        [] -> [];
+        _ -> [{"_rev", lists:nth(1, Revs)}]
+        end ++
+        case Del of
+        false ->
+            % live doc: splice the body's own properties in at top level
+            {obj, BodyProps} = Body,
+            BodyProps;
+        true ->
+            % deleted doc: the body is replaced by a tombstone marker
+            [{"_deleted", true}]
+        end ++
+        case lists:member(revs, Options) of
+        false -> [];
+        true ->
+            [{"_revs", list_to_tuple(Revs)}]
+        end ++
+        % expand meta entries gathered by the open_doc options
+        lists:map(
+            fun({revs_info, RevsInfo}) ->
+                JsonRevsInfo =
+                [{obj, [{rev, Rev}, {status, atom_to_list(Status)}]} ||
+                    {Rev, Status} <- RevsInfo],
+                {"_revs_info", list_to_tuple(JsonRevsInfo)};
+            ({conflicts, Conflicts}) ->
+                {"_conflicts", list_to_tuple(Conflicts)};
+            ({deleted_conflicts, Conflicts}) ->
+                {"_deleted_conflicts", list_to_tuple(Conflicts)}
+            end, Meta) ++
+        case lists:member(attachments, Options) of
+        true -> % return the full rev list and the binaries as strings.
+            BinProps = lists:map(
+                fun({Name, {Type, BinValue}}) ->
+                    {Name, {obj, [{"content-type", Type},
+                                    {"data", couch_util:encodeBase64(bin_to_binary(BinValue))}]}}
+                end,
+                Doc#doc.attachments),
+            case BinProps of
+            [] -> [];
+            _ -> [{"_attachments", {obj, BinProps}}]
+            end;
+        false ->
+            % without the attachments option, emit stubs with sizes only
+            BinProps = lists:map(
+                fun({Name, {Type, BinValue}}) ->
+                    {Name, {obj, [{"stub", true}, {"content-type", Type},
+                                    {"length", bin_size(BinValue)}]}}
+                end,
+                Doc#doc.attachments),
+            case BinProps of
+                [] -> [];
+                _ -> [{"_attachments", {obj, BinProps}}]
+            end
+        end
+        }.
+
+% Parse a cjson {obj, Props} term into a #doc. Attachments are decoded
+% from base64 (or kept as stub markers); any underscore-prefixed member
+% outside the allowed set causes a {doc_validation, Msg} throw; revisions
+% come from "_revs" when present, else a single "_rev"; all remaining
+% non-underscore properties become the document body.
+from_json_obj({obj, Props}) ->
+    {obj,JsonBins} = proplists:get_value("_attachments", Props, {obj, []}),
+    Bins = lists:flatmap(fun({Name, {obj, BinProps}}) ->
+        case proplists:get_value("stub", BinProps) of
+        true ->
+            % stub: data lives on disk already; resolved via merge_stubs/2
+            [{Name, stub}];
+        _ ->
+            Value = proplists:get_value("data", BinProps),
+            Type = proplists:get_value("content-type", BinProps,
+                    ?DEFAULT_ATTACHMENT_CONTENT_TYPE),
+            [{Name, {Type, couch_util:decodeBase64(Value)}}]
+        end
+    end, JsonBins),
+    AllowedSpecialMembers = ["id", "revs", "rev", "attachments", "revs_info",
+        "conflicts", "deleted_conflicts", "deleted"],
+    % reject unknown "_"-prefixed members (comprehension used for effect)
+    [case lists:member(Name, AllowedSpecialMembers) of
+        true ->
+            ok;
+        false ->
+            throw({doc_validation, io_lib:format("Bad special document member: _~s", [Name])})
+        end
+         || {[$_|Name], _Value} <- Props],
+    Revs =
+    case tuple_to_list(proplists:get_value("_revs", Props, {})) of
+    [] ->
+        case proplists:get_value("_rev", Props) of
+        undefined -> [];
+        Rev -> [Rev]
+        end;
+    Revs0 ->
+        Revs0
+    end,
+    #doc{
+        id = proplists:get_value("_id", Props, ""),
+        revs = Revs,
+        deleted = proplists:get_value("_deleted", Props, false),
+        % body keeps every property not starting with "_"
+        body = {obj, [{Key, Value} || {[FirstChar|_]=Key, Value} <- Props, FirstChar /= $_]},
+        attachments = Bins
+        }.
+
+
+% Collapse a #full_doc_info's revision tree into a #doc_info. Leaves are
+% sorted descending by {not deleted, path depth, rev id}; the head of the
+% sort is the winning revision, and the remaining leaves are split into
+% undeleted conflicts followed by deleted conflicts.
+to_doc_info(#full_doc_info{id=Id,update_seq=Seq,rev_tree=Tree}) ->
+    LeafRevs = couch_key_tree:get_all_leafs(Tree),
+    SortedLeafRevs =
+    lists:sort(fun({RevIdA, {IsDeletedA, _}, PathA}, {RevIdB, {IsDeletedB, _}, PathB}) ->
+            % sort descending by {not deleted, then Depth, then RevisionId}
+            A = {not IsDeletedA, length(PathA), RevIdA},
+            B = {not IsDeletedB, length(PathB), RevIdB},
+            A > B
+        end,
+        LeafRevs),
+
+    % winner first; the rest are conflicts of one kind or the other
+    [{RevId, {IsDeleted, SummaryPointer}, _Path} | Rest] = SortedLeafRevs,
+
+    % the sort puts all undeleted leaves before deleted ones, so a single
+    % splitwith separates the two conflict groups
+    {ConflictRevTuples, DeletedConflictRevTuples} =
+        lists:splitwith(fun({_ConflictRevId, {IsDeleted1, _SummaryPointer}, _}) ->
+                not IsDeleted1
+            end, Rest),
+
+    ConflictRevs = [RevId1  || {RevId1, _, _} <- ConflictRevTuples],
+    DeletedConflictRevs = [RevId2   || {RevId2, _, _} <- DeletedConflictRevTuples],
+
+    #doc_info{
+        id=Id,
+        update_seq=Seq,
+        rev = RevId,
+        summary_pointer = SummaryPointer,
+        conflict_revs = ConflictRevs,
+        deleted_conflict_revs = DeletedConflictRevs,
+        deleted = IsDeleted
+        }.
+
+% True for design documents. Accepts either an id string (first clause,
+% a "_design/" prefix match) or a #doc record, whose id is re-examined.
+is_special_doc(?DESIGN_DOC_PREFIX ++ _ ) ->
+    true;
+is_special_doc(#doc{id=Id}) ->
+    is_special_doc(Id);
+is_special_doc(_) ->
+    false.
+
+% Attachment value helpers. A value is either an in-memory binary or an
+% on-disk {Fd, StreamPointer, Len} triple read via couch_stream.
+
+% Fold Fun over the attachment data; an in-memory binary is passed to
+% Fun in a single chunk.
+bin_foldl(Bin, Fun, Acc) when is_binary(Bin) ->
+    case Fun(Bin, Acc) of
+        {ok, Acc2} -> {ok, Acc2};
+        {done, Acc2} -> {ok, Acc2}
+    end;
+bin_foldl({Fd, Sp, Len}, Fun, Acc) ->
+    {ok, Acc2, _Sp2} = couch_stream:foldl(Fd, Sp, Len, Fun, Acc),
+    {ok, Acc2}.
+
+% Size in bytes without reading on-disk data.
+bin_size(Bin) when is_binary(Bin) ->
+    size(Bin);
+bin_size({_Fd, _Sp, Len}) ->
+    Len.
+
+% Materialize the full attachment as a binary (reads from disk if needed).
+bin_to_binary(Bin) when is_binary(Bin) ->
+    Bin;
+bin_to_binary({Fd, Sp, Len}) ->
+    {ok, Bin, _Sp2} = couch_stream:read(Fd, Sp, Len),
+    Bin.
+
+% Extract {Language, [{ViewName, Source}]} from a design doc's body.
+% Only string-valued view entries are kept; non-obj bodies yield none.
+get_view_functions(#doc{body={obj, Fields}}) ->
+    Lang = proplists:get_value("language", Fields, "text/javascript"),
+    {obj, Views} = proplists:get_value("views", Fields, {obj, []}),
+    {Lang, [{ViewName, Value} || {ViewName, Value} <- Views, is_list(Value)]};
+get_view_functions(_Doc) ->
+    none.
+
+% True if any attachment is an unresolved stub marker.
+has_stubs(#doc{attachments=Bins}) ->
+    has_stubs(Bins);
+has_stubs([]) ->
+    false;
+has_stubs([{_Name, stub}|_]) ->
+    true;
+has_stubs([_Bin|Rest]) ->
+    has_stubs(Rest).
+
+% Resolve stub attachments in StubsDoc against the attachments of the
+% on-disk doc; non-stub attachments are kept as-is. Note dict:fetch/2
+% crashes (badkey) if a stub names an attachment the disk doc lacks.
+merge_stubs(#doc{attachments=MemBins}=StubsDoc, #doc{attachments=DiskBins}) ->
+    BinDict = dict:from_list(DiskBins),
+    MergedBins = lists:map(
+        fun({Name, stub}) ->
+            {Name, dict:fetch(Name, BinDict)};
+        ({Name, Value}) ->
+            {Name, Value}
+        end, MemBins),
+    StubsDoc#doc{attachments= MergedBins}.

Added: incubator/couchdb/trunk/src/couchdb/couch_erl_driver.c
URL: http://svn.apache.org/viewvc/incubator/couchdb/trunk/src/couchdb/couch_erl_driver.c?rev=642432&view=auto
==============================================================================
--- incubator/couchdb/trunk/src/couchdb/couch_erl_driver.c (added)
+++ incubator/couchdb/trunk/src/couchdb/couch_erl_driver.c Fri Mar 28 16:32:19 2008
@@ -0,0 +1,160 @@
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License.  You may obtain a copy of the
+License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations under the License.
+
+*/
+
+// This file is the C port driver for Erlang. It provides a low-overhead
+// means of calling into C code; however, unlike the Fabric engine, coding
+// errors in this module can crash the entire Erlang server.
+
+#include "erl_driver.h"
+#include "unicode/ucol.h"
+#include "unicode/ucasemap.h"
+#ifndef WIN32
+#include <string.h> // for memcpy
+#endif
+
+// Per-port driver state: one case-sensitive and one case-insensitive
+// ICU collator, both owned by this port instance.
+typedef struct {
+    ErlDrvPort port;
+    UCollator* collNoCase;
+    UCollator* coll;
+} couch_drv_data;
+
+// Port teardown: close whichever collators were opened (fields may be
+// NULL when start failed part-way through) and free the state struct.
+static void couch_drv_stop(ErlDrvData data)
+{
+    couch_drv_data* pData = (couch_drv_data*)data;
+    if (pData->coll) {
+        ucol_close(pData->coll);
+    }
+    if (pData->collNoCase) {
+        ucol_close(pData->collNoCase);
+    }
+    driver_free((char*)pData);
+}
+
+// Port startup: allocate driver state and open two default-locale ICU
+// collators, downgrading the second to UCOL_PRIMARY strength so it
+// compares case-insensitively. On any ICU failure the partially built
+// state is released via couch_drv_stop and a generic error is returned.
+static ErlDrvData couch_drv_start(ErlDrvPort port, char *buff)
+{
+    UErrorCode status = U_ZERO_ERROR;
+    couch_drv_data* pData = (couch_drv_data*)driver_alloc(sizeof(couch_drv_data));
+
+    if (pData == NULL)
+        return ERL_DRV_ERROR_GENERAL;
+
+    // NULL the collator fields first so couch_drv_stop is safe to call
+    // at any failure point below
+    pData->port = port;
+    pData->coll = NULL;
+    pData->collNoCase = NULL;
+    pData->coll = ucol_open("", &status);
+
+    if (U_FAILURE(status)) {
+        couch_drv_stop((ErlDrvData)pData);
+        return ERL_DRV_ERROR_GENERAL;
+    }
+
+    pData->collNoCase = ucol_open("", &status);
+    if (U_FAILURE(status)) {
+        couch_drv_stop((ErlDrvData)pData);
+        return ERL_DRV_ERROR_GENERAL;
+    }
+
+    // PRIMARY strength ignores case (and accent) differences
+    ucol_setAttribute(pData->collNoCase, UCOL_STRENGTH, UCOL_PRIMARY, &status);
+    if (U_FAILURE(status)) {
+        couch_drv_stop((ErlDrvData)pData);
+        return ERL_DRV_ERROR_GENERAL;
+    }
+
+    return (ErlDrvData)pData;
+}
+
+// Copy a local result into the control-callback reply buffer, allocating
+// a driver binary when the provided buffer is absent or too small.
+// NOTE(review): on the allocation path *ppRetBuf is pointed at the
+// ErlDrvBinary struct itself and then memcpy'd into directly; the usual
+// erl_driver pattern copies into the binary's orig_bytes data area --
+// verify against the erl_driver docs for the targeted OTP version.
+static int return_control_result(void* pLocalResult, int localLen, char **ppRetBuf, int returnLen)
+{
+    if (*ppRetBuf == NULL || localLen > returnLen) {
+        *ppRetBuf = (char*)driver_alloc_binary(localLen);
+        if(*ppRetBuf == NULL) {
+            return -1;
+        }
+    }
+    memcpy(*ppRetBuf, pLocalResult, localLen);
+    return localLen;
+}
+
+// Control callback: collate two UTF-8 strings with ICU. The input buffer
+// holds both strings back to back, each prefixed by a native-endian
+// 32-bit byte length. Command 0 uses the case-sensitive collator,
+// command 1 the case-insensitive one; any other command is an error.
+// The single-byte reply encodes the ordering: 0 = lt, 1 = gt, 2 = eq.
+static int couch_drv_control(ErlDrvData drv_data, unsigned int command, const char *pBuf,
+             int bufLen, char **rbuf, int rlen)
+{
+    #define COLLATE 0
+    #define COLLATE_NO_CASE 1
+
+    couch_drv_data* pData = (couch_drv_data*)drv_data;
+
+    UErrorCode status = U_ZERO_ERROR;
+    int collResult;
+    char response;
+    UCharIterator iterA;
+    UCharIterator iterB;
+    int32_t length;
+
+    // 2 strings are in the buffer, consecutively
+    // The strings begin first with a 32 bit integer byte length, then the actual
+    // string bytes follow.
+
+    // first 32bits are the length
+    memcpy(&length, pBuf, sizeof(length));
+    pBuf += sizeof(length);
+
+    // point the iterator at it.
+    uiter_setUTF8(&iterA, pBuf, length);
+
+    pBuf += length; // now on to string b
+
+    // first 32bits are the length
+    memcpy(&length, pBuf, sizeof(length));
+    pBuf += sizeof(length);
+
+    // point the iterator at it.
+    uiter_setUTF8(&iterB, pBuf, length);
+
+    if (command == COLLATE)
+        collResult = ucol_strcollIter(pData->coll, &iterA, &iterB, &status);
+    else if (command == COLLATE_NO_CASE)
+        collResult = ucol_strcollIter(pData->collNoCase, &iterA, &iterB, &status);
+    else
+        return -1;
+
+    if (collResult < 0)
+        response = 0; //lt
+    else if (collResult > 0)
+        response = 1; //gt
+    else
+        response = 2; //eq
+
+    return return_control_result(&response, sizeof(response), rbuf, rlen);
+}
+
+// Driver dispatch table: only start, stop, and control are implemented;
+// all other entry points are unused for this driver.
+ErlDrvEntry couch_driver_entry = {
+        NULL,                                               /* F_PTR init, N/A */
+        couch_drv_start,                    /* L_PTR start, called when port is opened */
+        couch_drv_stop,                     /* F_PTR stop, called when port is closed */
+        NULL,                   /* F_PTR output, called when erlang has sent */
+        NULL,                                               /* F_PTR ready_input, called when input descriptor ready */
+        NULL,                                               /* F_PTR ready_output, called when output descriptor ready */
+        "couch_erl_driver",                          /* char *driver_name, the argument to open_port */
+        NULL,                                               /* F_PTR finish, called when unloaded */
+        NULL,                       /* Not used */
+        couch_drv_control,               /* F_PTR control, port_command callback */
+        NULL,                                               /* F_PTR timeout, reserved */
+        NULL                                                /* F_PTR outputv, reserved */
+};
+
+DRIVER_INIT(couch_erl_driver) /* must match name in driver_entry */
+{
+        return &couch_driver_entry;
+}

Propchange: incubator/couchdb/trunk/src/couchdb/couch_erl_driver.c
------------------------------------------------------------------------------
    svn:eol-style = native



Mime
View raw message