couchdb-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From dam...@apache.org
Subject svn commit: r777292 - in /couchdb/branches/tail_header: share/www/script/test/ src/couchdb/
Date Thu, 21 May 2009 22:16:58 GMT
Author: damien
Date: Thu May 21 22:16:57 2009
New Revision: 777292

URL: http://svn.apache.org/viewvc?rev=777292&view=rev
Log:
Added notes to source code where it deals with upgrading from old formats to new, added options
for when to fsync data and header, to allow optimization for the underlying FS and for code reliability
and simplification reverted to old style delayed commits where updates are lost if the erlang process
dies.

Modified:
    couchdb/branches/tail_header/share/www/script/test/compact.js
    couchdb/branches/tail_header/share/www/script/test/delayed_commits.js
    couchdb/branches/tail_header/src/couchdb/couch_db.erl
    couchdb/branches/tail_header/src/couchdb/couch_db.hrl
    couchdb/branches/tail_header/src/couchdb/couch_db_updater.erl
    couchdb/branches/tail_header/src/couchdb/couch_doc.erl
    couchdb/branches/tail_header/src/couchdb/couch_file.erl
    couchdb/branches/tail_header/src/couchdb/couch_stream.erl
    couchdb/branches/tail_header/src/couchdb/couch_view_group.erl

Modified: couchdb/branches/tail_header/share/www/script/test/compact.js
URL: http://svn.apache.org/viewvc/couchdb/branches/tail_header/share/www/script/test/compact.js?rev=777292&r1=777291&r2=777292&view=diff
==============================================================================
--- couchdb/branches/tail_header/share/www/script/test/compact.js (original)
+++ couchdb/branches/tail_header/share/www/script/test/compact.js Thu May 21 22:16:57 2009
@@ -15,7 +15,7 @@
   db.deleteDb();
   db.createDb();
   if (debug) debugger;
-  var docs = makeDocs(0, 10);
+  var docs = makeDocs(0, 20);
   db.bulkSave(docs);
 
   var binAttDoc = {
@@ -35,6 +35,7 @@
   for(var i in docs) {
       db.deleteDoc(docs[i]);
   }
+  T(db.ensureFullCommit().ok);
   var deletesize = db.info().disk_size;
   T(deletesize > originalsize);
 

Modified: couchdb/branches/tail_header/share/www/script/test/delayed_commits.js
URL: http://svn.apache.org/viewvc/couchdb/branches/tail_header/share/www/script/test/delayed_commits.js?rev=777292&r1=777291&r2=777292&view=diff
==============================================================================
--- couchdb/branches/tail_header/share/www/script/test/delayed_commits.js (original)
+++ couchdb/branches/tail_header/share/www/script/test/delayed_commits.js Thu May 21 22:16:57 2009
@@ -18,7 +18,7 @@
   
   // By default, couchdb doesn't fully commit documents to disk right away,
   // it waits about a second to batch the full commit flush along with any 
-  // other updates. If os crashes you may lose the most
+  // other updates. If it crashes or is restarted you may lose the most
   // recent commits.
   
   T(db.save({_id:"1",a:2,b:4}).ok);
@@ -26,26 +26,31 @@
   
   restartServer();
   
-  T(db.open("1") != null);
+  T(db.open("1") == null); // lost the update.
+  // note if we waited > 1 sec before the restart, the doc would likely
+  // commit.
+  
+  
+  // Retry the same thing but with full commits on.
   
   var db2 = new CouchDB("test_suite_db", {"X-Couch-Full-Commit":"true"});
   
-  T(db2.save({_id:"2",a:2,b:4}).ok);
-  T(db2.open("2") != null);
+  T(db2.save({_id:"1",a:2,b:4}).ok);
+  T(db2.open("1") != null);
   
   restartServer();
   
-  T(db2.open("2") != null);
+  T(db2.open("1") != null);
   
   // You can update but without committing immediately, and then ensure
   // everything is commited in the last step.
   
-  T(db.save({_id:"3",a:2,b:4}).ok);
-  T(db.open("3") != null);
+  T(db.save({_id:"2",a:2,b:4}).ok);
+  T(db.open("2") != null);
   T(db.ensureFullCommit().ok);
   restartServer();
   
-  T(db.open("3") != null);
+  T(db.open("2") != null);
   
   // However, it's possible even when flushed, that the server crashed between
   // the update and the commit, and you don't want to check to make sure
@@ -59,8 +64,8 @@
   
   var instanceStartTime = db.info().instance_start_time;
   
-  T(db.save({_id:"4",a:2,b:4}).ok);
-  T(db.open("4") != null);
+  T(db.save({_id:"3",a:2,b:4}).ok);
+  T(db.open("3") != null);
   
   restartServer();
   
@@ -68,14 +73,14 @@
   T(commitResult.ok && commitResult.instance_start_time != instanceStartTime);
   // start times don't match, meaning the server lost our change
   
-  T(db.open("4") != null);
+  T(db.open("3") == null); // yup lost it
   
   // retry with no server restart
   
   var instanceStartTime = db.info().instance_start_time;
   
-  T(db.save({_id:"5",a:2,b:4}).ok);
-  T(db.open("5") != null);
+  T(db.save({_id:"4",a:2,b:4}).ok);
+  T(db.open("4") != null);
   
   var commitResult = db.ensureFullCommit();
   T(commitResult.ok && commitResult.instance_start_time == instanceStartTime);
@@ -83,11 +88,11 @@
   
   restartServer();
   
-  T(db.open("5") != null);
+  T(db.open("4") != null);
   
   // Now test that when we exceed the max_dbs_open, pending commits are safely
   // written.
-  T(db.save({_id:"6",foo:"bar"}).ok);
+  T(db.save({_id:"5",foo:"bar"}).ok);
   var max = 2;
   run_on_modified_server(
     [{section: "couchdb",
@@ -100,7 +105,7 @@
         dbi.deleteDb();
         dbi.createDb();
       }
-      T(db.open("6").foo=="bar");
+      T(db.open("5").foo=="bar");
       for(var i=0; i<max+1; i++) {
         var dbi = new CouchDB("test_suite_db" + i);
         dbi.deleteDb();

Modified: couchdb/branches/tail_header/src/couchdb/couch_db.erl
URL: http://svn.apache.org/viewvc/couchdb/branches/tail_header/src/couchdb/couch_db.erl?rev=777292&r1=777291&r2=777292&view=diff
==============================================================================
--- couchdb/branches/tail_header/src/couchdb/couch_db.erl (original)
+++ couchdb/branches/tail_header/src/couchdb/couch_db.erl Thu May 21 22:16:57 2009
@@ -819,6 +819,7 @@
 read_doc(Fd, Pos) when is_integer(Pos) ->
     couch_file:pread_term(Fd, Pos);
 read_doc(Fd, OldStyleStreamPointer) ->
+    % 09 UPGRADE CODE
     couch_stream:old_read_term(Fd, OldStyleStreamPointer).
 
 

Modified: couchdb/branches/tail_header/src/couchdb/couch_db.hrl
URL: http://svn.apache.org/viewvc/couchdb/branches/tail_header/src/couchdb/couch_db.hrl?rev=777292&r1=777291&r2=777292&view=diff
==============================================================================
--- couchdb/branches/tail_header/src/couchdb/couch_db.hrl (original)
+++ couchdb/branches/tail_header/src/couchdb/couch_db.hrl Thu May 21 22:16:57 2009
@@ -114,7 +114,7 @@
 -record(db_header,
     {disk_version = ?LATEST_DISK_VERSION,  
      update_seq = 0,
-     unused,
+     unused = 0,
      fulldocinfo_by_id_btree_state = nil,
      docinfo_by_seq_btree_state = nil,
      local_docs_btree_state = nil,
@@ -144,7 +144,8 @@
     admins_ptr = nil,
     user_ctx = #user_ctx{},
     waiting_delayed_commit = nil,
-    revs_limit = 1000
+    revs_limit = 1000,
+    fsync_options = []
     }).
 
 

Modified: couchdb/branches/tail_header/src/couchdb/couch_db_updater.erl
URL: http://svn.apache.org/viewvc/couchdb/branches/tail_header/src/couchdb/couch_db_updater.erl?rev=777292&r1=777291&r2=777292&view=diff
==============================================================================
--- couchdb/branches/tail_header/src/couchdb/couch_db_updater.erl (original)
+++ couchdb/branches/tail_header/src/couchdb/couch_db_updater.erl Thu May 21 22:16:57 2009
@@ -29,13 +29,7 @@
         % delete any old compaction files that might be hanging around
         file:delete(Filepath ++ ".compact");
     false ->
-        ok = couch_file:upgrade_old_header(Fd, <<$g, $m, $k, 0>>),
-        case couch_config:get("couchdb", "sync_on_open", "true") of
-        "true" ->
-            ok = couch_file:sync(Fd);
-        _ ->
-            ok
-        end,
+        ok = couch_file:upgrade_old_header(Fd, <<$g, $m, $k, 0>>), % 09 UPGRADE CODE
         {ok, Header} = couch_file:read_header(Fd)
     end,
     
@@ -62,9 +56,8 @@
     end;
 handle_call(full_commit, _From, #db{waiting_delayed_commit=nil}=Db) ->
     {reply, ok, Db}; % no data waiting, return ok immediately
-handle_call(full_commit, _From,  #db{fd=Fd,update_seq=Seq}=Db) ->
-    ok = couch_file:sync(Fd),
-    {reply, ok, Db#db{waiting_delayed_commit=nil,committed_update_seq=Seq}}; % commit the data and return ok
+handle_call(full_commit, _From,  #db{fd=Fd}=Db) ->
+    {reply, ok, commit_data(Db)}; % commit the data and return ok
 handle_call(increment_update_seq, _From, Db) ->
     Db2 = commit_data(Db#db{update_seq=Db#db.update_seq+1}),
     ok = gen_server:call(Db2#db.main_pid, {db_updated, Db2}),
@@ -197,9 +190,8 @@
         {noreply, Db2}
     end.
 
-handle_info(delayed_commit, #db{update_seq=Seq}=Db) ->
-    ok = couch_file:sync(Db#db.fd),
-    {noreply, Db#db{waiting_delayed_commit=nil,committed_update_seq=Seq}}.
+handle_info(delayed_commit, Db) ->
+    {noreply, commit_data(Db)}.
 
 code_change(_OldVsn, State, _Extra) ->
     {ok, State}.
@@ -222,6 +214,7 @@
             [#rev_info{rev=Rev,seq=Seq,deleted=true,body_sp = Bp} || 
                 {Rev, Seq, Bp} <- DeletedRevInfos]};
 btree_by_seq_join(KeySeq,{Id, Rev, Bp, Conflicts, DelConflicts, Deleted}) ->
+    % 09 UPGRADE CODE
     % this is the 0.9.0 and earlier by_seq record. It's missing the body pointers
     % and individual seq nums for conflicts that are currently in the index, 
     % meaning the filtered _changes api will not work except for on main docs.
@@ -252,6 +245,7 @@
         (_RevId, ?REV_MISSING) ->
             ?REV_MISSING;
         (_RevId, {IsDeleted, BodyPointer}) ->
+            % 09 UPGRADE CODE
             % this is the 0.9.0 and earlier rev info record. It's missing the seq
             % nums, which means couchdb will sometimes reexamine unchanged
             % documents with the _changes API.
@@ -290,16 +284,25 @@
 
 
 init_db(DbName, Filepath, Fd, Header0) ->
-    case element(2, Header0) of
-    1 -> ok; % 0.9
-    2 -> ok; % post 0.9 and pre 0.10
-    ?LATEST_DISK_VERSION -> ok;
+    Header1 = simple_upgrade_record(Header0, #db_header{}),
+    Header =
+    case element(2, Header1) of
+    1 -> Header1#db_header{unused = 0}; % 0.9
+    2 -> Header1#db_header{unused = 0}; % post 0.9 and pre 0.10
+    ?LATEST_DISK_VERSION -> Header1;
     _ -> throw({database_disk_version_error, "Incorrect disk header version"})
     end,
-    Header1 = Header0#db_header{unused = 0}, % used in versions 1 and 2, but not later
-    Header = simple_upgrade_record(Header1, #db_header{}),
     Less = fun less_docid/2,
-            
+        
+    {ok, FsyncOptions} = couch_util:parse_term(
+            couch_config:get("couchdb", "fsync_options", 
+                    "[before_header, after_header, on_file_open]")),
+    
+    case lists:member(on_file_open, FsyncOptions) of
+    true -> ok = couch_file:sync(Fd);
+    _ -> ok
+    end,
+        
     {ok, IdBtree} = couch_btree:open(Header#db_header.fulldocinfo_by_id_btree_state, Fd,
         [{split, fun(X) -> btree_by_id_split(X) end},
         {join, fun(X,Y) -> btree_by_id_join(X,Y) end},
@@ -337,7 +340,8 @@
         admins = Admins,
         admins_ptr = AdminsPtr,
         instance_start_time = StartTime,
-        revs_limit = Header#db_header.revs_limit
+        revs_limit = Header#db_header.revs_limit,
+        fsync_options = FsyncOptions
         }.
 
 
@@ -556,76 +560,101 @@
 commit_data(Db) ->
     commit_data(Db, false).
 
-
-commit_data(#db{fd=Fd, header=Header} = Db, Delay) ->
-    Header2 = Header#db_header{
+db_to_header(Db, Header) ->
+    Header#db_header{
         update_seq = Db#db.update_seq,
         docinfo_by_seq_btree_state = couch_btree:get_state(Db#db.docinfo_by_seq_btree),
         fulldocinfo_by_id_btree_state = couch_btree:get_state(Db#db.fulldocinfo_by_id_btree),
         local_docs_btree_state = couch_btree:get_state(Db#db.local_docs_btree),
         admins_ptr = Db#db.admins_ptr,
-        revs_limit = Db#db.revs_limit
-        },
-    if Header == Header2 ->
+        revs_limit = Db#db.revs_limit}.
+
+commit_data(#db{fd=Fd,header=OldHeader,fsync_options=FsyncOptions}=Db, Delay) ->
+    Header = db_to_header(Db, OldHeader),
+    if OldHeader == Header ->
         Db;
     Delay and (Db#db.waiting_delayed_commit == nil) ->
-        ok = couch_file:write_header(Fd, Header2),
         Db#db{waiting_delayed_commit=
                 erlang:send_after(1000, self(), delayed_commit)};
     Delay ->
-        ok = couch_file:write_header(Fd, Header2),
-        Db#db{header=Header2};
+        Db;
     true ->
-        if not is_atom(Db#db.waiting_delayed_commit) ->
+        if Db#db.waiting_delayed_commit /= nil ->
             case erlang:cancel_timer(Db#db.waiting_delayed_commit) of
             false -> receive delayed_commit -> ok after 0 -> ok end;
             _ -> ok
             end;
         true -> ok
         end,
-        ok = couch_file:write_header(Fd, Header2),
-        ok = couch_file:sync(Fd),
-        Db#db{waiting_delayed_commit=nil,header=Header2,committed_update_seq=Db#db.update_seq}
+        case lists:member(before_header, FsyncOptions) of
+        true -> ok = couch_file:sync(Fd);
+        _    -> ok
+        end,
+        
+        ok = couch_file:write_header(Fd, Header),
+        
+        case lists:member(after_header, FsyncOptions) of
+        true -> ok = couch_file:sync(Fd);
+        _    -> ok
+        end,
+        
+        Db#db{waiting_delayed_commit=nil,
+            header=Header,
+            committed_update_seq=Db#db.update_seq}
     end.
 
 
-copy_raw_doc(SrcFd, SrcSp, DestFd) ->
+copy_doc_attachments(SrcFd, SrcSp, DestFd) ->
     {ok, {BodyData, BinInfos}} = couch_db:read_doc(SrcFd, SrcSp),
     % copy the bin values
     NewBinInfos = lists:map(
         fun({Name, {Type, BinSp, Len}}) when is_tuple(BinSp) orelse BinSp == null ->
+            % 09 UPGRADE CODE
             {NewBinSp, Len} = couch_stream:old_copy_to_new_stream(SrcFd, BinSp, Len, DestFd),
             {Name, {Type, NewBinSp, Len}};
         ({Name, {Type, BinSp, Len}}) ->
             {NewBinSp, Len} = couch_stream:copy_to_new_stream(SrcFd, BinSp, DestFd),
             {Name, {Type, NewBinSp, Len}}
         end, BinInfos),
-    % now write the document summary
-    {ok, Sp} = couch_file:append_term(DestFd, {BodyData, NewBinInfos}),
-    Sp.
+    {BodyData, NewBinInfos}.
 
-copy_rev_tree(_SrcFd, _DestFd, []) ->
+copy_rev_tree_attachments(_SrcFd, _DestFd, []) ->
     [];
-copy_rev_tree(SrcFd, DestFd, [{Start, Tree} | RestTree]) ->
+copy_rev_tree_attachments(SrcFd, DestFd, [{Start, Tree} | RestTree]) ->
     % root nner node, only copy info/data from leaf nodes
-    [Tree2] = copy_rev_tree(SrcFd, DestFd, [Tree]),
-    [{Start, Tree2} | copy_rev_tree(SrcFd, DestFd, RestTree)];
-copy_rev_tree(SrcFd, DestFd, [{RevId, {IsDel, Sp, Seq}, []} | RestTree]) ->
+    [Tree2] = copy_rev_tree_attachments(SrcFd, DestFd, [Tree]),
+    [{Start, Tree2} | copy_rev_tree_attachments(SrcFd, DestFd, RestTree)];
+copy_rev_tree_attachments(SrcFd, DestFd, [{RevId, {IsDel, Sp, Seq}, []} | RestTree]) ->
     % This is a leaf node, copy it over
-    NewSp = copy_raw_doc(SrcFd, Sp, DestFd),
-    [{RevId, {IsDel, NewSp, Seq}, []} | copy_rev_tree(SrcFd, DestFd, RestTree)];
-copy_rev_tree(SrcFd, DestFd, [{RevId, _, SubTree} | RestTree]) ->
+    DocBody = copy_doc_attachments(SrcFd, Sp, DestFd),
+    [{RevId, {IsDel, DocBody, Seq}, []} | copy_rev_tree_attachments(SrcFd, DestFd, RestTree)];
+copy_rev_tree_attachments(SrcFd, DestFd, [{RevId, _, SubTree} | RestTree]) ->
     % inner node, only copy info/data from leaf nodes
-    [{RevId, ?REV_MISSING, copy_rev_tree(SrcFd, DestFd, SubTree)} | copy_rev_tree(SrcFd, DestFd, RestTree)].
+    [{RevId, ?REV_MISSING, copy_rev_tree_attachments(SrcFd, DestFd, SubTree)} | copy_rev_tree_attachments(SrcFd, DestFd, RestTree)].
+
     
 copy_docs(#db{fd=SrcFd}=Db, #db{fd=DestFd}=NewDb, InfoBySeq, Retry) ->
     Ids = [Id || #doc_info{id=Id} <- InfoBySeq],
     LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids),
+    
+    % write out the attachments
     NewFullDocInfos0 = lists:map(
         fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) ->
-            Info#full_doc_info{rev_tree=copy_rev_tree(SrcFd, DestFd, RevTree)}
+            Info#full_doc_info{rev_tree=copy_rev_tree_attachments(SrcFd, DestFd, RevTree)}
         end, LookupResults),
-    NewFullDocInfos = stem_full_doc_infos(Db, NewFullDocInfos0),
+    % write out the docs
+    % we do this in 2 stages so the docs are written out contiguously, making
+    % view indexing and replication faster.
+    NewFullDocInfos1 = lists:map(
+        fun(#full_doc_info{rev_tree=RevTree}=Info) ->
+            Info#full_doc_info{rev_tree=couch_key_tree:map_leafs(
+                fun(_Key, {IsDel, DocBody, Seq}) ->
+                    {ok, Pos} = couch_file:append_term(DestFd, DocBody),
+                    {IsDel, Pos, Seq}
+                end, RevTree)}
+        end, NewFullDocInfos0),
+
+    NewFullDocInfos = stem_full_doc_infos(Db, NewFullDocInfos1),
     NewDocInfos = [couch_doc:to_doc_info(Info) || Info <- NewFullDocInfos],
     RemoveSeqs =
     case Retry of
@@ -647,7 +676,9 @@
 
 
           
-copy_compact(Db, NewDb, Retry) ->
+copy_compact(Db, NewDb0, Retry) ->
+    FsyncOptions = [Op || Op <- NewDb0#db.fsync_options, Op == before_header],
+    NewDb = NewDb0#db{fsync_options=FsyncOptions},
     TotalChanges = couch_db:count_changes_since(Db, NewDb#db.update_seq),
     EnumBySeqFun =
     fun(#doc_info{high_seq=Seq}=DocInfo, _Offset, {AccNewDb, AccUncopied, TotalCopied}) ->
@@ -656,7 +687,7 @@
         if TotalCopied rem 1000 == 0 ->
             NewDb2 = copy_docs(Db, AccNewDb, lists:reverse([DocInfo | AccUncopied]), Retry),
             if TotalCopied rem 10000 == 0 ->
-                {ok, {commit_data(NewDb2#db{update_seq=Seq}, true), [], TotalCopied + 1}};
+                {ok, {commit_data(NewDb2#db{update_seq=Seq}), [], TotalCopied + 1}};
             true ->
                 {ok, {NewDb2#db{update_seq=Seq}, [], TotalCopied + 1}}
             end;
@@ -682,7 +713,7 @@
         NewDb4 = NewDb3
     end,
     
-    commit_data(NewDb4#db{update_seq=Db#db.update_seq}, true).
+    commit_data(NewDb4#db{update_seq=Db#db.update_seq}).
 
 start_copy_compact(#db{name=Name,filepath=Filepath}=Db) ->
     CompactFile = Filepath ++ ".compact",
@@ -699,8 +730,7 @@
         ok = couch_file:write_header(Fd, Header=#db_header{})
     end,
     NewDb = init_db(Name, CompactFile, Fd, Header),
-    unlink(Fd),
-    NewDb2 = copy_compact(Db, NewDb#db{waiting_delayed_commit=never}, Retry),
+    NewDb2 = copy_compact(Db, NewDb, Retry),
     
     gen_server:cast(Db#db.update_pid, {compact_done, CompactFile}),
     close_db(NewDb2).

Modified: couchdb/branches/tail_header/src/couchdb/couch_doc.erl
URL: http://svn.apache.org/viewvc/couchdb/branches/tail_header/src/couchdb/couch_doc.erl?rev=777292&r1=777291&r2=777292&view=diff
==============================================================================
--- couchdb/branches/tail_header/src/couchdb/couch_doc.erl (original)
+++ couchdb/branches/tail_header/src/couchdb/couch_doc.erl Thu May 21 22:16:57 2009
@@ -252,6 +252,7 @@
 bin_foldl(Bin, Fun, Acc) when is_binary(Bin) ->
     Fun(Bin, Acc);
 bin_foldl({Fd, Sp, Len}, Fun, Acc) when is_tuple(Sp) orelse Sp == null ->
+    % 09 UPGRADE CODE
     couch_stream:old_foldl(Fd, Sp, Len, Fun, Acc);
 bin_foldl({Fd, Sp, _Len}, Fun, Acc) ->
     couch_stream:foldl(Fd, Sp, Fun, Acc).

Modified: couchdb/branches/tail_header/src/couchdb/couch_file.erl
URL: http://svn.apache.org/viewvc/couchdb/branches/tail_header/src/couchdb/couch_file.erl?rev=777292&r1=777291&r2=777292&view=diff
==============================================================================
--- couchdb/branches/tail_header/src/couchdb/couch_file.erl (original)
+++ couchdb/branches/tail_header/src/couchdb/couch_file.erl Thu May 21 22:16:57 2009
@@ -19,7 +19,7 @@
 
 -record(file, {
     fd,
-    tail_append_begin=0
+    tail_append_begin=0 % 09 UPGRADE CODE
     }).
 
 -export([open/1, open/2, close/1, bytes/1, sync/1, append_binary/2,old_pread/3]).
@@ -121,6 +121,7 @@
     if HasPrefixes ->
         {ok, remove_block_prefixes(BlockOffset, RawBin), Pos + TotalBytes};
     true ->
+        % 09 UPGRADE CODE
         <<ReturnBin:Len/binary, _/binary>> = RawBin,
         {ok, [ReturnBin], Pos + Len}
     end.
@@ -162,10 +163,12 @@
     catch unlink(Fd),
     Result.
 
+% 09 UPGRADE CODE
 old_pread(Fd, Pos, Len) ->
     {ok, <<RawBin:Len/binary>>, false} = gen_server:call(Fd, {pread, Pos, Len}, infinity),
     {ok, RawBin}.
 
+% 09 UPGRADE CODE
 upgrade_old_header(Fd, Sig) ->
     gen_server:call(Fd, {upgrade_old_header, Sig}, infinity).
 
@@ -301,9 +304,10 @@
     {ok, Pos} = file:position(Fd, eof),
     {reply, find_header(Fd, Pos div ?SIZE_BLOCK), File}.
         
-
+% 09 UPGRADE CODE
 -define(HEADER_SIZE, 2048). % size of each segment of the doubly written header
 
+% 09 UPGRADE CODE
 read_old_header(Fd, Prefix) ->
     {ok, Bin} = file:pread(Fd, 0, 2*(?HEADER_SIZE)),
     <<Bin1:(?HEADER_SIZE)/binary, Bin2:(?HEADER_SIZE)/binary>> = Bin,
@@ -348,6 +352,7 @@
         Result
     end.
     
+% 09 UPGRADE CODE
 extract_header(Prefix, Bin) ->
     SizeOfPrefix = size(Prefix),
     SizeOfTermBin = ?HEADER_SIZE -
@@ -372,7 +377,7 @@
     end.
     
 
-
+% 09 UPGRADE CODE
 write_old_header(Fd, Prefix, Data) ->
     TermBin = term_to_binary(Data),
     % the size of all the bytes written to the header, including the md5 signature (16 bytes)

Modified: couchdb/branches/tail_header/src/couchdb/couch_stream.erl
URL: http://svn.apache.org/viewvc/couchdb/branches/tail_header/src/couchdb/couch_stream.erl?rev=777292&r1=777291&r2=777292&view=diff
==============================================================================
--- couchdb/branches/tail_header/src/couchdb/couch_stream.erl (original)
+++ couchdb/branches/tail_header/src/couchdb/couch_stream.erl Thu May 21 22:16:57 2009
@@ -58,6 +58,8 @@
         end, ok),
     close(Dest).
 
+
+% 09 UPGRADE CODE
 old_copy_to_new_stream(Fd, Pos, Len, DestFd) ->
     {ok, Dest} = open(DestFd),
     old_foldl(Fd, Pos, Len,
@@ -66,8 +68,7 @@
         end, ok),
     close(Dest).
 
-
-
+% 09 UPGRADE CODE    
 old_foldl(_Fd, null, 0, _Fun, Acc) ->
     Acc;
 old_foldl(Fd, OldPointer, Len, Fun, Acc) when is_tuple(OldPointer)->
@@ -140,7 +141,7 @@
     
 
 
-
+% 09 UPGRADE CODE
 old_read_term(Fd, Sp) ->
     {ok, <<TermLen:(?STREAM_OFFSET_BITS)>>, Sp2}
         = old_read(Fd, Sp, ?STREAM_OFFSET_BYTES),
@@ -152,6 +153,7 @@
     Bin = list_to_binary(lists:reverse(RevBin)),
     {ok, Bin, Sp2}.
 
+% 09 UPGRADE CODE
 old_stream_data(_Fd, Sp, 0, _MaxChunk, _Fun, Acc) ->
     {ok, Acc, Sp};
 old_stream_data(Fd, {Pos, 0}, Num, MaxChunk, Fun, Acc) ->

Modified: couchdb/branches/tail_header/src/couchdb/couch_view_group.erl
URL: http://svn.apache.org/viewvc/couchdb/branches/tail_header/src/couchdb/couch_view_group.erl?rev=777292&r1=777291&r2=777292&view=diff
==============================================================================
--- couchdb/branches/tail_header/src/couchdb/couch_view_group.erl (original)
+++ couchdb/branches/tail_header/src/couchdb/couch_view_group.erl Thu May 21 22:16:57 2009
@@ -313,6 +313,7 @@
             if ForceReset ->
                 {ok, reset_file(Db, Fd, DbName, Group)};
             true ->
+                % 09 UPGRADE CODE
                 ok = couch_file:upgrade_old_header(Fd, <<$r, $c, $k, 0>>),
                 case (catch couch_file:read_header(Fd)) of
                 {ok, {Sig, HeaderInfo}} ->



Mime
View raw message