couchdb-dev mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From "Mike Leddy (JIRA)" <j...@apache.org>
Subject [jira] Commented: (COUCHDB-1021) Compacting a database does not preserve the purge_seq
Date Tue, 11 Jan 2011 20:34:46 GMT

    [ https://issues.apache.org/jira/browse/COUCHDB-1021?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=12980317#action_12980317
] 

Mike Leddy commented on COUCHDB-1021:
-------------------------------------

Yes I do :-) ..... Thanks for the outline of what is necessary. I ended up with this patch:

--- couchdb-1.0.1/src/couchdb/couch_db_updater.erl	2011-01-11 15:08:15.000000000 -0300
+++ couchdb-1.0.1.new/src/couchdb/couch_db_updater.erl	2011-01-11 15:25:32.000000000 -0300
@@ -847,7 +847,7 @@
 
     commit_data(NewDb4#db{update_seq=Db#db.update_seq}).
 
-start_copy_compact(#db{name=Name,filepath=Filepath}=Db) ->
+start_copy_compact(#db{name=Name,filepath=Filepath,header=#db_header{purge_seq=PurgeSeq}}=Db) ->
     CompactFile = Filepath ++ ".compact",
     ?LOG_DEBUG("Compaction process spawned for db \"~s\"", [Name]),
     case couch_file:open(CompactFile) of
@@ -866,9 +866,19 @@
         Retry = false,
         ok = couch_file:write_header(Fd, Header=#db_header{})
     end,
+
     NewDb = init_db(Name, CompactFile, Fd, Header),
-    unlink(Fd),
-    NewDb2 = copy_compact(Db, NewDb, Retry),
-    close_db(NewDb2),
+    NewDb2 = if PurgeSeq > 0 ->
+        {ok, PurgedIdsRevs} = couch_db:get_last_purged(Db),
+        {ok, Pointer} = couch_file:append_term(Fd, PurgedIdsRevs),
+        unlink(Fd),
+        commit_data(NewDb#db{header=#db_header{purge_seq=PurgeSeq, purged_docs=Pointer}});
+    true ->
+        unlink(Fd),
+        NewDb
+    end,
+
+    NewDb3 = copy_compact(Db, NewDb2, Retry),
+    close_db(NewDb3),
     gen_server:cast(Db#db.update_pid, {compact_done, CompactFile}).

Maybe I'm being paranoid by duplicating the unlink, but I wasn't sure whether it needed to be
done before the commit_data.
Better safe than sorry....


> Compacting a database does not preserve the purge_seq
> -----------------------------------------------------
>
>                 Key: COUCHDB-1021
>                 URL: https://issues.apache.org/jira/browse/COUCHDB-1021
>             Project: CouchDB
>          Issue Type: Bug
>          Components: Database Core
>    Affects Versions: 1.0.1
>         Environment: All platforms
>            Reporter: Mike Leddy
>            Priority: Minor
>
> On compacting a database, the purge_seq becomes zero. As a result, subsequently accessing
any view will cause the view to be rebuilt from scratch. I resolved the issue for myself by patching
start_copy_compact, but this only works if you can guarantee there will be no purging done
during compaction:
> --- couchdb-1.0.1/src/couchdb/couch_db_updater.erl
> +++ couchdb-1.0.1.new/src/couchdb/couch_db_updater.erl
> @@ -857,7 +857,7 @@
>  
>      commit_data(NewDb4#db{update_seq=Db#db.update_seq}).
>  
> -start_copy_compact(#db{name=Name,filepath=Filepath}=Db) ->
> +start_copy_compact(#db{name=Name,filepath=Filepath,header=#db_header{purge_seq=PurgeSeq}}=Db) ->
>      CompactFile = Filepath ++ ".compact",
>      ?LOG_DEBUG("Compaction process spawned for db \"~s\"", [Name]),
>      case couch_file:open(CompactFile) of
> @@ -869,7 +869,7 @@
>          couch_task_status:add_task(<<"Database Compaction">>, Name, <<"Starting">>),
>          {ok, Fd} = couch_file:open(CompactFile, [create]),
>          Retry = false,
> -        ok = couch_file:write_header(Fd, Header=#db_header{})
> +        ok = couch_file:write_header(Fd, Header=#db_header{purge_seq=PurgeSeq})
>      end,
>      NewDb = init_db(Name, CompactFile, Fd, Header),
>      unlink(Fd),
>  I am sure that there must be a better way of doing this.....

-- 
This message is automatically generated by JIRA.
-
You can reply to this email to add a comment to the issue online.


Mime
View raw message