couchdb-commits mailing list archives

From: vatam...@apache.org
Subject: [couchdb] branch 63012-scheduler updated: [fixup] 80 col line width
Date: Sun, 09 Apr 2017 07:02:05 GMT
This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch 63012-scheduler
in repository https://gitbox.apache.org/repos/asf/couchdb.git

The following commit(s) were added to refs/heads/63012-scheduler by this push:
       new  7eb753e   [fixup] 80 col line width
7eb753e is described below

commit 7eb753eca2b117a08eb7583f0153c1b3d11cac14
Author: Nick Vatamaniuc <vatamane@apache.org>
AuthorDate: Sun Apr 9 03:01:42 2017 -0400

    [fixup] 80 col line width
    
    but left most existing older modules intact
---
 src/couch_replicator/src/couch_multidb_changes.erl |  6 +--
 src/couch_replicator/src/couch_replicator.erl      | 15 ++++---
 .../src/couch_replicator_clustering.erl            | 10 ++---
 .../src/couch_replicator_connection.erl            | 27 ++++++++----
 .../src/couch_replicator_db_changes.erl            |  3 +-
 .../src/couch_replicator_doc_processor.erl         | 49 +++++++++++++---------
 .../src/couch_replicator_doc_processor_worker.erl  | 28 ++++++++-----
 src/couch_replicator/src/couch_replicator_docs.erl | 25 ++++++-----
 .../src/couch_replicator_filters.erl               | 20 +++++----
 .../src/couch_replicator_httpc_pool.erl            |  3 +-
 .../src/couch_replicator_rate_limiter.erl          |  7 ++--
 .../src/couch_replicator_rate_limiter_tables.erl   |  8 ++--
 .../src/couch_replicator_scheduler.erl             | 46 +++++++++++++-------
 13 files changed, 153 insertions(+), 94 deletions(-)

diff --git a/src/couch_replicator/src/couch_multidb_changes.erl b/src/couch_replicator/src/couch_multidb_changes.erl
index c141a4a..103e549 100644
--- a/src/couch_replicator/src/couch_multidb_changes.erl
+++ b/src/couch_replicator/src/couch_multidb_changes.erl
@@ -35,7 +35,7 @@
     suffix :: binary(),
     event_server :: reference(),
     scanner :: nil | pid(),
-    pids :: [{binary(),pid()}],
+    pids :: [{binary(), pid()}],
     skip_ddocs :: boolean()
 }).
 
@@ -135,7 +135,7 @@ handle_info({'EXIT', From, Reason}, #state{scanner = From} = State) ->
     {stop, {scanner_died, Reason}, State};
 
 handle_info({'EXIT', From, Reason}, #state{pids = Pids} = State) ->
-    couch_log:debug("~p change feed exited ~p",[State#state.suffix, From]),
+    couch_log:debug("~p change feed exited ~p", [State#state.suffix, From]),
     case lists:keytake(From, 2, Pids) of
         {value, {DbName, From}, NewPids} ->
             if Reason == normal -> ok; true ->
@@ -479,7 +479,7 @@ t_handle_info_scanner_crashed() ->
 t_handle_info_event_server_exited() ->
     ?_test(begin
         Res = handle_info({'DOWN', esref, type, espid, reason}, mock_state()),
-        ?assertMatch({stop, {couch_event_server_died, reason},_}, Res)
+        ?assertMatch({stop, {couch_event_server_died, reason}, _}, Res)
     end).
 
 t_handle_info_unknown_pid_exited() ->
diff --git a/src/couch_replicator/src/couch_replicator.erl b/src/couch_replicator/src/couch_replicator.erl
index 345cda3..6e24a56 100644
--- a/src/couch_replicator/src/couch_replicator.erl
+++ b/src/couch_replicator/src/couch_replicator.erl
@@ -27,7 +27,7 @@
     error,         % Could not be turned into a replication job
     running,       % Scheduled and running
     pending,       % Scheduled and waiting to run
-    crashing,      % Scheduled but crashing, possibly backed off by the scheduler
+    crashing,      % Scheduled but crashing, backed off by the scheduler
     completed,     % Non-continuous (normal) completed replication
     failed         % Terminal failure, will not be retried anymore
 ]).
@@ -144,7 +144,8 @@ replication_states() ->
     ?REPLICATION_STATES.
 
 
--spec stream_terminal_docs_info(binary(), user_doc_cb(), any(), [atom()]) -> any().
+-spec stream_terminal_docs_info(binary(), user_doc_cb(), any(), [atom()]) ->
+    any().
 stream_terminal_docs_info(Db, Cb, UserAcc, States) ->
     DDoc = <<"_replicator">>,
     View = <<"terminal_states">>,
@@ -172,7 +173,8 @@ stream_active_docs_info(Cb, UserAcc, States) ->
     stream_active_docs_info(Nodes, Cb, UserAcc, States).
 
 
--spec stream_active_docs_info([node()], user_doc_cb(), any(), [atom()]) -> any().
+-spec stream_active_docs_info([node()], user_doc_cb(), any(), [atom()]) ->
+    any().
 stream_active_docs_info([], _Cb, UserAcc, _States) ->
     UserAcc;
 
@@ -366,7 +368,8 @@ t_username_must_match() ->
         UserCtx1 = #user_ctx{name = <<"user">>, roles = [<<"somerole">>]},
         ?assertEqual(ok, check_authorization(<<"RepId">>, UserCtx1)),
         UserCtx2 = #user_ctx{name = <<"other">>, roles = [<<"somerole">>]},
-        ?assertThrow({unauthorized, _}, check_authorization(<<"RepId">>, UserCtx2))
+        ?assertThrow({unauthorized, _}, check_authorization(<<"RepId">>,
+            UserCtx2))
     end).
 
 
@@ -391,7 +394,9 @@ expect_rep_user_ctx(Name, Role) ->
 strip_url_creds_test_() ->
      {
         foreach,
-        fun () -> meck:expect(config, get, fun(_, _, Default) -> Default end) end,
+        fun () -> meck:expect(config, get,
+            fun(_, _, Default) -> Default end)
+        end,
         fun (_) -> meck:unload() end,
         [
             t_strip_local_db_creds(),
diff --git a/src/couch_replicator/src/couch_replicator_clustering.erl b/src/couch_replicator/src/couch_replicator_clustering.erl
index b37b8e7..c808a64 100644
--- a/src/couch_replicator/src/couch_replicator_clustering.erl
+++ b/src/couch_replicator/src/couch_replicator_clustering.erl
@@ -17,11 +17,11 @@
 %
 % Cluster stability is defined as "there have been no nodes added or removed in
 % last `QuietPeriod` seconds". QuietPeriod value is configurable. To ensure a
-% speedier startup, during initialization there is a shorter StartupQuietPeriod in
-% effect (also configurable).
+% speedier startup, during initialization there is a shorter StartupPeriod
+% in effect (also configurable).
 %
-% This module is also in charge of calculating ownership of replications based on
-% where their _repicator db documents shards live.
+% This module is also in charge of calculating ownership of replications based
+% on where their _replicator db documents shards live.
 
 
 -module(couch_replicator_clustering).
@@ -145,7 +145,7 @@ handle_info(stability_check, State) ->
        true ->
            couch_replicator_notifier:notify({cluster, stable}),
            couch_stats:update_gauge([couch_replicator, cluster_is_stable], 1),
-           couch_log:notice("~s : publishing cluster `stable` event", [?MODULE]),
+           couch_log:notice("~s : publish cluster `stable` event", [?MODULE]),
            {noreply, State};
        false ->
            Timer = new_timer(interval(State)),
diff --git a/src/couch_replicator/src/couch_replicator_connection.erl b/src/couch_replicator/src/couch_replicator_connection.erl
index 64ec2b7..fc5ea97 100644
--- a/src/couch_replicator/src/couch_replicator_connection.erl
+++ b/src/couch_replicator/src/couch_replicator_connection.erl
@@ -47,9 +47,11 @@ start_link() ->
 
 init([]) ->
     process_flag(trap_exit, true),
-    ?MODULE = ets:new(?MODULE, [named_table, public, {keypos, #connection.worker}]),
+    ?MODULE = ets:new(?MODULE, [named_table, public,
+        {keypos, #connection.worker}]),
     ok = config:listen_for_changes(?MODULE, nil),
-    Interval = config:get_integer("replicator", "connection_close_interval", ?DEFAULT_CLOSE_INTERVAL),
+    Interval = config:get_integer("replicator", "connection_close_interval",
+        ?DEFAULT_CLOSE_INTERVAL),
     {ok, Timer} = timer:send_after(Interval, close_idle_connections),
     ibrowse:add_config([{inactivity_timeout, Interval}]),
     {ok, #state{close_interval=Interval, timer=Timer}}.
@@ -82,12 +84,15 @@ handle_call({acquire, URL}, From, State) ->
     {Pid, _Ref} = From,
     case ibrowse_lib:parse_url(URL) of
         #url{host=Host, port=Port} ->
-            case ets:match_object(?MODULE, #connection{host=Host, port=Port, mref=undefined, _='_'}, 1) of
+            Pat = #connection{host=Host, port=Port, mref=undefined, _='_'},
+            case ets:match_object(?MODULE, Pat, 1) of
                 '$end_of_table' ->
                     {reply, {error, all_allocated}, State};
                 {[Worker], _Cont} ->
-                    couch_stats:increment_counter([couch_replicator, connection, acquires]),
-                    ets:insert(?MODULE, Worker#connection{mref=monitor(process, Pid)}),
+                    couch_stats:increment_counter([couch_replicator, connection,
+                        acquires]),
+                    ets:insert(?MODULE, Worker#connection{mref=monitor(process,
+                        Pid)}),
                     {reply, {ok, Worker#connection.worker}, State}
             end;
         {error, invalid_uri} ->
@@ -99,10 +104,12 @@ handle_call({create, URL, Worker}, From, State) ->
     case ibrowse_lib:parse_url(URL) of
         #url{host=Host, port=Port} ->
             link(Worker),
-            couch_stats:increment_counter([couch_replicator, connection, creates]),
+            couch_stats:increment_counter([couch_replicator, connection,
+                creates]),
             true = ets:insert_new(
                 ?MODULE,
-                #connection{host=Host, port=Port, worker=Worker, mref=monitor(process, Pid)}
+                #connection{host=Host, port=Port, worker=Worker,
+                    mref=monitor(process, Pid)}
             ),
             {reply, ok, State}
     end.
@@ -131,13 +138,15 @@ handle_cast({connection_close_interval, V}, State) ->
 
 % owner crashed
 handle_info({'DOWN', Ref, process, _Pid, _Reason}, State) ->
-    couch_stats:increment_counter([couch_replicator, connection, owner_crashes]),
+    couch_stats:increment_counter([couch_replicator, connection,
+        owner_crashes]),
     ets:match_delete(?MODULE, #connection{mref=Ref, _='_'}),
     {noreply, State};
 
 % worker crashed
 handle_info({'EXIT', Pid, Reason}, State) ->
-    couch_stats:increment_counter([couch_replicator, connection, worker_crashes]),
+    couch_stats:increment_counter([couch_replicator, connection,
+        worker_crashes]),
     case ets:lookup(?MODULE, Pid) of
         [] ->
             ok;
diff --git a/src/couch_replicator/src/couch_replicator_db_changes.erl b/src/couch_replicator/src/couch_replicator_db_changes.erl
index dd4be64..b06e1e9 100644
--- a/src/couch_replicator/src/couch_replicator_db_changes.erl
+++ b/src/couch_replicator/src/couch_replicator_db_changes.erl
@@ -77,7 +77,8 @@ restart_mdb_changes(#state{mdb_changes = nil} = State) ->
     Suffix = <<"_replicator">>,
     CallbackMod = couch_replicator_doc_processor,
     Options = [skip_ddocs],
-    {ok, Pid} = couch_multidb_changes:start_link(Suffix, CallbackMod, nil, Options),
+    {ok, Pid} = couch_multidb_changes:start_link(Suffix, CallbackMod, nil,
+        Options),
     couch_stats:increment_counter([couch_replicator, db_scans]),
     couch_log:notice("Started replicator db changes listener ~p", [Pid]),
     State#state{mdb_changes = Pid};
diff --git a/src/couch_replicator/src/couch_replicator_doc_processor.erl b/src/couch_replicator/src/couch_replicator_doc_processor.erl
index 9063436..4e07e79 100644
--- a/src/couch_replicator/src/couch_replicator_doc_processor.erl
+++ b/src/couch_replicator/src/couch_replicator_doc_processor.erl
@@ -156,8 +156,8 @@ process_updated({DbName, _DocId} = Id, JsonRepDoc) ->
     % Parsing replication doc (but not calculating the id) could throw an
     % exception which would indicate this document is malformed. This exception
     % should propagate to db_change function and will be recorded as permanent
-    % failure in the document. User will have to delete and re-create the document
-    % to fix the problem.
+    % failure in the document. User will have to delete and re-create the
+    % document to fix the problem.
     Rep0 = couch_replicator_docs:parse_rep_doc_without_id(JsonRepDoc),
     Rep = Rep0#rep{db_name = DbName, start_time = os:timestamp()},
     Filter = case couch_replicator_filters:parse(Rep#rep.options) of
@@ -320,20 +320,23 @@ worker_returned(Ref, Id, {ok, RepId}) ->
                 % Filtered replication id didn't change.
                 Row0;
             #rdoc{rid = nil, filter = user} ->
-                % Calculated new replication id for a filtered replication. Make sure
-                % to schedule another check as filter code could change. Replications
-                % starts could have been failing, so also clear error count.
+                % Calculated new replication id for a filtered replication. Make
+                % sure to schedule another check as filter code could change.
+                % Replication starts could have been failing, so also clear
+                % error count.
                 Row0#rdoc{rid = RepId};
             #rdoc{rid = OldRepId, filter = user} ->
-                % Replication id of existing replication job with filter has changed.
-                % Remove old replication job from scheduler and schedule check to check
-                % for future changes.
+                % Replication id of existing replication job with filter has
+                % changed. Remove old replication job from scheduler and
+                % schedule check to check for future changes.
                 ok = couch_replicator_scheduler:remove_job(OldRepId),
-                Msg = io_lib:format("Replication id changed: ~p -> ~p", [OldRepId, RepId]),
+                Msg = io_lib:format("Replication id changed: ~p -> ~p", [
+                    OldRepId, RepId]),
                 Row0#rdoc{rid = RepId, info = couch_util:to_binary(Msg)};
             #rdoc{rid = nil} ->
-                % Calculated new replication id for non-filtered replication. Remove
-                % replication doc body, after this we won't needed any more.
+                % Calculated new replication id for non-filtered replication.
+                % Remove replication doc body, after this we won't needed any
+                % more.
                 Row0#rdoc{rep=nil, rid=RepId, info=nil}
         end,
         true = ets:insert(?MODULE, NewRow),
@@ -710,8 +713,8 @@ t_failed_change() ->
     end).
 
 
-% Normal change, but according to cluster ownership algorithm, replication belongs to
-% a different node, so this node should skip it.
+% Normal change, but according to cluster ownership algorithm, replication
+% belongs to a different node, so this node should skip it.
 t_change_for_different_node() ->
    ?_test(begin
         meck:expect(couch_replicator_clustering, owner, 2, different_node),
@@ -721,7 +724,8 @@ t_change_for_different_node() ->
 
 
 % Change handled when cluster is unstable (nodes are added or removed), so
-% job is not added. A rescan will be triggered soon and change will be evaluated again.
+% job is not added. A rescan will be triggered soon and change will be
+% evaluated again.
 t_change_when_cluster_unstable() ->
    ?_test(begin
        meck:expect(couch_replicator_clustering, owner, 2, unstable),
@@ -739,8 +743,10 @@ t_ejson_docs() ->
         EJsonDocs = docs([]),
         ?assertMatch([{[_|_]}], EJsonDocs),
         [{DocProps}] = EJsonDocs,
-        {value, StateTime, DocProps1} = lists:keytake(last_updated, 1, DocProps),
-        ?assertMatch({last_updated, BinVal1} when is_binary(BinVal1), StateTime),
+        {value, StateTime, DocProps1} = lists:keytake(last_updated, 1,
+            DocProps),
+        ?assertMatch({last_updated, BinVal1} when is_binary(BinVal1),
+            StateTime),
         {value, StartTime, DocProps2} = lists:keytake(start_time, 1, DocProps1),
         ?assertMatch({start_time, BinVal2} when is_binary(BinVal2), StartTime),
         ExpectedProps = [
@@ -774,7 +780,9 @@ t_cluster_membership_foldl() ->
 normalize_rep_test_() ->
     {
         setup,
-        fun() -> meck:expect(config, get, fun(_, _, Default) -> Default end) end,
+        fun() -> meck:expect(config, get,
+            fun(_, _, Default) -> Default end)
+        end,
         fun(_) -> meck:unload() end,
         ?_test(begin
             EJson1 = {[
@@ -800,7 +808,9 @@ normalize_rep_test_() ->
 get_worker_ref_test_() ->
     {
         setup,
-        fun() -> ets:new(?MODULE, [named_table, public, {keypos, #rdoc.id}]) end,
+        fun() ->
+            ets:new(?MODULE, [named_table, public, {keypos, #rdoc.id}])
+        end,
         fun(_) -> ets:delete(?MODULE) end,
         ?_test(begin
             Id = {<<"db">>, <<"doc">>},
@@ -825,7 +835,8 @@ setup() ->
     meck:expect(config, get, fun(_, _, Default) -> Default end),
     meck:expect(config, listen_for_changes, 2, ok),
     meck:expect(couch_replicator_clustering, owner, 2, node()),
-    meck:expect(couch_replicator_clustering, link_cluster_event_listener, 3, ok),
+    meck:expect(couch_replicator_clustering, link_cluster_event_listener, 3,
+        ok),
     meck:expect(couch_replicator_doc_processor_worker, spawn_worker, 4, pid),
     meck:expect(couch_replicator_scheduler, remove_job, 1, ok),
     meck:expect(couch_replicator_docs, remove_state_fields, 2, ok),
diff --git a/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl b/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl
index 4f6dab0..1d3c36d 100644
--- a/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl
+++ b/src/couch_replicator/src/couch_replicator_doc_processor_worker.erl
@@ -25,11 +25,14 @@
 % Spawn a worker which attempts to calculate replication id then add a
 % replication job to scheduler. This function create a monitor to the worker
 % a worker will then exit with the #doc_worker_result{} record within
-% ?WORKER_TIMEOUT_MSEC timeout period.A timeout is considered a `temporary_error`.
-% Result will be sent as the `Reason` in the {'DOWN',...} message.
+% ?WORKER_TIMEOUT_MSEC timeout period.A timeout is considered a
+%`temporary_error`. Result will be sent as the `Reason` in the {'DOWN',...}
+% message.
 -spec spawn_worker(db_doc_id(), #rep{}, seconds(), reference()) -> pid().
 spawn_worker(Id, Rep, WaitSec, WRef) ->
-    {Pid, _Ref} = spawn_monitor(fun() -> worker_fun(Id, Rep, WaitSec, WRef) end),
+    {Pid, _Ref} = spawn_monitor(fun() ->
+        worker_fun(Id, Rep, WaitSec, WRef)
+    end),
     Pid.
 
 
@@ -66,8 +69,8 @@ worker_fun(Id, Rep, WaitSec, WRef) ->
 
 
 % Try to start a replication. Used by a worker. This function should return
-% rep_start_result(), also throws {filter_fetch_error, Reason} if cannot fetch filter.
-% It can also block for an indeterminate amount of time while fetching the
+% rep_start_result(), also throws {filter_fetch_error, Reason} if cannot fetch
+% filter.It can also block for an indeterminate amount of time while fetching
 % filter.
 maybe_start_replication(Id, RepWithoutId, WRef) ->
     Rep = couch_replicator_docs:update_rep_id(RepWithoutId),
@@ -93,8 +96,8 @@ maybe_add_job_to_scheduler({DbName, DocId}, Rep, WRef) ->
     nil ->
         % Before adding a job check that this worker is still the current
         % worker. This is to handle a race condition where a worker which was
-        % sleeping and then checking a replication filter may inadvertently re-add
-        % a replication which was already deleted.
+        % sleeping and then checking a replication filter may inadvertently
+        % re-add a replication which was already deleted.
         case couch_replicator_doc_processor:get_worker_ref({DbName, DocId}) of
         WRef ->
             ok = couch_replicator_scheduler:add_job(Rep),
@@ -164,13 +167,14 @@ t_already_running_same_docid() ->
    end).
 
 
-% There is a transient replication with same replication id running. Ignore change.
+% There is a transient replication with same replication id running. Ignore.
 t_already_running_transient() ->
    ?_test(begin
        Id = {?DB, ?DOC1},
        mock_already_running(null, null),
        Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
-       ?assertMatch({temporary_error, _}, maybe_start_replication(Id, Rep, nil)),
+       ?assertMatch({temporary_error, _}, maybe_start_replication(Id, Rep,
+           nil)),
        ?assert(did_not_add_job())
    end).
 
@@ -182,7 +186,8 @@ t_already_running_other_db_other_doc() ->
        Id = {?DB, ?DOC1},
        mock_already_running(<<"otherdb">>, <<"otherdoc">>),
        Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
-       ?assertMatch({permanent_failure, _}, maybe_start_replication(Id, Rep, nil)),
+       ?assertMatch({permanent_failure, _}, maybe_start_replication(Id, Rep,
+           nil)),
        ?assert(did_not_add_job()),
        1 == meck:num_calls(couch_replicator_docs, update_failed, '_')
    end).
@@ -222,7 +227,8 @@ t_ignore_if_worker_ref_does_not_match() ->
     ?_test(begin
        Id = {?DB, ?DOC1},
        Rep = couch_replicator_docs:parse_rep_doc_without_id(change()),
-       meck:expect(couch_replicator_doc_processor, get_worker_ref, 1, make_ref()),
+       meck:expect(couch_replicator_doc_processor, get_worker_ref, 1,
+           make_ref()),
        ?assertEqual(ignore, maybe_start_replication(Id, Rep, make_ref())),
        ?assertNot(added_job())
    end).
diff --git a/src/couch_replicator/src/couch_replicator_docs.erl b/src/couch_replicator/src/couch_replicator_docs.erl
index 1bf26c5..70517f4 100644
--- a/src/couch_replicator/src/couch_replicator_docs.erl
+++ b/src/couch_replicator/src/couch_replicator_docs.erl
@@ -81,7 +81,8 @@ update_failed(DbName, DocId, Error) ->
         {<<"_replication_state">>, <<"failed">>},
         {<<"_replication_stats">>, undefined},
         {<<"_replication_state_reason">>, Reason}]),
-    couch_stats:increment_counter([couch_replicator, docs, failed_state_updates]).
+    couch_stats:increment_counter([couch_replicator, docs,
+        failed_state_updates]).
 
 
 -spec update_triggered(#rep{}, rep_id()) -> ok.
@@ -117,7 +118,8 @@ update_error(#rep{db_name = DbName, doc_id = DocId, id = RepId}, Error) ->
 
 -spec ensure_rep_db_exists() -> {ok, #db{}}.
 ensure_rep_db_exists() ->
-    Db = case couch_db:open_int(?REP_DB_NAME, [?CTX, sys_db, nologifmissing]) of
+    Db = case couch_db:open_int(?REP_DB_NAME, [?CTX, sys_db,
+            nologifmissing]) of
         {ok, Db0} ->
             Db0;
         _Error ->
@@ -205,9 +207,9 @@ replication_design_doc_props(DDocId) ->
 % Note: parse_rep_doc can handle filtered replications. During parsing of the
 % replication doc it will make possibly remote http requests to the source
 % database. If failure or parsing of filter docs fails, parse_doc throws a
-% {filter_fetch_error, Error} excation. This exception should be considered transient
-% in respect to the contents of the document itself, since it depends on
-% netowrk availability of the source db and other factors.
+% {filter_fetch_error, Error} excation. This exception should be considered
+% transient in respect to the contents of the document itself, since it depends
+% on netowrk availability of the source db and other factors.
 -spec parse_rep_doc({[_]}) -> #rep{}.
 parse_rep_doc(RepDoc) ->
     {ok, Rep} = try
@@ -313,7 +315,7 @@ update_rep_doc(RepDbName, RepDocId, KVs, Wait) when is_binary(RepDocId) ->
         end
     catch
         throw:conflict ->
-            Msg = "Conflict when updating replication document `~s`. Retrying.",
+            Msg = "Conflict when updating replication doc `~s`. Retrying.",
             couch_log:error(Msg, [RepDocId]),
             ok = timer:sleep(random:uniform(erlang:min(128, Wait)) * 100),
             update_rep_doc(RepDbName, RepDocId, KVs, Wait * 2)
@@ -399,7 +401,8 @@ parse_rep_db({Props}, Proxy, Options) ->
             consumer_key = ?b2l(get_value(<<"consumer_key">>, OauthProps)),
             token = ?b2l(get_value(<<"token">>, OauthProps)),
             token_secret = ?b2l(get_value(<<"token_secret">>, OauthProps)),
-            consumer_secret = ?b2l(get_value(<<"consumer_secret">>, OauthProps)),
+            consumer_secret = ?b2l(get_value(<<"consumer_secret">>,
+                OauthProps)),
             signature_method =
                 case get_value(<<"signature_method">>, OauthProps) of
                 undefined ->        hmac_sha1;
@@ -452,7 +455,8 @@ make_options(Props) ->
     DefTimeout = config:get("replicator", "connection_timeout", "30000"),
     DefRetries = config:get("replicator", "retries_per_request", "10"),
     UseCheckpoints = config:get("replicator", "use_checkpoints", "true"),
-    DefCheckpointInterval = config:get("replicator", "checkpoint_interval", "30000"),
+    DefCheckpointInterval = config:get("replicator", "checkpoint_interval",
+        "30000"),
     {ok, DefSocketOptions} = couch_util:parse_term(
         config:get("replicator", "socket_options",
             "[{keepalive, true}, {nodelay, false}]")),
@@ -536,7 +540,8 @@ check_options(Options) ->
         {false, _, false} -> Options;
         {_, false, false} -> Options;
         _ ->
-            throw({bad_request, "`doc_ids`, `filter`, `selector` are mutually exclusive options"})
+            throw({bad_request,
+                "`doc_ids`,`filter`,`selector` are mutually exclusive"})
     end.
 
 -spec parse_proxy_params(binary() | [_]) -> [_].
@@ -688,7 +693,7 @@ error_reason(Reason) ->
 
 check_options_pass_values_test() ->
     ?assertEqual(check_options([]), []),
-    ?assertEqual(check_options([baz, {other,fiz}]), [baz, {other, fiz}]),
+    ?assertEqual(check_options([baz, {other, fiz}]), [baz, {other, fiz}]),
     ?assertEqual(check_options([{doc_ids, x}]), [{doc_ids, x}]),
     ?assertEqual(check_options([{filter, x}]), [{filter, x}]),
     ?assertEqual(check_options([{selector, x}]), [{selector, x}]).
diff --git a/src/couch_replicator/src/couch_replicator_filters.erl b/src/couch_replicator/src/couch_replicator_filters.erl
index 90c654a..99f8cf0 100644
--- a/src/couch_replicator/src/couch_replicator_filters.erl
+++ b/src/couch_replicator/src/couch_replicator_filters.erl
@@ -105,12 +105,14 @@ view_type(Props, Options) ->
 % Private functions
 
 fetch_internal(DDocName, FilterName, Source, UserCtx) ->
-    Db = case (catch couch_replicator_api_wrap:db_open(Source, [{user_ctx, UserCtx}])) of
+    Db = case (catch couch_replicator_api_wrap:db_open(Source,
+        [{user_ctx, UserCtx}])) of
     {ok, Db0} ->
         Db0;
     DbError ->
         DbErrorMsg = io_lib:format("Could not open source database `~s`: ~s",
-           [couch_replicator_api_wrap:db_uri(Source), couch_util:to_binary(DbError)]),
+           [couch_replicator_api_wrap:db_uri(Source),
+               couch_util:to_binary(DbError)]),
         throw({fetch_error, iolist_to_binary(DbErrorMsg)})
     end,
     try
@@ -121,8 +123,10 @@ fetch_internal(DDocName, FilterName, Source, UserCtx) ->
         DocError ->
             DocErrorMsg = io_lib:format(
                 "Couldn't open document `_design/~s` from source "
-                "database `~s`: ~s", [DDocName, couch_replicator_api_wrap:db_uri(Source),
-                    couch_util:to_binary(DocError)]),
+                "database `~s`: ~s", [DDocName,
+                couch_replicator_api_wrap:db_uri(Source),
+                couch_util:to_binary(DocError)]
+            ),
             throw({fetch_error, iolist_to_binary(DocErrorMsg)})
         end,
         try
@@ -133,8 +137,10 @@ fetch_internal(DDocName, FilterName, Source, UserCtx) ->
              _Tag:CodeError ->
                  CodeErrorMsg = io_lib:format(
                      "Couldn't parse filter code from document ~s on `~s` "
-                     " Error: ~s", [DDocName, couch_replicator_api_wrap:db_uri(Source),
-                         couch_util:to_binary(CodeError)]),
+                     " Error: ~s", [DDocName,
+                     couch_replicator_api_wrap:db_uri(Source),
+                     couch_util:to_binary(CodeError)]
+                 ),
                  throw({fetch_error, CodeErrorMsg})
          end
     after
@@ -192,7 +198,7 @@ ejsort_basic_values_test() ->
     ?assertEqual(ejsort({[]}), {[]}).
 
 ejsort_compound_values_test() ->
-    ?assertEqual(ejsort([2, 1, 3 ,<<"a">>]), [2, 1, 3, <<"a">>]),
+    ?assertEqual(ejsort([2, 1, 3, <<"a">>]), [2, 1, 3, <<"a">>]),
     Ej1 = {[{<<"a">>, 0}, {<<"c">>, 0},  {<<"b">>, 0}]},
     Ej1s =  {[{<<"a">>, 0}, {<<"b">>, 0}, {<<"c">>, 0}]},
     ?assertEqual(ejsort(Ej1), Ej1s),
diff --git a/src/couch_replicator/src/couch_replicator_httpc_pool.erl b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
index abdf1c5..33fb61f 100644
--- a/src/couch_replicator/src/couch_replicator_httpc_pool.erl
+++ b/src/couch_replicator/src/couch_replicator_httpc_pool.erl
@@ -108,7 +108,8 @@ handle_info({'EXIT', Pid, _Reason}, State) ->
         Workers2 ->
             case queue:out(Waiting) of
                 {empty, _} ->
-                    {noreply, State#state{workers = Workers2, callers = NewCallers0}};
+                    {noreply, State#state{workers = Workers2,
+                        callers = NewCallers0}};
                 {{value, From}, Waiting2} ->
                     {ok, Worker} = couch_replicator_connection:acquire(Url),
                     NewCallers1 = monitor_client(NewCallers0, Worker, From),
diff --git a/src/couch_replicator/src/couch_replicator_rate_limiter.erl b/src/couch_replicator/src/couch_replicator_rate_limiter.erl
index 011372b..b344ef1 100644
--- a/src/couch_replicator/src/couch_replicator_rate_limiter.erl
+++ b/src/couch_replicator/src/couch_replicator_rate_limiter.erl
@@ -24,7 +24,8 @@
 % The algorithm referenced above estimates a rate, whereas the implemented
 % algorithm uses an interval (in milliseconds). It preserves the original
 % semantics, that is the failure part is multplicative and the success part is
-% additive. The relationship between rate and interval is: rate = 1000 / interval.
+% additive. The relationship between rate and interval is: rate = 1000 /
+% interval.
 %
 % There are two main API functions:
 %
@@ -79,8 +80,8 @@
 % would become less accurate as more time passes. In such case choose to
 % optimistically decay the estimated value. That is assume there a certain
 % rate of successful requests happened. (For reference, TCP congestion algorithm
-% also handles a variation of this in RFC 5681 under "Restarting Idle Connections"
-% section).
+% also handles a variation of this in RFC 5681 under "Restarting Idle
+% Connections" section).
 -define(TIME_DECAY_FACTOR, 2).
 -define(TIME_DECAY_THRESHOLD, 1000).
 
diff --git a/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl b/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl
index aa087d6..2f8c647 100644
--- a/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl
+++ b/src/couch_replicator/src/couch_replicator_rate_limiter_tables.erl
@@ -17,11 +17,11 @@
 %
 % Cluster stability is defined as "there have been no nodes added or removed in
 % last `QuietPeriod` seconds". QuietPeriod value is configurable. To ensure a
-% speedier startup, during initialization there is a shorter StartupQuietPeriod in
-% effect (also configurable).
+% speedier startup, during initialization there is a shorter StartupQuietPeriod
+% in effect (also configurable).
 %
-% This module is also in charge of calculating ownership of replications based on
-% where their _repicator db documents shards live.
+% This module is also in charge of calculating ownership of replications based
+% on where their _repicator db documents shards live.
 
 -module(couch_replicator_rate_limiter_tables).
 
diff --git a/src/couch_replicator/src/couch_replicator_scheduler.erl b/src/couch_replicator/src/couch_replicator_scheduler.erl
index e9159b9..b53b19c 100644
--- a/src/couch_replicator/src/couch_replicator_scheduler.erl
+++ b/src/couch_replicator/src/couch_replicator_scheduler.erl
@@ -127,7 +127,8 @@ job_summary(JobId, HealthThreshold) ->
                 {info, Info},
                 {error_count, ErrorCount},
                 {last_updated, last_updated(History)},
-                {start_time, couch_replicator_utils:iso8601(Rep#rep.start_time)},
+                {start_time,
+                    couch_replicator_utils:iso8601(Rep#rep.start_time)},
                 {proxy, job_proxy_url(Rep#rep.source)}
             ];
         {error, not_found} ->
@@ -170,10 +171,13 @@ init(_) ->
     EtsOpts = [named_table, {read_concurrency, true}, {keypos, #job.id}],
     ?MODULE = ets:new(?MODULE, EtsOpts),
     ok = config:listen_for_changes(?MODULE, nil),
-    Interval = config:get_integer("replicator", "interval", ?DEFAULT_SCHEDULER_INTERVAL),
+    Interval = config:get_integer("replicator", "interval",
+        ?DEFAULT_SCHEDULER_INTERVAL),
     MaxJobs = config:get_integer("replicator", "max_jobs", ?DEFAULT_MAX_JOBS),
-    MaxChurn = config:get_integer("replicator", "max_churn", ?DEFAULT_MAX_CHURN),
-    MaxHistory = config:get_integer("replicator", "max_history", ?DEFAULT_MAX_HISTORY),
+    MaxChurn = config:get_integer("replicator", "max_churn",
+        ?DEFAULT_MAX_CHURN),
+    MaxHistory = config:get_integer("replicator", "max_history",
+        ?DEFAULT_MAX_HISTORY),
     {ok, Timer} = timer:send_after(Interval, reschedule),
     State = #state{
         interval = Interval,
@@ -206,19 +210,23 @@ handle_call(_, _From, State) ->
     {noreply, State}.
 
 
-handle_cast({set_max_jobs, MaxJobs}, State) when is_integer(MaxJobs), MaxJobs >= 0 ->
+handle_cast({set_max_jobs, MaxJobs}, State) when is_integer(MaxJobs),
+        MaxJobs >= 0 ->
     couch_log:notice("~p: max_jobs set to ~B", [?MODULE, MaxJobs]),
     {noreply, State#state{max_jobs = MaxJobs}};
 
-handle_cast({set_max_churn, MaxChurn}, State) when is_integer(MaxChurn), MaxChurn > 0 ->
+handle_cast({set_max_churn, MaxChurn}, State) when is_integer(MaxChurn),
+        MaxChurn > 0 ->
     couch_log:notice("~p: max_churn set to ~B", [?MODULE, MaxChurn]),
     {noreply, State#state{max_churn = MaxChurn}};
 
-handle_cast({set_max_history, MaxHistory}, State) when is_integer(MaxHistory), MaxHistory > 0 ->
+handle_cast({set_max_history, MaxHistory}, State) when is_integer(MaxHistory),
+        MaxHistory > 0 ->
     couch_log:notice("~p: max_history set to ~B", [?MODULE, MaxHistory]),
     {noreply, State#state{max_history = MaxHistory}};
 
-handle_cast({set_interval, Interval}, State) when is_integer(Interval), Interval > 0 ->
+handle_cast({set_interval, Interval}, State) when is_integer(Interval),
+        Interval > 0 ->
     couch_log:notice("~p: interval set to ~B", [?MODULE, Interval]),
     {noreply, State#state{interval = Interval}};
 
@@ -470,7 +478,8 @@ consecutive_crashes([{{crashed, _}, CrashT}, {_, PrevT} = PrevEvent | Rest],
             consecutive_crashes([PrevEvent | Rest], HealthThreshold, Count + 1)
     end;
 
-consecutive_crashes([{stopped, _}, {started, _} | _], _HealthThreshold, Count) ->
+consecutive_crashes([{stopped, _}, {started, _} | _], _HealthThreshold,
+        Count) ->
     Count;
 
 consecutive_crashes([_ | Rest], HealthThreshold, Count) ->
@@ -507,7 +516,8 @@ maybe_remove_job_int(JobId, State) ->
             true = remove_job_int(Job),
             couch_stats:increment_counter([couch_replicator, jobs, removes]),
             TotalJobs = ets:info(?MODULE, size),
-            couch_stats:update_gauge([couch_replicator, jobs, total], TotalJobs),
+            couch_stats:update_gauge([couch_replicator, jobs, total],
+                TotalJobs),
             update_running_jobs_stats(),
             ok;
         {error, not_found} ->
@@ -695,7 +705,8 @@ last_started(#job{} = Job) ->
     end.
 
 
--spec update_history(#job{}, event_type(), erlang:timestamp(), #state{}) -> #job{}.
+-spec update_history(#job{}, event_type(), erlang:timestamp(), #state{}) ->
+    #job{}.
 update_history(Job, Type, When, State) ->
     History0 = [{Type, When} | Job#job.history],
     History1 = lists:sublist(History0, State#state.max_history),
@@ -749,7 +760,8 @@ stats_fold(#job{pid = P, history = [{started, T} | _]}, Acc) when is_pid(P) ->
 
 
 
--spec avg(Sum :: non_neg_integer(), N :: non_neg_integer())  -> non_neg_integer().
+-spec avg(Sum :: non_neg_integer(), N :: non_neg_integer()) ->
+    non_neg_integer().
 avg(_Sum, 0) ->
     0;
 
@@ -1174,8 +1186,8 @@ t_oneshot_dont_get_starting_priority() ->
 
 
 % This tested in other test cases, it is here to mainly make explicit a property
-% of one-shot replications -- they can starve other jobs if they "take control" of
-% all the available scheduler slots.
+% of one-shot replications -- they can starve other jobs if they "take control"
+% of all the available scheduler slots.
 t_oneshot_will_hog_the_scheduler() ->
     ?_test(begin
         Jobs = [
@@ -1215,7 +1227,8 @@ t_if_transient_job_crashes_it_gets_removed() ->
         setup_jobs([Job]),
         ?assertEqual(1, ets:info(?MODULE, size)),
         State = #state{max_history = 3},
-        {noreply, State} = handle_info({'DOWN', r1, process, Pid, failed}, State),
+        {noreply, State} = handle_info({'DOWN', r1, process, Pid, failed},
+            State),
         ?assertEqual(0, ets:info(?MODULE, size))
    end).
 
@@ -1232,7 +1245,8 @@ t_if_permanent_job_crashes_it_stays_in_ets() ->
         setup_jobs([Job]),
         ?assertEqual(1, ets:info(?MODULE, size)),
         State = #state{max_jobs =1, max_history = 3},
-        {noreply, State} = handle_info({'DOWN', r1, process, Pid, failed}, State),
+        {noreply, State} = handle_info({'DOWN', r1, process, Pid, failed},
+            State),
         ?assertEqual(1, ets:info(?MODULE, size)),
         [Job1] = ets:lookup(?MODULE, job1),
         [Latest | _] = Job1#job.history,

-- 
To stop receiving notification emails like this one, please contact
['"commits@couchdb.apache.org" <commits@couchdb.apache.org>'].
