incubator-couchdb-dev mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From Robert Newson <rnew...@apache.org>
Subject Re: Futon Test Suite
Date Thu, 11 Aug 2011 08:17:07 GMT
Well, there are some cases where we need to work around browser (I
mean, IE) bugs. This mechanism would be generic, so folks stuck
supporting antique browsers have a way to fix themselves. For example,
caching /_session is apparently the cause of login problems on
antediluvian browsers.

B.

On 11 August 2011 08:37, Paul Davis <paul.joseph.davis@gmail.com> wrote:
> Seems like that'd just end up eating its way into the code base
> special casing all the various places we set headers.
>
> On the other hand if we had a command line test runner and could very
> specifically set headers without the confounding effects of running in
> a browser this wouldn't even be an issue.
>
> On Thu, Aug 11, 2011 at 2:29 AM, Robert Newson <rnewson@apache.org> wrote:
>> I wonder if we should add a custom request header like
>> X-CouchDB-NoCache which sets all the cachebusting headers (Expires in
>> the past, etc) rather than that hack.
>>
>> On 11 August 2011 02:15, Filipe David Manana <fdmanana@apache.org> wrote:
>>> On Wed, Aug 10, 2011 at 5:58 PM, Paul Davis <paul.joseph.davis@gmail.com>
wrote:
>>>>
>>>> Since no one seems to have believed me I decided to take a closer look
>>>
>>> I believe you, and on my machine, replication.js takes about 120ms.
>>>
>>>> at replication.js tests. And as I pointed out it was just polling a
>>>> URL in a tight loop for 3s at a time. On my machine, this patch drops
>>>> replication.js from 93329ms to 41785ms
>>>
>>> That's awesome. If it doesn't make assertions fail for others, go ahead.
>>>
>>> One thing I noticed in the past is that the browser seems to cache the
>>> results of the db.info() call. A solution (that is employed somewhere
>>> else, but for another request) is to add some random parameter to the
>>> query string, like  /db?anticache= plus the value of Math.random().
>>>
>>>>. I'll point out that that's
>>>> more than twice as fast. And that was just an obvious optimization
>>>> from watching the log scroll. There are plenty more simple things that
>>>> could be done to speed these up.
>>>>
>>>> Also, this patch makes me think that a _replication/localid -> JSON
>>>> status blob might be useful. Though I dunno how possible that is. I
>>>> reckon if we had that these would be sped up even more.
>>>>
>>>>
>>>> diff --git a/share/www/script/couch.js b/share/www/script/couch.js
>>>> index 304c9c1..792e638 100644
>>>> --- a/share/www/script/couch.js
>>>> +++ b/share/www/script/couch.js
>>>> @@ -40,6 +40,8 @@ function CouchDB(name, httpHeaders) {
>>>>     if (this.last_req.status == 404) {
>>>>       return false;
>>>>     }
>>>> +    var t0 = new Date();
>>>> +    while(true) {if((new Date()) - t0 > 100) break;}
>>>>     CouchDB.maybeThrowError(this.last_req);
>>>>     return JSON.parse(this.last_req.responseText);
>>>>   };
>>>> diff --git a/share/www/script/test/replication.js
>>>> b/share/www/script/test/replication.js
>>>> index 65c5eaa..b82375a 100644
>>>> --- a/share/www/script/test/replication.js
>>>> +++ b/share/www/script/test/replication.js
>>>> @@ -149,24 +149,40 @@ couchTests.replication = function(debug) {
>>>>   }
>>>>
>>>>
>>>> -  function waitForSeq(sourceDb, targetDb) {
>>>> -    var targetSeq,
>>>> -        sourceSeq = sourceDb.info().update_seq,
>>>> +  function waitForSeq(sourceDb, targetDb, rep_id) {
>>>> +    var seq = sourceDb.info().update_seq,
>>>> +        ri = new RegExp(rep_id),
>>>> +        tasks,
>>>>         t0 = new Date(),
>>>>         t1,
>>>>         ms = 3000;
>>>>
>>>>     do {
>>>> -      targetSeq = targetDb.info().update_seq;
>>>> +      tasks = JSON.parse(CouchDB.request("GET",
>>>> "/_active_tasks").responseText);
>>>> +      for(var i = 0; i < tasks.length; i++) {
>>>> +        if(!ri.test(tasks[i].task)) continue;
>>>> +        var captured = /Processed (\d+)/.exec(tasks[i].status);
>>>> +        if(parseInt(captured[1]) >= seq) return;
>>>> +        break;
>>>> +      }
>>>>       t1 = new Date();
>>>> -    } while (((t1 - t0) <= ms) && targetSeq < sourceSeq);
>>>> +    } while ((t1 - t0) <= ms);
>>>>   }
>>>>
>>>> +  function waitForRepEnd(rep_id) {
>>>> +    var ri = new RegExp(rep_id),
>>>> +        tasks,
>>>> +        t0 = new Date(),
>>>> +        t1,
>>>> +        ms = 3000;
>>>>
>>>> -  function wait(ms) {
>>>> -    var t0 = new Date(), t1;
>>>>     do {
>>>> -      CouchDB.request("GET", "/");
>>>> +      tasks = JSON.parse(CouchDB.request("GET",
>>>> "/_active_tasks").responseText);
>>>> +      var found = false;
>>>> +      for(var i = 0; i < tasks.length; i++) {
>>>> +        if(!ri.test(tasks[i].task)) found = true;
>>>> +      }
>>>> +      if(!found) return;
>>>>       t1 = new Date();
>>>>     } while ((t1 - t0) <= ms);
>>>>   }
>>>> @@ -1143,7 +1159,7 @@ couchTests.replication = function(debug) {
>>>>
>>>>     var rep_id = repResult._local_id;
>>>>
>>>> -    waitForSeq(sourceDb, targetDb);
>>>> +    waitForSeq(sourceDb, targetDb, rep_id);
>>>>
>>>>     for (j = 0; j < docs.length; j++) {
>>>>       doc = docs[j];
>>>> @@ -1181,7 +1197,7 @@ couchTests.replication = function(debug) {
>>>>     var ddoc = docs[docs.length - 1]; // design doc
>>>>     addAtt(sourceDb, ddoc, "readme.txt", att1_data, "text/plain");
>>>>
>>>> -    waitForSeq(sourceDb, targetDb);
>>>> +    waitForSeq(sourceDb, targetDb, rep_id);
>>>>
>>>>     var modifDocs = docs.slice(10, 15).concat([ddoc]);
>>>>     for (j = 0; j < modifDocs.length; j++) {
>>>> @@ -1226,7 +1242,7 @@ couchTests.replication = function(debug) {
>>>>     // add another attachment to the ddoc on source
>>>>     addAtt(sourceDb, ddoc, "data.dat", att2_data, "application/binary");
>>>>
>>>> -    waitForSeq(sourceDb, targetDb);
>>>> +    waitForSeq(sourceDb, targetDb, rep_id);
>>>>
>>>>     copy = targetDb.open(ddoc._id);
>>>>     var atts = copy._attachments;
>>>> @@ -1263,7 +1279,7 @@ couchTests.replication = function(debug) {
>>>>     var newDocs = makeDocs(25, 35);
>>>>     populateDb(sourceDb, newDocs, true);
>>>>
>>>> -    waitForSeq(sourceDb, targetDb);
>>>> +    waitForSeq(sourceDb, targetDb, rep_id);
>>>>
>>>>     for (j = 0; j < newDocs.length; j++) {
>>>>       doc = newDocs[j];
>>>> @@ -1282,7 +1298,7 @@ couchTests.replication = function(debug) {
>>>>     TEquals(true, sourceDb.deleteDoc(newDocs[0]).ok);
>>>>     TEquals(true, sourceDb.deleteDoc(newDocs[6]).ok);
>>>>
>>>> -    waitForSeq(sourceDb, targetDb);
>>>> +    waitForSeq(sourceDb, targetDb, rep_id);
>>>>
>>>>     copy = targetDb.open(newDocs[0]._id);
>>>>     TEquals(null, copy);
>>>> @@ -1317,7 +1333,7 @@ couchTests.replication = function(debug) {
>>>>     };
>>>>     TEquals(true, sourceDb.save(doc).ok);
>>>>
>>>> -    wait(2000);
>>>> +    waitForRepEnd(rep_id);
>>>>     copy = targetDb.open(doc._id);
>>>>     TEquals(null, copy);
>>>>   }
>>>> @@ -1359,7 +1375,7 @@ couchTests.replication = function(debug) {
>>>>
>>>>   var tasksAfter = JSON.parse(xhr.responseText);
>>>>   TEquals(tasks.length, tasksAfter.length);
>>>> -  waitForSeq(sourceDb, targetDb);
>>>> +  waitForSeq(sourceDb, targetDb, rep_id);
>>>>   T(sourceDb.open("30") !== null);
>>>>
>>>>   // cancel replication
>>>>
>>>
>>>
>>>
>>> --
>>> Filipe David Manana,
>>> fdmanana@gmail.com, fdmanana@apache.org
>>>
>>> "Reasonable men adapt themselves to the world.
>>>  Unreasonable men adapt the world to themselves.
>>>  That's why all progress depends on unreasonable men."
>>>
>>
>

Mime
View raw message