couchdb-commits mailing list archives

From gar...@apache.org
Subject [31/52] [partial] couchdb-nmo git commit: prepare for release
Date Wed, 14 Oct 2015 10:09:28 GMT
http://git-wip-us.apache.org/repos/asf/couchdb-nmo/blob/753f1767/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/lib/wireprotocol/3_2_support.js
----------------------------------------------------------------------
diff --git a/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/lib/wireprotocol/3_2_support.js
b/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/lib/wireprotocol/3_2_support.js
new file mode 100644
index 0000000..c5e61aa
--- /dev/null
+++ b/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/lib/wireprotocol/3_2_support.js
@@ -0,0 +1,494 @@
+"use strict";
+
+var Insert = require('./commands').Insert
+  , Update = require('./commands').Update
+  , Remove = require('./commands').Remove
+  , Query = require('../connection/commands').Query
+  , copy = require('../connection/utils').copy
+  , KillCursor = require('../connection/commands').KillCursor
+  , GetMore = require('../connection/commands').GetMore
+  , ReadPreference = require('../topologies/read_preference')
+  , f = require('util').format
+  , CommandResult = require('../topologies/command_result')
+  , MongoError = require('../error')
+  , Long = require('bson').Long;
+
+var WireProtocol = function(legacyWireProtocol) {
+  this.legacyWireProtocol = legacyWireProtocol;
+}
+
+//
+// Execute a write operation
+var executeWrite = function(topology, type, opsField, ns, ops, options, callback) {
+  if(ops.length == 0) throw new MongoError("insert must contain at least one document");
+  if(typeof options == 'function') {
+    callback = options;
+    options = {};
+  }
+
+  // Split the ns up to get db and collection
+  var p = ns.split(".");
+  var d = p.shift();
+  // Options
+  var ordered = typeof options.ordered == 'boolean' ? options.ordered : true;
+  var writeConcern = options.writeConcern || {};
+  // return skeleton
+  var writeCommand = {};
+  writeCommand[type] = p.join('.');
+  writeCommand[opsField] = ops;
+  writeCommand.ordered = ordered;
+  writeCommand.writeConcern = writeConcern;
+
+  // If bypassDocumentValidation is set, enable it on the write command
+  if(typeof options.bypassDocumentValidation == 'boolean') {
+    writeCommand.bypassDocumentValidation = options.bypassDocumentValidation;
+  }
+
+  // Options object
+  var opts = {};
+  if(type == 'insert') opts.checkKeys = true;
+  // Ensure we support serialization of functions
+  if(options.serializeFunctions) opts.serializeFunctions = options.serializeFunctions;
+  if(options.ignoreUndefined) opts.ignoreUndefined = options.ignoreUndefined;
+  // Execute command
+  topology.command(f("%s.$cmd", d), writeCommand, opts, callback);
+}
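+
+// Example (illustrative, hypothetical namespace and document):
+//   executeWrite(topology, 'insert', 'documents', 'db.users', [{a: 1}], {}, callback)
+// issues a command against 'db.$cmd' shaped roughly like:
+//   { insert: 'users', documents: [{a: 1}], ordered: true, writeConcern: {} }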
+
+//
+// Needs to support legacy mass insert as well as ordered/unordered legacy
+// emulation
+//
+WireProtocol.prototype.insert = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
+  executeWrite(topology, 'insert', 'documents', ns, ops, options, callback);
+}
+
+WireProtocol.prototype.update = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
+  executeWrite(topology, 'update', 'updates', ns, ops, options, callback);
+}
+
+WireProtocol.prototype.remove = function(topology, ismaster, ns, bson, pool, callbacks, ops, options, callback) {
+  executeWrite(topology, 'delete', 'deletes', ns, ops, options, callback);
+}
+
+WireProtocol.prototype.killCursor = function(bson, ns, cursorId, connection, callbacks, callback) {
+  // Build command namespace
+  var parts = ns.split(/\./);
+  // Command namespace
+  var commandns = f('%s.$cmd', parts.shift());
+  // Create getMore command
+  var killcursorCmd = {
+    killCursors: parts.join('.'),
+    cursors: [cursorId]
+  }
+
+  // Build Query object
+  var query = new Query(bson, commandns, killcursorCmd, {
+      numberToSkip: 0, numberToReturn: -1
+    , checkKeys: false, returnFieldSelector: null
+  });
+
+  // Set query flags
+  query.slaveOk = true;
+
+  // Execute the kill cursor command
+  if(connection && connection.isConnected()) {
+    connection.write(query.toBin());
+  }
+
+  // Kill cursor callback
+  var killCursorCallback = function(err, r) {
+    if(err) {
+      if(typeof callback != 'function') return;
+      return callback(err);
+    }
+
+    // If we have a timed out query or a cursor that was killed
+    if((r.responseFlags & (1 << 0)) != 0) {
+      if(typeof callback != 'function') return;
+      return callback(new MongoError("cursor killed or timed out"), null);
+    }
+
+    if(!Array.isArray(r.documents) || r.documents.length == 0) {
+      if(typeof callback != 'function') return;
+      return callback(new MongoError(f('invalid killCursors result returned for cursor id %s', cursorId)));
+    }
+
+    // Return the result
+    if(typeof callback == 'function') {
+      callback(null, r.documents[0]);
+    }
+  }
+
+  // Register a callback
+  callbacks.register(query.requestId, killCursorCallback);
+}
+
+WireProtocol.prototype.getMore = function(bson, ns, cursorState, batchSize, raw, connection, callbacks, options, callback) {
+  var readPreference = options.readPreference || new ReadPreference('primary');
+  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
+  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
+  // Build command namespace
+  var parts = ns.split(/\./);
+  // Command namespace
+  var commandns = f('%s.$cmd', parts.shift());
+
+  // Check if we have a maxTimeMS set
+  var maxTimeMS = typeof cursorState.cmd.maxTimeMS == 'number' ? cursorState.cmd.maxTimeMS : 3000;
+
+  // Create getMore command
+  var getMoreCmd = {
+    getMore: cursorState.cursorId,
+    collection: parts.join('.'),
+    batchSize: batchSize,
+    maxTimeMS: maxTimeMS
+  }
+
+  // Build Query object
+  var query = new Query(bson, commandns, getMoreCmd, {
+      numberToSkip: 0, numberToReturn: -1
+    , checkKeys: false, returnFieldSelector: null
+  });
+
+  // Set query flags
+  query.slaveOk = readPreference.slaveOk();
+
+  // Query callback
+  var queryCallback = function(err, r) {
+    if(err) return callback(err);
+
+    // If we have a timed out query or a cursor that was killed
+    if((r.responseFlags & (1 << 0)) != 0) {
+      return callback(new MongoError("cursor killed or timed out"), null);
+    }
+
+    if(!Array.isArray(r.documents) || r.documents.length == 0)
+      return callback(new MongoError(f('invalid getMore result returned for cursor id %s', cursorState.cursorId)));
+
+    // Raw, return all the extracted documents
+    if(raw) {
+      cursorState.documents = r.documents;
+      cursorState.cursorId = r.cursorId;
+      return callback(null, r.documents);
+    }
+
+    // Ensure the cursor id is a Long value
+    var cursorId = typeof r.documents[0].cursor.id == 'number'
+      ? Long.fromNumber(r.documents[0].cursor.id)
+      : r.documents[0].cursor.id;
+
+    // Set all the values
+    cursorState.documents = r.documents[0].cursor.nextBatch;
+    cursorState.cursorId = cursorId;
+
+    // Return the result
+    callback(null, r.documents[0]);
+  }
+
+  // If we have a raw query decorate the function
+  if(raw) {
+    queryCallback.raw = raw;
+  }
+
+  // Add the result field needed
+  queryCallback.documentsReturnedIn = 'nextBatch';
+
+  // Register a callback
+  callbacks.register(query.requestId, queryCallback);
+  // Write out the getMore command
+  connection.write(query.toBin());
+}
+
+WireProtocol.prototype.command = function(bson, ns, cmd, cursorState, topology, options) {
+  // Establish type of command
+  if(cmd.find) {
+    if(cmd.exhaust) {
+      return this.legacyWireProtocol.command(bson, ns, cmd, cursorState, topology, options);
+    }
+
+    // Create the find command
+    var query = executeFindCommand(bson, ns, cmd, cursorState, topology, options)
+    // Ensure the cmd is not flagged as virtual
+    cmd.virtual = false;
+    // Signal the documents are in the firstBatch value
+    query.documentsReturnedIn = 'firstBatch';
+    // Return the query
+    return query;
+  } else if(cursorState.cursorId != null) {
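+    // A cursor id already exists; nothing to build here, further batches are
+    // fetched via the getMore path above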
+  } else if(cmd) {
+    return setupCommand(bson, ns, cmd, cursorState, topology, options);
+  } else {
+    throw new MongoError(f("command %s does not return a cursor", JSON.stringify(cmd)));
+  }
+}
+
+// // Command
+// {
+//     find: ns
+//   , query: <object>
+//   , limit: <n>
+//   , fields: <object>
+//   , skip: <n>
+//   , hint: <string>
+//   , explain: <boolean>
+//   , snapshot: <boolean>
+//   , batchSize: <n>
+//   , returnKey: <boolean>
+//   , maxScan: <n>
+//   , min: <n>
+//   , max: <n>
+//   , showDiskLoc: <boolean>
+//   , comment: <string>
+//   , maxTimeMS: <n>
+//   , raw: <boolean>
+//   , readPreference: <ReadPreference>
+//   , tailable: <boolean>
+//   , oplogReplay: <boolean>
+//   , noCursorTimeout: <boolean>
+//   , awaitdata: <boolean>
+//   , exhaust: <boolean>
+//   , partial: <boolean>
+// }
+
+// FIND/GETMORE SPEC
+// {
+//     “find”: <string>,
+//     “filter”: { ... },
+//     “sort”: { ... },
+//     “projection”: { ... },
+//     “hint”: { ... },
+//     “skip”: <int>,
+//     “limit”: <int>,
+//     “batchSize”: <int>,
+//     “singleBatch”: <bool>,
+//     “comment”: <string>,
+//     “maxScan”: <int>,
+//     “maxTimeMS”: <int>,
+//     “max”: { ... },
+//     “min”: { ... },
+//     “returnKey”: <bool>,
+//     “showRecordId”: <bool>,
+//     “snapshot”: <bool>,
+//     “tailable”: <bool>,
+//     “oplogReplay”: <bool>,
+//     “noCursorTimeout”: <bool>,
+//     “awaitData”: <bool>,
+//     “partial”: <bool>,
+//     “$readPreference”: { ... }
+// }
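+
+// Example (illustrative only): for ns 'db.users' and a legacy-style cmd such as
+//   { query: { active: true }, sort: [['age', 'desc']], limit: -5 }
+// executeFindCommand below builds approximately
+//   { find: 'users', filter: { active: true }, sort: { age: -1 }, limit: 5, singleBatch: true }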
+
+//
+// Execute a find command
+var executeFindCommand = function(bson, ns, cmd, cursorState, topology, options) {
+  var readPreference = options.readPreference || new ReadPreference('primary');
+  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
+  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
+
+  // Ensure we have at least some options
+  options = options || {};
+  // Set the optional batchSize
+  cursorState.batchSize = cmd.batchSize || cursorState.batchSize;
+
+  // Build command namespace
+  var parts = ns.split(/\./);
+  // Command namespace
+  var commandns = f('%s.$cmd', parts.shift());
+
+  // Build actual find command
+  var findCmd = {
+    find: parts.join('.')
+  };
+
+  // If we provided a filter
+  if(cmd.query) findCmd.filter = cmd.query;
+
+  // Sort value
+  var sortValue = cmd.sort;
+
+  // Handle issue of sort being an Array
+  if(Array.isArray(sortValue)) {
+    var sortObject = {};
+
+    if(sortValue.length > 0 && !Array.isArray(sortValue[0])) {
+      var sortDirection = sortValue[1];
+      // Translate the sort order text
+      if(sortDirection == 'asc') {
+        sortDirection = 1;
+      } else if(sortDirection == 'desc') {
+        sortDirection = -1;
+      }
+
+      // Set the sort order
+      sortObject[sortValue[0]] = sortDirection;
+    } else {
+      for(var i = 0; i < sortValue.length; i++) {
+        var sortDirection = sortValue[i][1];
+        // Translate the sort order text
+        if(sortDirection == 'asc') {
+          sortDirection = 1;
+        } else if(sortDirection == 'desc') {
+          sortDirection = -1;
+        }
+
+        // Set the sort order
+        sortObject[sortValue[i][0]] = sortDirection;
+      }
+    }
+
+    sortValue = sortObject;
+  };
+
+  // Add sort to command
+  if(cmd.sort) findCmd.sort = sortValue;
+  // Add a projection to the command
+  if(cmd.fields) findCmd.projection = cmd.fields;
+  // Add a hint to the command
+  if(cmd.hint) findCmd.hint = cmd.hint;
+  // Add a skip
+  if(cmd.skip) findCmd.skip = cmd.skip;
+  // Add a limit
+  if(cmd.limit) findCmd.limit = cmd.limit;
+  // Add a batchSize
+  if(cmd.batchSize) findCmd.batchSize = cmd.batchSize;
+
+  // Check if we wish to have a singleBatch
+  if(cmd.limit < 0) {
+    findCmd.limit = Math.abs(cmd.limit);
+    findCmd.singleBatch = true;
+  }
+
+  // If we have comment set
+  if(cmd.comment) findCmd.comment = cmd.comment;
+
+  // If we have maxScan
+  if(cmd.maxScan) findCmd.maxScan = cmd.maxScan;
+
+  // If we have maxTimeMS set
+  if(cmd.maxTimeMS) findCmd.maxTimeMS = cmd.maxTimeMS;
+
+  // If we have min
+  if(cmd.min) findCmd.min = cmd.min;
+
+  // If we have max
+  if(cmd.max) findCmd.max = cmd.max;
+
+  // If we have returnKey set
+  if(cmd.returnKey) findCmd.returnKey = cmd.returnKey;
+
+  // If we have showDiskLoc set
+  if(cmd.showDiskLoc) findCmd.showRecordId = cmd.showDiskLoc;
+
+  // If we have snapshot set
+  if(cmd.snapshot) findCmd.snapshot = cmd.snapshot;
+
+  // If we have tailable set
+  if(cmd.tailable) findCmd.tailable = cmd.tailable;
+
+  // If we have oplogReplay set
+  if(cmd.oplogReplay) findCmd.oplogReplay = cmd.oplogReplay;
+
+  // If we have noCursorTimeout set
+  if(cmd.noCursorTimeout) findCmd.noCursorTimeout = cmd.noCursorTimeout;
+
+  // If we have awaitData set
+  if(cmd.awaitData) findCmd.awaitData = cmd.awaitData;
+  if(cmd.awaitdata) findCmd.awaitData = cmd.awaitdata;
+
+  // If we have partial set
+  if(cmd.partial) findCmd.partial = cmd.partial;
+
+  // If we have a Mongos topology, check if we need to add a readPreference
+  if(topology.type == 'mongos' && readPreference) {
+    findCmd['$readPreference'] = readPreference.toJSON();
+  }
+
+  // If we have explain, we need to rewrite the find command
+  // to wrap it in the explain command
+  if(cmd.explain) {
+    findCmd = {
+      explain: findCmd
+    }
+  }
+
+  // Did we provide a readConcern
+  if(cmd.readConcern) findCmd.readConcern = cmd.readConcern;
+
+  // Set up the serialize and ignoreUndefined fields
+  var serializeFunctions = typeof options.serializeFunctions == 'boolean' 
+    ? options.serializeFunctions : false;
+  var ignoreUndefined = typeof options.ignoreUndefined == 'boolean' 
+    ? options.ignoreUndefined : false;
+
+  // Build Query object
+  var query = new Query(bson, commandns, findCmd, {
+      numberToSkip: 0, numberToReturn: -1
+    , checkKeys: false, returnFieldSelector: null
+    , serializeFunctions: serializeFunctions, ignoreUndefined: ignoreUndefined
+  });
+
+  // Set query flags
+  query.slaveOk = readPreference.slaveOk();
+
+  // Return the query
+  return query;
+}
+
+//
+// Set up a command cursor
+var setupCommand = function(bson, ns, cmd, cursorState, topology, options) {
+  var readPreference = options.readPreference || new ReadPreference('primary');
+  if(typeof readPreference == 'string') readPreference = new ReadPreference(readPreference);
+  if(!(readPreference instanceof ReadPreference)) throw new MongoError('readPreference must be a ReadPreference instance');
+
+  // Set empty options object
+  options = options || {}
+
+  // Final query
+  var finalCmd = {};
+  for(var name in cmd) {
+    finalCmd[name] = cmd[name];
+  }
+
+  // Build command namespace
+  var parts = ns.split(/\./);
+
+  // If we have a Mongos topology, check if we need to add a readPreference
+  if(topology.type == 'mongos' && readPreference) {
+    finalCmd['$readPreference'] = readPreference.toJSON();
+  }
+
+  // Serialize functions
+  var serializeFunctions = typeof options.serializeFunctions == 'boolean'
+    ? options.serializeFunctions : false;
+
+  // Set up the serialize and ignoreUndefined fields
+  var ignoreUndefined = typeof options.ignoreUndefined == 'boolean' 
+    ? options.ignoreUndefined : false;
+
+  // Build Query object
+  var query = new Query(bson, f('%s.$cmd', parts.shift()), finalCmd, {
+      numberToSkip: 0, numberToReturn: -1
+    , checkKeys: false, serializeFunctions: serializeFunctions
+    , ignoreUndefined: ignoreUndefined
+  });
+
+  // Set query flags
+  query.slaveOk = readPreference.slaveOk();
+
+  // Return the query
+  return query;
+}
+
+/**
+ * @ignore
+ */
+var bindToCurrentDomain = function(callback) {
+  var domain = process.domain;
+  if(domain == null || callback == null) {
+    return callback;
+  } else {
+    return domain.bind(callback);
+  }
+}
+
+module.exports = WireProtocol;

http://git-wip-us.apache.org/repos/asf/couchdb-nmo/blob/753f1767/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/lib/wireprotocol/commands.js
----------------------------------------------------------------------
diff --git a/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/lib/wireprotocol/commands.js
b/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/lib/wireprotocol/commands.js
new file mode 100644
index 0000000..9c665ee
--- /dev/null
+++ b/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/lib/wireprotocol/commands.js
@@ -0,0 +1,357 @@
+"use strict";
+
+var MongoError = require('../error');
+
+// Wire command operation ids
+var OP_UPDATE = 2001;
+var OP_INSERT = 2002;
+var OP_DELETE = 2006;
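+
+// Note: each legacy wire message below starts with the standard 16-byte header
+// (messageLength, requestID, responseTo, opCode) followed by an op-specific body,
+// e.g. OP_INSERT is header | int32 flags | cstring fullCollectionName | BSON documents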
+
+var Insert = function(requestId, ismaster, bson, ns, documents, options) {
+  // Basic options that need to be passed in
+  if(ns == null) throw new MongoError("ns must be specified for query");
+  if(!Array.isArray(documents) || documents.length == 0) throw new MongoError("documents array must contain at least one document to insert");
+
+  // Validate that we are not passing 0x00 in the collection name
+  if(!!~ns.indexOf("\x00")) {
+    throw new MongoError("namespace cannot contain a null character");
+  }
+
+  // Set internal
+  this.requestId = requestId;
+  this.bson = bson;
+  this.ns = ns;
+  this.documents = documents;
+  this.ismaster = ismaster;
+
+  // Ensure empty options
+  options = options || {};
+
+  // Unpack options
+  this.serializeFunctions = typeof options.serializeFunctions == 'boolean' ? options.serializeFunctions : false;
+  this.ignoreUndefined = typeof options.ignoreUndefined == 'boolean' ? options.ignoreUndefined : false;
+  this.checkKeys = typeof options.checkKeys == 'boolean' ? options.checkKeys : true;
+  this.continueOnError = typeof options.continueOnError == 'boolean' ? options.continueOnError : false;
+  // Set flags
+  this.flags = this.continueOnError ? 1 : 0;
+}
+
+// To Binary
+Insert.prototype.toBin = function() {
+  // Contains all the buffers to be written
+  var buffers = [];
+
+  // Header buffer
+  var header = new Buffer(
+    4 * 4 // Header
+    + 4   // Flags
+    + Buffer.byteLength(this.ns) + 1 // namespace
+  );
+
+  // Add header to buffers
+  buffers.push(header);
+
+  // Total length of the message
+  var totalLength = header.length;
+
+  // Serialize all the documents
+  for(var i = 0; i < this.documents.length; i++) {
+    var buffer = this.bson.serialize(this.documents[i]
+      , this.checkKeys
+      , true
+      , this.serializeFunctions
+      , 0, this.ignoreUndefined);
+
+    // Document is larger than maxBsonObjectSize, terminate serialization
+    if(buffer.length > this.ismaster.maxBsonObjectSize) {
+      throw new MongoError("Document exceeds maximum allowed bson size of " + this.ismaster.maxBsonObjectSize
+ " bytes");
+    }
+
+    // Add to total length of wire protocol message
+    totalLength = totalLength + buffer.length;
+    // Add to buffer
+    buffers.push(buffer);
+  }
+
+  // Command is larger than maxMessageSizeBytes, terminate serialization
+  if(totalLength > this.ismaster.maxMessageSizeBytes) {
+    throw new MongoError("Command exceeds maximum message size of " + this.ismaster.maxMessageSizeBytes
+ " bytes");
+  }
+
+  // Add all the metadata
+  var index = 0;
+
+  // Write header length
+  header[index + 3] = (totalLength >> 24) & 0xff;
+  header[index + 2] = (totalLength >> 16) & 0xff;
+  header[index + 1] = (totalLength >> 8) & 0xff;
+  header[index] = (totalLength) & 0xff;
+  index = index + 4;
+
+  // Write header requestId
+  header[index + 3] = (this.requestId >> 24) & 0xff;
+  header[index + 2] = (this.requestId >> 16) & 0xff;
+  header[index + 1] = (this.requestId >> 8) & 0xff;
+  header[index] = (this.requestId) & 0xff;
+  index = index + 4;
+
+  // Write responseTo (always zero)
+  header[index + 3] = (0 >> 24) & 0xff;
+  header[index + 2] = (0 >> 16) & 0xff;
+  header[index + 1] = (0 >> 8) & 0xff;
+  header[index] = (0) & 0xff;
+  index = index + 4;
+
+  // Operation
+  header[index + 3] = (OP_INSERT >> 24) & 0xff;
+  header[index + 2] = (OP_INSERT >> 16) & 0xff;
+  header[index + 1] = (OP_INSERT >> 8) & 0xff;
+  header[index] = (OP_INSERT) & 0xff;
+  index = index + 4;
+
+  // Flags
+  header[index + 3] = (this.flags >> 24) & 0xff;
+  header[index + 2] = (this.flags >> 16) & 0xff;
+  header[index + 1] = (this.flags >> 8) & 0xff;
+  header[index] = (this.flags) & 0xff;
+  index = index + 4;
+
+  // Write collection name
+  index = index + header.write(this.ns, index, 'utf8') + 1;
+  header[index - 1] = 0;
+
+  // Return the buffers
+  return buffers;
+}
+
+var Update = function(requestId, ismaster, bson, ns, update, options) {
+  // Basic options that need to be passed in
+  if(ns == null) throw new MongoError("ns must be specified for query");
+
+  // Ensure empty options
+  options = options || {};
+
+  // Set internal
+  this.requestId = requestId;
+  this.bson = bson;
+  this.ns = ns;
+  this.ismaster = ismaster;
+
+  // Unpack options
+  this.serializeFunctions = typeof options.serializeFunctions == 'boolean' ? options.serializeFunctions : false;
+  this.ignoreUndefined = typeof options.ignoreUndefined == 'boolean' ? options.ignoreUndefined : false;
+  this.checkKeys = typeof options.checkKeys == 'boolean' ? options.checkKeys : false;
+
+  // Unpack the update document
+  this.upsert = typeof update[0].upsert == 'boolean' ? update[0].upsert : false;
+  this.multi = typeof update[0].multi == 'boolean' ? update[0].multi : false;
+  this.q = update[0].q;
+  this.u = update[0].u;
+
+  // Create flag value
+  this.flags = this.upsert ? 1 : 0;
+  this.flags = this.multi ? this.flags | 2 : this.flags;
+}
+
+// To Binary
+Update.prototype.toBin = function() {
+  // Contains all the buffers to be written
+  var buffers = [];
+
+  // Header buffer
+  var header = new Buffer(
+    4 * 4 // Header
+    + 4   // ZERO
+    + Buffer.byteLength(this.ns) + 1 // namespace
+    + 4   // Flags
+  );
+
+  // Add header to buffers
+  buffers.push(header);
+
+  // Total length of the message
+  var totalLength = header.length;
+
+  // Serialize the selector
+  var selector = this.bson.serialize(this.q
+    , this.checkKeys
+    , true
+    , this.serializeFunctions
+    , 0, this.ignoreUndefined);
+  buffers.push(selector);
+  totalLength = totalLength + selector.length;
+
+  // Serialize the update
+  var update = this.bson.serialize(this.u
+    , this.checkKeys
+    , true
+    , this.serializeFunctions
+    , 0, this.ignoreUndefined);
+  buffers.push(update);
+  totalLength = totalLength + update.length;
+
+  // Index in header buffer
+  var index = 0;
+
+  // Write header length
+  header[index + 3] = (totalLength >> 24) & 0xff;
+  header[index + 2] = (totalLength >> 16) & 0xff;
+  header[index + 1] = (totalLength >> 8) & 0xff;
+  header[index] = (totalLength) & 0xff;
+  index = index + 4;
+
+  // Write header requestId
+  header[index + 3] = (this.requestId >> 24) & 0xff;
+  header[index + 2] = (this.requestId >> 16) & 0xff;
+  header[index + 1] = (this.requestId >> 8) & 0xff;
+  header[index] = (this.requestId) & 0xff;
+  index = index + 4;
+
+  // Write responseTo (always zero)
+  header[index + 3] = (0 >> 24) & 0xff;
+  header[index + 2] = (0 >> 16) & 0xff;
+  header[index + 1] = (0 >> 8) & 0xff;
+  header[index] = (0) & 0xff;
+  index = index + 4;
+
+  // Operation
+  header[index + 3] = (OP_UPDATE >> 24) & 0xff;
+  header[index + 2] = (OP_UPDATE >> 16) & 0xff;
+  header[index + 1] = (OP_UPDATE >> 8) & 0xff;
+  header[index] = (OP_UPDATE) & 0xff;
+  index = index + 4;
+
+  // Write ZERO
+  header[index + 3] = (0 >> 24) & 0xff;
+  header[index + 2] = (0 >> 16) & 0xff;
+  header[index + 1] = (0 >> 8) & 0xff;
+  header[index] = (0) & 0xff;
+  index = index + 4;
+
+  // Write collection name
+  index = index + header.write(this.ns, index, 'utf8') + 1;
+  header[index - 1] = 0;
+
+  // Flags
+  header[index + 3] = (this.flags >> 24) & 0xff;
+  header[index + 2] = (this.flags >> 16) & 0xff;
+  header[index + 1] = (this.flags >> 8) & 0xff;
+  header[index] = (this.flags) & 0xff;
+  index = index + 4;
+
+  // Return the buffers
+  return buffers;
+}
+
+var Remove = function(requestId, ismaster, bson, ns, remove, options) {
+  // Basic options that need to be passed in
+  if(ns == null) throw new MongoError("ns must be specified for query");
+
+  // Ensure empty options
+  options = options || {};
+
+  // Set internal
+  this.requestId = requestId;
+  this.bson = bson;
+  this.ns = ns;
+  this.ismaster = ismaster;
+
+  // Unpack options
+  this.serializeFunctions = typeof options.serializeFunctions == 'boolean' ? options.serializeFunctions : false;
+  this.ignoreUndefined = typeof options.ignoreUndefined == 'boolean' ? options.ignoreUndefined : false;
+  this.checkKeys = typeof options.checkKeys == 'boolean' ? options.checkKeys : false;
+
+  // Unpack the remove document
+  this.limit = typeof remove[0].limit == 'number' ? remove[0].limit : 1;
+  this.q = remove[0].q;
+
+  // Create flag value
+  this.flags = this.limit == 1 ? 1 : 0;
+}
+
+// To Binary
+Remove.prototype.toBin = function() {
+  // Contains all the buffers to be written
+  var buffers = [];
+
+  // Header buffer
+  var header = new Buffer(
+    4 * 4 // Header
+    + 4   // ZERO
+    + Buffer.byteLength(this.ns) + 1 // namespace
+    + 4   // Flags
+  );
+
+  // Add header to buffers
+  buffers.push(header);
+
+  // Total length of the message
+  var totalLength = header.length;
+
+  // Serialize the selector
+  var selector = this.bson.serialize(this.q
+    , this.checkKeys
+    , true
+    , this.serializeFunctions
+    , 0, this.ignoreUndefined);
+  buffers.push(selector);
+  totalLength = totalLength + selector.length;
+
+  // Index in header buffer
+  var index = 0;
+
+  // Write header length
+  header[index + 3] = (totalLength >> 24) & 0xff;
+  header[index + 2] = (totalLength >> 16) & 0xff;
+  header[index + 1] = (totalLength >> 8) & 0xff;
+  header[index] = (totalLength) & 0xff;
+  index = index + 4;
+
+  // Write header requestId
+  header[index + 3] = (this.requestId >> 24) & 0xff;
+  header[index + 2] = (this.requestId >> 16) & 0xff;
+  header[index + 1] = (this.requestId >> 8) & 0xff;
+  header[index] = (this.requestId) & 0xff;
+  index = index + 4;
+
+  // Write responseTo (always zero)
+  header[index + 3] = (0 >> 24) & 0xff;
+  header[index + 2] = (0 >> 16) & 0xff;
+  header[index + 1] = (0 >> 8) & 0xff;
+  header[index] = (0) & 0xff;
+  index = index + 4;
+
+  // Operation
+  header[index + 3] = (OP_DELETE >> 24) & 0xff;
+  header[index + 2] = (OP_DELETE >> 16) & 0xff;
+  header[index + 1] = (OP_DELETE >> 8) & 0xff;
+  header[index] = (OP_DELETE) & 0xff;
+  index = index + 4;
+
+  // Write ZERO
+  header[index + 3] = (0 >> 24) & 0xff;
+  header[index + 2] = (0 >> 16) & 0xff;
+  header[index + 1] = (0 >> 8) & 0xff;
+  header[index] = (0) & 0xff;
+  index = index + 4;
+
+  // Write collection name
+  index = index + header.write(this.ns, index, 'utf8') + 1;
+  header[index - 1] = 0;
+
+  // Flags
+  header[index + 3] = (this.flags >> 24) & 0xff;
+  header[index + 2] = (this.flags >> 16) & 0xff;
+  header[index + 1] = (this.flags >> 8) & 0xff;
+  header[index] = (this.flags) & 0xff;
+  index = index + 4;
+
+  // Return the buffers
+  return buffers;
+}
+
+module.exports = {
+    Insert: Insert
+  , Update: Update
+  , Remove: Remove
+}
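+
+// Example usage (illustrative values):
+//   var insert = new Insert(requestId, ismaster, bson, 'db.users', [{a: 1}], { checkKeys: true });
+//   var buffers = insert.toBin(); // array of Buffers ready to be written to the connection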

http://git-wip-us.apache.org/repos/asf/couchdb-nmo/blob/753f1767/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/HISTORY
----------------------------------------------------------------------
diff --git a/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/HISTORY
b/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/HISTORY
new file mode 100644
index 0000000..ecf5994
--- /dev/null
+++ b/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/HISTORY
@@ -0,0 +1,113 @@
+0.4.16 2015-10-07
+-----------------
+- Fixed issue with return statement in Map.js.
+
+0.4.15 2015-10-06
+-----------------
+- Exposed Map correctly via index.js file.
+
+0.4.14 2015-10-06
+-----------------
+- Exposed Map correctly via bson.js file.
+
+0.4.13 2015-10-06
+-----------------
+- Added ES6 Map type serialization as well as a polyfill for ES5.
+
+0.4.12 2015-09-18
+-----------------
+- Made ignore undefined an optional parameter.
+
+0.4.11 2015-08-06
+-----------------
+- Minor fix for invalid key checking.
+
+0.4.10 2015-08-06
+-----------------
+- NODE-38 Added new BSONRegExp type to allow direct serialization to MongoDB type.
+- Some performance improvements by inlining code.
+
+0.4.9 2015-08-06
+----------------
+- Undefined fields are omitted from serialization in objects.
+
+0.4.8 2015-07-14
+----------------
+- Fixed size validation to ensure we can deserialize from dumped files.
+
+0.4.7 2015-06-26
+----------------
+- Added ability to instruct deserializer to return raw BSON buffers for named array fields.
+- Minor deserialization optimization by moving inlined function out.
+
+0.4.6 2015-06-17
+----------------
+- Fixed serializeWithBufferAndIndex bug.
+
+0.4.5 2015-06-17
+----------------
+- Removed any references to the shared buffer to avoid non GC collectible bson instances.
+
+0.4.4 2015-06-17
+----------------
+- Fixed rethrowing of error when not RangeError.
+
+0.4.3 2015-06-17
+----------------
+- Start buffer at 64K and double as needed, meaning we keep a low memory profile until needed.
+
+0.4.2 2015-06-16
+----------------
+- More fixes for corrupt Bson
+
+0.4.1 2015-06-16
+----------------
+- More fixes for corrupt Bson
+
+0.4.0 2015-06-16
+----------------
+- New JS serializer serializing into a single buffer then copying out the new buffer. Performance is similar to current C++ parser.
+- Removed bson-ext extension dependency for now.
+
+0.3.2 2015-03-27
+----------------
+- Removed node-gyp from install script in package.json.
+
+0.3.1 2015-03-27
+----------------
+- Return pure js version on native() call if failed to initialize.
+
+0.3.0 2015-03-26
+----------------
+- Pulled out all C++ code into bson-ext and made it an optional dependency.
+
+0.2.21 2015-03-21
+-----------------
+- Updated Nan to 1.7.0 to support io.js and node 0.12.0
+
+0.2.19 2015-02-16
+-----------------
+- Updated Nan to 1.6.2 to support io.js and node 0.12.0
+
+0.2.18 2015-01-20
+-----------------
+- Updated Nan to 1.5.1 to support io.js
+
+0.2.16 2014-12-17
+-----------------
+- Made pid cycle on 0xffff to avoid weird overflows on creation of ObjectID's
+
+0.2.12 2014-08-24
+-----------------
+- Fixes for fortify review of c++ extension
+- toBSON correctly allows returns of non objects
+
+0.2.3 2013-10-01
+----------------
+- Drying of ObjectId code for generation of id (Issue #54, https://github.com/moredip)
+- Fixed issue where corrupt CString's could cause endless loop
+- Support for Node 0.11.X > (Issue #49, https://github.com/kkoopa)
+
+0.1.4 2012-09-25
+----------------
+- Added precompiled c++ native extensions for win32 ia32 and x64

http://git-wip-us.apache.org/repos/asf/couchdb-nmo/blob/753f1767/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/LICENSE
----------------------------------------------------------------------
diff --git a/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/LICENSE
b/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

http://git-wip-us.apache.org/repos/asf/couchdb-nmo/blob/753f1767/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/README.md
----------------------------------------------------------------------
diff --git a/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/README.md
b/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/README.md
new file mode 100644
index 0000000..56327c2
--- /dev/null
+++ b/node_modules/couchbulkimporter/node_modules/mongodb/node_modules/mongodb-core/node_modules/bson/README.md
@@ -0,0 +1,69 @@
+Javascript + C++ BSON parser
+============================
+
+This BSON parser is primarily meant to be used with the `mongodb` node.js driver.
+However, wonderful tools such as `onejs` can package up a BSON parser that will work in the browser.
+The current build is located in the `browser_build/bson.js` file.
+
+A simple example of how to use BSON in the browser:
+
+```html
+<html>
+<head>
+  <script src="https://raw.github.com/mongodb/js-bson/master/browser_build/bson.js">
+  </script>
+</head>
+<body onload="start();">
+<script>
+  function start() {
+    var BSON = bson().BSON;
+    var Long = bson().Long;
+
+    var doc = {long: Long.fromNumber(100)}
+
+    // Serialize a document
+    var data = BSON.serialize(doc, false, true, false);
+    // Deserialize it again
+    var doc_2 = BSON.deserialize(data);
+  }
+</script>
+</body>
+</html>
+```
+
+A simple example of how to use BSON in `node.js`:
+
+```javascript
+var bson = require("bson");
+var BSON = new bson.BSONPure.BSON();
+var Long = bson.BSONPure.Long;
+
+var doc = {long: Long.fromNumber(100)}
+
+// Serialize a document
+var data = BSON.serialize(doc, false, true, false);
+console.log("data:", data);
+
+// Deserialize the resulting Buffer
+var doc_2 = BSON.deserialize(data);
+console.log("doc_2:", doc_2);
+```
+
+The API consists of two simple methods to serialize/deserialize objects to/from BSON format:
+
+  * BSON.serialize(object, checkKeys, asBuffer, serializeFunctions)
+     * @param {Object} object the Javascript object to serialize.
+     * @param {Boolean} checkKeys the serializer will check if keys are valid.
+     * @param {Boolean} asBuffer return the serialized object as a Buffer object **(ignore)**.
+     * @param {Boolean} serializeFunctions serialize the javascript functions **(default:false)**
+     * @return {TypedArray/Array} returns a TypedArray or Array depending on what your browser supports
+ 
+  * BSON.deserialize(buffer, options, isArray)
+     * Options
+       * **evalFunctions** {Boolean, default:false}, evaluate functions in the BSON document scoped to the object deserialized.
+       * **cacheFunctions** {Boolean, default:false}, cache evaluated functions for reuse.
+       * **cacheFunctionsCrc32** {Boolean, default:false}, use a crc32 code for caching, otherwise use the string of the function.
+     * @param {TypedArray/Array} a TypedArray/Array containing the BSON data
+     * @param {Object} [options] additional options used for the deserialization.
+     * @param {Boolean} [isArray] ignore used for recursive parsing.
+     * @return {Object} returns the deserialized Javascript Object.
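+
+A minimal sketch of passing deserialization options (the document and option values here
+are purely illustrative):
+
+```javascript
+var bson = require("bson");
+var BSON = new bson.BSONPure.BSON();
+
+// Serialize a document that embeds a function, then deserialize it back,
+// asking the parser to evaluate and cache the stored function
+var data = BSON.serialize({ f: function() { return 1; } }, false, true, true);
+var doc_3 = BSON.deserialize(data, { evalFunctions: true, cacheFunctions: true });
+console.log("doc_3:", doc_3);
+```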

