mirror of
https://github.com/bvanroll/rpiRadio.git
synced 2025-08-31 13:02:44 +00:00
Initial Commit
This commit is contained in:
442
ProjectNow/NodeServer/node_modules/mongodb/lib/bulk/common.js
generated
vendored
Normal file
442
ProjectNow/NodeServer/node_modules/mongodb/lib/bulk/common.js
generated
vendored
Normal file
@@ -0,0 +1,442 @@
|
||||
'use strict';
|
||||
|
||||
var Long = require('mongodb-core').BSON.Long,
|
||||
MongoError = require('mongodb-core').MongoError,
|
||||
util = require('util');
|
||||
|
||||
// Error codes (mirrors server-side write error codes)
var UNKNOWN_ERROR = 8; // catch-all code used when no specific code is available
var INVALID_BSON_ERROR = 22; // document failed BSON validation/serialization
var WRITE_CONCERN_ERROR = 64; // requested write concern could not be satisfied
var MULTIPLE_ERROR = 65; // more than one error occurred during the bulk op

// Insert types: discriminators for Batch.batchType, consumed by mergeBatchResults
var INSERT = 1;
var UPDATE = 2;
var REMOVE = 3;
|
||||
|
||||
/**
 * Defines an enumerable, getter-only property on a target object.
 * The property always yields the value captured at definition time and
 * cannot be reassigned (assignment throws in strict mode).
 * @ignore
 */
var defineReadOnlyProperty = function(self, name, value) {
  var descriptor = {
    enumerable: true,
    get: function() {
      return value;
    }
  };

  Object.defineProperty(self, name, descriptor);
};
|
||||
|
||||
/**
 * Tracks the state of one batch of operations so that results can be
 * remapped onto the caller's original operation indexes after execution.
 * @ignore
 */
var Batch = function(batchType, originalZeroIndex) {
  // One of INSERT/UPDATE/REMOVE
  this.batchType = batchType;
  // Index of this batch's first operation within the overall bulk op
  this.originalZeroIndex = originalZeroIndex;
  // Running index within this batch
  this.currentIndex = 0;
  // Original bulk-op index for each queued operation
  this.originalIndexes = [];
  // Queued operation documents
  this.operations = [];
  // Operation count and accumulated serialized size in bytes
  this.size = 0;
  this.sizeBytes = 0;
};
|
||||
|
||||
/**
 * Wraps a legacy write operation together with its batch type and original
 * index so a resulting error can be rewritten against the caller's view.
 * @ignore
 */
var LegacyOp = function(batchType, operation, index) {
  this.index = index;
  this.operation = operation;
  this.batchType = batchType;
};
|
||||
|
||||
/**
 * Create a new BulkWriteResult instance (INTERNAL TYPE, do not instantiate directly)
 *
 * @class
 * @param {object} bulkResult the raw internal result object accumulated by the bulk operation
 * @property {boolean} ok Did bulk operation correctly execute
 * @property {number} nInserted number of inserted documents
 * @property {number} nUpserted Number of upserted documents
 * @property {number} nMatched Number of documents matched by update operations
 * @property {number} nModified Number of documents updated physically on disk
 * @property {number} nRemoved Number of removed documents
 * @return {BulkWriteResult} a BulkWriteResult instance
 */
var BulkWriteResult = function(bulkResult) {
  // Expose the summary counters as read-only enumerable properties
  defineReadOnlyProperty(this, 'ok', bulkResult.ok);
  defineReadOnlyProperty(this, 'nInserted', bulkResult.nInserted);
  defineReadOnlyProperty(this, 'nUpserted', bulkResult.nUpserted);
  defineReadOnlyProperty(this, 'nMatched', bulkResult.nMatched);
  defineReadOnlyProperty(this, 'nModified', bulkResult.nModified);
  defineReadOnlyProperty(this, 'nRemoved', bulkResult.nRemoved);

  /**
   * Return an array of inserted ids
   *
   * @return {object[]}
   */
  this.getInsertedIds = function() {
    return bulkResult.insertedIds;
  };

  /**
   * Return an array of upserted ids
   *
   * @return {object[]}
   */
  this.getUpsertedIds = function() {
    return bulkResult.upserted;
  };

  /**
   * Return the upserted id at position x
   *
   * @param {number} index the number of the upserted id to return, returns undefined if no result for passed in index
   * @return {object}
   */
  this.getUpsertedIdAt = function(index) {
    return bulkResult.upserted[index];
  };

  /**
   * Return raw internal result
   *
   * @return {object}
   */
  this.getRawResponse = function() {
    return bulkResult;
  };

  /**
   * Returns true if the bulk operation contains a write error
   *
   * @return {boolean}
   */
  this.hasWriteErrors = function() {
    return bulkResult.writeErrors.length > 0;
  };

  /**
   * Returns the number of write errors off the bulk operation
   *
   * @return {number}
   */
  this.getWriteErrorCount = function() {
    return bulkResult.writeErrors.length;
  };

  /**
   * Returns a specific write error object
   *
   * @param {number} index of the write error to return, returns null if there is no result for passed in index
   * @return {WriteError}
   */
  this.getWriteErrorAt = function(index) {
    if (index < bulkResult.writeErrors.length) {
      return bulkResult.writeErrors[index];
    }
    return null;
  };

  /**
   * Retrieve all write errors
   *
   * @return {object[]}
   */
  this.getWriteErrors = function() {
    return bulkResult.writeErrors;
  };

  /**
   * Retrieve lastOp if available
   *
   * @return {object}
   */
  this.getLastOp = function() {
    return bulkResult.lastOp;
  };

  /**
   * Retrieve the write concern error if any. When several write concern
   * errors occurred they are collapsed into a single synthetic error whose
   * message joins the individual messages with ' and '.
   *
   * @return {WriteConcernError}
   */
  this.getWriteConcernError = function() {
    if (bulkResult.writeConcernErrors.length === 0) {
      return null;
    } else if (bulkResult.writeConcernErrors.length === 1) {
      // Return the error
      return bulkResult.writeConcernErrors[0];
    } else {
      // Combine the errors, separating each message with ' and '.
      // (Previously the separator was only appended after the first
      // error, garbling the message for three or more errors.)
      var errmsg = '';
      for (var i = 0; i < bulkResult.writeConcernErrors.length; i++) {
        if (i !== 0) errmsg = errmsg + ' and ';
        errmsg = errmsg + bulkResult.writeConcernErrors[i].errmsg;
      }

      return new WriteConcernError({ errmsg: errmsg, code: WRITE_CONCERN_ERROR });
    }
  };

  this.toJSON = function() {
    return bulkResult;
  };

  this.toString = function() {
    // Serialize the result so the string is actually readable
    // (string-concatenating the object yielded "[object Object]")
    return 'BulkWriteResult(' + JSON.stringify(this.toJSON()) + ')';
  };

  this.isOk = function() {
    return bulkResult.ok === 1;
  };
};
|
||||
|
||||
/**
 * Create a new WriteConcernError instance (INTERNAL TYPE, do not instantiate directly)
 *
 * @class
 * @property {number} code Write concern error code.
 * @property {string} errmsg Write concern error message.
 * @return {WriteConcernError} a WriteConcernError instance
 */
var WriteConcernError = function(err) {
  // Support invocation without `new`
  if (!(this instanceof WriteConcernError)) return new WriteConcernError(err);

  // Snapshot the values so the read-only accessors are stable
  var code = err.code;
  var errmsg = err.errmsg;

  Object.defineProperty(this, 'code', {
    enumerable: true,
    get: function() {
      return code;
    }
  });

  Object.defineProperty(this, 'errmsg', {
    enumerable: true,
    get: function() {
      return errmsg;
    }
  });

  // Plain-object view of the error
  this.toJSON = function() {
    return { code: err.code, errmsg: err.errmsg };
  };

  this.toString = function() {
    return 'WriteConcernError(' + err.errmsg + ')';
  };
};
|
||||
|
||||
/**
 * Create a new WriteError instance (INTERNAL TYPE, do not instantiate directly)
 *
 * @class
 * @property {number} code Write error code.
 * @property {number} index Write error original bulk operation index.
 * @property {string} errmsg Write error message.
 * @return {WriteError} a WriteError instance
 */
var WriteError = function(err) {
  // Support invocation without `new`
  if (!(this instanceof WriteError)) return new WriteError(err);

  // Snapshot the values so the read-only accessors are stable
  var code = err.code;
  var index = err.index;
  var errmsg = err.errmsg;

  Object.defineProperty(this, 'code', {
    enumerable: true,
    get: function() {
      return code;
    }
  });

  Object.defineProperty(this, 'index', {
    enumerable: true,
    get: function() {
      return index;
    }
  });

  Object.defineProperty(this, 'errmsg', {
    enumerable: true,
    get: function() {
      return errmsg;
    }
  });

  //
  // Define access methods

  // The original operation document that produced this error
  this.getOperation = function() {
    return err.op;
  };

  this.toJSON = function() {
    return { code: err.code, index: err.index, errmsg: err.errmsg, op: err.op };
  };

  this.toString = function() {
    return 'WriteError(' + JSON.stringify(this.toJSON()) + ')';
  };
};
|
||||
|
||||
/**
 * Merges one batch's command result (or error) into the shared bulkResult
 * accumulator so the final BulkWriteResult reflects every batch.
 *
 * @param {boolean} ordered whether the bulk op is ordered (currently unused here — TODO confirm callers rely on it)
 * @param {Batch} batch the batch the result belongs to (supplies originalZeroIndex/operations)
 * @param {object} bulkResult the shared accumulator, mutated in place
 * @param {Error} [err] a command-level error, takes precedence over result
 * @param {object} [result] the raw command response (possibly wrapped in {result})
 * @ignore
 */
var mergeBatchResults = function(ordered, batch, bulkResult, err, result) {
  // If we have an error set the result to be the err object
  if (err) {
    result = err;
  } else if (result && result.result) {
    // Unwrap a {result: ...} envelope from the core layer
    result = result.result;
  } else if (result == null) {
    // Nothing to merge
    return;
  }

  // Top-level failure: record a single synthetic WriteError for the first
  // operation of the batch and stop processing this result
  if (result.ok === 0 && bulkResult.ok === 1) {
    bulkResult.ok = 0;

    var writeError = {
      index: 0,
      code: result.code || 0,
      errmsg: result.message,
      op: batch.operations[0]
    };

    bulkResult.writeErrors.push(new WriteError(writeError));
    return;
  } else if (result.ok === 0 && bulkResult.ok === 0) {
    // Already in a failed state; keep only the first top-level error
    return;
  }

  // Deal with opTime if available — keep the greatest opTime seen so far
  if (result.opTime || result.lastOp) {
    var opTime = result.lastOp || result.opTime;
    var lastOpTS = null;
    var lastOpT = null;

    // Timestamp-typed opTime: compare directly
    if (opTime && opTime._bsontype === 'Timestamp') {
      if (bulkResult.lastOp == null) {
        bulkResult.lastOp = opTime;
      } else if (opTime.greaterThan(bulkResult.lastOp)) {
        bulkResult.lastOp = opTime;
      }
    } else {
      // Object opTime {ts, t}: normalize both sides to Long before comparing
      if (bulkResult.lastOp) {
        lastOpTS =
          typeof bulkResult.lastOp.ts === 'number'
            ? Long.fromNumber(bulkResult.lastOp.ts)
            : bulkResult.lastOp.ts;
        lastOpT =
          typeof bulkResult.lastOp.t === 'number'
            ? Long.fromNumber(bulkResult.lastOp.t)
            : bulkResult.lastOp.t;
      }

      // Current OpTime TS
      var opTimeTS = typeof opTime.ts === 'number' ? Long.fromNumber(opTime.ts) : opTime.ts;
      var opTimeT = typeof opTime.t === 'number' ? Long.fromNumber(opTime.t) : opTime.t;

      // Compare by timestamp first, then by term on a tie
      if (bulkResult.lastOp == null) {
        bulkResult.lastOp = opTime;
      } else if (opTimeTS.greaterThan(lastOpTS)) {
        bulkResult.lastOp = opTime;
      } else if (opTimeTS.equals(lastOpTS)) {
        if (opTimeT.greaterThan(lastOpT)) {
          bulkResult.lastOp = opTime;
        }
      }
    }
  }

  // Insert batches: result.n is the number of documents inserted
  if (batch.batchType === INSERT && result.n) {
    bulkResult.nInserted = bulkResult.nInserted + result.n;
  }

  // Remove batches: result.n is the number of documents removed
  if (batch.batchType === REMOVE && result.n) {
    bulkResult.nRemoved = bulkResult.nRemoved + result.n;
  }

  var nUpserted = 0;

  // We have an array of upserted values, we need to rewrite the indexes
  // from batch-local to the caller's original bulk-op indexes
  if (Array.isArray(result.upserted)) {
    nUpserted = result.upserted.length;

    for (var i = 0; i < result.upserted.length; i++) {
      bulkResult.upserted.push({
        index: result.upserted[i].index + batch.originalZeroIndex,
        _id: result.upserted[i]._id
      });
    }
  } else if (result.upserted) {
    // Single upserted _id (legacy shape)
    nUpserted = 1;

    bulkResult.upserted.push({
      index: batch.originalZeroIndex,
      _id: result.upserted
    });
  }

  // Update batches: split n into matched vs upserted; nModified may be
  // absent on old servers, in which case it is poisoned to null
  if (batch.batchType === UPDATE && result.n) {
    var nModified = result.nModified;
    bulkResult.nUpserted = bulkResult.nUpserted + nUpserted;
    bulkResult.nMatched = bulkResult.nMatched + (result.n - nUpserted);

    if (typeof nModified === 'number') {
      bulkResult.nModified = bulkResult.nModified + nModified;
    } else {
      bulkResult.nModified = null;
    }
  }

  // Rewrite per-operation write errors onto original indexes
  if (Array.isArray(result.writeErrors)) {
    for (i = 0; i < result.writeErrors.length; i++) {
      writeError = {
        index: batch.originalZeroIndex + result.writeErrors[i].index,
        code: result.writeErrors[i].code,
        errmsg: result.writeErrors[i].errmsg,
        op: batch.operations[result.writeErrors[i].index]
      };

      bulkResult.writeErrors.push(new WriteError(writeError));
    }
  }

  // Accumulate any write concern error for later reporting
  if (result.writeConcernError) {
    bulkResult.writeConcernErrors.push(new WriteConcernError(result.writeConcernError));
  }
};
|
||||
|
||||
//
// Clone the options
/**
 * Returns a shallow copy of an options object.
 *
 * @param {object} options the options object to copy (own enumerable keys only)
 * @return {object} a new object carrying the same key/value pairs
 * @ignore
 */
var cloneOptions = function(options) {
  // Object.assign copies own enumerable properties — exactly what the
  // previous hand-rolled Object.keys loop did, in one idiomatic call.
  return Object.assign({}, options);
};
|
||||
|
||||
/**
 * Creates a new BulkWriteError
 *
 * @class
 * @param {Error|string|object} error The error message
 * @param {BulkWriteResult} result The result of the bulk write operation
 * @return {BulkWriteError} A BulkWriteError instance
 * @extends {MongoError}
 */
const BulkWriteError = function(error, result) {
  // Prefer the most specific message field present on the raw error
  var message = error.err || error.errmsg || error.errMessage || error;
  MongoError.call(this, message);

  // Copy every own enumerable property of the raw error onto this instance
  if (typeof error === 'object') {
    Object.keys(error).forEach(function(key) {
      this[key] = error[key];
    }, this);
  }

  this.name = 'BulkWriteError';
  this.result = result;
};
util.inherits(BulkWriteError, MongoError);
|
||||
|
||||
// Exports symbols
// Public error/result types
exports.BulkWriteError = BulkWriteError;
exports.BulkWriteResult = BulkWriteResult;
exports.WriteError = WriteError;
// Internal batch bookkeeping shared by the ordered/unordered bulk modules
exports.Batch = Batch;
exports.LegacyOp = LegacyOp;
exports.mergeBatchResults = mergeBatchResults;
exports.cloneOptions = cloneOptions;
// Error code constants
exports.INVALID_BSON_ERROR = INVALID_BSON_ERROR;
exports.WRITE_CONCERN_ERROR = WRITE_CONCERN_ERROR;
exports.MULTIPLE_ERROR = MULTIPLE_ERROR;
exports.UNKNOWN_ERROR = UNKNOWN_ERROR;
// Batch type constants
exports.INSERT = INSERT;
exports.UPDATE = UPDATE;
exports.REMOVE = REMOVE;
|
610
ProjectNow/NodeServer/node_modules/mongodb/lib/bulk/ordered.js
generated
vendored
Normal file
610
ProjectNow/NodeServer/node_modules/mongodb/lib/bulk/ordered.js
generated
vendored
Normal file
@@ -0,0 +1,610 @@
|
||||
'use strict';
|
||||
|
||||
const common = require('./common');
|
||||
const utils = require('../utils');
|
||||
const toError = require('../utils').toError;
|
||||
const handleCallback = require('../utils').handleCallback;
|
||||
const shallowClone = utils.shallowClone;
|
||||
const BulkWriteResult = common.BulkWriteResult;
|
||||
const ObjectID = require('mongodb-core').BSON.ObjectID;
|
||||
const BSON = require('mongodb-core').BSON;
|
||||
const Batch = common.Batch;
|
||||
const mergeBatchResults = common.mergeBatchResults;
|
||||
const executeOperation = utils.executeOperation;
|
||||
const BulkWriteError = require('./common').BulkWriteError;
|
||||
const applyWriteConcern = utils.applyWriteConcern;
|
||||
|
||||
// Standalone BSON serializer, used only to measure serialized document
// sizes when deciding whether an operation still fits in the current batch.
var bson = new BSON([
  BSON.Binary,
  BSON.Code,
  BSON.DBRef,
  BSON.Decimal128,
  BSON.Double,
  BSON.Int32,
  BSON.Long,
  BSON.Map,
  BSON.MaxKey,
  BSON.MinKey,
  BSON.ObjectId,
  BSON.BSONRegExp,
  BSON.Symbol,
  BSON.Timestamp
]);
|
||||
|
||||
/**
 * Create a FindOperatorsOrdered instance (INTERNAL TYPE, do not instantiate directly)
 *
 * Shares the parent bulk operation's state object so the chained
 * update/remove helpers operate on the same pending selector.
 * @class
 * @return {FindOperatorsOrdered} a FindOperatorsOrdered instance.
 */
var FindOperatorsOrdered = function(self) {
  // Alias the owning OrderedBulkOperation's internal state
  this.s = self.s;
};
|
||||
|
||||
/**
 * Add a single update document to the bulk operation
 *
 * @method
 * @param {object} updateDocument update operations
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
FindOperatorsOrdered.prototype.update = function(updateDocument) {
  var currentOp = this.s.currentOp;

  // Honour an upsert flag set earlier via .upsert()
  var upsert = typeof currentOp.upsert === 'boolean' ? currentOp.upsert : false;

  // Build a multi-document update command from the pending selector
  var document = {
    q: currentOp.selector,
    u: updateDocument,
    multi: true,
    upsert: upsert
  };

  // The pending find() state has been consumed
  this.s.currentOp = null;

  // Queue the update and return the bulk operation for chaining
  return addToOperationsList(this, common.UPDATE, document);
};
|
||||
|
||||
/**
 * Add a single update one document to the bulk operation
 *
 * @method
 * @param {object} updateDocument update operations
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
FindOperatorsOrdered.prototype.updateOne = function(updateDocument) {
  var currentOp = this.s.currentOp;

  // Honour an upsert flag set earlier via .upsert()
  var upsert = typeof currentOp.upsert === 'boolean' ? currentOp.upsert : false;

  // Build a single-document update command from the pending selector
  var document = {
    q: currentOp.selector,
    u: updateDocument,
    multi: false,
    upsert: upsert
  };

  // The pending find() state has been consumed
  this.s.currentOp = null;

  // Queue the update and return the bulk operation for chaining
  return addToOperationsList(this, common.UPDATE, document);
};
|
||||
|
||||
/**
 * Add a replace one operation to the bulk operation
 *
 * @method
 * @param {object} updateDocument the new document to replace the existing one with
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
FindOperatorsOrdered.prototype.replaceOne = function(updateDocument) {
  // Delegate to updateOne and propagate its return value so replaceOne is
  // chainable like the other find operators. Previously the result was
  // dropped, so replaceOne returned undefined despite the documented
  // @return {OrderedBulkOperation}.
  return this.updateOne(updateDocument);
};
|
||||
|
||||
/**
 * Upsert modifier for update bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {FindOperatorsOrdered}
 */
FindOperatorsOrdered.prototype.upsert = function() {
  // Flag the pending operation so the next update/replace is an upsert,
  // then return this operator for further chaining
  this.s.currentOp.upsert = true;
  return this;
};
|
||||
|
||||
/**
 * Add a remove one operation to the bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
FindOperatorsOrdered.prototype.deleteOne = function() {
  // Remove at most one document matching the pending selector
  var document = {
    q: this.s.currentOp.selector,
    limit: 1
  };

  // The pending find() state has been consumed
  this.s.currentOp = null;

  // Queue the remove and return the bulk operation for chaining
  return addToOperationsList(this, common.REMOVE, document);
};

// Backward compatibility
FindOperatorsOrdered.prototype.removeOne = FindOperatorsOrdered.prototype.deleteOne;
|
||||
|
||||
/**
 * Add a remove operation to the bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
FindOperatorsOrdered.prototype.delete = function() {
  // limit: 0 removes every document matching the pending selector
  var document = {
    q: this.s.currentOp.selector,
    limit: 0
  };

  // The pending find() state has been consumed
  this.s.currentOp = null;

  // Queue the remove and return the bulk operation for chaining
  return addToOperationsList(this, common.REMOVE, document);
};

// Backward compatibility
FindOperatorsOrdered.prototype.remove = FindOperatorsOrdered.prototype.delete;
|
||||
|
||||
// Add to internal list of documents
/**
 * Queues one operation document on the bulk op, opening a new batch when the
 * current one would exceed the server's size limits or changes type.
 *
 * @param {OrderedBulkOperation} _self the owning bulk operation
 * @param {number} docType one of common.INSERT/UPDATE/REMOVE
 * @param {object} document the operation document to queue
 * @throws {MongoError} when the document alone exceeds maxBatchSizeBytes, or an Array is passed
 * @return {OrderedBulkOperation} _self, for chaining
 * @ignore
 */
var addToOperationsList = function(_self, docType, document) {
  // Get the bsonSize
  var bsonSize = bson.calculateObjectSize(document, {
    checkKeys: false
  });

  // Throw error if the doc is bigger than the max BSON size
  if (bsonSize >= _self.s.maxBatchSizeBytes) {
    throw toError('document is larger than the maximum size ' + _self.s.maxBatchSizeBytes);
  }

  // Create a new batch object if we don't have a current one
  if (_self.s.currentBatch == null) _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);

  // Check if we need to create a new batch: operation count limit reached,
  // this document would push the batch over the byte limit, or the batch
  // type changed. (The byte check previously compared
  // currentBatchSizeBytes + currentBatchSizeBytes — it must account for
  // the incoming document's bsonSize.)
  if (
    _self.s.currentBatchSize + 1 >= _self.s.maxWriteBatchSize ||
    _self.s.currentBatchSizeBytes + bsonSize >= _self.s.maxBatchSizeBytes ||
    _self.s.currentBatch.batchType !== docType
  ) {
    // Save the batch to the execution stack
    _self.s.batches.push(_self.s.currentBatch);

    // Create a new batch
    _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);

    // Reset the current size trackers
    _self.s.currentBatchSize = 0;
    _self.s.currentBatchSizeBytes = 0;
  }

  if (docType === common.INSERT) {
    _self.s.bulkResult.insertedIds.push({ index: _self.s.currentIndex, _id: document._id });
  }

  // We have an array of documents
  if (Array.isArray(document)) {
    throw toError('operation passed in cannot be an Array');
  }

  _self.s.currentBatch.originalIndexes.push(_self.s.currentIndex);
  _self.s.currentBatch.operations.push(document);

  // Update the running size trackers exactly once per queued operation
  // (the size in bytes was previously added twice on the non-overflow path)
  _self.s.currentBatchSize = _self.s.currentBatchSize + 1;
  _self.s.currentBatchSizeBytes = _self.s.currentBatchSizeBytes + bsonSize;
  _self.s.currentIndex = _self.s.currentIndex + 1;

  // Return self
  return _self;
};
|
||||
|
||||
/**
 * Create a new OrderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @param {object} topology the topology the bulk commands execute against
 * @param {Collection} collection the collection the operations target
 * @param {object} [options] bulk operation options
 * @property {number} length Get the number of operations in the bulk.
 * @return {OrderedBulkOperation} a OrderedBulkOperation instance.
 */
function OrderedBulkOperation(topology, collection, options) {
  options = options == null ? {} : options;
  // TODO Bring from driver information in isMaster
  var executed = false;

  // Current item
  var currentOp = null;

  // Handle to the bson serializer, used to calculate running sizes
  var bson = topology.bson;

  // Namespace for the operation
  var namespace = collection.collectionName;

  // Set max byte size from the server's isMaster, falling back to the
  // 16MB default. (The fallback previously read 1024 * 1025 * 16 — a typo
  // for 1024 * 1024 * 16.)
  var maxBatchSizeBytes =
    topology.isMasterDoc && topology.isMasterDoc.maxBsonObjectSize
      ? topology.isMasterDoc.maxBsonObjectSize
      : 1024 * 1024 * 16;
  var maxWriteBatchSize =
    topology.isMasterDoc && topology.isMasterDoc.maxWriteBatchSize
      ? topology.isMasterDoc.maxWriteBatchSize
      : 1000;

  // Get the write concern
  var writeConcern = applyWriteConcern(shallowClone(options), { collection: collection }, options);
  writeConcern = writeConcern.writeConcern;

  // Get the promiseLibrary
  var promiseLibrary = options.promiseLibrary || Promise;

  // Final results accumulator shared with mergeBatchResults
  var bulkResult = {
    ok: 1,
    writeErrors: [],
    writeConcernErrors: [],
    insertedIds: [],
    nInserted: 0,
    nUpserted: 0,
    nMatched: 0,
    nModified: 0,
    nRemoved: 0,
    upserted: []
  };

  // Internal state
  this.s = {
    // Final result
    bulkResult: bulkResult,
    // Current batch state
    currentBatch: null,
    currentIndex: 0,
    currentBatchSize: 0,
    currentBatchSizeBytes: 0,
    batches: [],
    // Write concern
    writeConcern: writeConcern,
    // Max batch size options
    maxBatchSizeBytes: maxBatchSizeBytes,
    maxWriteBatchSize: maxWriteBatchSize,
    // Namespace
    namespace: namespace,
    // BSON
    bson: bson,
    // Topology
    topology: topology,
    // Options
    options: options,
    // Current operation
    currentOp: currentOp,
    // Executed
    executed: executed,
    // Collection
    collection: collection,
    // Promise Library
    promiseLibrary: promiseLibrary,
    // Fundamental error
    err: null,
    // Bypass validation
    bypassDocumentValidation:
      typeof options.bypassDocumentValidation === 'boolean'
        ? options.bypassDocumentValidation
        : false,
    // check keys
    checkKeys: typeof options.checkKeys === 'boolean' ? options.checkKeys : true
  };
}
|
||||
|
||||
/**
 * Queues a single raw bulkWrite-style operation ({insertOne: ...},
 * {updateMany: ...}, {deleteOne: ...}, ...). NOTE(review): legacy-format
 * update/remove ops are mutated in place (multi/limit are assigned onto
 * the caller's object) — confirm callers do not reuse the op document.
 *
 * @param {object} op a single bulkWrite operation document
 * @throws {MongoError} when no supported operation key is present
 * @return {OrderedBulkOperation} for most paths; undefined for insertMany
 */
OrderedBulkOperation.prototype.raw = function(op) {
  // The operation type is determined by the first own key
  var key = Object.keys(op)[0];

  // Set up the force server object id
  var forceServerObjectId =
    typeof this.s.options.forceServerObjectId === 'boolean'
      ? this.s.options.forceServerObjectId
      : this.s.collection.s.db.options.forceServerObjectId;

  // Legacy update format: the operation already carries q/u
  if (
    (op.updateOne && op.updateOne.q) ||
    (op.updateMany && op.updateMany.q) ||
    (op.replaceOne && op.replaceOne.q)
  ) {
    op[key].multi = op.updateOne || op.replaceOne ? false : true;
    return addToOperationsList(this, common.UPDATE, op[key]);
  }

  // Crud spec update format: translate filter/update|replacement to q/u
  if (op.updateOne || op.updateMany || op.replaceOne) {
    var multi = op.updateOne || op.replaceOne ? false : true;
    var operation = { q: op[key].filter, u: op[key].update || op[key].replacement, multi: multi };
    operation.upsert = op[key].upsert ? true : false;
    if (op.collation) operation.collation = op.collation;
    if (op[key].arrayFilters) operation.arrayFilters = op[key].arrayFilters;
    return addToOperationsList(this, common.UPDATE, operation);
  }

  // Legacy remove format (or crud delete already carrying q)
  if (
    op.removeOne ||
    op.removeMany ||
    (op.deleteOne && op.deleteOne.q) ||
    (op.deleteMany && op.deleteMany.q)
  ) {
    // limit 1 removes a single document; limit 0 removes all matches
    op[key].limit = op.removeOne ? 1 : 0;
    return addToOperationsList(this, common.REMOVE, op[key]);
  }

  // Crud spec delete operations, less efficient
  if (op.deleteOne || op.deleteMany) {
    var limit = op.deleteOne ? 1 : 0;
    operation = { q: op[key].filter, limit: limit };
    if (op.collation) operation.collation = op.collation;
    return addToOperationsList(this, common.REMOVE, operation);
  }

  // Insert operations; generate a client-side _id unless the server is
  // configured to assign one
  if (op.insertOne && op.insertOne.document == null) {
    if (forceServerObjectId !== true && op.insertOne._id == null) op.insertOne._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne);
  } else if (op.insertOne && op.insertOne.document) {
    if (forceServerObjectId !== true && op.insertOne.document._id == null)
      op.insertOne.document._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne.document);
  }

  if (op.insertMany) {
    for (var i = 0; i < op.insertMany.length; i++) {
      if (forceServerObjectId !== true && op.insertMany[i]._id == null)
        op.insertMany[i]._id = new ObjectID();
      addToOperationsList(this, common.INSERT, op.insertMany[i]);
    }

    // NOTE(review): insertMany returns undefined while every other branch
    // returns the bulk op — confirm whether callers rely on the return value
    return;
  }

  // No valid type of operation
  throw toError(
    'bulkWrite only supports insertOne, insertMany, updateOne, updateMany, removeOne, removeMany, deleteOne, deleteMany'
  );
};
|
||||
|
||||
/**
 * Add a single insert document to the bulk operation
 *
 * @param {object} document the document to insert
 * @throws {MongoError}
 * @return {OrderedBulkOperation}
 */
OrderedBulkOperation.prototype.insert = function(document) {
  // Assign a client-side ObjectID unless the server is configured to do it
  var forceServerObjectId = this.s.collection.s.db.options.forceServerObjectId;
  if (forceServerObjectId !== true && document._id == null) {
    document._id = new ObjectID();
  }
  return addToOperationsList(this, common.INSERT, document);
};
|
||||
|
||||
/**
 * Initiate a find operation for an update/updateOne/remove/removeOne/replaceOne
 *
 * @method
 * @param {object} selector The selector for the bulk operation.
 * @throws {MongoError}
 * @return {FindOperatorsOrdered}
 */
OrderedBulkOperation.prototype.find = function(selector) {
  if (!selector) {
    throw toError('Bulk find operation must specify a selector');
  }

  // Remember the selector; the returned operator object consumes it
  this.s.currentOp = { selector: selector };

  return new FindOperatorsOrdered(this);
};
|
||||
|
||||
// Number of operations queued so far across all batches (read-only)
Object.defineProperty(OrderedBulkOperation.prototype, 'length', {
  enumerable: true,
  get: function() {
    return this.s.currentIndex;
  }
});
|
||||
|
||||
//
// Execute next write command in a chain
/**
 * Recursively executes the queued batches one at a time (ordered semantics),
 * merging each batch's result into the shared bulkResult and stopping at the
 * first write error or write concern error.
 *
 * @param {OrderedBulkOperation} self the bulk operation being executed
 * @param {object} options execution options merged into each command
 * @param {function} callback (err, BulkWriteResult)
 * @ignore
 */
var executeCommands = function(self, options, callback) {
  // All batches drained: report the accumulated result
  if (self.s.batches.length === 0) {
    return handleCallback(callback, null, new BulkWriteResult(self.s.bulkResult));
  }

  // Ordered execution of the command
  var batch = self.s.batches.shift();

  var resultHandler = function(err, result) {
    // Error is a driver related error not a bulk op error, terminate
    if ((err && err.driver) || (err && err.message)) {
      return handleCallback(callback, err);
    }

    // If we have and error
    if (err) err.ok = 0;
    // Merge the results together. NOTE(review): mergeBatchResults only
    // returns undefined, so mergeResult != null below looks unreachable —
    // confirm against common.js before relying on that branch.
    var mergeResult = mergeBatchResults(true, batch, self.s.bulkResult, err, result);
    const writeResult = new BulkWriteResult(self.s.bulkResult);
    if (mergeResult != null) {
      return handleCallback(callback, null, writeResult);
    }

    // If we are ordered and have errors and they are
    // not all replication errors terminate the operation
    if (self.s.bulkResult.writeErrors.length > 0) {
      // Single error: surface it directly
      if (self.s.bulkResult.writeErrors.length === 1) {
        return handleCallback(
          callback,
          new BulkWriteError(toError(self.s.bulkResult.writeErrors[0]), writeResult),
          null
        );
      }

      // Multiple errors: wrap them in one aggregate BulkWriteError
      return handleCallback(
        callback,
        new BulkWriteError(
          toError({
            message: 'write operation failed',
            code: self.s.bulkResult.writeErrors[0].code,
            writeErrors: self.s.bulkResult.writeErrors
          }),
          writeResult
        ),
        null
      );
    } else if (writeResult.getWriteConcernError()) {
      // A write concern error also terminates an ordered bulk op
      return handleCallback(
        callback,
        new BulkWriteError(toError(writeResult.getWriteConcernError()), writeResult),
        null
      );
    }

    // Execute the next command in line
    executeCommands(self, options, callback);
  };

  // Build the per-command options, starting from ordered semantics
  var finalOptions = Object.assign({ ordered: true }, options);
  if (self.s.writeConcern != null) {
    finalOptions.writeConcern = self.s.writeConcern;
  }

  // Set an operationId if provided (used by APM instrumentation)
  if (self.operationId) {
    resultHandler.operationId = self.operationId;
  }

  // Serialize functions
  if (self.s.options.serializeFunctions) {
    finalOptions.serializeFunctions = true;
  }

  // Ignore undefined
  if (self.s.options.ignoreUndefined) {
    finalOptions.ignoreUndefined = true;
  }

  // Is the bypassDocumentValidation options specific
  if (self.s.bypassDocumentValidation === true) {
    finalOptions.bypassDocumentValidation = true;
  }

  // Is the checkKeys option disabled
  if (self.s.checkKeys === false) {
    finalOptions.checkKeys = false;
  }

  // Dispatch the batch to the matching topology write method
  try {
    if (batch.batchType === common.INSERT) {
      self.s.topology.insert(
        self.s.collection.namespace,
        batch.operations,
        finalOptions,
        resultHandler
      );
    } else if (batch.batchType === common.UPDATE) {
      self.s.topology.update(
        self.s.collection.namespace,
        batch.operations,
        finalOptions,
        resultHandler
      );
    } else if (batch.batchType === common.REMOVE) {
      self.s.topology.remove(
        self.s.collection.namespace,
        batch.operations,
        finalOptions,
        resultHandler
      );
    }
  } catch (err) {
    // Force top level error
    err.ok = 0;
    // Merge top level error and return. NOTE(review): this passes
    // ordered=false (elsewhere true) and forwards mergeBatchResults'
    // return value (undefined) as the success result — the callback is
    // invoked with (null, undefined) rather than an error; confirm intent.
    handleCallback(callback, null, mergeBatchResults(false, batch, self.s.bulkResult, err, null));
  }
};
|
||||
|
||||
/**
|
||||
* The callback format for results
|
||||
* @callback OrderedBulkOperation~resultCallback
|
||||
* @param {MongoError} error An error instance representing the error during the execution.
|
||||
* @param {BulkWriteResult} result The bulk write result.
|
||||
*/
|
||||
|
||||
/**
 * Execute the ordered bulk operation
 *
 * @method
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {OrderedBulkOperation~resultCallback} [callback] The result callback
 * @throws {MongoError}
 * @return {Promise} returns Promise if no callback passed
 */
OrderedBulkOperation.prototype.execute = function(_writeConcern, options, callback) {
  // Support execute(writeConcern, callback) by shifting arguments.
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }
  options = options || {};

  var state = this.s;

  // A bulk operation may only be executed once.
  if (state.executed) {
    var reExecuteError = toError('batch cannot be re-executed');
    if (typeof callback === 'function') return callback(reExecuteError, null);
    return state.promiseLibrary.reject(reExecuteError);
  }

  // The first positional argument may be a callback or a write concern document.
  if (typeof _writeConcern === 'function') {
    callback = _writeConcern;
  } else if (_writeConcern && typeof _writeConcern === 'object') {
    state.writeConcern = _writeConcern;
  }

  // Flush the batch currently being built onto the execution stack.
  if (state.currentBatch) state.batches.push(state.currentBatch);

  // Executing an empty bulk is an error.
  if (state.batches.length === 0) {
    var noOpsError = toError('Invalid Operation, no operations specified');
    if (typeof callback === 'function') return callback(noOpsError, null);
    return state.promiseLibrary.reject(noOpsError);
  }

  return executeOperation(state.topology, executeCommands, [this, options, callback]);
};
|
||||
|
||||
/**
 * Returns an ordered bulk operation object
 * @ignore
 */
function initializeOrderedBulkOp(topology, collection, options) {
  return new OrderedBulkOperation(topology, collection, options);
}

initializeOrderedBulkOp.OrderedBulkOperation = OrderedBulkOperation;
module.exports = initializeOrderedBulkOp;
module.exports.Bulk = OrderedBulkOperation;
|
624
ProjectNow/NodeServer/node_modules/mongodb/lib/bulk/unordered.js
generated
vendored
Normal file
624
ProjectNow/NodeServer/node_modules/mongodb/lib/bulk/unordered.js
generated
vendored
Normal file
@@ -0,0 +1,624 @@
|
||||
'use strict';
|
||||
|
||||
const common = require('./common');
|
||||
const utils = require('../utils');
|
||||
const toError = require('../utils').toError;
|
||||
const handleCallback = require('../utils').handleCallback;
|
||||
const shallowClone = utils.shallowClone;
|
||||
const BulkWriteResult = common.BulkWriteResult;
|
||||
const ObjectID = require('mongodb-core').BSON.ObjectID;
|
||||
const BSON = require('mongodb-core').BSON;
|
||||
const Batch = common.Batch;
|
||||
const mergeBatchResults = common.mergeBatchResults;
|
||||
const executeOperation = utils.executeOperation;
|
||||
const BulkWriteError = require('./common').BulkWriteError;
|
||||
const applyWriteConcern = utils.applyWriteConcern;
|
||||
|
||||
var bson = new BSON([
|
||||
BSON.Binary,
|
||||
BSON.Code,
|
||||
BSON.DBRef,
|
||||
BSON.Decimal128,
|
||||
BSON.Double,
|
||||
BSON.Int32,
|
||||
BSON.Long,
|
||||
BSON.Map,
|
||||
BSON.MaxKey,
|
||||
BSON.MinKey,
|
||||
BSON.ObjectId,
|
||||
BSON.BSONRegExp,
|
||||
BSON.Symbol,
|
||||
BSON.Timestamp
|
||||
]);
|
||||
|
||||
/**
 * Create a FindOperatorsUnordered instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @param {UnorderedBulkOperation} self The parent bulk operation.
 * @property {number} length Get the number of operations in the bulk.
 * @return {FindOperatorsUnordered} a FindOperatorsUnordered instance.
 */
var FindOperatorsUnordered = function(self) {
  // Share the parent bulk operation's internal state so chained
  // update/remove calls append to the same batch lists.
  this.s = self.s;
};
|
||||
|
||||
/**
 * Add a single update document to the bulk operation
 *
 * @method
 * @param {object} updateDocument update operations
 * @throws {MongoError}
 * @return {FindOperatorsUnordered}
 */
FindOperatorsUnordered.prototype.update = function(updateDocument) {
  var currentOp = this.s.currentOp;

  // Only honour upsert when it was explicitly set (via upsert()).
  var upsert = false;
  if (typeof currentOp.upsert === 'boolean') {
    upsert = currentOp.upsert;
  }

  // Build the server update command document; multi: true updates all matches.
  var updateCommand = {
    q: currentOp.selector,
    u: updateDocument,
    multi: true,
    upsert: upsert
  };

  // Reset the pending find() state before queueing the operation.
  this.s.currentOp = null;

  return addToOperationsList(this, common.UPDATE, updateCommand);
};
|
||||
|
||||
/**
 * Add a single update one document to the bulk operation
 *
 * @method
 * @param {object} updateDocument update operations
 * @throws {MongoError}
 * @return {FindOperatorsUnordered}
 */
FindOperatorsUnordered.prototype.updateOne = function(updateDocument) {
  var currentOp = this.s.currentOp;

  // upsert() must have been called explicitly; any non-boolean value means false.
  var upsert = currentOp.upsert === true;

  // multi: false restricts the update to the first matching document.
  var updateCommand = {
    q: currentOp.selector,
    u: updateDocument,
    multi: false,
    upsert: upsert
  };

  // Reset the pending find() state before queueing the operation.
  this.s.currentOp = null;

  return addToOperationsList(this, common.UPDATE, updateCommand);
};
|
||||
|
||||
/**
 * Add a replace one operation to the bulk operation
 *
 * @method
 * @param {object} updateDocument the new document to replace the existing one with
 * @throws {MongoError}
 * @return {FindOperatorsUnordered}
 */
FindOperatorsUnordered.prototype.replaceOne = function(updateDocument) {
  // Delegate to updateOne (replace = single-document update) and propagate its
  // return value so replaceOne is chainable like update/updateOne. Previously
  // the return value was dropped and callers received undefined, contradicting
  // the documented @return.
  return this.updateOne(updateDocument);
};
|
||||
|
||||
/**
 * Upsert modifier for update bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {FindOperatorsUnordered}
 */
FindOperatorsUnordered.prototype.upsert = function() {
  // Mark the pending find() operation so the next update/updateOne/replaceOne
  // builds its command with upsert: true.
  this.s.currentOp.upsert = true;
  return this;
};
|
||||
|
||||
/**
 * Add a remove one operation to the bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {FindOperatorsUnordered}
 */
FindOperatorsUnordered.prototype.removeOne = function() {
  // limit: 1 deletes only the first document matching the selector.
  var removeCommand = {
    q: this.s.currentOp.selector,
    limit: 1
  };

  // Reset the pending find() state before queueing the operation.
  this.s.currentOp = null;

  return addToOperationsList(this, common.REMOVE, removeCommand);
};
|
||||
|
||||
/**
 * Add a remove operation to the bulk operation
 *
 * @method
 * @throws {MongoError}
 * @return {FindOperatorsUnordered}
 */
FindOperatorsUnordered.prototype.remove = function() {
  // limit: 0 deletes every document matching the selector.
  var removeCommand = {
    q: this.s.currentOp.selector,
    limit: 0
  };

  // Reset the pending find() state before queueing the operation.
  this.s.currentOp = null;

  return addToOperationsList(this, common.REMOVE, removeCommand);
};
|
||||
|
||||
//
// Add to the operations list
//
// Appends a single write document to the open batch for its operation type,
// rotating to a new batch when server size limits would be exceeded.
//
var addToOperationsList = function(_self, docType, document) {
  // Get the bsonSize
  var bsonSize = bson.calculateObjectSize(document, {
    checkKeys: false
  });
  // Throw error if the doc is bigger than the max BSON size
  if (bsonSize >= _self.s.maxBatchSizeBytes)
    throw toError('document is larger than the maximum size ' + _self.s.maxBatchSizeBytes);
  // Holds the current batch
  _self.s.currentBatch = null;
  // Get the right type of batch — unordered bulks keep one open batch per
  // operation type (insert/update/remove).
  if (docType === common.INSERT) {
    _self.s.currentBatch = _self.s.currentInsertBatch;
  } else if (docType === common.UPDATE) {
    _self.s.currentBatch = _self.s.currentUpdateBatch;
  } else if (docType === common.REMOVE) {
    _self.s.currentBatch = _self.s.currentRemoveBatch;
  }

  // Create a new batch object if we don't have a current one
  if (_self.s.currentBatch == null) _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);

  // Check if we need to rotate to a new batch: too many operations, too many
  // bytes, or the open batch holds a different operation type.
  if (
    _self.s.currentBatch.size + 1 >= _self.s.maxWriteBatchSize ||
    _self.s.currentBatch.sizeBytes + bsonSize >= _self.s.maxBatchSizeBytes ||
    _self.s.currentBatch.batchType !== docType
  ) {
    // Save the batch to the execution stack
    _self.s.batches.push(_self.s.currentBatch);

    // Create a new batch
    _self.s.currentBatch = new Batch(docType, _self.s.currentIndex);
  }

  // We have an array of documents — a single operation must be one document
  if (Array.isArray(document)) {
    throw toError('operation passed in cannot be an Array');
  } else {
    _self.s.currentBatch.operations.push(document);
    // currentIndex tracks the caller's original operation order so results and
    // errors can be mapped back after out-of-order execution.
    _self.s.currentBatch.originalIndexes.push(_self.s.currentIndex);
    _self.s.currentIndex = _self.s.currentIndex + 1;
  }

  // Save back the current Batch to the right type
  if (docType === common.INSERT) {
    _self.s.currentInsertBatch = _self.s.currentBatch;
    // Record the (index, _id) pair for the final BulkWriteResult.
    _self.s.bulkResult.insertedIds.push({
      index: _self.s.bulkResult.insertedIds.length,
      _id: document._id
    });
  } else if (docType === common.UPDATE) {
    _self.s.currentUpdateBatch = _self.s.currentBatch;
  } else if (docType === common.REMOVE) {
    _self.s.currentRemoveBatch = _self.s.currentBatch;
  }

  // Update current batch size
  _self.s.currentBatch.size = _self.s.currentBatch.size + 1;
  _self.s.currentBatch.sizeBytes = _self.s.currentBatch.sizeBytes + bsonSize;

  // Return self for chaining
  return _self;
};
|
||||
|
||||
/**
 * Create a new UnorderedBulkOperation instance (INTERNAL TYPE, do not instantiate directly)
 * @class
 * @param {object} topology The topology used to execute the writes.
 * @param {Collection} collection The collection the bulk writes target.
 * @param {object} [options] Optional settings (write concern, promiseLibrary, ...).
 * @property {number} length Get the number of operations in the bulk.
 * @return {UnorderedBulkOperation} a UnorderedBulkOperation instance.
 */
var UnorderedBulkOperation = function(topology, collection, options) {
  options = options == null ? {} : options;

  // Get the namespace for the write operations
  var namespace = collection.collectionName;
  // Used to mark operation as executed (a bulk may only run once)
  var executed = false;

  // Current find() operation being built, if any
  var currentOp = null;

  // Handle to the bson serializer, used to calculate running sizes
  var bson = topology.bson;

  // Set max byte size from the server handshake, with a local fallback.
  // NOTE(review): the fallback 1024 * 1025 * 16 looks like a typo for
  // 1024 * 1024 * 16 (16 MiB, the server BSON document limit) — it matches the
  // upstream driver, so confirm against upstream before changing.
  var maxBatchSizeBytes =
    topology.isMasterDoc && topology.isMasterDoc.maxBsonObjectSize
      ? topology.isMasterDoc.maxBsonObjectSize
      : 1024 * 1025 * 16;
  var maxWriteBatchSize =
    topology.isMasterDoc && topology.isMasterDoc.maxWriteBatchSize
      ? topology.isMasterDoc.maxWriteBatchSize
      : 1000;

  // Get the write concern (merged from options and collection defaults)
  var writeConcern = applyWriteConcern(shallowClone(options), { collection: collection }, options);
  writeConcern = writeConcern.writeConcern;

  // Get the promiseLibrary (defaults to the global Promise)
  var promiseLibrary = options.promiseLibrary || Promise;

  // Final results accumulator, merged into by each executed batch
  var bulkResult = {
    ok: 1,
    writeErrors: [],
    writeConcernErrors: [],
    insertedIds: [],
    nInserted: 0,
    nUpserted: 0,
    nMatched: 0,
    nModified: 0,
    nRemoved: 0,
    upserted: []
  };

  // Internal state
  this.s = {
    // Final result
    bulkResult: bulkResult,
    // Current batch state — one open batch per operation type
    currentInsertBatch: null,
    currentUpdateBatch: null,
    currentRemoveBatch: null,
    currentBatch: null,
    currentIndex: 0,
    batches: [],
    // Write concern
    writeConcern: writeConcern,
    // Max batch size options
    maxBatchSizeBytes: maxBatchSizeBytes,
    maxWriteBatchSize: maxWriteBatchSize,
    // Namespace
    namespace: namespace,
    // BSON
    bson: bson,
    // Topology
    topology: topology,
    // Options
    options: options,
    // Current operation
    currentOp: currentOp,
    // Executed
    executed: executed,
    // Collection
    collection: collection,
    // Promise Library
    promiseLibrary: promiseLibrary,
    // Bypass validation
    bypassDocumentValidation:
      typeof options.bypassDocumentValidation === 'boolean'
        ? options.bypassDocumentValidation
        : false,
    // check keys
    checkKeys: typeof options.checkKeys === 'boolean' ? options.checkKeys : true
  };
};
|
||||
|
||||
/**
 * Add a single insert document to the bulk operation
 *
 * @param {object} document the document to insert
 * @throws {MongoError}
 * @return {UnorderedBulkOperation}
 */
UnorderedBulkOperation.prototype.insert = function(document) {
  // Generate a client-side _id unless the server is configured to assign one.
  var serverAssignsId = this.s.collection.s.db.options.forceServerObjectId === true;
  if (!serverAssignsId && document._id == null) {
    document._id = new ObjectID();
  }
  return addToOperationsList(this, common.INSERT, document);
};
|
||||
|
||||
/**
 * Initiate a find operation for an update/updateOne/remove/removeOne/replaceOne
 *
 * @method
 * @param {object} selector The selector for the bulk operation.
 * @throws {MongoError}
 * @return {FindOperatorsUnordered}
 */
UnorderedBulkOperation.prototype.find = function(selector) {
  // A selector is mandatory; refuse blind updates/removes.
  if (!selector) {
    throw toError('Bulk find operation must specify a selector');
  }

  // Stash the selector; the returned operator object consumes it.
  this.s.currentOp = { selector: selector };

  return new FindOperatorsUnordered(this);
};
|
||||
|
||||
// Number of operations added to the bulk so far (the running original-order index).
Object.defineProperty(UnorderedBulkOperation.prototype, 'length', {
  enumerable: true,
  get: function() {
    return this.s.currentIndex;
  }
});
|
||||
|
||||
// Add a raw bulkWrite-style operation (insertOne/insertMany/updateOne/updateMany/
// replaceOne/removeOne/removeMany/deleteOne/deleteMany) to the bulk. Dispatches
// on the single key of `op`; branch order matters (legacy `q`-shaped documents
// are matched before CRUD-spec `filter`-shaped ones).
UnorderedBulkOperation.prototype.raw = function(op) {
  var key = Object.keys(op)[0];

  // Set up the force server object id (per-bulk option wins over db default)
  var forceServerObjectId =
    typeof this.s.options.forceServerObjectId === 'boolean'
      ? this.s.options.forceServerObjectId
      : this.s.collection.s.db.options.forceServerObjectId;

  // Update operations already in server command shape ({ q, u, ... })
  if (
    (op.updateOne && op.updateOne.q) ||
    (op.updateMany && op.updateMany.q) ||
    (op.replaceOne && op.replaceOne.q)
  ) {
    // NOTE: mutates the caller-supplied operation document in place.
    op[key].multi = op.updateOne || op.replaceOne ? false : true;
    return addToOperationsList(this, common.UPDATE, op[key]);
  }

  // Crud spec update format ({ filter, update/replacement, ... })
  if (op.updateOne || op.updateMany || op.replaceOne) {
    var multi = op.updateOne || op.replaceOne ? false : true;
    var operation = { q: op[key].filter, u: op[key].update || op[key].replacement, multi: multi };
    if (op[key].upsert) operation.upsert = true;
    if (op[key].arrayFilters) operation.arrayFilters = op[key].arrayFilters;
    return addToOperationsList(this, common.UPDATE, operation);
  }

  // Remove operations already in server command shape ({ q, ... })
  if (
    op.removeOne ||
    op.removeMany ||
    (op.deleteOne && op.deleteOne.q) ||
    (op.deleteMany && op.deleteMany.q)
  ) {
    // NOTE: mutates the caller-supplied operation document in place.
    op[key].limit = op.removeOne ? 1 : 0;
    return addToOperationsList(this, common.REMOVE, op[key]);
  }

  // Crud spec delete operations, less efficient
  if (op.deleteOne || op.deleteMany) {
    var limit = op.deleteOne ? 1 : 0;
    operation = { q: op[key].filter, limit: limit };
    return addToOperationsList(this, common.REMOVE, operation);
  }

  // Insert operations: legacy shape passes the document as op.insertOne itself;
  // CRUD-spec shape nests it under op.insertOne.document.
  if (op.insertOne && op.insertOne.document == null) {
    if (forceServerObjectId !== true && op.insertOne._id == null) op.insertOne._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne);
  } else if (op.insertOne && op.insertOne.document) {
    if (forceServerObjectId !== true && op.insertOne.document._id == null)
      op.insertOne.document._id = new ObjectID();
    return addToOperationsList(this, common.INSERT, op.insertOne.document);
  }

  if (op.insertMany) {
    for (var i = 0; i < op.insertMany.length; i++) {
      if (forceServerObjectId !== true && op.insertMany[i]._id == null)
        op.insertMany[i]._id = new ObjectID();
      addToOperationsList(this, common.INSERT, op.insertMany[i]);
    }

    // insertMany intentionally returns undefined (no chaining).
    return;
  }

  // No valid type of operation
  throw toError(
    'bulkWrite only supports insertOne, insertMany, updateOne, updateMany, removeOne, removeMany, deleteOne, deleteMany'
  );
};
|
||||
|
||||
//
// Execute a single batch against the topology, translating batch-level write
// errors into the shared bulkResult and reporting driver-level errors directly.
var executeBatch = function(self, batch, options, callback) {
  // Unordered execution: the server may apply the operations in any order.
  var finalOptions = Object.assign({ ordered: false }, options);
  if (self.s.writeConcern != null) {
    finalOptions.writeConcern = self.s.writeConcern;
  }

  var resultHandler = function(err, result) {
    // Error is a driver related error not a bulk op error, terminate
    if ((err && err.driver) || (err && err.message)) {
      return handleCallback(callback, err);
    }

    // If we have an error, mark the merged result as failed
    if (err) err.ok = 0;
    handleCallback(callback, null, mergeBatchResults(false, batch, self.s.bulkResult, err, result));
  };

  // Set an operationId if provided (used by APM/command monitoring)
  if (self.operationId) {
    resultHandler.operationId = self.operationId;
  }

  // Serialize functions
  if (self.s.options.serializeFunctions) {
    finalOptions.serializeFunctions = true;
  }

  // Ignore undefined
  if (self.s.options.ignoreUndefined) {
    finalOptions.ignoreUndefined = true;
  }

  // Is the bypassDocumentValidation option specified
  if (self.s.bypassDocumentValidation === true) {
    finalOptions.bypassDocumentValidation = true;
  }

  // Is the checkKeys option disabled
  if (self.s.checkKeys === false) {
    finalOptions.checkKeys = false;
  }

  // Dispatch to the topology write method matching the batch type.
  try {
    if (batch.batchType === common.INSERT) {
      self.s.topology.insert(
        self.s.collection.namespace,
        batch.operations,
        finalOptions,
        resultHandler
      );
    } else if (batch.batchType === common.UPDATE) {
      self.s.topology.update(
        self.s.collection.namespace,
        batch.operations,
        finalOptions,
        resultHandler
      );
    } else if (batch.batchType === common.REMOVE) {
      self.s.topology.remove(
        self.s.collection.namespace,
        batch.operations,
        finalOptions,
        resultHandler
      );
    }
  } catch (err) {
    // Force top level error
    err.ok = 0;
    // Merge top level error and return
    handleCallback(callback, null, mergeBatchResults(false, batch, self.s.bulkResult, err, null));
  }
};
|
||||
|
||||
//
// Execute all the commands
//
// Runs every queued batch in parallel and aggregates the results; the final
// callback fires exactly once, after the last batch completes.
var executeBatches = function(self, options, callback) {
  var numberOfCommandsToExecute = self.s.batches.length;
  // Remember the first driver-level error from any batch. The previous code
  // only inspected the `err` of whichever batch happened to finish last, so a
  // driver error from an earlier batch was silently dropped.
  var firstError = null;

  // Execute over all the batches
  for (var i = 0; i < self.s.batches.length; i++) {
    executeBatch(self, self.s.batches[i], options, function(err) {
      if (err && firstError == null) firstError = err;

      // Count down the number of commands left to execute
      numberOfCommandsToExecute = numberOfCommandsToExecute - 1;

      // All batches have completed — resolve the overall result
      if (numberOfCommandsToExecute === 0) {
        // Driver level error (from any batch)
        if (firstError) return handleCallback(callback, firstError);

        const writeResult = new BulkWriteResult(self.s.bulkResult);
        if (self.s.bulkResult.writeErrors.length > 0) {
          // A single write error is surfaced directly
          if (self.s.bulkResult.writeErrors.length === 1) {
            return handleCallback(
              callback,
              new BulkWriteError(toError(self.s.bulkResult.writeErrors[0]), writeResult),
              null
            );
          }

          // Multiple write errors are wrapped in one aggregate error
          return handleCallback(
            callback,
            new BulkWriteError(
              toError({
                message: 'write operation failed',
                code: self.s.bulkResult.writeErrors[0].code,
                writeErrors: self.s.bulkResult.writeErrors
              }),
              writeResult
            ),
            null
          );
        } else if (writeResult.getWriteConcernError()) {
          return handleCallback(
            callback,
            new BulkWriteError(toError(writeResult.getWriteConcernError()), writeResult),
            null
          );
        }

        return handleCallback(callback, null, writeResult);
      }
    });
  }
};
|
||||
|
||||
/**
|
||||
* The callback format for results
|
||||
* @callback UnorderedBulkOperation~resultCallback
|
||||
* @param {MongoError} error An error instance representing the error during the execution.
|
||||
* @param {BulkWriteResult} result The bulk write result.
|
||||
*/
|
||||
|
||||
/**
 * Execute the unordered bulk operation
 *
 * @method
 * @param {object} [options=null] Optional settings.
 * @param {(number|string)} [options.w=null] The write concern.
 * @param {number} [options.wtimeout=null] The write concern timeout.
 * @param {boolean} [options.j=false] Specify a journal write concern.
 * @param {boolean} [options.fsync=false] Specify a file sync write concern.
 * @param {UnorderedBulkOperation~resultCallback} [callback] The result callback
 * @throws {MongoError}
 * @return {Promise} returns Promise if no callback passed
 */
UnorderedBulkOperation.prototype.execute = function(_writeConcern, options, callback) {
  // Support execute(writeConcern, callback) by shifting arguments.
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }
  options = options || {};

  var state = this.s;

  // A bulk operation may only be executed once.
  if (state.executed) {
    var reExecuteError = toError('batch cannot be re-executed');
    if (typeof callback === 'function') return callback(reExecuteError, null);
    return state.promiseLibrary.reject(reExecuteError);
  }

  // The first positional argument may be a callback or a write concern document.
  if (typeof _writeConcern === 'function') {
    callback = _writeConcern;
  } else if (_writeConcern && typeof _writeConcern === 'object') {
    state.writeConcern = _writeConcern;
  }

  // Flush every open per-type batch onto the execution stack.
  if (state.currentInsertBatch) state.batches.push(state.currentInsertBatch);
  if (state.currentUpdateBatch) state.batches.push(state.currentUpdateBatch);
  if (state.currentRemoveBatch) state.batches.push(state.currentRemoveBatch);

  // Executing an empty bulk is an error.
  if (state.batches.length === 0) {
    var noOpsError = toError('Invalid Operation, no operations specified');
    if (typeof callback === 'function') return callback(noOpsError, null);
    return state.promiseLibrary.reject(noOpsError);
  }

  return executeOperation(state.topology, executeBatches, [this, options, callback]);
};
|
||||
|
||||
/**
 * Returns an unordered bulk operation object
 * @ignore
 */
function initializeUnorderedBulkOp(topology, collection, options) {
  return new UnorderedBulkOperation(topology, collection, options);
}

initializeUnorderedBulkOp.UnorderedBulkOperation = UnorderedBulkOperation;
module.exports = initializeUnorderedBulkOp;
module.exports.Bulk = UnorderedBulkOperation;
|
Reference in New Issue
Block a user