mirror of
https://github.com/bvanroll/rpiRadio.git
synced 2025-08-30 20:42:44 +00:00
Initial Commit
This commit is contained in:
29
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/defaultAuthProviders.js
generated
vendored
Normal file
29
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/defaultAuthProviders.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
'use strict';
|
||||
|
||||
const MongoCR = require('./mongocr');
|
||||
const X509 = require('./x509');
|
||||
const Plain = require('./plain');
|
||||
const GSSAPI = require('./gssapi');
|
||||
const SSPI = require('./sspi');
|
||||
const ScramSHA1 = require('./scram').ScramSHA1;
|
||||
const ScramSHA256 = require('./scram').ScramSHA256;
|
||||
|
||||
/**
 * Returns the default authentication providers.
 *
 * Each provider instance wraps the supplied BSON implementation and is
 * registered under the wire-protocol mechanism name callers look it up by.
 *
 * @param {BSON} bson Bson definition
 * @returns {Object} a mapping of auth names to auth types
 */
function defaultAuthProviders(bson) {
  const providers = {};

  providers.mongocr = new MongoCR(bson);
  providers.x509 = new X509(bson);
  providers.plain = new Plain(bson);
  providers.gssapi = new GSSAPI(bson);
  providers.sspi = new SSPI(bson);
  providers['scram-sha-1'] = new ScramSHA1(bson);
  providers['scram-sha-256'] = new ScramSHA256(bson);

  return providers;
}
|
||||
|
||||
module.exports = { defaultAuthProviders };
|
384
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/gssapi.js
generated
vendored
Normal file
384
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/gssapi.js
generated
vendored
Normal file
@@ -0,0 +1,384 @@
|
||||
'use strict';
|
||||
|
||||
var f = require('util').format,
|
||||
require_optional = require('require_optional'),
|
||||
Query = require('../connection/commands').Query,
|
||||
MongoError = require('../error').MongoError;
|
||||
|
||||
/**
 * Captures the credentials (plus GSSAPI options) used to authenticate a
 * connection pool so they can be replayed on re-authentication.
 */
var AuthSession = function(db, username, password, options) {
  this.db = db;
  this.username = username;
  this.password = password;
  this.options = options;
};

// Two sessions are considered the same credential set when db, username and
// password all match; `options` is intentionally ignored in the comparison.
AuthSession.prototype.equal = function(session) {
  if (session.db !== this.db) return false;
  if (session.username !== this.username) return false;
  return session.password === this.password;
};
|
||||
|
||||
// Kerberos class. Stays null when the optional 'kerberos' native package is
// not installed.
var Kerberos = null;
var MongoAuthProcess = null;

// Try to grab the Kerberos class. The dependency is optional, so a failed
// require is deliberately swallowed here: GSSAPI.prototype.auth checks for
// `Kerberos == null` and reports a clear "not installed" error instead.
try {
  Kerberos = require_optional('kerberos').Kerberos;
  // Authentication process for Mongo
  MongoAuthProcess = require_optional('kerberos').processes.MongoAuthProcess;
} catch (err) {} // eslint-disable-line
|
||||
|
||||
/**
 * Creates a new GSSAPI authentication mechanism
 * @class
 * @param {BSON} bson Bson definition used to serialize commands
 * @return {GSSAPI} A cursor instance
 */
var GSSAPI = function(bson) {
  // Sessions successfully authenticated through this mechanism; used by
  // reauthenticate() to replay credentials on new connections.
  this.authStore = [];
  this.bson = bson;
};
|
||||
|
||||
/**
 * Authenticate every supplied connection using GSSAPI/Kerberos.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {[]Connections} connections Connections to authenticate using this authenticator
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {object} options Optional settings (e.g. `gssapiServiceName`)
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
GSSAPI.prototype.auth = function(server, connections, db, username, password, options, callback) {
  var self = this;
  // Kerberos is an optional native dependency; without it GSSAPI cannot run.
  if (Kerberos == null) return callback(new Error('Kerberos library is not installed'));
  var gssapiServiceName = options['gssapiServiceName'] || 'mongodb';
  // Total connections left to authenticate
  var count = connections.length;
  if (count === 0) return callback(null, null);

  // Valid connections
  var numberOfValidConnections = 0;
  var errorObject = null;

  // For each connection we need to run the full GSSAPI conversation
  while (connections.length > 0) {
    // Start the GSSAPI handshake for a single connection
    var execute = function(connection) {
      GSSAPIInitialize(
        self,
        db,
        username,
        password,
        db,
        gssapiServiceName,
        server,
        connection,
        options,
        function(err, r) {
          // One fewer outstanding connection
          count = count - 1;

          // Record failures; a reply without $err/errmsg counts as success
          if (err) {
            errorObject = err;
          } else if (r.result['$err']) {
            errorObject = r.result;
          } else if (r.result['errmsg']) {
            errorObject = r.result;
          } else {
            numberOfValidConnections = numberOfValidConnections + 1;
          }

          // We have processed all connections
          if (count === 0 && numberOfValidConnections > 0) {
            // Store the auth details so reauthenticate() can replay them
            addAuthSession(self.authStore, new AuthSession(db, username, password, options));
            // Return correct authentication
            callback(null, true);
          } else if (count === 0) {
            // FIX: fallback message previously said 'mongocr' (copy-paste
            // from mongocr.js); report the actual mechanism.
            if (errorObject == null)
              errorObject = new MongoError(f('failed to authenticate using gssapi'));
            callback(errorObject, false);
          }
        }
      );
    };

    // Defer each handshake to the next tick so this loop can synchronously
    // drain `connections` before any callback runs.
    var _execute = function(_connection) {
      process.nextTick(function() {
        execute(_connection);
      });
    };

    _execute(connections.shift());
  }
};
|
||||
|
||||
//
// Initialize step: create the kerberos MongoAuthProcess for this connection
// and drive the first client-side transition (with empty server input) to
// obtain the initial GSSAPI payload, then hand off to MongoDBGSSAPIFirstStep.
// NOTE(review): the caller in GSSAPI.prototype.auth passes `db` for both the
// `db` and `authdb` arguments — confirm whether a distinct auth database was
// ever intended here.
var GSSAPIInitialize = function(
  self,
  db,
  username,
  password,
  authdb,
  gssapiServiceName,
  server,
  connection,
  options,
  callback
) {
  // Create authenticator bound to this connection's host/port
  var mongo_auth_process = new MongoAuthProcess(
    connection.host,
    connection.port,
    gssapiServiceName,
    options
  );

  // Acquire the kerberos credentials for this user
  mongo_auth_process.init(username, password, function(err) {
    if (err) return callback(err, false);

    // Perform the first client-side step; '' means "no server input yet"
    mongo_auth_process.transition('', function(err, payload) {
      if (err) return callback(err, false);

      // Call the next db step
      MongoDBGSSAPIFirstStep(
        self,
        mongo_auth_process,
        payload,
        db,
        username,
        password,
        authdb,
        server,
        connection,
        callback
      );
    });
  });
};
|
||||
|
||||
//
// Perform first step against mongodb: send saslStart with the initial GSSAPI
// payload, then feed the server's reply back into the kerberos process before
// continuing with the second step.
var MongoDBGSSAPIFirstStep = function(
  self,
  mongo_auth_process,
  payload,
  db,
  username,
  password,
  authdb,
  server,
  connection,
  callback
) {
  // Build the sasl start command
  var command = {
    saslStart: 1,
    mechanism: 'GSSAPI',
    payload: payload,
    autoAuthorize: 1
  };

  // Write the command on the connection; GSSAPI always authenticates
  // against the $external database.
  server(
    connection,
    new Query(self.bson, '$external.$cmd', command, {
      numberToSkip: 0,
      numberToReturn: 1
    }),
    function(err, r) {
      if (err) return callback(err, false);
      // Keep the saslStart reply: its conversationId is required by the
      // subsequent saslContinue commands.
      var doc = r.result;
      // Feed the server's payload into the kerberos state machine
      mongo_auth_process.transition(r.result.payload, function(err, payload) {
        if (err) return callback(err, false);

        // MongoDB API Second Step
        MongoDBGSSAPISecondStep(
          self,
          mongo_auth_process,
          payload,
          doc,
          db,
          username,
          password,
          authdb,
          server,
          connection,
          callback
        );
      });
    }
  );
};
|
||||
|
||||
//
// Perform the second step against mongodb: saslContinue carrying the payload
// the kerberos process produced from the server's first reply.
var MongoDBGSSAPISecondStep = function(
  self,
  mongo_auth_process,
  payload,
  doc,
  db,
  username,
  password,
  authdb,
  server,
  connection,
  callback
) {
  // Build Authentication command to send to MongoDB
  var command = {
    saslContinue: 1,
    conversationId: doc.conversationId,
    payload: payload
  };

  // Execute the command
  // Write the command on the connection
  server(
    connection,
    new Query(self.bson, '$external.$cmd', command, {
      numberToSkip: 0,
      numberToReturn: 1
    }),
    function(err, r) {
      if (err) return callback(err, false);
      var doc = r.result;
      // Call next transition for kerberos with the fresh server payload
      mongo_auth_process.transition(doc.payload, function(err, payload) {
        if (err) return callback(err, false);

        // Call the last and third step
        MongoDBGSSAPIThirdStep(
          self,
          mongo_auth_process,
          payload,
          doc,
          db,
          username,
          password,
          authdb,
          server,
          connection,
          callback
        );
      });
    }
  );
};
|
||||
|
||||
//
// Perform the third and final step against mongodb: send the closing
// saslContinue, then run a last (null-input) kerberos transition so the
// library can finish/clean up before reporting the server's reply.
var MongoDBGSSAPIThirdStep = function(
  self,
  mongo_auth_process,
  payload,
  doc,
  db,
  username,
  password,
  authdb,
  server,
  connection,
  callback
) {
  // Build final command
  var command = {
    saslContinue: 1,
    conversationId: doc.conversationId,
    payload: payload
  };

  // Execute the command
  server(
    connection,
    new Query(self.bson, '$external.$cmd', command, {
      numberToSkip: 0,
      numberToReturn: 1
    }),
    function(err, r) {
      if (err) return callback(err, false);
      mongo_auth_process.transition(null, function(err) {
        // NOTE(review): earlier steps invoke callback(err, false) on error
        // while this one uses null — confirm callers treat them alike.
        if (err) return callback(err, null);
        callback(null, r);
      });
    }
  );
};
|
||||
|
||||
// Add a session to the store unless an equivalent one (as defined by
// AuthSession#equal) is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(stored) {
    return stored.equal(session);
  });

  if (!alreadyStored) authStore.push(session);
};
|
||||
|
||||
/**
 * Remove authStore credentials
 * @method
 * @param {string} dbName Name of the database whose stored credentials are removed
 * @return {object}
 */
GSSAPI.prototype.logout = function(dbName) {
  var remaining = [];
  for (var i = 0; i < this.authStore.length; i++) {
    // Keep every session that belongs to a different database
    if (this.authStore[i].db !== dbName) remaining.push(this.authStore[i]);
  }
  this.authStore = remaining;
};
|
||||
|
||||
/**
 * Re authenticate pool
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {[]Connections} connections Connections to authenticate using this authenticator
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
GSSAPI.prototype.reauthenticate = function(server, connections, callback) {
  // Work on a snapshot so auth() appending to the store cannot affect us
  var sessions = this.authStore.slice(0);
  var remaining = sessions.length;
  if (remaining === 0) return callback(null, null);

  var self = this;
  // Replay every stored credential set against the supplied connections
  sessions.forEach(function(session) {
    self.auth(
      server,
      connections,
      session.db,
      session.username,
      session.password,
      session.options,
      function(err) {
        remaining = remaining - 1;
        // Fire the caller's callback once the last attempt completes
        if (remaining === 0) {
          callback(err, null);
        }
      }
    );
  });
};
|
||||
|
||||
/**
|
||||
* This is a result from a authentication strategy
|
||||
*
|
||||
* @callback authResultCallback
|
||||
* @param {error} error An error object. Set to null if no error present
|
||||
* @param {boolean} result The result of the authentication process
|
||||
*/
|
||||
|
||||
module.exports = GSSAPI;
|
214
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/mongocr.js
generated
vendored
Normal file
214
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/mongocr.js
generated
vendored
Normal file
@@ -0,0 +1,214 @@
|
||||
'use strict';
|
||||
|
||||
var f = require('util').format,
|
||||
crypto = require('crypto'),
|
||||
Query = require('../connection/commands').Query,
|
||||
MongoError = require('../error').MongoError;
|
||||
|
||||
/**
 * Credentials captured after a successful MONGODB-CR authentication so the
 * pool can later be re-authenticated.
 */
var AuthSession = function(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
};

// Two sessions are interchangeable when db, username and password all match.
AuthSession.prototype.equal = function(session) {
  if (session.db !== this.db) return false;
  if (session.username !== this.username) return false;
  return session.password === this.password;
};
|
||||
|
||||
/**
 * Creates a new MongoCR authentication mechanism
 * @class
 * @param {BSON} bson Bson definition used to serialize commands
 * @return {MongoCR} A cursor instance
 */
var MongoCR = function(bson) {
  // Sessions successfully authenticated through this mechanism; replayed by
  // reauthenticate().
  this.authStore = [];
  this.bson = bson;
};
|
||||
|
||||
// Add a session to the store unless an equivalent one (as defined by
// AuthSession#equal) is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(stored) {
    return stored.equal(session);
  });

  if (!alreadyStored) authStore.push(session);
};
|
||||
|
||||
/**
 * Authenticate every supplied connection using the legacy MONGODB-CR
 * (challenge/response) mechanism: fetch a nonce with `getnonce`, then send
 * `authenticate` with key = md5(nonce + username + md5(username:mongo:password)).
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {[]Connections} connections Connections to authenticate using this authenticator
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
MongoCR.prototype.auth = function(server, connections, db, username, password, callback) {
  var self = this;
  // Total connections left to authenticate
  var count = connections.length;
  if (count === 0) return callback(null, null);

  // Valid connections
  var numberOfValidConnections = 0;
  var errorObject = null;

  // For each connection we need to authenticate
  while (connections.length > 0) {
    // Execute MongoCR against a single connection
    var executeMongoCR = function(connection) {
      // Step 1: request a server nonce for the challenge/response
      server(
        connection,
        new Query(
          self.bson,
          f('%s.$cmd', db),
          {
            getnonce: 1
          },
          {
            numberToSkip: 0,
            numberToReturn: 1
          }
        ),
        function(err, r) {
          var nonce = null;
          var key = null;

          // Derive the response key from the nonce; on a getnonce error we
          // still issue the authenticate command (with null nonce/key) so the
          // shared counting logic below runs exactly once per connection.
          if (err == null) {
            nonce = r.result.nonce;
            // Use node md5 generator
            var md5 = crypto.createHash('md5');
            // Inner digest: md5("<username>:mongo:<password>")
            md5.update(username + ':mongo:' + password, 'utf8');
            var hash_password = md5.digest('hex');
            // Final key: md5(nonce + username + inner digest)
            md5 = crypto.createHash('md5');
            md5.update(nonce + username + hash_password, 'utf8');
            key = md5.digest('hex');
          }

          // Step 2: send the authenticate command with the derived key
          server(
            connection,
            new Query(
              self.bson,
              f('%s.$cmd', db),
              {
                authenticate: 1,
                user: username,
                nonce: nonce,
                key: key
              },
              {
                numberToSkip: 0,
                numberToReturn: 1
              }
            ),
            function(err, r) {
              count = count - 1;

              // Record failures; a reply without $err/errmsg is a success
              if (err) {
                errorObject = err;
              } else if (r.result['$err']) {
                errorObject = r.result;
              } else if (r.result['errmsg']) {
                errorObject = r.result;
              } else {
                numberOfValidConnections = numberOfValidConnections + 1;
              }

              // We have authenticated all connections
              if (count === 0 && numberOfValidConnections > 0) {
                // Store the auth details so reauthenticate() can replay them
                addAuthSession(self.authStore, new AuthSession(db, username, password));
                // Return correct authentication
                callback(null, true);
              } else if (count === 0) {
                if (errorObject == null)
                  errorObject = new MongoError(f('failed to authenticate using mongocr'));
                callback(errorObject, false);
              }
            }
          );
        }
      );
    };

    // Defer each handshake to the next tick so this loop can synchronously
    // drain `connections` before any callback runs.
    var _execute = function(_connection) {
      process.nextTick(function() {
        executeMongoCR(_connection);
      });
    };

    _execute(connections.shift());
  }
};
|
||||
|
||||
/**
 * Remove authStore credentials
 * @method
 * @param {string} dbName Name of the database whose stored credentials are removed
 * @return {object}
 */
MongoCR.prototype.logout = function(dbName) {
  var remaining = [];
  for (var i = 0; i < this.authStore.length; i++) {
    // Keep every session that belongs to a different database
    if (this.authStore[i].db !== dbName) remaining.push(this.authStore[i]);
  }
  this.authStore = remaining;
};
|
||||
|
||||
/**
 * Re authenticate pool
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {[]Connections} connections Connections to authenticate using this authenticator
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
MongoCR.prototype.reauthenticate = function(server, connections, callback) {
  // Work on a snapshot so auth() appending to the store cannot affect us
  var sessions = this.authStore.slice(0);
  var remaining = sessions.length;
  if (remaining === 0) return callback(null, null);

  var self = this;
  // Replay every stored credential set against the supplied connections
  sessions.forEach(function(session) {
    self.auth(server, connections, session.db, session.username, session.password, function(err) {
      remaining = remaining - 1;
      // Fire the caller's callback once the last attempt completes
      if (remaining === 0) {
        callback(err, null);
      }
    });
  });
};
|
||||
|
||||
/**
|
||||
* This is a result from a authentication strategy
|
||||
*
|
||||
* @callback authResultCallback
|
||||
* @param {error} error An error object. Set to null if no error present
|
||||
* @param {boolean} result The result of the authentication process
|
||||
*/
|
||||
|
||||
module.exports = MongoCR;
|
183
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/plain.js
generated
vendored
Normal file
183
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/plain.js
generated
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
'use strict';
|
||||
|
||||
var f = require('util').format,
|
||||
retrieveBSON = require('../connection/utils').retrieveBSON,
|
||||
Query = require('../connection/commands').Query,
|
||||
MongoError = require('../error').MongoError;
|
||||
|
||||
var BSON = retrieveBSON(),
|
||||
Binary = BSON.Binary;
|
||||
|
||||
/**
 * Credentials captured after a successful PLAIN authentication so the pool
 * can later be re-authenticated.
 */
var AuthSession = function(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
};

// Two sessions are interchangeable when db, username and password all match.
AuthSession.prototype.equal = function(session) {
  if (session.db !== this.db) return false;
  if (session.username !== this.username) return false;
  return session.password === this.password;
};
|
||||
|
||||
/**
 * Creates a new Plain (SASL PLAIN) authentication mechanism
 * @class
 * @param {BSON} bson Bson definition used to serialize commands
 * @return {Plain} A cursor instance
 */
var Plain = function(bson) {
  // Sessions successfully authenticated through this mechanism; replayed by
  // reauthenticate().
  this.authStore = [];
  this.bson = bson;
};
|
||||
|
||||
/**
 * Authenticate every supplied connection using the SASL PLAIN mechanism.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {[]Connections} connections Connections to authenticate using this authenticator
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
Plain.prototype.auth = function(server, connections, db, username, password, callback) {
  var self = this;
  // Total connections left to authenticate
  var count = connections.length;
  if (count === 0) return callback(null, null);

  // Valid connections
  var numberOfValidConnections = 0;
  var errorObject = null;

  // For each connection we need to authenticate
  while (connections.length > 0) {
    // Execute the PLAIN handshake against a single connection
    var execute = function(connection) {
      // SASL PLAIN payload: <NUL>username<NUL>password
      var payload = new Binary(f('\x00%s\x00%s', username, password));

      // Build the saslStart command
      var command = {
        saslStart: 1,
        mechanism: 'PLAIN',
        payload: payload,
        autoAuthorize: 1
      };

      // Write the command on the connection; PLAIN authenticates against
      // the $external database.
      server(
        connection,
        new Query(self.bson, '$external.$cmd', command, {
          numberToSkip: 0,
          numberToReturn: 1
        }),
        function(err, r) {
          // Adjust count
          count = count - 1;

          // Record failures; a reply without $err/errmsg is a success
          if (err) {
            errorObject = err;
          } else if (r.result['$err']) {
            errorObject = r.result;
          } else if (r.result['errmsg']) {
            errorObject = r.result;
          } else {
            numberOfValidConnections = numberOfValidConnections + 1;
          }

          // We have authenticated all connections
          if (count === 0 && numberOfValidConnections > 0) {
            // Store the auth details so reauthenticate() can replay them
            addAuthSession(self.authStore, new AuthSession(db, username, password));
            // Return correct authentication
            callback(null, true);
          } else if (count === 0) {
            // FIX: fallback message previously said 'mongocr' (copy-paste
            // from mongocr.js); report the actual mechanism.
            if (errorObject == null)
              errorObject = new MongoError(f('failed to authenticate using plain'));
            callback(errorObject, false);
          }
        }
      );
    };

    // Defer each handshake to the next tick so this loop can synchronously
    // drain `connections` before any callback runs.
    var _execute = function(_connection) {
      process.nextTick(function() {
        execute(_connection);
      });
    };

    _execute(connections.shift());
  }
};
|
||||
|
||||
// Add a session to the store unless an equivalent one (as defined by
// AuthSession#equal) is already present.
var addAuthSession = function(authStore, session) {
  var alreadyStored = authStore.some(function(stored) {
    return stored.equal(session);
  });

  if (!alreadyStored) authStore.push(session);
};
|
||||
|
||||
/**
 * Remove authStore credentials
 * @method
 * @param {string} dbName Name of the database whose stored credentials are removed
 * @return {object}
 */
Plain.prototype.logout = function(dbName) {
  var remaining = [];
  for (var i = 0; i < this.authStore.length; i++) {
    // Keep every session that belongs to a different database
    if (this.authStore[i].db !== dbName) remaining.push(this.authStore[i]);
  }
  this.authStore = remaining;
};
|
||||
|
||||
/**
 * Re authenticate pool
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {[]Connections} connections Connections to authenticate using this authenticator
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
Plain.prototype.reauthenticate = function(server, connections, callback) {
  // Work on a snapshot so auth() appending to the store cannot affect us
  var sessions = this.authStore.slice(0);
  var remaining = sessions.length;
  if (remaining === 0) return callback(null, null);

  var self = this;
  // Replay every stored credential set against the supplied connections
  sessions.forEach(function(session) {
    self.auth(server, connections, session.db, session.username, session.password, function(err) {
      remaining = remaining - 1;
      // Fire the caller's callback once the last attempt completes
      if (remaining === 0) {
        callback(err, null);
      }
    });
  });
};
|
||||
|
||||
/**
|
||||
* This is a result from a authentication strategy
|
||||
*
|
||||
* @callback authResultCallback
|
||||
* @param {error} error An error object. Set to null if no error present
|
||||
* @param {boolean} result The result of the authentication process
|
||||
*/
|
||||
|
||||
module.exports = Plain;
|
441
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/scram.js
generated
vendored
Normal file
441
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/scram.js
generated
vendored
Normal file
@@ -0,0 +1,441 @@
|
||||
'use strict';
|
||||
|
||||
var f = require('util').format,
|
||||
crypto = require('crypto'),
|
||||
retrieveBSON = require('../connection/utils').retrieveBSON,
|
||||
Query = require('../connection/commands').Query,
|
||||
MongoError = require('../error').MongoError;
|
||||
|
||||
// saslprep is an optional dependency used to sanitize SCRAM-SHA-256
// passwords; when it is missing, auth() falls back to the raw password and
// prints a warning.
let saslprep;

try {
  saslprep = require('saslprep');
} catch (e) {
  // don't do anything; the library is optional
}

// Resolve the BSON implementation and grab the Binary type used to wrap
// SASL payloads.
var BSON = retrieveBSON(),
  Binary = BSON.Binary;
|
||||
|
||||
/**
 * Credentials captured after a successful SCRAM authentication so the pool
 * can later be re-authenticated.
 */
var AuthSession = function(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
};

// Two sessions are interchangeable when db, username and password all match.
AuthSession.prototype.equal = function(session) {
  if (session.db !== this.db) return false;
  if (session.username !== this.username) return false;
  return session.password === this.password;
};
|
||||
|
||||
// Monotonically increasing identifier handed to each ScramSHA instance.
var id = 0;

/**
 * Creates a new ScramSHA authentication mechanism
 * @class
 * @param {BSON} bson Bson definition used to serialize commands
 * @param {string} [cryptoMethod] Digest algorithm, 'sha1' (default) or 'sha256'
 * @return {ScramSHA} A cursor instance
 */
var ScramSHA = function(bson, cryptoMethod) {
  this.id = id;
  id = id + 1;
  this.bson = bson;
  // Sessions successfully authenticated through this mechanism.
  this.authStore = [];
  this.cryptoMethod = cryptoMethod || 'sha1';
};
|
||||
|
||||
/**
 * Parses a SCRAM conversation payload of the form "k1=v1,k2=v2,..." into a
 * dictionary of attribute name -> raw string value.
 *
 * FIX: values may themselves contain '=' characters — notably the
 * base64-encoded salt ("s=...==") whose padding the previous
 * split-on-every-'=' implementation silently truncated. Each part is now
 * split on the FIRST '=' only (per RFC 5802 attr-val syntax).
 *
 * @param {string} payload comma-separated attr=value string
 * @returns {Object} mapping of attribute names to their values
 */
var parsePayload = function(payload) {
  var dict = {};
  var parts = payload.split(',');

  for (var i = 0; i < parts.length; i++) {
    var part = parts[i];
    var sep = part.indexOf('=');
    if (sep === -1) {
      // No '=' at all: mirror the old behavior (key present, value undefined)
      dict[part] = undefined;
    } else {
      dict[part.substring(0, sep)] = part.substring(sep + 1);
    }
  }

  return dict;
};
|
||||
|
||||
/**
 * Computes the legacy MongoDB password digest used as the SCRAM-SHA-1
 * client password: md5("<username>:mongo:<password>") as a hex string.
 *
 * @param {string} username
 * @param {string} password non-empty password
 * @returns {string} hex md5 digest
 * @throws {MongoError} when username/password are not strings or password is empty
 */
var passwordDigest = function(username, password) {
  if (typeof username !== 'string') throw new MongoError('username must be a string');
  if (typeof password !== 'string') throw new MongoError('password must be a string');
  if (password.length === 0) throw new MongoError('password cannot be empty');

  return crypto
    .createHash('md5')
    .update(username + ':mongo:' + password, 'utf8')
    .digest('hex');
};
|
||||
|
||||
// XOR two buffers (e.g. ClientKey ^ ClientSignature from RFC 5802) and
// return the result base64-encoded. When the inputs differ in length, the
// shorter one is treated as zero-extended (out-of-range reads yield
// undefined, and `undefined ^ x === x`).
// FIX: replaced the deprecated/unsafe `new Buffer(...)` constructor with
// `Buffer.from(...)` (same behavior, no deprecation warning).
function xor(a, b) {
  if (!Buffer.isBuffer(a)) a = Buffer.from(a);
  if (!Buffer.isBuffer(b)) b = Buffer.from(b);
  const length = Math.max(a.length, b.length);
  const res = [];

  for (let i = 0; i < length; i += 1) {
    res.push(a[i] ^ b[i]);
  }

  return Buffer.from(res).toString('base64');
}
|
||||
|
||||
// One-shot hash H(text) from RFC 5802, using the given digest algorithm
// ('sha1' or 'sha256'); returns a Buffer.
function H(method, text) {
  var hash = crypto.createHash(method);
  hash.update(text);
  return hash.digest();
}
|
||||
|
||||
// Keyed HMAC(key, text) from RFC 5802, using the given digest algorithm;
// returns a Buffer.
function HMAC(method, key, text) {
  var mac = crypto.createHmac(method, key);
  mac.update(text);
  return mac.digest();
}
|
||||
|
||||
var _hiCache = {};
|
||||
var _hiCacheCount = 0;
|
||||
var _hiCachePurge = function() {
|
||||
_hiCache = {};
|
||||
_hiCacheCount = 0;
|
||||
};
|
||||
|
||||
const hiLengthMap = {
|
||||
sha256: 32,
|
||||
sha1: 20
|
||||
};
|
||||
|
||||
function HI(data, salt, iterations, cryptoMethod) {
|
||||
// omit the work if already generated
|
||||
const key = [data, salt.toString('base64'), iterations].join('_');
|
||||
if (_hiCache[key] !== undefined) {
|
||||
return _hiCache[key];
|
||||
}
|
||||
|
||||
// generate the salt
|
||||
const saltedData = crypto.pbkdf2Sync(
|
||||
data,
|
||||
salt,
|
||||
iterations,
|
||||
hiLengthMap[cryptoMethod],
|
||||
cryptoMethod
|
||||
);
|
||||
|
||||
// cache a copy to speed up the next lookup, but prevent unbounded cache growth
|
||||
if (_hiCacheCount >= 200) {
|
||||
_hiCachePurge();
|
||||
}
|
||||
|
||||
_hiCache[key] = saltedData;
|
||||
_hiCacheCount += 1;
|
||||
return saltedData;
|
||||
}
|
||||
|
||||
/**
|
||||
* Authenticate
|
||||
* @method
|
||||
* @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
|
||||
* @param {[]Connections} connections Connections to authenticate using this authenticator
|
||||
* @param {string} db Name of the database
|
||||
* @param {string} username Username
|
||||
* @param {string} password Password
|
||||
* @param {authResultCallback} callback The callback to return the result from the authentication
|
||||
* @return {object}
|
||||
*/
|
||||
ScramSHA.prototype.auth = function(server, connections, db, username, password, callback) {
|
||||
var self = this;
|
||||
// Total connections
|
||||
var count = connections.length;
|
||||
if (count === 0) return callback(null, null);
|
||||
|
||||
// Valid connections
|
||||
var numberOfValidConnections = 0;
|
||||
var errorObject = null;
|
||||
|
||||
const cryptoMethod = this.cryptoMethod;
|
||||
let mechanism = 'SCRAM-SHA-1';
|
||||
let processedPassword;
|
||||
|
||||
if (cryptoMethod === 'sha256') {
|
||||
mechanism = 'SCRAM-SHA-256';
|
||||
|
||||
let saslprepFn = (server.s && server.s.saslprep) || saslprep;
|
||||
|
||||
if (saslprepFn) {
|
||||
processedPassword = saslprepFn(password);
|
||||
} else {
|
||||
console.warn('Warning: no saslprep library specified. Passwords will not be sanitized');
|
||||
processedPassword = password;
|
||||
}
|
||||
} else {
|
||||
processedPassword = passwordDigest(username, password);
|
||||
}
|
||||
|
||||
// Execute MongoCR
|
||||
var executeScram = function(connection) {
|
||||
// Clean up the user
|
||||
username = username.replace('=', '=3D').replace(',', '=2C');
|
||||
|
||||
// Create a random nonce
|
||||
var nonce = crypto.randomBytes(24).toString('base64');
|
||||
// var nonce = 'MsQUY9iw0T9fx2MUEz6LZPwGuhVvWAhc'
|
||||
|
||||
// NOTE: This is done b/c Javascript uses UTF-16, but the server is hashing in UTF-8.
|
||||
// Since the username is not sasl-prep-d, we need to do this here.
|
||||
const firstBare = Buffer.concat([
|
||||
Buffer.from('n=', 'utf8'),
|
||||
Buffer.from(username, 'utf8'),
|
||||
Buffer.from(',r=', 'utf8'),
|
||||
Buffer.from(nonce, 'utf8')
|
||||
]);
|
||||
|
||||
// Build command structure
|
||||
var cmd = {
|
||||
saslStart: 1,
|
||||
mechanism: mechanism,
|
||||
payload: new Binary(Buffer.concat([Buffer.from('n,,', 'utf8'), firstBare])),
|
||||
autoAuthorize: 1
|
||||
};
|
||||
|
||||
// Handle the error
|
||||
var handleError = function(err, r) {
|
||||
if (err) {
|
||||
numberOfValidConnections = numberOfValidConnections - 1;
|
||||
errorObject = err;
|
||||
return false;
|
||||
} else if (r.result['$err']) {
|
||||
errorObject = r.result;
|
||||
return false;
|
||||
} else if (r.result['errmsg']) {
|
||||
errorObject = r.result;
|
||||
return false;
|
||||
} else {
|
||||
numberOfValidConnections = numberOfValidConnections + 1;
|
||||
}
|
||||
|
||||
return true;
|
||||
};
|
||||
|
||||
// Finish up
|
||||
var finish = function(_count, _numberOfValidConnections) {
|
||||
if (_count === 0 && _numberOfValidConnections > 0) {
|
||||
// Store the auth details
|
||||
addAuthSession(self.authStore, new AuthSession(db, username, password));
|
||||
// Return correct authentication
|
||||
return callback(null, true);
|
||||
} else if (_count === 0) {
|
||||
if (errorObject == null)
|
||||
errorObject = new MongoError(f('failed to authenticate using scram'));
|
||||
return callback(errorObject, false);
|
||||
}
|
||||
};
|
||||
|
||||
var handleEnd = function(_err, _r) {
|
||||
// Handle any error
|
||||
handleError(_err, _r);
|
||||
// Adjust the number of connections
|
||||
count = count - 1;
|
||||
// Execute the finish
|
||||
finish(count, numberOfValidConnections);
|
||||
};
|
||||
|
||||
// Write the commmand on the connection
|
||||
server(
|
||||
connection,
|
||||
new Query(self.bson, f('%s.$cmd', db), cmd, {
|
||||
numberToSkip: 0,
|
||||
numberToReturn: 1
|
||||
}),
|
||||
function(err, r) {
|
||||
// Do we have an error, handle it
|
||||
if (handleError(err, r) === false) {
|
||||
count = count - 1;
|
||||
|
||||
if (count === 0 && numberOfValidConnections > 0) {
|
||||
// Store the auth details
|
||||
addAuthSession(self.authStore, new AuthSession(db, username, password));
|
||||
// Return correct authentication
|
||||
return callback(null, true);
|
||||
} else if (count === 0) {
|
||||
if (errorObject == null)
|
||||
errorObject = new MongoError(f('failed to authenticate using scram'));
|
||||
return callback(errorObject, false);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// Get the dictionary
|
||||
var dict = parsePayload(r.result.payload.value());
|
||||
|
||||
// Unpack dictionary
|
||||
var iterations = parseInt(dict.i, 10);
|
||||
var salt = dict.s;
|
||||
var rnonce = dict.r;
|
||||
|
||||
// Set up start of proof
|
||||
var withoutProof = f('c=biws,r=%s', rnonce);
|
||||
var saltedPassword = HI(
|
||||
processedPassword,
|
||||
new Buffer(salt, 'base64'),
|
||||
iterations,
|
||||
cryptoMethod
|
||||
);
|
||||
|
||||
if (iterations && iterations < 4096) {
|
||||
const error = new MongoError(`Server returned an invalid iteration count ${iterations}`);
|
||||
return callback(error, false);
|
||||
}
|
||||
|
||||
// Create the client key
|
||||
const clientKey = HMAC(cryptoMethod, saltedPassword, 'Client Key');
|
||||
|
||||
// Create the stored key
|
||||
const storedKey = H(cryptoMethod, clientKey);
|
||||
|
||||
// Create the authentication message
|
||||
const authMessage = [
|
||||
firstBare,
|
||||
r.result.payload.value().toString('base64'),
|
||||
withoutProof
|
||||
].join(',');
|
||||
|
||||
// Create client signature
|
||||
const clientSignature = HMAC(cryptoMethod, storedKey, authMessage);
|
||||
|
||||
// Create client proof
|
||||
const clientProof = f('p=%s', xor(clientKey, clientSignature));
|
||||
|
||||
// Create client final
|
||||
const clientFinal = [withoutProof, clientProof].join(',');
|
||||
|
||||
// Create continue message
|
||||
const cmd = {
|
||||
saslContinue: 1,
|
||||
conversationId: r.result.conversationId,
|
||||
payload: new Binary(new Buffer(clientFinal))
|
||||
};
|
||||
|
||||
//
|
||||
// Execute sasl continue
|
||||
// Write the commmand on the connection
|
||||
server(
|
||||
connection,
|
||||
new Query(self.bson, f('%s.$cmd', db), cmd, {
|
||||
numberToSkip: 0,
|
||||
numberToReturn: 1
|
||||
}),
|
||||
function(err, r) {
|
||||
if (r && r.result.done === false) {
|
||||
var cmd = {
|
||||
saslContinue: 1,
|
||||
conversationId: r.result.conversationId,
|
||||
payload: new Buffer(0)
|
||||
};
|
||||
|
||||
// Write the commmand on the connection
|
||||
server(
|
||||
connection,
|
||||
new Query(self.bson, f('%s.$cmd', db), cmd, {
|
||||
numberToSkip: 0,
|
||||
numberToReturn: 1
|
||||
}),
|
||||
function(err, r) {
|
||||
handleEnd(err, r);
|
||||
}
|
||||
);
|
||||
} else {
|
||||
handleEnd(err, r);
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
);
|
||||
};
|
||||
|
||||
var _execute = function(_connection) {
|
||||
process.nextTick(function() {
|
||||
executeScram(_connection);
|
||||
});
|
||||
};
|
||||
|
||||
// For each connection we need to authenticate
|
||||
while (connections.length > 0) {
|
||||
_execute(connections.shift());
|
||||
}
|
||||
};
|
||||
|
||||
// Add an auth session to the store, de-duplicating via AuthSession.equal.
var addAuthSession = function(authStore, session) {
  for (var i = 0; i < authStore.length; i++) {
    // An equivalent session is already stored; nothing to do.
    if (authStore[i].equal(session)) return;
  }

  authStore.push(session);
};
|
||||
|
||||
/**
 * Remove all stored credentials for a given database from the auth store.
 * @method
 * @param {string} dbName Name of database we are removing authStore details about
 * @return {object}
 */
ScramSHA.prototype.logout = function(dbName) {
  var remaining = [];
  for (var i = 0; i < this.authStore.length; i++) {
    if (this.authStore[i].db !== dbName) remaining.push(this.authStore[i]);
  }
  this.authStore = remaining;
};
|
||||
|
||||
/**
 * Re authenticate pool
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {[]Connections} connections Connections to authenticate using this authenticator
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
ScramSHA.prototype.reauthenticate = function(server, connections, callback) {
  // Work on a snapshot of the stored sessions.
  var sessions = this.authStore.slice(0);
  var pending = sessions.length;
  // Nothing stored, nothing to re-authenticate.
  if (pending === 0) return callback(null, null);

  // Fire an auth attempt for each stored session; the callback runs once
  // the final attempt finishes (reporting only that attempt's error, if any).
  for (var i = 0; i < sessions.length; i++) {
    this.auth(
      server,
      connections,
      sessions[i].db,
      sessions[i].username,
      sessions[i].password,
      function(err) {
        pending = pending - 1;
        if (pending === 0) {
          callback(err, null);
        }
      }
    );
  }
};
|
||||
|
||||
// Concrete SCRAM mechanism using SHA-1 digests (SCRAM-SHA-1).
class ScramSHA1 extends ScramSHA {
  constructor(bson) {
    super(bson, 'sha1');
  }
}

// Concrete SCRAM mechanism using SHA-256 digests (SCRAM-SHA-256).
class ScramSHA256 extends ScramSHA {
  constructor(bson) {
    super(bson, 'sha256');
  }
}

module.exports = { ScramSHA1, ScramSHA256 };
|
304
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/sspi.js
generated
vendored
Normal file
304
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/sspi.js
generated
vendored
Normal file
@@ -0,0 +1,304 @@
|
||||
'use strict';
|
||||
|
||||
var f = require('util').format,
|
||||
require_optional = require('require_optional'),
|
||||
Query = require('../connection/commands').Query,
|
||||
MongoError = require('../error').MongoError;
|
||||
|
||||
// Credential record remembered after a successful authentication, so the
// pool can re-authenticate new connections later.
var AuthSession = function(db, username, password, options) {
  this.db = db;
  this.username = username;
  this.password = password;
  this.options = options;
};

// Sessions match when database, username and password are all identical
// (options are intentionally not compared).
AuthSession.prototype.equal = function(other) {
  if (other.db !== this.db) return false;
  if (other.username !== this.username) return false;
  return other.password === this.password;
};
|
||||
|
||||
// Kerberos class
|
||||
var Kerberos = null;
|
||||
var MongoAuthProcess = null;
|
||||
|
||||
// Try to grab the Kerberos class
|
||||
try {
|
||||
Kerberos = require_optional('kerberos').Kerberos;
|
||||
// Authentication process for Mongo
|
||||
MongoAuthProcess = require_optional('kerberos').processes.MongoAuthProcess;
|
||||
} catch (err) {} // eslint-disable-line
|
||||
|
||||
/**
 * Creates a new SSPI authentication mechanism (Kerberos via the optional
 * `kerberos` native module).
 * @class
 * @param {BSON} bson BSON serializer used to encode wire-protocol commands
 * @return {SSPI} A cursor instance
 */
var SSPI = function(bson) {
  // BSON serializer instance used to build Query messages.
  this.bson = bson;
  // Credentials from prior successful authentications, kept for re-auth.
  this.authStore = [];
};
|
||||
|
||||
/**
 * Authenticate every connection in `connections` using SSPI/Kerberos.
 * Succeeds (callback(null, true)) when at least one connection
 * authenticates; otherwise reports the last recorded error.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {[]Connections} connections Connections to authenticate using this authenticator (consumed destructively via shift)
 * @param {string} db Name of the database
 * @param {string} username Username
 * @param {string} password Password
 * @param {object} options Auth options; `gssapiServiceName` defaults to 'mongodb'
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
SSPI.prototype.auth = function(server, connections, db, username, password, options, callback) {
  var self = this;
  // We don't have the Kerberos library
  if (Kerberos == null) return callback(new Error('Kerberos library is not installed'));
  var gssapiServiceName = options['gssapiServiceName'] || 'mongodb';
  // Total connections
  var count = connections.length;
  if (count === 0) return callback(null, null);

  // Valid connections
  var numberOfValidConnections = 0;
  var errorObject = null;

  // For each connection we need to authenticate
  while (connections.length > 0) {
    // Run the multi-step SSPI handshake for a single connection.
    var execute = function(connection) {
      // Start Auth process for a connection
      SSIPAuthenticate(
        self,
        username,
        password,
        gssapiServiceName,
        server,
        connection,
        options,
        function(err, r) {
          // One more connection finished (successfully or not).
          count = count - 1;

          // Record any transport error or server-reported error; only a
          // clean reply counts as a valid connection.
          if (err) {
            errorObject = err;
          } else if (r && typeof r === 'object' && r.result['$err']) {
            errorObject = r.result;
          } else if (r && typeof r === 'object' && r.result['errmsg']) {
            errorObject = r.result;
          } else {
            numberOfValidConnections = numberOfValidConnections + 1;
          }

          // All connections processed and at least one succeeded.
          if (count === 0 && numberOfValidConnections > 0) {
            // Store the auth details
            addAuthSession(self.authStore, new AuthSession(db, username, password, options));
            // Return correct authentication
            callback(null, true);
          } else if (count === 0) {
            // NOTE(review): message says "mongocr" — looks copy-pasted from
            // the MongoCR authenticator; confirm before changing (callers
            // may match on it).
            if (errorObject == null)
              errorObject = new MongoError(f('failed to authenticate using mongocr'));
            callback(errorObject, false);
          }
        }
      );
    };

    // Defer each handshake to the next tick so the caller always observes
    // asynchronous completion.
    var _execute = function(_connection) {
      process.nextTick(function() {
        execute(_connection);
      });
    };

    _execute(connections.shift());
  }
};
|
||||
|
||||
/**
 * Drive the fixed four-round SSPI/GSSAPI SASL conversation against mongod:
 * saslStart, then three saslContinue steps fed by MongoAuthProcess
 * transitions, finishing when the server replies with done.
 *
 * @param {SSPI} self The SSPI authenticator (supplies the BSON instance)
 * @param {string} username Username
 * @param {string} password Password
 * @param {string} gssapiServiceName Kerberos service name (e.g. 'mongodb')
 * @param {function} server Function used to write a Query on a connection
 * @param {Connection} connection Connection to authenticate
 * @param {object} options Options forwarded to MongoAuthProcess
 * @param {function} callback callback(err, success)
 */
var SSIPAuthenticate = function(
  self,
  username,
  password,
  gssapiServiceName,
  server,
  connection,
  options,
  callback
) {
  // Build Authentication command to send to MongoDB
  var command = {
    saslStart: 1,
    mechanism: 'GSSAPI',
    payload: '',
    autoAuthorize: 1
  };

  // Native Kerberos state machine that produces each round's payload.
  var mongo_auth_process = new MongoAuthProcess(
    connection.host,
    connection.port,
    gssapiServiceName,
    options
  );

  // Execute first sasl step
  server(
    connection,
    new Query(self.bson, '$external.$cmd', command, {
      numberToSkip: 0,
      numberToReturn: 1
    }),
    function(err, r) {
      if (err) return callback(err, false);
      var doc = r.result;

      mongo_auth_process.init(username, password, function(err) {
        if (err) return callback(err);

        // Round 1: feed server payload to Kerberos, send result back.
        mongo_auth_process.transition(doc.payload, function(err, payload) {
          if (err) return callback(err);

          // Perform the next step against mongod
          var command = {
            saslContinue: 1,
            conversationId: doc.conversationId,
            payload: payload
          };

          // Execute the command
          server(
            connection,
            new Query(self.bson, '$external.$cmd', command, {
              numberToSkip: 0,
              numberToReturn: 1
            }),
            function(err, r) {
              if (err) return callback(err, false);
              var doc = r.result;

              // Round 2 of the Kerberos transition.
              mongo_auth_process.transition(doc.payload, function(err, payload) {
                if (err) return callback(err);

                // Perform the next step against mongod
                var command = {
                  saslContinue: 1,
                  conversationId: doc.conversationId,
                  payload: payload
                };

                // Execute the command
                server(
                  connection,
                  new Query(self.bson, '$external.$cmd', command, {
                    numberToSkip: 0,
                    numberToReturn: 1
                  }),
                  function(err, r) {
                    if (err) return callback(err, false);
                    var doc = r.result;

                    // Round 3. NOTE(review): unlike the earlier rounds,
                    // `err` is not checked here before continuing —
                    // presumably an oversight; verify against upstream.
                    mongo_auth_process.transition(doc.payload, function(err, payload) {
                      // Perform the next step against mongod
                      var command = {
                        saslContinue: 1,
                        conversationId: doc.conversationId,
                        payload: payload
                      };

                      // Execute the command
                      server(
                        connection,
                        new Query(self.bson, '$external.$cmd', command, {
                          numberToSkip: 0,
                          numberToReturn: 1
                        }),
                        function(err, r) {
                          if (err) return callback(err, false);
                          var doc = r.result;

                          // Server signals completion with done: true.
                          if (doc.done) return callback(null, true);
                          callback(new Error('Authentication failed'), false);
                        }
                      );
                    });
                  }
                );
              });
            }
          );
        });
      });
    }
  );
};
|
||||
|
||||
// Add an auth session to the store, de-duplicating via AuthSession.equal.
var addAuthSession = function(authStore, session) {
  for (var i = 0; i < authStore.length; i++) {
    // An equivalent session is already present; keep the store unchanged.
    if (authStore[i].equal(session)) return;
  }

  authStore.push(session);
};
|
||||
|
||||
/**
 * Remove all stored credentials for a given database from the auth store.
 * @method
 * @param {string} dbName Name of database we are removing authStore details about
 * @return {object}
 */
SSPI.prototype.logout = function(dbName) {
  var remaining = [];
  for (var i = 0; i < this.authStore.length; i++) {
    if (this.authStore[i].db !== dbName) remaining.push(this.authStore[i]);
  }
  this.authStore = remaining;
};
|
||||
|
||||
/**
 * Re authenticate pool
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {[]Connections} connections Connections to authenticate using this authenticator
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
SSPI.prototype.reauthenticate = function(server, connections, callback) {
  // Work on a snapshot of the stored sessions.
  var sessions = this.authStore.slice(0);
  var pending = sessions.length;
  // Nothing stored, nothing to re-authenticate.
  if (pending === 0) return callback(null, null);

  // Fire an auth attempt for each stored session; the callback runs once
  // the final attempt finishes (reporting only that attempt's error, if any).
  for (var i = 0; i < sessions.length; i++) {
    this.auth(
      server,
      connections,
      sessions[i].db,
      sessions[i].username,
      sessions[i].password,
      sessions[i].options,
      function(err) {
        pending = pending - 1;
        if (pending === 0) {
          callback(err, null);
        }
      }
    );
  }
};
|
||||
|
||||
/**
|
||||
* This is a result from a authentication strategy
|
||||
*
|
||||
* @callback authResultCallback
|
||||
* @param {error} error An error object. Set to null if no error present
|
||||
* @param {boolean} result The result of the authentication process
|
||||
*/
|
||||
|
||||
module.exports = SSPI;
|
179
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/x509.js
generated
vendored
Normal file
179
ProjectNow/NodeServer/node_modules/mongodb-core/lib/auth/x509.js
generated
vendored
Normal file
@@ -0,0 +1,179 @@
|
||||
'use strict';
|
||||
|
||||
var f = require('util').format,
|
||||
Query = require('../connection/commands').Query,
|
||||
MongoError = require('../error').MongoError;
|
||||
|
||||
// Credential record remembered after a successful authentication, so the
// pool can re-authenticate new connections later.
var AuthSession = function(db, username, password) {
  this.db = db;
  this.username = username;
  this.password = password;
};

// Sessions match when database, username and password are all identical.
AuthSession.prototype.equal = function(other) {
  if (other.db !== this.db) return false;
  if (other.username !== this.username) return false;
  return other.password === this.password;
};
|
||||
|
||||
/**
 * Creates a new X509 authentication mechanism (MONGODB-X509, client
 * certificate based).
 * @class
 * @param {BSON} bson BSON serializer used to encode wire-protocol commands
 * @return {X509} A cursor instance
 */
var X509 = function(bson) {
  // BSON serializer instance used to build Query messages.
  this.bson = bson;
  // Credentials from prior successful authentications, kept for re-auth.
  this.authStore = [];
};
|
||||
|
||||
/**
 * Authenticate every connection in `connections` using MONGODB-X509.
 * Succeeds (callback(null, true)) when at least one connection
 * authenticates; otherwise reports the last recorded error.
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {[]Connections} connections Connections to authenticate using this authenticator (consumed destructively via shift)
 * @param {string} db Name of the database
 * @param {string} username Username (certificate subject); optional
 * @param {string} password Password (unused by X509 but stored for re-auth)
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
X509.prototype.auth = function(server, connections, db, username, password, callback) {
  var self = this;
  // Total connections
  var count = connections.length;
  if (count === 0) return callback(null, null);

  // Valid connections
  var numberOfValidConnections = 0;
  var errorObject = null;

  // For each connection we need to authenticate
  while (connections.length > 0) {
    // Authenticate a single connection. X509 relies on the TLS client
    // certificate; the command only names the mechanism (and, optionally,
    // the certificate subject as the user).
    var execute = function(connection) {
      var command = {
        authenticate: 1,
        mechanism: 'MONGODB-X509'
      };

      // Add username if specified
      if (username) {
        command.user = username;
      }

      // Send the authenticate command on the $external database
      server(
        connection,
        new Query(self.bson, '$external.$cmd', command, {
          numberToSkip: 0,
          numberToReturn: 1
        }),
        function(err, r) {
          // One more connection finished (successfully or not).
          count = count - 1;

          // Record any transport error or server-reported error; only a
          // clean reply counts as a valid connection.
          if (err) {
            errorObject = err;
          } else if (r.result['$err']) {
            errorObject = r.result;
          } else if (r.result['errmsg']) {
            errorObject = r.result;
          } else {
            numberOfValidConnections = numberOfValidConnections + 1;
          }

          // All connections processed and at least one succeeded.
          if (count === 0 && numberOfValidConnections > 0) {
            // Store the auth details for later re-authentication
            addAuthSession(self.authStore, new AuthSession(db, username, password));
            // Return correct authentication
            callback(null, true);
          } else if (count === 0) {
            // BUGFIX: the fallback message previously read "failed to
            // authenticate using mongocr" — copy-pasted from the MongoCR
            // authenticator; name the actual mechanism.
            if (errorObject == null)
              errorObject = new MongoError(f('failed to authenticate using x509'));
            callback(errorObject, false);
          }
        }
      );
    };

    // Defer each attempt to the next tick so the caller always observes
    // asynchronous completion.
    var _execute = function(_connection) {
      process.nextTick(function() {
        execute(_connection);
      });
    };

    _execute(connections.shift());
  }
};
|
||||
|
||||
// Add an auth session to the store, de-duplicating via AuthSession.equal.
var addAuthSession = function(authStore, session) {
  for (var i = 0; i < authStore.length; i++) {
    // An equivalent session is already present; keep the store unchanged.
    if (authStore[i].equal(session)) return;
  }

  authStore.push(session);
};
|
||||
|
||||
/**
 * Remove all stored credentials for a given database from the auth store.
 * @method
 * @param {string} dbName Name of database we are removing authStore details about
 * @return {object}
 */
X509.prototype.logout = function(dbName) {
  var remaining = [];
  for (var i = 0; i < this.authStore.length; i++) {
    if (this.authStore[i].db !== dbName) remaining.push(this.authStore[i]);
  }
  this.authStore = remaining;
};
|
||||
|
||||
/**
 * Re authenticate pool
 * @method
 * @param {{Server}|{ReplSet}|{Mongos}} server Topology the authentication method is being called on
 * @param {[]Connections} connections Connections to authenticate using this authenticator
 * @param {authResultCallback} callback The callback to return the result from the authentication
 * @return {object}
 */
X509.prototype.reauthenticate = function(server, connections, callback) {
  // Work on a snapshot of the stored sessions.
  var sessions = this.authStore.slice(0);
  var pending = sessions.length;
  // Nothing stored, nothing to re-authenticate.
  if (pending === 0) return callback(null, null);

  // Fire an auth attempt for each stored session; the callback runs once
  // the final attempt finishes (reporting only that attempt's error, if any).
  for (var i = 0; i < sessions.length; i++) {
    this.auth(
      server,
      connections,
      sessions[i].db,
      sessions[i].username,
      sessions[i].password,
      function(err) {
        pending = pending - 1;
        if (pending === 0) {
          callback(err, null);
        }
      }
    );
  }
};
|
||||
|
||||
/**
|
||||
* This is a result from a authentication strategy
|
||||
*
|
||||
* @callback authResultCallback
|
||||
* @param {error} error An error object. Set to null if no error present
|
||||
* @param {boolean} result The result of the authentication process
|
||||
*/
|
||||
|
||||
module.exports = X509;
|
228
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/apm.js
generated
vendored
Normal file
228
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/apm.js
generated
vendored
Normal file
@@ -0,0 +1,228 @@
|
||||
'use strict';
|
||||
const KillCursor = require('../connection/commands').KillCursor;
|
||||
const GetMore = require('../connection/commands').GetMore;
|
||||
const process = require('process');
|
||||
|
||||
/** Commands whose payloads are redacted from APM events because they carry credentials */
const SENSITIVE_COMMANDS = new Set([
  'authenticate',
  'saslStart',
  'saslContinue',
  'getnonce',
  'createUser',
  'updateUser',
  'copydbgetnonce',
  'copydbsaslstart',
  'copydb'
]);

// -- small helpers used by the APM event classes below --

// First key of a command document is its command name.
function extractCommandName(command) {
  return Object.keys(command)[0];
}

// Milliseconds elapsed since a process.hrtime() tuple `started`.
function calculateDurationInMs(started) {
  const delta = process.hrtime(started);
  return (delta[0] * 1e9 + delta[1]) / 1e6;
}

// Namespace accessors: "db.collection" and its two halves.
function namespace(command) {
  return command.ns;
}
function databaseName(command) {
  return command.ns.split('.')[0];
}
function collectionName(command) {
  return command.ns.split('.')[1];
}

// "host:port" identifier for the pool's connection.
function generateConnectionId(pool) {
  return `${pool.options.host}:${pool.options.port}`;
}

// Replace the result with an empty object for sensitive commands.
function maybeRedact(commandName, result) {
  return SENSITIVE_COMMANDS.has(commandName) ? {} : result;
}
|
||||
|
||||
// Maps legacy OP_QUERY "$"-prefixed query modifiers to their modern
// find-command option names; used when upconverting for APM events.
const LEGACY_FIND_QUERY_MAP = {
  $query: 'filter',
  $orderby: 'sort',
  $hint: 'hint',
  $comment: 'comment',
  $maxScan: 'maxScan',
  $max: 'max',
  $min: 'min',
  $returnKey: 'returnKey',
  $showDiskLoc: 'showRecordId',
  $maxTimeMS: 'maxTimeMS',
  $snapshot: 'snapshot'
};

// Maps legacy wire-protocol option names to find-command option names.
const LEGACY_FIND_OPTIONS_MAP = {
  numberToSkip: 'skip',
  numberToReturn: 'batchSize',
  returnFieldsSelector: 'projection'
};

// OP_QUERY boolean flags copied verbatim onto the upconverted command.
const OP_QUERY_KEYS = [
  'tailable',
  'oplogReplay',
  'noCursorTimeout',
  'awaitData',
  'partial',
  'exhaust'
];
|
||||
|
||||
/**
 * Extract the actual command from the query, possibly upconverting if it's a legacy
 * format. GetMore/KillCursor wire messages are rewritten as their modern
 * command equivalents; legacy $query-style finds become find commands.
 *
 * @param {Object} command the command (or GetMore/KillCursor wire message)
 * @returns {Object} a command document suitable for APM reporting
 */
const extractCommand = command => {
  // Wire-level getMore: synthesize a modern getMore command document.
  if (command instanceof GetMore) {
    return {
      getMore: command.cursorId,
      collection: collectionName(command),
      batchSize: command.numberToReturn
    };
  }

  // Wire-level killCursors: synthesize a modern killCursors command.
  if (command instanceof KillCursor) {
    return {
      killCursors: collectionName(command),
      cursors: command.cursorIds
    };
  }

  if (command.query && command.query.$query) {
    // upconvert legacy find command
    const result = { find: collectionName(command) };
    // Translate $-prefixed query modifiers to find-command options.
    Object.keys(LEGACY_FIND_QUERY_MAP).forEach(key => {
      if (typeof command.query[key] !== 'undefined')
        result[LEGACY_FIND_QUERY_MAP[key]] = command.query[key];
    });

    // Translate wire-protocol options (skip/batchSize/projection).
    Object.keys(LEGACY_FIND_OPTIONS_MAP).forEach(key => {
      if (typeof command.options[key] !== 'undefined')
        result[LEGACY_FIND_OPTIONS_MAP[key]] = command.options[key];
    });

    // Copy OP_QUERY boolean flags straight through.
    OP_QUERY_KEYS.forEach(key => {
      if (command[key]) result[key] = command[key];
    });

    if (typeof command.pre32Limit !== 'undefined') {
      result.limit = command.pre32Limit;
    }

    // $explain wraps the whole upconverted find in an explain command.
    if (command.query.$explain) {
      return { explain: result };
    }

    return result;
  }

  // Plain command query, or already a command document.
  return command.query ? command.query : command;
};
|
||||
|
||||
/**
 * Build the reply document reported in an APM event for a given command,
 * upconverting legacy getMore/killCursors/find replies into their modern
 * command-reply shapes.
 *
 * @param {Object} command the originating command or wire message
 * @param {Object} reply the raw reply received for the command
 * @returns {Object} a reply document suitable for APM reporting
 */
const extractReply = (command, reply) => {
  // getMore replies become a modern { cursor: { nextBatch } } document.
  if (command instanceof GetMore) {
    return {
      ok: 1,
      cursor: {
        id: reply.message.cursorId,
        ns: namespace(command),
        nextBatch: reply.message.documents
      }
    };
  }

  // killCursors has no meaningful server reply body; synthesize one.
  if (command instanceof KillCursor) {
    return {
      ok: 1,
      cursorsUnknown: command.cursorIds
    };
  }

  // Legacy find: wrap the returned documents as a firstBatch cursor reply.
  const isLegacyFind = command.query && typeof command.query.$query !== 'undefined';
  if (isLegacyFind) {
    return {
      ok: 1,
      cursor: {
        id: reply.message.cursorId,
        ns: namespace(command),
        firstBatch: reply.message.documents
      }
    };
  }

  return reply.result;
};
|
||||
|
||||
/** An event indicating the start of a given command */
class CommandStartedEvent {
  /**
   * Create a started event
   *
   * @param {Pool} pool the pool that originated the command
   * @param {Object} command the command
   */
  constructor(pool, command) {
    const cmd = extractCommand(command);
    const commandName = extractCommandName(cmd);

    // NOTE: remove in major revision, this is not spec behavior
    if (SENSITIVE_COMMANDS.has(commandName)) {
      this.commandObj = {};
      this.commandObj[commandName] = true;
    }

    this.command = cmd;
    this.databaseName = databaseName(command);
    this.commandName = commandName;
    this.requestId = command.requestId;
    this.connectionId = generateConnectionId(pool);
  }
}
|
||||
|
||||
/** An event indicating the success of a given command */
class CommandSucceededEvent {
  /**
   * Create a succeeded event
   *
   * @param {Pool} pool the pool that originated the command
   * @param {Object} command the command
   * @param {Object} reply the reply for this command from the server
   * @param {Array} started a high resolution tuple timestamp of when the command was first sent, to calculate duration
   */
  constructor(pool, command, reply, started) {
    const cmd = extractCommand(command);
    const commandName = extractCommandName(cmd);

    this.duration = calculateDurationInMs(started);
    this.commandName = commandName;
    // Sensitive commands report an empty reply instead of their payload.
    this.reply = maybeRedact(commandName, extractReply(command, reply));
    this.requestId = command.requestId;
    this.connectionId = generateConnectionId(pool);
  }
}
|
||||
|
||||
/** An event indicating the failure of a given command */
class CommandFailedEvent {
  /**
   * Create a failure event
   *
   * @param {Pool} pool the pool that originated the command
   * @param {Object} command the command
   * @param {MongoError|Object} error the generated error or a server error response
   * @param {Array} started a high resolution tuple timestamp of when the command was first sent, to calculate duration
   */
  constructor(pool, command, error, started) {
    const cmd = extractCommand(command);
    const commandName = extractCommandName(cmd);

    this.duration = calculateDurationInMs(started);
    this.commandName = commandName;
    // Sensitive commands report an empty failure instead of their payload.
    this.failure = maybeRedact(commandName, error);
    this.requestId = command.requestId;
    this.connectionId = generateConnectionId(pool);
  }
}
|
||||
|
||||
module.exports = {
|
||||
CommandStartedEvent,
|
||||
CommandSucceededEvent,
|
||||
CommandFailedEvent
|
||||
};
|
34
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/command_result.js
generated
vendored
Normal file
34
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/command_result.js
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
'use strict';
|
||||
|
||||
/**
 * Creates a new CommandResult instance
 * @class
 * @param {object} result CommandResult object
 * @param {Connection} connection A connection instance associated with this result
 * @param {object} message The raw wire-protocol message for this result
 * @return {CommandResult} A cursor instance
 */
var CommandResult = function(result, connection, message) {
  this.result = result;
  this.connection = connection;
  this.message = message;
};

/**
 * Convert CommandResult to JSON
 * @method
 * @return {object} the raw result document
 */
CommandResult.prototype.toJSON = function() {
  return this.result;
};

/**
 * Convert CommandResult to String representation
 * @method
 * @return {string} the result document serialized as JSON
 */
CommandResult.prototype.toString = function() {
  var json = this.toJSON();
  return JSON.stringify(json);
};
|
||||
|
||||
module.exports = CommandResult;
|
535
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/commands.js
generated
vendored
Normal file
535
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/commands.js
generated
vendored
Normal file
@@ -0,0 +1,535 @@
|
||||
'use strict';
|
||||
|
||||
var retrieveBSON = require('./utils').retrieveBSON;
|
||||
var BSON = retrieveBSON();
|
||||
var Long = BSON.Long;
|
||||
|
||||
// Incrementing request id
|
||||
var _requestId = 0;
|
||||
|
||||
// Wire command operation ids
|
||||
var opcodes = require('../wireprotocol/shared').opcodes;
|
||||
|
||||
// Query flags
|
||||
var OPTS_TAILABLE_CURSOR = 2;
|
||||
var OPTS_SLAVE = 4;
|
||||
var OPTS_OPLOG_REPLAY = 8;
|
||||
var OPTS_NO_CURSOR_TIMEOUT = 16;
|
||||
var OPTS_AWAIT_DATA = 32;
|
||||
var OPTS_EXHAUST = 64;
|
||||
var OPTS_PARTIAL = 128;
|
||||
|
||||
// Response flags
|
||||
var CURSOR_NOT_FOUND = 0;
|
||||
var QUERY_FAILURE = 2;
|
||||
var SHARD_CONFIG_STALE = 4;
|
||||
var AWAIT_CAPABLE = 8;
|
||||
|
||||
/**************************************************************
 * QUERY
 **************************************************************/
/**
 * Wire-protocol OP_QUERY message builder.
 *
 * @param {BSON} bson BSON serializer instance
 * @param {string} ns Full namespace ("db.collection") the query targets
 * @param {object} query The query (or command) document
 * @param {object} [options] Wire options (numberToSkip, numberToReturn,
 *   returnFieldSelector, serializeFunctions, ignoreUndefined, maxBsonSize,
 *   checkKeys, slaveOk, pre32Limit)
 * @throws {Error} when ns or query is missing, or ns contains a NUL byte
 */
var Query = function(bson, ns, query, options) {
  var self = this;
  // Basic options needed to be passed in
  if (ns == null) throw new Error('ns must be specified for query');
  if (query == null) throw new Error('query must be specified for query');

  // Validate that we are not passing 0x00 in the collection name
  if (ns.indexOf('\x00') !== -1) {
    throw new Error('namespace cannot contain a null character');
  }

  // Basic options
  this.bson = bson;
  this.ns = ns;
  this.query = query;

  // Ensure empty options. BUGFIX: normalize the local `options` too —
  // the reads below previously dereferenced the raw argument, throwing a
  // TypeError whenever the caller omitted the options argument.
  options = options || {};
  this.options = options;

  // Additional options
  this.numberToSkip = options.numberToSkip || 0;
  this.numberToReturn = options.numberToReturn || 0;
  this.returnFieldSelector = options.returnFieldSelector || null;
  this.requestId = Query.getRequestId();

  // special case for pre-3.2 find commands, delete ASAP
  this.pre32Limit = options.pre32Limit;

  // Serialization options (booleans must be explicitly typed as such)
  this.serializeFunctions =
    typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
  this.ignoreUndefined =
    typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;
  this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
  this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false;
  this.batchSize = self.numberToReturn;

  // Wire flags; all but slaveOk start cleared and are toggled by callers.
  this.tailable = false;
  this.slaveOk = typeof options.slaveOk === 'boolean' ? options.slaveOk : false;
  this.oplogReplay = false;
  this.noCursorTimeout = false;
  this.awaitData = false;
  this.exhaust = false;
  this.partial = false;
};
|
||||
|
||||
//
|
||||
// Assign a new request Id
|
||||
// Take the current global request id for this query, then advance the counter.
Query.prototype.incRequestId = function() {
  var current = _requestId;
  _requestId = current + 1;
  this.requestId = current;
};
|
||||
|
||||
//
|
||||
// Assign a new request Id
|
||||
// Peek at the request id that would be handed out next, without consuming it.
Query.nextRequestId = function() {
  var upcoming = _requestId + 1;
  return upcoming;
};
|
||||
|
||||
//
|
||||
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
|
||||
/**
 * Serialize this OP_QUERY message into an array of buffers:
 * [header+flags+ns+skip/return, query BSON, optional projection BSON].
 * Uses a single pre-sized header buffer to avoid repeated allocations.
 *
 * @returns {Buffer[]} the buffers making up the complete wire message
 */
Query.prototype.toBin = function() {
  var self = this;
  var buffers = [];
  var projection = null;

  // Fold the boolean flag fields into the OP_QUERY flags bitmask
  var flags = 0;
  if (this.tailable) {
    flags |= OPTS_TAILABLE_CURSOR;
  }

  if (this.slaveOk) {
    flags |= OPTS_SLAVE;
  }

  if (this.oplogReplay) {
    flags |= OPTS_OPLOG_REPLAY;
  }

  if (this.noCursorTimeout) {
    flags |= OPTS_NO_CURSOR_TIMEOUT;
  }

  if (this.awaitData) {
    flags |= OPTS_AWAIT_DATA;
  }

  if (this.exhaust) {
    flags |= OPTS_EXHAUST;
  }

  if (this.partial) {
    flags |= OPTS_PARTIAL;
  }

  // If batchSize has been changed since construction it takes precedence
  if (self.batchSize !== self.numberToReturn) self.numberToReturn = self.batchSize;

  // Allocate write protocol header buffer. Buffer.alloc replaces the
  // deprecated, unsafe `new Buffer(size)` constructor (zero-filled).
  var header = Buffer.alloc(
    4 * 4 + // Header
    4 + // Flags
    Buffer.byteLength(self.ns) +
    1 + // namespace
    4 + // numberToSkip
      4 // numberToReturn
  );

  // Add header to buffers
  buffers.push(header);

  // Serialize the query
  var query = self.bson.serialize(this.query, {
    checkKeys: this.checkKeys,
    serializeFunctions: this.serializeFunctions,
    ignoreUndefined: this.ignoreUndefined
  });

  // Add query document
  buffers.push(query);

  if (self.returnFieldSelector && Object.keys(self.returnFieldSelector).length > 0) {
    // Serialize the projection document
    projection = self.bson.serialize(this.returnFieldSelector, {
      checkKeys: this.checkKeys,
      serializeFunctions: this.serializeFunctions,
      ignoreUndefined: this.ignoreUndefined
    });
    // Add projection document
    buffers.push(projection);
  }

  // Total message size
  var totalLength = header.length + query.length + (projection ? projection.length : 0);

  // Set up the index
  var index = 4;

  // Write total document length (little-endian, manual byte writes so
  // overflowing values are masked rather than throwing)
  header[3] = (totalLength >> 24) & 0xff;
  header[2] = (totalLength >> 16) & 0xff;
  header[1] = (totalLength >> 8) & 0xff;
  header[0] = totalLength & 0xff;

  // Write header information requestId
  header[index + 3] = (this.requestId >> 24) & 0xff;
  header[index + 2] = (this.requestId >> 16) & 0xff;
  header[index + 1] = (this.requestId >> 8) & 0xff;
  header[index] = this.requestId & 0xff;
  index = index + 4;

  // Write header information responseTo (always 0 for a request)
  header[index + 3] = (0 >> 24) & 0xff;
  header[index + 2] = (0 >> 16) & 0xff;
  header[index + 1] = (0 >> 8) & 0xff;
  header[index] = 0 & 0xff;
  index = index + 4;

  // Write header information OP_QUERY
  header[index + 3] = (opcodes.OP_QUERY >> 24) & 0xff;
  header[index + 2] = (opcodes.OP_QUERY >> 16) & 0xff;
  header[index + 1] = (opcodes.OP_QUERY >> 8) & 0xff;
  header[index] = opcodes.OP_QUERY & 0xff;
  index = index + 4;

  // Write header information flags
  header[index + 3] = (flags >> 24) & 0xff;
  header[index + 2] = (flags >> 16) & 0xff;
  header[index + 1] = (flags >> 8) & 0xff;
  header[index] = flags & 0xff;
  index = index + 4;

  // Write collection name as a NUL-terminated cstring
  index = index + header.write(this.ns, index, 'utf8') + 1;
  header[index - 1] = 0;

  // Write header information flags numberToSkip
  header[index + 3] = (this.numberToSkip >> 24) & 0xff;
  header[index + 2] = (this.numberToSkip >> 16) & 0xff;
  header[index + 1] = (this.numberToSkip >> 8) & 0xff;
  header[index] = this.numberToSkip & 0xff;
  index = index + 4;

  // Write header information flags numberToReturn
  header[index + 3] = (this.numberToReturn >> 24) & 0xff;
  header[index + 2] = (this.numberToReturn >> 16) & 0xff;
  header[index + 1] = (this.numberToReturn >> 8) & 0xff;
  header[index] = this.numberToReturn & 0xff;
  index = index + 4;

  // Return the buffers
  return buffers;
};
|
||||
|
||||
// Advance the global request-id counter and hand back the new value.
Query.getRequestId = function() {
  _requestId = _requestId + 1;
  return _requestId;
};
|
||||
|
||||
/**************************************************************
|
||||
* GETMORE
|
||||
**************************************************************/
|
||||
/**
 * OP_GETMORE wire-protocol message: fetch the next batch for a live cursor.
 *
 * @param {BSON} bson BSON instance (kept for interface symmetry)
 * @param {string} ns full namespace the cursor was opened on
 * @param {Long} cursorId 64-bit server cursor id
 * @param {object} [opts] optional settings ({ numberToReturn })
 */
var GetMore = function(bson, ns, cursorId, opts) {
  var settings = opts || {};
  this.numberToReturn = settings.numberToReturn || 0;
  // Consume the current global request id, then bump the counter
  var id = _requestId;
  _requestId = id + 1;
  this.requestId = id;
  this.bson = bson;
  this.ns = ns;
  this.cursorId = cursorId;
};
|
||||
|
||||
//
|
||||
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
|
||||
/**
 * Serialize this OP_GETMORE message into a single pre-sized buffer,
 * avoiding multiple memory allocations.
 *
 * @returns {Buffer} the complete wire message
 */
GetMore.prototype.toBin = function() {
  // responseTo/zero + ns cstring + numberToReturn + 8-byte cursor id + header
  var length = 4 + Buffer.byteLength(this.ns) + 1 + 4 + 8 + 4 * 4;
  // Create command buffer
  var index = 0;
  // Buffer.alloc replaces the deprecated, unsafe `new Buffer(size)`
  var _buffer = Buffer.alloc(length);

  // Write header: messageLength (little-endian manual byte writes)
  _buffer[index + 3] = (length >> 24) & 0xff;
  _buffer[index + 2] = (length >> 16) & 0xff;
  _buffer[index + 1] = (length >> 8) & 0xff;
  _buffer[index] = length & 0xff;
  index = index + 4;

  // Write header: requestId
  _buffer[index + 3] = (this.requestId >> 24) & 0xff;
  _buffer[index + 2] = (this.requestId >> 16) & 0xff;
  _buffer[index + 1] = (this.requestId >> 8) & 0xff;
  _buffer[index] = this.requestId & 0xff;
  index = index + 4;

  // Write header: responseTo (always 0 for a request)
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = 0 & 0xff;
  index = index + 4;

  // Write header: opCode = OP_GETMORE
  _buffer[index + 3] = (opcodes.OP_GETMORE >> 24) & 0xff;
  _buffer[index + 2] = (opcodes.OP_GETMORE >> 16) & 0xff;
  _buffer[index + 1] = (opcodes.OP_GETMORE >> 8) & 0xff;
  _buffer[index] = opcodes.OP_GETMORE & 0xff;
  index = index + 4;

  // Reserved ZERO field of the OP_GETMORE body
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = 0 & 0xff;
  index = index + 4;

  // Write collection name as a NUL-terminated cstring
  index = index + _buffer.write(this.ns, index, 'utf8') + 1;
  _buffer[index - 1] = 0;

  // Write batch size (numberToReturn)
  _buffer[index + 3] = (this.numberToReturn >> 24) & 0xff;
  _buffer[index + 2] = (this.numberToReturn >> 16) & 0xff;
  _buffer[index + 1] = (this.numberToReturn >> 8) & 0xff;
  _buffer[index] = this.numberToReturn & 0xff;
  index = index + 4;

  // Write cursor id: low 32 bits first (little-endian 64-bit value)
  _buffer[index + 3] = (this.cursorId.getLowBits() >> 24) & 0xff;
  _buffer[index + 2] = (this.cursorId.getLowBits() >> 16) & 0xff;
  _buffer[index + 1] = (this.cursorId.getLowBits() >> 8) & 0xff;
  _buffer[index] = this.cursorId.getLowBits() & 0xff;
  index = index + 4;

  // Then the high 32 bits
  _buffer[index + 3] = (this.cursorId.getHighBits() >> 24) & 0xff;
  _buffer[index + 2] = (this.cursorId.getHighBits() >> 16) & 0xff;
  _buffer[index + 1] = (this.cursorId.getHighBits() >> 8) & 0xff;
  _buffer[index] = this.cursorId.getHighBits() & 0xff;
  index = index + 4;

  // Return buffer
  return _buffer;
};
|
||||
|
||||
/**************************************************************
|
||||
* KILLCURSOR
|
||||
**************************************************************/
|
||||
/**
 * OP_KILL_CURSORS wire-protocol message: close server-side cursors.
 *
 * @param {BSON} bson BSON instance (accepted for interface symmetry; unused)
 * @param {string} ns namespace the cursors belong to
 * @param {Long[]} cursorIds the 64-bit cursor ids to kill
 */
var KillCursor = function(bson, ns, cursorIds) {
  this.ns = ns;
  // Consume the current global request id, then bump the counter
  var id = _requestId;
  _requestId = id + 1;
  this.requestId = id;
  this.cursorIds = cursorIds;
};
|
||||
|
||||
//
|
||||
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
|
||||
/**
 * Serialize this OP_KILL_CURSORS message into a single pre-sized buffer,
 * avoiding multiple memory allocations.
 *
 * @returns {Buffer} the complete wire message
 */
KillCursor.prototype.toBin = function() {
  // ZERO + numberOfCursorIDs + header + 8 bytes per cursor id
  var length = 4 + 4 + 4 * 4 + this.cursorIds.length * 8;

  // Create command buffer
  var index = 0;
  // Buffer.alloc replaces the deprecated, unsafe `new Buffer(size)`
  var _buffer = Buffer.alloc(length);

  // Write header: messageLength (little-endian manual byte writes)
  _buffer[index + 3] = (length >> 24) & 0xff;
  _buffer[index + 2] = (length >> 16) & 0xff;
  _buffer[index + 1] = (length >> 8) & 0xff;
  _buffer[index] = length & 0xff;
  index = index + 4;

  // Write header: requestId
  _buffer[index + 3] = (this.requestId >> 24) & 0xff;
  _buffer[index + 2] = (this.requestId >> 16) & 0xff;
  _buffer[index + 1] = (this.requestId >> 8) & 0xff;
  _buffer[index] = this.requestId & 0xff;
  index = index + 4;

  // Write header: responseTo (always 0 for a request)
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = 0 & 0xff;
  index = index + 4;

  // Write header: opCode = OP_KILL_CURSORS
  _buffer[index + 3] = (opcodes.OP_KILL_CURSORS >> 24) & 0xff;
  _buffer[index + 2] = (opcodes.OP_KILL_CURSORS >> 16) & 0xff;
  _buffer[index + 1] = (opcodes.OP_KILL_CURSORS >> 8) & 0xff;
  _buffer[index] = opcodes.OP_KILL_CURSORS & 0xff;
  index = index + 4;

  // Reserved ZERO field of the OP_KILL_CURSORS body
  _buffer[index + 3] = (0 >> 24) & 0xff;
  _buffer[index + 2] = (0 >> 16) & 0xff;
  _buffer[index + 1] = (0 >> 8) & 0xff;
  _buffer[index] = 0 & 0xff;
  index = index + 4;

  // Write numberOfCursorIDs
  _buffer[index + 3] = (this.cursorIds.length >> 24) & 0xff;
  _buffer[index + 2] = (this.cursorIds.length >> 16) & 0xff;
  _buffer[index + 1] = (this.cursorIds.length >> 8) & 0xff;
  _buffer[index] = this.cursorIds.length & 0xff;
  index = index + 4;

  // Write all the cursor ids into the array, each as two little-endian
  // 32-bit halves (low bits first)
  for (var i = 0; i < this.cursorIds.length; i++) {
    _buffer[index + 3] = (this.cursorIds[i].getLowBits() >> 24) & 0xff;
    _buffer[index + 2] = (this.cursorIds[i].getLowBits() >> 16) & 0xff;
    _buffer[index + 1] = (this.cursorIds[i].getLowBits() >> 8) & 0xff;
    _buffer[index] = this.cursorIds[i].getLowBits() & 0xff;
    index = index + 4;

    _buffer[index + 3] = (this.cursorIds[i].getHighBits() >> 24) & 0xff;
    _buffer[index + 2] = (this.cursorIds[i].getHighBits() >> 16) & 0xff;
    _buffer[index + 1] = (this.cursorIds[i].getHighBits() >> 8) & 0xff;
    _buffer[index] = this.cursorIds[i].getHighBits() & 0xff;
    index = index + 4;
  }

  // Return buffer
  return _buffer;
};
|
||||
|
||||
/**
 * Parsed view of an OP_REPLY message received from the server.
 * Reads the header/body fields eagerly; documents are deserialized
 * lazily by parse().
 *
 * @param {BSON} bson BSON deserializer instance
 * @param {Buffer} message the raw message including header
 * @param {object} msgHeader parsed header { length, requestId, responseTo, opCode, fromCompressed }
 * @param {Buffer} msgBody the message body (after the header)
 * @param {object} [opts] promotion defaults ({ promoteLongs, promoteValues, promoteBuffers })
 */
var Response = function(bson, message, msgHeader, msgBody, opts) {
  var settings = opts || { promoteLongs: true, promoteValues: true, promoteBuffers: false };
  this.parsed = false;
  this.raw = message;
  this.data = msgBody;
  this.bson = bson;
  this.opts = settings;

  // Header fields
  this.length = msgHeader.length;
  this.requestId = msgHeader.requestId;
  this.responseTo = msgHeader.responseTo;
  this.opCode = msgHeader.opCode;
  this.fromCompressed = msgHeader.fromCompressed;

  // Fixed-offset OP_REPLY body fields
  this.responseFlags = msgBody.readInt32LE(0);
  this.cursorId = new Long(msgBody.readInt32LE(4), msgBody.readInt32LE(8));
  this.startingFrom = msgBody.readInt32LE(12);
  this.numberReturned = msgBody.readInt32LE(16);

  // Preallocate document array
  this.documents = new Array(this.numberReturned);

  // Decode the response-flag bits
  this.cursorNotFound = (this.responseFlags & CURSOR_NOT_FOUND) !== 0;
  this.queryFailure = (this.responseFlags & QUERY_FAILURE) !== 0;
  this.shardConfigStale = (this.responseFlags & SHARD_CONFIG_STALE) !== 0;
  this.awaitCapable = (this.responseFlags & AWAIT_CAPABLE) !== 0;

  // Promotion settings, defaulting from the constructor options
  this.promoteLongs = typeof settings.promoteLongs === 'boolean' ? settings.promoteLongs : true;
  this.promoteValues = typeof settings.promoteValues === 'boolean' ? settings.promoteValues : true;
  this.promoteBuffers =
    typeof settings.promoteBuffers === 'boolean' ? settings.promoteBuffers : false;
};
|
||||
|
||||
// Whether parse() has already deserialized this response's documents.
Response.prototype.isParsed = function() {
  return this.parsed;
};
|
||||
|
||||
/**
 * Deserialize the documents in this OP_REPLY body into `this.documents`.
 * Idempotent: a second call is a no-op once `this.parsed` is set.
 *
 * Options:
 *   raw                 - keep documents as raw BSON Buffers instead of objects
 *   documentsReturnedIn - name of the cursor field holding the documents; with
 *                         `raw` and a single returned document, that field's
 *                         array is kept as raw buffers (find/getMore replies)
 *   promoteLongs / promoteValues / promoteBuffers - override the instance
 *                         promotion defaults for this parse only
 */
Response.prototype.parse = function(options) {
  // Don't parse again if not needed
  if (this.parsed) return;
  options = options || {};

  // Allow the return of raw documents instead of parsing
  var raw = options.raw || false;
  var documentsReturnedIn = options.documentsReturnedIn || null;
  var promoteLongs =
    typeof options.promoteLongs === 'boolean' ? options.promoteLongs : this.opts.promoteLongs;
  var promoteValues =
    typeof options.promoteValues === 'boolean' ? options.promoteValues : this.opts.promoteValues;
  var promoteBuffers =
    typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : this.opts.promoteBuffers;
  var bsonSize, _options;

  // Set up the deserialization options
  _options = {
    promoteLongs: promoteLongs,
    promoteValues: promoteValues,
    promoteBuffers: promoteBuffers
  };

  // Position within OP_REPLY at which documents start
  // (See https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#wire-op-reply)
  this.index = 20;

  //
  // Single document and documentsReturnedIn set: deserialize the command
  // reply but leave the embedded document array as raw BSON buffers
  //
  if (this.numberReturned === 1 && documentsReturnedIn != null && raw) {
    // Calculate the bson size (little-endian int32 length prefix)
    bsonSize =
      this.data[this.index] |
      (this.data[this.index + 1] << 8) |
      (this.data[this.index + 2] << 16) |
      (this.data[this.index + 3] << 24);
    // Slice out the buffer containing the command result document
    var document = this.data.slice(this.index, this.index + bsonSize);
    // Set up field we wish to keep as raw
    var fieldsAsRaw = {};
    fieldsAsRaw[documentsReturnedIn] = true;
    _options.fieldsAsRaw = fieldsAsRaw;

    // Deserialize but keep the array of documents in non-parsed form
    var doc = this.bson.deserialize(document, _options);

    // Get the documents
    this.documents = doc.cursor[documentsReturnedIn];
    this.numberReturned = this.documents.length;
    // Ensure we have a Long value cursor id
    this.cursorId =
      typeof doc.cursor.id === 'number' ? Long.fromNumber(doc.cursor.id) : doc.cursor.id;

    // Adjust the index
    this.index = this.index + bsonSize;

    // Set as parsed
    this.parsed = true;
    return;
  }

  //
  // Parse Body: walk the length-prefixed BSON documents one by one
  //
  for (var i = 0; i < this.numberReturned; i++) {
    bsonSize =
      this.data[this.index] |
      (this.data[this.index + 1] << 8) |
      (this.data[this.index + 2] << 16) |
      (this.data[this.index + 3] << 24);

    // If we have raw results specified slice the return document
    if (raw) {
      this.documents[i] = this.data.slice(this.index, this.index + bsonSize);
    } else {
      this.documents[i] = this.bson.deserialize(
        this.data.slice(this.index, this.index + bsonSize),
        _options
      );
    }

    // Adjust the index
    this.index = this.index + bsonSize;
  }

  // Set parsed
  this.parsed = true;
};
|
||||
|
||||
module.exports = {
|
||||
Query: Query,
|
||||
GetMore: GetMore,
|
||||
Response: Response,
|
||||
KillCursor: KillCursor
|
||||
};
|
804
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/connection.js
generated
vendored
Normal file
804
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/connection.js
generated
vendored
Normal file
@@ -0,0 +1,804 @@
|
||||
'use strict';
|
||||
|
||||
var inherits = require('util').inherits,
|
||||
EventEmitter = require('events').EventEmitter,
|
||||
net = require('net'),
|
||||
tls = require('tls'),
|
||||
crypto = require('crypto'),
|
||||
f = require('util').format,
|
||||
debugOptions = require('./utils').debugOptions,
|
||||
parseHeader = require('../wireprotocol/shared').parseHeader,
|
||||
decompress = require('../wireprotocol/compression').decompress,
|
||||
Response = require('./commands').Response,
|
||||
MongoNetworkError = require('../error').MongoNetworkError,
|
||||
Logger = require('./logger'),
|
||||
OP_COMPRESSED = require('../wireprotocol/shared').opcodes.OP_COMPRESSED,
|
||||
MESSAGE_HEADER_SIZE = require('../wireprotocol/shared').MESSAGE_HEADER_SIZE;
|
||||
|
||||
// Monotonically increasing id assigned to each Connection instance.
var _id = 0;
// Option names echoed when logging connection creation at debug level.
var debugFields = [
  'host',
  'port',
  'size',
  'keepAlive',
  'keepAliveInitialDelay',
  'noDelay',
  'connectionTimeout',
  'socketTimeout',
  'singleBufferSerializtion',
  'ssl',
  'ca',
  'crl',
  'cert',
  'rejectUnauthorized',
  'promoteLongs',
  'promoteValues',
  'promoteBuffers',
  'checkServerIdentity'
];

// Optional spy object notified on add/delete of tracked connections (test hook).
var connectionAccountingSpy = undefined;
// When true, live connections are tracked in the `connections` map below.
var connectionAccounting = false;
// Map of connection id -> Connection, populated only while accounting is on.
var connections = {};
|
||||
|
||||
/**
|
||||
* Creates a new Connection instance
|
||||
* @class
|
||||
* @param {string} options.host The server host
|
||||
* @param {number} options.port The server port
|
||||
* @param {number} [options.family=null] IP version for DNS lookup, passed down to Node's [`dns.lookup()` function](https://nodejs.org/api/dns.html#dns_dns_lookup_hostname_options_callback). If set to `6`, will only look for ipv6 addresses.
|
||||
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
|
||||
* @param {number} [options.keepAliveInitialDelay=300000] Initial delay before TCP keep alive enabled
|
||||
* @param {boolean} [options.noDelay=true] TCP Connection no delay
|
||||
* @param {number} [options.connectionTimeout=30000] TCP Connection timeout setting
|
||||
* @param {number} [options.socketTimeout=360000] TCP Socket timeout setting
|
||||
* @param {boolean} [options.singleBufferSerializtion=true] Serialize into single buffer, trade of peak memory for serialization speed
|
||||
* @param {boolean} [options.ssl=false] Use SSL for connection
|
||||
* @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function.
|
||||
* @param {Buffer} [options.ca] SSL Certificate store binary buffer
|
||||
* @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer
|
||||
* @param {Buffer} [options.cert] SSL Certificate binary buffer
|
||||
* @param {Buffer} [options.key] SSL Key file binary buffer
|
||||
* @param {string} [options.passphrase] SSL Certificate pass phrase
|
||||
* @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates
|
||||
* @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits
|
||||
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types.
|
||||
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
|
||||
* @fires Connection#connect
|
||||
* @fires Connection#close
|
||||
* @fires Connection#error
|
||||
* @fires Connection#timeout
|
||||
* @fires Connection#parseError
|
||||
* @return {Connection} A cursor instance
|
||||
*/
|
||||
var Connection = function(messageHandler, options) {
  // Add event listener
  EventEmitter.call(this);
  // Guard the options object up front. Fix: previously a missing `options`
  // argument caused a TypeError on `options.bson` instead of the intended
  // 'must pass in valid bson parser' error.
  options = options || {};
  // Set empty if no options passed
  this.options = options;
  // Identification information
  this.id = _id++;
  // Logger instance
  this.logger = Logger('Connection', options);
  // No bson parser passed in
  if (!options.bson) throw new Error('must pass in valid bson parser');
  // Get bson parser
  this.bson = options.bson;
  // Grouping tag used for debugging purposes
  this.tag = options.tag;
  // Message handler invoked with each parsed Response
  this.messageHandler = messageHandler;

  // Max BSON message size
  this.maxBsonMessageSize = options.maxBsonMessageSize || 1024 * 1024 * 16 * 4;
  // Debug information
  if (this.logger.isDebug())
    this.logger.debug(
      f(
        'creating connection %s with options [%s]',
        this.id,
        JSON.stringify(debugOptions(debugFields, options))
      )
    );

  // Default options
  this.port = options.port || 27017;
  this.host = options.host || 'localhost';
  this.family = typeof options.family === 'number' ? options.family : void 0;
  this.keepAlive = typeof options.keepAlive === 'boolean' ? options.keepAlive : true;
  this.keepAliveInitialDelay =
    typeof options.keepAliveInitialDelay === 'number' ? options.keepAliveInitialDelay : 300000;
  this.noDelay = typeof options.noDelay === 'boolean' ? options.noDelay : true;
  this.connectionTimeout =
    typeof options.connectionTimeout === 'number' ? options.connectionTimeout : 30000;
  this.socketTimeout = typeof options.socketTimeout === 'number' ? options.socketTimeout : 360000;

  // Is the keepAliveInitialDelay > socketTimeout set it to half of socketTimeout
  if (this.keepAliveInitialDelay > this.socketTimeout) {
    this.keepAliveInitialDelay = Math.round(this.socketTimeout / 2);
  }

  // If connection was destroyed
  this.destroyed = false;

  // Check if we have a domain socket (a '/' cannot appear in a hostname)
  this.domainSocket = this.host.indexOf('/') !== -1;

  // Serialize commands using function
  // NOTE: 'singleBufferSerializtion' (sic) is the established public option
  // name; keep the misspelling for compatibility.
  this.singleBufferSerializtion =
    typeof options.singleBufferSerializtion === 'boolean' ? options.singleBufferSerializtion : true;
  this.serializationFunction = this.singleBufferSerializtion ? 'toBinUnified' : 'toBin';

  // SSL options
  this.ca = options.ca || null;
  this.crl = options.crl || null;
  this.cert = options.cert || null;
  this.key = options.key || null;
  this.passphrase = options.passphrase || null;
  this.ciphers = options.ciphers || null;
  this.ecdhCurve = options.ecdhCurve || null;
  this.ssl = typeof options.ssl === 'boolean' ? options.ssl : false;
  this.rejectUnauthorized =
    typeof options.rejectUnauthorized === 'boolean' ? options.rejectUnauthorized : true;
  this.checkServerIdentity =
    typeof options.checkServerIdentity === 'boolean' ||
    typeof options.checkServerIdentity === 'function'
      ? options.checkServerIdentity
      : true;

  // If ssl not enabled, certificate validation is meaningless
  if (!this.ssl) this.rejectUnauthorized = false;

  // Response options
  this.responseOptions = {
    promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true,
    promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true,
    promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false
  };

  // Flushing
  this.flushing = false;
  this.queue = [];

  // Internal state
  this.connection = null;
  this.writeStream = null;

  // Create hash method
  var hash = crypto.createHash('sha1');
  hash.update(f('%s:%s', this.host, this.port));

  // Create a hash name identifying this host:port pair
  this.hashedName = hash.digest('hex');

  // All operations in flight on the connection
  this.workItems = [];
};
|
||||
|
||||
inherits(Connection, EventEmitter);
|
||||
|
||||
// Apply a socket timeout (ms) to the underlying socket, if connected.
Connection.prototype.setSocketTimeout = function(value) {
  var socket = this.connection;
  if (!socket) return;
  socket.setTimeout(value);
};
|
||||
|
||||
// Restore the configured default socket timeout on the underlying socket.
Connection.prototype.resetSocketTimeout = function() {
  var socket = this.connection;
  if (!socket) return;
  socket.setTimeout(this.socketTimeout);
};
|
||||
|
||||
// Turn on global connection tracking, optionally installing a spy that is
// notified as connections are added/removed (used by tests).
Connection.enableConnectionAccounting = function(spy) {
  if (spy) connectionAccountingSpy = spy;
  connectionAccounting = true;
  connections = {};
};
|
||||
|
||||
// Turn off global connection tracking and drop any installed spy.
Connection.disableConnectionAccounting = function() {
  connectionAccountingSpy = undefined;
  connectionAccounting = false;
};
|
||||
|
||||
// Return the map of tracked connections (only populated while accounting is on).
Connection.connections = function() {
  return connections;
};
|
||||
|
||||
// Remove a connection from the accounting map and notify the spy, if any.
function deleteConnection(id) {
  delete connections[id];
  if (connectionAccountingSpy) {
    connectionAccountingSpy.deleteConnection(id);
  }
}
|
||||
|
||||
// Record a connection in the accounting map and notify the spy, if any.
function addConnection(id, connection) {
  connections[id] = connection;
  if (connectionAccountingSpy) {
    connectionAccountingSpy.addConnection(id, connection);
  }
}
|
||||
|
||||
//
|
||||
// Connection handlers
|
||||
//
// Build the socket 'error' listener for a connection. Emits a
// MongoNetworkError only when someone is actually listening for 'error'.
var errorHandler = function(self) {
  return function(err) {
    if (connectionAccounting) deleteConnection(self.id);
    // Debug information
    if (self.logger.isDebug()) {
      self.logger.debug(
        f(
          'connection %s for [%s:%s] errored out with [%s]',
          self.id,
          self.host,
          self.port,
          JSON.stringify(err)
        )
      );
    }
    // Emit the error, but only if a listener is attached
    if (self.listeners('error').length > 0) {
      self.emit('error', new MongoNetworkError(err), self);
    }
  };
};
|
||||
|
||||
//
// Build the socket 'timeout' listener for a connection.
var timeoutHandler = function(self) {
  return function() {
    if (connectionAccounting) deleteConnection(self.id);
    // Debug information
    if (self.logger.isDebug()) {
      self.logger.debug(f('connection %s for [%s:%s] timed out', self.id, self.host, self.port));
    }
    // Emit timeout error
    var error = new MongoNetworkError(
      f('connection %s to %s:%s timed out', self.id, self.host, self.port)
    );
    self.emit('timeout', error, self);
  };
};
|
||||
|
||||
//
// Build the socket 'close' listener for a connection. A close following an
// error is not re-emitted, since the error handler already fired.
var closeHandler = function(self) {
  return function(hadError) {
    if (connectionAccounting) deleteConnection(self.id);
    // Debug information
    if (self.logger.isDebug()) {
      self.logger.debug(f('connection %s with for [%s:%s] closed', self.id, self.host, self.port));
    }

    // Emit close event only for clean closes
    if (hadError) return;
    var error = new MongoNetworkError(
      f('connection %s to %s:%s closed', self.id, self.host, self.port)
    );
    self.emit('close', error, self);
  };
};
|
||||
|
||||
// Handle a message once it is recieved
|
||||
// Handle a complete raw message once it is received: parse the header,
// transparently decompress OP_COMPRESSED payloads, and hand a Response to
// the connection's messageHandler.
var emitMessageHandler = function(self, message) {
  var msgHeader = parseHeader(message);
  if (msgHeader.opCode === OP_COMPRESSED) {
    // Compressed message: the real opCode, uncompressed length and
    // compressor id follow the standard header, then the compressed body.
    msgHeader.fromCompressed = true;
    var index = MESSAGE_HEADER_SIZE;
    msgHeader.opCode = message.readInt32LE(index);
    index += 4;
    msgHeader.length = message.readInt32LE(index);
    index += 4;
    var compressorID = message[index];
    index++;
    decompress(compressorID, message.slice(index), function(err, decompressedMsgBody) {
      // NOTE(review): errors here are thrown from an async callback and
      // so cannot be caught by the caller — confirm intended.
      if (err) {
        throw err;
      }
      // Guard against truncated/corrupt decompression output
      if (decompressedMsgBody.length !== msgHeader.length) {
        throw new Error(
          'Decompressing a compressed message from the server failed. The message is corrupt.'
        );
      }
      self.messageHandler(
        new Response(self.bson, message, msgHeader, decompressedMsgBody, self.responseOptions),
        self
      );
    });
  } else {
    // Uncompressed message: body starts right after the standard header
    self.messageHandler(
      new Response(
        self.bson,
        message,
        msgHeader,
        message.slice(MESSAGE_HEADER_SIZE),
        self.responseOptions
      ),
      self
    );
  }
};
|
||||
|
||||
var dataHandler = function(self) {
|
||||
return function(data) {
|
||||
// Parse until we are done with the data
|
||||
while (data.length > 0) {
|
||||
// If we still have bytes to read on the current message
|
||||
if (self.bytesRead > 0 && self.sizeOfMessage > 0) {
|
||||
// Calculate the amount of remaining bytes
|
||||
var remainingBytesToRead = self.sizeOfMessage - self.bytesRead;
|
||||
// Check if the current chunk contains the rest of the message
|
||||
if (remainingBytesToRead > data.length) {
|
||||
// Copy the new data into the exiting buffer (should have been allocated when we know the message size)
|
||||
data.copy(self.buffer, self.bytesRead);
|
||||
// Adjust the number of bytes read so it point to the correct index in the buffer
|
||||
self.bytesRead = self.bytesRead + data.length;
|
||||
|
||||
// Reset state of buffer
|
||||
data = new Buffer(0);
|
||||
} else {
|
||||
// Copy the missing part of the data into our current buffer
|
||||
data.copy(self.buffer, self.bytesRead, 0, remainingBytesToRead);
|
||||
// Slice the overflow into a new buffer that we will then re-parse
|
||||
data = data.slice(remainingBytesToRead);
|
||||
|
||||
// Emit current complete message
|
||||
try {
|
||||
var emitBuffer = self.buffer;
|
||||
// Reset state of buffer
|
||||
self.buffer = null;
|
||||
self.sizeOfMessage = 0;
|
||||
self.bytesRead = 0;
|
||||
self.stubBuffer = null;
|
||||
|
||||
emitMessageHandler(self, emitBuffer);
|
||||
} catch (err) {
|
||||
var errorObject = {
|
||||
err: 'socketHandler',
|
||||
trace: err,
|
||||
bin: self.buffer,
|
||||
parseState: {
|
||||
sizeOfMessage: self.sizeOfMessage,
|
||||
bytesRead: self.bytesRead,
|
||||
stubBuffer: self.stubBuffer
|
||||
}
|
||||
};
|
||||
// We got a parse Error fire it off then keep going
|
||||
self.emit('parseError', errorObject, self);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Stub buffer is kept in case we don't get enough bytes to determine the
|
||||
// size of the message (< 4 bytes)
|
||||
if (self.stubBuffer != null && self.stubBuffer.length > 0) {
|
||||
// If we have enough bytes to determine the message size let's do it
|
||||
if (self.stubBuffer.length + data.length > 4) {
|
||||
// Prepad the data
|
||||
var newData = new Buffer(self.stubBuffer.length + data.length);
|
||||
self.stubBuffer.copy(newData, 0);
|
||||
data.copy(newData, self.stubBuffer.length);
|
||||
// Reassign for parsing
|
||||
data = newData;
|
||||
|
||||
// Reset state of buffer
|
||||
self.buffer = null;
|
||||
self.sizeOfMessage = 0;
|
||||
self.bytesRead = 0;
|
||||
self.stubBuffer = null;
|
||||
} else {
|
||||
// Add the the bytes to the stub buffer
|
||||
var newStubBuffer = new Buffer(self.stubBuffer.length + data.length);
|
||||
// Copy existing stub buffer
|
||||
self.stubBuffer.copy(newStubBuffer, 0);
|
||||
// Copy missing part of the data
|
||||
data.copy(newStubBuffer, self.stubBuffer.length);
|
||||
// Exit parsing loop
|
||||
data = new Buffer(0);
|
||||
}
|
||||
} else {
|
||||
if (data.length > 4) {
|
||||
// Retrieve the message size
|
||||
// var sizeOfMessage = data.readUInt32LE(0);
|
||||
var sizeOfMessage = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
|
||||
// If we have a negative sizeOfMessage emit error and return
|
||||
if (sizeOfMessage < 0 || sizeOfMessage > self.maxBsonMessageSize) {
|
||||
errorObject = {
|
||||
err: 'socketHandler',
|
||||
trace: '',
|
||||
bin: self.buffer,
|
||||
parseState: {
|
||||
sizeOfMessage: sizeOfMessage,
|
||||
bytesRead: self.bytesRead,
|
||||
stubBuffer: self.stubBuffer
|
||||
}
|
||||
};
|
||||
// We got a parse Error fire it off then keep going
|
||||
self.emit('parseError', errorObject, self);
|
||||
return;
|
||||
}
|
||||
|
||||
// Ensure that the size of message is larger than 0 and less than the max allowed
|
||||
if (
|
||||
sizeOfMessage > 4 &&
|
||||
sizeOfMessage < self.maxBsonMessageSize &&
|
||||
sizeOfMessage > data.length
|
||||
) {
|
||||
self.buffer = new Buffer(sizeOfMessage);
|
||||
// Copy all the data into the buffer
|
||||
data.copy(self.buffer, 0);
|
||||
// Update bytes read
|
||||
self.bytesRead = data.length;
|
||||
// Update sizeOfMessage
|
||||
self.sizeOfMessage = sizeOfMessage;
|
||||
// Ensure stub buffer is null
|
||||
self.stubBuffer = null;
|
||||
// Exit parsing loop
|
||||
data = new Buffer(0);
|
||||
} else if (
|
||||
sizeOfMessage > 4 &&
|
||||
sizeOfMessage < self.maxBsonMessageSize &&
|
||||
sizeOfMessage === data.length
|
||||
) {
|
||||
try {
|
||||
emitBuffer = data;
|
||||
// Reset state of buffer
|
||||
self.buffer = null;
|
||||
self.sizeOfMessage = 0;
|
||||
self.bytesRead = 0;
|
||||
self.stubBuffer = null;
|
||||
// Exit parsing loop
|
||||
data = new Buffer(0);
|
||||
// Emit the message
|
||||
emitMessageHandler(self, emitBuffer);
|
||||
} catch (err) {
|
||||
self.emit('parseError', err, self);
|
||||
}
|
||||
} else if (sizeOfMessage <= 4 || sizeOfMessage > self.maxBsonMessageSize) {
|
||||
errorObject = {
|
||||
err: 'socketHandler',
|
||||
trace: null,
|
||||
bin: data,
|
||||
parseState: {
|
||||
sizeOfMessage: sizeOfMessage,
|
||||
bytesRead: 0,
|
||||
buffer: null,
|
||||
stubBuffer: null
|
||||
}
|
||||
};
|
||||
// We got a parse Error fire it off then keep going
|
||||
self.emit('parseError', errorObject, self);
|
||||
|
||||
// Clear out the state of the parser
|
||||
self.buffer = null;
|
||||
self.sizeOfMessage = 0;
|
||||
self.bytesRead = 0;
|
||||
self.stubBuffer = null;
|
||||
// Exit parsing loop
|
||||
data = new Buffer(0);
|
||||
} else {
|
||||
emitBuffer = data.slice(0, sizeOfMessage);
|
||||
// Reset state of buffer
|
||||
self.buffer = null;
|
||||
self.sizeOfMessage = 0;
|
||||
self.bytesRead = 0;
|
||||
self.stubBuffer = null;
|
||||
// Copy rest of message
|
||||
data = data.slice(sizeOfMessage);
|
||||
// Emit the message
|
||||
emitMessageHandler(self, emitBuffer);
|
||||
}
|
||||
} else {
|
||||
// Create a buffer that contains the space for the non-complete message
|
||||
self.stubBuffer = new Buffer(data.length);
|
||||
// Copy the data to the stub buffer
|
||||
data.copy(self.stubBuffer, 0);
|
||||
// Exit parsing loop
|
||||
data = new Buffer(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
// Whitelist of tls.connect() options that callers may pass through to the
// underlying SSL socket. Anything not on this list is silently ignored.
var legalSslSocketOptions = [
  'pfx',
  'key',
  'passphrase',
  'cert',
  'ca',
  'ciphers',
  'NPNProtocols',
  'ALPNProtocols',
  'servername',
  'ecdhCurve',
  'secureProtocol',
  'secureContext',
  'session',
  'minDHSize'
];

/**
 * Copy every whitelisted, non-null ssl option from options2 onto options1.
 * Mutates options1 in place; returns nothing.
 */
function merge(options1, options2) {
  for (var name in options2) {
    var value = options2[name];
    // Skip nulls/undefined and anything outside the ssl whitelist
    if (value == null) continue;
    if (legalSslSocketOptions.indexOf(name) === -1) continue;
    options1[name] = value;
  }
}
|
||||
|
||||
/**
 * Create a TLS socket for the given Connection instance.
 * Builds the tls.connect options from the connection's stored settings plus
 * any per-call overrides, then wires up the post-handshake checks.
 *
 * @param {Connection} self The owning connection (source of host/port/ssl settings)
 * @param {object} _options Per-call option overrides (ssl subset only)
 * @returns {tls.TLSSocket} the connecting socket (handshake still in flight)
 */
function makeSSLConnection(self, _options) {
  let sslOptions = {
    socket: self.connection,
    rejectUnauthorized: self.rejectUnauthorized
  };

  // Merge in whitelisted ssl options: connection-level first, then the
  // per-call overrides so _options wins on conflicts.
  merge(sslOptions, self.options);
  merge(sslOptions, _options);

  // Explicit certificate material on the connection beats anything merged above
  if (self.ca) sslOptions.ca = self.ca;
  if (self.crl) sslOptions.crl = self.crl;
  if (self.cert) sslOptions.cert = self.cert;
  if (self.key) sslOptions.key = self.key;
  if (self.passphrase) sslOptions.passphrase = self.passphrase;

  // Override checkServerIdentity behavior
  if (self.checkServerIdentity === false) {
    // Skip the identity check by returning undefined, as per the node docs:
    // https://nodejs.org/api/tls.html#tls_tls_connect_options_callback
    sslOptions.checkServerIdentity = function() {
      return undefined;
    };
  } else if (typeof self.checkServerIdentity === 'function') {
    sslOptions.checkServerIdentity = self.checkServerIdentity;
  }

  // Default the SNI servername to the host unless the caller set one
  if (sslOptions.servername == null) {
    sslOptions.servername = self.host;
  }

  // Attempt SSL connection; callback fires after the TLS handshake completes
  const connection = tls.connect(self.port, self.host, sslOptions, function() {
    // Surface certificate-validation failures as an 'error' when we are
    // rejecting unauthorized peers
    if (connection.authorizationError && self.rejectUnauthorized) {
      return self.emit('error', connection.authorizationError, self, { ssl: true });
    }

    // Handshake done: switch from the (shorter) connection timeout to the
    // steady-state socket timeout
    connection.setTimeout(self.socketTimeout);
    // We are done, emit connect
    self.emit('connect', self);
  });

  // Socket tuning applied while the connection is still being established
  connection.setKeepAlive(self.keepAlive, self.keepAliveInitialDelay);
  connection.setTimeout(self.connectionTimeout);
  connection.setNoDelay(self.noDelay);

  return connection;
}
|
||||
|
||||
/**
 * Create a plain (non-TLS) socket for the given Connection instance.
 * Supports both TCP (host/port) and unix domain sockets (host used as path).
 *
 * @param {Connection} self The owning connection
 * @param {number} family IP stack to use (4 or 6) for TCP connections
 * @returns {net.Socket} the connecting socket
 */
function makeUnsecureConnection(self, family) {
  // Build createConnection options; for domain sockets `host` holds the path
  let connection_options;
  if (self.domainSocket) {
    connection_options = { path: self.host };
  } else {
    connection_options = { port: self.port, host: self.host };
    connection_options.family = family;
  }

  const connection = net.createConnection(connection_options);

  // Socket tuning applied while the connection is still being established
  connection.setKeepAlive(self.keepAlive, self.keepAliveInitialDelay);
  connection.setTimeout(self.connectionTimeout);
  connection.setNoDelay(self.noDelay);

  connection.once('connect', function() {
    // Connected: switch from the connection timeout to the steady-state
    // socket timeout
    connection.setTimeout(self.socketTimeout);
    // Emit connect event
    self.emit('connect', self);
  });

  return connection;
}
|
||||
|
||||
/**
 * Establish the underlying socket (TLS or plain, depending on self.ssl),
 * store it on the connection, and attach the standard event handlers.
 *
 * @param {Connection} self The owning connection
 * @param {number} family IP stack (4 or 6) — only used for non-SSL sockets
 * @param {object} _options Per-call option overrides — only used for SSL sockets
 * @param {function} _errorHandler Handler installed for the socket 'error' event
 */
function doConnect(self, family, _options, _errorHandler) {
  self.connection = self.ssl
    ? makeSSLConnection(self, _options)
    : makeUnsecureConnection(self, family);

  // Wire socket lifecycle events to the connection's handlers; 'data' is the
  // only recurring event, the rest fire at most once
  self.connection.once('error', _errorHandler);
  self.connection.once('timeout', timeoutHandler(self));
  self.connection.once('close', closeHandler(self));
  self.connection.on('data', dataHandler(self));
}
|
||||
|
||||
/**
 * Connect
 *
 * Opens the underlying socket. When no address family was configured, tries
 * IPv6 first and transparently falls back to IPv4 if that attempt errors.
 * @method
 * @param {object} [_options] Per-connect overrides (promoteLongs/Values/Buffers, ssl options)
 */
Connection.prototype.connect = function(_options) {
  _options = _options || {};
  // Register this connection in the global accounting map when enabled
  if (connectionAccounting) addConnection(this.id, this);
  // Allow the caller to override the BSON promotion flags for responses.
  // Note: promoteValues/promoteBuffers are only picked up when promoteLongs
  // is explicitly a boolean.
  if (typeof _options.promoteLongs === 'boolean') {
    this.responseOptions.promoteLongs = _options.promoteLongs;
    this.responseOptions.promoteValues = _options.promoteValues;
    this.responseOptions.promoteBuffers = _options.promoteBuffers;
  }

  const _errorHandler = errorHandler(this);

  // Explicitly configured family: single attempt, no fallback
  if (this.family !== void 0) {
    return doConnect(this, this.family, _options, _errorHandler);
  }

  // No family configured: attempt IPv6 first; on error retry once over IPv4
  return doConnect(this, 6, _options, err => {
    if (this.logger.isDebug()) {
      this.logger.debug(
        f(
          'connection %s for [%s:%s] errored out with [%s]',
          this.id,
          this.host,
          this.port,
          JSON.stringify(err)
        )
      );
    }

    // Clean up the failed IPv6 socket's handlers before retrying so they
    // don't fire alongside the IPv4 attempt's handlers
    this.connection.removeAllListeners('error');
    this.connection.removeAllListeners('timeout');
    this.connection.removeAllListeners('close');
    this.connection.removeAllListeners('data');
    this.connection = undefined;

    return doConnect(this, 4, _options, _errorHandler);
  });
};
|
||||
|
||||
/**
 * Unref this connection
 * @method
 * @return {boolean}
 */
Connection.prototype.unref = function() {
  if (this.connection) {
    this.connection.unref();
    return;
  }

  // No socket yet - defer the unref until the connect event fires.
  this.once('connect', () => this.connection.unref());
};
|
||||
|
||||
/**
 * Destroy connection
 * @method
 */
Connection.prototype.destroy = function() {
  // Remove this connection from the global accounting map when enabled
  if (connectionAccounting) deleteConnection(this.id);

  const socket = this.connection;
  if (socket) {
    // end() could throw on very old node (0.10.x); swallow and continue
    // so destroy() below always runs
    try {
      socket.end();
    } catch (err) {} // eslint-disable-line
    socket.destroy();
  }

  this.destroyed = true;
};
|
||||
|
||||
/**
 * Write to connection
 * @method
 * @param {Command} command Command to write out need to implement toBin and toBinUnified
 * @return {boolean} true when written, false when the socket is already destroyed
 */
Connection.prototype.write = function(buffer) {
  // Normalize to an array so logging and writing share one code path
  const buffers = Array.isArray(buffer) ? buffer : [buffer];

  // Debug-log every outgoing buffer as hex
  if (this.logger.isDebug()) {
    for (const chunk of buffers) {
      this.logger.debug(
        f('writing buffer [%s] to %s:%s', chunk.toString('hex'), this.host, this.port)
      );
    }
  }

  // Refuse to write to a destroyed socket
  if (this.connection.destroyed !== false) {
    return false;
  }

  // Write all buffers, in order, to the socket
  for (const chunk of buffers) {
    this.connection.write(chunk, 'binary');
  }

  return true;
};
|
||||
|
||||
/**
 * Return id of connection as a string
 * @method
 * @return {string}
 */
Connection.prototype.toString = function() {
  return `${this.id}`;
};
|
||||
|
||||
/**
 * Return json object of connection
 * @method
 * @return {object}
 */
Connection.prototype.toJSON = function() {
  const { id, host, port } = this;
  return { id, host, port };
};
|
||||
|
||||
/**
 * Is the connection connected
 * @method
 * @return {boolean}
 */
Connection.prototype.isConnected = function() {
  // Explicitly destroyed connections are never connected
  if (this.destroyed) return false;

  const socket = this.connection;
  return socket.writable && !socket.destroyed;
};
|
||||
|
||||
/**
|
||||
* A server connect event, used to verify that the connection is up and running
|
||||
*
|
||||
* @event Connection#connect
|
||||
* @type {Connection}
|
||||
*/
|
||||
|
||||
/**
|
||||
* The server connection closed, all pool connections closed
|
||||
*
|
||||
* @event Connection#close
|
||||
* @type {Connection}
|
||||
*/
|
||||
|
||||
/**
|
||||
* The server connection caused an error, all pool connections closed
|
||||
*
|
||||
* @event Connection#error
|
||||
* @type {Connection}
|
||||
*/
|
||||
|
||||
/**
|
||||
* The server connection timed out, all pool connections closed
|
||||
*
|
||||
* @event Connection#timeout
|
||||
* @type {Connection}
|
||||
*/
|
||||
|
||||
/**
|
||||
* The driver experienced an invalid message, all pool connections closed
|
||||
*
|
||||
* @event Connection#parseError
|
||||
* @type {Connection}
|
||||
*/
|
||||
|
||||
module.exports = Connection;
|
246
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/logger.js
generated
vendored
Normal file
246
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/logger.js
generated
vendored
Normal file
@@ -0,0 +1,246 @@
|
||||
'use strict';
|
||||
|
||||
var f = require('util').format,
|
||||
MongoError = require('../error').MongoError;
|
||||
|
||||
// Filters for classes
|
||||
var classFilters = {};
|
||||
var filteredClasses = {};
|
||||
var level = null;
|
||||
// Save the process id
|
||||
var pid = process.pid;
|
||||
// current logger
|
||||
var currentLogger = null;
|
||||
|
||||
/**
 * Creates a new Logger instance
 * @class
 * @param {string} className The Class name associated with the logging instance
 * @param {object} [options=null] Optional settings.
 * @param {Function} [options.logger=null] Custom logger function;
 * @param {string} [options.loggerLevel=error] Override default global log level.
 * @return {Logger} a Logger instance.
 */
var Logger = function(className, options) {
  // Allow calling without `new`
  if (!(this instanceof Logger)) return new Logger(className, options);
  options = options || {};

  // Class name this instance tags its log lines with
  this.className = className;

  // NOTE: `currentLogger` and `level` are module-global, so passing
  // options.logger / options.loggerLevel here changes logging for every
  // Logger instance in the process, not just this one.
  if (options.logger) {
    currentLogger = options.logger;
  } else if (currentLogger == null) {
    currentLogger = console.log;
  }

  // Set level of logging, default is error
  if (options.loggerLevel) {
    level = options.loggerLevel || 'error';
  }

  // Register this class name in the global class registry.
  // NOTE(review): the guard reads `filteredClasses` but the write goes to
  // `classFilters` — looks asymmetric; confirm intent before changing.
  if (filteredClasses[this.className] == null) classFilters[this.className] = true;
};
|
||||
|
||||
/**
 * Log a message at the debug level
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.debug = function(message, object) {
  // Explicit class filters (if any were set) win; otherwise fall back to
  // the registry of all known class names.
  const hasExplicitFilters = Object.keys(filteredClasses).length > 0;
  const classEnabled = hasExplicitFilters
    ? filteredClasses[this.className]
    : classFilters[this.className];

  if (!this.isDebug() || !classEnabled) return;

  const dateTime = new Date().getTime();
  const msg = f('[%s-%s:%s] %s %s', 'DEBUG', this.className, pid, dateTime, message);
  const state = {
    type: 'debug',
    message: message,
    className: this.className,
    pid: pid,
    date: dateTime
  };
  if (object) state.meta = object;
  currentLogger(msg, state);
};
|
||||
|
||||
/**
 * Log a message at the warn level
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
// NOTE: these prototype assignments were previously chained together with the
// comma operator inside parentheses — a single fragile statement. They are
// now independent statements; behavior is unchanged.
Logger.prototype.warn = function(message, object) {
  if (
    this.isWarn() &&
    ((Object.keys(filteredClasses).length > 0 && filteredClasses[this.className]) ||
      (Object.keys(filteredClasses).length === 0 && classFilters[this.className]))
  ) {
    var dateTime = new Date().getTime();
    var msg = f('[%s-%s:%s] %s %s', 'WARN', this.className, pid, dateTime, message);
    var state = {
      type: 'warn',
      message: message,
      className: this.className,
      pid: pid,
      date: dateTime
    };
    if (object) state.meta = object;
    currentLogger(msg, state);
  }
};

/**
 * Log a message at the info level
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.info = function(message, object) {
  if (
    this.isInfo() &&
    ((Object.keys(filteredClasses).length > 0 && filteredClasses[this.className]) ||
      (Object.keys(filteredClasses).length === 0 && classFilters[this.className]))
  ) {
    var dateTime = new Date().getTime();
    var msg = f('[%s-%s:%s] %s %s', 'INFO', this.className, pid, dateTime, message);
    var state = {
      type: 'info',
      message: message,
      className: this.className,
      pid: pid,
      date: dateTime
    };
    if (object) state.meta = object;
    currentLogger(msg, state);
  }
};

/**
 * Log a message at the error level
 * @method
 * @param {string} message The message to log
 * @param {object} object additional meta data to log
 * @return {null}
 */
Logger.prototype.error = function(message, object) {
  if (
    this.isError() &&
    ((Object.keys(filteredClasses).length > 0 && filteredClasses[this.className]) ||
      (Object.keys(filteredClasses).length === 0 && classFilters[this.className]))
  ) {
    var dateTime = new Date().getTime();
    var msg = f('[%s-%s:%s] %s %s', 'ERROR', this.className, pid, dateTime, message);
    var state = {
      type: 'error',
      message: message,
      className: this.className,
      pid: pid,
      date: dateTime
    };
    if (object) state.meta = object;
    currentLogger(msg, state);
  }
};

/**
 * Is the logger set at info level
 * @method
 * @return {boolean}
 */
Logger.prototype.isInfo = function() {
  return level === 'info' || level === 'debug';
};

/**
 * Is the logger set at error level
 * @method
 * @return {boolean}
 */
Logger.prototype.isError = function() {
  return level === 'error' || level === 'info' || level === 'debug';
};

/**
 * Is the logger set at warn level
 * @method
 * @return {boolean}
 */
Logger.prototype.isWarn = function() {
  return level === 'error' || level === 'warn' || level === 'info' || level === 'debug';
};

/**
 * Is the logger set at debug level
 * @method
 * @return {boolean}
 */
Logger.prototype.isDebug = function() {
  return level === 'debug';
};
|
||||
|
||||
/**
 * Resets the logger to default settings, error and no filtered classes
 * @method
 * @return {null}
 */
Logger.reset = function() {
  // Drop any class filtering and fall back to the default level
  filteredClasses = {};
  level = 'error';
};
|
||||
|
||||
/**
 * Get the current logger function
 * @method
 * @return {function} the process-wide logger sink
 */
Logger.currentLogger = function() {
  return currentLogger;
};
|
||||
|
||||
/**
 * Set the current logger function
 * @method
 * @param {function} logger Logger function.
 * @return {null}
 * @throws {MongoError} when logger is not a function
 */
Logger.setCurrentLogger = function(logger) {
  if (typeof logger !== 'function') {
    throw new MongoError('current logger must be a function');
  }

  currentLogger = logger;
};
|
||||
|
||||
/**
 * Set what classes to log.
 * @method
 * @param {string} type The type of filter (currently only class)
 * @param {string[]} values The filters to apply
 * @return {null}
 */
Logger.filter = function(type, values) {
  // Only the 'class' filter type is supported; anything else is a no-op
  if (type !== 'class' || !Array.isArray(values)) return;

  // Rebuild the filter set from scratch
  filteredClasses = values.reduce(function(acc, className) {
    acc[className] = true;
    return acc;
  }, {});
};
|
||||
|
||||
/**
 * Set the current log level
 * @method
 * @param {string} level Set current log level (debug, warn, info, error)
 * @return {null}
 * @throws {Error} when an unknown level is supplied
 */
Logger.setLevel = function(_level) {
  var validLevels = ['info', 'error', 'debug', 'warn'];
  if (validLevels.indexOf(_level) === -1) {
    throw new Error(f('%s is an illegal logging level', _level));
  }

  level = _level;
};
|
||||
|
||||
module.exports = Logger;
|
1653
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/pool.js
generated
vendored
Normal file
1653
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/pool.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
113
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/utils.js
generated
vendored
Normal file
113
ProjectNow/NodeServer/node_modules/mongodb-core/lib/connection/utils.js
generated
vendored
Normal file
@@ -0,0 +1,113 @@
|
||||
'use strict';
|
||||
|
||||
var f = require('util').format,
|
||||
require_optional = require('require_optional');
|
||||
|
||||
// Define a boolean accessor on `obj` that mirrors its value into a bit of
// `values.flags` (set the bit on true, clear it on false). The raw value is
// cached on `prop.value`. Throws if a non-boolean is assigned.
var setProperty = function(obj, prop, flag, values) {
  Object.defineProperty(obj, prop.name, {
    enumerable: true,
    get: function() {
      return prop.value;
    },
    set: function(value) {
      if (typeof value !== 'boolean') throw new Error(f('%s required a boolean', prop.name));

      if (value === true) {
        // Turn the bit on
        values.flags |= flag;
      } else if ((values.flags & flag) === flag) {
        // Turn the bit off only when it is currently set
        values.flags ^= flag;
      }

      prop.value = value;
    }
  });
};
|
||||
|
||||
// Define a lazy read-only accessor on `obj` backed by `values[fieldName]`.
// Triggers obj.parse() on first access when the field is missing and the
// object reports itself unparsed; applies `func` to the value when provided.
var getProperty = function(obj, propName, fieldName, values, func) {
  Object.defineProperty(obj, propName, {
    enumerable: true,
    get: function() {
      // Lazily parse the backing object the first time the field is absent
      if (values[fieldName] == null && obj.isParsed && !obj.isParsed()) {
        obj.parse();
      }

      // Apply the optional post-processing transform, else return raw
      return typeof func === 'function' ? func(values[fieldName]) : values[fieldName];
    }
  });
};
|
||||
|
||||
// Define a read-only enumerable property on `obj` that always yields `value`.
var getSingleProperty = function(obj, name, value) {
  Object.defineProperty(obj, name, {
    enumerable: true,
    get() {
      return value;
    }
  });
};
|
||||
|
||||
// Shallow copy: assign every enumerable key of fObj (own or inherited, hence
// for-in) onto tObj, creating a fresh object when tObj is omitted.
var copy = function(fObj, tObj) {
  var target = tObj || {};
  for (var key in fObj) {
    target[key] = fObj[key];
  }
  return target;
};
|
||||
|
||||
// Project `options` down to only the fields named in `debugFields`
// (missing fields come through as undefined), for debug logging.
var debugOptions = function(debugFields, options) {
  return debugFields.reduce(function(acc, field) {
    acc[field] = options[field];
    return acc;
  }, {});
};
|
||||
|
||||
// Resolve the BSON implementation to use: prefer the optional native
// `bson-ext` addon when installed, otherwise fall back to the pure-JS
// `bson` package. The returned module is tagged with a `.native` flag.
var retrieveBSON = function() {
  // Pure-JS parser is always available
  var BSON = require('bson');
  BSON.native = false;

  try {
    // Optional native C++ parser; require_optional returns it only when the
    // dependency is actually installed
    var optionalBSON = require_optional('bson-ext');
    if (optionalBSON) {
      optionalBSON.native = true;
      return optionalBSON;
    }
  } catch (err) {} // eslint-disable-line

  return BSON;
};
|
||||
|
||||
// Stub used in place of every snappy API function when the optional snappy
// dependency is not installed: any attempt to compress/uncompress throws.
var noSnappyWarning = function() {
  throw new Error(
    'Attempted to use Snappy compression, but Snappy is not installed. Install or disable Snappy compression and try again.'
  );
};
|
||||
|
||||
// Facilitate loading Snappy optionally: return the real `snappy` module when
// installed, otherwise an object with the same API surface whose every
// method throws a helpful error (see noSnappyWarning).
var retrieveSnappy = function() {
  var snappy = null;
  try {
    snappy = require_optional('snappy');
  } catch (error) {} // eslint-disable-line
  if (!snappy) {
    // Not installed: substitute throwing stubs for the full API
    snappy = {
      compress: noSnappyWarning,
      uncompress: noSnappyWarning,
      compressSync: noSnappyWarning,
      uncompressSync: noSnappyWarning
    };
  }
  return snappy;
};
|
||||
|
||||
// Public API of the connection utils module
exports.setProperty = setProperty;
exports.getProperty = getProperty;
exports.getSingleProperty = getSingleProperty;
exports.copy = copy;
exports.debugOptions = debugOptions;
exports.retrieveBSON = retrieveBSON;
exports.retrieveSnappy = retrieveSnappy;
|
813
ProjectNow/NodeServer/node_modules/mongodb-core/lib/cursor.js
generated
vendored
Normal file
813
ProjectNow/NodeServer/node_modules/mongodb-core/lib/cursor.js
generated
vendored
Normal file
@@ -0,0 +1,813 @@
|
||||
'use strict';
|
||||
|
||||
var Logger = require('./connection/logger'),
|
||||
retrieveBSON = require('./connection/utils').retrieveBSON,
|
||||
MongoError = require('./error').MongoError,
|
||||
MongoNetworkError = require('./error').MongoNetworkError,
|
||||
f = require('util').format;
|
||||
|
||||
var BSON = retrieveBSON(),
|
||||
Long = BSON.Long;
|
||||
|
||||
/**
|
||||
* This is a cursor results callback
|
||||
*
|
||||
* @callback resultCallback
|
||||
* @param {error} error An error object. Set to null if no error present
|
||||
* @param {object} document
|
||||
*/
|
||||
|
||||
/**
|
||||
* @fileOverview The **Cursor** class is an internal class that embodies a cursor on MongoDB
|
||||
* allowing for iteration over the results returned from the underlying query.
|
||||
*
|
||||
* **CURSORS Cannot directly be instantiated**
|
||||
* @example
|
||||
* var Server = require('mongodb-core').Server
|
||||
* , ReadPreference = require('mongodb-core').ReadPreference
|
||||
* , assert = require('assert');
|
||||
*
|
||||
* var server = new Server({host: 'localhost', port: 27017});
|
||||
* // Wait for the connection event
|
||||
* server.on('connect', function(server) {
|
||||
* assert.equal(null, err);
|
||||
*
|
||||
* // Execute the write
|
||||
* var cursor = _server.cursor('integration_tests.inserts_example4', {
|
||||
* find: 'integration_tests.example4'
|
||||
* , query: {a:1}
|
||||
* }, {
|
||||
* readPreference: new ReadPreference('secondary');
|
||||
* });
|
||||
*
|
||||
* // Get the first document
|
||||
* cursor.next(function(err, doc) {
|
||||
* assert.equal(null, err);
|
||||
* server.destroy();
|
||||
* });
|
||||
* });
|
||||
*
|
||||
* // Start connecting
|
||||
* server.connect();
|
||||
*/
|
||||
|
||||
/**
 * Creates a new Cursor, not to be used directly
 * @class
 * @param {object} bson An instance of the BSON parser
 * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {{object}|Long} cmd The selector (can be a command or a cursorId)
 * @param {object} [options=null] Optional settings.
 * @param {object} [options.batchSize=1000] Batchsize for the operation
 * @param {array} [options.documents=[]] Initial documents list for cursor
 * @param {object} [options.session=null] Session to associate with the cursor
 * @param {object} [options.transforms=null] Transform methods for the cursor results
 * @param {function} [options.transforms.query] Transform the value returned from the initial query
 * @param {function} [options.transforms.doc] Transform each document returned from Cursor.prototype.next
 * @param {object} topology The server topology instance.
 * @param {object} topologyOptions The server topology options.
 * @return {Cursor} A cursor instance
 * @property {number} cursorBatchSize The current cursorBatchSize for the cursor
 * @property {number} cursorLimit The current cursorLimit for the cursor
 * @property {number} cursorSkip The current cursorSkip for the cursor
 */
var Cursor = function(bson, ns, cmd, options, topology, topologyOptions) {
  options = options || {};

  // Cursor pool
  this.pool = null;
  // Cursor server
  this.server = null;

  // Do we have a not connected handler
  this.disconnectHandler = options.disconnectHandler;

  // Set local values
  this.bson = bson;
  this.ns = ns;
  this.cmd = cmd;
  this.options = options;
  this.topology = topology;

  // All internal state
  this.cursorState = {
    cursorId: null,
    cmd: cmd,
    documents: options.documents || [],
    cursorIndex: 0,
    dead: false,
    killed: false,
    init: false,
    notified: false,
    limit: options.limit || cmd.limit || 0,
    skip: options.skip || cmd.skip || 0,
    batchSize: options.batchSize || cmd.batchSize || 1000,
    currentLimit: 0,
    // Result field name if not a cursor (contains the array of results)
    transforms: options.transforms
  };

  // BUG FIX: the session was previously attached in an `else if` branch of
  // the promoteLongs checks below, so a provided session was silently
  // dropped whenever promoteLongs was set. Attach it unconditionally.
  if (typeof options.session === 'object') {
    this.cursorState.session = options.session;
  }

  // Add promoteLongs to cursor state (topology-level setting wins)
  if (typeof topologyOptions.promoteLongs === 'boolean') {
    this.cursorState.promoteLongs = topologyOptions.promoteLongs;
  } else if (typeof options.promoteLongs === 'boolean') {
    this.cursorState.promoteLongs = options.promoteLongs;
  }

  // Add promoteValues to cursor state (topology-level setting wins)
  if (typeof topologyOptions.promoteValues === 'boolean') {
    this.cursorState.promoteValues = topologyOptions.promoteValues;
  } else if (typeof options.promoteValues === 'boolean') {
    this.cursorState.promoteValues = options.promoteValues;
  }

  // Add promoteBuffers to cursor state (topology-level setting wins)
  if (typeof topologyOptions.promoteBuffers === 'boolean') {
    this.cursorState.promoteBuffers = topologyOptions.promoteBuffers;
  } else if (typeof options.promoteBuffers === 'boolean') {
    this.cursorState.promoteBuffers = options.promoteBuffers;
  }

  if (topologyOptions.reconnect) {
    this.cursorState.reconnect = topologyOptions.reconnect;
  }

  // Logger
  this.logger = Logger('Cursor', topologyOptions);

  //
  // Did we pass in a cursor id
  if (typeof cmd === 'number') {
    this.cursorState.cursorId = Long.fromNumber(cmd);
    this.cursorState.lastCursorId = this.cursorState.cursorId;
  } else if (cmd instanceof Long) {
    this.cursorState.cursorId = cmd;
    this.cursorState.lastCursorId = cmd;
  }
};
|
||||
|
||||
// Set the batch size used for subsequent getMore round trips.
Cursor.prototype.setCursorBatchSize = function(value) {
  this.cursorState.batchSize = value;
};

// Current batch size.
Cursor.prototype.cursorBatchSize = function() {
  return this.cursorState.batchSize;
};

// Set the maximum number of documents this cursor will return.
Cursor.prototype.setCursorLimit = function(value) {
  this.cursorState.limit = value;
};

// Current limit.
Cursor.prototype.cursorLimit = function() {
  return this.cursorState.limit;
};

// Set how many documents to skip before returning results.
Cursor.prototype.setCursorSkip = function(value) {
  this.cursorState.skip = value;
};

// Current skip.
Cursor.prototype.cursorSkip = function() {
  return this.cursorState.skip;
};
|
||||
|
||||
/**
 * End the session attached to this cursor, when there is one and this cursor
 * owns it (or `options.force` is set). Always invokes `callback` when given.
 *
 * @param {object} [options] Optional settings ({ force: boolean })
 * @param {function} [callback] Invoked once the session is ended (or immediately)
 * @return {boolean} true when a session was actually ended
 */
Cursor.prototype._endSession = function(options, callback) {
  // Support the (callback) call signature
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }
  options = options || {};

  const session = this.cursorState.session;
  const shouldEnd = session != null && (options.force || session.owner === this);

  if (shouldEnd) {
    // Detach before ending so re-entrant calls see no session
    this.cursorState.session = undefined;
    session.endSession(callback);
    return true;
  }

  callback?.();
  return false;
};
|
||||
|
||||
//
|
||||
// Handle callback (including any exceptions thrown)
|
||||
//
// Invoke a user-supplied callback; any exception it throws is re-thrown
// asynchronously on the next tick so it surfaces as an uncaught error
// instead of being swallowed inside driver internals.
function handleCallback(callback, err, result) {
  try {
    callback(err, result);
  } catch (callbackError) {
    process.nextTick(() => {
      throw callbackError;
    });
  }
}
|
||||
|
||||
// Internal methods
|
||||
/**
 * Issue the cursor's initial query/command to the server and seed the
 * cursor state (cursorId, lastCursorId, buffered documents) from the reply.
 * Handles both command-style replies (with a `cursor` sub-document or a
 * `result` array) and legacy OP_QUERY style replies.
 *
 * @param {Function} callback invoked with (err, rawReplyMessage)
 */
Cursor.prototype._find = function(callback) {
  var self = this;

  if (self.logger.isDebug()) {
    self.logger.debug(
      f(
        'issue initial query [%s] with flags [%s]',
        JSON.stringify(self.cmd),
        JSON.stringify(self.query)
      )
    );
  }

  // Parses the server reply and populates self.cursorState before
  // handing the raw message back to the caller.
  var queryCallback = function(err, r) {
    if (err) return callback(err);

    // Get the raw message
    var result = r.message;

    // Query failure bit set; first document carries the error details
    if (result.queryFailure) {
      return callback(new MongoError(result.documents[0]), null);
    }

    // Check if we have a command cursor (single-document command reply)
    if (
      Array.isArray(result.documents) &&
      result.documents.length === 1 &&
      (!self.cmd.find || (self.cmd.find && self.cmd.virtual === false)) &&
      // NOTE(review): this compares the cursor field itself (not `typeof`)
      // against 'string' — mirrors upstream behavior; verify before changing
      (result.documents[0].cursor !== 'string' ||
        result.documents[0]['$err'] ||
        result.documents[0]['errmsg'] ||
        Array.isArray(result.documents[0].result))
    ) {
      // We have an error document; return the error
      if (result.documents[0]['$err'] || result.documents[0]['errmsg']) {
        return callback(new MongoError(result.documents[0]), null);
      }

      // We have a cursor document
      if (result.documents[0].cursor != null && typeof result.documents[0].cursor !== 'string') {
        var id = result.documents[0].cursor.id;
        // If we have a namespace change set the new namespace for getmores
        if (result.documents[0].cursor.ns) {
          self.ns = result.documents[0].cursor.ns;
        }
        // Promote id to long if needed
        self.cursorState.cursorId = typeof id === 'number' ? Long.fromNumber(id) : id;
        self.cursorState.lastCursorId = self.cursorState.cursorId;
        // If we have a firstBatch set it
        if (Array.isArray(result.documents[0].cursor.firstBatch)) {
          self.cursorState.documents = result.documents[0].cursor.firstBatch; //.reverse();
        }

        // Return after processing command cursor
        return callback(null, result);
      }

      // Single-shot command result (no server-side cursor remains open)
      if (Array.isArray(result.documents[0].result)) {
        self.cursorState.documents = result.documents[0].result;
        self.cursorState.cursorId = Long.ZERO;
        return callback(null, result);
      }
    }

    // Otherwise fall back to regular find path
    self.cursorState.cursorId = result.cursorId;
    self.cursorState.documents = result.documents;
    self.cursorState.lastCursorId = result.cursorId;

    // Transform the results with passed in transformation method if provided
    if (self.cursorState.transforms && typeof self.cursorState.transforms.query === 'function') {
      self.cursorState.documents = self.cursorState.transforms.query(result);
    }

    // Return callback
    callback(null, result);
  };

  // Options passed to the pool
  var queryOptions = {};

  // If we have a raw query decorate the function
  if (self.options.raw || self.cmd.raw) {
    // queryCallback.raw = self.options.raw || self.cmd.raw;
    queryOptions.raw = self.options.raw || self.cmd.raw;
  }

  // Do we have documentsReturnedIn set on the query
  if (typeof self.query.documentsReturnedIn === 'string') {
    // queryCallback.documentsReturnedIn = self.query.documentsReturnedIn;
    queryOptions.documentsReturnedIn = self.query.documentsReturnedIn;
  }

  // Add promote Long value if defined
  if (typeof self.cursorState.promoteLongs === 'boolean') {
    queryOptions.promoteLongs = self.cursorState.promoteLongs;
  }

  // Add promote values if defined
  if (typeof self.cursorState.promoteValues === 'boolean') {
    queryOptions.promoteValues = self.cursorState.promoteValues;
  }

  // Add promote buffers if defined
  if (typeof self.cursorState.promoteBuffers === 'boolean') {
    queryOptions.promoteBuffers = self.cursorState.promoteBuffers;
  }

  // Forward the session so the query is tagged with its lsid
  if (typeof self.cursorState.session === 'object') {
    queryOptions.session = self.cursorState.session;
  }

  // Write the initial command out
  self.server.s.pool.write(self.query, queryOptions, queryCallback);
};
|
||||
|
||||
/**
 * Fetch the next batch of documents for this cursor via getMore,
 * delegating the wire-level work to the server's wire protocol handler.
 *
 * @param {Function} callback invoked with (err, doc, connection)
 */
Cursor.prototype._getmore = function(callback) {
  if (this.logger.isDebug())
    this.logger.debug(f('schedule getMore call for query [%s]', JSON.stringify(this.query)));

  // Determine if it's a raw query
  const raw = this.options.raw || this.cmd.raw;

  // Clamp the batch size so we never fetch past a user-imposed limit.
  const state = this.cursorState;
  let batchSize = state.batchSize;
  if (state.limit > 0 && state.currentLimit + batchSize > state.limit) {
    batchSize = state.limit - state.currentLimit;
  }

  // Issue the getMore over the server's default pool.
  this.server.wireProtocolHandler.getMore(
    this.bson,
    this.ns,
    state,
    batchSize,
    raw,
    this.server.s.pool,
    this.options,
    callback
  );
};
|
||||
|
||||
/**
 * Clone the cursor
 * @method
 * @return {Cursor} a fresh, un-iterated cursor over the same namespace,
 *   command and options
 */
Cursor.prototype.clone = function() {
  const { ns, cmd, options } = this;
  return this.topology.cursor(ns, cmd, options);
};
|
||||
|
||||
/**
 * Checks if the cursor is dead
 * @method
 * @return {boolean} A boolean signifying if the cursor is dead or not
 */
Cursor.prototype.isDead = function() {
  const { dead } = this.cursorState;
  return dead === true;
};
|
||||
|
||||
/**
 * Checks if the cursor was killed by the application
 * @method
 * @return {boolean} A boolean signifying if the cursor was killed by the application
 */
Cursor.prototype.isKilled = function() {
  const { killed } = this.cursorState;
  return killed === true;
};
|
||||
|
||||
/**
 * Checks if the cursor notified its caller about its death
 * @method
 * @return {boolean} A boolean signifying if the cursor notified the callback
 */
Cursor.prototype.isNotified = function() {
  const { notified } = this.cursorState;
  return notified === true;
};
|
||||
|
||||
/**
 * Returns current buffered documents length
 * @method
 * @return {number} The number of items in the buffered documents
 */
Cursor.prototype.bufferedCount = function() {
  const state = this.cursorState;
  return state.documents.length - state.cursorIndex;
};
|
||||
|
||||
/**
 * Returns current buffered documents
 * @method
 * @param {number} [number] maximum number of buffered documents to read
 * @return {Array} An array of buffered documents
 */
Cursor.prototype.readBufferedDocuments = function(number) {
  const state = this.cursorState;
  const unread = state.documents.length - state.cursorIndex;
  const count = number < unread ? number : unread;
  let elements = state.documents.slice(state.cursorIndex, state.cursorIndex + count);

  // Apply the per-document transform when one was configured.
  if (state.transforms && typeof state.transforms.doc === 'function') {
    elements = elements.map((element) => state.transforms.doc(element));
  }

  // Honor the user-imposed limit: truncate the batch and kill the
  // server-side cursor once the limit would be exceeded.
  if (state.limit > 0 && state.currentLimit + elements.length > state.limit) {
    elements = elements.slice(0, state.limit - state.currentLimit);
    this.kill();
  }

  // Account for what we handed out.
  state.currentLimit = state.currentLimit + elements.length;
  state.cursorIndex = state.cursorIndex + elements.length;

  return elements;
};
|
||||
|
||||
/**
 * Kill the cursor
 * @method
 * @param {resultCallback} callback A callback function
 */
Cursor.prototype.kill = function(callback) {
  // Mark the cursor dead/killed and drop any buffered documents right away.
  this.cursorState.dead = true;
  this.cursorState.killed = true;
  this.cursorState.documents = [];

  // With no live server-side cursor there is nothing to kill remotely.
  const cursorId = this.cursorState.cursorId;
  if (cursorId == null || cursorId.isZero() || this.cursorState.init === false) {
    if (callback) callback(null, null);
    return;
  }

  // Issue the killCursors command over the server's default pool.
  this.server.wireProtocolHandler.killCursor(
    this.bson,
    this.ns,
    this.cursorState,
    this.server.s.pool,
    callback
  );
};
|
||||
|
||||
/**
 * Resets the cursor
 * @method
 * @return {null}
 */
Cursor.prototype.rewind = function() {
  // A cursor that was never initialized has nothing to reset.
  if (!this.cursorState.init) return;

  // Make sure any live server-side cursor is killed first.
  if (!this.cursorState.dead) {
    this.kill();
  }

  // Restore the state fields touched during iteration to their defaults.
  Object.assign(this.cursorState, {
    currentLimit: 0,
    init: false,
    dead: false,
    killed: false,
    notified: false,
    documents: [],
    cursorId: null,
    cursorIndex: 0
  });
};
|
||||
|
||||
/**
 * Validate if the pool is dead; when it is, mark the cursor killed,
 * notify it, and deliver a MongoNetworkError to the callback.
 * @return {boolean} true when the connection pool was destroyed
 */
function isConnectionDead(self, callback) {
  const pool = self.pool;
  if (!pool || !pool.isDestroyed()) {
    return false;
  }

  self.cursorState.killed = true;
  const err = new MongoNetworkError(
    f('connection to host %s:%s was destroyed', pool.host, pool.port)
  );
  _setCursorNotifiedImpl(self, () => callback(err));
  return true;
}
|
||||
|
||||
/**
 * Validate if the cursor is dead but was not explicitly killed by the user;
 * in that case mark it killed and notify with a null result.
 * @return {boolean} true when the dead-but-unkilled case was handled
 */
function isCursorDeadButNotkilled(self, callback) {
  const state = self.cursorState;
  // Only handle the "dead but not yet killed" combination.
  if (!state.dead || state.killed) {
    return false;
  }

  state.killed = true;
  setCursorNotified(self, callback);
  return true;
}
|
||||
|
||||
/**
 * Validate if the cursor is dead and was killed by the user; calling next
 * on such a cursor is an error.
 * @return {boolean} true when the dead-and-killed case was handled
 */
function isCursorDeadAndKilled(self, callback) {
  const state = self.cursorState;
  if (!(state.dead && state.killed)) {
    return false;
  }

  handleCallback(callback, new MongoError('cursor is dead'));
  return true;
}
|
||||
|
||||
/**
 * Validate if the cursor was killed by the user; when it was, notify the
 * caller with a null result.
 * @return {boolean} true when the killed case was handled
 */
function isCursorKilled(self, callback) {
  if (!self.cursorState.killed) {
    return false;
  }

  setCursorNotified(self, callback);
  return true;
}
|
||||
|
||||
/**
 * Mark cursor as being dead, then notify the caller.
 */
function setCursorDeadAndNotified(self, callback) {
  self.cursorState.dead = true;
  setCursorNotified(self, callback);
}
|
||||
|
||||
/**
 * Mark cursor as being notified and deliver a (null, null) result to the
 * caller via handleCallback.
 */
function setCursorNotified(self, callback) {
  _setCursorNotifiedImpl(self, () => handleCallback(callback, null, null));
}
|
||||
|
||||
/**
 * Core of cursor notification: flag the cursor as notified, drop buffered
 * documents, and end any implicit session owned by the cursor before
 * invoking the continuation.
 */
function _setCursorNotifiedImpl(self, callback) {
  self.cursorState.notified = true;
  self.cursorState.documents = [];
  self.cursorState.cursorIndex = 0;

  if (self._endSession) {
    self._endSession(undefined, () => callback());
    return;
  }
  callback();
}
|
||||
|
||||
/**
 * Core cursor state machine: produce the next document for `self`.
 * On first call it lazily initializes the cursor (selects a server, builds
 * the wire-protocol query); afterwards it either serves a buffered
 * document, issues the initial find, issues a getMore, or transitions the
 * cursor to dead/notified when exhausted.
 *
 * @param {Cursor} self the cursor being iterated
 * @param {Function} callback invoked with (err, doc)
 */
var nextFunction = function(self, callback) {
  // We have notified about it
  if (self.cursorState.notified) {
    return callback(new Error('cursor is exhausted'));
  }

  // Cursor is killed return null
  if (isCursorKilled(self, callback)) return;

  // Cursor is dead but not marked killed, return null
  if (isCursorDeadButNotkilled(self, callback)) return;

  // We have a dead and killed cursor, attempting to call next should error
  if (isCursorDeadAndKilled(self, callback)) return;

  // We have just started the cursor
  if (!self.cursorState.init) {
    // Topology is not connected, save the call in the provided store to be
    // Executed at some point when the handler deems it's reconnected
    if (!self.topology.isConnected(self.options)) {
      // Only need this for single server, because repl sets and mongos
      // will always continue trying to reconnect
      if (self.topology._type === 'server' && !self.topology.s.options.reconnect) {
        // Reconnect is disabled, so we'll never reconnect
        return callback(new MongoError('no connection available'));
      }

      if (self.disconnectHandler != null) {
        if (self.topology.isDestroyed()) {
          // Topology was destroyed, so don't try to wait for it to reconnect
          return callback(new MongoError('Topology was destroyed'));
        }

        // Queue this next() call to be replayed after reconnect
        return self.disconnectHandler.addObjectAndMethod(
          'cursor',
          self,
          'next',
          [callback],
          callback
        );
      }
    }

    try {
      self.server = self.topology.getServer(self.options);
    } catch (err) {
      // Handle the error and add object to next method call
      if (self.disconnectHandler != null) {
        return self.disconnectHandler.addObjectAndMethod(
          'cursor',
          self,
          'next',
          [callback],
          callback
        );
      }

      // Otherwise return the error
      return callback(err);
    }

    // Set as init
    self.cursorState.init = true;

    // Server does not support collation before wire version 5
    if (self.cmd && self.cmd.collation && self.server.ismaster.maxWireVersion < 5) {
      return callback(new MongoError(f('server %s does not support collation', self.server.name)));
    }

    // Build the wire-protocol query for this command
    try {
      self.query = self.server.wireProtocolHandler.command(
        self.bson,
        self.ns,
        self.cmd,
        self.cursorState,
        self.topology,
        self.options
      );
    } catch (err) {
      return callback(err);
    }
  }

  // If we don't have a cursorId execute the first query
  if (self.cursorState.cursorId == null) {
    // Check if pool is dead and return if not possible to
    // execute the query against the db
    if (isConnectionDead(self, callback)) return;

    // Check if topology is destroyed
    if (self.topology.isDestroyed())
      return callback(
        new MongoNetworkError('connection destroyed, not possible to instantiate cursor')
      );

    // query, cmd, options, cursorState, callback
    self._find(function(err) {
      if (err) return handleCallback(callback, err, null);

      // Cursor fully consumed on the server; release any implicit session
      if (self.cursorState.cursorId && self.cursorState.cursorId.isZero() && self._endSession) {
        self._endSession();
      }

      // Empty non-tailable result set: mark notified and finish
      if (
        self.cursorState.documents.length === 0 &&
        self.cursorState.cursorId &&
        self.cursorState.cursorId.isZero() &&
        !self.cmd.tailable &&
        !self.cmd.awaitData
      ) {
        return setCursorNotified(self, callback);
      }

      // Re-enter the state machine to serve the first document
      nextFunction(self, callback);
    });
  } else if (
    self.cursorState.limit > 0 &&
    self.cursorState.currentLimit >= self.cursorState.limit
  ) {
    // Ensure we kill the cursor on the server
    self.kill();
    // Set cursor in dead and notified state
    return setCursorDeadAndNotified(self, callback);
  } else if (
    self.cursorState.cursorIndex === self.cursorState.documents.length &&
    !Long.ZERO.equals(self.cursorState.cursorId)
  ) {
    // Buffer drained but the server cursor is still live: issue a getMore.
    // Ensure an empty cursor state
    self.cursorState.documents = [];
    self.cursorState.cursorIndex = 0;

    // Check if topology is destroyed
    if (self.topology.isDestroyed())
      return callback(
        new MongoNetworkError('connection destroyed, not possible to instantiate cursor')
      );

    // Check if connection is dead and return if not possible to
    // execute a getmore on this connection
    if (isConnectionDead(self, callback)) return;

    // Execute the next get more
    self._getmore(function(err, doc, connection) {
      if (err) return handleCallback(callback, err);

      // Server cursor exhausted; release any implicit session
      if (self.cursorState.cursorId && self.cursorState.cursorId.isZero() && self._endSession) {
        self._endSession();
      }

      // Save the returned connection to ensure all getMore's fire over the same connection
      self.connection = connection;

      // Tailable cursor getMore result, notify owner about it
      // No attempt is made here to retry, this is left to the user of the
      // core module to handle to keep core simple
      if (
        self.cursorState.documents.length === 0 &&
        self.cmd.tailable &&
        Long.ZERO.equals(self.cursorState.cursorId)
      ) {
        // No more documents in the tailed cursor
        return handleCallback(
          callback,
          new MongoError({
            message: 'No more documents in tailed cursor',
            tailable: self.cmd.tailable,
            awaitData: self.cmd.awaitData
          })
        );
      } else if (
        self.cursorState.documents.length === 0 &&
        self.cmd.tailable &&
        !Long.ZERO.equals(self.cursorState.cursorId)
      ) {
        // Tailable cursor returned nothing but is still open: poll again
        return nextFunction(self, callback);
      }

      if (self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
        return setCursorDeadAndNotified(self, callback);
      }

      nextFunction(self, callback);
    });
  } else if (
    self.cursorState.documents.length === self.cursorState.cursorIndex &&
    self.cmd.tailable &&
    Long.ZERO.equals(self.cursorState.cursorId)
  ) {
    // Tailable cursor fully drained and closed on the server
    return handleCallback(
      callback,
      new MongoError({
        message: 'No more documents in tailed cursor',
        tailable: self.cmd.tailable,
        awaitData: self.cmd.awaitData
      })
    );
  } else if (
    self.cursorState.documents.length === self.cursorState.cursorIndex &&
    Long.ZERO.equals(self.cursorState.cursorId)
  ) {
    // Regular cursor fully drained: transition to dead + notified
    setCursorDeadAndNotified(self, callback);
  } else {
    if (self.cursorState.limit > 0 && self.cursorState.currentLimit >= self.cursorState.limit) {
      // Ensure we kill the cursor on the server
      self.kill();
      // Set cursor in dead and notified state
      return setCursorDeadAndNotified(self, callback);
    }

    // Increment the current cursor limit
    self.cursorState.currentLimit += 1;

    // Get the document
    var doc = self.cursorState.documents[self.cursorState.cursorIndex++];

    // Doc overflow
    if (!doc || doc.$err) {
      // Ensure we kill the cursor on the server
      self.kill();
      // Set cursor in dead and notified state
      return setCursorDeadAndNotified(self, function() {
        handleCallback(callback, new MongoError(doc ? doc.$err : undefined));
      });
    }

    // Transform the doc with passed in transformation method if provided
    if (self.cursorState.transforms && typeof self.cursorState.transforms.doc === 'function') {
      doc = self.cursorState.transforms.doc(doc);
    }

    // Return the document
    handleCallback(callback, null, doc);
  }
};
|
||||
|
||||
/**
 * Retrieve the next document from the cursor
 * @method
 * @param {resultCallback} callback A callback function
 */
Cursor.prototype.next = function(callback) {
  // Delegate to the shared cursor state machine.
  nextFunction(this, callback);
};
|
||||
|
||||
// The Cursor constructor is this module's only export.
module.exports = Cursor;
|
82
ProjectNow/NodeServer/node_modules/mongodb-core/lib/error.js
generated
vendored
Normal file
82
ProjectNow/NodeServer/node_modules/mongodb-core/lib/error.js
generated
vendored
Normal file
@@ -0,0 +1,82 @@
|
||||
'use strict';
|
||||
|
||||
var util = require('util');
|
||||
|
||||
/**
 * Creates a new MongoError
 * @class
 * @augments Error
 * @param {Error|string|object} message The error message, an Error to wrap,
 *   or a server error document (with `errmsg`/`$err`/`code` fields)
 * @property {string} message The error message
 * @property {string} stack The error call stack
 * @return {MongoError} A MongoError instance
 */
function MongoError(message) {
  // Mirror native Error construction semantics.
  var tmp = Error.apply(this, arguments);
  tmp.name = this.name = 'MongoError';

  if (message instanceof Error) {
    // Wrap an existing error: reuse its message and original stack.
    this.message = message.message;
    this.stack = message.stack;
    return;
  }

  if (typeof message === 'string') {
    this.message = message;
  } else {
    // Server-style error document: pick the best message field and copy
    // every property (code, errmsg, ...) onto this instance.
    this.message = message.message || message.errmsg || message.$err || 'n/a';
    for (var key in message) {
      this[key] = message[key];
    }
  }

  // Capture a stack trace at the construction site when supported.
  if (Error.captureStackTrace) {
    Error.captureStackTrace(this, this.constructor);
  }
}
util.inherits(MongoError, Error);
|
||||
|
||||
/**
 * Creates a new MongoError object
 * @method
 * @param {Error|string|object} options The options used to create the error.
 * @return {MongoError} A MongoError instance
 * @deprecated Use `new MongoError()` instead.
 */
MongoError.create = function(options) {
  const error = new MongoError(options);
  return error;
};
|
||||
|
||||
/**
 * Creates a new MongoNetworkError, raised for socket/transport level
 * failures (destroyed pools, dropped connections).
 * @class
 * @param {Error|string|object} message The error message
 * @property {string} message The error message
 * @property {string} stack The error call stack
 * @return {MongoNetworkError} A MongoNetworkError instance
 * @extends {MongoError}
 */
function MongoNetworkError(message) {
  MongoError.call(this, message);
  this.name = 'MongoNetworkError';
}
util.inherits(MongoNetworkError, MongoError);
|
||||
|
||||
/**
 * An error used when attempting to parse a value (like a connection string)
 *
 * @class
 * @param {Error|string|object} message The error message
 * @property {string} message The error message
 * @return {MongoParseError} A MongoParseError instance
 * @extends {MongoError}
 */
function MongoParseError(message) {
  MongoError.call(this, message);
  this.name = 'MongoParseError';
}
util.inherits(MongoParseError, MongoError);
|
||||
|
||||
// Public error hierarchy exposed by mongodb-core.
module.exports = {
  MongoError: MongoError,
  MongoNetworkError: MongoNetworkError,
  MongoParseError: MongoParseError
};
|
404
ProjectNow/NodeServer/node_modules/mongodb-core/lib/sessions.js
generated
vendored
Normal file
404
ProjectNow/NodeServer/node_modules/mongodb-core/lib/sessions.js
generated
vendored
Normal file
@@ -0,0 +1,404 @@
|
||||
'use strict';
|
||||
|
||||
const retrieveBSON = require('./connection/utils').retrieveBSON;
|
||||
const EventEmitter = require('events');
|
||||
const BSON = retrieveBSON();
|
||||
const Binary = BSON.Binary;
|
||||
const uuidV4 = require('./utils').uuidV4;
|
||||
const MongoError = require('./error').MongoError;
|
||||
const MongoNetworkError = require('./error').MongoNetworkError;
|
||||
|
||||
/**
 * Verify that a client session still holds a live server session.
 * When it does not: the error is passed to `callback` (returning false)
 * if one was provided, otherwise it is thrown.
 *
 * @param {ClientSession} session the session to check
 * @param {Function} [callback] optional callback to receive the error
 * @return {Boolean} true when the session is alive
 */
function assertAlive(session, callback) {
  if (session.serverSession != null) {
    return true;
  }

  const error = new MongoError('Cannot use a session that has ended');
  if (typeof callback === 'function') {
    callback(error, null);
    return false;
  }

  throw error;
}
|
||||
|
||||
/** A class representing a client session on the server */
class ClientSession extends EventEmitter {
  /**
   * Create a client session.
   * WARNING: not meant to be instantiated directly
   *
   * @param {Topology} topology The current client's topology
   * @param {ServerSessionPool} sessionPool The server session pool
   * @param {Object} [options] Optional settings
   * @param {Boolean} [options.causalConsistency] Whether causal consistency should be enabled on this session
   * @param {Boolean} [options.autoStartTransaction=false] When enabled this session automatically starts a transaction with the provided defaultTransactionOptions.
   * @param {Object} [options.defaultTransactionOptions] The default TransactionOptions to use for transactions started on this session.
   * @param {Object} [clientOptions] Optional settings provided when creating a client in the porcelain driver
   * @throws {Error} when topology or a valid ServerSessionPool is missing
   */
  constructor(topology, sessionPool, options, clientOptions) {
    super();

    if (topology == null) {
      throw new Error('ClientSession requires a topology');
    }

    if (sessionPool == null || !(sessionPool instanceof ServerSessionPool)) {
      throw new Error('ClientSession requires a ServerSessionPool');
    }

    options = options || {};
    this.topology = topology;
    this.sessionPool = sessionPool;
    this.hasEnded = false;
    // Each client session wraps a pooled server session.
    this.serverSession = sessionPool.acquire();
    this.clientOptions = clientOptions;

    // Causal consistency defaults to enabled unless explicitly disabled.
    this.supports = {
      causalConsistency:
        typeof options.causalConsistency !== 'undefined' ? options.causalConsistency : true
    };

    // NOTE(review): redundant re-defaulting; options was already normalized above
    options = options || {};
    if (typeof options.initialClusterTime !== 'undefined') {
      this.clusterTime = options.initialClusterTime;
    } else {
      this.clusterTime = null;
    }

    this.operationTime = null;
    this.explicit = !!options.explicit;
    this.owner = options.owner;
    // A non-null transactionOptions is the marker for "in a transaction"
    // (see inTransaction()).
    this.transactionOptions = null;
    this.autoStartTransaction = options.autoStartTransaction;
    this.defaultTransactionOptions = Object.assign({}, options.defaultTransactionOptions);
  }

  /**
   * Ends this session on the server
   *
   * @param {Object} [options] Optional settings
   * @param {Function} [callback] Optional callback for completion of this operation
   */
  endSession(options, callback) {
    if (typeof options === 'function') (callback = options), (options = {});
    options = options || {};

    // Ending an already-ended session is a no-op.
    if (this.hasEnded) {
      if (typeof callback === 'function') callback(null, null);
      return;
    }

    // An open transaction is aborted before the session goes away.
    if (this.serverSession && this.inTransaction()) {
      this.abortTransaction(); // pass in callback?
    }

    // mark the session as ended, and emit a signal
    this.hasEnded = true;
    this.emit('ended', this);

    // release the server session back to the pool
    this.sessionPool.release(this.serverSession);

    // spec indicates that we should ignore all errors for `endSessions`
    if (typeof callback === 'function') callback(null, null);
  }

  /**
   * Advances the operationTime for a ClientSession.
   *
   * @param {object} operationTime the `BSON.Timestamp` of the operation type it is desired to advance to
   */
  advanceOperationTime(operationTime) {
    if (this.operationTime == null) {
      this.operationTime = operationTime;
      return;
    }

    // Operation time only ever moves forward.
    if (operationTime.greaterThan(this.operationTime)) {
      this.operationTime = operationTime;
    }
  }

  /**
   * Used to determine if this session equals another
   * @param {ClientSession} session the session to compare against
   * @returns {Boolean} true when both sessions carry the same lsid bytes
   */
  equals(session) {
    if (!(session instanceof ClientSession)) {
      return false;
    }

    // assumes `this.id` exposes the session's lsid document — the accessor
    // is defined outside this chunk; verify
    return this.id.id.buffer.equals(session.id.id.buffer);
  }

  /**
   * Increment the transaction number on the internal ServerSession
   */
  incrementTransactionNumber() {
    this.serverSession.txnNumber++;
  }

  /**
   * Increment the statement id on the internal ServerSession
   *
   * @param {Number} [operationCount=1] the number of operations performed
   */
  incrementStatementId(operationCount) {
    operationCount = operationCount || 1;
    this.serverSession.stmtId += operationCount;
  }

  /**
   * @returns {Boolean} whether this session is currently in a transaction or not
   */
  inTransaction() {
    return this.transactionOptions != null;
  }

  /**
   * Starts a new transaction with the given options.
   *
   * @param {Object} options Optional settings
   * @param {ReadConcern} [options.readConcern] The readConcern to use for this transaction
   * @param {WriteConcern} [options.writeConcern] The writeConcern to use for this transaction
   * @throws {MongoError} when a transaction is already in progress
   */
  startTransaction(options) {
    assertAlive(this);
    if (this.inTransaction()) {
      throw new MongoError('Transaction already in progress');
    }

    // increment txnNumber and reset stmtId to zero.
    this.serverSession.txnNumber += 1;
    this.serverSession.stmtId = 0;

    // set transaction options, we will use this to determine if we are in a transaction
    this.transactionOptions = Object.assign({}, options || this.defaultTransactionOptions);
  }

  /**
   * Commits the currently active transaction in this session.
   *
   * @param {Function} [callback] optional callback for completion of this operation
   * @return {Promise} A promise is returned if no callback is provided
   */
  commitTransaction(callback) {
    if (typeof callback === 'function') {
      endTransaction(this, 'commitTransaction', callback);
      return;
    }

    return new Promise((resolve, reject) => {
      endTransaction(
        this,
        'commitTransaction',
        (err, reply) => (err ? reject(err) : resolve(reply))
      );
    });
  }

  /**
   * Aborts the currently active transaction in this session.
   *
   * @param {Function} [callback] optional callback for completion of this operation
   * @return {Promise} A promise is returned if no callback is provided
   */
  abortTransaction(callback) {
    if (typeof callback === 'function') {
      endTransaction(this, 'abortTransaction', callback);
      return;
    }

    return new Promise((resolve, reject) => {
      endTransaction(
        this,
        'abortTransaction',
        (err, reply) => (err ? reject(err) : resolve(reply))
      );
    });
  }
}
|
||||
|
||||
// Server error codes considered safe to retry for retryable writes.
// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms
const RETRYABLE_ERROR_CODES = new Set([
  6, // HostUnreachable
  7, // HostNotFound
  64, // WriteConcernFailed
  89, // NetworkTimeout
  91, // ShutdownInProgress
  189, // PrimarySteppedDown
  9001, // SocketException
  11600, // InterruptedAtShutdown
  11602, // InterruptedDueToReplStateChange
  10107, // NotMaster
  13435, // NotMasterNoSlaveOk
  13436 // NotMasterOrSecondary
]);
|
||||
|
||||
function isRetryableError(error) {
|
||||
if (
|
||||
RETRYABLE_ERROR_CODES.has(error.code) ||
|
||||
error instanceof MongoNetworkError ||
|
||||
error.message.match(/not master/) ||
|
||||
error.message.match(/node is recovering/)
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
// Clears per-transaction state on the session once a transaction has finished
// (or was determined never to have started server-side).
function resetTransactionState(clientSession) {
  clientSession.transactionOptions = null;
}
|
||||
|
||||
/**
 * Shared implementation behind `commitTransaction` and `abortTransaction`:
 * builds the corresponding admin command for the session's active
 * transaction and sends it, retrying once on a retryable error.
 *
 * @param {ClientSession} clientSession the session whose transaction is ending
 * @param {string} commandName either 'commitTransaction' or 'abortTransaction'
 * @param {Function} callback invoked with (err, reply) once the command settles
 */
function endTransaction(clientSession, commandName, callback) {
  // NOTE(review): assertAlive is defined elsewhere in this module —
  // presumably it invokes `callback` with an error for an ended session;
  // confirm against its definition.
  if (!assertAlive(clientSession, callback)) {
    // checking result in case callback was called
    return;
  }

  // A transaction must be in progress, unless the session is configured to
  // auto-start one.
  if (!clientSession.inTransaction()) {
    if (clientSession.autoStartTransaction) {
      clientSession.startTransaction();
    } else {
      callback(new MongoError('No transaction started'));
      return;
    }
  }

  if (clientSession.serverSession.stmtId === 0) {
    // The server transaction was never started.
    resetTransactionState(clientSession);
    callback(null, null);
    return;
  }

  // Build { commitTransaction: 1 } / { abortTransaction: 1 }, attaching a
  // write concern from the transaction options or, failing that, from the
  // client's `w` option.
  const command = { [commandName]: 1 };
  if (clientSession.transactionOptions.writeConcern) {
    Object.assign(command, { writeConcern: clientSession.transactionOptions.writeConcern });
  } else if (clientSession.clientOptions && clientSession.clientOptions.w) {
    Object.assign(command, { writeConcern: { w: clientSession.clientOptions.w } });
  }

  // Always clear transaction state before reporting the outcome.
  function commandHandler(e, r) {
    resetTransactionState(clientSession);
    callback(e, r);
  }

  // Only commitTransaction errors are surfaced to the caller; abort errors
  // are suppressed (the caller cannot act on them).
  function transactionError(err) {
    return commandName === 'commitTransaction' ? err : null;
  }

  // send the command
  clientSession.topology.command(
    'admin.$cmd',
    command,
    { session: clientSession },
    (err, reply) => {
      // Retry exactly once when the first attempt failed with a
      // retryable error.
      if (err && isRetryableError(err)) {
        return clientSession.topology.command(
          'admin.$cmd',
          command,
          { session: clientSession },
          (_err, _reply) => commandHandler(transactionError(_err), _reply)
        );
      }

      commandHandler(transactionError(err), reply);
    }
  );
}
|
||||
|
||||
Object.defineProperty(ClientSession.prototype, 'id', {
|
||||
get: function() {
|
||||
return this.serverSession.id;
|
||||
}
|
||||
});
|
||||
|
||||
/**
 * Represents a server-side logical session: an `lsid` identifier document
 * plus the bookkeeping needed for pooling and transaction numbering.
 */
class ServerSession {
  constructor() {
    // `lsid` document sent with commands run under this session.
    this.id = { id: new Binary(uuidV4(), Binary.SUBTYPE_UUID) };
    this.lastUse = Date.now();
    this.txnNumber = 0;
  }

  /**
   * Determines whether this session has been idle longer than the server's
   * logical session timeout allows (minus one minute of safety margin).
   *
   * @param {number} sessionTimeoutMinutes the server's logicalSessionTimeoutMinutes
   * @return {boolean} true when the session should be discarded
   */
  hasTimedOut(sessionTimeoutMinutes) {
    // Convert the idle time from milliseconds to whole minutes.
    // Fix: the previous implementation reduced the idle time modulo one day
    // and then one hour ((ms % 86400000) % 3600000), so a session idle for
    // 61 minutes appeared idle for only 1 minute and was never discarded.
    const idleTimeMinutes = Math.round((Date.now() - this.lastUse) / 60000);

    return idleTimeMinutes > sessionTimeoutMinutes - 1;
  }
}
|
||||
|
||||
/**
 * A pool of server sessions, kept so that `lsid`s can be reused across
 * operations instead of being created per operation.
 */
class ServerSessionPool {
  /**
   * @param {Object} topology the topology used to end sessions and to read
   *     the server's logicalSessionTimeoutMinutes
   */
  constructor(topology) {
    if (topology == null) {
      throw new Error('ServerSessionPool requires a topology');
    }

    this.topology = topology;
    this.sessions = [];
  }

  /**
   * Ends every pooled session on the server and empties the pool.
   */
  endAllPooledSessions() {
    if (this.sessions.length) {
      const sessionIds = this.sessions.map(pooled => pooled.id);
      this.topology.endSessions(sessionIds);
      this.sessions = [];
    }
  }

  /**
   * Takes a session from the front of the pool, discarding any that have
   * timed out, or creates a fresh one when none are usable.
   *
   * @returns {ServerSession}
   */
  acquire() {
    const timeoutMinutes = this.topology.logicalSessionTimeoutMinutes;

    let candidate = this.sessions.shift();
    while (candidate !== undefined) {
      if (!candidate.hasTimedOut(timeoutMinutes)) {
        return candidate;
      }
      candidate = this.sessions.shift();
    }

    return new ServerSession();
  }

  /**
   * Returns a session to the front of the pool, first pruning any
   * timed-out sessions from the back; a timed-out session is dropped.
   *
   * @param {ServerSession} session the session being released
   */
  release(session) {
    const timeoutMinutes = this.topology.logicalSessionTimeoutMinutes;

    // Prune expired sessions from the tail of the pool.
    for (
      let last = this.sessions[this.sessions.length - 1];
      last !== undefined && last.hasTimedOut(timeoutMinutes);
      last = this.sessions[this.sessions.length - 1]
    ) {
      this.sessions.pop();
    }

    if (!session.hasTimedOut(timeoutMinutes)) {
      this.sessions.unshift(session);
    }
  }
}
|
||||
|
||||
// Public API of this module.
module.exports = {
  ClientSession: ClientSession,
  ServerSession: ServerSession,
  ServerSessionPool: ServerSessionPool
};
|
61
ProjectNow/NodeServer/node_modules/mongodb-core/lib/tools/smoke_plugin.js
generated
vendored
Normal file
61
ProjectNow/NodeServer/node_modules/mongodb-core/lib/tools/smoke_plugin.js
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
'use strict';
|
||||
|
||||
var fs = require('fs');
|
||||
|
||||
/* Note: because this plugin uses process.on('uncaughtException'), only one
 * of these can exist at any given time. This plugin and anything else that
 * uses process.on('uncaughtException') will conflict. */
/**
 * Attaches a smoke-test reporting plugin to a test runner. The plugin
 * records each test's status and timing into `outputFile` as JSON, and
 * installs an uncaughtException handler so the file is still written
 * (with running tests marked failed) if the process crashes.
 *
 * @param {Object} runner a runner exposing a `plugin()` registration method
 * @param {string} outputFile path the JSON results are written to
 * @return {Object} the registered plugin object
 */
exports.attachToRunner = function(runner, outputFile) {
  // Accumulated results, serialized to outputFile on exit.
  var smokeOutput = { results: [] };
  // Tests that have started but not yet finished, keyed by name.
  var runningTests = {};

  var integraPlugin = {
    beforeTest: function(test, callback) {
      // Stamp the start time so afterTest can report a duration window.
      test.startTime = Date.now();
      runningTests[test.name] = test;
      callback();
    },
    afterTest: function(test, callback) {
      smokeOutput.results.push({
        status: test.status,
        start: test.startTime,
        end: Date.now(),
        test_file: test.name,
        exit_code: 0,
        url: ''
      });
      delete runningTests[test.name];
      callback();
    },
    beforeExit: function(obj, callback) {
      // Asynchronous write on the normal exit path; the write error (if any)
      // is ignored.
      fs.writeFile(outputFile, JSON.stringify(smokeOutput), function() {
        callback();
      });
    }
  };

  // In case of exception, make sure we write file
  process.on('uncaughtException', function(err) {
    // Mark all currently running tests as failed
    for (var testName in runningTests) {
      smokeOutput.results.push({
        status: 'fail',
        start: runningTests[testName].startTime,
        end: Date.now(),
        test_file: testName,
        exit_code: 0,
        url: ''
      });
    }

    // write file (synchronously — the process is about to exit)
    fs.writeFileSync(outputFile, JSON.stringify(smokeOutput));

    // Standard NodeJS uncaught exception handler
    console.error(err.stack);
    process.exit(1);
  });

  runner.plugin(integraPlugin);
  return integraPlugin;
};
|
1469
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/mongos.js
generated
vendored
Normal file
1469
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/mongos.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
171
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/read_preference.js
generated
vendored
Normal file
171
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/read_preference.js
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
'use strict';
|
||||
|
||||
/**
|
||||
* @fileOverview The **ReadPreference** class is a class that represents a MongoDB ReadPreference and is
|
||||
* used to construct connections.
|
||||
*
|
||||
* @example
|
||||
* const ReplSet = require('mongodb-core').ReplSet,
|
||||
* ReadPreference = require('mongodb-core').ReadPreference,
|
||||
* assert = require('assert');
|
||||
*
|
||||
* const server = new ReplSet([{host: 'localhost', port: 30000}], {setName: 'rs'});
|
||||
* // Wait for the connection event
|
||||
* server.on('connect', function(server) {
|
||||
* const cursor = server.cursor(
|
||||
* 'db.test',
|
||||
* { find: 'db.test', query: {} },
|
||||
* { readPreference: new ReadPreference('secondary') }
|
||||
* );
|
||||
*
|
||||
* cursor.next(function(err, doc) {
|
||||
* server.destroy();
|
||||
* });
|
||||
* });
|
||||
*
|
||||
* // Start connecting
|
||||
* server.connect();
|
||||
*/
|
||||
|
||||
/**
 * Creates a new ReadPreference instance
 * @class
 * @param {string} mode A string describing the read preference mode (primary|primaryPreferred|secondary|secondaryPreferred|nearest)
 * @param {array} tags The tags object
 * @param {object} [options] Additional read preference options
 * @param {number} [options.maxStalenessSeconds] Max secondary read staleness in seconds, Minimum value is 90 seconds.
 * @property {string} mode The read preference mode (primary|primaryPreferred|secondary|secondaryPreferred|nearest)
 * @property {array} tags The tags object
 * @property {object} options Additional read preference options
 * @property {number} maxStalenessSeconds MaxStalenessSeconds value for the read preference
 * @return {ReadPreference}
 */
const ReadPreference = function(mode, tags, options) {
  this.mode = mode;
  this.tags = tags;
  this.options = options;

  const opts = this.options;
  if (opts && opts.maxStalenessSeconds != null) {
    // Negative values are treated as "unset".
    this.maxStalenessSeconds = opts.maxStalenessSeconds >= 0 ? opts.maxStalenessSeconds : null;
  } else if (tags && typeof tags === 'object') {
    // Historical two-argument form: new ReadPreference(mode, options).
    // The second argument doubles as the options bag. Note that `this.tags`
    // still holds the same object, matching the original behavior.
    this.options = tags;
  }
};
|
||||
|
||||
// Support the deprecated `preference` property introduced in the porcelain layer
// (a read-only alias of `mode`).
Object.defineProperty(ReadPreference.prototype, 'preference', {
  enumerable: true,
  get: function() {
    return this.mode;
  }
});
|
||||
|
||||
/**
 * Read preference mode constants
 */
ReadPreference.PRIMARY = 'primary';
ReadPreference.PRIMARY_PREFERRED = 'primaryPreferred';
ReadPreference.SECONDARY = 'secondary';
ReadPreference.SECONDARY_PREFERRED = 'secondaryPreferred';
ReadPreference.NEAREST = 'nearest';

// All values accepted as a read preference mode. The boolean/null entries
// are presumably accepted for legacy slaveOk-style options — confirm
// against the callers that pass them.
const VALID_MODES = [
  ReadPreference.PRIMARY,
  ReadPreference.PRIMARY_PREFERRED,
  ReadPreference.SECONDARY,
  ReadPreference.SECONDARY_PREFERRED,
  ReadPreference.NEAREST,
  true,
  false,
  null
];

/**
 * Validate if a mode is legal
 *
 * @method
 * @param {string} mode The string representing the read preference mode.
 * @return {boolean}
 */
ReadPreference.isValid = function(mode) {
  return VALID_MODES.indexOf(mode) !== -1;
};

/**
 * Validate if a mode is legal; with no string argument, validates this
 * instance's own mode.
 *
 * @method
 * @param {string} mode The string representing the read preference mode.
 * @return {boolean}
 */
ReadPreference.prototype.isValid = function(mode) {
  return ReadPreference.isValid(typeof mode === 'string' ? mode : this.mode);
};
|
||||
|
||||
// Modes that require the slaveOk wire-protocol bit.
const needSlaveOk = ['primaryPreferred', 'secondary', 'secondaryPreferred', 'nearest'];

/**
 * Indicates whether the slaveOk bit must be set for this mode.
 * @method
 * @return {boolean}
 */
ReadPreference.prototype.slaveOk = function() {
  return needSlaveOk.includes(this.mode);
};

/**
 * Determines whether two read preferences are equal (by mode only;
 * tags and options are not compared).
 * @method
 * @return {boolean}
 */
ReadPreference.prototype.equals = function(readPreference) {
  return this.mode === readPreference.mode;
};

/**
 * Returns a plain-object representation suitable for sending to the server.
 * @method
 * @return {Object}
 */
ReadPreference.prototype.toJSON = function() {
  const readPreference = { mode: this.mode };

  // Tags are only included when they are an actual array (the constructor
  // may have repurposed a non-array `tags` argument as options).
  const tags = this.tags;
  if (Array.isArray(tags)) {
    readPreference.tags = tags;
  }

  if (this.maxStalenessSeconds) {
    readPreference.maxStalenessSeconds = this.maxStalenessSeconds;
  }

  return readPreference;
};
|
||||
|
||||
/**
 * Shared ReadPreference instance for the 'primary' mode.
 * @return {ReadPreference}
 */
ReadPreference.primary = new ReadPreference('primary');
/**
 * Shared ReadPreference instance for the 'primaryPreferred' mode.
 * @return {ReadPreference}
 */
ReadPreference.primaryPreferred = new ReadPreference('primaryPreferred');
/**
 * Shared ReadPreference instance for the 'secondary' mode.
 * @return {ReadPreference}
 */
ReadPreference.secondary = new ReadPreference('secondary');
/**
 * Shared ReadPreference instance for the 'secondaryPreferred' mode.
 * @return {ReadPreference}
 */
ReadPreference.secondaryPreferred = new ReadPreference('secondaryPreferred');
/**
 * Shared ReadPreference instance for the 'nearest' mode.
 * @return {ReadPreference}
 */
ReadPreference.nearest = new ReadPreference('nearest');

module.exports = ReadPreference;
|
1716
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/replset.js
generated
vendored
Normal file
1716
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/replset.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1104
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/replset_state.js
generated
vendored
Normal file
1104
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/replset_state.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1160
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/server.js
generated
vendored
Normal file
1160
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/server.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
444
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/shared.js
generated
vendored
Normal file
444
ProjectNow/NodeServer/node_modules/mongodb-core/lib/topologies/shared.js
generated
vendored
Normal file
@@ -0,0 +1,444 @@
|
||||
'use strict';
|
||||
|
||||
const os = require('os');
|
||||
const f = require('util').format;
|
||||
const ReadPreference = require('./read_preference');
|
||||
|
||||
/**
 * Emits an SDAM event on `self`, but only when someone is listening for it.
 * @method
 * @param {EventEmitter} self the emitter to fire the event on
 * @param {string} event the event name
 * @param {Object} description the event payload
 */
function emitSDAMEvent(self, event, description) {
  const hasListeners = self.listeners(event).length > 0;
  if (hasListeners) {
    self.emit(event, description);
  }
}
|
||||
|
||||
// Get package.json variable
var driverVersion = require('../../package.json').version;
// Human-readable Node.js runtime description used as the default platform string.
var nodejsversion = f('Node.js %s, %s', process.version, os.endianness());
// Host OS details reported in the client handshake metadata.
var type = os.type();
var name = process.platform;
var architecture = process.arch;
var release = os.release();
|
||||
|
||||
/**
 * Builds the client metadata document sent to the server during the
 * connection handshake.
 *
 * @param {Object} options connection options
 * @param {Object} [options.clientInfo] caller-supplied metadata to clone and extend
 * @param {string} [options.appname] application name, truncated to 128 bytes
 * @return {Object} the client metadata document
 */
function createClientInfo(options) {
  // Build default client information
  var clientInfo = options.clientInfo
    ? clone(options.clientInfo)
    : {
        driver: {
          name: 'nodejs-core',
          version: driverVersion
        },
        os: {
          type: type,
          name: name,
          architecture: architecture,
          version: release
        }
      };

  // Is platform specified
  if (clientInfo.platform && clientInfo.platform.indexOf('mongodb-core') === -1) {
    clientInfo.platform = f('%s, mongodb-core: %s', clientInfo.platform, driverVersion);
  } else if (!clientInfo.platform) {
    clientInfo.platform = nodejsversion;
  }

  // Do we have an application specific string
  if (options.appname) {
    // Fix: `new Buffer(string)` is deprecated (DEP0005); Buffer.from is the
    // supported equivalent.
    var buffer = Buffer.from(options.appname);
    // Return the appname truncated to at most 128 bytes.
    var appname = buffer.length > 128 ? buffer.slice(0, 128).toString('utf8') : options.appname;
    // Add to the clientInfo
    clientInfo.application = { name: appname };
  }

  return clientInfo;
}
|
||||
|
||||
/**
 * Validates and returns the list of wire-protocol compressors requested in
 * the connection options.
 *
 * @param {Object} options connection options
 * @return {string[]} the requested compressors, or an empty array
 * @throws {Error} when a requested compressor is not 'snappy' or 'zlib'
 */
function createCompressionInfo(options) {
  const compression = options.compression;
  if (!compression || !compression.compressors) {
    return [];
  }

  // Check that all supplied compressors are valid
  for (const compressor of compression.compressors) {
    if (compressor !== 'snappy' && compressor !== 'zlib') {
      throw new Error('compressors must be at least one of snappy or zlib');
    }
  }

  return compression.compressors;
}
|
||||
|
||||
/**
 * Deep-copies a JSON-serializable object via a stringify/parse round trip.
 * NOTE: like any JSON round trip, this drops functions and undefined values.
 */
function clone(object) {
  const serialized = JSON.stringify(object);
  return JSON.parse(serialized);
}
|
||||
|
||||
/**
 * Returns the last-known server description for `self`, lazily seeding an
 * "Unknown" placeholder on first use.
 */
var getPreviousDescription = function(self) {
  const state = self.s;
  if (!state.serverDescription) {
    const unknown = {
      address: self.name,
      arbiters: [],
      hosts: [],
      passives: [],
      type: 'Unknown'
    };
    state.serverDescription = unknown;
  }

  return state.serverDescription;
};
|
||||
|
||||
// Emits 'serverDescriptionChanged' (when listened for) with the previous and
// new descriptions, then records the new description as the current one.
var emitServerDescriptionChanged = function(self, description) {
  if (self.listeners('serverDescriptionChanged').length > 0) {
    // Emit the server description changed events
    self.emit('serverDescriptionChanged', {
      // Fall back to this server's own id when no topology id was assigned.
      topologyId: self.s.topologyId !== -1 ? self.s.topologyId : self.id,
      address: self.name,
      previousDescription: getPreviousDescription(self),
      newDescription: description
    });

    self.s.serverDescription = description;
  }
};
|
||||
|
||||
/**
 * Returns the last-known topology description for `self`, lazily seeding an
 * "Unknown" single-server placeholder on first use.
 */
var getPreviousTopologyDescription = function(self) {
  const state = self.s;
  if (!state.topologyDescription) {
    const unknownServer = {
      address: self.name,
      arbiters: [],
      hosts: [],
      passives: [],
      type: 'Unknown'
    };
    state.topologyDescription = {
      topologyType: 'Unknown',
      servers: [unknownServer]
    };
  }

  return state.topologyDescription;
};
|
||||
|
||||
// Emits 'topologyDescriptionChanged' (when listened for) with the previous
// and new descriptions, then records the new description as the current one.
var emitTopologyDescriptionChanged = function(self, description) {
  if (self.listeners('topologyDescriptionChanged').length > 0) {
    // Emit the topology description changed event
    self.emit('topologyDescriptionChanged', {
      topologyId: self.s.topologyId !== -1 ? self.s.topologyId : self.id,
      address: self.name,
      previousDescription: getPreviousTopologyDescription(self),
      newDescription: description
    });

    // Fix: store the new description under `topologyDescription` so the next
    // call reports it as the previous description. The old code assigned to
    // `self.s.serverDescription` (copy/paste from the server variant above),
    // so getPreviousTopologyDescription never observed updates.
    self.s.topologyDescription = description;
  }
};
|
||||
|
||||
// Reports whether two ismaster responses imply a different topology type
// (e.g. a server moving from RSSecondary to RSPrimary).
var changedIsMaster = function(self, currentIsmaster, ismaster) {
  var currentType = getTopologyType(self, currentIsmaster);
  var newType = getTopologyType(self, ismaster);
  if (newType !== currentType) return true;
  return false;
};
|
||||
|
||||
/**
 * Classifies an ismaster response into an SDAM server type. Falls back to
 * `self.ismaster` when no response is supplied, and 'Unknown' when neither
 * is available.
 */
var getTopologyType = function(self, ismaster) {
  const response = ismaster || self.ismaster;
  if (!response) return 'Unknown';

  if (response.ismaster) {
    if (response.msg === 'isdbgrid') return 'Mongos';
    return response.hosts ? 'RSPrimary' : 'Standalone';
  }

  if (response.secondary) return 'RSSecondary';
  if (response.arbiterOnly) return 'RSArbiter';
  return 'Unknown';
};
|
||||
|
||||
/**
 * Returns a monitoring function that runs an `ismaster` heartbeat against
 * `self`, emits the SDAM heartbeat events, updates the cached ismaster view
 * and, when no callback is supplied, schedules the next sweep.
 *
 * @param {Object} self the server/topology object being monitored
 * @return {Function} function([callback]) performing one heartbeat
 */
var inquireServerState = function(self) {
  return function(callback) {
    if (self.s.state === 'destroyed') return;
    // Record response time
    var start = new Date().getTime();

    // emitSDAMEvent
    emitSDAMEvent(self, 'serverHeartbeatStarted', { connectionId: self.name });

    // Attempt to execute ismaster command
    self.command('admin.$cmd', { ismaster: true }, { monitoring: true }, function(err, r) {
      // Calculate latencyMS before branching. Fix: previously it was only
      // assigned in the success branch, so the 'serverHeartbeatFailed'
      // event reported `durationMS: undefined`.
      var latencyMS = new Date().getTime() - start;

      if (!err) {
        // Legacy event sender
        self.emit('ismaster', r, self);

        // Server heart beat event
        emitSDAMEvent(self, 'serverHeartbeatSucceeded', {
          durationMS: latencyMS,
          reply: r.result,
          connectionId: self.name
        });

        // Did the server change
        if (changedIsMaster(self, self.s.ismaster, r.result)) {
          // Emit server description changed if something listening
          emitServerDescriptionChanged(self, {
            address: self.name,
            arbiters: [],
            hosts: [],
            passives: [],
            type: !self.s.inTopology ? 'Standalone' : getTopologyType(self)
          });
        }

        // Update ismaster view
        self.s.ismaster = r.result;

        // Set server response time
        self.s.isMasterLatencyMS = latencyMS;
      } else {
        emitSDAMEvent(self, 'serverHeartbeatFailed', {
          durationMS: latencyMS,
          failure: err,
          connectionId: self.name
        });
      }

      // Performing an ismaster monitoring callback operation
      if (typeof callback === 'function') {
        return callback(err, r);
      }

      // Perform another sweep
      self.s.inquireServerStateTimeout = setTimeout(inquireServerState(self), self.s.haInterval);
    });
  };
};
|
||||
|
||||
/**
 * Shallow-copies an options object. Uses for...in (including inherited
 * enumerable keys), matching the original implementation.
 */
var cloneOptions = function(options) {
  var copied = {};
  for (var key in options) {
    copied[key] = options[key];
  }
  return copied;
};
|
||||
|
||||
/**
 * Small wrapper around setInterval with start/stop/isRunning semantics.
 * The timer handle is held in a closure; `false` means "not running".
 *
 * @param {Function} fn callback to invoke on each tick
 * @param {number} time interval in milliseconds
 */
function Interval(fn, time) {
  let handle = false;

  this.start = function() {
    if (!this.isRunning()) {
      handle = setInterval(fn, time);
    }

    return this;
  };

  this.stop = function() {
    clearInterval(handle);
    handle = false;
    return this;
  };

  this.isRunning = function() {
    return handle !== false;
  };
}
|
||||
|
||||
/**
 * Small wrapper around setTimeout with start/stop/isRunning semantics.
 * The timer handle is held in a closure; `false` means "not started".
 *
 * @param {Function} fn callback to invoke when the timeout fires
 * @param {number} time delay in milliseconds
 */
function Timeout(fn, time) {
  let handle = false;

  this.start = function() {
    if (!this.isRunning()) {
      handle = setTimeout(fn, time);
    }
    return this;
  };

  this.stop = function() {
    clearTimeout(handle);
    handle = false;
    return this;
  };

  this.isRunning = function() {
    // NOTE(review): `_called` is a Node-internal Timeout property marking a
    // fired timer — confirm it still exists on supported Node versions.
    if (handle && handle._called) return false;
    return handle !== false;
  };
}
|
||||
|
||||
/**
 * Computes the difference between two topology descriptions, reporting each
 * server that was removed (-> 'Unknown'), added ('Unknown' ->), or changed
 * type between `previous` and `current`. Addresses compare
 * case-insensitively.
 *
 * @param {Object|null} previous description with a `servers` array (may be null)
 * @param {Object} current description with a `servers` array
 * @return {{servers: Array}} the list of server transitions
 */
function diff(previous, current) {
  // Difference document
  const difference = { servers: [] };

  // Treat a missing previous description as an empty one.
  const prev = previous || { servers: [] };

  const sameAddress = (a, b) => a.address.toLowerCase() === b.address.toLowerCase();

  // Servers present before but missing now.
  for (const prevServer of prev.servers) {
    const stillPresent = current.servers.some(s => sameAddress(s, prevServer));
    if (!stillPresent) {
      difference.servers.push({
        address: prevServer.address,
        from: prevServer.type,
        to: 'Unknown'
      });
    }
  }

  // Servers present now that did not exist before.
  for (const currServer of current.servers) {
    const existedBefore = prev.servers.some(s => sameAddress(s, currServer));
    if (!existedBefore) {
      difference.servers.push({
        address: currServer.address,
        from: 'Unknown',
        to: currServer.type
      });
    }
  }

  // Servers present in both whose type changed.
  for (const prevServer of prev.servers) {
    for (const currServer of current.servers) {
      if (sameAddress(prevServer, currServer) && prevServer.type !== currServer.type) {
        difference.servers.push({
          address: prevServer.address,
          from: prevServer.type,
          to: currServer.type
        });
      }
    }
  }

  // Return difference
  return difference;
}
|
||||
|
||||
/**
 * Shared function to determine clusterTime for a given topology: adopts the
 * incoming $clusterTime when the topology has none yet, or when it is
 * strictly newer than the stored one.
 *
 * @param {Object} topology the topology whose clusterTime is updated
 * @param {Object} $clusterTime a `$clusterTime` document from a server reply
 */
function resolveClusterTime(topology, $clusterTime) {
  const current = topology.clusterTime;
  if (current == null || $clusterTime.clusterTime.greaterThan(current.clusterTime)) {
    topology.clusterTime = $clusterTime;
  }
}
|
||||
|
||||
// NOTE: this is a temporary move until the topologies can be more formally refactored
// to share code.
const SessionMixins = {
  /**
   * Sends `endSessions` to the server for one or more server sessions.
   *
   * @param {Object|Object[]} sessions a session id document, or an array of them
   * @param {Function} [callback] invoked when the command completes; the
   *     command's result and any error are intentionally discarded
   */
  endSessions: function(sessions, callback) {
    if (!Array.isArray(sessions)) {
      sessions = [sessions];
    }

    // TODO:
    // When connected to a sharded cluster the endSessions command
    // can be sent to any mongos. When connected to a replica set the
    // endSessions command MUST be sent to the primary if the primary
    // is available, otherwise it MUST be sent to any available secondary.
    // Is it enough to use: ReadPreference.primaryPreferred ?
    this.command(
      'admin.$cmd',
      { endSessions: sessions },
      { readPreference: ReadPreference.primaryPreferred },
      () => {
        // intentionally ignored, per spec
        if (typeof callback === 'function') callback();
      }
    );
  }
};
|
||||
|
||||
// Minimum wire protocol version that supports retryable writes.
const RETRYABLE_WIRE_VERSION = 6;

/**
 * Determines whether the provided topology supports retryable writes:
 * the wire version must be recent enough AND the deployment must report a
 * logical session timeout (i.e. sessions are supported).
 *
 * @param {Mongos|Replset} topology
 * @return {boolean}
 */
const isRetryableWritesSupported = function(topology) {
  const wireVersion = topology.lastIsMaster().maxWireVersion;
  return wireVersion >= RETRYABLE_WIRE_VERSION && Boolean(topology.logicalSessionTimeoutMinutes);
};
|
||||
|
||||
/**
 * Relays events for a given listener and emitter: every event named in
 * `events` observed on `listener` is re-emitted on `emitter`.
 *
 * @param {EventEmitter} listener the EventEmitter to listen to the events for
 * @param {EventEmitter} emitter the EventEmitter to relay the events on
 * @param {string[]} events the event names to relay
 */
function relayEvents(listener, emitter, events) {
  for (const eventName of events) {
    listener.on(eventName, event => emitter.emit(eventName, event));
  }
}
|
||||
|
||||
// Public API of this module.
module.exports.SessionMixins = SessionMixins;
module.exports.resolveClusterTime = resolveClusterTime;
module.exports.inquireServerState = inquireServerState;
module.exports.getTopologyType = getTopologyType;
module.exports.emitServerDescriptionChanged = emitServerDescriptionChanged;
module.exports.emitTopologyDescriptionChanged = emitTopologyDescriptionChanged;
module.exports.cloneOptions = cloneOptions;
module.exports.createClientInfo = createClientInfo;
module.exports.createCompressionInfo = createCompressionInfo;
module.exports.clone = clone;
module.exports.diff = diff;
module.exports.Interval = Interval;
module.exports.Timeout = Timeout;
module.exports.isRetryableWritesSupported = isRetryableWritesSupported;
module.exports.relayEvents = relayEvents;
|
314
ProjectNow/NodeServer/node_modules/mongodb-core/lib/uri_parser.js
generated
vendored
Normal file
314
ProjectNow/NodeServer/node_modules/mongodb-core/lib/uri_parser.js
generated
vendored
Normal file
@@ -0,0 +1,314 @@
|
||||
'use strict';
|
||||
const URL = require('url');
|
||||
const qs = require('querystring');
|
||||
const dns = require('dns');
|
||||
const MongoParseError = require('./error').MongoParseError;
|
||||
|
||||
/**
 * The following regular expression validates a connection string and breaks the
 * provided string into the following capture groups: [protocol, username, password, hosts]
 *
 * NOTE(review): the space characters inside the non-capturing groups are
 * matched literally (JS regexes have no free-spacing mode), which looks
 * suspicious for URI parsing — confirm against the upstream pattern. Also,
 * only three groups actually capture; verify the group list above.
 */
const HOSTS_RX = /(mongodb(?:\+srv|)):\/\/(?: (?:[^:]*) (?: : ([^@]*) )? @ )?([^/?]*)(?:\/|)(.*)/;
|
||||
|
||||
/**
 * Determines whether a provided address matches the provided parent domain in order
 * to avoid certain attack vectors.
 *
 * Both names have their first label stripped and are prefixed with '.', so
 * the comparison is a suffix check on the remaining domain.
 *
 * @param {String} srvAddress The address to check against a domain
 * @param {String} parentDomain The domain to check the provided address against
 * @return {Boolean} Whether the provided address matches the parent domain
 */
function matchesParentDomain(srvAddress, parentDomain) {
  const leadingLabel = /^.*?\./;
  const normalizedSrv = `.${srvAddress.replace(leadingLabel, '')}`;
  const normalizedParent = `.${parentDomain.replace(leadingLabel, '')}`;
  return normalizedSrv.endsWith(normalizedParent);
}
|
||||
|
||||
/**
 * Lookup a `mongodb+srv` connection string, combine the parts and reparse it as a normal
 * connection string.
 *
 * @param {string} uri The connection string to parse
 * @param {object} options Optional user provided connection string options
 * @param {function} callback invoked with (err) or with the parsed result via
 *     parseConnectionString
 */
function parseSrvConnectionString(uri, options, callback) {
  const result = URL.parse(uri, true);

  // SRV hosts must be fully qualified: host, domain and TLD.
  if (result.hostname.split('.').length < 3) {
    return callback(new MongoParseError('URI does not have hostname, domain name and tld'));
  }

  result.domainLength = result.hostname.split('.').length;
  if (result.pathname && result.pathname.match(',')) {
    return callback(new MongoParseError('Invalid URI, cannot contain multiple hostnames'));
  }

  if (result.port) {
    // NOTE(review): PROTOCOL_MONGODB_SRV is not defined in this function's
    // scope here — confirm it exists at module level.
    return callback(new MongoParseError(`Ports not accepted with '${PROTOCOL_MONGODB_SRV}' URIs`));
  }

  // Resolve the SRV record that lists the actual cluster hosts.
  let srvAddress = `_mongodb._tcp.${result.host}`;
  dns.resolveSrv(srvAddress, (err, addresses) => {
    if (err) return callback(err);

    if (addresses.length === 0) {
      return callback(new MongoParseError('No addresses found at host'));
    }

    // Every returned host must share the parent domain of the lookup host.
    // NOTE(review): matchesParentDomain is declared with two parameters; the
    // third argument here is ignored — confirm intent.
    for (let i = 0; i < addresses.length; i++) {
      if (!matchesParentDomain(addresses[i].name, result.hostname, result.domainLength)) {
        return callback(
          new MongoParseError('Server record does not share hostname with parent URI')
        );
      }
    }

    // Rebuild a standard mongodb:// string from the resolved hosts,
    // preserving any auth component from the original URI.
    let base = result.auth ? `mongodb://${result.auth}@` : `mongodb://`;
    let connectionStrings = addresses.map(
      (address, i) =>
        i === 0 ? `${base}${address.name}:${address.port}` : `${address.name}:${address.port}`
    );

    let connectionString = `${connectionStrings.join(',')}/`;
    let connectionStringOptions = [];

    // Add the default database if needed
    if (result.path) {
      let defaultDb = result.path.slice(1);
      if (defaultDb.indexOf('?') !== -1) {
        defaultDb = defaultDb.slice(0, defaultDb.indexOf('?'));
      }

      connectionString += defaultDb;
    }

    // Default to SSL true (SRV URIs imply TLS unless explicitly disabled)
    if (!options.ssl && (!result.search || result.query['ssl'] == null)) {
      connectionStringOptions.push('ssl=true');
    }

    // Keep original uri options
    if (result.search) {
      connectionStringOptions.push(result.search.replace('?', ''));
    }

    // A TXT record may carry additional options (authSource / replicaSet).
    dns.resolveTxt(result.host, (err, record) => {
      if (err) {
        // Absence of a TXT record is not an error.
        if (err.code !== 'ENODATA') {
          return callback(err);
        }
        record = null;
      }

      if (record) {
        if (record.length > 1) {
          return callback(new MongoParseError('Multiple text records not allowed'));
        }

        // A TXT record value may be split into chunks; rejoin them.
        record = record[0];
        record = record.length > 1 ? record.join('') : record[0];
        if (!record.includes('authSource') && !record.includes('replicaSet')) {
          return callback(
            new MongoParseError('Text record must only set `authSource` or `replicaSet`')
          );
        }

        connectionStringOptions.push(record);
      }

      // Add any options to the connection string
      if (connectionStringOptions.length) {
        connectionString += `?${connectionStringOptions.join('&')}`;
      }

      // parseConnectionString is defined elsewhere in this module.
      parseConnectionString(connectionString, callback);
    });
  });
}
|
||||
|
||||
/**
 * Parses a query string item according to the connection string spec
 *
 * @param {Array|String} value The value to parse
 * @return {Array|Object|String} The parsed value
 */
function parseQueryStringItemValue(value) {
  if (Array.isArray(value)) {
    // Deduplicate the array (keep the first occurrence of each item) and
    // collapse a single-element array down to its only member.
    // NOTE: the index lookup must be against the *array*, not the element;
    // `elem.indexOf(elem)` is always 0 for strings and would drop everything
    // past the first entry.
    value = value.filter((item, idx) => value.indexOf(item) === idx);
    if (value.length === 1) value = value[0];
  } else if (value.indexOf(':') > 0) {
    // `key:value[,key:value...]` pairs (e.g. readConcern options) become an
    // object, recursively parsing each pair's value.
    value = value.split(',').reduce((result, pair) => {
      const parts = pair.split(':');
      result[parts[0]] = parseQueryStringItemValue(parts[1]);
      return result;
    }, {});
  } else if (value.toLowerCase() === 'true' || value.toLowerCase() === 'false') {
    value = value.toLowerCase() === 'true';
  } else {
    // Convert numeric strings to numbers; anything else stays a string.
    const numericValue = parseFloat(value);
    if (!Number.isNaN(numericValue)) {
      value = numericValue;
    }
  }

  return value;
}
|
||||
|
||||
/**
 * Parses a query string according the connection string spec.
 *
 * @param {String} query The query string to parse
 * @return {Object|Error} The parsed query string as an object, or an error if one was encountered
 */
function parseQueryString(query) {
  const parsed = qs.parse(query);
  const result = {};

  for (const key of Object.keys(parsed)) {
    const value = parsed[key];

    // Reject dangling options such as `?foo=` or `?foo`
    if (value == null || value === '') {
      return new MongoParseError('Incomplete key value pair for option');
    }

    result[key.toLowerCase()] = parseQueryStringItemValue(value);
  }

  // `wtimeout` is superseded by `wtimeoutms`; drop it when both are present
  if (result.wtimeout && result.wtimeoutms) {
    delete result.wtimeout;
    console.warn('Unsupported option `wtimeout` specified');
  }

  return Object.keys(result).length ? result : null;
}
|
||||
|
||||
// Connection string schemes accepted by this parser; anything else is
// rejected with a MongoParseError in parseConnectionString below.
const PROTOCOL_MONGODB = 'mongodb';
const PROTOCOL_MONGODB_SRV = 'mongodb+srv';
const SUPPORTED_PROTOCOLS = [PROTOCOL_MONGODB, PROTOCOL_MONGODB_SRV];
|
||||
|
||||
/**
 * Parses a MongoDB connection string
 *
 * @param {*} uri the MongoDB connection string to parse
 * @param {object} [options] Optional settings.
 * @param {parseCallback} callback invoked with (err) or (null, {hosts, auth, options})
 */
function parseConnectionString(uri, options, callback) {
  if (typeof options === 'function') (callback = options), (options = {});
  options = options || {};

  // Check for bad uris before we parse
  try {
    URL.parse(uri);
  } catch (e) {
    return callback(new MongoParseError('URI malformed, cannot be parsed'));
  }

  const cap = uri.match(HOSTS_RX);
  if (!cap) {
    return callback(new MongoParseError('Invalid connection string'));
  }

  const protocol = cap[1];
  if (SUPPORTED_PROTOCOLS.indexOf(protocol) === -1) {
    return callback(new MongoParseError('Invalid protocol provided'));
  }

  // SRV URIs are resolved through DNS first, then re-parsed
  if (protocol === PROTOCOL_MONGODB_SRV) {
    return parseSrvConnectionString(uri, options, callback);
  }

  // cap[4] holds "<db>?<query>"
  const dbAndQuery = cap[4].split('?');
  const db = dbAndQuery.length > 0 ? dbAndQuery[0] : null;
  const query = dbAndQuery.length > 1 ? dbAndQuery[1] : null;
  let parsedOptions = parseQueryString(query);
  if (parsedOptions instanceof MongoParseError) {
    return callback(parsedOptions);
  }

  // Explicitly passed options take precedence over URI options
  parsedOptions = Object.assign({}, parsedOptions, options);
  const auth = { username: null, password: null, db: db && db !== '' ? qs.unescape(db) : null };
  if (cap[4].split('?')[0].indexOf('@') !== -1) {
    return callback(new MongoParseError('Unescaped slash in userinfo section'));
  }

  const authorityParts = cap[3].split('@');
  if (authorityParts.length > 2) {
    return callback(new MongoParseError('Unescaped at-sign in authority section'));
  }

  if (authorityParts.length > 1) {
    const authParts = authorityParts.shift().split(':');
    if (authParts.length > 2) {
      return callback(new MongoParseError('Unescaped colon in authority section'));
    }

    auth.username = qs.unescape(authParts[0]);
    auth.password = authParts[1] ? qs.unescape(authParts[1]) : null;
  }

  let hostParsingError = null;
  const hosts = authorityParts
    .shift()
    .split(',')
    .map(host => {
      let parsedHost = URL.parse(`mongodb://${host}`);
      if (parsedHost.path === '/:') {
        hostParsingError = new MongoParseError('Double colon in host identifier');
        return null;
      }

      // heuristically determine if we're working with a domain socket
      if (host.match(/\.sock/)) {
        parsedHost.hostname = qs.unescape(host);
        parsedHost.port = null;
      }

      // NOTE: legacy `url.parse` exposes the port as a *string* (or null), so
      // it must be converted before NaN can be detected — `Number.isNaN` on a
      // string is always false and would let non-numeric ports through.
      if (parsedHost.port != null && Number.isNaN(parseInt(parsedHost.port, 10))) {
        hostParsingError = new MongoParseError('Invalid port (non-numeric string)');
        return;
      }

      const result = {
        host: parsedHost.hostname,
        port: parsedHost.port ? parseInt(parsedHost.port, 10) : null
      };

      if (result.port === 0) {
        hostParsingError = new MongoParseError('Invalid port (zero) with hostname');
        return;
      }

      if (result.port > 65535) {
        hostParsingError = new MongoParseError('Invalid port (larger than 65535) with hostname');
        return;
      }

      if (result.port < 0) {
        hostParsingError = new MongoParseError('Invalid port (negative number)');
        return;
      }

      return result;
    })
    .filter(host => !!host);

  if (hostParsingError) {
    return callback(hostParsingError);
  }

  if (hosts.length === 0 || hosts[0].host === '' || hosts[0].host === null) {
    return callback(new MongoParseError('No hostname or hostnames provided in connection string'));
  }

  callback(null, {
    hosts: hosts,
    auth: auth.db || auth.username ? auth : null,
    options: Object.keys(parsedOptions).length ? parsedOptions : null
  });
}
|
||||
|
||||
// Single entry point consumed by the driver's URI handling.
module.exports = parseConnectionString;
|
14
ProjectNow/NodeServer/node_modules/mongodb-core/lib/utils.js
generated
vendored
Normal file
14
ProjectNow/NodeServer/node_modules/mongodb-core/lib/utils.js
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
'use strict';
|
||||
|
||||
const crypto = require('crypto');
|
||||
|
||||
/**
 * Generates a version 4 (random) UUID as a 16-byte Buffer.
 *
 * Byte 6's high nibble is forced to 0100 (version 4) and byte 8's two high
 * bits to 10 (RFC 4122 variant); the remaining bits stay random.
 *
 * @returns {Buffer} 16 raw UUID bytes (not hex-formatted)
 */
const uuidV4 = () => {
  const bytes = crypto.randomBytes(16);
  bytes[6] = (bytes[6] & 0x0f) | 0x40; // version nibble -> 4
  bytes[8] = (bytes[8] & 0x3f) | 0x80; // variant bits -> 10xxxxxx
  return bytes;
};
|
||||
|
||||
// Expose the UUID helper to the rest of mongodb-core.
module.exports = {
  uuidV4: uuidV4
};
|
368
ProjectNow/NodeServer/node_modules/mongodb-core/lib/wireprotocol/2_6_support.js
generated
vendored
Normal file
368
ProjectNow/NodeServer/node_modules/mongodb-core/lib/wireprotocol/2_6_support.js
generated
vendored
Normal file
@@ -0,0 +1,368 @@
|
||||
'use strict';
|
||||
|
||||
var copy = require('../connection/utils').copy,
|
||||
retrieveBSON = require('../connection/utils').retrieveBSON,
|
||||
KillCursor = require('../connection/commands').KillCursor,
|
||||
GetMore = require('../connection/commands').GetMore,
|
||||
Query = require('../connection/commands').Query,
|
||||
f = require('util').format,
|
||||
MongoError = require('../error').MongoError,
|
||||
getReadPreference = require('./shared').getReadPreference;
|
||||
|
||||
var BSON = retrieveBSON(),
|
||||
Long = BSON.Long;
|
||||
|
||||
// 2.6 wire protocol implementation; methods are attached to the prototype below.
var WireProtocol = function() {};
|
||||
|
||||
//
// Execute a write operation
//
// Builds a 2.6-style write command ({ insert|update|delete: <collection>,
// <opsField>: ops, ordered, writeConcern?, bypassDocumentValidation? }) and
// writes it to the pool as a `<db>.$cmd` query.
//
// @param {Pool} pool connection pool to write to
// @param {BSON} bson bson parser instance
// @param {String} type write command name ('insert', 'update' or 'delete')
// @param {String} opsField command field holding the operations array
// @param {String} ns fully qualified namespace ('db.collection')
// @param {Array} ops operations to apply
// @param {Object} [options] write options (ordered, writeConcern, session, ...)
// @param {Function} callback invoked with (err) or the command response
var executeWrite = function(pool, bson, type, opsField, ns, ops, options, callback) {
  if (ops.length === 0) throw new MongoError('insert must contain at least one document');
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }
  // Default options unconditionally — previously this only happened inside
  // the callback-shuffling branch, so a null/undefined options argument
  // crashed below at options.ordered.
  options = options || {};

  // Split the ns up to get db and collection
  var p = ns.split('.');
  var d = p.shift();
  // Options
  var ordered = typeof options.ordered === 'boolean' ? options.ordered : true;
  var writeConcern = options.writeConcern;

  // return skeleton
  var writeCommand = {};
  writeCommand[type] = p.join('.');
  writeCommand[opsField] = ops;
  writeCommand.ordered = ordered;

  // Did we specify a write concern
  if (writeConcern && Object.keys(writeConcern).length > 0) {
    writeCommand.writeConcern = writeConcern;
  }

  // Do we have bypassDocumentValidation set, then enable it on the write command
  if (typeof options.bypassDocumentValidation === 'boolean') {
    writeCommand.bypassDocumentValidation = options.bypassDocumentValidation;
  }

  // Options object
  var opts = { command: true };
  if (typeof options.session !== 'undefined') opts.session = options.session;
  // checkKeys defaults to false; honor an explicit caller override only
  var queryOptions = { checkKeys: false, numberToSkip: 0, numberToReturn: 1 };
  if (typeof options.checkKeys === 'boolean') queryOptions.checkKeys = options.checkKeys;
  // Ensure we support serialization of functions
  if (options.serializeFunctions) queryOptions.serializeFunctions = options.serializeFunctions;
  // Do not serialize the undefined fields
  if (options.ignoreUndefined) queryOptions.ignoreUndefined = options.ignoreUndefined;

  try {
    // Create write command
    var cmd = new Query(bson, f('%s.$cmd', d), writeCommand, queryOptions);
    // Execute command
    pool.write(cmd, opts, callback);
  } catch (err) {
    callback(err);
  }
};
|
||||
|
||||
//
// Needs to support legacy mass insert as well as ordered/unordered legacy
// emulation
//
// All three write entry points delegate to executeWrite; only the command
// name and the field carrying the operations differ. `ismaster` is accepted
// for interface parity with other wire protocol versions but unused here.
WireProtocol.prototype.insert = function(pool, ismaster, ns, bson, ops, options, callback) {
  executeWrite(pool, bson, 'insert', 'documents', ns, ops, options, callback);
};

WireProtocol.prototype.update = function(pool, ismaster, ns, bson, ops, options, callback) {
  executeWrite(pool, bson, 'update', 'updates', ns, ops, options, callback);
};

WireProtocol.prototype.remove = function(pool, ismaster, ns, bson, ops, options, callback) {
  executeWrite(pool, bson, 'delete', 'deletes', ns, ops, options, callback);
};
|
||||
|
||||
/**
 * Kills a server-side cursor using the legacy OP_KILL_CURSORS message.
 *
 * Fire-and-forget: no server response is expected, and when the pool is
 * missing or disconnected nothing happens (the callback is not invoked).
 * The callback, when provided, only reports serialization/write failures.
 */
WireProtocol.prototype.killCursor = function(bson, ns, cursorState, pool, callback) {
  // Legacy kill-cursors message targeting this single cursor id
  const killCursor = new KillCursor(bson, ns, [cursorState.cursorId]);

  // Release the connection immediately and expect no reply
  const options = { immediateRelease: true, noResponse: true };
  if (typeof cursorState.session === 'object') {
    options.session = cursorState.session;
  }

  // Nothing to do without a live pool
  if (!pool || !pool.isConnected()) return;

  try {
    pool.write(killCursor, options, callback);
  } catch (err) {
    if (typeof callback === 'function') {
      callback(err, null);
    } else {
      console.warn(err);
    }
  }
};
|
||||
|
||||
/**
 * Retrieves the next batch for a cursor using the legacy OP_GET_MORE message.
 *
 * On success the reply's documents and (Long-normalized) cursor id are stored
 * on `cursorState` and the callback receives (null, null, connection).
 */
WireProtocol.prototype.getMore = function(
  bson,
  ns,
  cursorState,
  batchSize,
  raw,
  connection,
  options,
  callback
) {
  const getMore = new GetMore(bson, ns, cursorState.cursorId, { numberToReturn: batchSize });

  const queryCallback = function(err, result) {
    if (err) return callback(err);
    const r = result.message;

    // CursorNotFound flag (bit 0): cursor timed out or was killed server-side
    if ((r.responseFlags & (1 << 0)) !== 0) {
      return callback(new MongoError('cursor does not exist, was killed or timed out'), null);
    }

    // Normalize the cursor id to a Long before storing it
    cursorState.cursorId =
      typeof r.cursorId === 'number' ? Long.fromNumber(r.cursorId) : r.cursorId;
    cursorState.documents = r.documents;

    callback(null, null, r.connection);
  };

  // Assemble per-call query options from the cursor state
  const queryOptions = {};
  if (raw) queryOptions.raw = raw;
  if (typeof cursorState.promoteLongs === 'boolean') {
    queryOptions.promoteLongs = cursorState.promoteLongs;
  }
  if (typeof cursorState.promoteValues === 'boolean') {
    queryOptions.promoteValues = cursorState.promoteValues;
  }
  if (typeof cursorState.promoteBuffers === 'boolean') {
    queryOptions.promoteBuffers = cursorState.promoteBuffers;
  }
  if (typeof cursorState.session === 'object') {
    queryOptions.session = cursorState.session;
  }

  // Write out the getMore command
  connection.write(getMore, queryOptions, queryCallback);
};
|
||||
|
||||
/**
 * Dispatches a command: `find` commands are translated to legacy queries,
 * anything else is wrapped as a `$cmd` query. Returns undefined when the
 * cursor already has an id (nothing needs to be issued).
 *
 * @throws {MongoError} when no command document is provided
 */
WireProtocol.prototype.command = function(bson, ns, cmd, cursorState, topology, options) {
  if (cmd.find) {
    return setupClassicFind(bson, ns, cmd, cursorState, topology, options);
  }

  if (cursorState.cursorId != null) {
    return;
  }

  if (cmd) {
    return setupCommand(bson, ns, cmd, cursorState, topology, options);
  }

  throw new MongoError(f('command %s does not return a cursor', JSON.stringify(cmd)));
};
|
||||
|
||||
//
// Execute a find command
//
// Translates a modern `find` command document into a legacy OP_QUERY with
// `$`-prefixed modifiers ($query, $orderby, $hint, ...) and returns the
// resulting Query object. Pre-3.2 servers only accept readConcern level
// 'local', so anything else throws.
var setupClassicFind = function(bson, ns, cmd, cursorState, topology, options) {
  // Ensure we have at least some options
  options = options || {};
  // Get the readPreference
  var readPreference = getReadPreference(cmd, options);
  // Set the optional batchSize
  cursorState.batchSize = cmd.batchSize || cursorState.batchSize;
  var numberToReturn = 0;

  // Unpack the limit and batchSize values
  if (cursorState.limit === 0) {
    numberToReturn = cursorState.batchSize;
  } else if (
    cursorState.limit < 0 ||
    cursorState.limit < cursorState.batchSize ||
    (cursorState.limit > 0 && cursorState.batchSize === 0)
  ) {
    numberToReturn = cursorState.limit;
  } else {
    numberToReturn = cursorState.batchSize;
  }

  var numberToSkip = cursorState.skip || 0;
  // Build actual find command
  var findCmd = {};

  // We have a Mongos topology, check if we need to add a readPreference
  if (topology.type === 'mongos' && readPreference) {
    findCmd['$readPreference'] = readPreference.toJSON();
  }

  // Add special modifiers to the query
  if (cmd.sort) findCmd['$orderby'] = cmd.sort;
  if (cmd.hint) findCmd['$hint'] = cmd.hint;
  if (cmd.snapshot) findCmd['$snapshot'] = cmd.snapshot;
  if (typeof cmd.returnKey !== 'undefined') findCmd['$returnKey'] = cmd.returnKey;
  if (cmd.maxScan) findCmd['$maxScan'] = cmd.maxScan;
  if (cmd.min) findCmd['$min'] = cmd.min;
  if (cmd.max) findCmd['$max'] = cmd.max;
  if (typeof cmd.showDiskLoc !== 'undefined') findCmd['$showDiskLoc'] = cmd.showDiskLoc;
  if (cmd.comment) findCmd['$comment'] = cmd.comment;
  if (cmd.maxTimeMS) findCmd['$maxTimeMS'] = cmd.maxTimeMS;

  if (cmd.explain) {
    // nToReturn must be 0 (match all) or negative (match N and close cursor)
    // nToReturn > 0 will give explain results equivalent to limit(0)
    numberToReturn = -Math.abs(cmd.limit || 0);
    findCmd['$explain'] = true;
  }

  // Add the query
  findCmd['$query'] = cmd.query;

  // Throw on majority readConcern passed in
  if (cmd.readConcern && cmd.readConcern.level !== 'local') {
    throw new MongoError(
      f('server find command does not support a readConcern level of %s', cmd.readConcern.level)
    );
  }

  // Remove readConcern, ensure no failing commands
  // (copy first so the caller's cmd object is not mutated)
  if (cmd.readConcern) {
    cmd = copy(cmd);
    delete cmd['readConcern'];
  }

  // Serialize functions
  var serializeFunctions =
    typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
  var ignoreUndefined =
    typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;

  // Build Query object
  var query = new Query(bson, ns, findCmd, {
    numberToSkip: numberToSkip,
    numberToReturn: numberToReturn,
    pre32Limit: typeof cmd.limit !== 'undefined' ? cmd.limit : undefined,
    checkKeys: false,
    returnFieldSelector: cmd.fields,
    serializeFunctions: serializeFunctions,
    ignoreUndefined: ignoreUndefined
  });

  // Set query flags
  query.slaveOk = readPreference.slaveOk();

  // Set up the option bits for wire protocol
  if (typeof cmd.tailable === 'boolean') {
    query.tailable = cmd.tailable;
  }

  if (typeof cmd.oplogReplay === 'boolean') {
    query.oplogReplay = cmd.oplogReplay;
  }

  if (typeof cmd.noCursorTimeout === 'boolean') {
    query.noCursorTimeout = cmd.noCursorTimeout;
  }

  if (typeof cmd.awaitData === 'boolean') {
    query.awaitData = cmd.awaitData;
  }

  if (typeof cmd.partial === 'boolean') {
    query.partial = cmd.partial;
  }

  // Return the query
  return query;
};
|
||||
|
||||
//
// Set up a command cursor
//
// Builds a `$cmd` Query for an arbitrary command. On a mongos topology with a
// non-primary read preference, the command is wrapped in
// { $query, $readPreference }. Pre-3.2 servers only accept readConcern level
// 'local', so anything else throws.
var setupCommand = function(bson, ns, cmd, cursorState, topology, options) {
  options = options || {};

  // Resolve the effective read preference
  var readPreference = getReadPreference(cmd, options);

  // Shallow-copy the command so the mongos wrapping below leaves the caller's
  // top-level object intact
  var finalCmd = {};
  for (var key in cmd) {
    finalCmd[key] = cmd[key];
  }

  // Command namespace: '<db>.$cmd'
  var parts = ns.split(/\./);

  var serializeFunctions =
    typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;

  var ignoreUndefined =
    typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;

  // Throw on majority readConcern passed in
  if (cmd.readConcern && cmd.readConcern.level !== 'local') {
    throw new MongoError(
      f(
        'server %s command does not support a readConcern level of %s',
        JSON.stringify(cmd),
        cmd.readConcern.level
      )
    );
  }

  // Remove readConcern, ensure no failing commands
  if (cmd.readConcern) delete cmd['readConcern'];

  // We have a Mongos topology, check if we need to add a readPreference
  if (topology.type === 'mongos' && readPreference && readPreference.preference !== 'primary') {
    finalCmd = {
      $query: finalCmd,
      $readPreference: readPreference.toJSON()
    };
  }

  // Build Query object
  var query = new Query(bson, f('%s.$cmd', parts.shift()), finalCmd, {
    numberToSkip: 0,
    numberToReturn: -1,
    checkKeys: false,
    serializeFunctions: serializeFunctions,
    ignoreUndefined: ignoreUndefined
  });

  // Set query flags
  query.slaveOk = readPreference.slaveOk();

  return query;
};
|
||||
|
||||
// The 2.6 wire protocol implementation
module.exports = WireProtocol;
|
655
ProjectNow/NodeServer/node_modules/mongodb-core/lib/wireprotocol/3_2_support.js
generated
vendored
Normal file
655
ProjectNow/NodeServer/node_modules/mongodb-core/lib/wireprotocol/3_2_support.js
generated
vendored
Normal file
@@ -0,0 +1,655 @@
|
||||
'use strict';
|
||||
|
||||
const Query = require('../connection/commands').Query;
|
||||
const retrieveBSON = require('../connection/utils').retrieveBSON;
|
||||
const f = require('util').format;
|
||||
const MongoError = require('../error').MongoError;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const getReadPreference = require('./shared').getReadPreference;
|
||||
const BSON = retrieveBSON();
|
||||
const Long = BSON.Long;
|
||||
|
||||
// 3.2+ wire protocol implementation. Keeps a reference to the legacy (2.6)
// implementation for callers that need to fall back to it.
var WireProtocol = function(legacyWireProtocol) {
  this.legacyWireProtocol = legacyWireProtocol;
};
|
||||
|
||||
/**
 * Optionally decorate a command with transactions specific keys
 *
 * @param {Object} command the command to decorate
 * @param {ClientSession} session the session tracking transaction state
 * @param {boolean} [isRetryableWrite=false] if true, will be decorated for retryable writes
 */
function decorateWithTransactionsData(command, session, isRetryableWrite) {
  if (!session) {
    return;
  }

  // first apply non-transaction-specific sessions data
  const serverSession = session.serverSession;
  const inTransaction = session.inTransaction();

  if (serverSession.txnNumber && (isRetryableWrite || inTransaction)) {
    command.txnNumber = BSON.Long.fromNumber(serverSession.txnNumber);
  }

  // now try to apply transaction-specific data
  if (!inTransaction) {
    return;
  }

  command.stmtId = serverSession.stmtId;
  command.autocommit = false;

  if (serverSession.stmtId === 0) {
    // First statement in the transaction carries startTransaction and the
    // (optional) readConcern.
    command.startTransaction = true;

    const readConcern = session.transactionOptions.readConcern || session.clientOptions.readConcern;
    if (readConcern) {
      // Copy rather than alias: assigning the session's readConcern object
      // directly would let the afterClusterTime merge below mutate session
      // state that is shared with later commands.
      command.readConcern = Object.assign({}, readConcern);
    }

    if (session.supports.causalConsistency && session.operationTime) {
      command.readConcern = command.readConcern || {};
      Object.assign(command.readConcern, { afterClusterTime: session.operationTime });
    }
  } else {
    // Drivers MUST add this readConcern to the first command in a transaction and MUST NOT
    // automatically add any readConcern to subsequent commands. Drivers MUST ignore all other
    // readConcerns.
    if (command.readConcern) {
      delete command.readConcern;
    }
  }
}
|
||||
|
||||
//
// Execute a write operation
//
// Builds a 3.2-style write command and writes it to the pool as a
// `<db>.$cmd` query. Differs from the 2.6 path by supporting per-operation
// collation defaults and transaction decoration.
//
// @param {Pool} pool connection pool to write to
// @param {BSON} bson bson parser instance
// @param {String} type write command name ('insert', 'update' or 'delete')
// @param {String} opsField command field holding the operations array
// @param {String} ns fully qualified namespace ('db.collection')
// @param {Array} ops operations to apply
// @param {Object} [options] write options (ordered, writeConcern, session, ...)
// @param {Function} callback invoked with (err) or the command response
var executeWrite = function(pool, bson, type, opsField, ns, ops, options, callback) {
  if (ops.length === 0) throw new MongoError('insert must contain at least one document');
  if (typeof options === 'function') {
    callback = options;
    options = {};
  }
  // Default options unconditionally — previously this only happened inside
  // the callback-shuffling branch, so a null/undefined options argument
  // crashed below at options.ordered.
  options = options || {};

  // Split the ns up to get db and collection
  var p = ns.split('.');
  var d = p.shift();
  // Options
  var ordered = typeof options.ordered === 'boolean' ? options.ordered : true;
  var writeConcern = options.writeConcern;

  // return skeleton
  var writeCommand = {};
  writeCommand[type] = p.join('.');
  writeCommand[opsField] = ops;
  writeCommand.ordered = ordered;

  // Did we specify a write concern
  if (writeConcern && Object.keys(writeConcern).length > 0) {
    writeCommand.writeConcern = writeConcern;
  }

  // If we have collation passed in, apply it to every op that lacks its own
  if (options.collation) {
    for (var i = 0; i < writeCommand[opsField].length; i++) {
      if (!writeCommand[opsField][i].collation) {
        writeCommand[opsField][i].collation = options.collation;
      }
    }
  }

  // Do we have bypassDocumentValidation set, then enable it on the write command
  if (typeof options.bypassDocumentValidation === 'boolean') {
    writeCommand.bypassDocumentValidation = options.bypassDocumentValidation;
  }

  // optionally decorate command with transactions data
  decorateWithTransactionsData(writeCommand, options.session, options.willRetryWrite);

  // Options object
  var opts = { command: true };
  if (typeof options.session !== 'undefined') opts.session = options.session;
  // checkKeys defaults to false; honor an explicit caller override only
  var queryOptions = { checkKeys: false, numberToSkip: 0, numberToReturn: 1 };
  if (typeof options.checkKeys === 'boolean') queryOptions.checkKeys = options.checkKeys;

  // Ensure we support serialization of functions
  if (options.serializeFunctions) queryOptions.serializeFunctions = options.serializeFunctions;
  // Do not serialize the undefined fields
  if (options.ignoreUndefined) queryOptions.ignoreUndefined = options.ignoreUndefined;

  try {
    // Create write command
    var cmd = new Query(bson, f('%s.$cmd', d), writeCommand, queryOptions);
    // Execute command
    pool.write(cmd, opts, callback);
  } catch (err) {
    callback(err);
  }
};
|
||||
|
||||
//
// Needs to support legacy mass insert as well as ordered/unordered legacy
// emulation
//
// All three write entry points delegate to executeWrite; only the command
// name and the field carrying the operations differ. `ismaster` is accepted
// for interface parity with other wire protocol versions but unused here.
WireProtocol.prototype.insert = function(pool, ismaster, ns, bson, ops, options, callback) {
  executeWrite(pool, bson, 'insert', 'documents', ns, ops, options, callback);
};

WireProtocol.prototype.update = function(pool, ismaster, ns, bson, ops, options, callback) {
  executeWrite(pool, bson, 'update', 'updates', ns, ops, options, callback);
};

WireProtocol.prototype.remove = function(pool, ismaster, ns, bson, ops, options, callback) {
  executeWrite(pool, bson, 'delete', 'deletes', ns, ops, options, callback);
};
|
||||
|
||||
/**
 * Kills a server-side cursor via the `killCursors` command (3.2+).
 *
 * The callback is optional; when omitted, all outcomes (including errors)
 * are silently dropped. On success the callback receives the first document
 * of the killCursors reply. When the pool is missing or disconnected the
 * callback is invoked with (null, null).
 */
WireProtocol.prototype.killCursor = function(bson, ns, cursorState, pool, callback) {
  // Build command namespace
  var parts = ns.split(/\./);
  // Command namespace
  var commandns = f('%s.$cmd', parts.shift());
  const cursorId = cursorState.cursorId;
  // Create killCursor command
  var killcursorCmd = {
    killCursors: parts.join('.'),
    cursors: [cursorId]
  };

  // Build Query object
  var query = new Query(bson, commandns, killcursorCmd, {
    numberToSkip: 0,
    numberToReturn: -1,
    checkKeys: false,
    returnFieldSelector: null
  });

  // Kill cursor callback — every exit point tolerates a missing callback
  var killCursorCallback = function(err, result) {
    if (err) {
      if (typeof callback !== 'function') return;
      return callback(err);
    }

    // Result
    var r = result.message;
    // If we have a timed out query or a cursor that was killed
    if ((r.responseFlags & (1 << 0)) !== 0) {
      if (typeof callback !== 'function') return;
      return callback(new MongoNetworkError('cursor killed or timed out'), null);
    }

    // A well-formed reply always carries at least one result document
    if (!Array.isArray(r.documents) || r.documents.length === 0) {
      if (typeof callback !== 'function') return;
      return callback(
        new MongoError(f('invalid killCursors result returned for cursor id %s', cursorId))
      );
    }

    // Return the result
    if (typeof callback === 'function') {
      callback(null, r.documents[0]);
    }
  };

  const options = { command: true };
  if (typeof cursorState.session === 'object') {
    options.session = cursorState.session;
  }

  // Execute the kill cursor command
  if (pool && pool.isConnected()) {
    try {
      pool.write(query, options, killCursorCallback);
    } catch (err) {
      // Serialization/write failures are routed through the same callback path
      killCursorCallback(err, null);
    }

    return;
  }

  // Callback
  if (typeof callback === 'function') callback(null, null);
};
|
||||
|
||||
/**
 * Retrieves the next batch for a cursor using the `getMore` command (3.2+).
 *
 * Updates `cursorState.documents` / `cursorState.cursorId` from the reply and
 * invokes `callback(null, replyDoc, connection)` — or `callback(null, docs)`
 * with the raw documents when `raw` is set.
 */
WireProtocol.prototype.getMore = function(
  bson,
  ns,
  cursorState,
  batchSize,
  raw,
  connection,
  options,
  callback
) {
  options = options || {};
  // Build command namespace
  var parts = ns.split(/\./);
  // Command namespace
  var commandns = f('%s.$cmd', parts.shift());

  // Create getMore command
  var getMoreCmd = {
    getMore: cursorState.cursorId,
    collection: parts.join('.'),
    batchSize: Math.abs(batchSize)
  };

  // optionally decorate command with transactions data
  decorateWithTransactionsData(getMoreCmd, options.session);

  // Tailable await cursors translate maxAwaitTimeMS into the command's maxTimeMS
  if (cursorState.cmd.tailable && typeof cursorState.cmd.maxAwaitTimeMS === 'number') {
    getMoreCmd.maxTimeMS = cursorState.cmd.maxAwaitTimeMS;
  }

  // Build Query object
  var query = new Query(bson, commandns, getMoreCmd, {
    numberToSkip: 0,
    numberToReturn: -1,
    checkKeys: false,
    returnFieldSelector: null
  });

  // Query callback
  var queryCallback = function(err, result) {
    if (err) return callback(err);
    // Get the raw message
    var r = result.message;

    // If we have a timed out query or a cursor that was killed
    if ((r.responseFlags & (1 << 0)) !== 0) {
      return callback(new MongoNetworkError('cursor killed or timed out'), null);
    }

    // Raw, return all the extracted documents
    if (raw) {
      cursorState.documents = r.documents;
      cursorState.cursorId = r.cursorId;
      return callback(null, r.documents);
    }

    // We have an error detected
    if (r.documents[0].ok === 0) {
      return callback(new MongoError(r.documents[0]));
    }

    // Ensure we have a Long valid cursor id
    var cursorId =
      typeof r.documents[0].cursor.id === 'number'
        ? Long.fromNumber(r.documents[0].cursor.id)
        : r.documents[0].cursor.id;

    // Set all the values
    cursorState.documents = r.documents[0].cursor.nextBatch;
    cursorState.cursorId = cursorId;

    // Return the result
    callback(null, r.documents[0], r.connection);
  };

  // Query options
  var queryOptions = { command: true };

  // If we have a raw query decorate the function
  if (raw) {
    queryOptions.raw = raw;
  }

  // Add the result field needed
  queryOptions.documentsReturnedIn = 'nextBatch';

  // Check if we need to promote longs
  if (typeof cursorState.promoteLongs === 'boolean') {
    queryOptions.promoteLongs = cursorState.promoteLongs;
  }

  if (typeof cursorState.promoteValues === 'boolean') {
    queryOptions.promoteValues = cursorState.promoteValues;
  }

  if (typeof cursorState.promoteBuffers === 'boolean') {
    queryOptions.promoteBuffers = cursorState.promoteBuffers;
  }

  if (typeof cursorState.session === 'object') {
    queryOptions.session = cursorState.session;
  }

  // We need to increment the statement id if we're in a transaction
  if (options.session && options.session.inTransaction()) {
    options.session.incrementStatementId();
  }

  // Write out the getMore command
  connection.write(query, queryOptions, queryCallback);
};
|
||||
|
||||
/**
 * Build the Query message for a command, dispatching between the
 * find-command path and the generic command path.
 *
 * @param {BSON} bson BSON serializer instance
 * @param {string} ns Full namespace ("db.collection")
 * @param {object} cmd Command document (or legacy find spec)
 * @param {object} cursorState Shared cursor state for this operation
 * @param {object} topology Topology the command will run against
 * @param {object} [options] Optional settings (session, wireProtocolCommand, ...)
 * @returns {Query|undefined} the Query to send, or undefined when a live
 *   cursor id already exists and nothing needs to be built
 * @throws {MongoError} when no usable command was supplied
 */
WireProtocol.prototype.command = function(bson, ns, cmd, cursorState, topology, options) {
  options = options || {};

  // Treat the command as a wire-protocol command unless explicitly disabled.
  var isWireProtocolCommand = true;
  if (typeof options.wireProtocolCommand === 'boolean') {
    isWireProtocolCommand = options.wireProtocolCommand;
  }

  let query;
  if (cmd.find && isWireProtocolCommand) {
    // Translate the legacy find spec into a `find` command.
    query = executeFindCommand(bson, ns, cmd, cursorState, topology, options);
    // NOTE(review): flags the caller-owned cmd as non-virtual — confirm callers rely on this.
    cmd.virtual = false;
    // The server returns the documents under cursor.firstBatch.
    query.documentsReturnedIn = 'firstBatch';
  } else if (cursorState.cursorId != null) {
    // A live cursor already exists; there is nothing to build.
    return;
  } else if (cmd) {
    query = setupCommand(bson, ns, cmd, cursorState, topology, options);
  } else {
    throw new MongoError(f('command %s does not return a cursor', JSON.stringify(cmd)));
  }

  // Optionally decorate the outgoing command with transaction data.
  decorateWithTransactionsData(query.query, options.session);

  // Inside a transaction every statement gets a fresh statement id.
  if (options.session && options.session.inTransaction()) {
    options.session.incrementStatementId();
  }

  return query;
};
|
||||
|
||||
// // Command
|
||||
// {
|
||||
// find: ns
|
||||
// , query: <object>
|
||||
// , limit: <n>
|
||||
// , fields: <object>
|
||||
// , skip: <n>
|
||||
// , hint: <string>
|
||||
// , explain: <boolean>
|
||||
// , snapshot: <boolean>
|
||||
// , batchSize: <n>
|
||||
// , returnKey: <boolean>
|
||||
// , maxScan: <n>
|
||||
// , min: <n>
|
||||
// , max: <n>
|
||||
// , showDiskLoc: <boolean>
|
||||
// , comment: <string>
|
||||
// , maxTimeMS: <n>
|
||||
// , raw: <boolean>
|
||||
// , readPreference: <ReadPreference>
|
||||
// , tailable: <boolean>
|
||||
// , oplogReplay: <boolean>
|
||||
// , noCursorTimeout: <boolean>
|
||||
// , awaitdata: <boolean>
|
||||
// , exhaust: <boolean>
|
||||
// , partial: <boolean>
|
||||
// }
|
||||
|
||||
// FIND/GETMORE SPEC
// {
//   "find": <string>,
//   "filter": { ... },
//   "sort": { ... },
//   "projection": { ... },
//   "hint": { ... },
//   "skip": <int>,
//   "limit": <int>,
//   "batchSize": <int>,
//   "singleBatch": <bool>,
//   "comment": <string>,
//   "maxScan": <int>,
//   "maxTimeMS": <int>,
//   "max": { ... },
//   "min": { ... },
//   "returnKey": <bool>,
//   "showRecordId": <bool>,
//   "snapshot": <bool>,
//   "tailable": <bool>,
//   "oplogReplay": <bool>,
//   "noCursorTimeout": <bool>,
//   "awaitData": <bool>,
//   "partial": <bool>,
//   "$readPreference": { ... }
// }
|
||||
|
||||
//
// Execute a find command
//
// Translates a legacy OP_QUERY-style `cmd` object into a `find` command
// document and wraps it in a Query message addressed to "<db>.$cmd".
// Mutates `cursorState.batchSize` as a side effect. Property insertion
// order below is deliberate: BSON serializes keys in insertion order and
// the command document must begin with the `find` key.
var executeFindCommand = function(bson, ns, cmd, cursorState, topology, options) {
  // Ensure we have at least some options
  options = options || {};
  // Get the readPreference (options-level preference overrides the command's)
  var readPreference = getReadPreference(cmd, options);

  // Set the optional batchSize (side effect on the shared cursor state)
  cursorState.batchSize = cmd.batchSize || cursorState.batchSize;

  // Build command namespace: first ns segment is the database name
  var parts = ns.split(/\./);
  // Command namespace — the command runs against "<db>.$cmd"
  var commandns = f('%s.$cmd', parts.shift());

  // Build actual find command; the remaining segments form the collection name
  var findCmd = {
    find: parts.join('.')
  };

  // If we provided a filter
  if (cmd.query) {
    // Check if the user is passing in the $query parameter
    if (cmd.query['$query']) {
      findCmd.filter = cmd.query['$query'];
    } else {
      findCmd.filter = cmd.query;
    }
  }

  // Sort value
  var sortValue = cmd.sort;

  // Handle issue of sort being an Array: either a single [field, direction]
  // pair or an array of such pairs.
  if (Array.isArray(sortValue)) {
    var sortObject = {};

    if (sortValue.length > 0 && !Array.isArray(sortValue[0])) {
      var sortDirection = sortValue[1];
      // Translate the sort order text ('asc'/'desc') into 1/-1
      if (sortDirection === 'asc') {
        sortDirection = 1;
      } else if (sortDirection === 'desc') {
        sortDirection = -1;
      }

      // Set the sort order
      sortObject[sortValue[0]] = sortDirection;
    } else {
      for (var i = 0; i < sortValue.length; i++) {
        sortDirection = sortValue[i][1];
        // Translate the sort order text
        if (sortDirection === 'asc') {
          sortDirection = 1;
        } else if (sortDirection === 'desc') {
          sortDirection = -1;
        }

        // Set the sort order
        sortObject[sortValue[i][0]] = sortDirection;
      }
    }

    sortValue = sortObject;
  }

  // Add sort to command
  if (cmd.sort) findCmd.sort = sortValue;
  // Add a projection to the command
  if (cmd.fields) findCmd.projection = cmd.fields;
  // Add a hint to the command
  if (cmd.hint) findCmd.hint = cmd.hint;
  // Add a skip
  if (cmd.skip) findCmd.skip = cmd.skip;
  // Add a limit
  if (cmd.limit) findCmd.limit = cmd.limit;

  // A negative limit means: return at most |limit| documents in one batch
  if (cmd.limit < 0) {
    findCmd.limit = Math.abs(cmd.limit);
    findCmd.singleBatch = true;
  }

  // Add a batchSize
  if (typeof cmd.batchSize === 'number') {
    if (cmd.batchSize < 0) {
      // A negative batchSize also forces a single batch; it may further
      // cap the limit when it is the tighter bound.
      if (cmd.limit !== 0 && Math.abs(cmd.batchSize) < Math.abs(cmd.limit)) {
        findCmd.limit = Math.abs(cmd.batchSize);
      }

      findCmd.singleBatch = true;
    }

    findCmd.batchSize = Math.abs(cmd.batchSize);
  }

  // If we have comment set
  if (cmd.comment) findCmd.comment = cmd.comment;

  // If we have maxScan
  if (cmd.maxScan) findCmd.maxScan = cmd.maxScan;

  // If we have maxTimeMS set
  if (cmd.maxTimeMS) findCmd.maxTimeMS = cmd.maxTimeMS;

  // If we have min
  if (cmd.min) findCmd.min = cmd.min;

  // If we have max
  if (cmd.max) findCmd.max = cmd.max;

  // returnKey is always emitted, defaulting to false
  findCmd.returnKey = cmd.returnKey ? cmd.returnKey : false;

  // Legacy showDiskLoc maps onto the command's showRecordId field
  findCmd.showRecordId = cmd.showDiskLoc ? cmd.showDiskLoc : false;

  // If we have snapshot set
  if (cmd.snapshot) findCmd.snapshot = cmd.snapshot;

  // If we have tailable set
  if (cmd.tailable) findCmd.tailable = cmd.tailable;

  // If we have oplogReplay set
  if (cmd.oplogReplay) findCmd.oplogReplay = cmd.oplogReplay;

  // If we have noCursorTimeout set
  if (cmd.noCursorTimeout) findCmd.noCursorTimeout = cmd.noCursorTimeout;

  // awaitData: accept both the camelCase and legacy all-lowercase spellings
  if (cmd.awaitData) findCmd.awaitData = cmd.awaitData;
  if (cmd.awaitdata) findCmd.awaitData = cmd.awaitdata;

  // If we have partial set
  if (cmd.partial) findCmd.partial = cmd.partial;

  // If we have collation passed in
  if (cmd.collation) findCmd.collation = cmd.collation;

  // If we have explain, we need to rewrite the find command
  // to wrap it in the explain command
  if (cmd.explain) {
    findCmd = {
      explain: findCmd
    };
  }

  // Did we provide a readConcern
  // NOTE(review): when cmd.explain is set, this attaches readConcern to the
  // explain wrapper rather than the inner find command — confirm intended.
  if (cmd.readConcern) findCmd.readConcern = cmd.readConcern;

  // Set up the serialize and ignoreUndefined fields (both default to false)
  var serializeFunctions =
    typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
  var ignoreUndefined =
    typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;

  // We have a Mongos topology, check if we need to add a readPreference:
  // non-primary reads through mongos require the $query/$readPreference wrapper
  if (topology.type === 'mongos' && readPreference && readPreference.preference !== 'primary') {
    findCmd = {
      $query: findCmd,
      $readPreference: readPreference.toJSON()
    };
  }

  // optionally decorate query with transaction data
  decorateWithTransactionsData(findCmd, options.session);

  // Build Query object (commands use numberToReturn: 1)
  var query = new Query(bson, commandns, findCmd, {
    numberToSkip: 0,
    numberToReturn: 1,
    checkKeys: false,
    returnFieldSelector: null,
    serializeFunctions: serializeFunctions,
    ignoreUndefined: ignoreUndefined
  });

  // Set query flags from the resolved read preference
  query.slaveOk = readPreference.slaveOk();

  // Return the query
  return query;
};
|
||||
|
||||
//
// Set up a command cursor
//
// Builds a Query message that runs an arbitrary command against the
// database's "$cmd" collection, honoring read preference and transactions.
var setupCommand = function(bson, ns, cmd, cursorState, topology, options) {
  options = options || {};

  // Resolve the effective read preference (options override the command's own).
  var readPreference = getReadPreference(cmd, options);

  // Shallow-copy the command so the wrapping below never replaces the
  // caller's object. for..in matches the original copy semantics.
  var finalCmd = {};
  for (var key in cmd) {
    finalCmd[key] = cmd[key];
  }

  // The command is issued against "<db>.$cmd".
  var nsParts = ns.split(/\./);
  var cmdNamespace = f('%s.$cmd', nsParts.shift());

  // BSON serialization knobs; both are off unless explicitly set to true.
  var serializeFunctions = options.serializeFunctions === true;
  var ignoreUndefined = options.ignoreUndefined === true;

  // Non-primary reads through a mongos require the $query/$readPreference wrapper.
  if (topology.type === 'mongos' && readPreference && readPreference.preference !== 'primary') {
    finalCmd = {
      $query: finalCmd,
      $readPreference: readPreference.toJSON()
    };
  }

  // Optionally decorate the command with transaction data.
  decorateWithTransactionsData(finalCmd, options.session);

  // Commands are sent with numberToReturn: -1 (exactly one reply document).
  var query = new Query(bson, cmdNamespace, finalCmd, {
    numberToSkip: 0,
    numberToReturn: -1,
    checkKeys: false,
    serializeFunctions: serializeFunctions,
    ignoreUndefined: ignoreUndefined
  });

  // Propagate the slaveOk wire flag from the read preference.
  query.slaveOk = readPreference.slaveOk();

  return query;
};
|
||||
|
||||
// Export the wire-protocol implementation for this protocol version.
module.exports = WireProtocol;
|
73
ProjectNow/NodeServer/node_modules/mongodb-core/lib/wireprotocol/compression.js
generated
vendored
Normal file
73
ProjectNow/NodeServer/node_modules/mongodb-core/lib/wireprotocol/compression.js
generated
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
'use strict';
|
||||
|
||||
var Snappy = require('../connection/utils').retrieveSnappy(),
|
||||
zlib = require('zlib');
|
||||
|
||||
// Wire-protocol compressor identifiers used in the OP_COMPRESSED header
// (id 0 is implicitly "no compression" and is not listed here).
var compressorIDs = {
  snappy: 1,
  zlib: 2
};

// Commands that must always be sent uncompressed (handshake and
// authentication-related commands).
var uncompressibleCommands = [
  'ismaster',
  'saslStart',
  'saslContinue',
  'getnonce',
  'authenticate',
  'createUser',
  'updateUser',
  'copydbSaslStart',
  'copydbgetnonce',
  'copydb'
];
|
||||
|
||||
// Facilitate compressing a message using an agreed compressor.
// `self.options.agreedCompressor` selects the algorithm; the compressed
// payload is delivered via the node-style `callback(err, data)`.
// Throws synchronously when the agreed compressor is unknown.
var compress = function(self, dataToBeCompressed, callback) {
  var compressor = self.options.agreedCompressor;

  if (compressor === 'snappy') {
    Snappy.compress(dataToBeCompressed, callback);
  } else if (compressor === 'zlib') {
    // Forward an explicit compression level only when one was configured.
    var deflateOptions = {};
    if (self.options.zlibCompressionLevel) {
      deflateOptions.level = self.options.zlibCompressionLevel;
    }
    zlib.deflate(dataToBeCompressed, deflateOptions, callback);
  } else {
    throw new Error(
      'Attempt to compress message using unknown compressor "' + compressor + '".'
    );
  }
};
|
||||
|
||||
// Decompress a message using the given compressor.
// `compressorID` comes from the OP_COMPRESSED header; id 0 means the payload
// is not compressed and is handed back untouched via the callback.
// Throws synchronously when the server names a compressor we do not support.
var decompress = function(compressorID, compressedData, callback) {
  // Highest compressor id we understand. The original check compared against
  // `compressorIDs.length`, which is undefined on a plain object, so the
  // upper bound was never enforced and unsupported ids silently fell through
  // to the uncompressed path.
  var maxCompressorID = Math.max.apply(
    null,
    Object.keys(compressorIDs).map(function(name) {
      return compressorIDs[name];
    })
  );

  if (compressorID < 0 || compressorID > maxCompressorID) {
    throw new Error(
      'Server sent message compressed using an unsupported compressor. (Received compressor ID ' +
        compressorID +
        ')'
    );
  }

  switch (compressorID) {
    case compressorIDs.snappy:
      Snappy.uncompress(compressedData, callback);
      break;
    case compressorIDs.zlib:
      zlib.inflate(compressedData, callback);
      break;
    default:
      // id 0: payload was sent uncompressed.
      callback(null, compressedData);
  }
};
|
||||
|
||||
// Public surface of the compression helpers.
module.exports = {
  compressorIDs: compressorIDs,
  uncompressibleCommands: uncompressibleCommands,
  compress: compress,
  decompress: decompress
};
|
63
ProjectNow/NodeServer/node_modules/mongodb-core/lib/wireprotocol/shared.js
generated
vendored
Normal file
63
ProjectNow/NodeServer/node_modules/mongodb-core/lib/wireprotocol/shared.js
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
'use strict';
|
||||
|
||||
var ReadPreference = require('../topologies/read_preference'),
|
||||
MongoError = require('../error').MongoError;
|
||||
|
||||
// Every wire-protocol message starts with a fixed 16-byte header:
// int32 length, int32 requestId, int32 responseTo, int32 opCode.
var MESSAGE_HEADER_SIZE = 16;

// OPCODE Numbers
// Defined at https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#request-opcodes
var opcodes = {
  OP_REPLY: 1,
  OP_UPDATE: 2001,
  OP_INSERT: 2002,
  OP_QUERY: 2004,
  OP_GETMORE: 2005,
  OP_DELETE: 2006,
  OP_KILL_CURSORS: 2007,
  OP_COMPRESSED: 2012
};
|
||||
|
||||
// Resolve the read preference for a command: an options-level preference
// beats the one embedded in the command, strings are promoted to
// ReadPreference instances, and transactions are restricted to primary.
// Throws MongoError on an invalid preference or a non-primary read inside
// a transaction.
var getReadPreference = function(cmd, options) {
  // Options override the command; otherwise fall back to the command's own
  // preference, defaulting to primary.
  var preference = options.readPreference
    ? options.readPreference
    : cmd.readPreference || new ReadPreference('primary');

  // Accept plain mode names like 'secondary' and promote them.
  if (typeof preference === 'string') {
    preference = new ReadPreference(preference);
  }

  if (!(preference instanceof ReadPreference)) {
    throw new MongoError('read preference must be a ReadPreference instance');
  }

  // Inside a transaction only primary reads are legal.
  var session = options.session;
  if (session && session.inTransaction() && !preference.equals(ReadPreference.primary)) {
    throw new MongoError('read preference in a transaction must be primary');
  }

  return preference;
};
|
||||
|
||||
// Parses the header of a wire protocol message.
// Decodes the fixed 16-byte header into its four little-endian int32 fields.
var parseHeader = function(message) {
  var header = {};
  header.length = message.readInt32LE(0);
  header.requestId = message.readInt32LE(4);
  header.responseTo = message.readInt32LE(8);
  header.opCode = message.readInt32LE(12);
  return header;
};
|
||||
|
||||
// Shared wire-protocol helpers and constants.
module.exports = {
  getReadPreference: getReadPreference,
  MESSAGE_HEADER_SIZE: MESSAGE_HEADER_SIZE,
  opcodes: opcodes,
  parseHeader: parseHeader
};
|
Reference in New Issue
Block a user