mirror of
https://github.com/bvanroll/yahoo-thing.git
synced 2025-08-29 20:12:46 +00:00
euh
This commit is contained in:
62
node_modules/mongodb/lib/operations/admin_ops.js
generated
vendored
Normal file
62
node_modules/mongodb/lib/operations/admin_ops.js
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
'use strict';
|
||||
|
||||
const executeCommand = require('./db_ops').executeCommand;
|
||||
const executeDbAdminCommand = require('./db_ops').executeDbAdminCommand;
|
||||
|
||||
/**
 * Get ReplicaSet status
 *
 * @param {Admin} admin The Admin instance whose underlying db the command is run against.
 * @param {Object} [options] Optional settings. See Admin.prototype.replSetGetStatus for a list of options.
 * @param {Admin~resultCallback} [callback] The command result callback.
 */
function replSetGetStatus(admin, options, callback) {
  // Thin wrapper: issue { replSetGetStatus: 1 } against the admin db.
  executeDbAdminCommand(admin.s.db, { replSetGetStatus: 1 }, options, callback);
}
|
||||
|
||||
/**
 * Retrieve this db's server status.
 *
 * @param {Admin} admin The Admin instance whose underlying db the command is run against.
 * @param {Object} [options] Optional settings. See Admin.prototype.serverStatus for a list of options.
 * @param {Admin~resultCallback} [callback] The command result callback
 */
function serverStatus(admin, options, callback) {
  // Thin wrapper: issue { serverStatus: 1 } against the admin db.
  executeDbAdminCommand(admin.s.db, { serverStatus: 1 }, options, callback);
}
|
||||
|
||||
/**
 * Validate an existing collection
 *
 * @param {Admin} admin The Admin instance whose underlying db the command is run against.
 * @param {string} collectionName The name of the collection to validate.
 * @param {Object} [options] Optional settings. See Admin.prototype.validateCollection for a list of options.
 * @param {Admin~resultCallback} [callback] The command result callback.
 */
function validateCollection(admin, collectionName, options, callback) {
  const command = { validate: collectionName };
  const keys = Object.keys(options);

  // Decorate command with extra options
  // ('session' is transport-level and must not be forwarded to the server
  // as part of the validate command document itself).
  for (let i = 0; i < keys.length; i++) {
    if (options.hasOwnProperty(keys[i]) && keys[i] !== 'session') {
      command[keys[i]] = options[keys[i]];
    }
  }

  executeCommand(admin.s.db, command, options, (err, doc) => {
    if (err != null) return callback(err, null);

    // Server rejected the command outright.
    if (doc.ok === 0) return callback(new Error('Error with validate command'), null);
    // When present, `result` is expected to be a string summary; any other
    // type indicates malformed validation data.
    if (doc.result != null && doc.result.constructor !== String)
      return callback(new Error('Error with validation data'), null);
    // A summary mentioning exceptions or corruption means the collection is bad.
    if (doc.result != null && doc.result.match(/exception|corrupt/) != null)
      return callback(new Error('Error: invalid collection ' + collectionName), null);
    // The server may also flag invalidity explicitly via `valid`.
    if (doc.valid != null && !doc.valid)
      return callback(new Error('Error: invalid collection ' + collectionName), null);

    return callback(null, doc);
  });
}
|
||||
|
||||
module.exports = { replSetGetStatus, serverStatus, validateCollection };
|
1494
node_modules/mongodb/lib/operations/collection_ops.js
generated
vendored
Normal file
1494
node_modules/mongodb/lib/operations/collection_ops.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
250
node_modules/mongodb/lib/operations/cursor_ops.js
generated
vendored
Normal file
250
node_modules/mongodb/lib/operations/cursor_ops.js
generated
vendored
Normal file
@@ -0,0 +1,250 @@
|
||||
'use strict';
|
||||
|
||||
const buildCountCommand = require('./collection_ops').buildCountCommand;
|
||||
const formattedOrderClause = require('../utils').formattedOrderClause;
|
||||
const handleCallback = require('../utils').handleCallback;
|
||||
const MongoError = require('mongodb-core').MongoError;
|
||||
const push = Array.prototype.push;
|
||||
|
||||
// Lazily-loaded Cursor class; required on first use (presumably deferred to
// avoid a circular require with ../cursor — TODO confirm).
let cursor;
function loadCursor() {
  if (!cursor) {
    cursor = require('../cursor');
  }
  return cursor;
}
|
||||
|
||||
/**
 * Get the count of documents for this cursor.
 *
 * @method
 * @param {Cursor} cursor The Cursor instance on which to count.
 * @param {boolean} [applySkipLimit=true] Specifies whether the count command apply limit and skip settings should be applied on the cursor or in the provided options.
 * @param {object} [opts] Optional settings. See Cursor.prototype.count for a list of options.
 * @param {Cursor~countResultCallback} [callback] The result callback.
 */
function count(cursor, applySkipLimit, opts, callback) {
  // NOTE(review): the `cursor` parameter shadows the module-level lazy
  // `cursor` require above; harmless here but worth knowing when editing.
  if (applySkipLimit) {
    // Mirror the cursor's own skip/limit settings into the count options.
    if (typeof cursor.cursorSkip() === 'number') opts.skip = cursor.cursorSkip();
    if (typeof cursor.cursorLimit() === 'number') opts.limit = cursor.cursorLimit();
  }

  // Ensure we have the right read preference inheritance
  if (opts.readPreference) {
    cursor.setReadPreference(opts.readPreference);
  }

  // Inherit maxTimeMS from the cursor's command when not set explicitly.
  if (
    typeof opts.maxTimeMS !== 'number' &&
    cursor.s.cmd &&
    typeof cursor.s.cmd.maxTimeMS === 'number'
  ) {
    opts.maxTimeMS = cursor.s.cmd.maxTimeMS;
  }

  // Only the options relevant to the count command are forwarded.
  let options = {};
  options.skip = opts.skip;
  options.limit = opts.limit;
  options.hint = opts.hint;
  options.maxTimeMS = opts.maxTimeMS;

  // Command
  // Split the namespace "db.collection" at the first dot.
  const delimiter = cursor.s.ns.indexOf('.');
  options.collectionName = cursor.s.ns.substr(delimiter + 1);

  let command;
  try {
    command = buildCountCommand(cursor, cursor.s.cmd.query, options);
  } catch (err) {
    return callback(err);
  }

  // Set cursor server to the same as the topology
  // NOTE(review): this reads cursor.topology while the command below goes
  // through cursor.s.topology — presumably both point at the same topology;
  // confirm against the Cursor implementation.
  cursor.server = cursor.topology.s.coreTopology;

  // Execute the command against the "<db>.$cmd" namespace; on success the
  // count is at result.result.n.
  cursor.s.topology.command(
    `${cursor.s.ns.substr(0, delimiter)}.$cmd`,
    command,
    cursor.s.options,
    (err, result) => {
      callback(err, result ? result.result.n : null);
    }
  );
}
|
||||
|
||||
/**
 * Iterates over all the documents for this cursor. See Cursor.prototype.each for more information.
 *
 * @method
 * @deprecated
 * @param {Cursor} cursor The Cursor instance on which to run.
 * @param {Cursor~resultCallback} callback The result callback.
 */
function each(cursor, callback) {
  let Cursor = loadCursor();

  if (!callback) throw MongoError.create({ message: 'callback is mandatory', driver: true });
  if (cursor.isNotified()) return;
  if (cursor.s.state === Cursor.CLOSED || cursor.isDead()) {
    return handleCallback(
      callback,
      MongoError.create({ message: 'Cursor is closed', driver: true })
    );
  }

  if (cursor.s.state === Cursor.INIT) cursor.s.state = Cursor.OPEN;

  // Define function to avoid global scope escape
  let fn = null;
  // Trampoline all the entries: drain every already-buffered document
  // synchronously via loop() (which returns itself while the buffer is
  // non-empty), then recurse to fetch the next batch.
  if (cursor.bufferedCount() > 0) {
    while ((fn = loop(cursor, callback))) fn(cursor, callback);
    each(cursor, callback);
  } else {
    // Buffer empty: fetch the next document from the server.
    cursor.next((err, item) => {
      if (err) return handleCallback(callback, err);
      if (item == null) {
        // Exhausted — close the cursor and signal completion with (null, null).
        return cursor.close({ skipKillCursors: true }, () => handleCallback(callback, null, null));
      }

      // A callback result of exactly `false` aborts the iteration early.
      if (handleCallback(callback, null, item) === false) return;
      each(cursor, callback);
    });
  }
}
|
||||
|
||||
/**
 * Check if there is any document still available in the cursor.
 *
 * @method
 * @param {Cursor} cursor The Cursor instance on which to run.
 * @param {Cursor~resultCallback} [callback] The result callback.
 */
function hasNext(cursor, callback) {
  let Cursor = loadCursor();

  // A document pre-fetched by an earlier hasNext call is still pending.
  if (cursor.s.currentDoc) {
    return callback(null, true);
  }

  if (cursor.isNotified()) {
    return callback(null, false);
  }

  // Pre-fetch the next document and stash it on s.currentDoc so a following
  // next() call returns it without another round trip.
  nextObject(cursor, (err, doc) => {
    if (err) return callback(err, null);
    if (cursor.s.state === Cursor.CLOSED || cursor.isDead()) return callback(null, false);
    if (!doc) return callback(null, false);
    cursor.s.currentDoc = doc;
    callback(null, true);
  });
}
|
||||
|
||||
// Trampoline helper: hands out one buffered document per invocation without
// incurring a nextTick. Returns itself while documents may remain so the
// caller can keep invoking it in a tight loop; returns undefined once the
// client-side buffer is empty.
function loop(cursor, callback) {
  const buffered = cursor.bufferedCount();
  if (buffered === 0) {
    // Buffer exhausted — tell the caller to stop bouncing.
    return;
  }

  // Deliver the next buffered document.
  cursor._next(callback);

  // Possibly more to drain; let the caller call us again.
  return loop;
}
|
||||
|
||||
/**
 * Get the next available document from the cursor. Returns null if no more documents are available.
 *
 * @method
 * @param {Cursor} cursor The Cursor instance from which to get the next document.
 * @param {Cursor~resultCallback} [callback] The result callback.
 */
function next(cursor, callback) {
  // hasNext may already have pre-fetched a document; hand that one out first.
  const prefetched = cursor.s.currentDoc;
  if (prefetched) {
    cursor.s.currentDoc = null;
    return callback(null, prefetched);
  }

  // Nothing pre-fetched — pull the next document from the buffer/server.
  nextObject(cursor, callback);
}
|
||||
|
||||
// Get the next available document from the cursor, returns null if no more documents are available.
function nextObject(cursor, callback) {
  let Cursor = loadCursor();

  if (cursor.s.state === Cursor.CLOSED || (cursor.isDead && cursor.isDead()))
    return handleCallback(
      callback,
      MongoError.create({ message: 'Cursor is closed', driver: true })
    );
  // On first use, normalize any user-supplied sort specification.
  if (cursor.s.state === Cursor.INIT && cursor.s.cmd.sort) {
    try {
      cursor.s.cmd.sort = formattedOrderClause(cursor.s.cmd.sort);
    } catch (err) {
      return handleCallback(callback, err);
    }
  }

  // Get the next object
  cursor._next((err, doc) => {
    // Mark the cursor open once the first fetch has been attempted.
    cursor.s.state = Cursor.OPEN;
    if (err) return handleCallback(callback, err);
    handleCallback(callback, null, doc);
  });
}
|
||||
|
||||
/**
 * Returns an array of documents. See Cursor.prototype.toArray for more information.
 *
 * @method
 * @param {Cursor} cursor The Cursor instance from which to get the next document.
 * @param {Cursor~toArrayResultCallback} [callback] The result callback.
 */
function toArray(cursor, callback) {
  let Cursor = loadCursor();

  const items = [];

  // Reset cursor so iteration starts from the beginning.
  cursor.rewind();
  cursor.s.state = Cursor.INIT;

  // Fetch all the documents, recursing until the cursor is exhausted.
  const fetchDocs = () => {
    cursor._next((err, doc) => {
      if (err) {
        // End the session (when one exists) before reporting the error.
        return cursor._endSession
          ? cursor._endSession(() => handleCallback(callback, err))
          : handleCallback(callback, err);
      }
      if (doc == null) {
        // Exhausted: close without a killCursors round trip and hand back items.
        return cursor.close({ skipKillCursors: true }, () => handleCallback(callback, null, items));
      }

      // Add doc to items
      items.push(doc);

      // Drain any documents already buffered client-side in one batch.
      if (cursor.bufferedCount() > 0) {
        let docs = cursor.readBufferedDocuments(cursor.bufferedCount());

        // Transform the doc if transform method added
        if (cursor.s.transforms && typeof cursor.s.transforms.doc === 'function') {
          docs = docs.map(cursor.s.transforms.doc);
        }

        push.apply(items, docs);
      }

      // Attempt a fetch
      fetchDocs();
    });
  };

  fetchDocs();
}
|
||||
|
||||
module.exports = { count, each, hasNext, next, toArray };
|
1006
node_modules/mongodb/lib/operations/db_ops.js
generated
vendored
Normal file
1006
node_modules/mongodb/lib/operations/db_ops.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
654
node_modules/mongodb/lib/operations/mongo_client_ops.js
generated
vendored
Normal file
654
node_modules/mongodb/lib/operations/mongo_client_ops.js
generated
vendored
Normal file
@@ -0,0 +1,654 @@
|
||||
'use strict';
|
||||
|
||||
const authenticate = require('../authenticate');
|
||||
const deprecate = require('util').deprecate;
|
||||
const Logger = require('mongodb-core').Logger;
|
||||
const MongoError = require('mongodb-core').MongoError;
|
||||
const Mongos = require('../topologies/mongos');
|
||||
const parse = require('mongodb-core').parseConnectionString;
|
||||
const ReadPreference = require('mongodb-core').ReadPreference;
|
||||
const ReplSet = require('../topologies/replset');
|
||||
const Server = require('../topologies/server');
|
||||
const ServerSessionPool = require('mongodb-core').Sessions.ServerSessionPool;
|
||||
|
||||
// Lazily-loaded MongoClient class; required on first use (presumably deferred
// to avoid a circular require with ../mongo_client — TODO confirm).
let client;
function loadClient() {
  if (!client) {
    client = require('../mongo_client');
  }
  return client;
}
|
||||
|
||||
// Every SDAM/APM/topology event this module may attach temporary listeners
// to; used by collectEvents/clearAllEvents when buffering events across a
// topology switch.
const monitoringEvents = [
  'timeout',
  'close',
  'serverOpening',
  'serverDescriptionChanged',
  'serverHeartbeatStarted',
  'serverHeartbeatSucceeded',
  'serverHeartbeatFailed',
  'serverClosed',
  'topologyOpening',
  'topologyClosed',
  'topologyDescriptionChanged',
  'commandStarted',
  'commandSucceeded',
  'commandFailed',
  'joined',
  'left',
  'ping',
  'ha',
  'all',
  'fullsetup',
  'open'
];
// Option names accepted silently (skipped by validOptions) for backwards
// compatibility.
const ignoreOptionNames = ['native_parser'];
// Pre-3.x nested option containers; still accepted but trigger a
// deprecation warning in validOptions.
const legacyOptionNames = ['server', 'replset', 'replSet', 'mongos', 'db'];
// The old connection-string parser, wrapped so that using it prints a
// one-time deprecation notice.
const legacyParse = deprecate(
  require('../url_parser'),
  'current URL string parser is deprecated, and will be removed in a future version. ' +
    'To use the new parser, pass option { useNewUrlParser: true } to MongoClient.connect.'
);
// Allow-list of supported MongoClient option names; anything else warns (or
// errors when validateOptions is set) in validOptions().
const validOptionNames = [
  'poolSize',
  'ssl',
  'sslValidate',
  'sslCA',
  'sslCert',
  'sslKey',
  'sslPass',
  'sslCRL',
  'autoReconnect',
  'noDelay',
  'keepAlive',
  'keepAliveInitialDelay',
  'connectTimeoutMS',
  'family',
  'socketTimeoutMS',
  'reconnectTries',
  'reconnectInterval',
  'ha',
  'haInterval',
  'replicaSet',
  'secondaryAcceptableLatencyMS',
  'acceptableLatencyMS',
  'connectWithNoPrimary',
  'authSource',
  'w',
  'wtimeout',
  'j',
  'forceServerObjectId',
  'serializeFunctions',
  'ignoreUndefined',
  'raw',
  'bufferMaxEntries',
  'readPreference',
  'pkFactory',
  'promiseLibrary',
  'readConcern',
  'maxStalenessSeconds',
  'loggerLevel',
  'logger',
  'promoteValues',
  'promoteBuffers',
  'promoteLongs',
  'domainsEnabled',
  'checkServerIdentity',
  'validateOptions',
  'appname',
  'auth',
  'user',
  'password',
  'authMechanism',
  'compression',
  'fsync',
  'readPreferenceTags',
  'numberOfRetries',
  'auto_reconnect',
  'minSize',
  'monitorCommands',
  'retryWrites',
  'useNewUrlParser'
];
|
||||
|
||||
// Wire the standard client-facing events from the topology through to the
// MongoClient. One-shot lifecycle events (open/fullsetup/all) are forwarded
// only once; the rest are forwarded on every occurrence.
function addListeners(mongoClient, topology) {
  topology.on('authenticated', createListener(mongoClient, 'authenticated'));
  topology.on('error', createListener(mongoClient, 'error'));
  topology.on('timeout', createListener(mongoClient, 'timeout'));
  topology.on('close', createListener(mongoClient, 'close'));
  topology.on('parseError', createListener(mongoClient, 'parseError'));
  topology.once('open', createListener(mongoClient, 'open'));
  topology.once('fullsetup', createListener(mongoClient, 'fullsetup'));
  topology.once('all', createListener(mongoClient, 'all'));
  topology.on('reconnect', createListener(mongoClient, 'reconnect'));
}
|
||||
|
||||
// Attach the topology to the client and give the topology a server-session
// pool backed by its core connection layer.
function assignTopology(client, topology) {
  client.topology = topology;
  topology.s.sessionPool = new ServerSessionPool(topology.s.coreTopology);
}
|
||||
|
||||
// Remove every listener registered for the SDAM monitoring events on the
// given topology (used to drop the temporary event collectors after connect).
function clearAllEvents(topology) {
  for (const event of monitoringEvents) {
    topology.removeAllListeners(event);
  }
}
|
||||
|
||||
// Collect all events in order from SDAM
// Buffers every monitoring event fired by `topology` so it can be replayed
// later (see replayEvents) — used when a single-server connect turns out to
// be a mongos and the topology has to be rebuilt.
function collectEvents(mongoClient, topology) {
  let MongoClient = loadClient();
  const collectedEvents = [];

  // Only collect for real MongoClient instances.
  if (mongoClient instanceof MongoClient) {
    monitoringEvents.forEach(event => {
      topology.on(event, (object1, object2) => {
        if (event === 'open') {
          // 'open' is replayed with the client itself as its payload.
          collectedEvents.push({ event: event, object1: mongoClient });
        } else {
          collectedEvents.push({ event: event, object1: object1, object2: object2 });
        }
      });
    });
  }

  return collectedEvents;
}
|
||||
|
||||
/**
 * Connect to MongoDB using a url as documented at
 *
 * docs.mongodb.org/manual/reference/connection-string/
 *
 * Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
 *
 * @method
 * @param {MongoClient} mongoClient The MongoClient instance with which to connect.
 * @param {string} url The connection URI string
 * @param {object} [options] Optional settings. See MongoClient.prototype.connect for a list of options.
 * @param {MongoClient~connectCallback} [callback] The command result callback
 */
function connect(mongoClient, url, options, callback) {
  // Work on a copy so the caller's options object is never mutated.
  options = Object.assign({}, options);

  // If callback is null throw an exception
  if (callback == null) {
    throw new Error('no callback function provided');
  }

  // Get a logger for MongoClient
  const logger = Logger('MongoClient', options);

  // Did we pass in a Server/ReplSet/Mongos
  // (i.e. `url` is an already-constructed topology, not a string).
  if (url instanceof Server || url instanceof ReplSet || url instanceof Mongos) {
    return connectWithUrl(mongoClient, url, options, connectCallback);
  }

  // Choose the parser/transform pair: the spec-compliant parser behind
  // useNewUrlParser, otherwise the deprecated legacy parser.
  const parseFn = options.useNewUrlParser ? parse : legacyParse;
  const transform = options.useNewUrlParser ? transformUrlOptions : legacyTransformUrlOptions;

  parseFn(url, options, (err, _object) => {
    // Do not attempt to connect if parsing error
    if (err) return callback(err);

    // Flatten
    const object = transform(_object);

    // Parse the string
    const _finalOptions = createUnifiedOptions(object, options);

    // Check if we have connection and socket timeout set
    if (_finalOptions.socketTimeoutMS == null) _finalOptions.socketTimeoutMS = 360000;
    if (_finalOptions.connectTimeoutMS == null) _finalOptions.connectTimeoutMS = 30000;

    // Auth has been hoisted to the top level by this point; drop the nested copy.
    if (_finalOptions.db_options && _finalOptions.db_options.auth) {
      delete _finalOptions.db_options.auth;
    }

    // Store the merged options object
    mongoClient.s.options = _finalOptions;

    // Failure modes
    if (object.servers.length === 0) {
      return callback(new Error('connection string must contain at least one seed host'));
    }

    // Do we have a replicaset then skip discovery and go straight to connectivity
    if (_finalOptions.replicaSet || _finalOptions.rs_name) {
      return createTopology(
        mongoClient,
        'replicaset',
        _finalOptions,
        connectHandler(mongoClient, _finalOptions, connectCallback)
      );
    } else if (object.servers.length > 1) {
      // Multiple seed hosts without replicaSet → assume a mongos deployment.
      return createTopology(
        mongoClient,
        'mongos',
        _finalOptions,
        connectHandler(mongoClient, _finalOptions, connectCallback)
      );
    } else {
      // Single host: connect directly (may still upgrade to mongos later,
      // see createServer).
      return createServer(
        mongoClient,
        _finalOptions,
        connectHandler(mongoClient, _finalOptions, connectCallback)
      );
    }
  });
  // Shared terminal callback: rewrites the generic "no mongos proxies"
  // error into a more actionable message before invoking the user callback.
  function connectCallback(err, topology) {
    const warningMessage = `seed list contains no mongos proxies, replicaset connections requires the parameter replicaSet to be supplied in the URI or options object, mongodb://server:port/db?replicaSet=name`;
    if (err && err.message === 'no mongos proxies found in seed list') {
      if (logger.isWarn()) {
        logger.warn(warningMessage);
      }

      // Return a more specific error message for MongoClient.connect
      return callback(new MongoError(warningMessage));
    }

    // Return the error and db instance
    callback(err, topology);
  }
}
|
||||
|
||||
// Wrap a topology-connect callback so that, when the options carry auth
// credentials, authentication runs before the user callback fires.
function connectHandler(client, options, callback) {
  return (err, topology) => {
    if (err) {
      return handleConnectCallback(err, topology, callback);
    }

    // No authentication just reconnect
    if (!options.auth) {
      return handleConnectCallback(err, topology, callback);
    }

    // Authenticate
    authenticate(client, options.user, options.password, options, (err, success) => {
      if (success) {
        handleConnectCallback(null, topology, callback);
      } else {
        // Auth failed: tear the connection down before reporting the error.
        if (topology) topology.close();
        const authError = err ? err : new Error('Could not authenticate user ' + options.auth[0]);
        handleConnectCallback(authError, topology, callback);
      }
    });
  };
}
|
||||
|
||||
/**
 * Perform the connect for a MongoClient using its stored url and options,
 * surfacing any earlier option-validation error first.
 *
 * See docs.mongodb.org/manual/reference/connection-string/ for the URI format.
 *
 * @method
 * @param {MongoClient} mongoClient The MongoClient instance with which to connect.
 * @param {Error} err A validation error produced earlier, if any.
 * @param {MongoClient~connectCallback} [callback] The command result callback
 */
function connectOp(mongoClient, err, callback) {
  // Did we have a validation error
  if (err) return callback(err);
  // Fallback to callback based connect
  connect(mongoClient, mongoClient.s.url, mongoClient.s.options, err => {
    if (err) return callback(err);
    // Resolve with the client itself, not the raw topology.
    callback(null, mongoClient);
  });
}
|
||||
|
||||
// Connect when the "url" is actually an already-constructed
// Server/ReplSet/Mongos topology instance rather than a connection string.
function connectWithUrl(mongoClient, url, options, connectCallback) {
  // Set the topology
  assignTopology(mongoClient, url);

  // Add listeners
  addListeners(mongoClient, url);

  // Propagate the events to the client
  relayEvents(mongoClient, url);

  let finalOptions = Object.assign({}, options);

  // If we have a readPreference passed in by the db options, convert it from a string
  if (typeof options.readPreference === 'string' || typeof options.read_preference === 'string') {
    finalOptions.readPreference = new ReadPreference(
      options.readPreference || options.read_preference
    );
  }

  // Connect
  return url.connect(
    finalOptions,
    connectHandler(mongoClient, finalOptions, (err, topology) => {
      if (err) return connectCallback(err, topology);
      if (finalOptions.user || finalOptions.password || finalOptions.authMechanism) {
        // NOTE(review): both branches of the callback below invoke
        // connectCallback(err, topology) identically — the error check is
        // redundant but preserved for fidelity.
        return authenticate(
          mongoClient,
          finalOptions.user,
          finalOptions.password,
          finalOptions,
          err => {
            if (err) return connectCallback(err, topology);
            connectCallback(err, topology);
          }
        );
      }

      connectCallback(err, topology);
    })
  );
}
|
||||
|
||||
/**
 * Build a listener that forwards a topology event onto the MongoClient.
 * For the lifecycle events 'all', 'fullsetup', 'open' and 'reconnect' the
 * client itself is emitted as the payload; every other event relays its
 * original two arguments untouched.
 *
 * @param {MongoClient} mongoClient The client to re-emit on.
 * @param {string} event The event name being forwarded.
 * @returns {Function} The listener to register on the topology.
 */
function createListener(mongoClient, event) {
  const clientPayloadEvents = new Set(['all', 'fullsetup', 'open', 'reconnect']);
  const emitsClient = clientPayloadEvents.has(event);

  return (first, second) => {
    if (emitsClient) {
      return mongoClient.emit(event, mongoClient);
    }

    mongoClient.emit(event, first, second);
  };
}
|
||||
|
||||
// Connect to a single server; if the server turns out to be a mongos, the
// connection is torn down and rebuilt as a mongos topology, replaying any
// events collected in the meantime.
function createServer(mongoClient, options, callback) {
  // Pass in the promise library
  options.promiseLibrary = mongoClient.s.promiseLibrary;

  // Set default options
  const servers = translateOptions(options);

  const server = servers[0];

  // Buffer monitoring events in case we must replay them after a mongos
  // upgrade below.
  const collectedEvents = collectEvents(mongoClient, server);

  // Connect to topology
  server.connect(options, (err, topology) => {
    if (err) {
      server.close(true);
      return callback(err);
    }
    // Clear out all the collected event listeners
    clearAllEvents(server);

    // Relay all the events
    relayEvents(mongoClient, server);
    // Add listeners
    addListeners(mongoClient, server);
    // Check if we are really speaking to a mongos
    const ismaster = topology.lastIsMaster();

    // Set the topology
    assignTopology(mongoClient, topology);

    // Do we actually have a mongos
    // ('isdbgrid' is the marker a mongos reports in its isMaster response).
    if (ismaster && ismaster.msg === 'isdbgrid') {
      // Destroy the current connection
      topology.close();
      // Create mongos connection instead
      return createTopology(mongoClient, 'mongos', options, callback);
    }

    // Fire all the events
    replayEvents(mongoClient, collectedEvents);
    // Otherwise callback
    callback(err, topology);
  });
}
|
||||
|
||||
// Build and connect a multi-server topology ('mongos' or 'replicaset').
function createTopology(mongoClient, topologyType, options, callback) {
  // Pass in the promise library
  options.promiseLibrary = mongoClient.s.promiseLibrary;

  const translationOptions = {};
  if (topologyType === 'unified') translationOptions.createServers = false;

  // Set default options
  const servers = translateOptions(options, translationOptions);

  // Create the topology
  // NOTE(review): topologyType values other than 'mongos'/'replicaset'
  // leave `topology` undefined here, and addListeners below would throw.
  let topology;
  if (topologyType === 'mongos') {
    topology = new Mongos(servers, options);
  } else if (topologyType === 'replicaset') {
    topology = new ReplSet(servers, options);
  }

  // Add listeners
  addListeners(mongoClient, topology);

  // Propagate the events to the client
  relayEvents(mongoClient, topology);

  // Open the connection
  topology.connect(options, (err, newTopology) => {
    if (err) {
      topology.close(true);
      return callback(err);
    }

    assignTopology(mongoClient, newTopology);
    callback(null, newTopology);
  });
}
|
||||
|
||||
/**
 * Fold parsed URL options and user options into one flat options object.
 * Child containers (server/replset/db/...) are merged up to the top level,
 * plain-object values are flattened, and everything else is copied across.
 *
 * @param {Object} finalOptions The accumulator (mutated and returned).
 * @param {Object} options The options to fold in.
 * @returns {Object} The unified options object.
 */
function createUnifiedOptions(finalOptions, options) {
  // Option groups whose contents get hoisted to the top level.
  const childOptions = [
    'mongos',
    'server',
    'db',
    'replset',
    'db_options',
    'server_options',
    'rs_options',
    'mongos_options'
  ];
  // Options copied verbatim even though they hold objects.
  const noMerge = ['readconcern', 'compression'];

  for (const name in options) {
    const lowered = name.toLowerCase();
    const value = options[name];

    if (noMerge.indexOf(lowered) !== -1) {
      finalOptions[name] = value;
    } else if (childOptions.indexOf(lowered) !== -1) {
      finalOptions = mergeOptions(finalOptions, value, false);
    } else if (
      value &&
      typeof value === 'object' &&
      !Buffer.isBuffer(value) &&
      !Array.isArray(value)
    ) {
      // Plain nested object: flatten its leaves onto the top level.
      finalOptions = mergeOptions(finalOptions, value, true);
    } else {
      finalOptions[name] = value;
    }
  }

  return finalOptions;
}
|
||||
|
||||
/**
 * Deliver the connect result to the user callback on the next tick. If the
 * callback itself throws, the topology is closed before the error is
 * rethrown so no connection is leaked.
 *
 * @param {Error} err The connect error, if any.
 * @param {Object} topology The connected topology, if any.
 * @param {Function} callback The user callback to invoke.
 */
function handleConnectCallback(err, topology, callback) {
  return process.nextTick(() => {
    try {
      callback(err, topology);
    } catch (callbackError) {
      // User callback blew up: release the connection, then surface it.
      if (topology) topology.close();
      throw callbackError;
    }
  });
}
|
||||
|
||||
// Flatten an object produced by the legacy url parser into the unified
// top-level options shape.
function legacyTransformUrlOptions(object) {
  return mergeOptions(createUnifiedOptions({}, object), object, false);
}
|
||||
|
||||
/**
 * Logout user from server, fire off on all connections and remove all auth info.
 *
 * @method
 * @param {MongoClient} mongoClient The MongoClient instance on which to logout.
 * @param {string} dbName The database to log out of.
 * @param {Db~resultCallback} [callback] The command result callback
 */
function logout(mongoClient, dbName, callback) {
  // Normalize the success value to `true` for callers.
  const onLoggedOut = err => (err ? callback(err) : callback(null, true));
  mongoClient.topology.logout(dbName, onLoggedOut);
}
|
||||
|
||||
/**
 * Copy every enumerable property of `source` onto `target`. When `flatten`
 * is true, object-valued properties are recursed into and their leaves are
 * hoisted directly onto `target` instead of being copied as nested objects.
 *
 * @param {Object} target Mutated in place and returned.
 * @param {Object} source The options to copy from.
 * @param {boolean} flatten Whether nested objects are flattened into target.
 * @returns {Object} The merged target.
 */
function mergeOptions(target, source, flatten) {
  for (const key in source) {
    const value = source[key];
    const isNestedObject = Boolean(value) && typeof value === 'object';

    if (isNestedObject && flatten) {
      target = mergeOptions(target, value, flatten);
    } else {
      target[key] = value;
    }
  }

  return target;
}
|
||||
|
||||
/**
 * Forward every SDAM and command-monitoring event from the topology onto
 * the MongoClient verbatim (event name plus up to two payload arguments).
 *
 * @param {MongoClient} mongoClient The client to re-emit on.
 * @param {Object} topology The topology whose events are forwarded.
 */
function relayEvents(mongoClient, topology) {
  const serverOrCommandEvents = [
    'serverOpening',
    'serverDescriptionChanged',
    'serverHeartbeatStarted',
    'serverHeartbeatSucceeded',
    'serverHeartbeatFailed',
    'serverClosed',
    'topologyOpening',
    'topologyClosed',
    'topologyDescriptionChanged',
    'commandStarted',
    'commandSucceeded',
    'commandFailed',
    'joined',
    'left',
    'ping',
    'ha'
  ];

  for (const event of serverOrCommandEvents) {
    topology.on(event, (first, second) => {
      mongoClient.emit(event, first, second);
    });
  }
}
|
||||
|
||||
// Re-emit events that were buffered (via collectEvents) while a
// single-server connection was being upgraded to a Mongos topology.
function replayEvents(mongoClient, events) {
  for (const { event, object1, object2 } of events) {
    mongoClient.emit(event, object1, object2);
  }
}
|
||||
|
||||
// Map of lowercased option name -> canonical camelCase name, used by
// transformUrlOptions to re-case options parsed from the (case-insensitive)
// connection string.
const LEGACY_OPTIONS_MAP = validOptionNames.reduce((obj, name) => {
  obj[name.toLowerCase()] = name;
  return obj;
}, {});
|
||||
|
||||
// Convert the output of the new (spec-compliant) connection-string parser
// into the flat options object the rest of the driver expects.
function transformUrlOptions(_object) {
  let object = Object.assign({ servers: _object.hosts }, _object.options);
  // Re-case lowercased URI option names to their canonical camelCase form.
  for (let name in object) {
    const camelCaseName = LEGACY_OPTIONS_MAP[name];
    if (camelCaseName) {
      object[camelCaseName] = object[name];
    }
  }
  // Hoist each truthy auth field to the top level.
  if (_object.auth) {
    const auth = _object.auth;
    for (let i in auth) {
      if (auth[i]) {
        object[i] = auth[i];
      }
    }

    if (auth.username) {
      object.auth = auth;
      object.user = auth.username;
    }

    // The auth db acts as the default authSource when none was given.
    if (auth.db) {
      object.authSource = object.authSource || auth.db;
    }
  }

  if (_object.defaultDatabase) {
    object.dbName = _object.defaultDatabase;
  }

  // Translate remaining lowercase URI aliases to their driver option names.
  if (object.maxpoolsize) {
    object.poolSize = object.maxpoolsize;
  }

  if (object.readconcernlevel) {
    object.readConcern = { level: object.readconcernlevel };
  }

  if (object.wtimeoutms) {
    object.wtimeout = object.wtimeoutms;
  }

  return object;
}
|
||||
|
||||
// Normalize read-preference and timeout options in place and, unless
// translationOptions.createServers is false, build a Server instance for
// each seed host.
function translateOptions(options, translationOptions) {
  translationOptions = Object.assign({}, { createServers: true }, translationOptions);

  // If we have a readPreference passed in by the db options
  if (typeof options.readPreference === 'string' || typeof options.read_preference === 'string') {
    options.readPreference = new ReadPreference(options.readPreference || options.read_preference);
  }

  // Do we have readPreference tags, add them
  if (options.readPreference && (options.readPreferenceTags || options.read_preference_tags)) {
    options.readPreference.tags = options.readPreferenceTags || options.read_preference_tags;
  }

  // Do we have maxStalenessSeconds
  // NOTE(review): assumes options.readPreference exists whenever
  // maxStalenessSeconds is set — would throw otherwise.
  if (options.maxStalenessSeconds) {
    options.readPreference.maxStalenessSeconds = options.maxStalenessSeconds;
  }

  // Set the socket and connection timeouts
  if (options.socketTimeoutMS == null) options.socketTimeoutMS = 360000;
  if (options.connectTimeoutMS == null) options.connectTimeoutMS = 30000;

  // In 'unified' mode the caller only wants the in-place normalization.
  if (!translationOptions.createServers) {
    return;
  }

  // Create server instances
  // (unix domain sockets pass the socket path in place of a host name).
  return options.servers.map(serverObj => {
    return serverObj.domain_socket
      ? new Server(serverObj.domain_socket, 27017, options)
      : new Server(serverObj.host, serverObj.port, options);
  });
}
|
||||
|
||||
// Validate options object
// Returns a MongoError for unknown options when options.validateOptions is
// set; otherwise only warns. Legacy container options additionally produce
// a deprecation warning.
function validOptions(options) {
  const _validOptions = validOptionNames.concat(legacyOptionNames);

  for (const name in options) {
    // Silently accepted for backwards compatibility.
    if (ignoreOptionNames.indexOf(name) !== -1) {
      continue;
    }

    if (_validOptions.indexOf(name) === -1 && options.validateOptions) {
      return new MongoError(`option ${name} is not supported`);
    } else if (_validOptions.indexOf(name) === -1) {
      console.warn(`the options [${name}] is not supported`);
    }

    if (legacyOptionNames.indexOf(name) !== -1) {
      console.warn(
        `the server/replset/mongos/db options are deprecated, ` +
          `all their options are supported at the top level of the options object [${validOptionNames}]`
      );
    }
  }
}
|
||||
|
||||
module.exports = { connectOp, logout, validOptions };
|
Reference in New Issue
Block a user