mirror of
https://github.com/bvanroll/yahoo-thing.git
synced 2025-08-29 20:12:46 +00:00
euh
This commit is contained in:
749
node_modules/mongodb-core/lib/sdam/cursor.js
generated
vendored
Normal file
749
node_modules/mongodb-core/lib/sdam/cursor.js
generated
vendored
Normal file
@@ -0,0 +1,749 @@
|
||||
'use strict';
|
||||
|
||||
const Logger = require('../connection/logger');
|
||||
const BSON = require('../connection/utils').retrieveBSON();
|
||||
const MongoError = require('../error').MongoError;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const mongoErrorContextSymbol = require('../error').mongoErrorContextSymbol;
|
||||
const Long = BSON.Long;
|
||||
const deprecate = require('util').deprecate;
|
||||
const readPreferenceServerSelector = require('./server_selectors').readPreferenceServerSelector;
|
||||
const ReadPreference = require('../topologies/read_preference');
|
||||
|
||||
/**
|
||||
* Handle callback (including any exceptions thrown)
|
||||
*/
|
||||
/**
 * Invoke a user callback, re-throwing any exception it raises on the next
 * tick so a throwing callback cannot corrupt the caller's control flow.
 *
 * @param {function} callback The callback to invoke
 * @param {?Error} err Error to pass as the first argument (null when none)
 * @param {*} result Result to pass as the second argument
 */
function handleCallback(callback, err, result) {
  try {
    callback(err, result);
  } catch (thrown) {
    process.nextTick(() => {
      throw thrown;
    });
  }
}
|
||||
|
||||
/**
|
||||
* This is a cursor results callback
|
||||
*
|
||||
* @callback resultCallback
|
||||
* @param {error} error An error object. Set to null if no error present
|
||||
* @param {object} document
|
||||
*/
|
||||
|
||||
/**
|
||||
* An internal class that embodies a cursor on MongoDB, allowing for iteration over the
|
||||
* results returned from a query.
|
||||
*
|
||||
* @property {number} cursorBatchSize The current cursorBatchSize for the cursor
|
||||
* @property {number} cursorLimit The current cursorLimit for the cursor
|
||||
* @property {number} cursorSkip The current cursorSkip for the cursor
|
||||
*/
|
||||
class Cursor {
  /**
   * Create a cursor
   *
   * @param {object} bson An instance of the BSON parser
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {{object}|Long} cmd The selector (can be a command or a cursorId)
   * @param {object} [options=null] Optional settings.
   * @param {object} [options.batchSize=1000] Batchsize for the operation
   * @param {array} [options.documents=[]] Initial documents list for cursor
   * @param {object} [options.transforms=null] Transform methods for the cursor results
   * @param {function} [options.transforms.query] Transform the value returned from the initial query
   * @param {function} [options.transforms.doc] Transform each document returned from Cursor.prototype.next
   * @param {object} topology The server topology instance.
   * @param {object} topologyOptions The server topology options.
   */
  constructor(bson, ns, cmd, options, topology, topologyOptions) {
    options = options || {};

    // Cursor pool (null until the cursor is initialized against a server)
    this.pool = null;
    // Cursor server (null until the cursor is initialized against a server)
    this.server = null;

    // Do we have a not connected handler
    this.disconnectHandler = options.disconnectHandler;

    // Set local values
    this.bson = bson;
    this.ns = ns;
    this.cmd = cmd;
    this.options = options;
    this.topology = topology;

    // All internal mutable state lives on `this.s`
    this.s = {
      cursorId: null,
      cmd: cmd,
      documents: options.documents || [],
      cursorIndex: 0,
      dead: false,
      killed: false,
      init: false,
      notified: false,
      // explicit options take precedence over values embedded in the command
      limit: options.limit || cmd.limit || 0,
      skip: options.skip || cmd.skip || 0,
      batchSize: options.batchSize || cmd.batchSize || 1000,
      currentLimit: 0,
      // Result field name if not a cursor (contains the array of results)
      transforms: options.transforms
    };

    // Attach an explicitly provided session, if any
    if (typeof options.session === 'object') {
      this.s.session = options.session;
    }

    // Add promoteLongs to cursor state (topology options take precedence)
    if (typeof topologyOptions.promoteLongs === 'boolean') {
      this.s.promoteLongs = topologyOptions.promoteLongs;
    } else if (typeof options.promoteLongs === 'boolean') {
      this.s.promoteLongs = options.promoteLongs;
    }

    // Add promoteValues to cursor state (topology options take precedence)
    if (typeof topologyOptions.promoteValues === 'boolean') {
      this.s.promoteValues = topologyOptions.promoteValues;
    } else if (typeof options.promoteValues === 'boolean') {
      this.s.promoteValues = options.promoteValues;
    }

    // Add promoteBuffers to cursor state (topology options take precedence)
    if (typeof topologyOptions.promoteBuffers === 'boolean') {
      this.s.promoteBuffers = topologyOptions.promoteBuffers;
    } else if (typeof options.promoteBuffers === 'boolean') {
      this.s.promoteBuffers = options.promoteBuffers;
    }

    if (topologyOptions.reconnect) {
      this.s.reconnect = topologyOptions.reconnect;
    }

    // Logger
    this.logger = Logger('Cursor', topologyOptions);

    // Did we pass in a cursor id (number or Long) instead of a command?
    if (typeof cmd === 'number') {
      this.s.cursorId = Long.fromNumber(cmd);
      this.s.lastCursorId = this.s.cursorId;
    } else if (cmd instanceof Long) {
      this.s.cursorId = cmd;
      this.s.lastCursorId = cmd;
    }
  }

  /** Set the batch size used for subsequent getMore operations. */
  setCursorBatchSize(value) {
    this.s.batchSize = value;
  }

  /** @return {number} the current batch size */
  cursorBatchSize() {
    return this.s.batchSize;
  }

  /** Set the maximum number of documents this cursor will return. */
  setCursorLimit(value) {
    this.s.limit = value;
  }

  /** @return {number} the current limit */
  cursorLimit() {
    return this.s.limit;
  }

  /** Set the number of documents to skip. */
  setCursorSkip(value) {
    this.s.skip = value;
  }

  /** @return {number} the current skip */
  cursorSkip() {
    return this.s.skip;
  }

  /**
   * End the session attached to this cursor, when the cursor owns the
   * session or `options.force` is set.
   *
   * @param {object} [options] Optional settings
   * @param {boolean} [options.force] End the session even if this cursor is not its owner
   * @param {function} [callback] Invoked after the session ends (or immediately if none is ended)
   * @return {boolean} true when a session was actually ended
   */
  _endSession(options, callback) {
    if (typeof options === 'function') {
      callback = options;
      options = {};
    }
    options = options || {};

    const session = this.s.session;
    if (session && (options.force || session.owner === this)) {
      this.s.session = undefined;
      session.endSession(callback);
      return true;
    }

    if (callback) {
      callback();
    }

    return false;
  }

  /**
   * Clone the cursor
   * @method
   * @return {Cursor}
   */
  clone() {
    return this.topology.cursor(this.ns, this.cmd, this.options);
  }

  /**
   * Checks if the cursor is dead
   * @method
   * @return {boolean} A boolean signifying if the cursor is dead or not
   */
  isDead() {
    return this.s.dead === true;
  }

  /**
   * Checks if the cursor was killed by the application
   * @method
   * @return {boolean} A boolean signifying if the cursor was killed by the application
   */
  isKilled() {
    return this.s.killed === true;
  }

  /**
   * Checks if the cursor notified its caller about its death
   * @method
   * @return {boolean} A boolean signifying if the cursor notified the callback
   */
  isNotified() {
    return this.s.notified === true;
  }

  /**
   * Returns current buffered documents length
   * @method
   * @return {number} The number of items in the buffered documents
   */
  bufferedCount() {
    return this.s.documents.length - this.s.cursorIndex;
  }

  /**
   * Kill the cursor, notifying the server when the cursor was initialized
   * and still has a live cursor id.
   *
   * @param {resultCallback} callback A callback function
   */
  kill(callback) {
    // Set cursor to dead
    this.s.dead = true;
    this.s.killed = true;
    // Remove documents
    this.s.documents = [];

    // If no cursor id just return
    if (this.s.cursorId == null || this.s.cursorId.isZero() || this.s.init === false) {
      if (callback) callback(null, null);
      return;
    }

    // Default pool
    const pool = this.s.server.s.pool;

    // Execute command
    this.s.server.s.wireProtocolHandler.killCursor(this.bson, this.ns, this.s, pool, callback);
  }

  /**
   * Resets the cursor so it may be iterated again from the start.
   * Kills the server-side cursor first when one is still alive.
   */
  rewind() {
    if (this.s.init) {
      if (!this.s.dead) {
        this.kill();
      }

      this.s.currentLimit = 0;
      this.s.init = false;
      this.s.dead = false;
      this.s.killed = false;
      this.s.notified = false;
      this.s.documents = [];
      this.s.cursorId = null;
      this.s.cursorIndex = 0;
    }
  }

  /**
   * Returns current buffered documents
   * @method
   * @param {number} number The maximum number of buffered documents to read
   * @return {Array} An array of buffered documents
   */
  readBufferedDocuments(number) {
    const unreadDocumentsLength = this.s.documents.length - this.s.cursorIndex;
    const length = number < unreadDocumentsLength ? number : unreadDocumentsLength;
    let elements = this.s.documents.slice(this.s.cursorIndex, this.s.cursorIndex + length);

    // Transform the doc with passed in transformation method if provided
    if (this.s.transforms && typeof this.s.transforms.doc === 'function') {
      // Transform all the elements
      for (let i = 0; i < elements.length; i++) {
        elements[i] = this.s.transforms.doc(elements[i]);
      }
    }

    // Ensure we do not return any more documents than the limit imposed
    // Just return the number of elements up to the limit
    if (this.s.limit > 0 && this.s.currentLimit + elements.length > this.s.limit) {
      elements = elements.slice(0, this.s.limit - this.s.currentLimit);
      this.kill();
    }

    // Adjust current limit
    this.s.currentLimit = this.s.currentLimit + elements.length;
    this.s.cursorIndex = this.s.cursorIndex + elements.length;

    // Return elements
    return elements;
  }

  /**
   * Retrieve the next document from the cursor
   *
   * @param {resultCallback} callback A callback function
   */
  next(callback) {
    nextFunction(this, callback);
  }
}
|
||||
|
||||
// Deprecated wrappers around the legacy private API.
//
// FIX: these were arrow functions, which capture the module-level `this`
// (undefined in strict mode) instead of the Cursor instance the method is
// invoked on — so `_find`/`_getmore` never received the cursor. They must be
// `function` expressions so `this` is the cursor at call time.
Cursor.prototype._find = deprecate(function(callback) {
  _find(this, callback);
}, '_find() is deprecated, please stop using it');

Cursor.prototype._getmore = deprecate(function(callback) {
  _getmore(this, callback);
}, '_getmore() is deprecated, please stop using it');
|
||||
|
||||
/**
 * Schedule a getMore on the cursor's selected server, clamping the batch
 * size so the cursor never fetches past its configured limit.
 *
 * @param {Cursor} cursor The cursor to fetch more documents for
 * @param {resultCallback} callback Invoked with the getMore reply or an error
 */
function _getmore(cursor, callback) {
  if (cursor.logger.isDebug()) {
    cursor.logger.debug(`schedule getMore call for query [${JSON.stringify(cursor.query)}]`);
  }

  // Raw mode may be requested on the cursor options or on the command itself
  const rawRequested = cursor.options.raw || cursor.cmd.raw;

  // Clamp the batch size so we never read past the remaining limit
  let effectiveBatchSize = cursor.s.batchSize;
  const remaining = cursor.s.limit - cursor.s.currentLimit;
  if (cursor.s.limit > 0 && effectiveBatchSize > remaining) {
    effectiveBatchSize = remaining;
  }

  // Delegate to the wire protocol handler over the server's pool
  const serverState = cursor.s.server.s;
  serverState.wireProtocolHandler.getMore(
    cursor.bson,
    cursor.ns,
    cursor.s,
    effectiveBatchSize,
    rawRequested,
    serverState.pool,
    cursor.options,
    callback
  );
}
|
||||
|
||||
/**
 * Execute the cursor's initial query (built in initializeCursorAndRetryNext)
 * against the selected server and seed cursor state (`cursorId`, `documents`,
 * `lastCursorId`) from the reply. Supports both command-style cursor replies
 * (documents[0].cursor / documents[0].result) and legacy OP_QUERY replies.
 *
 * @param {Cursor} cursor The cursor to run the initial query for
 * @param {resultCallback} callback Invoked with the raw reply message or an error
 */
function _find(cursor, callback) {
  if (cursor.logger.isDebug()) {
    cursor.logger.debug(
      `issue initial query [${JSON.stringify(cursor.cmd)}] with flags [${JSON.stringify(
        cursor.query
      )}]`
    );
  }

  // Parse the server reply and populate cursor state
  const queryCallback = (err, r) => {
    if (err) return callback(err);

    // Get the raw message
    const result = r.message;

    // Query failure bit set
    if (result.queryFailure) {
      return callback(new MongoError(result.documents[0]), null);
    }

    // Check if we have a command cursor (single document reply that carries a
    // cursor sub-document, an error, or an inline result array)
    if (
      Array.isArray(result.documents) &&
      result.documents.length === 1 &&
      (!cursor.cmd.find || (cursor.cmd.find && cursor.cmd.virtual === false)) &&
      (result.documents[0].cursor !== 'string' ||
        result.documents[0]['$err'] ||
        result.documents[0]['errmsg'] ||
        Array.isArray(result.documents[0].result))
    ) {
      // We have an error document, return the error
      if (result.documents[0]['$err'] || result.documents[0]['errmsg']) {
        return callback(new MongoError(result.documents[0]), null);
      }

      // We have a cursor document
      if (result.documents[0].cursor != null && typeof result.documents[0].cursor !== 'string') {
        const id = result.documents[0].cursor.id;
        // If we have a namespace change set the new namespace for getmores
        if (result.documents[0].cursor.ns) {
          cursor.ns = result.documents[0].cursor.ns;
        }
        // Promote id to long if needed
        cursor.s.cursorId = typeof id === 'number' ? Long.fromNumber(id) : id;
        cursor.s.lastCursorId = cursor.s.cursorId;
        // If we have a firstBatch set it
        if (Array.isArray(result.documents[0].cursor.firstBatch)) {
          cursor.s.documents = result.documents[0].cursor.firstBatch;
        }

        // Return after processing command cursor
        return callback(null, result);
      }

      // Inline result array: the whole result fits in one reply, no server
      // cursor remains (cursorId is zero)
      if (Array.isArray(result.documents[0].result)) {
        cursor.s.documents = result.documents[0].result;
        cursor.s.cursorId = Long.ZERO;
        return callback(null, result);
      }
    }

    // Otherwise fall back to regular find path
    cursor.s.cursorId = result.cursorId;
    cursor.s.documents = result.documents;
    cursor.s.lastCursorId = result.cursorId;

    // Transform the results with passed in transformation method if provided
    if (cursor.s.transforms && typeof cursor.s.transforms.query === 'function') {
      cursor.s.documents = cursor.s.transforms.query(result);
    }

    // Return callback
    callback(null, result);
  };

  // Options passed to the pool
  const queryOptions = {};

  // If we have a raw query decorate the function
  if (cursor.options.raw || cursor.cmd.raw) {
    queryOptions.raw = cursor.options.raw || cursor.cmd.raw;
  }

  // Do we have documentsReturnedIn set on the query
  if (typeof cursor.query.documentsReturnedIn === 'string') {
    queryOptions.documentsReturnedIn = cursor.query.documentsReturnedIn;
  }

  // Add promote Long value if defined
  if (typeof cursor.s.promoteLongs === 'boolean') {
    queryOptions.promoteLongs = cursor.s.promoteLongs;
  }

  // Add promote values if defined
  if (typeof cursor.s.promoteValues === 'boolean') {
    queryOptions.promoteValues = cursor.s.promoteValues;
  }

  // Add promote buffers if defined
  if (typeof cursor.s.promoteBuffers === 'boolean') {
    queryOptions.promoteBuffers = cursor.s.promoteBuffers;
  }

  if (typeof cursor.s.session === 'object') {
    queryOptions.session = cursor.s.session;
  }

  // Write the initial command out
  cursor.s.server.s.pool.write(cursor.query, queryOptions, queryCallback);
}
|
||||
|
||||
/**
|
||||
* Validate if the pool is dead and return error
|
||||
*/
|
||||
/**
 * Report whether the cursor's connection pool has been destroyed. When it
 * has, mark the cursor killed, notify the caller with a MongoNetworkError,
 * and return true; otherwise return false.
 */
function isConnectionDead(cursor, callback) {
  const pool = cursor.pool;
  if (!pool || !pool.isDestroyed()) {
    return false;
  }

  cursor.s.killed = true;
  const err = new MongoNetworkError(
    `connection to host ${pool.host}:${pool.port} was destroyed`
  );
  _setCursorNotifiedImpl(cursor, () => callback(err));
  return true;
}
|
||||
|
||||
/**
|
||||
* Validate if the cursor is dead but was not explicitly killed by user
|
||||
*/
|
||||
/**
 * Report whether the cursor died without being explicitly killed by the
 * application. When so, mark it killed, notify the caller with a null
 * result, and return true; otherwise return false.
 */
function isCursorDeadButNotkilled(cursor, callback) {
  const state = cursor.s;
  if (!state.dead || state.killed) {
    return false;
  }

  state.killed = true;
  setCursorNotified(cursor, callback);
  return true;
}
|
||||
|
||||
/**
|
||||
* Validate if the cursor is dead and was killed by user
|
||||
*/
|
||||
/**
 * Report whether the cursor is both dead and was explicitly killed. When
 * so, surface a "cursor is dead" MongoError via the callback and return
 * true; otherwise return false.
 */
function isCursorDeadAndKilled(cursor, callback) {
  if (!(cursor.s.dead && cursor.s.killed)) {
    return false;
  }

  handleCallback(callback, new MongoError('cursor is dead'));
  return true;
}
|
||||
|
||||
/**
|
||||
* Validate if the cursor was killed by the user
|
||||
*/
|
||||
/**
 * Report whether the cursor was killed by the application. When so, notify
 * the caller with a null result and return true; otherwise return false.
 */
function isCursorKilled(cursor, callback) {
  if (!cursor.s.killed) {
    return false;
  }

  setCursorNotified(cursor, callback);
  return true;
}
|
||||
|
||||
/**
|
||||
* Mark cursor as being dead and notified
|
||||
*/
|
||||
/**
 * Flag the cursor as dead, then notify the caller that it is exhausted.
 */
function setCursorDeadAndNotified(cursor, callback) {
  const state = cursor.s;
  state.dead = true;
  setCursorNotified(cursor, callback);
}
|
||||
|
||||
/**
|
||||
* Mark cursor as being notified
|
||||
*/
|
||||
/**
 * Mark the cursor as notified and complete the caller's callback with a
 * null error and null result (the "cursor exhausted" signal).
 */
function setCursorNotified(cursor, callback) {
  _setCursorNotifiedImpl(cursor, function() {
    handleCallback(callback, null, null);
  });
}
|
||||
|
||||
/**
 * Core of the notified transition: mark the cursor notified, drop any
 * buffered documents, and end the attached session (when the cursor
 * supports it) before invoking the continuation.
 */
function _setCursorNotifiedImpl(cursor, callback) {
  const state = cursor.s;
  state.notified = true;
  state.documents = [];
  state.cursorIndex = 0;

  if (!cursor._endSession) {
    return callback();
  }
  return cursor._endSession(undefined, () => callback());
}
|
||||
|
||||
/**
 * Select a server matching the cursor's read preference, build the initial
 * wire-protocol query, mark the cursor initialized, and re-enter
 * `nextFunction` to execute it.
 *
 * FIX: the collation support check previously read `cursor.server`, which the
 * constructor initializes to `null` and which is never assigned here — any
 * command carrying a collation threw a TypeError instead of the intended
 * "does not support collation" error. The selected server is stored on
 * `cursor.s.server`, so the check now reads from there.
 *
 * @param {Cursor} cursor The cursor to initialize
 * @param {resultCallback} callback A callback function
 */
function initializeCursorAndRetryNext(cursor, callback) {
  cursor.topology.selectServer(
    readPreferenceServerSelector(cursor.options.readPreference || ReadPreference.primary),
    (err, server) => {
      if (err) {
        callback(err, null);
        return;
      }

      cursor.s.server = server;
      cursor.s.init = true;

      // check if server supports collation
      // NOTE: this should be a part of the selection predicate!
      if (cursor.cmd && cursor.cmd.collation && cursor.s.server.description.maxWireVersion < 5) {
        callback(new MongoError(`server ${cursor.s.server.name} does not support collation`));
        return;
      }

      try {
        cursor.query = cursor.s.server.s.wireProtocolHandler.command(
          cursor.bson,
          cursor.ns,
          cursor.cmd,
          cursor.s,
          cursor.topology,
          cursor.options
        );

        nextFunction(cursor, callback);
      } catch (err) {
        // Building the command can throw (e.g. invalid command shape)
        callback(err);
        return;
      }
    }
  );
}
|
||||
|
||||
/**
 * Drive the cursor state machine one step and deliver the next document (or
 * a terminal condition) to `callback`. Order of checks matters: notified →
 * killed → dead-not-killed → dead-and-killed → uninitialized → no cursor id
 * (initial query) → exhausted buffer (getMore) → limit → deliver document.
 *
 * @param {Cursor} cursor The cursor to advance
 * @param {resultCallback} callback Receives (err, doc)
 */
function nextFunction(cursor, callback) {
  // We have notified about it
  if (cursor.s.notified) {
    return callback(new Error('cursor is exhausted'));
  }

  // Cursor is killed return null
  if (isCursorKilled(cursor, callback)) return;

  // Cursor is dead but not marked killed, return null
  if (isCursorDeadButNotkilled(cursor, callback)) return;

  // We have a dead and killed cursor, attempting to call next should error
  if (isCursorDeadAndKilled(cursor, callback)) return;

  // We have just started the cursor
  if (!cursor.s.init) {
    return initializeCursorAndRetryNext(cursor, callback);
  }

  // If we don't have a cursorId execute the first query
  if (cursor.s.cursorId == null) {
    // Check if pool is dead and return if not possible to
    // execute the query against the db
    if (isConnectionDead(cursor, callback)) return;

    // query, cmd, options, s, callback
    return _find(cursor, function(err) {
      if (err) return handleCallback(callback, err, null);

      // Server returned no live cursor: end the session if we own one
      if (cursor.s.cursorId && cursor.s.cursorId.isZero() && cursor._endSession) {
        cursor._endSession();
      }

      // Empty non-tailable result set: notify exhaustion immediately
      if (
        cursor.s.documents.length === 0 &&
        cursor.s.cursorId &&
        cursor.s.cursorId.isZero() &&
        !cursor.cmd.tailable &&
        !cursor.cmd.awaitData
      ) {
        return setCursorNotified(cursor, callback);
      }

      // Re-enter the state machine to deliver from the fresh buffer
      nextFunction(cursor, callback);
    });
  }

  // Buffer drained and no server-side cursor remains: cursor is exhausted
  if (cursor.s.documents.length === cursor.s.cursorIndex && Long.ZERO.equals(cursor.s.cursorId)) {
    setCursorDeadAndNotified(cursor, callback);
    return;
  }

  if (cursor.s.limit > 0 && cursor.s.currentLimit >= cursor.s.limit) {
    // Ensure we kill the cursor on the server
    cursor.kill();
    // Set cursor in dead and notified state
    setCursorDeadAndNotified(cursor, callback);
    return;
  }

  // Tailable cursor whose server-side cursor has gone away: surface an error
  if (
    cursor.s.documents.length === cursor.s.cursorIndex &&
    cursor.cmd.tailable &&
    Long.ZERO.equals(cursor.s.cursorId)
  ) {
    return handleCallback(
      callback,
      new MongoError({
        message: 'No more documents in tailed cursor',
        tailable: cursor.cmd.tailable,
        awaitData: cursor.cmd.awaitData
      })
    );
  }

  // Buffer drained but a live server-side cursor remains: fetch more
  if (cursor.s.cursorIndex === cursor.s.documents.length && !Long.ZERO.equals(cursor.s.cursorId)) {
    // Ensure an empty cursor state
    cursor.s.documents = [];
    cursor.s.cursorIndex = 0;

    // Check if connection is dead and return if not possible to
    if (isConnectionDead(cursor, callback)) return;

    // Execute the next get more
    return _getmore(cursor, function(err, doc, connection) {
      if (err) {
        // Tag getMore-originated errors so retry logic upstream can see them
        if (err instanceof MongoError) {
          err[mongoErrorContextSymbol].isGetMore = true;
        }

        return handleCallback(callback, err);
      }

      // Server-side cursor finished: end the session if we own one
      if (cursor.s.cursorId && cursor.s.cursorId.isZero() && cursor._endSession) {
        cursor._endSession();
      }

      // Save the returned connection to ensure all getMore's fire over the same connection
      cursor.connection = connection;

      // Tailable cursor getMore result, notify owner about it
      // No attempt is made here to retry, this is left to the user of the
      // core module to handle to keep core simple
      if (
        cursor.s.documents.length === 0 &&
        cursor.cmd.tailable &&
        Long.ZERO.equals(cursor.s.cursorId)
      ) {
        // No more documents in the tailed cursor
        return handleCallback(
          callback,
          new MongoError({
            message: 'No more documents in tailed cursor',
            tailable: cursor.cmd.tailable,
            awaitData: cursor.cmd.awaitData
          })
        );
      } else if (
        cursor.s.documents.length === 0 &&
        cursor.cmd.tailable &&
        !Long.ZERO.equals(cursor.s.cursorId)
      ) {
        // Tailable with a live cursor but empty batch: poll again
        return nextFunction(cursor, callback);
      }

      if (cursor.s.limit > 0 && cursor.s.currentLimit >= cursor.s.limit) {
        return setCursorDeadAndNotified(cursor, callback);
      }

      // Deliver from the newly filled buffer
      nextFunction(cursor, callback);
    });
  }

  if (cursor.s.limit > 0 && cursor.s.currentLimit >= cursor.s.limit) {
    // Ensure we kill the cursor on the server
    cursor.kill();
    // Set cursor in dead and notified state
    return setCursorDeadAndNotified(cursor, callback);
  }

  // Increment the current cursor limit
  cursor.s.currentLimit += 1;

  // Get the document
  let doc = cursor.s.documents[cursor.s.cursorIndex++];

  // Doc overflow (missing document or an error document in the batch)
  if (!doc || doc.$err) {
    // Ensure we kill the cursor on the server
    cursor.kill();
    // Set cursor in dead and notified state
    return setCursorDeadAndNotified(cursor, function() {
      handleCallback(callback, new MongoError(doc ? doc.$err : undefined));
    });
  }

  // Transform the doc with passed in transformation method if provided
  if (cursor.s.transforms && typeof cursor.s.transforms.doc === 'function') {
    doc = cursor.s.transforms.doc(doc);
  }

  // Return the document
  handleCallback(callback, null, doc);
}
|
||||
|
||||
module.exports = Cursor;
|
217
node_modules/mongodb-core/lib/sdam/monitoring.js
generated
vendored
Normal file
217
node_modules/mongodb-core/lib/sdam/monitoring.js
generated
vendored
Normal file
@@ -0,0 +1,217 @@
|
||||
'use strict';
|
||||
|
||||
const ServerDescription = require('./server_description').ServerDescription;
|
||||
const calculateDurationInMs = require('../utils').calculateDurationInMs;
|
||||
|
||||
/**
|
||||
* Published when server description changes, but does NOT include changes to the RTT.
|
||||
*
|
||||
* @property {Object} topologyId A unique identifier for the topology
|
||||
* @property {ServerAddress} address The address (host/port pair) of the server
|
||||
* @property {ServerDescription} previousDescription The previous server description
|
||||
* @property {ServerDescription} newDescription The new server description
|
||||
*/
|
||||
class ServerDescriptionChangedEvent {
  /**
   * @param {Object} topologyId A unique identifier for the topology
   * @param {ServerAddress} address The address (host/port pair) of the server
   * @param {ServerDescription} previousDescription The previous server description
   * @param {ServerDescription} newDescription The new server description
   */
  constructor(topologyId, address, previousDescription, newDescription) {
    this.topologyId = topologyId;
    this.address = address;
    this.previousDescription = previousDescription;
    this.newDescription = newDescription;
  }
}
|
||||
|
||||
/**
|
||||
* Published when server is initialized.
|
||||
*
|
||||
* @property {Object} topologyId A unique identifier for the topology
|
||||
* @property {ServerAddress} address The address (host/port pair) of the server
|
||||
*/
|
||||
class ServerOpeningEvent {
  /**
   * @param {Object} topologyId A unique identifier for the topology
   * @param {ServerAddress} address The address (host/port pair) of the server
   */
  constructor(topologyId, address) {
    this.topologyId = topologyId;
    this.address = address;
  }
}
|
||||
|
||||
/**
|
||||
* Published when server is closed.
|
||||
*
|
||||
* @property {ServerAddress} address The address (host/port pair) of the server
|
||||
* @property {Object} topologyId A unique identifier for the topology
|
||||
*/
|
||||
class ServerClosedEvent {
  /**
   * @param {Object} topologyId A unique identifier for the topology
   * @param {ServerAddress} address The address (host/port pair) of the server
   */
  constructor(topologyId, address) {
    this.topologyId = topologyId;
    this.address = address;
  }
}
|
||||
|
||||
/**
|
||||
* Published when topology description changes.
|
||||
*
|
||||
* @property {Object} topologyId
|
||||
* @property {TopologyDescription} previousDescription The old topology description
|
||||
* @property {TopologyDescription} newDescription The new topology description
|
||||
*/
|
||||
class TopologyDescriptionChangedEvent {
  /**
   * @param {Object} topologyId A unique identifier for the topology
   * @param {TopologyDescription} previousDescription The old topology description
   * @param {TopologyDescription} newDescription The new topology description
   */
  constructor(topologyId, previousDescription, newDescription) {
    this.topologyId = topologyId;
    this.previousDescription = previousDescription;
    this.newDescription = newDescription;
  }
}
|
||||
|
||||
/**
|
||||
* Published when topology is initialized.
|
||||
*
|
||||
* @param {Object} topologyId A unique identifier for the topology
|
||||
*/
|
||||
class TopologyOpeningEvent {
  /**
   * @param {Object} topologyId A unique identifier for the topology
   */
  constructor(topologyId) {
    this.topologyId = topologyId;
  }
}
|
||||
|
||||
/**
|
||||
* Published when topology is closed.
|
||||
*
|
||||
* @param {Object} topologyId A unique identifier for the topology
|
||||
*/
|
||||
class TopologyClosedEvent {
  /**
   * @param {Object} topologyId A unique identifier for the topology
   */
  constructor(topologyId) {
    this.topologyId = topologyId;
  }
}
|
||||
|
||||
/**
|
||||
* Fired when the server monitor’s ismaster command is started - immediately before
|
||||
* the ismaster command is serialized into raw BSON and written to the socket.
|
||||
*
|
||||
* @property {Object} connectionId The connection id for the command
|
||||
*/
|
||||
class ServerHeartbeatStartedEvent {
  /**
   * @param {Object} connectionId The connection id for the command
   */
  constructor(connectionId) {
    this.connectionId = connectionId;
  }
}
|
||||
|
||||
/**
|
||||
* Fired when the server monitor’s ismaster succeeds.
|
||||
*
|
||||
* @param {Number} duration The execution time of the event in ms
|
||||
* @param {Object} reply The command reply
|
||||
* @param {Object} connectionId The connection id for the command
|
||||
*/
|
||||
class ServerHeartbeatSucceededEvent {
  /**
   * @param {Number} duration The execution time of the event in ms
   * @param {Object} reply The command reply
   * @param {Object} connectionId The connection id for the command
   */
  constructor(duration, reply, connectionId) {
    this.duration = duration;
    this.reply = reply;
    this.connectionId = connectionId;
  }
}
|
||||
|
||||
/**
|
||||
* Fired when the server monitor’s ismaster fails, either with an “ok: 0” or a socket exception.
|
||||
*
|
||||
* @param {Number} duration The execution time of the event in ms
|
||||
* @param {MongoError|Object} failure The command failure
|
||||
* @param {Object} connectionId The connection id for the command
|
||||
*/
|
||||
class ServerHeartbeatFailedEvent {
  /**
   * @param {Number} duration The execution time of the event in ms
   * @param {MongoError|Object} failure The command failure
   * @param {Object} connectionId The connection id for the command
   */
  constructor(duration, failure, connectionId) {
    this.duration = duration;
    this.failure = failure;
    this.connectionId = connectionId;
  }
}
|
||||
|
||||
/**
|
||||
* Performs a server check as described by the SDAM spec.
|
||||
*
|
||||
* NOTE: This method automatically reschedules itself, so that there is always an active
|
||||
* monitoring process
|
||||
*
|
||||
* @param {Server} server The server to monitor
|
||||
*/
|
||||
/**
 * Performs a server check as described by the SDAM spec.
 *
 * NOTE: This method automatically reschedules itself, so that there is always an active
 * monitoring process
 *
 * FIX: the success event was emitted as 'serverHeartbeatSucceded' (misspelled),
 * while the Server class documents `@fires Server#serverHeartbeatSucceeded`
 * (the SDAM spec name). The correctly spelled event is now emitted; the
 * misspelled name is still emitted as a deprecated alias so any existing
 * listeners keep working.
 *
 * @param {Server} server The server to monitor
 */
function monitorServer(server) {
  // executes a single check of a server
  const checkServer = callback => {
    let start = process.hrtime();

    // emit a signal indicating we have started the heartbeat
    server.emit('serverHeartbeatStarted', new ServerHeartbeatStartedEvent(server.name));

    server.command(
      'admin.$cmd',
      { ismaster: true },
      {
        monitoring: true,
        socketTimeout: server.s.options.connectionTimeout || 2000
      },
      function(err, result) {
        let duration = calculateDurationInMs(start);

        if (err) {
          server.emit(
            'serverHeartbeatFailed',
            new ServerHeartbeatFailedEvent(duration, err, server.name)
          );

          return callback(err, null);
        }

        const isMaster = result.result;
        const succeededEvent = new ServerHeartbeatSucceededEvent(duration, isMaster, server.name);
        server.emit('serverHeartbeatSucceeded', succeededEvent);
        // deprecated misspelled alias, kept for backward compatibility
        server.emit('serverHeartbeatSucceded', succeededEvent);

        return callback(null, isMaster);
      }
    );
  };

  const successHandler = isMaster => {
    server.s.monitoring = false;

    // emit an event indicating that our description has changed
    server.emit('descriptionReceived', new ServerDescription(server.description.address, isMaster));

    // schedule the next monitoring process
    server.s.monitorId = setTimeout(
      () => monitorServer(server),
      server.s.options.heartbeatFrequencyMS
    );
  };

  // run the actual monitoring loop
  server.s.monitoring = true;
  checkServer((err, isMaster) => {
    if (!err) {
      successHandler(isMaster);
      return;
    }

    // According to the SDAM specification's "Network error during server check" section, if
    // an ismaster call fails we reset the server's pool. If a server was once connected,
    // change its type to `Unknown` only after retrying once.

    // TODO: we need to reset the pool here

    return checkServer((err, isMaster) => {
      if (err) {
        server.s.monitoring = false;

        // revert to `Unknown` by emitting a default description with no isMaster
        server.emit('descriptionReceived', new ServerDescription(server.description.address));

        // do not reschedule monitoring in this case
        return;
      }

      successHandler(isMaster);
    });
  });
}
|
||||
|
||||
module.exports = {
|
||||
ServerDescriptionChangedEvent,
|
||||
ServerOpeningEvent,
|
||||
ServerClosedEvent,
|
||||
TopologyDescriptionChangedEvent,
|
||||
TopologyOpeningEvent,
|
||||
TopologyClosedEvent,
|
||||
ServerHeartbeatStartedEvent,
|
||||
ServerHeartbeatSucceededEvent,
|
||||
ServerHeartbeatFailedEvent,
|
||||
monitorServer
|
||||
};
|
411
node_modules/mongodb-core/lib/sdam/server.js
generated
vendored
Normal file
411
node_modules/mongodb-core/lib/sdam/server.js
generated
vendored
Normal file
@@ -0,0 +1,411 @@
|
||||
'use strict';
|
||||
const EventEmitter = require('events');
|
||||
const MongoError = require('../error').MongoError;
|
||||
const Pool = require('../connection/pool');
|
||||
const relayEvents = require('../utils').relayEvents;
|
||||
const calculateDurationInMs = require('../utils').calculateDurationInMs;
|
||||
const Query = require('../connection/commands').Query;
|
||||
const TwoSixWireProtocolSupport = require('../wireprotocol/2_6_support');
|
||||
const ThreeTwoWireProtocolSupport = require('../wireprotocol/3_2_support');
|
||||
const BSON = require('../connection/utils').retrieveBSON();
|
||||
const createClientInfo = require('../topologies/shared').createClientInfo;
|
||||
const Logger = require('../connection/logger');
|
||||
const ServerDescription = require('./server_description').ServerDescription;
|
||||
const ReadPreference = require('../topologies/read_preference');
|
||||
const monitorServer = require('./monitoring').monitorServer;
|
||||
|
||||
/**
|
||||
*
|
||||
* @fires Server#serverHeartbeatStarted
|
||||
* @fires Server#serverHeartbeatSucceeded
|
||||
* @fires Server#serverHeartbeatFailed
|
||||
*/
|
||||
class Server extends EventEmitter {
  /**
   * Create a server
   *
   * @param {ServerDescription} description The initial description for this server
   * @param {Object} options Options forwarded to the pool, logger and BSON parser
   */
  constructor(description, options) {
    super();

    this.s = {
      // the server description
      description,
      // a saved copy of the incoming options
      options,
      // the server logger
      logger: Logger('Server', options),
      // the bson parser
      bson: options.bson || new BSON(),
      // client metadata for the initial handshake
      clientInfo: createClientInfo(options),
      // state variable to determine if there is an active server check in progress
      monitoring: false,
      // the connection pool
      pool: null
    };
  }

  /** The current `ServerDescription` for this server */
  get description() {
    return this.s.description;
  }

  /** The server address ("host:port") taken from the current description */
  get name() {
    return this.s.description.address;
  }

  /**
   * Initiate server connect
   *
   * @param {Object} [options] Optional settings
   * @param {Array} [options.auth] Array of auth options to apply on connect
   */
  connect(options) {
    options = options || {};

    // do not allow connect to be called on anything that's not disconnected
    if (this.s.pool && !this.s.pool.isDisconnected() && !this.s.pool.isDestroyed()) {
      throw new MongoError(`Server instance in invalid state ${this.s.pool.state}`);
    }

    // create a pool
    // NOTE(review): Object.assign mutates `this.s.options` in place here (it is
    // the target argument), persisting `options` and the bson instance onto the
    // saved options — confirm this side effect is intended.
    this.s.pool = new Pool(this, Object.assign(this.s.options, options, { bson: this.s.bson }));

    // Set up listeners
    this.s.pool.on('connect', connectEventHandler(this));
    this.s.pool.on('close', closeEventHandler(this));

    // this.s.pool.on('error', errorEventHandler(this));
    // this.s.pool.on('timeout', timeoutEventHandler(this));
    // this.s.pool.on('parseError', errorEventHandler(this));
    // this.s.pool.on('reconnect', reconnectEventHandler(this));
    // this.s.pool.on('reconnectFailed', errorEventHandler(this));

    // relay all command monitoring events
    relayEvents(this.s.pool, this, ['commandStarted', 'commandSucceeded', 'commandFailed']);

    // If auth settings have been provided, use them
    if (options.auth) {
      this.s.pool.connect.apply(this.s.pool, options.auth);
      return;
    }

    this.s.pool.connect();
  }

  /**
   * Destroy the server connection
   *
   * NOTE(review): this currently only invokes the callback; the pool is never
   * actually destroyed and the documented options are unused — confirm whether
   * this is a stub awaiting implementation.
   *
   * @param {Boolean} [options.emitClose=false] Emit close event on destroy
   * @param {Boolean} [options.emitDestroy=false] Emit destroy event on destroy
   * @param {Boolean} [options.force=false] Force destroy the pool
   */
  destroy(callback) {
    if (typeof callback === 'function') {
      callback(null, null);
    }
  }

  /**
   * Immediately schedule monitoring of this server. If there already an attempt being made
   * this will be a no-op.
   */
  monitor() {
    if (this.s.monitoring) return;
    // cancel any check already scheduled for later before running one now
    if (this.s.monitorId) clearTimeout(this.s.monitorId);
    monitorServer(this);
  }

  /**
   * Execute a command
   *
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {object} cmd The command hash
   * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.checkKeys=false] Specify if the bson parser should validate keys.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {Boolean} [options.fullResult=false] Return the full envelope instead of just the result document.
   * @param {ClientSession} [options.session=null] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  command(ns, cmd, options, callback) {
    if (typeof options === 'function') {
      // support the (ns, cmd, callback) arity; the trailing `options || {}`
      // assignment is redundant but retained verbatim
      (callback = options), (options = {}), (options = options || {});
    }

    const error = basicReadValidations(this, options);
    if (error) {
      return callback(error, null);
    }

    // Clone the options
    options = Object.assign({}, options, { wireProtocolCommand: false });

    // Debug log
    if (this.s.logger.isDebug()) {
      this.s.logger.debug(
        `executing command [${JSON.stringify({ ns, cmd, options })}] against ${this.name}`
      );
    }

    // Check if we have collation support (collation requires wire version 5+)
    if (this.description.maxWireVersion < 5 && cmd.collation) {
      callback(new MongoError(`server ${this.name} does not support collation`));
      return;
    }

    // Create the query object
    const query = this.s.wireProtocolHandler.command(this, ns, cmd, {}, options);
    // Set slave OK of the query
    query.slaveOk = options.readPreference ? options.readPreference.slaveOk() : false;

    // write options
    const writeOptions = {
      raw: typeof options.raw === 'boolean' ? options.raw : false,
      promoteLongs: typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true,
      promoteValues: typeof options.promoteValues === 'boolean' ? options.promoteValues : true,
      promoteBuffers: typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false,
      command: true,
      monitoring: typeof options.monitoring === 'boolean' ? options.monitoring : false,
      fullResult: typeof options.fullResult === 'boolean' ? options.fullResult : false,
      requestId: query.requestId,
      socketTimeout: typeof options.socketTimeout === 'number' ? options.socketTimeout : null,
      session: options.session || null
    };

    // write the operation to the pool
    this.s.pool.write(query, writeOptions, callback);
  }

  /**
   * Insert one or more documents
   * @method
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {array} ops An array of documents to insert
   * @param {boolean} [options.ordered=true] Execute in order or out of order
   * @param {object} [options.writeConcern={}] Write concern for the operation
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {ClientSession} [options.session=null] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  insert(ns, ops, options, callback) {
    executeWriteOperation({ server: this, op: 'insert', ns, ops }, options, callback);
  }

  /**
   * Perform one or more update operations
   * @method
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {array} ops An array of updates
   * @param {boolean} [options.ordered=true] Execute in order or out of order
   * @param {object} [options.writeConcern={}] Write concern for the operation
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {ClientSession} [options.session=null] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  update(ns, ops, options, callback) {
    executeWriteOperation({ server: this, op: 'update', ns, ops }, options, callback);
  }

  /**
   * Perform one or more remove operations
   * @method
   * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
   * @param {array} ops An array of removes
   * @param {boolean} [options.ordered=true] Execute in order or out of order
   * @param {object} [options.writeConcern={}] Write concern for the operation
   * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
   * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
   * @param {ClientSession} [options.session=null] Session to use for the operation
   * @param {opResultCallback} callback A callback function
   */
  remove(ns, ops, options, callback) {
    executeWriteOperation({ server: this, op: 'remove', ns, ops }, options, callback);
  }
}
|
||||
|
||||
/**
 * Verify that a server is in a state where operations can be written to it.
 *
 * @param {Server} server The server to validate
 * @return {MongoError|null} An error describing the invalid state, or null when writable
 */
function basicWriteValidations(server) {
  const pool = server.s.pool;
  if (!pool) return new MongoError('server instance is not connected');
  if (pool.isDestroyed()) return new MongoError('server instance pool was destroyed');
  return null;
}
|
||||
|
||||
/**
 * Verify that a server can service a read, including read preference validation.
 *
 * @param {Server} server The server to validate
 * @param {Object} options Operation options, possibly carrying a readPreference
 * @return {MongoError|undefined} An error when validation fails, otherwise undefined
 */
function basicReadValidations(server, options) {
  const writeError = basicWriteValidations(server, options);
  if (writeError) {
    return writeError;
  }

  const readPreference = options.readPreference;
  if (readPreference && !(readPreference instanceof ReadPreference)) {
    return new MongoError('readPreference must be an instance of ReadPreference');
  }
}
|
||||
|
||||
/**
 * Execute a write operation (insert/update/remove) against a server.
 *
 * @param {Object} args Operation arguments
 * @param {Server} args.server The server to write to
 * @param {String} args.op The wire protocol operation name: 'insert', 'update' or 'remove'
 * @param {String} args.ns The MongoDB fully qualified namespace (ex: db1.collection1)
 * @param {Array|Object} args.ops The document(s) to write; a single document is wrapped in an array
 * @param {Object} [options] Optional settings forwarded to the wire protocol handler
 * @param {opResultCallback} callback A callback function
 */
function executeWriteOperation(args, options, callback) {
  if (typeof options === 'function') (callback = options), (options = {});
  options = options || {};

  // TODO: once we drop Node 4, use destructuring either here or in arguments.
  const server = args.server;
  const op = args.op;
  const ns = args.ns;
  const ops = Array.isArray(args.ops) ? args.ops : [args.ops];

  const error = basicWriteValidations(server, options);
  if (error) {
    callback(error, null);
    return;
  }

  // Check if we have collation support (collation requires wire version 5+)
  if (server.description.maxWireVersion < 5 && options.collation) {
    // BUG FIX: this is a plain function in strict mode, so `this` is undefined
    // here and `this.name` threw a TypeError; read the name off `server` instead.
    callback(new MongoError(`server ${server.name} does not support collation`));
    return;
  }

  // Execute write
  return server.s.wireProtocolHandler[op](server.s.pool, ns, server.s.bson, ops, options, callback);
}
|
||||
|
||||
/**
 * Compute the optional `saslSupportedMechs` field for the initial handshake.
 *
 * Only emitted when authenticating with the DEFAULT mechanism (explicit or
 * implied) and a user is present; otherwise an empty object is returned so it
 * can be merged into the handshake command unconditionally.
 *
 * @param {Object} [options] Connection options, possibly carrying auth settings
 * @return {Object} Either `{ saslSupportedMechs: '<source>.<user>' }` or `{}`
 */
function saslSupportedMechs(options) {
  if (!options) return {};

  const auth = options.auth || [];
  const mechanism = auth[0] || options.authMechanism;
  const source = auth[1] || options.authSource || options.dbName || 'admin';
  const user = auth[2] || options.user;

  const explicitMechanism = typeof mechanism === 'string' && mechanism.toUpperCase() !== 'DEFAULT';
  if (explicitMechanism || !user) return {};

  return { saslSupportedMechs: `${source}.${user}` };
}
|
||||
|
||||
/**
 * Derive an error from an `ismaster` exchange, whether it failed at the
 * transport level or the server replied with `ok: 0`.
 *
 * @param {Error} err A transport-level error, if any
 * @param {Object} result The command response envelope, if any
 * @return {Error|undefined} The error to surface, or undefined on success
 */
function extractIsMasterError(err, result) {
  if (err) return err;

  const document = result && result.result;
  if (document && document.ok === 0) {
    return new MongoError(document);
  }
}
|
||||
|
||||
/**
 * Send the initial `ismaster` handshake to a server over its pool.
 *
 * The command advertises client metadata, the locally configured compressors,
 * and (when applicable) the `saslSupportedMechs` field.
 *
 * @param {Server} server The server to handshake with
 * @param {opResultCallback} callback Receives the handshake response
 */
function executeServerHandshake(server, callback) {
  // construct an `ismaster` query
  const compressionOptions = server.s.options.compression;
  const compressors =
    compressionOptions && compressionOptions.compressors ? compressionOptions.compressors : [];

  const command = Object.assign(
    { ismaster: true, client: server.s.clientInfo, compression: compressors },
    saslSupportedMechs(server.s.options)
  );

  const query = new Query(server.s.bson, 'admin.$cmd', command, {
    numberToSkip: 0,
    numberToReturn: -1,
    checkKeys: false,
    slaveOk: true
  });

  // execute the query
  server.s.pool.write(
    query,
    { socketTimeout: server.s.options.connectionTimeout || 2000 },
    callback
  );
}
|
||||
|
||||
/**
 * Select a wire protocol handler matching the server's reported wire version.
 *
 * @param {Object} ismaster The server's `ismaster` response
 * @return {Object} A 3.2 handler for wire version 4+, otherwise the 2.6 handler
 */
function configureWireProtocolHandler(ismaster) {
  return ismaster.maxWireVersion >= 4
    ? new ThreeTwoWireProtocolSupport()
    : new TwoSixWireProtocolSupport();
}
|
||||
|
||||
/**
 * Build the handler attached to the pool's 'connect' event. On connect it runs
 * the initial `ismaster` handshake, negotiates compression, installs a wire
 * protocol handler, then emits 'descriptionReceived' followed by 'connect'.
 *
 * @param {Server} server The server owning the pool
 * @return {Function} The 'connect' event handler
 */
function connectEventHandler(server) {
  return function() {
    // log information of received information if in info mode
    // if (server.s.logger.isInfo()) {
    //   var object = err instanceof MongoError ? JSON.stringify(err) : {};
    //   server.s.logger.info(`server ${server.name} fired event ${event} out with message ${object}`);
    // }

    // begin initial server handshake
    const start = process.hrtime();
    executeServerHandshake(server, (err, response) => {
      // Set initial lastIsMasterMS - is this needed?
      server.s.lastIsMasterMS = calculateDurationInMs(start);

      // a transport error or an `ok: 0` reply aborts the connect sequence
      const serverError = extractIsMasterError(err, response);
      if (serverError) {
        server.emit('error', serverError);
        return;
      }

      // extract the ismaster from the server response
      const isMaster = response.result;

      // compression negotiation: agree on the first locally configured
      // compressor that the server also advertises
      if (isMaster && isMaster.compression) {
        const localCompressionInfo = server.s.options.compression;
        const localCompressors = localCompressionInfo.compressors;
        for (var i = 0; i < localCompressors.length; i++) {
          if (isMaster.compression.indexOf(localCompressors[i]) > -1) {
            server.s.pool.options.agreedCompressor = localCompressors[i];
            break;
          }
        }

        // forward the zlib level to the pool when one was configured
        if (localCompressionInfo.zlibCompressionLevel) {
          server.s.pool.options.zlibCompressionLevel = localCompressionInfo.zlibCompressionLevel;
        }
      }

      // configure the wire protocol handler
      server.s.wireProtocolHandler = configureWireProtocolHandler(isMaster);

      // log the connection event if requested
      if (server.s.logger.isInfo()) {
        server.s.logger.info(
          `server ${server.name} connected with ismaster [${JSON.stringify(isMaster)}]`
        );
      }

      // emit an event indicating that our description has changed
      server.emit(
        'descriptionReceived',
        new ServerDescription(server.description.address, isMaster)
      );

      // emit a connect event
      server.emit('connect', isMaster);
    });
  };
}
|
||||
|
||||
/**
 * Build the handler attached to the pool's 'close' event, which relays the
 * event on the server itself.
 *
 * @param {Server} server The server that should emit 'close'
 * @return {Function} The 'close' event handler
 */
function closeEventHandler(server) {
  return () => server.emit('close');
}
|
||||
|
||||
// The Server class is this module's sole export.
module.exports = Server;
|
141
node_modules/mongodb-core/lib/sdam/server_description.js
generated
vendored
Normal file
141
node_modules/mongodb-core/lib/sdam/server_description.js
generated
vendored
Normal file
@@ -0,0 +1,141 @@
|
||||
'use strict';
|
||||
|
||||
// An enumeration of server types we know about
const ServerType = {
  Standalone: 'Standalone',
  Mongos: 'Mongos',
  PossiblePrimary: 'PossiblePrimary',
  RSPrimary: 'RSPrimary',
  RSSecondary: 'RSSecondary',
  RSArbiter: 'RSArbiter',
  RSOther: 'RSOther',
  RSGhost: 'RSGhost',
  Unknown: 'Unknown'
};

// Server types a driver may send writes to (see `ServerDescription#isWritable`)
const WRITABLE_SERVER_TYPES = new Set([
  ServerType.RSPrimary,
  ServerType.Standalone,
  ServerType.Mongos
]);

// `ismaster` response fields copied verbatim onto a ServerDescription when present
const ISMASTER_FIELDS = [
  'minWireVersion',
  'maxWireVersion',
  'me',
  'hosts',
  'passives',
  'arbiters',
  'tags',
  'setName',
  'setVersion',
  'electionId',
  'primary',
  'logicalSessionTimeoutMinutes'
];
|
||||
|
||||
/**
|
||||
* The client's view of a single server, based on the most recent ismaster outcome.
|
||||
*
|
||||
* Internal type, not meant to be directly instantiated
|
||||
*/
|
||||
class ServerDescription {
  /**
   * Create a ServerDescription
   *
   * @param {String} address The address of the server
   * @param {Object} [ismaster] An optional ismaster response for this server
   * @param {Object} [options] Optional settings
   * @param {Number} [options.roundTripTime] The round trip time to ping this server (in ms)
   */
  constructor(address, ismaster, options) {
    const opts = options || {};

    // fill in defaults for any fields the ismaster response omitted
    const response = Object.assign(
      {
        minWireVersion: 0,
        maxWireVersion: 0,
        hosts: [],
        passives: [],
        arbiters: [],
        tags: []
      },
      ismaster
    );

    this.address = address;
    this.error = null;
    this.roundTripTime = opts.roundTripTime || 0;
    this.lastUpdateTime = Date.now();

    const lastWrite = response.lastWrite;
    this.lastWriteDate = lastWrite ? lastWrite.lastWriteDate : null;
    this.opTime = lastWrite ? lastWrite.opTime : null;
    this.type = parseServerType(response);

    // direct mappings: copy the well-known ismaster fields onto this description
    for (const field of ISMASTER_FIELDS) {
      if (typeof response[field] !== 'undefined') this[field] = response[field];
    }

    // normalize case for hosts
    const lowered = host => host.toLowerCase();
    this.hosts = this.hosts.map(lowered);
    this.passives = this.passives.map(lowered);
    this.arbiters = this.arbiters.map(lowered);
  }

  /** @return {Array} All hosts this server knows about (hosts, then arbiters, then passives) */
  get allHosts() {
    return [].concat(this.hosts, this.arbiters, this.passives);
  }

  /**
   * @return {Boolean} Is this server available for reads
   */
  get isReadable() {
    return this.isWritable || this.type === ServerType.RSSecondary;
  }

  /**
   * @return {Boolean} Is this server available for writes
   */
  get isWritable() {
    return WRITABLE_SERVER_TYPES.has(this.type);
  }
}
|
||||
|
||||
/**
|
||||
* Parses an `ismaster` message and determines the server type
|
||||
*
|
||||
* @param {Object} ismaster The `ismaster` message to parse
|
||||
* @return {ServerType}
|
||||
*/
|
||||
/**
 * Parses an `ismaster` message and determines the server type.
 *
 * @param {Object} ismaster The `ismaster` message to parse
 * @return {ServerType} The server type implied by the response
 */
function parseServerType(ismaster) {
  // missing or failed responses leave the type undetermined
  if (!ismaster || !ismaster.ok) return ServerType.Unknown;

  // a member still initializing its replica set config is a "ghost"
  if (ismaster.isreplicaset) return ServerType.RSGhost;

  // mongos identifies itself via the `isdbgrid` message
  if (ismaster.msg && ismaster.msg === 'isdbgrid') return ServerType.Mongos;

  if (ismaster.setName) {
    if (ismaster.hidden) return ServerType.RSOther;
    if (ismaster.ismaster) return ServerType.RSPrimary;
    if (ismaster.secondary) return ServerType.RSSecondary;
    if (ismaster.arbiterOnly) return ServerType.RSArbiter;
    return ServerType.RSOther;
  }

  return ServerType.Standalone;
}
|
||||
|
||||
// Public API: the description class and the server type enumeration.
module.exports = {
  ServerDescription,
  ServerType
};
|
206
node_modules/mongodb-core/lib/sdam/server_selectors.js
generated
vendored
Normal file
206
node_modules/mongodb-core/lib/sdam/server_selectors.js
generated
vendored
Normal file
@@ -0,0 +1,206 @@
|
||||
'use strict';
|
||||
const ServerType = require('./server_description').ServerType;
|
||||
const TopologyType = require('./topology_description').TopologyType;
|
||||
const ReadPreference = require('../topologies/read_preference');
|
||||
const MongoError = require('../error').MongoError;
|
||||
|
||||
// max staleness constants (see the MongoDB Max Staleness specification)
// idle primaries are assumed to write a no-op roughly every 10s
const IDLE_WRITE_PERIOD = 10000;
// smallest maxStalenessSeconds value the spec permits
const SMALLEST_MAX_STALENESS_SECONDS = 90;
|
||||
|
||||
/**
 * Returns a server selector that picks writable servers within the latency window.
 *
 * @return {Function} (topologyDescription, servers) => writable servers in the window
 */
function writableServerSelector() {
  return (topologyDescription, servers) => {
    const writable = servers.filter(s => s.isWritable);
    return latencyWindowReducer(topologyDescription, writable);
  };
}
|
||||
|
||||
// reducers
|
||||
/**
 * Reduce the candidate servers to those within the read preference's
 * `maxStalenessSeconds` bound, per the Max Staleness specification.
 *
 * @param {ReadPreference} readPreference The read preference providing maxStalenessSeconds
 * @param {TopologyDescription} topologyDescription The current topology description
 * @param {ServerDescription[]} servers The candidate servers
 * @return {ServerDescription[]} Servers whose estimated staleness is within bound
 * @throws {MongoError} When maxStalenessSeconds is below the allowed minimum
 */
function maxStalenessReducer(readPreference, topologyDescription, servers) {
  // a null/negative bound disables staleness filtering entirely
  if (readPreference.maxStalenessSeconds == null || readPreference.maxStalenessSeconds < 0) {
    return servers;
  }

  const maxStaleness = readPreference.maxStalenessSeconds;
  const maxStalenessVariance =
    (topologyDescription.heartbeatFrequencyMS + IDLE_WRITE_PERIOD) / 1000;
  if (maxStaleness < maxStalenessVariance) {
    // BUG FIX: MongoError is a class and must be constructed with `new`;
    // calling it bare threw a TypeError instead of the intended validation error
    throw new MongoError(`maxStalenessSeconds must be at least ${maxStalenessVariance} seconds`);
  }

  if (maxStaleness < SMALLEST_MAX_STALENESS_SECONDS) {
    throw new MongoError(
      `maxStalenessSeconds must be at least ${SMALLEST_MAX_STALENESS_SECONDS} seconds`
    );
  }

  if (topologyDescription.type === TopologyType.ReplicaSetWithPrimary) {
    // with a primary, staleness is estimated relative to the primary's write state
    const primary = servers.filter(primaryFilter)[0];
    return servers.reduce((result, server) => {
      const stalenessMS =
        server.lastUpdateTime -
        server.lastWriteDate -
        (primary.lastUpdateTime - primary.lastWriteDate) +
        topologyDescription.heartbeatFrequencyMS;

      const staleness = stalenessMS / 1000;
      if (staleness <= readPreference.maxStalenessSeconds) result.push(server);
      return result;
    }, []);
  } else if (topologyDescription.type === TopologyType.ReplicaSetNoPrimary) {
    // without a primary, staleness is measured against the most recently written member
    const sMax = servers.reduce((max, s) => (s.lastWriteDate > max.lastWriteDate ? s : max));
    return servers.reduce((result, server) => {
      const stalenessMS =
        sMax.lastWriteDate - server.lastWriteDate + topologyDescription.heartbeatFrequencyMS;

      const staleness = stalenessMS / 1000;
      if (staleness <= readPreference.maxStalenessSeconds) result.push(server);
      return result;
    }, []);
  }

  // other topology types are unaffected by max staleness
  return servers;
}
|
||||
|
||||
/**
 * Determine whether a server's tags satisfy a read preference tag set.
 *
 * Every key/value pair in `tagSet` must appear in `serverTags`; an empty tag
 * set matches any server.
 *
 * @param {Object} tagSet The required tags
 * @param {Object} serverTags The tags reported by the server
 * @return {Boolean} True when every required tag matches
 */
function tagSetMatch(tagSet, serverTags) {
  const serverKeys = new Set(Object.keys(serverTags));
  return Object.keys(tagSet).every(
    key => serverKeys.has(key) && serverTags[key] === tagSet[key]
  );
}
|
||||
|
||||
/**
 * Reduce candidate servers by the read preference's tag sets.
 *
 * Tag sets are tried in order; the first set matching at least one server
 * wins. No tag sets at all means every server is eligible, while tag sets
 * that match nothing yield an empty list.
 *
 * @param {ReadPreference} readPreference The read preference providing tag sets
 * @param {ServerDescription[]} servers The candidate servers
 * @return {ServerDescription[]} Servers matching the first satisfiable tag set
 */
function tagSetReducer(readPreference, servers) {
  const tags = readPreference.tags;
  if (tags == null || (Array.isArray(tags) && tags.length === 0)) {
    return servers;
  }

  for (const tagSet of tags) {
    const matched = servers.filter(server => tagSetMatch(tagSet, server.tags));
    if (matched.length) {
      return matched;
    }
  }

  return [];
}
|
||||
|
||||
/**
 * Reduce candidate servers to those inside the latency window: servers whose
 * round trip time is within `localThresholdMS` of the fastest candidate.
 *
 * @param {TopologyDescription} topologyDescription Provides localThresholdMS
 * @param {ServerDescription[]} servers The candidate servers
 * @return {ServerDescription[]} Servers inside the latency window, original order preserved
 */
function latencyWindowReducer(topologyDescription, servers) {
  let low = -1;
  for (const server of servers) {
    low = low === -1 ? server.roundTripTime : Math.min(server.roundTripTime, low);
  }

  const high = low + topologyDescription.localThresholdMS;

  return servers.filter(
    server => server.roundTripTime >= low && server.roundTripTime <= high
  );
}
|
||||
|
||||
// filters

/** Matches only the replica set primary. */
function primaryFilter(server) {
  return ServerType.RSPrimary === server.type;
}

/** Matches only replica set secondaries. */
function secondaryFilter(server) {
  return ServerType.RSSecondary === server.type;
}

/** Matches data-bearing replica set members (primary or secondary). */
function nearestFilter(server) {
  return ServerType.RSSecondary === server.type || ServerType.RSPrimary === server.type;
}

/** Matches every server whose type has been determined. */
function knownFilter(server) {
  return ServerType.Unknown !== server.type;
}
|
||||
|
||||
/**
 * Returns a server selector for the given read preference.
 *
 * The selector reduces the candidate servers in a fixed, order-sensitive
 * pipeline: max staleness -> tag sets -> latency window -> mode filter.
 *
 * @param {ReadPreference} readPreference The read preference to select with
 * @return {Function} (topologyDescription, servers) => eligible servers
 * @throws {TypeError} When the read preference is invalid
 */
function readPreferenceServerSelector(readPreference) {
  if (!readPreference.isValid()) {
    throw new TypeError('Invalid read preference specified');
  }

  return function(topologyDescription, servers) {
    // reject selection outright when the read preference demands a newer wire
    // version than the topology's common wire version supports
    const commonWireVersion = topologyDescription.commonWireVersion;
    if (
      commonWireVersion &&
      (readPreference.minWireVersion && readPreference.minWireVersion > commonWireVersion)
    ) {
      throw new MongoError(
        `Minimum wire version '${
          readPreference.minWireVersion
        }' required, but found '${commonWireVersion}'`
      );
    }

    // for single-server and sharded topologies the read preference is applied
    // by the server/mongos itself, so any known server in the window qualifies
    if (
      topologyDescription.type === TopologyType.Single ||
      topologyDescription.type === TopologyType.Sharded
    ) {
      return latencyWindowReducer(topologyDescription, servers.filter(knownFilter));
    }

    // primary mode ignores tags, staleness and the latency window
    if (readPreference.mode === ReadPreference.PRIMARY) {
      return servers.filter(primaryFilter);
    }

    if (readPreference.mode === ReadPreference.SECONDARY) {
      return latencyWindowReducer(
        topologyDescription,
        tagSetReducer(
          readPreference,
          maxStalenessReducer(readPreference, topologyDescription, servers)
        )
      ).filter(secondaryFilter);
    } else if (readPreference.mode === ReadPreference.NEAREST) {
      return latencyWindowReducer(
        topologyDescription,
        tagSetReducer(
          readPreference,
          maxStalenessReducer(readPreference, topologyDescription, servers)
        )
      ).filter(nearestFilter);
    } else if (readPreference.mode === ReadPreference.SECONDARY_PREFERRED) {
      const result = latencyWindowReducer(
        topologyDescription,
        tagSetReducer(
          readPreference,
          maxStalenessReducer(readPreference, topologyDescription, servers)
        )
      ).filter(secondaryFilter);

      // fall back to the primary when no eligible secondary exists
      return result.length === 0 ? servers.filter(primaryFilter) : result;
    } else if (readPreference.mode === ReadPreference.PRIMARY_PREFERRED) {
      const result = servers.filter(primaryFilter);
      if (result.length) {
        return result;
      }

      // no primary available — fall back to eligible secondaries
      return latencyWindowReducer(
        topologyDescription,
        tagSetReducer(
          readPreference,
          maxStalenessReducer(readPreference, topologyDescription, servers)
        )
      ).filter(secondaryFilter);
    }
  };
}
|
||||
|
||||
// Public API: selector factories consumed by the topology during server selection.
module.exports = {
  writableServerSelector,
  readPreferenceServerSelector
};
|
666
node_modules/mongodb-core/lib/sdam/topology.js
generated
vendored
Normal file
666
node_modules/mongodb-core/lib/sdam/topology.js
generated
vendored
Normal file
@@ -0,0 +1,666 @@
|
||||
'use strict';
|
||||
const EventEmitter = require('events');
|
||||
const ServerDescription = require('./server_description').ServerDescription;
|
||||
const TopologyDescription = require('./topology_description').TopologyDescription;
|
||||
const TopologyType = require('./topology_description').TopologyType;
|
||||
const monitoring = require('./monitoring');
|
||||
const calculateDurationInMs = require('../utils').calculateDurationInMs;
|
||||
const MongoTimeoutError = require('../error').MongoTimeoutError;
|
||||
const MongoNetworkError = require('../error').MongoNetworkError;
|
||||
const Server = require('./server');
|
||||
const relayEvents = require('../utils').relayEvents;
|
||||
const ReadPreference = require('../topologies/read_preference');
|
||||
const readPreferenceServerSelector = require('./server_selectors').readPreferenceServerSelector;
|
||||
const writableServerSelector = require('./server_selectors').writableServerSelector;
|
||||
const isRetryableWritesSupported = require('../topologies/shared').isRetryableWritesSupported;
|
||||
const Cursor = require('./cursor');
|
||||
const deprecate = require('util').deprecate;
|
||||
const BSON = require('../connection/utils').retrieveBSON();
|
||||
const createCompressionInfo = require('../topologies/shared').createCompressionInfo;
|
||||
|
||||
// Global state
// Monotonic counter used to assign a unique id to each Topology in this process.
let globalTopologyCounter = 0;

// Constants
// Default SDAM/server-selection tuning values, overridable via constructor options.
// NOTE(review): the Topology constructor jsdoc advertises
// serverSelectionTimeoutMS=30000 and heartbeatFrequencyMS=10000, which
// disagrees with the defaults below — confirm which values are intended.
const TOPOLOGY_DEFAULTS = {
  localThresholdMS: 15,
  serverSelectionTimeoutMS: 10000,
  heartbeatFrequencyMS: 30000,
  minHeartbeatIntervalMS: 500
};
|
||||
|
||||
/**
|
||||
* A container of server instances representing a connection to a MongoDB topology.
|
||||
*
|
||||
* @fires Topology#serverOpening
|
||||
* @fires Topology#serverClosed
|
||||
* @fires Topology#serverDescriptionChanged
|
||||
* @fires Topology#topologyOpening
|
||||
* @fires Topology#topologyClosed
|
||||
* @fires Topology#topologyDescriptionChanged
|
||||
* @fires Topology#serverHeartbeatStarted
|
||||
* @fires Topology#serverHeartbeatSucceeded
|
||||
* @fires Topology#serverHeartbeatFailed
|
||||
*/
|
||||
class Topology extends EventEmitter {
|
||||
/**
|
||||
* Create a topology
|
||||
*
|
||||
* @param {Array|String} [seedlist] a string list, or array of Server instances to connect to
|
||||
* @param {Object} [options] Optional settings
|
||||
* @param {Number} [options.localThresholdMS=15] The size of the latency window for selecting among multiple suitable servers
|
||||
* @param {Number} [options.serverSelectionTimeoutMS=30000] How long to block for server selection before throwing an error
|
||||
* @param {Number} [options.heartbeatFrequencyMS=10000] The frequency with which topology updates are scheduled
|
||||
*/
|
||||
  constructor(seedlist, options) {
    super();

    // Allow calling with a single `options` argument: Topology(options).
    if (typeof options === 'undefined') {
      options = seedlist;
      seedlist = [];

      // this is for legacy single server constructor support
      if (options.host) {
        seedlist.push({ host: options.host, port: options.port });
      }
    }

    seedlist = seedlist || [];
    // user options override the shared TOPOLOGY_DEFAULTS
    options = Object.assign({}, TOPOLOGY_DEFAULTS, options);

    const topologyType = topologyTypeFromSeedlist(seedlist, options);
    // monotonically increasing id, unique per process
    const topologyId = globalTopologyCounter++;
    // seed the description with one Unknown ServerDescription per seed,
    // keyed by "host:port" (port defaults to 27017 when omitted)
    const serverDescriptions = seedlist.reduce((result, seed) => {
      const address = seed.port ? `${seed.host}:${seed.port}` : `${seed.host}:27017`;
      result.set(address, new ServerDescription(address));
      return result;
    }, new Map());

    this.s = {
      // the id of this topology
      id: topologyId,
      // passed in options
      options: Object.assign({}, options),
      // initial seedlist of servers to connect to
      seedlist: seedlist,
      // the topology description
      description: new TopologyDescription(
        topologyType,
        serverDescriptions,
        options.replicaSet,
        null,
        null,
        options
      ),
      serverSelectionTimeoutMS: options.serverSelectionTimeoutMS,
      heartbeatFrequencyMS: options.heartbeatFrequencyMS,
      minHeartbeatIntervalMS: options.minHeartbeatIntervalMS,
      // allow users to override the cursor factory
      Cursor: options.cursorFactory || Cursor,
      // the bson parser
      bson:
        options.bson ||
        new BSON([
          BSON.Binary,
          BSON.Code,
          BSON.DBRef,
          BSON.Decimal128,
          BSON.Double,
          BSON.Int32,
          BSON.Long,
          BSON.Map,
          BSON.MaxKey,
          BSON.MinKey,
          BSON.ObjectId,
          BSON.BSONRegExp,
          BSON.Symbol,
          BSON.Timestamp
        ])
    };

    // amend options for server instance creation
    this.s.options.compression = { compressors: createCompressionInfo(options) };
  }
|
||||
|
||||
  /**
   * The current view of the deployment, rebuilt on every monitoring update.
   *
   * @return {TopologyDescription} the current `TopologyDescription` for this topology
   */
  get description() {
    return this.s.description;
  }
|
||||
|
||||
/**
|
||||
* All raw connections
|
||||
* @method
|
||||
* @return {Connection[]}
|
||||
*/
|
||||
connections() {
|
||||
return Array.from(this.s.servers.values()).reduce((result, server) => {
|
||||
return result.concat(server.s.pool.allConnections());
|
||||
}, []);
|
||||
}
|
||||
|
||||
  /**
   * Initiate server connect
   *
   * Emits the SDAM `topologyOpening` and initial `topologyDescriptionChanged`
   * events, then creates and connects a `Server` for every seed address.
   *
   * @param {Object} [options] Optional settings
   * @param {Array} [options.auth=null] Array of auth options to apply on connect
   */
  connect(/* options */) {
    // emit SDAM monitoring events
    this.emit('topologyOpening', new monitoring.TopologyOpeningEvent(this.s.id));

    // emit an event for the topology change
    this.emit(
      'topologyDescriptionChanged',
      new monitoring.TopologyDescriptionChangedEvent(
        this.s.id,
        new TopologyDescription(TopologyType.Unknown), // initial is always Unknown
        this.s.description
      )
    );

    // create/connect a Server instance for each seed; this also assigns
    // `this.s.servers`
    connectServers(this, Array.from(this.s.description.servers.values()));
    // NOTE(review): `connected` is set before the servers have actually
    // completed their handshakes; the 'connect' event signals readiness
    this.s.connected = true;
  }
|
||||
|
||||
/**
|
||||
* Close this topology
|
||||
*/
|
||||
close(callback) {
|
||||
// destroy all child servers
|
||||
this.s.servers.forEach(server => server.destroy());
|
||||
|
||||
// emit an event for close
|
||||
this.emit('topologyClosed', new monitoring.TopologyClosedEvent(this.s.id));
|
||||
|
||||
this.s.connected = false;
|
||||
|
||||
if (typeof callback === 'function') {
|
||||
callback(null, null);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Selects a server according to the selection predicate provided
|
||||
*
|
||||
* @param {function} [selector] An optional selector to select servers by, defaults to a random selection within a latency window
|
||||
* @return {Server} An instance of a `Server` meeting the criteria of the predicate provided
|
||||
*/
|
||||
selectServer(selector, options, callback) {
|
||||
if (typeof options === 'function') (callback = options), (options = {});
|
||||
options = Object.assign(
|
||||
{},
|
||||
{ serverSelectionTimeoutMS: this.s.serverSelectionTimeoutMS },
|
||||
options
|
||||
);
|
||||
|
||||
selectServers(
|
||||
this,
|
||||
selector,
|
||||
options.serverSelectionTimeoutMS,
|
||||
process.hrtime(),
|
||||
(err, servers) => {
|
||||
if (err) return callback(err, null);
|
||||
callback(null, randomSelection(servers));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
  /**
   * Update the internal TopologyDescription with a ServerDescription
   *
   * Called whenever a monitored server produces a new description (e.g. an
   * ismaster response). Rebuilds the immutable TopologyDescription, emits the
   * SDAM `serverDescriptionChanged` / `topologyDescriptionChanged` events, and
   * reconciles the live `Server` instances with the new description set.
   *
   * @param {object} serverDescription The server to update in the internal list of server descriptions
   */
  serverUpdateHandler(serverDescription) {
    // ignore updates for servers the topology no longer tracks
    if (!this.s.description.hasServer(serverDescription.address)) {
      return;
    }

    // these will be used for monitoring events later
    const previousTopologyDescription = this.s.description;
    const previousServerDescription = this.s.description.servers.get(serverDescription.address);

    // first update the TopologyDescription (returns a new immutable instance)
    this.s.description = this.s.description.update(serverDescription);

    // emit monitoring events for this change
    this.emit(
      'serverDescriptionChanged',
      new monitoring.ServerDescriptionChangedEvent(
        this.s.id,
        serverDescription.address,
        previousServerDescription,
        this.s.description.servers.get(serverDescription.address)
      )
    );

    // update server list from updated descriptions
    updateServers(this, serverDescription);

    // the topology-level event must fire after the server list has been
    // reconciled; selectServers relies on it to retry selection
    this.emit(
      'topologyDescriptionChanged',
      new monitoring.TopologyDescriptionChangedEvent(
        this.s.id,
        previousTopologyDescription,
        this.s.description
      )
    );
  }
|
||||
|
||||
  /**
   * Authenticate using a specified mechanism
   *
   * NOTE(review): this implementation is a stub — it performs no
   * authentication and immediately succeeds with (null, null).
   *
   * @param {String} mechanism The auth mechanism used for authentication
   * @param {String} db The db we are authenticating against
   * @param {Object} options Optional settings for the authenticating mechanism
   * @param {authResultCallback} callback A callback function
   */
  auth(mechanism, db, options, callback) {
    callback(null, null);
  }
|
||||
|
||||
  /**
   * Logout from a database
   *
   * NOTE(review): this implementation is a stub — it performs no logout and
   * immediately succeeds with (null, null).
   *
   * @param {String} db The db we are logging out from
   * @param {authResultCallback} callback A callback function
   */
  logout(db, callback) {
    callback(null, null);
  }
|
||||
|
||||
// Basic operation support. Eventually this should be moved into command construction
|
||||
// during the command refactor.
|
||||
|
||||
/**
|
||||
* Insert one or more documents
|
||||
*
|
||||
* @param {String} ns The full qualified namespace for this operation
|
||||
* @param {Array} ops An array of documents to insert
|
||||
* @param {Boolean} [options.ordered=true] Execute in order or out of order
|
||||
* @param {Object} [options.writeConcern] Write concern for the operation
|
||||
* @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized
|
||||
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields
|
||||
* @param {ClientSession} [options.session] Session to use for the operation
|
||||
* @param {boolean} [options.retryWrites] Enable retryable writes for this operation
|
||||
* @param {opResultCallback} callback A callback function
|
||||
*/
|
||||
insert(ns, ops, options, callback) {
|
||||
executeWriteOperation({ topology: this, op: 'insert', ns, ops }, options, callback);
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform one or more update operations
|
||||
*
|
||||
* @param {string} ns The fully qualified namespace for this operation
|
||||
* @param {array} ops An array of updates
|
||||
* @param {boolean} [options.ordered=true] Execute in order or out of order
|
||||
* @param {object} [options.writeConcern] Write concern for the operation
|
||||
* @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized
|
||||
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields
|
||||
* @param {ClientSession} [options.session] Session to use for the operation
|
||||
* @param {boolean} [options.retryWrites] Enable retryable writes for this operation
|
||||
* @param {opResultCallback} callback A callback function
|
||||
*/
|
||||
update(ns, ops, options, callback) {
|
||||
executeWriteOperation({ topology: this, op: 'update', ns, ops }, options, callback);
|
||||
}
|
||||
|
||||
/**
|
||||
* Perform one or more remove operations
|
||||
*
|
||||
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
|
||||
* @param {array} ops An array of removes
|
||||
* @param {boolean} [options.ordered=true] Execute in order or out of order
|
||||
* @param {object} [options.writeConcern={}] Write concern for the operation
|
||||
* @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
|
||||
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
|
||||
* @param {ClientSession} [options.session=null] Session to use for the operation
|
||||
* @param {boolean} [options.retryWrites] Enable retryable writes for this operation
|
||||
* @param {opResultCallback} callback A callback function
|
||||
*/
|
||||
remove(ns, ops, options, callback) {
|
||||
executeWriteOperation({ topology: this, op: 'remove', ns, ops }, options, callback);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a command
|
||||
*
|
||||
* @method
|
||||
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
|
||||
* @param {object} cmd The command hash
|
||||
* @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
|
||||
* @param {Connection} [options.connection] Specify connection object to execute command against
|
||||
* @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
|
||||
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
|
||||
* @param {ClientSession} [options.session=null] Session to use for the operation
|
||||
* @param {opResultCallback} callback A callback function
|
||||
*/
|
||||
command(ns, cmd, options, callback) {
|
||||
if (typeof options === 'function') {
|
||||
(callback = options), (options = {}), (options = options || {});
|
||||
}
|
||||
|
||||
const readPreference = options.readPreference ? options.readPreference : ReadPreference.primary;
|
||||
this.selectServer(readPreferenceServerSelector(readPreference), (err, server) => {
|
||||
if (err) {
|
||||
callback(err, null);
|
||||
return;
|
||||
}
|
||||
|
||||
server.command(ns, cmd, options, callback);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new cursor
|
||||
*
|
||||
* @method
|
||||
* @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1)
|
||||
* @param {object|Long} cmd Can be either a command returning a cursor or a cursorId
|
||||
* @param {object} [options] Options for the cursor
|
||||
* @param {object} [options.batchSize=0] Batchsize for the operation
|
||||
* @param {array} [options.documents=[]] Initial documents list for cursor
|
||||
* @param {ReadPreference} [options.readPreference] Specify read preference if command supports it
|
||||
* @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized.
|
||||
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields.
|
||||
* @param {ClientSession} [options.session=null] Session to use for the operation
|
||||
* @param {object} [options.topology] The internal topology of the created cursor
|
||||
* @returns {Cursor}
|
||||
*/
|
||||
cursor(ns, cmd, options) {
|
||||
options = options || {};
|
||||
const topology = options.topology || this;
|
||||
const CursorClass = options.cursorFactory || this.s.Cursor;
|
||||
|
||||
return new CursorClass(this.s.bson, ns, cmd, options, topology, this.s.options);
|
||||
}
|
||||
}
|
||||
|
||||
// legacy aliases
// `destroy()` is the old public name for `close()`; keep it working but warn
// callers (util.deprecate prints the message once per process).
Topology.prototype.destroy = deprecate(
  Topology.prototype.close,
  'destroy() is deprecated, please use close() instead'
);
|
||||
|
||||
/**
 * Derive the initial topology type from the seedlist and user options.
 *
 * A declared replica set always starts as ReplicaSetNoPrimary; otherwise a
 * single seed means Single, and anything else starts Unknown.
 */
function topologyTypeFromSeedlist(seedlist, options) {
  if (options.replicaSet) {
    return TopologyType.ReplicaSetNoPrimary;
  }

  if (seedlist.length === 1) {
    return TopologyType.Single;
  }

  return TopologyType.Unknown;
}
|
||||
|
||||
/**
 * Pick one element of `array` uniformly at random.
 *
 * @param {Array} array the candidates to choose from
 * @return {*} a randomly chosen element (undefined for an empty array)
 */
function randomSelection(array) {
  const index = Math.floor(Math.random() * array.length);
  return array[index];
}
|
||||
|
||||
/**
 * Selects servers using the provided selector
 *
 * Attempts selection against the current TopologyDescription; if no server
 * matches, it triggers monitoring and retries on the next topology change,
 * until `timeout` (measured from `start`) elapses.
 *
 * @private
 * @param {Topology} topology The topology to select servers from
 * @param {function} selector The actual predicate used for selecting servers
 * @param {Number} timeout The max time we are willing wait for selection
 * @param {Number} start A high precision timestamp (process.hrtime tuple) for the start of the selection process
 * @param {function} callback The callback used to convey errors or the resultant servers
 */
function selectServers(topology, selector, timeout, start, callback) {
  const serverDescriptions = Array.from(topology.description.servers.values());
  let descriptions;

  // selector may throw (e.g. incompatible topology); surface that as an error
  try {
    descriptions = selector
      ? selector(topology.description, serverDescriptions)
      : serverDescriptions;
  } catch (e) {
    return callback(e, null);
  }

  // happy path: at least one description matched, map back to live Servers
  if (descriptions.length) {
    const servers = descriptions.map(description => topology.s.servers.get(description.address));
    return callback(null, servers);
  }

  const duration = calculateDurationInMs(start);
  if (duration >= timeout) {
    return callback(new MongoTimeoutError(`Server selection timed out after ${timeout} ms`));
  }

  const retrySelection = () => {
    // ensure all server monitors attempt monitoring immediately
    topology.s.servers.forEach(server => server.monitor());

    // if monitoring produces no topology change within the minimum heartbeat
    // interval, give up this iteration
    const iterationTimer = setTimeout(() => {
      callback(new MongoTimeoutError('Server selection timed out due to monitoring'));
    }, topology.s.minHeartbeatIntervalMS);

    // NOTE(review): if `iterationTimer` fires, this one-shot listener is never
    // removed, so a later topology change could invoke `callback` a second
    // time — verify whether callers tolerate that
    topology.once('topologyDescriptionChanged', () => {
      // successful iteration, clear the check timer
      clearTimeout(iterationTimer);

      // topology description has changed due to monitoring, reattempt server selection
      selectServers(topology, selector, timeout, start, callback);
    });
  };

  // ensure we are connected
  if (!topology.s.connected) {
    topology.connect();

    // we want to make sure we're still within the requested timeout window
    const failToConnectTimer = setTimeout(() => {
      callback(new MongoTimeoutError('Server selection timed out waiting to connect'));
    }, timeout - duration);

    topology.once('connect', () => {
      clearTimeout(failToConnectTimer);
      retrySelection();
    });

    return;
  }

  retrySelection();
}
|
||||
|
||||
/**
 * Create `Server` instances for all initially known servers, connect them, and assign
 * them to the passed in `Topology` (as `topology.s.servers`).
 *
 * @param {Topology} topology The topology responsible for the servers
 * @param {ServerDescription[]} serverDescriptions A list of server descriptions to connect
 */
function connectServers(topology, serverDescriptions) {
  topology.s.servers = serverDescriptions.reduce((servers, serverDescription) => {
    // publish an open event for each ServerDescription created
    topology.emit(
      'serverOpening',
      new monitoring.ServerOpeningEvent(topology.s.id, serverDescription.address)
    );

    const server = new Server(serverDescription, topology.s.options);
    // forward heartbeat monitoring events from the server to the topology
    relayEvents(server, topology, [
      'serverHeartbeatStarted',
      'serverHeartbeatSucceeded',
      'serverHeartbeatFailed'
    ]);

    // new descriptions (e.g. from ismaster) feed back into the topology
    server.on('descriptionReceived', topology.serverUpdateHandler.bind(topology));
    server.on('connect', serverConnectEventHandler(server, topology));
    servers.set(serverDescription.address, server);
    // connect is initiated asynchronously; the map is populated immediately
    server.connect();
    return servers;
  }, new Map());
}
|
||||
|
||||
/**
 * Reconcile the topology's live `Server` instances with its (already updated)
 * TopologyDescription: refresh the changed server's description, create and
 * connect servers for newly discovered addresses, and destroy servers that
 * are no longer part of the topology.
 *
 * @param {Topology} topology the owning topology
 * @param {ServerDescription} currentServerDescription the description that triggered this update
 */
function updateServers(topology, currentServerDescription) {
  // update the internal server's description
  if (topology.s.servers.has(currentServerDescription.address)) {
    const server = topology.s.servers.get(currentServerDescription.address);
    server.s.description = currentServerDescription;
  }

  // add new servers for all descriptions we currently don't know about locally
  for (const serverDescription of topology.description.servers.values()) {
    if (!topology.s.servers.has(serverDescription.address)) {
      topology.emit(
        'serverOpening',
        new monitoring.ServerOpeningEvent(topology.s.id, serverDescription.address)
      );

      // same wiring as connectServers: relay heartbeats, feed descriptions
      // back into the topology, and connect asynchronously
      const server = new Server(serverDescription, topology.s.options);
      relayEvents(server, topology, [
        'serverHeartbeatStarted',
        'serverHeartbeatSucceeded',
        'serverHeartbeatFailed'
      ]);

      server.on('descriptionReceived', topology.serverUpdateHandler.bind(topology));
      server.on('connect', serverConnectEventHandler(server, topology));
      topology.s.servers.set(serverDescription.address, server);
      server.connect();
    }
  }

  // for all servers no longer known, remove their descriptions and destroy their instances
  for (const entry of topology.s.servers) {
    const serverAddress = entry[0];
    if (topology.description.hasServer(serverAddress)) {
      continue;
    }

    const server = topology.s.servers.get(serverAddress);
    topology.s.servers.delete(serverAddress);

    // emit serverClosed only after the server has fully shut down
    server.destroy(() =>
      topology.emit('serverClosed', new monitoring.ServerClosedEvent(topology.s.id, serverAddress))
    );
  }
}
|
||||
|
||||
/**
 * Build a per-server 'connect' listener that re-emits a topology-level
 * 'connect' event carrying the topology itself.
 *
 * @param {Server} server the server the listener is attached to (unused)
 * @param {Topology} topology the topology that should emit 'connect'
 * @return {function} the event listener
 */
function serverConnectEventHandler(server, topology) {
  return () => topology.emit('connect', topology);
}
|
||||
|
||||
/**
 * Execute a write operation ('insert' | 'update' | 'remove') against a
 * writable server, retrying at most once when retryable writes apply.
 *
 * @param {object} args { topology, op, ns, ops, [retrying] }
 * @param {object} [options] write options (writeConcern, session, retryWrites, ...)
 * @param {opResultCallback} callback A callback function
 */
function executeWriteOperation(args, options, callback) {
  if (typeof options === 'function') (callback = options), (options = {});
  options = options || {};

  // TODO: once we drop Node 4, use destructuring either here or in arguments.
  const topology = args.topology;
  const op = args.op;
  const ns = args.ns;
  const ops = args.ops;

  // retry only once (`args.retrying` blocks a second retry), and only when
  // the session/topology support retryable writes outside a transaction
  const willRetryWrite =
    !args.retrying &&
    options.retryWrites &&
    options.session &&
    isRetryableWritesSupported(topology) &&
    !options.session.inTransaction();

  topology.selectServer(writableServerSelector(), (err, server) => {
    if (err) {
      callback(err, null);
      return;
    }

    const handler = (err, result) => {
      if (!err) return callback(null, result);

      // only network errors and "not master" errors are retryable; guard the
      // message check because some errors carry no `message`, and calling
      // `.match` on undefined would throw and mask the original error
      const isNotMasterError = typeof err.message === 'string' && /not master/.test(err.message);
      if (!(err instanceof MongoNetworkError) && !isNotMasterError) {
        return callback(err);
      }

      if (willRetryWrite) {
        const newArgs = Object.assign({}, args, { retrying: true });
        return executeWriteOperation(newArgs, options, callback);
      }

      return callback(err);
    };

    // preserve APM correlation ids across the wrapped handler
    if (callback.operationId) {
      handler.operationId = callback.operationId;
    }

    // increment and assign txnNumber
    if (willRetryWrite) {
      options.session.incrementTransactionNumber();
      options.willRetryWrite = willRetryWrite;
    }

    // execute the write operation
    server[op](ns, ops, options, handler);

    // we need to increment the statement id if we're in a transaction
    if (options.session && options.session.inTransaction()) {
      options.session.incrementStatementId(ops.length);
    }
  });
}
|
||||
|
||||
/**
|
||||
* A server opening SDAM monitoring event
|
||||
*
|
||||
* @event Topology#serverOpening
|
||||
* @type {ServerOpeningEvent}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A server closed SDAM monitoring event
|
||||
*
|
||||
* @event Topology#serverClosed
|
||||
* @type {ServerClosedEvent}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A server description SDAM change monitoring event
|
||||
*
|
||||
* @event Topology#serverDescriptionChanged
|
||||
* @type {ServerDescriptionChangedEvent}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A topology open SDAM event
|
||||
*
|
||||
* @event Topology#topologyOpening
|
||||
* @type {TopologyOpeningEvent}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A topology closed SDAM event
|
||||
*
|
||||
* @event Topology#topologyClosed
|
||||
* @type {TopologyClosedEvent}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A topology structure SDAM change event
|
||||
*
|
||||
* @event Topology#topologyDescriptionChanged
|
||||
* @type {TopologyDescriptionChangedEvent}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A topology serverHeartbeatStarted SDAM event
|
||||
*
|
||||
* @event Topology#serverHeartbeatStarted
|
||||
* @type {ServerHeartbeatStartedEvent}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A topology serverHeartbeatFailed SDAM event
|
||||
*
|
||||
* @event Topology#serverHeartbeatFailed
|
||||
* @type {ServerHearbeatFailedEvent}
|
||||
*/
|
||||
|
||||
/**
|
||||
* A topology serverHeartbeatSucceeded SDAM change event
|
||||
*
|
||||
* @event Topology#serverHeartbeatSucceeded
|
||||
* @type {ServerHeartbeatSucceededEvent}
|
||||
*/
|
||||
|
||||
// the Topology class is this module's sole export
module.exports = Topology;
|
364
node_modules/mongodb-core/lib/sdam/topology_description.js
generated
vendored
Normal file
364
node_modules/mongodb-core/lib/sdam/topology_description.js
generated
vendored
Normal file
@@ -0,0 +1,364 @@
|
||||
'use strict';
|
||||
const ServerType = require('./server_description').ServerType;
|
||||
const ServerDescription = require('./server_description').ServerDescription;
|
||||
const ReadPreference = require('../topologies/read_preference');
|
||||
|
||||
// constants related to compatibility checks; servers outside the
// [MIN_SUPPORTED_WIRE_VERSION, MAX_SUPPORTED_WIRE_VERSION] window are
// flagged as incompatible by TopologyDescription
const MIN_SUPPORTED_SERVER_VERSION = '2.6';
const MIN_SUPPORTED_WIRE_VERSION = 2;
const MAX_SUPPORTED_WIRE_VERSION = 5;

// An enumeration of topology types we know about
const TopologyType = {
  Single: 'Single',
  ReplicaSetNoPrimary: 'ReplicaSetNoPrimary',
  ReplicaSetWithPrimary: 'ReplicaSetWithPrimary',
  Sharded: 'Sharded',
  Unknown: 'Unknown'
};
|
||||
|
||||
// Representation of a deployment of servers
class TopologyDescription {
  /**
   * Create a TopologyDescription
   *
   * @param {string} topologyType one of the `TopologyType` values; defaults to Unknown
   * @param {Map<string, ServerDescription>} serverDescriptions a map of address to ServerDescription
   * @param {string} setName the replica set name, if known
   * @param {number} maxSetVersion the highest setVersion seen from a primary
   * @param {ObjectId} maxElectionId the highest electionId seen from a primary
   * @param {object} [options] optional settings (heartbeatFrequencyMS, localThresholdMS, ...)
   */
  constructor(topologyType, serverDescriptions, setName, maxSetVersion, maxElectionId, options) {
    options = options || {};

    // TODO: consider assigning all these values to a temporary value `s` which
    // we use `Object.freeze` on, ensuring the internal state of this type
    // is immutable.
    this.type = topologyType || TopologyType.Unknown;
    this.setName = setName || null;
    this.maxSetVersion = maxSetVersion || null;
    this.maxElectionId = maxElectionId || null;
    this.servers = serverDescriptions || new Map();
    this.stale = false;
    this.compatible = true;
    this.compatibilityError = null;
    this.logicalSessionTimeoutMinutes = null;
    this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 0;
    this.localThresholdMS = options.localThresholdMS || 0;
    this.options = options;

    // determine server compatibility: any server outside the supported wire
    // version window marks the whole topology incompatible
    for (const serverDescription of this.servers.values()) {
      if (serverDescription.minWireVersion > MAX_SUPPORTED_WIRE_VERSION) {
        this.compatible = false;
        this.compatibilityError = `Server at ${serverDescription.address} requires wire version ${
          serverDescription.minWireVersion
        }, but this version of the driver only supports up to ${MAX_SUPPORTED_WIRE_VERSION}.`;
      }

      if (serverDescription.maxWireVersion < MIN_SUPPORTED_WIRE_VERSION) {
        this.compatible = false;
        this.compatibilityError = `Server at ${serverDescription.address} reports wire version ${
          serverDescription.maxWireVersion
        }, but this version of the driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION}).`;
        break;
      }
    }

    // Whenever a client updates the TopologyDescription from an ismaster response, it MUST set
    // TopologyDescription.logicalSessionTimeoutMinutes to the smallest logicalSessionTimeoutMinutes
    // value among ServerDescriptions of all data-bearing server types. If any have a null
    // logicalSessionTimeoutMinutes, then TopologyDescription.logicalSessionTimeoutMinutes MUST be
    // set to null.
    //
    // FIX: the previous `reduce` used `null` both as the initial accumulator
    // and as the "a server reported null" sentinel, so a null report could be
    // overwritten by a later server's value. Use an explicit loop with an
    // early exit so a single null report is final.
    const readableServers = Array.from(this.servers.values()).filter(s => s.isReadable);
    this.logicalSessionTimeoutMinutes = null;
    for (const server of readableServers) {
      if (server.logicalSessionTimeoutMinutes == null) {
        this.logicalSessionTimeoutMinutes = null;
        break;
      }

      if (this.logicalSessionTimeoutMinutes == null) {
        this.logicalSessionTimeoutMinutes = server.logicalSessionTimeoutMinutes;
        continue;
      }

      this.logicalSessionTimeoutMinutes = Math.min(
        this.logicalSessionTimeoutMinutes,
        server.logicalSessionTimeoutMinutes
      );
    }
  }

  /**
   * @returns The minimum reported wire version of all known servers
   */
  get commonWireVersion() {
    return Array.from(this.servers.values())
      .filter(server => server.type !== ServerType.Unknown)
      .reduce(
        (min, server) =>
          min == null ? server.maxWireVersion : Math.min(min, server.maxWireVersion),
        null
      );
  }

  /**
   * Returns a copy of this description updated with a given ServerDescription.
   * This instance is never mutated; the SDAM state transitions produce a new
   * TopologyDescription.
   *
   * @param {ServerDescription} serverDescription
   */
  update(serverDescription) {
    const address = serverDescription.address;
    // NOTE: there are a number of prime targets for refactoring here
    // once we support destructuring assignments

    // potentially mutated values
    let topologyType = this.type;
    let setName = this.setName;
    let maxSetVersion = this.maxSetVersion;
    let maxElectionId = this.maxElectionId;

    const serverType = serverDescription.type;
    let serverDescriptions = new Map(this.servers);

    // update the actual server description
    serverDescriptions.set(address, serverDescription);

    if (topologyType === TopologyType.Single) {
      // once we are defined as single, that never changes
      return new TopologyDescription(
        TopologyType.Single,
        serverDescriptions,
        setName,
        maxSetVersion,
        maxElectionId,
        this.options
      );
    }

    if (topologyType === TopologyType.Unknown) {
      if (serverType === ServerType.Standalone) {
        serverDescriptions.delete(address);
      } else {
        topologyType = topologyTypeForServerType(serverType);
      }
    }

    if (topologyType === TopologyType.Sharded) {
      // a sharded topology only keeps mongos (or still-unknown) members
      if ([ServerType.Mongos, ServerType.Unknown].indexOf(serverType) === -1) {
        serverDescriptions.delete(address);
      }
    }

    if (topologyType === TopologyType.ReplicaSetNoPrimary) {
      if ([ServerType.Mongos, ServerType.Unknown].indexOf(serverType) >= 0) {
        serverDescriptions.delete(address);
      }

      if (serverType === ServerType.RSPrimary) {
        const result = updateRsFromPrimary(
          serverDescriptions,
          setName,
          serverDescription,
          maxSetVersion,
          maxElectionId
        );

        (topologyType = result[0]),
          (setName = result[1]),
          (maxSetVersion = result[2]),
          (maxElectionId = result[3]);
      } else if (
        [ServerType.RSSecondary, ServerType.RSArbiter, ServerType.RSOther].indexOf(serverType) >= 0
      ) {
        const result = updateRsNoPrimaryFromMember(serverDescriptions, setName, serverDescription);
        (topologyType = result[0]), (setName = result[1]);
      }
    }

    if (topologyType === TopologyType.ReplicaSetWithPrimary) {
      if ([ServerType.Standalone, ServerType.Mongos].indexOf(serverType) >= 0) {
        serverDescriptions.delete(address);
        topologyType = checkHasPrimary(serverDescriptions);
      } else if (serverType === ServerType.RSPrimary) {
        const result = updateRsFromPrimary(
          serverDescriptions,
          setName,
          serverDescription,
          maxSetVersion,
          maxElectionId
        );

        (topologyType = result[0]),
          (setName = result[1]),
          (maxSetVersion = result[2]),
          (maxElectionId = result[3]);
      } else if (
        [ServerType.RSSecondary, ServerType.RSArbiter, ServerType.RSOther].indexOf(serverType) >= 0
      ) {
        topologyType = updateRsWithPrimaryFromMember(
          serverDescriptions,
          setName,
          serverDescription
        );
      } else {
        topologyType = checkHasPrimary(serverDescriptions);
      }
    }

    return new TopologyDescription(
      topologyType,
      serverDescriptions,
      setName,
      maxSetVersion,
      maxElectionId,
      this.options
    );
  }

  /**
   * Determines if the topology has a readable server available. See the table in the
   * following section for behaviour rules.
   *
   * NOTE(review): not yet implemented; currently always returns undefined.
   *
   * @param {ReadPreference} [readPreference] An optional read preference for determining if a readable server is present
   * @return {Boolean} Whether there is a readable server in this topology
   */
  hasReadableServer(/* readPreference */) {
    // To be implemented when server selection is implemented
  }

  /**
   * Determines if the topology has a writable server available. See the table in the
   * following section for behaviour rules.
   *
   * NOTE(review): delegates to the unimplemented hasReadableServer, so this
   * currently returns undefined as well.
   *
   * @return {Boolean} Whether there is a writable server in this topology
   */
  hasWritableServer() {
    return this.hasReadableServer(ReadPreference.primary);
  }

  /**
   * Determines if the topology has a definition for the provided address
   *
   * @param {String} address
   * @return {Boolean} Whether the topology knows about this server
   */
  hasServer(address) {
    return this.servers.has(address);
  }
}
|
||||
|
||||
/**
 * Map a single server's type to the topology type it implies when the
 * current topology type is Unknown.
 *
 * @param {string} serverType one of the `ServerType` values
 * @return {string} the implied `TopologyType`
 */
function topologyTypeForServerType(serverType) {
  switch (serverType) {
    case ServerType.Mongos:
      return TopologyType.Sharded;
    case ServerType.RSPrimary:
      return TopologyType.ReplicaSetWithPrimary;
    default:
      // secondaries, arbiters and other RS members imply a set without a
      // known primary
      return TopologyType.ReplicaSetNoPrimary;
  }
}
|
||||
|
||||
/**
 * SDAM transition: apply a primary's description to the replica set state.
 *
 * Verifies the primary belongs to our set, rejects stale primaries using
 * (setVersion, electionId), demotes any previously known primary, adds hosts
 * the primary reports, and removes hosts it does not.
 *
 * @param {Map<string, ServerDescription>} serverDescriptions mutated in place
 * @param {string} setName the current set name (may be null on first primary)
 * @param {ServerDescription} serverDescription the incoming primary description
 * @param {number} maxSetVersion highest setVersion seen so far
 * @param {ObjectId} maxElectionId highest electionId seen so far
 * @return {Array} [topologyType, setName, maxSetVersion, maxElectionId]
 */
function updateRsFromPrimary(
  serverDescriptions,
  setName,
  serverDescription,
  maxSetVersion,
  maxElectionId
) {
  setName = setName || serverDescription.setName;
  // a primary from a different set cannot be part of this topology
  if (setName !== serverDescription.setName) {
    serverDescriptions.delete(serverDescription.address);
    return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
  }

  const electionIdOID = serverDescription.electionId ? serverDescription.electionId.$oid : null;
  const maxElectionIdOID = maxElectionId ? maxElectionId.$oid : null;
  if (serverDescription.setVersion != null && electionIdOID != null) {
    if (maxSetVersion != null && maxElectionIdOID != null) {
      // NOTE(review): `$oid` values appear to be compared as hex strings
      // here; this orders correctly only if both strings have equal length —
      // confirm against ServerDescription's electionId representation
      if (maxSetVersion > serverDescription.setVersion || maxElectionIdOID > electionIdOID) {
        // this primary is stale, we must remove it
        serverDescriptions.set(
          serverDescription.address,
          new ServerDescription(serverDescription.address)
        );

        return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
      }
    }

    maxElectionId = serverDescription.electionId;
  }

  if (
    serverDescription.setVersion != null &&
    (maxSetVersion == null || serverDescription.setVersion > maxSetVersion)
  ) {
    maxSetVersion = serverDescription.setVersion;
  }

  // We've heard from the primary. Is it the same primary as before?
  for (const address of serverDescriptions.keys()) {
    const server = serverDescriptions.get(address);

    if (server.type === ServerType.RSPrimary && server.address !== serverDescription.address) {
      // Reset old primary's type to Unknown.
      serverDescriptions.set(address, new ServerDescription(server.address));

      // There can only be one primary
      break;
    }
  }

  // Discover new hosts from this primary's response.
  serverDescription.allHosts.forEach(address => {
    if (!serverDescriptions.has(address)) {
      serverDescriptions.set(address, new ServerDescription(address));
    }
  });

  // Remove hosts not in the response.
  const currentAddresses = Array.from(serverDescriptions.keys());
  const responseAddresses = serverDescription.allHosts;
  currentAddresses.filter(addr => responseAddresses.indexOf(addr) === -1).forEach(address => {
    serverDescriptions.delete(address);
  });

  return [checkHasPrimary(serverDescriptions), setName, maxSetVersion, maxElectionId];
}
|
||||
|
||||
/**
 * Reconcile a non-primary member's response while a primary is already known.
 * Removes the member when it reports a different set name, or when its
 * address does not match the `me` field it reports for itself.
 *
 * @param {Map<string, ServerDescription>} serverDescriptions map of address -> description, mutated in place
 * @param {string} setName the established replica set name (required)
 * @param {ServerDescription} serverDescription the member's latest description
 * @returns {string} the resulting `TopologyType`
 * @throws {TypeError} when `setName` is null or undefined
 */
function updateRsWithPrimaryFromMember(serverDescriptions, setName, serverDescription) {
  if (setName == null) {
    throw new TypeError('setName is required');
  }

  const wrongSet = setName !== serverDescription.setName;
  const addressMismatch =
    serverDescription.me && serverDescription.address !== serverDescription.me;
  if (wrongSet || addressMismatch) {
    serverDescriptions.delete(serverDescription.address);
  }

  return checkHasPrimary(serverDescriptions);
}
|
||||
|
||||
/**
 * Reconcile a member's response while no primary is known. Adopts the set
 * name on first contact, discovers the hosts the member lists, and removes
 * the member itself when its set name or reported `me` address conflicts.
 *
 * @param {Map<string, ServerDescription>} serverDescriptions map of address -> description, mutated in place
 * @param {?string} setName replica set name seen so far (null on first discovery)
 * @param {ServerDescription} serverDescription the member's latest description
 * @returns {Array} `[topologyType, setName]` — topology type is always ReplicaSetNoPrimary
 */
function updateRsNoPrimaryFromMember(serverDescriptions, setName, serverDescription) {
  const topologyType = TopologyType.ReplicaSetNoPrimary;

  setName = setName || serverDescription.setName;
  if (setName !== serverDescription.setName) {
    // Member belongs to a different replica set; drop it.
    serverDescriptions.delete(serverDescription.address);
    return [topologyType, setName];
  }

  // Discover any hosts this member knows about.
  for (const address of serverDescription.allHosts) {
    if (!serverDescriptions.has(address)) {
      serverDescriptions.set(address, new ServerDescription(address));
    }
  }

  // The member's reported identity must match the address we contacted.
  if (serverDescription.me && serverDescription.address !== serverDescription.me) {
    serverDescriptions.delete(serverDescription.address);
  }

  return [topologyType, setName];
}
|
||||
|
||||
/**
 * Determine whether any known server currently reports itself as primary.
 *
 * @param {Map<string, ServerDescription>} serverDescriptions map of address -> description
 * @returns {string} `ReplicaSetWithPrimary` if a primary exists, else `ReplicaSetNoPrimary`
 */
function checkHasPrimary(serverDescriptions) {
  for (const description of serverDescriptions.values()) {
    if (description.type === ServerType.RSPrimary) {
      return TopologyType.ReplicaSetWithPrimary;
    }
  }

  return TopologyType.ReplicaSetNoPrimary;
}
|
||||
|
||||
// Public surface of this module: the topology-type constants and the
// immutable TopologyDescription value object.
module.exports = { TopologyType, TopologyDescription };
|
Reference in New Issue
Block a user