Commit With Frontend and Backend in MERN

sanikapendurkar
2025-02-10 14:24:56 +05:30
commit 0f4e1a3183
2518 changed files with 448667 additions and 0 deletions


@@ -0,0 +1,77 @@
import type { Document } from '../../bson';
import { MongoRuntimeError } from '../../error';
import type { HandshakeDocument } from '../connect';
import type { Connection, ConnectionOptions } from '../connection';
import type { MongoCredentials } from './mongo_credentials';
/**
* Context used during authentication
* @internal
*/
export class AuthContext {
/** The connection to authenticate */
connection: Connection;
/** The credentials to use for authentication */
credentials?: MongoCredentials;
/** If the context is for reauthentication. */
reauthenticating = false;
/** The options passed to the `connect` method */
options: ConnectionOptions;
/** A response from an initial auth attempt; only some mechanisms use this (e.g. SCRAM) */
response?: Document;
/** A random nonce generated for use in an authentication conversation */
nonce?: Buffer;
constructor(
connection: Connection,
credentials: MongoCredentials | undefined,
options: ConnectionOptions
) {
this.connection = connection;
this.credentials = credentials;
this.options = options;
}
}
/**
* Provider used during authentication.
* @internal
*/
export abstract class AuthProvider {
/**
* Prepare the handshake document before the initial handshake.
*
* @param handshakeDoc - The document used for the initial handshake on a connection
* @param authContext - Context for authentication flow
*/
async prepare(
handshakeDoc: HandshakeDocument,
_authContext: AuthContext
): Promise<HandshakeDocument> {
return handshakeDoc;
}
/**
* Authenticate
*
* @param context - A shared context for authentication flow
*/
abstract auth(context: AuthContext): Promise<void>;
/**
* Reauthenticate.
* @param context - The shared auth context.
*/
async reauth(context: AuthContext): Promise<void> {
if (context.reauthenticating) {
throw new MongoRuntimeError('Reauthentication already in progress.');
}
try {
context.reauthenticating = true;
await this.auth(context);
} finally {
context.reauthenticating = false;
}
}
}
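
For orientation, a minimal sketch of a custom provider built on the AuthContext/AuthProvider contract above (a hypothetical subclass, not part of the driver; the relative import assumes this module's path):

import { type AuthContext, AuthProvider } from './auth_provider';

/** A no-op provider that only illustrates the contract. */
class NoopAuthProvider extends AuthProvider {
  override async auth(context: AuthContext): Promise<void> {
    // A real provider would run its SASL conversation on context.connection
    // using context.credentials; this sketch only checks that they are present.
    if (!context.credentials) {
      throw new Error('NoopAuthProvider requires credentials');
    }
  }
}

The inherited reauth method wraps auth: it sets reauthenticating on the shared context and clears it in a finally block, so a failed retry never leaves the flag set.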


@@ -0,0 +1,169 @@
import { type AWSCredentials, getAwsCredentialProvider } from '../../deps';
import { MongoAWSError } from '../../error';
import { request } from '../../utils';
const AWS_RELATIVE_URI = 'http://169.254.170.2';
const AWS_EC2_URI = 'http://169.254.169.254';
const AWS_EC2_PATH = '/latest/meta-data/iam/security-credentials';
/**
* @internal
* This interface matches the final result of fetching temporary credentials manually, outlined
* in the spec [here](https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#ec2-endpoint).
*
* When we use the AWS SDK, we map the response from the SDK to conform to this interface.
*/
export interface AWSTempCredentials {
AccessKeyId?: string;
SecretAccessKey?: string;
Token?: string;
RoleArn?: string;
Expiration?: Date;
}
/**
* @internal
*
* Fetches temporary AWS credentials.
*/
export abstract class AWSTemporaryCredentialProvider {
abstract getCredentials(): Promise<AWSTempCredentials>;
private static _awsSDK: ReturnType<typeof getAwsCredentialProvider>;
protected static get awsSDK() {
AWSTemporaryCredentialProvider._awsSDK ??= getAwsCredentialProvider();
return AWSTemporaryCredentialProvider._awsSDK;
}
static get isAWSSDKInstalled(): boolean {
return !('kModuleError' in AWSTemporaryCredentialProvider.awsSDK);
}
}
/** @internal */
export class AWSSDKCredentialProvider extends AWSTemporaryCredentialProvider {
private _provider?: () => Promise<AWSCredentials>;
/**
* The AWS SDK caches credentials automatically and handles refresh when the credentials have expired.
* To ensure this occurs, we need to cache the `provider` returned by the AWS sdk and re-use it when fetching credentials.
*/
private get provider(): () => Promise<AWSCredentials> {
if ('kModuleError' in AWSTemporaryCredentialProvider.awsSDK) {
throw AWSTemporaryCredentialProvider.awsSDK.kModuleError;
}
if (this._provider) {
return this._provider;
}
let { AWS_STS_REGIONAL_ENDPOINTS = '', AWS_REGION = '' } = process.env;
AWS_STS_REGIONAL_ENDPOINTS = AWS_STS_REGIONAL_ENDPOINTS.toLowerCase();
AWS_REGION = AWS_REGION.toLowerCase();
/** The option setting should work only for users who have explicit settings in their environment; the driver should not encode "defaults". */
const awsRegionSettingsExist =
AWS_REGION.length !== 0 && AWS_STS_REGIONAL_ENDPOINTS.length !== 0;
/**
* The following regions use the global AWS STS endpoint, sts.amazonaws.com, by default
* https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html
*/
const LEGACY_REGIONS = new Set([
'ap-northeast-1',
'ap-south-1',
'ap-southeast-1',
'ap-southeast-2',
'aws-global',
'ca-central-1',
'eu-central-1',
'eu-north-1',
'eu-west-1',
'eu-west-2',
'eu-west-3',
'sa-east-1',
'us-east-1',
'us-east-2',
'us-west-1',
'us-west-2'
]);
/**
* If AWS_STS_REGIONAL_ENDPOINTS is set to regional, users are opting into the new behavior of respecting the region settings
*
* If AWS_STS_REGIONAL_ENDPOINTS is set to legacy, then "old" regions need to keep using the global setting.
* Technically the SDK gets this wrong, it reaches out to 'sts.us-east-1.amazonaws.com' when it should be 'sts.amazonaws.com'.
* That is not our bug to fix here. We leave that up to the SDK.
*/
const useRegionalSts =
AWS_STS_REGIONAL_ENDPOINTS === 'regional' ||
(AWS_STS_REGIONAL_ENDPOINTS === 'legacy' && !LEGACY_REGIONS.has(AWS_REGION));
this._provider =
awsRegionSettingsExist && useRegionalSts
? AWSTemporaryCredentialProvider.awsSDK.fromNodeProviderChain({
clientConfig: { region: AWS_REGION }
})
: AWSTemporaryCredentialProvider.awsSDK.fromNodeProviderChain();
return this._provider;
}
override async getCredentials(): Promise<AWSTempCredentials> {
/*
* Creates a credential provider that will attempt to find credentials from the
* following sources (listed in order of precedence):
*
* - Environment variables exposed via process.env
* - SSO credentials from token cache
* - Web identity token credentials
* - Shared credentials and config ini files
* - The EC2/ECS Instance Metadata Service
*/
try {
const creds = await this.provider();
return {
AccessKeyId: creds.accessKeyId,
SecretAccessKey: creds.secretAccessKey,
Token: creds.sessionToken,
Expiration: creds.expiration
};
} catch (error) {
throw new MongoAWSError(error.message, { cause: error });
}
}
}
/**
* @internal
* Fetches credentials manually (without the AWS SDK), as outlined in the [Obtaining Credentials](https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#obtaining-credentials)
* section of the Auth spec.
*/
export class LegacyAWSTemporaryCredentialProvider extends AWSTemporaryCredentialProvider {
override async getCredentials(): Promise<AWSTempCredentials> {
// If the environment variable AWS_CONTAINER_CREDENTIALS_RELATIVE_URI
// is set then drivers MUST assume that it was set by an AWS ECS agent
if (process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI) {
return await request(
`${AWS_RELATIVE_URI}${process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI}`
);
}
// Otherwise assume we are on an EC2 instance
// get a token
const token = await request(`${AWS_EC2_URI}/latest/api/token`, {
method: 'PUT',
json: false,
headers: { 'X-aws-ec2-metadata-token-ttl-seconds': 30 }
});
// get role name
const roleName = await request(`${AWS_EC2_URI}/${AWS_EC2_PATH}`, {
json: false,
headers: { 'X-aws-ec2-metadata-token': token }
});
// get temp credentials
const creds = await request(`${AWS_EC2_URI}/${AWS_EC2_PATH}/${roleName}`, {
headers: { 'X-aws-ec2-metadata-token': token }
});
return creds;
}
}
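
A short sketch of how a caller chooses between the two providers above, mirroring the selection the MONGODB-AWS auth provider performs (illustrative only):

import {
  AWSSDKCredentialProvider,
  type AWSTempCredentials,
  AWSTemporaryCredentialProvider,
  LegacyAWSTemporaryCredentialProvider
} from './aws_temporary_credentials';

async function fetchTempCredentials(): Promise<AWSTempCredentials> {
  // Prefer the AWS SDK provider when the optional SDK dependency is installed;
  // otherwise fall back to the manual ECS/EC2 metadata requests above.
  const provider: AWSTemporaryCredentialProvider =
    AWSTemporaryCredentialProvider.isAWSSDKInstalled
      ? new AWSSDKCredentialProvider()
      : new LegacyAWSTemporaryCredentialProvider();
  return await provider.getCredentials();
}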

backend/node_modules/mongodb/src/cmap/auth/gssapi.ts

@@ -0,0 +1,202 @@
import * as dns from 'dns';
import { getKerberos, type Kerberos, type KerberosClient } from '../../deps';
import { MongoInvalidArgumentError, MongoMissingCredentialsError } from '../../error';
import { ns } from '../../utils';
import type { Connection } from '../connection';
import { type AuthContext, AuthProvider } from './auth_provider';
/** @public */
export const GSSAPICanonicalizationValue = Object.freeze({
on: true,
off: false,
none: 'none',
forward: 'forward',
forwardAndReverse: 'forwardAndReverse'
} as const);
/** @public */
export type GSSAPICanonicalizationValue =
(typeof GSSAPICanonicalizationValue)[keyof typeof GSSAPICanonicalizationValue];
type MechanismProperties = {
CANONICALIZE_HOST_NAME?: GSSAPICanonicalizationValue;
SERVICE_HOST?: string;
SERVICE_NAME?: string;
SERVICE_REALM?: string;
};
async function externalCommand(
connection: Connection,
command: ReturnType<typeof saslStart> | ReturnType<typeof saslContinue>
): Promise<{ payload: string; conversationId: number }> {
const response = await connection.command(ns('$external.$cmd'), command);
return response as { payload: string; conversationId: number };
}
let krb: Kerberos;
export class GSSAPI extends AuthProvider {
override async auth(authContext: AuthContext): Promise<void> {
const { connection, credentials } = authContext;
if (credentials == null) {
throw new MongoMissingCredentialsError('Credentials required for GSSAPI authentication');
}
const { username } = credentials;
const client = await makeKerberosClient(authContext);
const payload = await client.step('');
const saslStartResponse = await externalCommand(connection, saslStart(payload));
const negotiatedPayload = await negotiate(client, 10, saslStartResponse.payload);
const saslContinueResponse = await externalCommand(
connection,
saslContinue(negotiatedPayload, saslStartResponse.conversationId)
);
const finalizePayload = await finalize(client, username, saslContinueResponse.payload);
await externalCommand(connection, {
saslContinue: 1,
conversationId: saslContinueResponse.conversationId,
payload: finalizePayload
});
}
}
async function makeKerberosClient(authContext: AuthContext): Promise<KerberosClient> {
const { hostAddress } = authContext.options;
const { credentials } = authContext;
if (!hostAddress || typeof hostAddress.host !== 'string' || !credentials) {
throw new MongoInvalidArgumentError(
'Connection must have host and port and credentials defined.'
);
}
loadKrb();
if ('kModuleError' in krb) {
throw krb['kModuleError'];
}
const { initializeClient } = krb;
const { username, password } = credentials;
const mechanismProperties = credentials.mechanismProperties as MechanismProperties;
const serviceName = mechanismProperties.SERVICE_NAME ?? 'mongodb';
const host = await performGSSAPICanonicalizeHostName(hostAddress.host, mechanismProperties);
const initOptions = {};
if (password != null) {
// TODO(NODE-5139): These do not match the typescript options in initializeClient
Object.assign(initOptions, { user: username, password: password });
}
const spnHost = mechanismProperties.SERVICE_HOST ?? host;
let spn = `${serviceName}${process.platform === 'win32' ? '/' : '@'}${spnHost}`;
if ('SERVICE_REALM' in mechanismProperties) {
spn = `${spn}@${mechanismProperties.SERVICE_REALM}`;
}
return await initializeClient(spn, initOptions);
}
function saslStart(payload: string) {
return {
saslStart: 1,
mechanism: 'GSSAPI',
payload,
autoAuthorize: 1
} as const;
}
function saslContinue(payload: string, conversationId: number) {
return {
saslContinue: 1,
conversationId,
payload
} as const;
}
async function negotiate(
client: KerberosClient,
retries: number,
payload: string
): Promise<string> {
try {
const response = await client.step(payload);
return response || '';
} catch (error) {
if (retries === 0) {
// Retries exhausted, raise error
throw error;
}
// Adjust number of retries and call step again
return await negotiate(client, retries - 1, payload);
}
}
async function finalize(client: KerberosClient, user: string, payload: string): Promise<string> {
// GSS Client Unwrap
const response = await client.unwrap(payload);
return await client.wrap(response || '', { user });
}
export async function performGSSAPICanonicalizeHostName(
host: string,
mechanismProperties: MechanismProperties
): Promise<string> {
const mode = mechanismProperties.CANONICALIZE_HOST_NAME;
if (!mode || mode === GSSAPICanonicalizationValue.none) {
return host;
}
// If forward and reverse or true
if (
mode === GSSAPICanonicalizationValue.on ||
mode === GSSAPICanonicalizationValue.forwardAndReverse
) {
// Perform the lookup of the ip address.
const { address } = await dns.promises.lookup(host);
try {
// Perform a reverse ptr lookup on the ip address.
const results = await dns.promises.resolvePtr(address);
// If the ptr did not error but had no results, return the host.
return results.length > 0 ? results[0] : host;
} catch {
// This can error as ptr records may not exist for all ips. In this case
// fallback to a cname lookup as dns.lookup() does not return the
// cname.
return await resolveCname(host);
}
} else {
// The case for forward is just to resolve the cname as dns.lookup()
// will not return it.
return await resolveCname(host);
}
}
export async function resolveCname(host: string): Promise<string> {
// Attempt to resolve the host name
try {
const results = await dns.promises.resolveCname(host);
// Get the first resolved host id
return results.length > 0 ? results[0] : host;
} catch {
return host;
}
}
/**
* Load the Kerberos library.
*/
function loadKrb() {
if (!krb) {
krb = getKerberos();
}
}
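
The canonicalization helpers above are exported, so their behaviour can be sketched directly (the host name below is a hypothetical example):

import { GSSAPICanonicalizationValue, performGSSAPICanonicalizeHostName } from './gssapi';

async function canonicalizeExample(): Promise<string> {
  // 'none' (or an unset property) returns the host unchanged; 'forward' resolves
  // the CNAME; true or 'forwardAndReverse' additionally attempts a reverse PTR
  // lookup and falls back to the CNAME when no PTR record exists.
  return await performGSSAPICanonicalizeHostName('mongo.example.internal', {
    CANONICALIZE_HOST_NAME: GSSAPICanonicalizationValue.forwardAndReverse
  });
}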


@@ -0,0 +1,282 @@
import type { Document } from '../../bson';
import {
MongoAPIError,
MongoInvalidArgumentError,
MongoMissingCredentialsError
} from '../../error';
import { GSSAPICanonicalizationValue } from './gssapi';
import type { OIDCCallbackFunction } from './mongodb_oidc';
import { AUTH_MECHS_AUTH_SRC_EXTERNAL, AuthMechanism } from './providers';
/**
* @see https://github.com/mongodb/specifications/blob/master/source/auth/auth.md
*/
function getDefaultAuthMechanism(hello: Document | null): AuthMechanism {
if (hello) {
// If hello contains saslSupportedMechs, use scram-sha-256
// if it is available, else scram-sha-1
if (Array.isArray(hello.saslSupportedMechs)) {
return hello.saslSupportedMechs.includes(AuthMechanism.MONGODB_SCRAM_SHA256)
? AuthMechanism.MONGODB_SCRAM_SHA256
: AuthMechanism.MONGODB_SCRAM_SHA1;
}
}
// Default auth mechanism for 4.0 and higher.
return AuthMechanism.MONGODB_SCRAM_SHA256;
}
const ALLOWED_ENVIRONMENT_NAMES: AuthMechanismProperties['ENVIRONMENT'][] = [
'test',
'azure',
'gcp',
'k8s'
];
const ALLOWED_HOSTS_ERROR = 'Auth mechanism property ALLOWED_HOSTS must be an array of strings.';
/** @internal */
export const DEFAULT_ALLOWED_HOSTS = [
'*.mongodb.net',
'*.mongodb-qa.net',
'*.mongodb-dev.net',
'*.mongodbgov.net',
'localhost',
'127.0.0.1',
'::1'
];
/** Error for when the token audience is missing in the environment. */
const TOKEN_RESOURCE_MISSING_ERROR =
'TOKEN_RESOURCE must be set in the auth mechanism properties when ENVIRONMENT is azure or gcp.';
/** @public */
export interface AuthMechanismProperties extends Document {
SERVICE_HOST?: string;
SERVICE_NAME?: string;
SERVICE_REALM?: string;
CANONICALIZE_HOST_NAME?: GSSAPICanonicalizationValue;
AWS_SESSION_TOKEN?: string;
/** A user provided OIDC machine callback function. */
OIDC_CALLBACK?: OIDCCallbackFunction;
/** A user provided OIDC human interacted callback function. */
OIDC_HUMAN_CALLBACK?: OIDCCallbackFunction;
/** The OIDC environment. Note that 'test' is for internal use only. */
ENVIRONMENT?: 'test' | 'azure' | 'gcp' | 'k8s';
/** Allowed hosts that OIDC auth can connect to. */
ALLOWED_HOSTS?: string[];
/** The resource token for OIDC auth in Azure and GCP. */
TOKEN_RESOURCE?: string;
}
/** @public */
export interface MongoCredentialsOptions {
username?: string;
password: string;
source: string;
db?: string;
mechanism?: AuthMechanism;
mechanismProperties: AuthMechanismProperties;
}
/**
* A representation of the credentials used by MongoDB
* @public
*/
export class MongoCredentials {
/** The username used for authentication */
readonly username: string;
/** The password used for authentication */
readonly password: string;
/** The database that the user should authenticate against */
readonly source: string;
/** The method used to authenticate */
readonly mechanism: AuthMechanism;
/** Special properties used by some types of auth mechanisms */
readonly mechanismProperties: AuthMechanismProperties;
constructor(options: MongoCredentialsOptions) {
this.username = options.username ?? '';
this.password = options.password;
this.source = options.source;
if (!this.source && options.db) {
this.source = options.db;
}
this.mechanism = options.mechanism || AuthMechanism.MONGODB_DEFAULT;
this.mechanismProperties = options.mechanismProperties || {};
if (this.mechanism.match(/MONGODB-AWS/i)) {
if (!this.username && process.env.AWS_ACCESS_KEY_ID) {
this.username = process.env.AWS_ACCESS_KEY_ID;
}
if (!this.password && process.env.AWS_SECRET_ACCESS_KEY) {
this.password = process.env.AWS_SECRET_ACCESS_KEY;
}
if (
this.mechanismProperties.AWS_SESSION_TOKEN == null &&
process.env.AWS_SESSION_TOKEN != null
) {
this.mechanismProperties = {
...this.mechanismProperties,
AWS_SESSION_TOKEN: process.env.AWS_SESSION_TOKEN
};
}
}
if (this.mechanism === AuthMechanism.MONGODB_OIDC && !this.mechanismProperties.ALLOWED_HOSTS) {
this.mechanismProperties = {
...this.mechanismProperties,
ALLOWED_HOSTS: DEFAULT_ALLOWED_HOSTS
};
}
Object.freeze(this.mechanismProperties);
Object.freeze(this);
}
/** Determines if two MongoCredentials objects are equivalent */
equals(other: MongoCredentials): boolean {
return (
this.mechanism === other.mechanism &&
this.username === other.username &&
this.password === other.password &&
this.source === other.source
);
}
/**
* If the authentication mechanism is set to "default", resolves the authMechanism
* based on the server version and server supported sasl mechanisms.
*
* @param hello - A hello response from the server
*/
resolveAuthMechanism(hello: Document | null): MongoCredentials {
// Only the "default" mechanism needs to be resolved from the hello response; an explicit mechanism is returned as-is
if (this.mechanism.match(/DEFAULT/i)) {
return new MongoCredentials({
username: this.username,
password: this.password,
source: this.source,
mechanism: getDefaultAuthMechanism(hello),
mechanismProperties: this.mechanismProperties
});
}
return this;
}
validate(): void {
if (
(this.mechanism === AuthMechanism.MONGODB_GSSAPI ||
this.mechanism === AuthMechanism.MONGODB_PLAIN ||
this.mechanism === AuthMechanism.MONGODB_SCRAM_SHA1 ||
this.mechanism === AuthMechanism.MONGODB_SCRAM_SHA256) &&
!this.username
) {
throw new MongoMissingCredentialsError(`Username required for mechanism '${this.mechanism}'`);
}
if (this.mechanism === AuthMechanism.MONGODB_OIDC) {
if (
this.username &&
this.mechanismProperties.ENVIRONMENT &&
this.mechanismProperties.ENVIRONMENT !== 'azure'
) {
throw new MongoInvalidArgumentError(
`username and ENVIRONMENT '${this.mechanismProperties.ENVIRONMENT}' may not be used together for mechanism '${this.mechanism}'.`
);
}
if (this.username && this.password) {
throw new MongoInvalidArgumentError(
`No password is allowed in ENVIRONMENT '${this.mechanismProperties.ENVIRONMENT}' for '${this.mechanism}'.`
);
}
if (
(this.mechanismProperties.ENVIRONMENT === 'azure' ||
this.mechanismProperties.ENVIRONMENT === 'gcp') &&
!this.mechanismProperties.TOKEN_RESOURCE
) {
throw new MongoInvalidArgumentError(TOKEN_RESOURCE_MISSING_ERROR);
}
if (
this.mechanismProperties.ENVIRONMENT &&
!ALLOWED_ENVIRONMENT_NAMES.includes(this.mechanismProperties.ENVIRONMENT)
) {
throw new MongoInvalidArgumentError(
`Currently only a ENVIRONMENT in ${ALLOWED_ENVIRONMENT_NAMES.join(
','
)} is supported for mechanism '${this.mechanism}'.`
);
}
if (
!this.mechanismProperties.ENVIRONMENT &&
!this.mechanismProperties.OIDC_CALLBACK &&
!this.mechanismProperties.OIDC_HUMAN_CALLBACK
) {
throw new MongoInvalidArgumentError(
`Either a ENVIRONMENT, OIDC_CALLBACK, or OIDC_HUMAN_CALLBACK must be specified for mechanism '${this.mechanism}'.`
);
}
if (this.mechanismProperties.ALLOWED_HOSTS) {
const hosts = this.mechanismProperties.ALLOWED_HOSTS;
if (!Array.isArray(hosts)) {
throw new MongoInvalidArgumentError(ALLOWED_HOSTS_ERROR);
}
for (const host of hosts) {
if (typeof host !== 'string') {
throw new MongoInvalidArgumentError(ALLOWED_HOSTS_ERROR);
}
}
}
}
if (AUTH_MECHS_AUTH_SRC_EXTERNAL.has(this.mechanism)) {
if (this.source != null && this.source !== '$external') {
// TODO(NODE-3485): Replace this with a MongoAuthValidationError
throw new MongoAPIError(
`Invalid source '${this.source}' for mechanism '${this.mechanism}' specified.`
);
}
}
if (this.mechanism === AuthMechanism.MONGODB_PLAIN && this.source == null) {
// TODO(NODE-3485): Replace this with a MongoAuthValidationError
throw new MongoAPIError('PLAIN Authentication Mechanism needs an auth source');
}
if (this.mechanism === AuthMechanism.MONGODB_X509 && this.password != null) {
if (this.password === '') {
Reflect.set(this, 'password', undefined);
return;
}
// TODO(NODE-3485): Replace this with a MongoAuthValidationError
throw new MongoAPIError(`Password not allowed for mechanism MONGODB-X509`);
}
const canonicalization = this.mechanismProperties.CANONICALIZE_HOST_NAME ?? false;
if (!Object.values(GSSAPICanonicalizationValue).includes(canonicalization)) {
throw new MongoAPIError(`Invalid CANONICALIZE_HOST_NAME value: ${canonicalization}`);
}
}
static merge(
creds: MongoCredentials | undefined,
options: Partial<MongoCredentialsOptions>
): MongoCredentials {
return new MongoCredentials({
username: options.username ?? creds?.username ?? '',
password: options.password ?? creds?.password ?? '',
mechanism: options.mechanism ?? creds?.mechanism ?? AuthMechanism.MONGODB_DEFAULT,
mechanismProperties: options.mechanismProperties ?? creds?.mechanismProperties ?? {},
source: options.source ?? options.db ?? creds?.source ?? 'admin'
});
}
}
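
A small usage sketch of the class above, showing how the "default" mechanism is resolved from a hello response (credential values are hypothetical):

import { MongoCredentials } from './mongo_credentials';

const creds = new MongoCredentials({
  username: 'app-user',
  password: 'app-password',
  source: 'admin',
  mechanismProperties: {}
});

// No explicit mechanism was given, so the mechanism starts out as DEFAULT. Once
// a hello response advertises saslSupportedMechs, SCRAM-SHA-256 is chosen when
// present, otherwise SCRAM-SHA-1.
const resolved = creds.resolveAuthMechanism({
  saslSupportedMechs: ['SCRAM-SHA-1', 'SCRAM-SHA-256']
});
// resolved.mechanism === 'SCRAM-SHA-256'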


@@ -0,0 +1,191 @@
import type { Binary, BSONSerializeOptions } from '../../bson';
import * as BSON from '../../bson';
import { aws4 } from '../../deps';
import {
MongoCompatibilityError,
MongoMissingCredentialsError,
MongoRuntimeError
} from '../../error';
import { ByteUtils, maxWireVersion, ns, randomBytes } from '../../utils';
import { type AuthContext, AuthProvider } from './auth_provider';
import {
AWSSDKCredentialProvider,
type AWSTempCredentials,
AWSTemporaryCredentialProvider,
LegacyAWSTemporaryCredentialProvider
} from './aws_temporary_credentials';
import { MongoCredentials } from './mongo_credentials';
import { AuthMechanism } from './providers';
const ASCII_N = 110;
const bsonOptions: BSONSerializeOptions = {
useBigInt64: false,
promoteLongs: true,
promoteValues: true,
promoteBuffers: false,
bsonRegExp: false
};
interface AWSSaslContinuePayload {
a: string;
d: string;
t?: string;
}
export class MongoDBAWS extends AuthProvider {
private credentialFetcher: AWSTemporaryCredentialProvider;
constructor() {
super();
this.credentialFetcher = AWSTemporaryCredentialProvider.isAWSSDKInstalled
? new AWSSDKCredentialProvider()
: new LegacyAWSTemporaryCredentialProvider();
}
override async auth(authContext: AuthContext): Promise<void> {
const { connection } = authContext;
if (!authContext.credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
if ('kModuleError' in aws4) {
throw aws4['kModuleError'];
}
const { sign } = aws4;
if (maxWireVersion(connection) < 9) {
throw new MongoCompatibilityError(
'MONGODB-AWS authentication requires MongoDB version 4.4 or later'
);
}
if (!authContext.credentials.username) {
authContext.credentials = await makeTempCredentials(
authContext.credentials,
this.credentialFetcher
);
}
const { credentials } = authContext;
const accessKeyId = credentials.username;
const secretAccessKey = credentials.password;
// Allow the user to specify an AWS session token for authentication with temporary credentials.
const sessionToken = credentials.mechanismProperties.AWS_SESSION_TOKEN;
// If all three defined, include sessionToken, else include username and pass, else no credentials
const awsCredentials =
accessKeyId && secretAccessKey && sessionToken
? { accessKeyId, secretAccessKey, sessionToken }
: accessKeyId && secretAccessKey
? { accessKeyId, secretAccessKey }
: undefined;
const db = credentials.source;
const nonce = await randomBytes(32);
// All messages between MongoDB clients and servers are sent as BSON objects
// in the payload field of saslStart and saslContinue.
const saslStart = {
saslStart: 1,
mechanism: 'MONGODB-AWS',
payload: BSON.serialize({ r: nonce, p: ASCII_N }, bsonOptions)
};
const saslStartResponse = await connection.command(ns(`${db}.$cmd`), saslStart, undefined);
const serverResponse = BSON.deserialize(saslStartResponse.payload.buffer, bsonOptions) as {
s: Binary;
h: string;
};
const host = serverResponse.h;
const serverNonce = serverResponse.s.buffer;
if (serverNonce.length !== 64) {
// TODO(NODE-3483)
throw new MongoRuntimeError(`Invalid server nonce length ${serverNonce.length}, expected 64`);
}
if (!ByteUtils.equals(serverNonce.subarray(0, nonce.byteLength), nonce)) {
// throw because the serverNonce's leading 32 bytes must equal the client nonce's 32 bytes
// https://github.com/mongodb/specifications/blob/master/source/auth/auth.md#conversation-5
// TODO(NODE-3483)
throw new MongoRuntimeError('Server nonce does not begin with client nonce');
}
if (host.length < 1 || host.length > 255 || host.indexOf('..') !== -1) {
// TODO(NODE-3483)
throw new MongoRuntimeError(`Server returned an invalid host: "${host}"`);
}
const body = 'Action=GetCallerIdentity&Version=2011-06-15';
const options = sign(
{
method: 'POST',
host,
region: deriveRegion(serverResponse.h),
service: 'sts',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
'Content-Length': body.length,
'X-MongoDB-Server-Nonce': ByteUtils.toBase64(serverNonce),
'X-MongoDB-GS2-CB-Flag': 'n'
},
path: '/',
body
},
awsCredentials
);
const payload: AWSSaslContinuePayload = {
a: options.headers.Authorization,
d: options.headers['X-Amz-Date']
};
if (sessionToken) {
payload.t = sessionToken;
}
const saslContinue = {
saslContinue: 1,
conversationId: 1,
payload: BSON.serialize(payload, bsonOptions)
};
await connection.command(ns(`${db}.$cmd`), saslContinue, undefined);
}
}
async function makeTempCredentials(
credentials: MongoCredentials,
awsCredentialFetcher: AWSTemporaryCredentialProvider
): Promise<MongoCredentials> {
function makeMongoCredentialsFromAWSTemp(creds: AWSTempCredentials) {
// The AWS session token (creds.Token) may or may not be set.
if (!creds.AccessKeyId || !creds.SecretAccessKey) {
throw new MongoMissingCredentialsError('Could not obtain temporary MONGODB-AWS credentials');
}
return new MongoCredentials({
username: creds.AccessKeyId,
password: creds.SecretAccessKey,
source: credentials.source,
mechanism: AuthMechanism.MONGODB_AWS,
mechanismProperties: {
AWS_SESSION_TOKEN: creds.Token
}
});
}
const temporaryCredentials = await awsCredentialFetcher.getCredentials();
return makeMongoCredentialsFromAWSTemp(temporaryCredentials);
}
function deriveRegion(host: string) {
const parts = host.split('.');
if (parts.length === 1 || parts[1] === 'amazonaws') {
return 'us-east-1';
}
return parts[1];
}
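
The region derivation at the end of this file is easy to sanity-check. Since deriveRegion is module-private, it is restated standalone here purely for illustration:

function deriveRegionExample(host: string): string {
  const parts = host.split('.');
  if (parts.length === 1 || parts[1] === 'amazonaws') {
    return 'us-east-1';
  }
  return parts[1];
}

deriveRegionExample('sts.amazonaws.com');           // 'us-east-1' (global STS endpoint)
deriveRegionExample('sts.eu-west-2.amazonaws.com'); // 'eu-west-2'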


@@ -0,0 +1,180 @@
import type { Document } from '../../bson';
import { MongoInvalidArgumentError, MongoMissingCredentialsError } from '../../error';
import type { HandshakeDocument } from '../connect';
import type { Connection } from '../connection';
import { type AuthContext, AuthProvider } from './auth_provider';
import type { MongoCredentials } from './mongo_credentials';
import { AzureMachineWorkflow } from './mongodb_oidc/azure_machine_workflow';
import { GCPMachineWorkflow } from './mongodb_oidc/gcp_machine_workflow';
import { K8SMachineWorkflow } from './mongodb_oidc/k8s_machine_workflow';
import { TokenCache } from './mongodb_oidc/token_cache';
import { TokenMachineWorkflow } from './mongodb_oidc/token_machine_workflow';
/** Error when credentials are missing. */
const MISSING_CREDENTIALS_ERROR = 'AuthContext must provide credentials.';
/**
* The information returned by the server on the IDP server.
* @public
*/
export interface IdPInfo {
/**
* A URL which describes the Authentication Server. This identifier should
* be the iss of provided access tokens, and be viable for RFC8414 metadata
* discovery and RFC9207 identification.
*/
issuer: string;
/** A unique client ID for this OIDC client. */
clientId: string;
/** A list of additional scopes to request from IdP. */
requestScopes?: string[];
}
/**
* The response from the IdP server with the access token and
* optional expiration time and refresh token.
* @public
*/
export interface IdPServerResponse {
/** The OIDC access token. */
accessToken: string;
/** The time when the access token expires. For future use. */
expiresInSeconds?: number;
/** The refresh token, if applicable, to be used by the callback to request a new token from the issuer. */
refreshToken?: string;
}
/**
* The response required to be returned from the machine or
* human callback workflows' callback.
* @public
*/
export interface OIDCResponse {
/** The OIDC access token. */
accessToken: string;
/** The time when the access token expires. For future use. */
expiresInSeconds?: number;
/** The refresh token, if applicable, to be used by the callback to request a new token from the issuer. */
refreshToken?: string;
}
/**
* The parameters that the driver provides to the user supplied
* human or machine callback.
*
* The version number is used to communicate callback API changes that are not breaking but that
* users may want to know about and review their implementation. Users may wish to check the version
* number and throw an error if their expected version number and the one provided do not match.
* @public
*/
export interface OIDCCallbackParams {
/** Optional username. */
username?: string;
/** The context in which to timeout the OIDC callback. */
timeoutContext: AbortSignal;
/** The current OIDC API version. */
version: 1;
/** The IdP information returned from the server. */
idpInfo?: IdPInfo;
/** The refresh token, if applicable, to be used by the callback to request a new token from the issuer. */
refreshToken?: string;
}
/**
* The signature of the human or machine callback functions.
* @public
*/
export type OIDCCallbackFunction = (params: OIDCCallbackParams) => Promise<OIDCResponse>;
/** The current version of OIDC implementation. */
export const OIDC_VERSION = 1;
type EnvironmentName = 'test' | 'azure' | 'gcp' | 'k8s' | undefined;
/** @internal */
export interface Workflow {
/**
* All device workflows must implement this method in order to get the access
* token and then call authenticate with it.
*/
execute(
connection: Connection,
credentials: MongoCredentials,
response?: Document
): Promise<void>;
/**
* Each workflow should specify the correct custom behaviour for reauthentication.
*/
reauthenticate(connection: Connection, credentials: MongoCredentials): Promise<void>;
/**
* Get the document to add for speculative authentication.
*/
speculativeAuth(connection: Connection, credentials: MongoCredentials): Promise<Document>;
}
/** @internal */
export const OIDC_WORKFLOWS: Map<EnvironmentName, () => Workflow> = new Map();
OIDC_WORKFLOWS.set('test', () => new TokenMachineWorkflow(new TokenCache()));
OIDC_WORKFLOWS.set('azure', () => new AzureMachineWorkflow(new TokenCache()));
OIDC_WORKFLOWS.set('gcp', () => new GCPMachineWorkflow(new TokenCache()));
OIDC_WORKFLOWS.set('k8s', () => new K8SMachineWorkflow(new TokenCache()));
/**
* OIDC auth provider.
*/
export class MongoDBOIDC extends AuthProvider {
workflow: Workflow;
/**
* Instantiate the auth provider.
*/
constructor(workflow?: Workflow) {
super();
if (!workflow) {
throw new MongoInvalidArgumentError('No workflow provided to the OIDC auth provider.');
}
this.workflow = workflow;
}
/**
* Authenticate using OIDC
*/
override async auth(authContext: AuthContext): Promise<void> {
const { connection, reauthenticating, response } = authContext;
if (response?.speculativeAuthenticate?.done) {
return;
}
const credentials = getCredentials(authContext);
if (reauthenticating) {
await this.workflow.reauthenticate(connection, credentials);
} else {
await this.workflow.execute(connection, credentials, response);
}
}
/**
* Add the speculative auth for the initial handshake.
*/
override async prepare(
handshakeDoc: HandshakeDocument,
authContext: AuthContext
): Promise<HandshakeDocument> {
const { connection } = authContext;
const credentials = getCredentials(authContext);
const result = await this.workflow.speculativeAuth(connection, credentials);
return { ...handshakeDoc, ...result };
}
}
/**
* Get credentials from the auth context, throwing if they do not exist.
*/
function getCredentials(authContext: AuthContext): MongoCredentials {
const { credentials } = authContext;
if (!credentials) {
throw new MongoMissingCredentialsError(MISSING_CREDENTIALS_ERROR);
}
return credentials;
}
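
A hedged sketch of a user-supplied machine callback satisfying the OIDCCallbackFunction signature above; the token file path is an assumption for illustration, and a real callback would obtain its token from the identity provider in use:

import { readFile } from 'fs/promises';
import type { OIDCCallbackParams, OIDCResponse } from './mongodb_oidc';

const machineCallback = async (params: OIDCCallbackParams): Promise<OIDCResponse> => {
  // params.timeoutContext is an AbortSignal; long-running callbacks should respect it.
  const token = await readFile('/var/run/oidc/token', 'utf8'); // hypothetical path
  return { accessToken: token.trim() };
};

Such a function would be passed through the OIDC_CALLBACK auth mechanism property declared in mongo_credentials.ts earlier in this commit.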


@@ -0,0 +1,82 @@
import { MONGODB_ERROR_CODES, MongoError, MongoOIDCError } from '../../../error';
import { Timeout, TimeoutError } from '../../../timeout';
import { type Connection } from '../../connection';
import { type MongoCredentials } from '../mongo_credentials';
import {
OIDC_VERSION,
type OIDCCallbackFunction,
type OIDCCallbackParams,
type OIDCResponse
} from '../mongodb_oidc';
import { AUTOMATED_TIMEOUT_MS, CallbackWorkflow } from './callback_workflow';
import { type TokenCache } from './token_cache';
/**
* Class implementing behaviour for the non human callback workflow.
* @internal
*/
export class AutomatedCallbackWorkflow extends CallbackWorkflow {
/**
* Instantiate the automated callback workflow.
*/
constructor(cache: TokenCache, callback: OIDCCallbackFunction) {
super(cache, callback);
}
/**
* Execute the OIDC callback workflow.
*/
async execute(connection: Connection, credentials: MongoCredentials): Promise<void> {
// If there is a cached access token, try to authenticate with it. If
// authentication fails with an Authentication error (18),
// invalidate the access token, fetch a new access token, and try
// to authenticate again.
// If the server fails for any other reason, do not clear the cache.
if (this.cache.hasAccessToken) {
const token = this.cache.getAccessToken();
try {
return await this.finishAuthentication(connection, credentials, token);
} catch (error) {
if (
error instanceof MongoError &&
error.code === MONGODB_ERROR_CODES.AuthenticationFailed
) {
this.cache.removeAccessToken();
return await this.execute(connection, credentials);
} else {
throw error;
}
}
}
const response = await this.fetchAccessToken(credentials);
this.cache.put(response);
connection.accessToken = response.accessToken;
await this.finishAuthentication(connection, credentials, response.accessToken);
}
/**
* Fetches the access token using the callback.
*/
protected async fetchAccessToken(credentials: MongoCredentials): Promise<OIDCResponse> {
const controller = new AbortController();
const params: OIDCCallbackParams = {
timeoutContext: controller.signal,
version: OIDC_VERSION
};
if (credentials.username) {
params.username = credentials.username;
}
const timeout = Timeout.expires(AUTOMATED_TIMEOUT_MS);
try {
return await Promise.race([this.executeAndValidateCallback(params), timeout]);
} catch (error) {
if (TimeoutError.is(error)) {
controller.abort();
throw new MongoOIDCError(`OIDC callback timed out after ${AUTOMATED_TIMEOUT_MS}ms.`);
}
throw error;
} finally {
timeout.clear();
}
}
}
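
The timeout handling above relies on the driver's internal Timeout class. The same race-against-a-deadline shape can be sketched with standard Node APIs only (illustrative, not the driver's implementation):

async function raceWithDeadline<T>(
  work: (signal: AbortSignal) => Promise<T>,
  ms: number
): Promise<T> {
  const controller = new AbortController();
  let timer: NodeJS.Timeout | undefined;
  const deadline = new Promise<never>((_, reject) => {
    timer = setTimeout(() => {
      controller.abort();
      reject(new Error(`callback timed out after ${ms}ms`));
    }, ms);
  });
  try {
    // Whichever settles first wins; on timeout the callback is aborted via its signal.
    return await Promise.race([work(controller.signal), deadline]);
  } finally {
    clearTimeout(timer);
  }
}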


@@ -0,0 +1,85 @@
import { addAzureParams, AZURE_BASE_URL } from '../../../client-side-encryption/providers/azure';
import { MongoAzureError } from '../../../error';
import { get } from '../../../utils';
import type { MongoCredentials } from '../mongo_credentials';
import { type AccessToken, MachineWorkflow } from './machine_workflow';
import { type TokenCache } from './token_cache';
/** Azure request headers. */
const AZURE_HEADERS = Object.freeze({ Metadata: 'true', Accept: 'application/json' });
/** Invalid endpoint result error. */
const ENDPOINT_RESULT_ERROR =
'Azure endpoint did not return a value with only access_token and expires_in properties';
/** Error for when the token audience is missing in the environment. */
const TOKEN_RESOURCE_MISSING_ERROR =
'TOKEN_RESOURCE must be set in the auth mechanism properties when ENVIRONMENT is azure.';
/**
* Device workflow implementation for Azure.
*
* @internal
*/
export class AzureMachineWorkflow extends MachineWorkflow {
/**
* Instantiate the machine workflow.
*/
constructor(cache: TokenCache) {
super(cache);
}
/**
* Get the token from the environment.
*/
async getToken(credentials?: MongoCredentials): Promise<AccessToken> {
const tokenAudience = credentials?.mechanismProperties.TOKEN_RESOURCE;
const username = credentials?.username;
if (!tokenAudience) {
throw new MongoAzureError(TOKEN_RESOURCE_MISSING_ERROR);
}
const response = await getAzureTokenData(tokenAudience, username);
if (!isEndpointResultValid(response)) {
throw new MongoAzureError(ENDPOINT_RESULT_ERROR);
}
return response;
}
}
/**
* Hit the Azure endpoint to get the token data.
*/
async function getAzureTokenData(tokenAudience: string, username?: string): Promise<AccessToken> {
const url = new URL(AZURE_BASE_URL);
addAzureParams(url, tokenAudience, username);
const response = await get(url, {
headers: AZURE_HEADERS
});
if (response.status !== 200) {
throw new MongoAzureError(
`Status code ${response.status} returned from the Azure endpoint. Response body: ${response.body}`
);
}
const result = JSON.parse(response.body);
return {
access_token: result.access_token,
expires_in: Number(result.expires_in)
};
}
/**
* Determines if a result returned from the endpoint is valid.
* This means the result is not nullish, contains the access_token required field
* and the expires_in required field.
*/
function isEndpointResultValid(
token: unknown
): token is { access_token: unknown; expires_in: unknown } {
if (token == null || typeof token !== 'object') return false;
return (
'access_token' in token &&
typeof token.access_token === 'string' &&
'expires_in' in token &&
typeof token.expires_in === 'number'
);
}


@@ -0,0 +1,188 @@
import { setTimeout } from 'timers/promises';
import { type Document } from '../../../bson';
import { MongoMissingCredentialsError } from '../../../error';
import { ns } from '../../../utils';
import type { Connection } from '../../connection';
import type { MongoCredentials } from '../mongo_credentials';
import {
type OIDCCallbackFunction,
type OIDCCallbackParams,
type OIDCResponse,
type Workflow
} from '../mongodb_oidc';
import { finishCommandDocument, startCommandDocument } from './command_builders';
import { type TokenCache } from './token_cache';
/** 5 minutes in milliseconds */
export const HUMAN_TIMEOUT_MS = 300000;
/** 1 minute in milliseconds */
export const AUTOMATED_TIMEOUT_MS = 60000;
/** Properties allowed on results of callbacks. */
const RESULT_PROPERTIES = ['accessToken', 'expiresInSeconds', 'refreshToken'];
/** Error message when the callback result is invalid. */
const CALLBACK_RESULT_ERROR =
'User provided OIDC callbacks must return a valid object with an accessToken.';
/** The time to throttle callback calls. */
const THROTTLE_MS = 100;
/**
* OIDC implementation of a callback based workflow.
* @internal
*/
export abstract class CallbackWorkflow implements Workflow {
cache: TokenCache;
callback: OIDCCallbackFunction;
lastExecutionTime: number;
/**
* Instantiate the callback workflow.
*/
constructor(cache: TokenCache, callback: OIDCCallbackFunction) {
this.cache = cache;
this.callback = this.withLock(callback);
this.lastExecutionTime = Date.now() - THROTTLE_MS;
}
/**
* Get the document to add for speculative authentication. This also needs
* to add a db field from the credentials source.
*/
async speculativeAuth(connection: Connection, credentials: MongoCredentials): Promise<Document> {
// Check if the Client Cache has an access token.
// If it does, cache the access token in the Connection Cache and send a JwtStepRequest
// with the cached access token in the speculative authentication SASL payload.
if (this.cache.hasAccessToken) {
const accessToken = this.cache.getAccessToken();
connection.accessToken = accessToken;
const document = finishCommandDocument(accessToken);
document.db = credentials.source;
return { speculativeAuthenticate: document };
}
return {};
}
/**
* Reauthenticate the callback workflow. For this we invalidate the access token
* in the cache and run the authentication steps again. No initial handshake needs
* to be sent.
*/
async reauthenticate(connection: Connection, credentials: MongoCredentials): Promise<void> {
if (this.cache.hasAccessToken) {
// Reauthentication implies the token has expired.
if (connection.accessToken === this.cache.getAccessToken()) {
// If connection's access token is the same as the cache's, remove
// the token from the cache and connection.
this.cache.removeAccessToken();
delete connection.accessToken;
} else {
// If the connection's access token is different from the cache's, set
// the cache's token on the connection and do not remove from the
// cache.
connection.accessToken = this.cache.getAccessToken();
}
}
await this.execute(connection, credentials);
}
/**
* Execute the OIDC callback workflow.
*/
abstract execute(
connection: Connection,
credentials: MongoCredentials,
response?: Document
): Promise<void>;
/**
* Starts the callback authentication process. If there is a speculative
* authentication document from the initial handshake, then we will use that
* value to get the issuer, otherwise we will send the saslStart command.
*/
protected async startAuthentication(
connection: Connection,
credentials: MongoCredentials,
response?: Document
): Promise<Document> {
let result;
if (response?.speculativeAuthenticate) {
result = response.speculativeAuthenticate;
} else {
result = await connection.command(
ns(credentials.source),
startCommandDocument(credentials),
undefined
);
}
return result;
}
/**
* Finishes the callback authentication process.
*/
protected async finishAuthentication(
connection: Connection,
credentials: MongoCredentials,
token: string,
conversationId?: number
): Promise<void> {
await connection.command(
ns(credentials.source),
finishCommandDocument(token, conversationId),
undefined
);
}
/**
* Executes the callback and validates the output.
*/
protected async executeAndValidateCallback(params: OIDCCallbackParams): Promise<OIDCResponse> {
const result = await this.callback(params);
// Validate that the result returned by the callback is acceptable. If it is not
// we must clear the token result from the cache.
if (isCallbackResultInvalid(result)) {
throw new MongoMissingCredentialsError(CALLBACK_RESULT_ERROR);
}
return result;
}
/**
* Ensure only one callback executes at a time, and throttle calls to at
* most one every 100ms.
*/
protected withLock(callback: OIDCCallbackFunction): OIDCCallbackFunction {
let lock: Promise<any> = Promise.resolve();
return async (params: OIDCCallbackParams): Promise<OIDCResponse> => {
// We do this to ensure that we would never return the result of the
// previous lock, only the current callback's value would get returned.
await lock;
lock = lock
.catch(() => null)
.then(async () => {
const difference = Date.now() - this.lastExecutionTime;
if (difference <= THROTTLE_MS) {
await setTimeout(THROTTLE_MS - difference, { signal: params.timeoutContext });
}
this.lastExecutionTime = Date.now();
return await callback(params);
});
return await lock;
};
}
}
/**
* Determines if a result returned from a request or refresh callback
* function is invalid. This means the result is nullish, is missing the required
* accessToken field, or contains fields other than accessToken, expiresInSeconds,
* and refreshToken.
*/
function isCallbackResultInvalid(tokenResult: unknown): boolean {
if (tokenResult == null || typeof tokenResult !== 'object') return true;
if (!('accessToken' in tokenResult)) return true;
return !Object.getOwnPropertyNames(tokenResult).every(prop => RESULT_PROPERTIES.includes(prop));
}


@@ -0,0 +1,53 @@
import { Binary, BSON, type Document } from '../../../bson';
import { type MongoCredentials } from '../mongo_credentials';
import { AuthMechanism } from '../providers';
/** @internal */
export interface OIDCCommand {
saslStart?: number;
saslContinue?: number;
conversationId?: number;
mechanism?: string;
autoAuthorize?: number;
db?: string;
payload: Binary;
}
/**
* Generate the finishing command document for authentication. Will be a
* saslStart or saslContinue depending on the presence of a conversation id.
*/
export function finishCommandDocument(token: string, conversationId?: number): OIDCCommand {
if (conversationId != null) {
return {
saslContinue: 1,
conversationId: conversationId,
payload: new Binary(BSON.serialize({ jwt: token }))
};
}
// saslContinue requires a conversationId in the command to be valid, so in this
// case the server allows "step two" to actually be a saslStart carrying the token
// as the jwt, since the use of the cached value has no corresponding conversation
// on this particular connection.
return {
saslStart: 1,
mechanism: AuthMechanism.MONGODB_OIDC,
payload: new Binary(BSON.serialize({ jwt: token }))
};
}
/**
* Generate the saslStart command document.
*/
export function startCommandDocument(credentials: MongoCredentials): OIDCCommand {
const payload: Document = {};
if (credentials.username) {
payload.n = credentials.username;
}
return {
saslStart: 1,
autoAuthorize: 1,
mechanism: AuthMechanism.MONGODB_OIDC,
payload: new Binary(BSON.serialize(payload))
};
}
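
For reference, the command shapes these builders produce (token and conversation id values below are hypothetical; payloads are BSON documents wrapped in a Binary):

import { finishCommandDocument } from './command_builders';

// One-step conversation with a cached token and no conversation id: the server
// accepts a saslStart carrying the JWT directly.
const oneStep = finishCommandDocument('eyJ.example.token');
// => { saslStart: 1, mechanism: 'MONGODB-OIDC', payload: Binary(BSON({ jwt: ... })) }

// Step two of a two-step conversation reuses the conversationId from saslStart.
const stepTwo = finishCommandDocument('eyJ.example.token', 1);
// => { saslContinue: 1, conversationId: 1, payload: Binary(BSON({ jwt: ... })) }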


@@ -0,0 +1,53 @@
import { MongoGCPError } from '../../../error';
import { get } from '../../../utils';
import { type MongoCredentials } from '../mongo_credentials';
import { type AccessToken, MachineWorkflow } from './machine_workflow';
import { type TokenCache } from './token_cache';
/** GCP base URL. */
const GCP_BASE_URL =
'http://metadata/computeMetadata/v1/instance/service-accounts/default/identity';
/** GCP request headers. */
const GCP_HEADERS = Object.freeze({ 'Metadata-Flavor': 'Google' });
/** Error for when the token audience is missing in the environment. */
const TOKEN_RESOURCE_MISSING_ERROR =
'TOKEN_RESOURCE must be set in the auth mechanism properties when ENVIRONMENT is gcp.';
export class GCPMachineWorkflow extends MachineWorkflow {
/**
* Instantiate the machine workflow.
*/
constructor(cache: TokenCache) {
super(cache);
}
/**
* Get the token from the environment.
*/
async getToken(credentials?: MongoCredentials): Promise<AccessToken> {
const tokenAudience = credentials?.mechanismProperties.TOKEN_RESOURCE;
if (!tokenAudience) {
throw new MongoGCPError(TOKEN_RESOURCE_MISSING_ERROR);
}
return await getGcpTokenData(tokenAudience);
}
}
/**
* Hit the GCP endpoint to get the token data.
*/
async function getGcpTokenData(tokenAudience: string): Promise<AccessToken> {
const url = new URL(GCP_BASE_URL);
url.searchParams.append('audience', tokenAudience);
const response = await get(url, {
headers: GCP_HEADERS
});
if (response.status !== 200) {
throw new MongoGCPError(
`Status code ${response.status} returned from the GCP endpoint. Response body: ${response.body}`
);
}
return { access_token: response.body };
}
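
The metadata request built above can be shown standalone (the audience value is hypothetical): the workflow issues a plain GET against the GCE metadata server and treats the raw response body as the token.

const url = new URL(
  'http://metadata/computeMetadata/v1/instance/service-accounts/default/identity'
);
url.searchParams.append('audience', 'https://example.com/my-token-resource');
// GET url with headers { 'Metadata-Flavor': 'Google' }; a 200 response body is
// returned as { access_token: <body> }.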


@@ -0,0 +1,141 @@
import { BSON } from '../../../bson';
import { MONGODB_ERROR_CODES, MongoError, MongoOIDCError } from '../../../error';
import { Timeout, TimeoutError } from '../../../timeout';
import { type Connection } from '../../connection';
import { type MongoCredentials } from '../mongo_credentials';
import {
type IdPInfo,
OIDC_VERSION,
type OIDCCallbackFunction,
type OIDCCallbackParams,
type OIDCResponse
} from '../mongodb_oidc';
import { CallbackWorkflow, HUMAN_TIMEOUT_MS } from './callback_workflow';
import { type TokenCache } from './token_cache';
/**
* Class implementing behaviour for the human callback workflow.
* @internal
*/
export class HumanCallbackWorkflow extends CallbackWorkflow {
/**
* Instantiate the human callback workflow.
*/
constructor(cache: TokenCache, callback: OIDCCallbackFunction) {
super(cache, callback);
}
/**
* Execute the OIDC human callback workflow.
*/
async execute(connection: Connection, credentials: MongoCredentials): Promise<void> {
// Check if the Client Cache has an access token.
// If it does, cache the access token in the Connection Cache and perform a One-Step SASL conversation
// using the access token. If the server returns an Authentication error (18),
// invalidate the access token in the Client Cache, clear the Connection Cache,
// and restart the authentication flow. Raise any other errors to the user. On success, exit the algorithm.
if (this.cache.hasAccessToken) {
const token = this.cache.getAccessToken();
connection.accessToken = token;
try {
return await this.finishAuthentication(connection, credentials, token);
} catch (error) {
if (
error instanceof MongoError &&
error.code === MONGODB_ERROR_CODES.AuthenticationFailed
) {
this.cache.removeAccessToken();
delete connection.accessToken;
return await this.execute(connection, credentials);
} else {
throw error;
}
}
}
// Check if the Client Cache has a refresh token.
// If it does, call the OIDC Human Callback with the cached refresh token and IdpInfo to get a
// new access token. Cache the new access token in the Client Cache and Connection Cache.
// Perform a One-Step SASL conversation using the new access token. If the server returns
// an Authentication error (18), clear the refresh token, invalidate the access token from the
// Client Cache, clear the Connection Cache, and restart the authentication flow. Raise any other
// errors to the user. On success, exit the algorithm.
if (this.cache.hasRefreshToken) {
const refreshToken = this.cache.getRefreshToken();
const result = await this.fetchAccessToken(
this.cache.getIdpInfo(),
credentials,
refreshToken
);
this.cache.put(result);
connection.accessToken = result.accessToken;
try {
return await this.finishAuthentication(connection, credentials, result.accessToken);
} catch (error) {
if (
error instanceof MongoError &&
error.code === MONGODB_ERROR_CODES.AuthenticationFailed
) {
this.cache.removeRefreshToken();
delete connection.accessToken;
return await this.execute(connection, credentials);
} else {
throw error;
}
}
}
// Start a new Two-Step SASL conversation.
// Run a PrincipalStepRequest to get the IdpInfo.
// Call the OIDC Human Callback with the new IdpInfo to get a new access token and optional refresh
// token. Drivers MUST NOT pass a cached refresh token to the callback when performing
// a new Two-Step conversation. Cache the new IdpInfo and refresh token in the Client Cache and the
// new access token in the Client Cache and Connection Cache.
// Attempt to authenticate using a JwtStepRequest with the new access token. Raise any errors to the user.
const startResponse = await this.startAuthentication(connection, credentials);
const conversationId = startResponse.conversationId;
const idpInfo = BSON.deserialize(startResponse.payload.buffer) as IdPInfo;
const callbackResponse = await this.fetchAccessToken(idpInfo, credentials);
this.cache.put(callbackResponse, idpInfo);
connection.accessToken = callbackResponse.accessToken;
return await this.finishAuthentication(
connection,
credentials,
callbackResponse.accessToken,
conversationId
);
}
/**
* Fetches an access token using the callback.
*/
private async fetchAccessToken(
idpInfo: IdPInfo,
credentials: MongoCredentials,
refreshToken?: string
): Promise<OIDCResponse> {
const controller = new AbortController();
const params: OIDCCallbackParams = {
timeoutContext: controller.signal,
version: OIDC_VERSION,
idpInfo: idpInfo
};
if (credentials.username) {
params.username = credentials.username;
}
if (refreshToken) {
params.refreshToken = refreshToken;
}
const timeout = Timeout.expires(HUMAN_TIMEOUT_MS);
try {
return await Promise.race([this.executeAndValidateCallback(params), timeout]);
} catch (error) {
if (TimeoutError.is(error)) {
controller.abort();
throw new MongoOIDCError(`OIDC callback timed out after ${HUMAN_TIMEOUT_MS}ms.`);
}
throw error;
} finally {
timeout.clear();
}
}
}


@@ -0,0 +1,38 @@
import { readFile } from 'fs/promises';
import { type AccessToken, MachineWorkflow } from './machine_workflow';
import { type TokenCache } from './token_cache';
/** The fallback file name */
const FALLBACK_FILENAME = '/var/run/secrets/kubernetes.io/serviceaccount/token';
/** The azure environment variable for the file name. */
const AZURE_FILENAME = 'AZURE_FEDERATED_TOKEN_FILE';
/** The AWS environment variable for the file name. */
const AWS_FILENAME = 'AWS_WEB_IDENTITY_TOKEN_FILE';
export class K8SMachineWorkflow extends MachineWorkflow {
/**
* Instantiate the machine workflow.
*/
constructor(cache: TokenCache) {
super(cache);
}
/**
* Get the token from the environment.
*/
async getToken(): Promise<AccessToken> {
let filename: string;
if (process.env[AZURE_FILENAME]) {
filename = process.env[AZURE_FILENAME];
} else if (process.env[AWS_FILENAME]) {
filename = process.env[AWS_FILENAME];
} else {
filename = FALLBACK_FILENAME;
}
const token = await readFile(filename, 'utf8');
return { access_token: token };
}
}


@@ -0,0 +1,137 @@
import { setTimeout } from 'timers/promises';
import { type Document } from '../../../bson';
import { ns } from '../../../utils';
import type { Connection } from '../../connection';
import type { MongoCredentials } from '../mongo_credentials';
import type { Workflow } from '../mongodb_oidc';
import { finishCommandDocument } from './command_builders';
import { type TokenCache } from './token_cache';
/** The time to throttle callback calls. */
const THROTTLE_MS = 100;
/**
* The access token format.
* @internal
*/
export interface AccessToken {
access_token: string;
expires_in?: number;
}
/** @internal */
export type OIDCTokenFunction = (credentials: MongoCredentials) => Promise<AccessToken>;
/**
* Common behaviour for OIDC machine workflows.
* @internal
*/
export abstract class MachineWorkflow implements Workflow {
cache: TokenCache;
callback: OIDCTokenFunction;
lastExecutionTime: number;
/**
* Instantiate the machine workflow.
*/
constructor(cache: TokenCache) {
this.cache = cache;
this.callback = this.withLock(this.getToken.bind(this));
this.lastExecutionTime = Date.now() - THROTTLE_MS;
}
/**
* Execute the workflow. Gets the token from the subclass implementation.
*/
async execute(connection: Connection, credentials: MongoCredentials): Promise<void> {
const token = await this.getTokenFromCacheOrEnv(connection, credentials);
const command = finishCommandDocument(token);
await connection.command(ns(credentials.source), command, undefined);
}
/**
* Reauthentication on a machine workflow simply fetches the token again, since
* the server has indicated the current access token is invalid or expired.
*/
async reauthenticate(connection: Connection, credentials: MongoCredentials): Promise<void> {
if (this.cache.hasAccessToken) {
// Reauthentication implies the token has expired.
if (connection.accessToken === this.cache.getAccessToken()) {
// If connection's access token is the same as the cache's, remove
// the token from the cache and connection.
this.cache.removeAccessToken();
delete connection.accessToken;
} else {
// If the connection's access token is different from the cache's, set
// the cache's token on the connection and do not remove from the
// cache.
connection.accessToken = this.cache.getAccessToken();
}
}
await this.execute(connection, credentials);
}
/**
* Get the document to add for speculative authentication.
*/
async speculativeAuth(connection: Connection, credentials: MongoCredentials): Promise<Document> {
// The spec states only cached access tokens can use speculative auth.
if (!this.cache.hasAccessToken) {
return {};
}
const token = await this.getTokenFromCacheOrEnv(connection, credentials);
const document = finishCommandDocument(token);
document.db = credentials.source;
return { speculativeAuthenticate: document };
}
/**
* Get the token from the cache or environment.
*/
private async getTokenFromCacheOrEnv(
connection: Connection,
credentials: MongoCredentials
): Promise<string> {
if (this.cache.hasAccessToken) {
return this.cache.getAccessToken();
} else {
const token = await this.callback(credentials);
this.cache.put({ accessToken: token.access_token, expiresInSeconds: token.expires_in });
// Put the access token on the connection as well.
connection.accessToken = token.access_token;
return token.access_token;
}
}
/**
* Ensure only one callback executes at a time, and throttle calls to at
* most one every 100ms.
*/
private withLock(callback: OIDCTokenFunction): OIDCTokenFunction {
let lock: Promise<any> = Promise.resolve();
return async (credentials: MongoCredentials): Promise<AccessToken> => {
// Await the previous lock first so that we never return the result of a prior
// invocation; only the current callback's value is returned.
await lock;
lock = lock
.catch(() => null)
.then(async () => {
const difference = Date.now() - this.lastExecutionTime;
if (difference <= THROTTLE_MS) {
await setTimeout(THROTTLE_MS - difference);
}
this.lastExecutionTime = Date.now();
return await callback(credentials);
});
return await lock;
};
}
/**
* Get the token from the environment or endpoint.
*/
abstract getToken(credentials: MongoCredentials): Promise<AccessToken>;
}
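// Illustrative sketch, not part of the driver: the lock-and-throttle pattern used by
// withLock() above, shown standalone. `makeThrottled` is a hypothetical name; it assumes
// the same THROTTLE_MS constant and the promise-based setTimeout imported above.
function makeThrottled<T>(fn: () => Promise<T>, throttleMs = THROTTLE_MS): () => Promise<T> {
  let lock: Promise<any> = Promise.resolve();
  let lastExecutionTime = Date.now() - throttleMs;
  return async (): Promise<T> => {
    // Wait for any in-flight call, but never return its result.
    await lock;
    lock = lock
      // A rejected previous call must not poison the chain.
      .catch(() => null)
      .then(async () => {
        const elapsed = Date.now() - lastExecutionTime;
        if (elapsed <= throttleMs) {
          await setTimeout(throttleMs - elapsed);
        }
        lastExecutionTime = Date.now();
        return await fn();
      });
    return await lock;
  };
}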

View File

@@ -0,0 +1,62 @@
import { MongoDriverError } from '../../../error';
import type { IdPInfo, OIDCResponse } from '../mongodb_oidc';
class MongoOIDCError extends MongoDriverError {}
/** @internal */
export class TokenCache {
private accessToken?: string;
private refreshToken?: string;
private idpInfo?: IdPInfo;
private expiresInSeconds?: number;
get hasAccessToken(): boolean {
return !!this.accessToken;
}
get hasRefreshToken(): boolean {
return !!this.refreshToken;
}
get hasIdpInfo(): boolean {
return !!this.idpInfo;
}
getAccessToken(): string {
if (!this.accessToken) {
throw new MongoOIDCError('Attempted to get an access token when none exists.');
}
return this.accessToken;
}
getRefreshToken(): string {
if (!this.refreshToken) {
throw new MongoOIDCError('Attempted to get a refresh token when none exists.');
}
return this.refreshToken;
}
getIdpInfo(): IdPInfo {
if (!this.idpInfo) {
throw new MongoOIDCError('Attempted to get IDP information when none exists.');
}
return this.idpInfo;
}
put(response: OIDCResponse, idpInfo?: IdPInfo) {
this.accessToken = response.accessToken;
this.refreshToken = response.refreshToken;
this.expiresInSeconds = response.expiresInSeconds;
if (idpInfo) {
this.idpInfo = idpInfo;
}
}
removeAccessToken() {
this.accessToken = undefined;
}
removeRefreshToken() {
this.refreshToken = undefined;
}
}
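// Illustrative sketch, not part of the driver: typical interaction with the cache.
// `exampleCacheUsage` is a hypothetical name; it only assumes that OIDCResponse carries
// the accessToken/refreshToken/expiresInSeconds fields read by put() above.
function exampleCacheUsage(response: OIDCResponse): string | null {
  const cache = new TokenCache();
  cache.put(response);
  // Check hasAccessToken before getAccessToken, otherwise a MongoOIDCError is thrown.
  return cache.hasAccessToken ? cache.getAccessToken() : null;
}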

View File

@@ -0,0 +1,34 @@
import * as fs from 'fs';
import { MongoAWSError } from '../../../error';
import { type AccessToken, MachineWorkflow } from './machine_workflow';
import { type TokenCache } from './token_cache';
/** Error for when the token is missing in the environment. */
const TOKEN_MISSING_ERROR = 'OIDC_TOKEN_FILE must be set in the environment.';
/**
* Machine workflow that reads the OIDC access token from the file named by the
* OIDC_TOKEN_FILE environment variable.
*
* @internal
*/
export class TokenMachineWorkflow extends MachineWorkflow {
/**
* Instantiate the machine workflow.
*/
constructor(cache: TokenCache) {
super(cache);
}
/**
* Get the token from the file specified by the OIDC_TOKEN_FILE environment variable.
*/
async getToken(): Promise<AccessToken> {
const tokenFile = process.env.OIDC_TOKEN_FILE;
if (!tokenFile) {
throw new MongoAWSError(TOKEN_MISSING_ERROR);
}
const token = await fs.promises.readFile(tokenFile, 'utf8');
return { access_token: token };
}
}

25
backend/node_modules/mongodb/src/cmap/auth/plain.ts generated vendored Normal file
View File

@@ -0,0 +1,25 @@
import { Binary } from '../../bson';
import { MongoMissingCredentialsError } from '../../error';
import { ns } from '../../utils';
import { type AuthContext, AuthProvider } from './auth_provider';
export class Plain extends AuthProvider {
override async auth(authContext: AuthContext): Promise<void> {
const { connection, credentials } = authContext;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
const { username, password } = credentials;
const payload = new Binary(Buffer.from(`\x00${username}\x00${password}`));
const command = {
saslStart: 1,
mechanism: 'PLAIN',
payload: payload,
autoAuthorize: 1
};
await connection.command(ns('$external.$cmd'), command, undefined);
}
}
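// Illustrative sketch, not part of the driver: the PLAIN payload built above follows
// RFC 4616 (authzid \0 authcid \0 passwd) with an empty authorization identity.
// `encodePlainPayload` is a hypothetical helper name.
function encodePlainPayload(username: string, password: string): Binary {
  return new Binary(Buffer.from(`\x00${username}\x00${password}`, 'utf8'));
}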

View File

@@ -0,0 +1,23 @@
/** @public */
export const AuthMechanism = Object.freeze({
MONGODB_AWS: 'MONGODB-AWS',
MONGODB_CR: 'MONGODB-CR',
MONGODB_DEFAULT: 'DEFAULT',
MONGODB_GSSAPI: 'GSSAPI',
MONGODB_PLAIN: 'PLAIN',
MONGODB_SCRAM_SHA1: 'SCRAM-SHA-1',
MONGODB_SCRAM_SHA256: 'SCRAM-SHA-256',
MONGODB_X509: 'MONGODB-X509',
MONGODB_OIDC: 'MONGODB-OIDC'
} as const);
/** @public */
export type AuthMechanism = (typeof AuthMechanism)[keyof typeof AuthMechanism];
/** @internal */
export const AUTH_MECHS_AUTH_SRC_EXTERNAL = new Set<AuthMechanism>([
AuthMechanism.MONGODB_GSSAPI,
AuthMechanism.MONGODB_AWS,
AuthMechanism.MONGODB_OIDC,
AuthMechanism.MONGODB_X509
]);
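// Illustrative sketch, not part of the driver: mechanisms in the set above authenticate
// against the $external virtual database. `defaultAuthSource` is a hypothetical helper;
// the 'admin' fallback is a simplification, since the real default source also depends
// on the connection string.
function defaultAuthSource(mechanism: AuthMechanism): string {
  return AUTH_MECHS_AUTH_SRC_EXTERNAL.has(mechanism) ? '$external' : 'admin';
}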

344
backend/node_modules/mongodb/src/cmap/auth/scram.ts generated vendored Normal file
View File

@@ -0,0 +1,344 @@
import { saslprep } from '@mongodb-js/saslprep';
import * as crypto from 'crypto';
import { Binary, type Document } from '../../bson';
import {
MongoInvalidArgumentError,
MongoMissingCredentialsError,
MongoRuntimeError
} from '../../error';
import { ns, randomBytes } from '../../utils';
import type { HandshakeDocument } from '../connect';
import { type AuthContext, AuthProvider } from './auth_provider';
import type { MongoCredentials } from './mongo_credentials';
import { AuthMechanism } from './providers';
type CryptoMethod = 'sha1' | 'sha256';
class ScramSHA extends AuthProvider {
cryptoMethod: CryptoMethod;
constructor(cryptoMethod: CryptoMethod) {
super();
this.cryptoMethod = cryptoMethod || 'sha1';
}
override async prepare(
handshakeDoc: HandshakeDocument,
authContext: AuthContext
): Promise<HandshakeDocument> {
const cryptoMethod = this.cryptoMethod;
const credentials = authContext.credentials;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
const nonce = await randomBytes(24);
// store the nonce for later use
authContext.nonce = nonce;
const request = {
...handshakeDoc,
speculativeAuthenticate: {
...makeFirstMessage(cryptoMethod, credentials, nonce),
db: credentials.source
}
};
return request;
}
override async auth(authContext: AuthContext) {
const { reauthenticating, response } = authContext;
if (response?.speculativeAuthenticate && !reauthenticating) {
return await continueScramConversation(
this.cryptoMethod,
response.speculativeAuthenticate,
authContext
);
}
return await executeScram(this.cryptoMethod, authContext);
}
}
function cleanUsername(username: string) {
return username.replace('=', '=3D').replace(',', '=2C');
}
function clientFirstMessageBare(username: string, nonce: Buffer) {
// NOTE: This is done b/c Javascript uses UTF-16, but the server is hashing in UTF-8.
// Since the username is not sasl-prep-d, we need to do this here.
return Buffer.concat([
Buffer.from('n=', 'utf8'),
Buffer.from(username, 'utf8'),
Buffer.from(',r=', 'utf8'),
Buffer.from(nonce.toString('base64'), 'utf8')
]);
}
function makeFirstMessage(
cryptoMethod: CryptoMethod,
credentials: MongoCredentials,
nonce: Buffer
) {
const username = cleanUsername(credentials.username);
const mechanism =
cryptoMethod === 'sha1' ? AuthMechanism.MONGODB_SCRAM_SHA1 : AuthMechanism.MONGODB_SCRAM_SHA256;
// NOTE: This is done b/c Javascript uses UTF-16, but the server is hashing in UTF-8.
// Since the username is not sasl-prep-d, we need to do this here.
return {
saslStart: 1,
mechanism,
payload: new Binary(
Buffer.concat([Buffer.from('n,,', 'utf8'), clientFirstMessageBare(username, nonce)])
),
autoAuthorize: 1,
options: { skipEmptyExchange: true }
};
}
async function executeScram(cryptoMethod: CryptoMethod, authContext: AuthContext): Promise<void> {
const { connection, credentials } = authContext;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
if (!authContext.nonce) {
throw new MongoInvalidArgumentError('AuthContext must contain a valid nonce property');
}
const nonce = authContext.nonce;
const db = credentials.source;
const saslStartCmd = makeFirstMessage(cryptoMethod, credentials, nonce);
const response = await connection.command(ns(`${db}.$cmd`), saslStartCmd, undefined);
await continueScramConversation(cryptoMethod, response, authContext);
}
async function continueScramConversation(
cryptoMethod: CryptoMethod,
response: Document,
authContext: AuthContext
): Promise<void> {
const connection = authContext.connection;
const credentials = authContext.credentials;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
if (!authContext.nonce) {
throw new MongoInvalidArgumentError('Unable to continue SCRAM without valid nonce');
}
const nonce = authContext.nonce;
const db = credentials.source;
const username = cleanUsername(credentials.username);
const password = credentials.password;
const processedPassword =
cryptoMethod === 'sha256' ? saslprep(password) : passwordDigest(username, password);
const payload: Binary = Buffer.isBuffer(response.payload)
? new Binary(response.payload)
: response.payload;
const dict = parsePayload(payload);
const iterations = parseInt(dict.i, 10);
if (iterations && iterations < 4096) {
// TODO(NODE-3483)
throw new MongoRuntimeError(`Server returned an invalid iteration count ${iterations}`);
}
const salt = dict.s;
const rnonce = dict.r;
if (rnonce.startsWith('nonce')) {
// TODO(NODE-3483)
throw new MongoRuntimeError(`Server returned an invalid nonce: ${rnonce}`);
}
// Set up start of proof
const withoutProof = `c=biws,r=${rnonce}`;
const saltedPassword = HI(
processedPassword,
Buffer.from(salt, 'base64'),
iterations,
cryptoMethod
);
const clientKey = HMAC(cryptoMethod, saltedPassword, 'Client Key');
const serverKey = HMAC(cryptoMethod, saltedPassword, 'Server Key');
const storedKey = H(cryptoMethod, clientKey);
const authMessage = [
clientFirstMessageBare(username, nonce),
payload.toString('utf8'),
withoutProof
].join(',');
const clientSignature = HMAC(cryptoMethod, storedKey, authMessage);
const clientProof = `p=${xor(clientKey, clientSignature)}`;
const clientFinal = [withoutProof, clientProof].join(',');
const serverSignature = HMAC(cryptoMethod, serverKey, authMessage);
const saslContinueCmd = {
saslContinue: 1,
conversationId: response.conversationId,
payload: new Binary(Buffer.from(clientFinal))
};
const r = await connection.command(ns(`${db}.$cmd`), saslContinueCmd, undefined);
const parsedResponse = parsePayload(r.payload);
if (!compareDigest(Buffer.from(parsedResponse.v, 'base64'), serverSignature)) {
throw new MongoRuntimeError('Server returned an invalid signature');
}
if (r.done !== false) {
// If the server sends r.done === true we can save one RTT
return;
}
const retrySaslContinueCmd = {
saslContinue: 1,
conversationId: r.conversationId,
payload: Buffer.alloc(0)
};
await connection.command(ns(`${db}.$cmd`), retrySaslContinueCmd, undefined);
}
function parsePayload(payload: Binary) {
const payloadStr = payload.toString('utf8');
const dict: Document = {};
const parts = payloadStr.split(',');
for (let i = 0; i < parts.length; i++) {
const valueParts = (parts[i].match(/^([^=]*)=(.*)$/) ?? []).slice(1);
dict[valueParts[0]] = valueParts[1];
}
return dict;
}
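// Illustrative sketch, not part of the driver: parsePayload() splits a SCRAM server
// message such as 'r=<nonce>,s=<salt>,i=<iterations>' into its key/value fields.
// `exampleParsePayload` is a hypothetical name and the payload content is made up.
function exampleParsePayload(): Document {
  const payload = new Binary(Buffer.from('r=abc123,s=c2FsdA==,i=4096', 'utf8'));
  return parsePayload(payload); // => { r: 'abc123', s: 'c2FsdA==', i: '4096' }
}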
function passwordDigest(username: string, password: string) {
if (typeof username !== 'string') {
throw new MongoInvalidArgumentError('Username must be a string');
}
if (typeof password !== 'string') {
throw new MongoInvalidArgumentError('Password must be a string');
}
if (password.length === 0) {
throw new MongoInvalidArgumentError('Password cannot be empty');
}
let md5: crypto.Hash;
try {
md5 = crypto.createHash('md5');
} catch (err) {
if (crypto.getFips()) {
// This error is (slightly) more helpful than what comes from OpenSSL directly, e.g.
// 'Error: error:060800C8:digital envelope routines:EVP_DigestInit_ex:disabled for FIPS'
throw new Error('Auth mechanism SCRAM-SHA-1 is not supported in FIPS mode');
}
throw err;
}
md5.update(`${username}:mongo:${password}`, 'utf8');
return md5.digest('hex');
}
// XOR two buffers
function xor(a: Buffer, b: Buffer) {
if (!Buffer.isBuffer(a)) {
a = Buffer.from(a);
}
if (!Buffer.isBuffer(b)) {
b = Buffer.from(b);
}
const length = Math.max(a.length, b.length);
const res = [];
for (let i = 0; i < length; i += 1) {
res.push(a[i] ^ b[i]);
}
return Buffer.from(res).toString('base64');
}
function H(method: CryptoMethod, text: Buffer) {
return crypto.createHash(method).update(text).digest();
}
function HMAC(method: CryptoMethod, key: Buffer, text: Buffer | string) {
return crypto.createHmac(method, key).update(text).digest();
}
interface HICache {
[key: string]: Buffer;
}
let _hiCache: HICache = {};
let _hiCacheCount = 0;
function _hiCachePurge() {
_hiCache = {};
_hiCacheCount = 0;
}
const hiLengthMap = {
sha256: 32,
sha1: 20
};
function HI(data: string, salt: Buffer, iterations: number, cryptoMethod: CryptoMethod) {
// omit the work if already generated
const key = [data, salt.toString('base64'), iterations].join('_');
if (_hiCache[key] != null) {
return _hiCache[key];
}
// derive the salted password with PBKDF2
const saltedData = crypto.pbkdf2Sync(
data,
salt,
iterations,
hiLengthMap[cryptoMethod],
cryptoMethod
);
// cache a copy to speed up the next lookup, but prevent unbounded cache growth
if (_hiCacheCount >= 200) {
_hiCachePurge();
}
_hiCache[key] = saltedData;
_hiCacheCount += 1;
return saltedData;
}
function compareDigest(lhs: Buffer, rhs: Uint8Array) {
if (lhs.length !== rhs.length) {
return false;
}
if (typeof crypto.timingSafeEqual === 'function') {
return crypto.timingSafeEqual(lhs, rhs);
}
let result = 0;
for (let i = 0; i < lhs.length; i++) {
result |= lhs[i] ^ rhs[i];
}
return result === 0;
}
export class ScramSHA1 extends ScramSHA {
constructor() {
super('sha1');
}
}
export class ScramSHA256 extends ScramSHA {
constructor() {
super('sha256');
}
}
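// Illustrative sketch, not part of the driver: the client proof computed in
// continueScramConversation() above follows RFC 5802:
//   SaltedPassword  := Hi(password, salt, i)
//   ClientKey       := HMAC(SaltedPassword, "Client Key")
//   StoredKey       := H(ClientKey)
//   ClientSignature := HMAC(StoredKey, AuthMessage)
//   ClientProof     := ClientKey XOR ClientSignature
// `computeClientProof` is a hypothetical helper that reuses this module's primitives.
function computeClientProof(
  processedPassword: string,
  salt: Buffer,
  iterations: number,
  authMessage: string,
  method: CryptoMethod
): string {
  const saltedPassword = HI(processedPassword, salt, iterations, method);
  const clientKey = HMAC(method, saltedPassword, 'Client Key');
  const storedKey = H(method, clientKey);
  const clientSignature = HMAC(method, storedKey, authMessage);
  return xor(clientKey, clientSignature); // base64-encoded proof sent as p=<proof>
}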

43
backend/node_modules/mongodb/src/cmap/auth/x509.ts generated vendored Normal file
View File

@@ -0,0 +1,43 @@
import type { Document } from '../../bson';
import { MongoMissingCredentialsError } from '../../error';
import { ns } from '../../utils';
import type { HandshakeDocument } from '../connect';
import { type AuthContext, AuthProvider } from './auth_provider';
import type { MongoCredentials } from './mongo_credentials';
export class X509 extends AuthProvider {
override async prepare(
handshakeDoc: HandshakeDocument,
authContext: AuthContext
): Promise<HandshakeDocument> {
const { credentials } = authContext;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
return { ...handshakeDoc, speculativeAuthenticate: x509AuthenticateCommand(credentials) };
}
override async auth(authContext: AuthContext) {
const connection = authContext.connection;
const credentials = authContext.credentials;
if (!credentials) {
throw new MongoMissingCredentialsError('AuthContext must provide credentials.');
}
const response = authContext.response;
if (response?.speculativeAuthenticate) {
return;
}
await connection.command(ns('$external.$cmd'), x509AuthenticateCommand(credentials), undefined);
}
}
function x509AuthenticateCommand(credentials: MongoCredentials) {
const command: Document = { authenticate: 1, mechanism: 'MONGODB-X509' };
if (credentials.username) {
command.user = credentials.username;
}
return command;
}

View File

@@ -0,0 +1,357 @@
import { type Document, type ObjectId } from '../bson';
import {
COMMAND_FAILED,
COMMAND_STARTED,
COMMAND_SUCCEEDED,
LEGACY_HELLO_COMMAND,
LEGACY_HELLO_COMMAND_CAMEL_CASE
} from '../constants';
import { calculateDurationInMs, deepCopy } from '../utils';
import {
DocumentSequence,
OpMsgRequest,
type OpQueryRequest,
type WriteProtocolMessageType
} from './commands';
import type { Connection } from './connection';
/**
* An event indicating the start of a given command
* @public
* @category Event
*/
export class CommandStartedEvent {
commandObj?: Document;
requestId: number;
databaseName: string;
commandName: string;
command: Document;
address: string;
/** Driver generated connection id */
connectionId?: string | number;
/**
* Server generated connection id
* Distinct from the driver generated connection id; returned by the hello or legacy hello
* response as "connectionId" on server versions 4.2+.
*/
serverConnectionId: bigint | null;
serviceId?: ObjectId;
/** @internal */
name = COMMAND_STARTED;
/**
* Create a started event
*
* @internal
* @param connection - the connection that originated the command
* @param command - the command
*/
constructor(
connection: Connection,
command: WriteProtocolMessageType,
serverConnectionId: bigint | null
) {
const cmd = extractCommand(command);
const commandName = extractCommandName(cmd);
const { address, connectionId, serviceId } = extractConnectionDetails(connection);
// TODO: remove in major revision, this is not spec behavior
if (SENSITIVE_COMMANDS.has(commandName)) {
this.commandObj = {};
this.commandObj[commandName] = true;
}
this.address = address;
this.connectionId = connectionId;
this.serviceId = serviceId;
this.requestId = command.requestId;
this.databaseName = command.databaseName;
this.commandName = commandName;
this.command = maybeRedact(commandName, cmd, cmd);
this.serverConnectionId = serverConnectionId;
}
/* @internal */
get hasServiceId(): boolean {
return !!this.serviceId;
}
}
/**
* An event indicating the success of a given command
* @public
* @category Event
*/
export class CommandSucceededEvent {
address: string;
/** Driver generated connection id */
connectionId?: string | number;
/**
* Server generated connection id
* Distinct from the driver generated connection id; returned by the hello or legacy hello response as "connectionId" on server versions 4.2+.
*/
serverConnectionId: bigint | null;
requestId: number;
duration: number;
commandName: string;
reply: unknown;
serviceId?: ObjectId;
/** @internal */
name = COMMAND_SUCCEEDED;
/**
* Create a succeeded event
*
* @internal
* @param connection - the connection that originated the command
* @param command - the command
* @param reply - the reply for this command from the server
* @param started - a timestamp of when the command was first sent, used to calculate duration
*/
constructor(
connection: Connection,
command: WriteProtocolMessageType,
reply: Document | undefined,
started: number,
serverConnectionId: bigint | null
) {
const cmd = extractCommand(command);
const commandName = extractCommandName(cmd);
const { address, connectionId, serviceId } = extractConnectionDetails(connection);
this.address = address;
this.connectionId = connectionId;
this.serviceId = serviceId;
this.requestId = command.requestId;
this.commandName = commandName;
this.duration = calculateDurationInMs(started);
this.reply = maybeRedact(commandName, cmd, extractReply(command, reply));
this.serverConnectionId = serverConnectionId;
}
/* @internal */
get hasServiceId(): boolean {
return !!this.serviceId;
}
}
/**
* An event indicating the failure of a given command
* @public
* @category Event
*/
export class CommandFailedEvent {
address: string;
/** Driver generated connection id */
connectionId?: string | number;
/**
* Server generated connection id
* Distinct from the driver generated connection id; returned by the hello or legacy hello response as "connectionId" on server versions 4.2+.
*/
serverConnectionId: bigint | null;
requestId: number;
duration: number;
commandName: string;
failure: Error;
serviceId?: ObjectId;
/** @internal */
name = COMMAND_FAILED;
/**
* Create a failure event
*
* @internal
* @param connection - the connection that originated the command
* @param command - the command
* @param error - the generated error or a server error response
* @param started - a timestamp of when the command was first sent, used to calculate duration
*/
constructor(
connection: Connection,
command: WriteProtocolMessageType,
error: Error | Document,
started: number,
serverConnectionId: bigint | null
) {
const cmd = extractCommand(command);
const commandName = extractCommandName(cmd);
const { address, connectionId, serviceId } = extractConnectionDetails(connection);
this.address = address;
this.connectionId = connectionId;
this.serviceId = serviceId;
this.requestId = command.requestId;
this.commandName = commandName;
this.duration = calculateDurationInMs(started);
this.failure = maybeRedact(commandName, cmd, error) as Error;
this.serverConnectionId = serverConnectionId;
}
/* @internal */
get hasServiceId(): boolean {
return !!this.serviceId;
}
}
/**
* Commands that we want to redact because of the sensitive nature of their contents
* @internal
*/
export const SENSITIVE_COMMANDS = new Set([
'authenticate',
'saslStart',
'saslContinue',
'getnonce',
'createUser',
'updateUser',
'copydbgetnonce',
'copydbsaslstart',
'copydb'
]);
const HELLO_COMMANDS = new Set(['hello', LEGACY_HELLO_COMMAND, LEGACY_HELLO_COMMAND_CAMEL_CASE]);
// helper methods
const extractCommandName = (commandDoc: Document) => Object.keys(commandDoc)[0];
const namespace = (command: OpQueryRequest) => command.ns;
const collectionName = (command: OpQueryRequest) => command.ns.split('.')[1];
const maybeRedact = (commandName: string, commandDoc: Document, result: Error | Document) =>
SENSITIVE_COMMANDS.has(commandName) ||
(HELLO_COMMANDS.has(commandName) && commandDoc.speculativeAuthenticate)
? {}
: result;
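// Illustrative sketch, not part of the driver: sensitive commands (and hello commands
// carrying speculativeAuthenticate) are replaced with an empty document before being
// attached to monitoring events. `exampleRedaction` is a hypothetical name.
const exampleRedaction = {
  redacted: maybeRedact('saslStart', { saslStart: 1 }, { saslStart: 1 }), // => {}
  untouched: maybeRedact('ping', { ping: 1 }, { ping: 1 }) // => { ping: 1 }
};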
const LEGACY_FIND_QUERY_MAP: { [key: string]: string } = {
$query: 'filter',
$orderby: 'sort',
$hint: 'hint',
$comment: 'comment',
$maxScan: 'maxScan',
$max: 'max',
$min: 'min',
$returnKey: 'returnKey',
$showDiskLoc: 'showRecordId',
$maxTimeMS: 'maxTimeMS',
$snapshot: 'snapshot'
};
const LEGACY_FIND_OPTIONS_MAP = {
numberToSkip: 'skip',
numberToReturn: 'batchSize',
returnFieldSelector: 'projection'
} as const;
const OP_QUERY_KEYS = [
'tailable',
'oplogReplay',
'noCursorTimeout',
'awaitData',
'partial',
'exhaust'
] as const;
/** Extract the actual command from the query, possibly up-converting if it's a legacy format */
function extractCommand(command: WriteProtocolMessageType): Document {
if (command instanceof OpMsgRequest) {
const cmd = deepCopy(command.command);
// For OP_MSG with payload type 1 we need to pull the documents
// array out of the document sequence for monitoring.
if (cmd.ops instanceof DocumentSequence) {
cmd.ops = cmd.ops.documents;
}
if (cmd.nsInfo instanceof DocumentSequence) {
cmd.nsInfo = cmd.nsInfo.documents;
}
return cmd;
}
if (command.query?.$query) {
let result: Document;
if (command.ns === 'admin.$cmd') {
// up-convert legacy command
result = Object.assign({}, command.query.$query);
} else {
// up-convert legacy find command
result = { find: collectionName(command) };
Object.keys(LEGACY_FIND_QUERY_MAP).forEach(key => {
if (command.query[key] != null) {
result[LEGACY_FIND_QUERY_MAP[key]] = deepCopy(command.query[key]);
}
});
}
Object.keys(LEGACY_FIND_OPTIONS_MAP).forEach(key => {
const legacyKey = key as keyof typeof LEGACY_FIND_OPTIONS_MAP;
if (command[legacyKey] != null) {
result[LEGACY_FIND_OPTIONS_MAP[legacyKey]] = deepCopy(command[legacyKey]);
}
});
OP_QUERY_KEYS.forEach(key => {
if (command[key]) {
result[key] = command[key];
}
});
if (command.pre32Limit != null) {
result.limit = command.pre32Limit;
}
if (command.query.$explain) {
return { explain: result };
}
return result;
}
const clonedQuery: Record<string, unknown> = {};
const clonedCommand: Record<string, unknown> = {};
if (command.query) {
for (const k in command.query) {
clonedQuery[k] = deepCopy(command.query[k]);
}
clonedCommand.query = clonedQuery;
}
for (const k in command) {
if (k === 'query') continue;
clonedCommand[k] = deepCopy((command as unknown as Record<string, unknown>)[k]);
}
return command.query ? clonedQuery : clonedCommand;
}
function extractReply(command: WriteProtocolMessageType, reply?: Document) {
if (!reply) {
return reply;
}
if (command instanceof OpMsgRequest) {
return deepCopy(reply.result ? reply.result : reply);
}
// is this a legacy find command?
if (command.query && command.query.$query != null) {
return {
ok: 1,
cursor: {
id: deepCopy(reply.cursorId),
ns: namespace(command),
firstBatch: deepCopy(reply.documents)
}
};
}
return deepCopy(reply.result ? reply.result : reply);
}
function extractConnectionDetails(connection: Connection) {
let connectionId;
if ('id' in connection) {
connectionId = connection.id;
}
return {
address: connection.address,
serviceId: connection.serviceId,
connectionId
};
}

773
backend/node_modules/mongodb/src/cmap/commands.ts generated vendored Normal file
View File

@@ -0,0 +1,773 @@
import type { BSONSerializeOptions, Document, Long } from '../bson';
import * as BSON from '../bson';
import { MongoInvalidArgumentError, MongoRuntimeError } from '../error';
import { type ReadPreference } from '../read_preference';
import type { ClientSession } from '../sessions';
import type { CommandOptions } from './connection';
import {
compress,
Compressor,
type CompressorName,
uncompressibleCommands
} from './wire_protocol/compression';
import { OP_COMPRESSED, OP_MSG, OP_QUERY } from './wire_protocol/constants';
// Incrementing request id
let _requestId = 0;
// Query flags
const OPTS_TAILABLE_CURSOR = 2;
const OPTS_SECONDARY = 4;
const OPTS_OPLOG_REPLAY = 8;
const OPTS_NO_CURSOR_TIMEOUT = 16;
const OPTS_AWAIT_DATA = 32;
const OPTS_EXHAUST = 64;
const OPTS_PARTIAL = 128;
// Response flags
const CURSOR_NOT_FOUND = 1;
const QUERY_FAILURE = 2;
const SHARD_CONFIG_STALE = 4;
const AWAIT_CAPABLE = 8;
const encodeUTF8Into = BSON.BSON.onDemand.ByteUtils.encodeUTF8Into;
/** @internal */
export type WriteProtocolMessageType = OpQueryRequest | OpMsgRequest;
/** @internal */
export interface OpQueryOptions extends CommandOptions {
socketTimeoutMS?: number;
session?: ClientSession;
numberToSkip?: number;
numberToReturn?: number;
returnFieldSelector?: Document;
pre32Limit?: number;
serializeFunctions?: boolean;
ignoreUndefined?: boolean;
maxBsonSize?: number;
checkKeys?: boolean;
secondaryOk?: boolean;
requestId?: number;
moreToCome?: boolean;
exhaustAllowed?: boolean;
}
/** @internal */
export class OpQueryRequest {
ns: string;
numberToSkip: number;
numberToReturn: number;
returnFieldSelector?: Document;
requestId: number;
pre32Limit?: number;
serializeFunctions: boolean;
ignoreUndefined: boolean;
maxBsonSize: number;
checkKeys: boolean;
batchSize: number;
tailable: boolean;
secondaryOk: boolean;
oplogReplay: boolean;
noCursorTimeout: boolean;
awaitData: boolean;
exhaust: boolean;
partial: boolean;
/** moreToCome is an OP_MSG only concept */
moreToCome = false;
constructor(
public databaseName: string,
public query: Document,
options: OpQueryOptions
) {
// Basic options needed to be passed in
// TODO(NODE-3483): Replace with MongoCommandError
const ns = `${databaseName}.$cmd`;
if (typeof databaseName !== 'string') {
throw new MongoRuntimeError('Database name must be a string for a query');
}
// TODO(NODE-3483): Replace with MongoCommandError
if (query == null) throw new MongoRuntimeError('A query document must be specified for query');
// Validate that we are not passing 0x00 in the collection name
if (ns.indexOf('\x00') !== -1) {
// TODO(NODE-3483): Use MongoNamespace static method
throw new MongoRuntimeError('Namespace cannot contain a null character');
}
// Basic options
this.ns = ns;
// Additional options
this.numberToSkip = options.numberToSkip || 0;
this.numberToReturn = options.numberToReturn || 0;
this.returnFieldSelector = options.returnFieldSelector || undefined;
this.requestId = options.requestId ?? OpQueryRequest.getRequestId();
// special case for pre-3.2 find commands, delete ASAP
this.pre32Limit = options.pre32Limit;
// Serialization option
this.serializeFunctions =
typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
this.ignoreUndefined =
typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;
this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false;
this.batchSize = this.numberToReturn;
// Flags
this.tailable = false;
this.secondaryOk = typeof options.secondaryOk === 'boolean' ? options.secondaryOk : false;
this.oplogReplay = false;
this.noCursorTimeout = false;
this.awaitData = false;
this.exhaust = false;
this.partial = false;
}
/** Assign next request Id. */
incRequestId(): void {
this.requestId = _requestId++;
}
/** Peek next request Id. */
nextRequestId(): number {
return _requestId + 1;
}
/** Increment then return next request Id. */
static getRequestId(): number {
return ++_requestId;
}
// Uses a single allocated buffer for the process, avoiding multiple memory allocations
toBin(): Uint8Array[] {
const buffers = [];
let projection = null;
// Set up the flags
let flags = 0;
if (this.tailable) {
flags |= OPTS_TAILABLE_CURSOR;
}
if (this.secondaryOk) {
flags |= OPTS_SECONDARY;
}
if (this.oplogReplay) {
flags |= OPTS_OPLOG_REPLAY;
}
if (this.noCursorTimeout) {
flags |= OPTS_NO_CURSOR_TIMEOUT;
}
if (this.awaitData) {
flags |= OPTS_AWAIT_DATA;
}
if (this.exhaust) {
flags |= OPTS_EXHAUST;
}
if (this.partial) {
flags |= OPTS_PARTIAL;
}
// If batchSize is different to this.numberToReturn
if (this.batchSize !== this.numberToReturn) this.numberToReturn = this.batchSize;
// Allocate write protocol header buffer
const header = Buffer.alloc(
4 * 4 + // Header
4 + // Flags
Buffer.byteLength(this.ns) +
1 + // namespace
4 + // numberToSkip
4 // numberToReturn
);
// Add header to buffers
buffers.push(header);
// Serialize the query
const query = BSON.serialize(this.query, {
checkKeys: this.checkKeys,
serializeFunctions: this.serializeFunctions,
ignoreUndefined: this.ignoreUndefined
});
// Add query document
buffers.push(query);
if (this.returnFieldSelector && Object.keys(this.returnFieldSelector).length > 0) {
// Serialize the projection document
projection = BSON.serialize(this.returnFieldSelector, {
checkKeys: this.checkKeys,
serializeFunctions: this.serializeFunctions,
ignoreUndefined: this.ignoreUndefined
});
// Add projection document
buffers.push(projection);
}
// Total message size
const totalLength = header.length + query.length + (projection ? projection.length : 0);
// Set up the index
let index = 4;
// Write total document length
header[3] = (totalLength >> 24) & 0xff;
header[2] = (totalLength >> 16) & 0xff;
header[1] = (totalLength >> 8) & 0xff;
header[0] = totalLength & 0xff;
// Write header information requestId
header[index + 3] = (this.requestId >> 24) & 0xff;
header[index + 2] = (this.requestId >> 16) & 0xff;
header[index + 1] = (this.requestId >> 8) & 0xff;
header[index] = this.requestId & 0xff;
index = index + 4;
// Write header information responseTo
header[index + 3] = (0 >> 24) & 0xff;
header[index + 2] = (0 >> 16) & 0xff;
header[index + 1] = (0 >> 8) & 0xff;
header[index] = 0 & 0xff;
index = index + 4;
// Write header information OP_QUERY
header[index + 3] = (OP_QUERY >> 24) & 0xff;
header[index + 2] = (OP_QUERY >> 16) & 0xff;
header[index + 1] = (OP_QUERY >> 8) & 0xff;
header[index] = OP_QUERY & 0xff;
index = index + 4;
// Write header information flags
header[index + 3] = (flags >> 24) & 0xff;
header[index + 2] = (flags >> 16) & 0xff;
header[index + 1] = (flags >> 8) & 0xff;
header[index] = flags & 0xff;
index = index + 4;
// Write collection name
index = index + header.write(this.ns, index, 'utf8') + 1;
header[index - 1] = 0;
// Write header information flags numberToSkip
header[index + 3] = (this.numberToSkip >> 24) & 0xff;
header[index + 2] = (this.numberToSkip >> 16) & 0xff;
header[index + 1] = (this.numberToSkip >> 8) & 0xff;
header[index] = this.numberToSkip & 0xff;
index = index + 4;
// Write header information flags numberToReturn
header[index + 3] = (this.numberToReturn >> 24) & 0xff;
header[index + 2] = (this.numberToReturn >> 16) & 0xff;
header[index + 1] = (this.numberToReturn >> 8) & 0xff;
header[index] = this.numberToReturn & 0xff;
index = index + 4;
// Return the buffers
return buffers;
}
}
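// Illustrative sketch, not part of the driver: every group of four byte assignments in
// toBin() above writes one little-endian 32-bit integer, equivalent to Buffer#writeInt32LE.
// `writeInt32LEManually` is a hypothetical name.
function writeInt32LEManually(buffer: Buffer, value: number, index: number): void {
  buffer[index + 3] = (value >> 24) & 0xff;
  buffer[index + 2] = (value >> 16) & 0xff;
  buffer[index + 1] = (value >> 8) & 0xff;
  buffer[index] = value & 0xff;
  // Same result as: buffer.writeInt32LE(value, index);
}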
/** @internal */
export interface MessageHeader {
length: number;
requestId: number;
responseTo: number;
opCode: number;
fromCompressed?: boolean;
}
/** @internal */
export class OpReply {
parsed: boolean;
raw: Buffer;
data: Buffer;
opts: BSONSerializeOptions;
length: number;
requestId: number;
responseTo: number;
opCode: number;
fromCompressed?: boolean;
responseFlags?: number;
cursorId?: Long;
startingFrom?: number;
numberReturned?: number;
cursorNotFound?: boolean;
queryFailure?: boolean;
shardConfigStale?: boolean;
awaitCapable?: boolean;
useBigInt64: boolean;
promoteLongs: boolean;
promoteValues: boolean;
promoteBuffers: boolean;
bsonRegExp?: boolean;
index = 0;
sections: Uint8Array[] = [];
/** moreToCome is an OP_MSG only concept */
moreToCome = false;
constructor(
message: Buffer,
msgHeader: MessageHeader,
msgBody: Buffer,
opts?: BSONSerializeOptions
) {
this.parsed = false;
this.raw = message;
this.data = msgBody;
this.opts = opts ?? {
useBigInt64: false,
promoteLongs: true,
promoteValues: true,
promoteBuffers: false,
bsonRegExp: false
};
// Read the message header
this.length = msgHeader.length;
this.requestId = msgHeader.requestId;
this.responseTo = msgHeader.responseTo;
this.opCode = msgHeader.opCode;
this.fromCompressed = msgHeader.fromCompressed;
// Flag values
this.useBigInt64 = typeof this.opts.useBigInt64 === 'boolean' ? this.opts.useBigInt64 : false;
this.promoteLongs = typeof this.opts.promoteLongs === 'boolean' ? this.opts.promoteLongs : true;
this.promoteValues =
typeof this.opts.promoteValues === 'boolean' ? this.opts.promoteValues : true;
this.promoteBuffers =
typeof this.opts.promoteBuffers === 'boolean' ? this.opts.promoteBuffers : false;
this.bsonRegExp = typeof this.opts.bsonRegExp === 'boolean' ? this.opts.bsonRegExp : false;
}
isParsed(): boolean {
return this.parsed;
}
parse(): Uint8Array {
// Don't parse again if not needed
if (this.parsed) return this.sections[0];
// Position within OP_REPLY at which documents start
// (See https://www.mongodb.com/docs/manual/reference/mongodb-wire-protocol/#wire-op-reply)
this.index = 20;
// Read the message body
this.responseFlags = this.data.readInt32LE(0);
this.cursorId = new BSON.Long(this.data.readInt32LE(4), this.data.readInt32LE(8));
this.startingFrom = this.data.readInt32LE(12);
this.numberReturned = this.data.readInt32LE(16);
if (this.numberReturned < 0 || this.numberReturned > 2 ** 32 - 1) {
throw new RangeError(
`OP_REPLY numberReturned is an invalid array length ${this.numberReturned}`
);
}
this.cursorNotFound = (this.responseFlags & CURSOR_NOT_FOUND) !== 0;
this.queryFailure = (this.responseFlags & QUERY_FAILURE) !== 0;
this.shardConfigStale = (this.responseFlags & SHARD_CONFIG_STALE) !== 0;
this.awaitCapable = (this.responseFlags & AWAIT_CAPABLE) !== 0;
// Parse Body
for (let i = 0; i < this.numberReturned; i++) {
const bsonSize =
this.data[this.index] |
(this.data[this.index + 1] << 8) |
(this.data[this.index + 2] << 16) |
(this.data[this.index + 3] << 24);
const section = this.data.subarray(this.index, this.index + bsonSize);
this.sections.push(section);
// Adjust the index
this.index = this.index + bsonSize;
}
// Set parsed
this.parsed = true;
return this.sections[0];
}
}
// Msg Flags
const OPTS_CHECKSUM_PRESENT = 1;
const OPTS_MORE_TO_COME = 2;
const OPTS_EXHAUST_ALLOWED = 1 << 16;
/** @internal */
export interface OpMsgOptions {
socketTimeoutMS?: number;
session?: ClientSession;
numberToSkip?: number;
numberToReturn?: number;
returnFieldSelector?: Document;
pre32Limit?: number;
serializeFunctions?: boolean;
ignoreUndefined?: boolean;
maxBsonSize?: number;
checkKeys?: boolean;
secondaryOk?: boolean;
requestId?: number;
moreToCome?: boolean;
exhaustAllowed?: boolean;
readPreference: ReadPreference;
}
/** @internal */
export class DocumentSequence {
field: string;
documents: Document[];
serializedDocumentsLength: number;
private chunks: Uint8Array[];
private header: Buffer;
/**
* Create a new document sequence for the provided field.
* @param field - The field it will replace.
*/
constructor(field: string, documents?: Document[]) {
this.field = field;
this.documents = [];
this.chunks = [];
this.serializedDocumentsLength = 0;
// A document sequence section starts with payload type 1 in the first byte.
// Field strings must always be UTF-8.
const buffer = Buffer.allocUnsafe(1 + 4 + this.field.length + 1);
buffer[0] = 1;
// Third part is the field name at offset 5 with trailing null byte.
encodeUTF8Into(buffer, `${this.field}\0`, 5);
this.chunks.push(buffer);
this.header = buffer;
if (documents) {
for (const doc of documents) {
this.push(doc, BSON.serialize(doc));
}
}
}
/**
* Push a document and its pre-serialized raw BSON bytes onto the document sequence,
* returning the new total length of the sequence section.
* @param document - The document to add.
* @param buffer - The serialized document in raw BSON.
* @returns The new total document sequence length.
*/
push(document: Document, buffer: Uint8Array): number {
this.serializedDocumentsLength += buffer.length;
// Push the document.
this.documents.push(document);
// Push the document raw bson.
this.chunks.push(buffer);
// Write the new length.
this.header?.writeInt32LE(4 + this.field.length + 1 + this.serializedDocumentsLength, 1);
return this.serializedDocumentsLength + this.header.length;
}
/**
* Get the fully serialized bytes for the document sequence section.
* @returns The section bytes.
*/
toBin(): Uint8Array {
return Buffer.concat(this.chunks);
}
}
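// Illustrative sketch, not part of the driver: building a payload type 1 section for a
// hypothetical 'ops' field. Each pushed document supplies its own raw BSON, and the
// section length stored in the header is updated on every push.
function exampleDocumentSequence(): Uint8Array {
  const sequence = new DocumentSequence('ops');
  const doc = { insert: 0, document: { _id: 1 } };
  sequence.push(doc, BSON.serialize(doc));
  return sequence.toBin(); // [type byte 1][int32 length]['ops\0'][raw BSON documents]
}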
/** @internal */
export class OpMsgRequest {
requestId: number;
serializeFunctions: boolean;
ignoreUndefined: boolean;
checkKeys: boolean;
maxBsonSize: number;
checksumPresent: boolean;
moreToCome: boolean;
exhaustAllowed: boolean;
constructor(
public databaseName: string,
public command: Document,
public options: OpQueryOptions
) {
// Basic options needed to be passed in
if (command == null)
throw new MongoInvalidArgumentError('Query document must be specified for query');
// Basic options
this.command.$db = databaseName;
// Ensure empty options
this.options = options ?? {};
// Additional options
this.requestId = options.requestId ? options.requestId : OpMsgRequest.getRequestId();
// Serialization option
this.serializeFunctions =
typeof options.serializeFunctions === 'boolean' ? options.serializeFunctions : false;
this.ignoreUndefined =
typeof options.ignoreUndefined === 'boolean' ? options.ignoreUndefined : false;
this.checkKeys = typeof options.checkKeys === 'boolean' ? options.checkKeys : false;
this.maxBsonSize = options.maxBsonSize || 1024 * 1024 * 16;
// flags
this.checksumPresent = false;
this.moreToCome = options.moreToCome ?? command.writeConcern?.w === 0;
this.exhaustAllowed =
typeof options.exhaustAllowed === 'boolean' ? options.exhaustAllowed : false;
}
toBin(): Buffer[] {
const buffers: Buffer[] = [];
let flags = 0;
if (this.checksumPresent) {
flags |= OPTS_CHECKSUM_PRESENT;
}
if (this.moreToCome) {
flags |= OPTS_MORE_TO_COME;
}
if (this.exhaustAllowed) {
flags |= OPTS_EXHAUST_ALLOWED;
}
const header = Buffer.alloc(
4 * 4 + // Header
4 // Flags
);
buffers.push(header);
let totalLength = header.length;
const command = this.command;
totalLength += this.makeSections(buffers, command);
header.writeInt32LE(totalLength, 0); // messageLength
header.writeInt32LE(this.requestId, 4); // requestID
header.writeInt32LE(0, 8); // responseTo
header.writeInt32LE(OP_MSG, 12); // opCode
header.writeUInt32LE(flags, 16); // flags
return buffers;
}
/**
* Add the sections to the OP_MSG request's buffers and returns the length.
*/
makeSections(buffers: Uint8Array[], document: Document): number {
const sequencesBuffer = this.extractDocumentSequences(document);
const payloadTypeBuffer = Buffer.allocUnsafe(1);
payloadTypeBuffer[0] = 0;
const documentBuffer = this.serializeBson(document);
// First section, type 0
buffers.push(payloadTypeBuffer);
buffers.push(documentBuffer);
// Subsequent sections, type 1
buffers.push(sequencesBuffer);
return payloadTypeBuffer.length + documentBuffer.length + sequencesBuffer.length;
}
/**
* Extracts the document sequences from the command document and returns
* a buffer to be added as multiple sections after the initial type 0
* section in the message.
*/
extractDocumentSequences(document: Document): Uint8Array {
// Pull out any field in the command document whose value is a document sequence.
const chunks = [];
for (const [key, value] of Object.entries(document)) {
if (value instanceof DocumentSequence) {
chunks.push(value.toBin());
// Why are we removing the field from the command? This is because it needs to be
// removed in the OP_MSG request first section, and DocumentSequence is not a
// BSON type and is specific to the MongoDB wire protocol so there's nothing
// our BSON serializer can do about this. Since DocumentSequence is not exposed
// in the public API and only used internally, we are never mutating an original
// command provided by the user, just our own, and it's cheaper to delete from
// our own command than copying it.
delete document[key];
}
}
if (chunks.length > 0) {
return Buffer.concat(chunks);
}
// If we have no document sequences we return an empty buffer for nothing to add
// to the payload.
return Buffer.alloc(0);
}
serializeBson(document: Document): Uint8Array {
return BSON.serialize(document, {
checkKeys: this.checkKeys,
serializeFunctions: this.serializeFunctions,
ignoreUndefined: this.ignoreUndefined
});
}
static getRequestId(): number {
_requestId = (_requestId + 1) & 0x7fffffff;
return _requestId;
}
}
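// Illustrative sketch, not part of the driver: an unacknowledged write (writeConcern w: 0)
// sets the moreToCome flag, so the server sends no reply. The command and the empty
// options cast below are hypothetical example values.
function exampleFireAndForget(): OpMsgRequest {
  const request = new OpMsgRequest(
    'test',
    { insert: 'coll', documents: [{ _id: 1 }], writeConcern: { w: 0 } },
    {} as OpQueryOptions
  );
  // request.moreToCome === true, so no response is awaited for this message.
  return request;
}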
/** @internal */
export class OpMsgResponse {
parsed: boolean;
raw: Buffer;
data: Buffer;
opts: BSONSerializeOptions;
length: number;
requestId: number;
responseTo: number;
opCode: number;
fromCompressed?: boolean;
responseFlags: number;
checksumPresent: boolean;
/** Indicates the server will be sending more responses on this connection */
moreToCome: boolean;
exhaustAllowed: boolean;
useBigInt64: boolean;
promoteLongs: boolean;
promoteValues: boolean;
promoteBuffers: boolean;
bsonRegExp: boolean;
index = 0;
sections: Uint8Array[] = [];
constructor(
message: Buffer,
msgHeader: MessageHeader,
msgBody: Buffer,
opts?: BSONSerializeOptions
) {
this.parsed = false;
this.raw = message;
this.data = msgBody;
this.opts = opts ?? {
useBigInt64: false,
promoteLongs: true,
promoteValues: true,
promoteBuffers: false,
bsonRegExp: false
};
// Read the message header
this.length = msgHeader.length;
this.requestId = msgHeader.requestId;
this.responseTo = msgHeader.responseTo;
this.opCode = msgHeader.opCode;
this.fromCompressed = msgHeader.fromCompressed;
// Read response flags
this.responseFlags = msgBody.readInt32LE(0);
this.checksumPresent = (this.responseFlags & OPTS_CHECKSUM_PRESENT) !== 0;
this.moreToCome = (this.responseFlags & OPTS_MORE_TO_COME) !== 0;
this.exhaustAllowed = (this.responseFlags & OPTS_EXHAUST_ALLOWED) !== 0;
this.useBigInt64 = typeof this.opts.useBigInt64 === 'boolean' ? this.opts.useBigInt64 : false;
this.promoteLongs = typeof this.opts.promoteLongs === 'boolean' ? this.opts.promoteLongs : true;
this.promoteValues =
typeof this.opts.promoteValues === 'boolean' ? this.opts.promoteValues : true;
this.promoteBuffers =
typeof this.opts.promoteBuffers === 'boolean' ? this.opts.promoteBuffers : false;
this.bsonRegExp = typeof this.opts.bsonRegExp === 'boolean' ? this.opts.bsonRegExp : false;
}
isParsed(): boolean {
return this.parsed;
}
parse(): Uint8Array {
// Don't parse again if not needed
if (this.parsed) return this.sections[0];
this.index = 4;
while (this.index < this.data.length) {
const payloadType = this.data.readUInt8(this.index++);
if (payloadType === 0) {
const bsonSize = this.data.readUInt32LE(this.index);
const bin = this.data.subarray(this.index, this.index + bsonSize);
this.sections.push(bin);
this.index += bsonSize;
} else if (payloadType === 1) {
// It was decided that no driver makes use of payload type 1
// TODO(NODE-3483): Replace with MongoDeprecationError
throw new MongoRuntimeError('OP_MSG Payload Type 1 detected unsupported protocol');
}
}
this.parsed = true;
return this.sections[0];
}
}
const MESSAGE_HEADER_SIZE = 16;
const COMPRESSION_DETAILS_SIZE = 9; // originalOpcode + uncompressedSize, compressorID
/**
* @internal
*
* An OP_COMPRESSED request wraps either an OP_QUERY or OP_MSG message.
*/
export class OpCompressedRequest {
constructor(
private command: WriteProtocolMessageType,
private options: { zlibCompressionLevel: number; agreedCompressor: CompressorName }
) {}
// Returns true if the command can be compressed, i.e. it contains no uncompressible
// command terms.
static canCompress(command: WriteProtocolMessageType) {
const commandDoc = command instanceof OpMsgRequest ? command.command : command.query;
const commandName = Object.keys(commandDoc)[0];
return !uncompressibleCommands.has(commandName);
}
async toBin(): Promise<Buffer[]> {
const concatenatedOriginalCommandBuffer = Buffer.concat(this.command.toBin());
// otherwise, compress the message
const messageToBeCompressed = concatenatedOriginalCommandBuffer.slice(MESSAGE_HEADER_SIZE);
// Extract information needed for OP_COMPRESSED from the uncompressed message
const originalCommandOpCode = concatenatedOriginalCommandBuffer.readInt32LE(12);
// Compress the message body
const compressedMessage = await compress(this.options, messageToBeCompressed);
// Create the msgHeader of OP_COMPRESSED
const msgHeader = Buffer.alloc(MESSAGE_HEADER_SIZE);
msgHeader.writeInt32LE(
MESSAGE_HEADER_SIZE + COMPRESSION_DETAILS_SIZE + compressedMessage.length,
0
); // messageLength
msgHeader.writeInt32LE(this.command.requestId, 4); // requestID
msgHeader.writeInt32LE(0, 8); // responseTo (zero)
msgHeader.writeInt32LE(OP_COMPRESSED, 12); // opCode
// Create the compression details of OP_COMPRESSED
const compressionDetails = Buffer.alloc(COMPRESSION_DETAILS_SIZE);
compressionDetails.writeInt32LE(originalCommandOpCode, 0); // originalOpcode
compressionDetails.writeInt32LE(messageToBeCompressed.length, 4); // Size of the uncompressed compressedMessage, excluding the MsgHeader
compressionDetails.writeUInt8(Compressor[this.options.agreedCompressor], 8); // compressorID
return [msgHeader, compressionDetails, compressedMessage];
}
}

499
backend/node_modules/mongodb/src/cmap/connect.ts generated vendored Normal file
View File

@@ -0,0 +1,499 @@
import type { Socket, SocketConnectOpts } from 'net';
import * as net from 'net';
import type { ConnectionOptions as TLSConnectionOpts, TLSSocket } from 'tls';
import * as tls from 'tls';
import type { Document } from '../bson';
import { LEGACY_HELLO_COMMAND } from '../constants';
import { getSocks, type SocksLib } from '../deps';
import {
MongoCompatibilityError,
MongoError,
MongoErrorLabel,
MongoInvalidArgumentError,
MongoNetworkError,
MongoNetworkTimeoutError,
MongoRuntimeError,
needsRetryableWriteLabel
} from '../error';
import { HostAddress, ns, promiseWithResolvers } from '../utils';
import { AuthContext } from './auth/auth_provider';
import { AuthMechanism } from './auth/providers';
import {
type CommandOptions,
Connection,
type ConnectionOptions,
CryptoConnection
} from './connection';
import {
MAX_SUPPORTED_SERVER_VERSION,
MAX_SUPPORTED_WIRE_VERSION,
MIN_SUPPORTED_SERVER_VERSION,
MIN_SUPPORTED_WIRE_VERSION
} from './wire_protocol/constants';
/** @public */
export type Stream = Socket | TLSSocket;
export async function connect(options: ConnectionOptions): Promise<Connection> {
let connection: Connection | null = null;
try {
const socket = await makeSocket(options);
connection = makeConnection(options, socket);
await performInitialHandshake(connection, options);
return connection;
} catch (error) {
connection?.destroy();
throw error;
}
}
export function makeConnection(options: ConnectionOptions, socket: Stream): Connection {
let ConnectionType = options.connectionType ?? Connection;
if (options.autoEncrypter) {
ConnectionType = CryptoConnection;
}
return new ConnectionType(socket, options);
}
function checkSupportedServer(hello: Document, options: ConnectionOptions) {
const maxWireVersion = Number(hello.maxWireVersion);
const minWireVersion = Number(hello.minWireVersion);
const serverVersionHighEnough =
!Number.isNaN(maxWireVersion) && maxWireVersion >= MIN_SUPPORTED_WIRE_VERSION;
const serverVersionLowEnough =
!Number.isNaN(minWireVersion) && minWireVersion <= MAX_SUPPORTED_WIRE_VERSION;
if (serverVersionHighEnough) {
if (serverVersionLowEnough) {
return null;
}
const message = `Server at ${options.hostAddress} reports minimum wire version ${JSON.stringify(
hello.minWireVersion
)}, but this version of the Node.js Driver requires at most ${MAX_SUPPORTED_WIRE_VERSION} (MongoDB ${MAX_SUPPORTED_SERVER_VERSION})`;
return new MongoCompatibilityError(message);
}
const message = `Server at ${options.hostAddress} reports maximum wire version ${
JSON.stringify(hello.maxWireVersion) ?? 0
}, but this version of the Node.js Driver requires at least ${MIN_SUPPORTED_WIRE_VERSION} (MongoDB ${MIN_SUPPORTED_SERVER_VERSION})`;
return new MongoCompatibilityError(message);
}
export async function performInitialHandshake(
conn: Connection,
options: ConnectionOptions
): Promise<void> {
const credentials = options.credentials;
if (credentials) {
if (
!(credentials.mechanism === AuthMechanism.MONGODB_DEFAULT) &&
!options.authProviders.getOrCreateProvider(
credentials.mechanism,
credentials.mechanismProperties
)
) {
throw new MongoInvalidArgumentError(`AuthMechanism '${credentials.mechanism}' not supported`);
}
}
const authContext = new AuthContext(conn, credentials, options);
conn.authContext = authContext;
const handshakeDoc = await prepareHandshakeDocument(authContext);
// @ts-expect-error: TODO(NODE-5141): The options need to be filtered properly, Connection options differ from Command options
const handshakeOptions: CommandOptions = { ...options, raw: false };
if (typeof options.connectTimeoutMS === 'number') {
// The handshake technically is a monitoring check, so its socket timeout should be connectTimeoutMS
handshakeOptions.socketTimeoutMS = options.connectTimeoutMS;
}
const start = new Date().getTime();
const response = await executeHandshake(handshakeDoc, handshakeOptions);
if (!('isWritablePrimary' in response)) {
// Provide hello-style response document.
response.isWritablePrimary = response[LEGACY_HELLO_COMMAND];
}
if (response.helloOk) {
conn.helloOk = true;
}
const supportedServerErr = checkSupportedServer(response, options);
if (supportedServerErr) {
throw supportedServerErr;
}
if (options.loadBalanced) {
if (!response.serviceId) {
throw new MongoCompatibilityError(
'Driver attempted to initialize in load balancing mode, ' +
'but the server does not support this mode.'
);
}
}
// NOTE: This is metadata attached to the connection while porting away from
// handshake being done in the `Server` class. Likely, it should be
// relocated, or at very least restructured.
conn.hello = response;
conn.lastHelloMS = new Date().getTime() - start;
if (!response.arbiterOnly && credentials) {
// store the response on auth context
authContext.response = response;
const resolvedCredentials = credentials.resolveAuthMechanism(response);
const provider = options.authProviders.getOrCreateProvider(
resolvedCredentials.mechanism,
resolvedCredentials.mechanismProperties
);
if (!provider) {
throw new MongoInvalidArgumentError(
`No AuthProvider for ${resolvedCredentials.mechanism} defined.`
);
}
try {
await provider.auth(authContext);
} catch (error) {
if (error instanceof MongoError) {
error.addErrorLabel(MongoErrorLabel.HandshakeError);
if (needsRetryableWriteLabel(error, response.maxWireVersion, conn.description.type)) {
error.addErrorLabel(MongoErrorLabel.RetryableWriteError);
}
}
throw error;
}
}
// Connection establishment is socket creation (tcp handshake, tls handshake, MongoDB handshake (saslStart, saslContinue))
// Once connection is established, command logging can log events (if enabled)
conn.established = true;
async function executeHandshake(handshakeDoc: Document, handshakeOptions: CommandOptions) {
try {
const handshakeResponse = await conn.command(
ns('admin.$cmd'),
handshakeDoc,
handshakeOptions
);
return handshakeResponse;
} catch (error) {
if (error instanceof MongoError) {
error.addErrorLabel(MongoErrorLabel.HandshakeError);
}
throw error;
}
}
}
/**
* HandshakeDocument used during authentication.
* @internal
*/
export interface HandshakeDocument extends Document {
/**
* @deprecated Use hello instead
*/
ismaster?: boolean;
hello?: boolean;
helloOk?: boolean;
client: Document;
compression: string[];
saslSupportedMechs?: string;
loadBalanced?: boolean;
}
/**
* @internal
*
* This function is only exposed for testing purposes.
*/
export async function prepareHandshakeDocument(
authContext: AuthContext
): Promise<HandshakeDocument> {
const options = authContext.options;
const compressors = options.compressors ? options.compressors : [];
const { serverApi } = authContext.connection;
const clientMetadata: Document = await options.extendedMetadata;
const handshakeDoc: HandshakeDocument = {
[serverApi?.version || options.loadBalanced === true ? 'hello' : LEGACY_HELLO_COMMAND]: 1,
helloOk: true,
client: clientMetadata,
compression: compressors
};
if (options.loadBalanced === true) {
handshakeDoc.loadBalanced = true;
}
const credentials = authContext.credentials;
if (credentials) {
if (credentials.mechanism === AuthMechanism.MONGODB_DEFAULT && credentials.username) {
handshakeDoc.saslSupportedMechs = `${credentials.source}.${credentials.username}`;
const provider = authContext.options.authProviders.getOrCreateProvider(
AuthMechanism.MONGODB_SCRAM_SHA256,
credentials.mechanismProperties
);
if (!provider) {
// This auth mechanism is always present.
throw new MongoInvalidArgumentError(
`No AuthProvider for ${AuthMechanism.MONGODB_SCRAM_SHA256} defined.`
);
}
return await provider.prepare(handshakeDoc, authContext);
}
const provider = authContext.options.authProviders.getOrCreateProvider(
credentials.mechanism,
credentials.mechanismProperties
);
if (!provider) {
throw new MongoInvalidArgumentError(`No AuthProvider for ${credentials.mechanism} defined.`);
}
return await provider.prepare(handshakeDoc, authContext);
}
return handshakeDoc;
}
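// Illustrative sketch, not part of the driver: the shape of the handshake document built
// above for a connection without credentials, Server API version, or load balancing.
// The client metadata and compressor values shown are made-up examples.
const exampleHandshakeDocument: HandshakeDocument = {
  [LEGACY_HELLO_COMMAND]: 1,
  helloOk: true,
  client: { driver: { name: 'nodejs', version: 'x.y.z' }, os: { type: 'Linux' } },
  compression: ['zstd', 'zlib']
};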
/** @public */
export const LEGAL_TLS_SOCKET_OPTIONS = [
'allowPartialTrustChain',
'ALPNProtocols',
'ca',
'cert',
'checkServerIdentity',
'ciphers',
'crl',
'ecdhCurve',
'key',
'minDHSize',
'passphrase',
'pfx',
'rejectUnauthorized',
'secureContext',
'secureProtocol',
'servername',
'session'
] as const;
/** @public */
export const LEGAL_TCP_SOCKET_OPTIONS = [
'autoSelectFamily',
'autoSelectFamilyAttemptTimeout',
'family',
'hints',
'localAddress',
'localPort',
'lookup'
] as const;
function parseConnectOptions(options: ConnectionOptions): SocketConnectOpts {
const hostAddress = options.hostAddress;
if (!hostAddress) throw new MongoInvalidArgumentError('Option "hostAddress" is required');
const result: Partial<net.TcpNetConnectOpts & net.IpcNetConnectOpts> = {};
for (const name of LEGAL_TCP_SOCKET_OPTIONS) {
if (options[name] != null) {
(result as Document)[name] = options[name];
}
}
if (typeof hostAddress.socketPath === 'string') {
result.path = hostAddress.socketPath;
return result as net.IpcNetConnectOpts;
} else if (typeof hostAddress.host === 'string') {
result.host = hostAddress.host;
result.port = hostAddress.port;
return result as net.TcpNetConnectOpts;
} else {
// This should never happen since we set up HostAddresses
// But if we don't throw here the socket could hang until timeout
// TODO(NODE-3483)
throw new MongoRuntimeError(`Unexpected HostAddress ${JSON.stringify(hostAddress)}`);
}
}
type MakeConnectionOptions = ConnectionOptions & { existingSocket?: Stream };
function parseSslOptions(options: MakeConnectionOptions): TLSConnectionOpts {
const result: TLSConnectionOpts = parseConnectOptions(options);
// Merge in valid SSL options
for (const name of LEGAL_TLS_SOCKET_OPTIONS) {
if (options[name] != null) {
(result as Document)[name] = options[name];
}
}
if (options.existingSocket) {
result.socket = options.existingSocket;
}
// Set default sni servername to be the same as host
if (result.servername == null && result.host && !net.isIP(result.host)) {
result.servername = result.host;
}
return result;
}
export async function makeSocket(options: MakeConnectionOptions): Promise<Stream> {
const useTLS = options.tls ?? false;
const noDelay = options.noDelay ?? true;
const connectTimeoutMS = options.connectTimeoutMS ?? 30000;
const existingSocket = options.existingSocket;
let socket: Stream;
if (options.proxyHost != null) {
// Currently, only Socks5 is supported.
return await makeSocks5Connection({
...options,
connectTimeoutMS // Should always be present for Socks5
});
}
if (useTLS) {
const tlsSocket = tls.connect(parseSslOptions(options));
if (typeof tlsSocket.disableRenegotiation === 'function') {
tlsSocket.disableRenegotiation();
}
socket = tlsSocket;
} else if (existingSocket) {
// In the TLS case, parseSslOptions() sets options.socket to existingSocket,
// so we only need to handle the non-TLS case here (where existingSocket
// gives us all we need out of the box).
socket = existingSocket;
} else {
socket = net.createConnection(parseConnectOptions(options));
}
socket.setKeepAlive(true, 300000);
socket.setTimeout(connectTimeoutMS);
socket.setNoDelay(noDelay);
let cancellationHandler: ((err: Error) => void) | null = null;
const { promise: connectedSocket, resolve, reject } = promiseWithResolvers<Stream>();
if (existingSocket) {
resolve(socket);
} else {
const start = performance.now();
const connectEvent = useTLS ? 'secureConnect' : 'connect';
socket
.once(connectEvent, () => resolve(socket))
.once('error', cause =>
reject(new MongoNetworkError(MongoError.buildErrorMessage(cause), { cause }))
)
.once('timeout', () => {
reject(
new MongoNetworkTimeoutError(
`Socket '${connectEvent}' timed out after ${(performance.now() - start) | 0}ms (connectTimeoutMS: ${connectTimeoutMS})`
)
);
})
.once('close', () =>
reject(
new MongoNetworkError(
`Socket closed after ${(performance.now() - start) | 0}ms during connection establishment`
)
)
);
if (options.cancellationToken != null) {
cancellationHandler = () =>
reject(
new MongoNetworkError(
`Socket connection establishment was cancelled after ${(performance.now() - start) | 0}ms`
)
);
options.cancellationToken.once('cancel', cancellationHandler);
}
}
try {
socket = await connectedSocket;
return socket;
} catch (error) {
socket.destroy();
throw error;
} finally {
socket.setTimeout(0);
socket.removeAllListeners();
if (cancellationHandler != null) {
options.cancellationToken?.removeListener('cancel', cancellationHandler);
}
}
}
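// --- Illustrative sketch (not driver code) ---------------------------------------------------
// The same "first event wins" pattern makeSocket uses above, reduced to built-in Node.js APIs:
// whichever of 'connect', 'timeout' or 'error' fires first settles the promise, and the
// establishment listeners and timeout are cleared once the socket is handed to the caller.
// The host, port and timeout values are placeholders, not values taken from this file.
import * as exampleNet from 'net';

async function connectOnce(host: string, port: number, timeoutMS: number): Promise<exampleNet.Socket> {
  const sock = exampleNet.createConnection({ host, port });
  sock.setTimeout(timeoutMS);
  try {
    await new Promise<void>((resolve, reject) => {
      sock.once('connect', resolve);
      sock.once('timeout', () => reject(new Error(`connect timed out after ${timeoutMS}ms`)));
      sock.once('error', reject);
    });
    return sock;
  } catch (error) {
    sock.destroy();
    throw error;
  } finally {
    sock.setTimeout(0);
    sock.removeAllListeners(); // establishment listeners are one-shot, as in makeSocket above
  }
}
// usage (placeholder endpoint): const socket = await connectOnce('localhost', 27017, 30_000);
// ----------------------------------------------------------------------------------------------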
let socks: SocksLib | null = null;
function loadSocks() {
if (socks == null) {
const socksImport = getSocks();
if ('kModuleError' in socksImport) {
throw socksImport.kModuleError;
}
socks = socksImport;
}
return socks;
}
async function makeSocks5Connection(options: MakeConnectionOptions): Promise<Stream> {
const hostAddress = HostAddress.fromHostPort(
options.proxyHost ?? '', // proxyHost is guaranteed to be set here
options.proxyPort ?? 1080
);
// First, connect to the proxy server itself:
const rawSocket = await makeSocket({
...options,
hostAddress,
tls: false,
proxyHost: undefined
});
const destination = parseConnectOptions(options) as net.TcpNetConnectOpts;
if (typeof destination.host !== 'string' || typeof destination.port !== 'number') {
throw new MongoInvalidArgumentError('Can only make Socks5 connections to TCP hosts');
}
socks ??= loadSocks();
let existingSocket: Stream;
try {
// Then, establish the Socks5 proxy connection:
const connection = await socks.SocksClient.createConnection({
existing_socket: rawSocket,
timeout: options.connectTimeoutMS,
command: 'connect',
destination: {
host: destination.host,
port: destination.port
},
proxy: {
// host and port are ignored because we pass existing_socket
host: 'iLoveJavaScript',
port: 0,
type: 5,
userId: options.proxyUsername || undefined,
password: options.proxyPassword || undefined
}
});
existingSocket = connection.socket;
} catch (cause) {
throw new MongoNetworkError(MongoError.buildErrorMessage(cause), { cause });
}
// Finally, now treat the resulting duplex stream as the
// socket over which we send and receive wire protocol messages:
return await makeSocket({ ...options, existingSocket, proxyHost: undefined });
}
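// --- Illustrative sketch (not driver code) ---------------------------------------------------
// The two-hop idea above, outside the driver: first ask the `socks` package to establish a
// Socks5 tunnel, then run TLS over the returned socket by passing it to tls.connect(). The
// proxy address, destination host and port below are placeholders.
import * as exampleTls from 'tls';
import { SocksClient } from 'socks';

async function tlsThroughSocks5(): Promise<exampleTls.TLSSocket> {
  const { socket } = await SocksClient.createConnection({
    proxy: { host: '127.0.0.1', port: 1080, type: 5 }, // placeholder Socks5 proxy
    command: 'connect',
    destination: { host: 'example.com', port: 443 }    // placeholder destination
  });
  // Reuse the proxied socket as the transport for the TLS handshake, keeping SNI explicit.
  return exampleTls.connect({ socket, servername: 'example.com' });
}
// ----------------------------------------------------------------------------------------------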

902
backend/node_modules/mongodb/src/cmap/connection.ts generated vendored Normal file
View File

@@ -0,0 +1,902 @@
import { type Readable, Transform, type TransformCallback } from 'stream';
import { clearTimeout, setTimeout } from 'timers';
import {
type BSONSerializeOptions,
deserialize,
type DeserializeOptions,
type Document,
type ObjectId
} from '../bson';
import { type AutoEncrypter } from '../client-side-encryption/auto_encrypter';
import {
CLOSE,
CLUSTER_TIME_RECEIVED,
COMMAND_FAILED,
COMMAND_STARTED,
COMMAND_SUCCEEDED,
kDecorateResult,
PINNED,
UNPINNED
} from '../constants';
import {
MongoCompatibilityError,
MONGODB_ERROR_CODES,
MongoMissingDependencyError,
MongoNetworkError,
MongoNetworkTimeoutError,
MongoOperationTimeoutError,
MongoParseError,
MongoServerError,
MongoUnexpectedServerResponseError
} from '../error';
import type { ServerApi, SupportedNodeConnectionOptions } from '../mongo_client';
import { type MongoClientAuthProviders } from '../mongo_client_auth_providers';
import { MongoLoggableComponent, type MongoLogger, SeverityLevel } from '../mongo_logger';
import { type CancellationToken, TypedEventEmitter } from '../mongo_types';
import { ReadPreference, type ReadPreferenceLike } from '../read_preference';
import { ServerType } from '../sdam/common';
import { applySession, type ClientSession, updateSessionFromResponse } from '../sessions';
import { type TimeoutContext, TimeoutError } from '../timeout';
import {
BufferPool,
calculateDurationInMs,
type Callback,
decorateDecryptionResult,
HostAddress,
maxWireVersion,
type MongoDBNamespace,
now,
once,
squashError,
uuidV4
} from '../utils';
import type { WriteConcern } from '../write_concern';
import type { AuthContext } from './auth/auth_provider';
import type { MongoCredentials } from './auth/mongo_credentials';
import {
CommandFailedEvent,
CommandStartedEvent,
CommandSucceededEvent
} from './command_monitoring_events';
import {
OpCompressedRequest,
OpMsgRequest,
type OpMsgResponse,
OpQueryRequest,
type OpReply,
type WriteProtocolMessageType
} from './commands';
import type { Stream } from './connect';
import type { ClientMetadata } from './handshake/client_metadata';
import { StreamDescription, type StreamDescriptionOptions } from './stream_description';
import { type CompressorName, decompressResponse } from './wire_protocol/compression';
import { onData } from './wire_protocol/on_data';
import {
CursorResponse,
MongoDBResponse,
type MongoDBResponseConstructor
} from './wire_protocol/responses';
import { getReadPreference, isSharded } from './wire_protocol/shared';
/** @internal */
export interface CommandOptions extends BSONSerializeOptions {
secondaryOk?: boolean;
/** Specify read preference if command supports it */
readPreference?: ReadPreferenceLike;
monitoring?: boolean;
socketTimeoutMS?: number;
/** Session to use for the operation */
session?: ClientSession;
documentsReturnedIn?: string;
noResponse?: boolean;
omitReadPreference?: boolean;
omitMaxTimeMS?: boolean;
// TODO(NODE-2802): Currently the CommandOptions take a property willRetryWrite which is a hint
// from executeOperation that the txnNum should be applied to this command.
// Applying a session to a command should happen as part of command construction,
// most likely in the CommandOperation#executeCommand method, where we have access to
// the details we need to determine if a txnNum should also be applied.
willRetryWrite?: boolean;
writeConcern?: WriteConcern;
directConnection?: boolean;
/** @internal */
timeoutContext?: TimeoutContext;
}
/** @public */
export interface ProxyOptions {
proxyHost?: string;
proxyPort?: number;
proxyUsername?: string;
proxyPassword?: string;
}
/** @public */
export interface ConnectionOptions
extends SupportedNodeConnectionOptions,
StreamDescriptionOptions,
ProxyOptions {
// Internal creation info
id: number | '<monitor>';
generation: number;
hostAddress: HostAddress;
/** @internal */
autoEncrypter?: AutoEncrypter;
serverApi?: ServerApi;
monitorCommands: boolean;
/** @internal */
connectionType?: any;
credentials?: MongoCredentials;
/** @internal */
authProviders: MongoClientAuthProviders;
connectTimeoutMS?: number;
tls: boolean;
noDelay?: boolean;
socketTimeoutMS?: number;
cancellationToken?: CancellationToken;
metadata: ClientMetadata;
/** @internal */
extendedMetadata: Promise<Document>;
/** @internal */
mongoLogger?: MongoLogger | undefined;
}
/** @public */
export type ConnectionEvents = {
commandStarted(event: CommandStartedEvent): void;
commandSucceeded(event: CommandSucceededEvent): void;
commandFailed(event: CommandFailedEvent): void;
clusterTimeReceived(clusterTime: Document): void;
close(): void;
pinned(pinType: string): void;
unpinned(pinType: string): void;
};
/** @internal */
export function hasSessionSupport(conn: Connection): boolean {
const description = conn.description;
return description.logicalSessionTimeoutMinutes != null;
}
function streamIdentifier(stream: Stream, options: ConnectionOptions): string {
if (options.proxyHost) {
// If proxy options are specified, the properties of `stream` itself
// will not accurately reflect what endpoint this is connected to.
return options.hostAddress.toString();
}
const { remoteAddress, remotePort } = stream;
if (typeof remoteAddress === 'string' && typeof remotePort === 'number') {
return HostAddress.fromHostPort(remoteAddress, remotePort).toString();
}
return uuidV4().toString('hex');
}
/** @internal */
export class Connection extends TypedEventEmitter<ConnectionEvents> {
public id: number | '<monitor>';
public address: string;
public lastHelloMS = -1;
public serverApi?: ServerApi;
public helloOk = false;
public authContext?: AuthContext;
public delayedTimeoutId: NodeJS.Timeout | null = null;
public generation: number;
public accessToken?: string;
public readonly description: Readonly<StreamDescription>;
/**
* Indicates whether the connection has been established:
* - TCP handshake
* - TLS negotiated
* - MongoDB handshake (saslStart, saslContinue), includes authentication
*
* Once the connection is established, command logging can log events (if enabled)
*/
public established: boolean;
/** Indicates that the connection (including underlying TCP socket) has been closed. */
public closed = false;
private lastUseTime: number;
private clusterTime: Document | null = null;
private error: Error | null = null;
private dataEvents: AsyncGenerator<Buffer, void, void> | null = null;
private readonly socketTimeoutMS: number;
private readonly monitorCommands: boolean;
private readonly socket: Stream;
private readonly messageStream: Readable;
/** @event */
static readonly COMMAND_STARTED = COMMAND_STARTED;
/** @event */
static readonly COMMAND_SUCCEEDED = COMMAND_SUCCEEDED;
/** @event */
static readonly COMMAND_FAILED = COMMAND_FAILED;
/** @event */
static readonly CLUSTER_TIME_RECEIVED = CLUSTER_TIME_RECEIVED;
/** @event */
static readonly CLOSE = CLOSE;
/** @event */
static readonly PINNED = PINNED;
/** @event */
static readonly UNPINNED = UNPINNED;
constructor(stream: Stream, options: ConnectionOptions) {
super();
this.socket = stream;
this.id = options.id;
this.address = streamIdentifier(stream, options);
this.socketTimeoutMS = options.socketTimeoutMS ?? 0;
this.monitorCommands = options.monitorCommands;
this.serverApi = options.serverApi;
this.mongoLogger = options.mongoLogger;
this.established = false;
this.description = new StreamDescription(this.address, options);
this.generation = options.generation;
this.lastUseTime = now();
this.messageStream = this.socket
.on('error', this.onError.bind(this))
.pipe(new SizedMessageTransform({ connection: this }))
.on('error', this.onError.bind(this));
this.socket.on('close', this.onClose.bind(this));
this.socket.on('timeout', this.onTimeout.bind(this));
this.messageStream.pause();
}
public get hello() {
return this.description.hello;
}
// the `connect` method stores the result of the handshake hello on the connection
public set hello(response: Document | null) {
this.description.receiveResponse(response);
Object.freeze(this.description);
}
public get serviceId(): ObjectId | undefined {
return this.hello?.serviceId;
}
public get loadBalanced(): boolean {
return this.description.loadBalanced;
}
public get idleTime(): number {
return calculateDurationInMs(this.lastUseTime);
}
private get hasSessionSupport(): boolean {
return this.description.logicalSessionTimeoutMinutes != null;
}
private get supportsOpMsg(): boolean {
return (
this.description != null &&
maxWireVersion(this) >= 6 &&
!this.description.__nodejs_mock_server__
);
}
private get shouldEmitAndLogCommand(): boolean {
return (
(this.monitorCommands ||
(this.established &&
!this.authContext?.reauthenticating &&
this.mongoLogger?.willLog(MongoLoggableComponent.COMMAND, SeverityLevel.DEBUG))) ??
false
);
}
public markAvailable(): void {
this.lastUseTime = now();
}
public onError(error: Error) {
this.cleanup(error);
}
private onClose() {
const message = `connection ${this.id} to ${this.address} closed`;
this.cleanup(new MongoNetworkError(message));
}
private onTimeout() {
this.delayedTimeoutId = setTimeout(() => {
const message = `connection ${this.id} to ${this.address} timed out`;
const beforeHandshake = this.hello == null;
this.cleanup(new MongoNetworkTimeoutError(message, { beforeHandshake }));
}, 1).unref(); // No need for this timer to hold the event loop open
}
public destroy(): void {
if (this.closed) {
return;
}
// load balanced mode requires that these listeners remain on the connection
// after cleanup on timeouts, errors or close so we remove them before calling
// cleanup.
this.removeAllListeners(Connection.PINNED);
this.removeAllListeners(Connection.UNPINNED);
const message = `connection ${this.id} to ${this.address} closed`;
this.cleanup(new MongoNetworkError(message));
}
/**
* Cleans up the connection: it destroys the underlying socket, marks the connection
* closed, and fails any in-flight operations with the provided error.
*
* This method does nothing if the connection is already closed.
*/
private cleanup(error: Error): void {
if (this.closed) {
return;
}
this.socket.destroy();
this.error = error;
this.dataEvents?.throw(error).then(undefined, squashError);
this.closed = true;
this.emit(Connection.CLOSE);
}
private prepareCommand(db: string, command: Document, options: CommandOptions) {
let cmd = { ...command };
const readPreference = getReadPreference(options);
const session = options?.session;
let clusterTime = this.clusterTime;
if (this.serverApi) {
const { version, strict, deprecationErrors } = this.serverApi;
cmd.apiVersion = version;
if (strict != null) cmd.apiStrict = strict;
if (deprecationErrors != null) cmd.apiDeprecationErrors = deprecationErrors;
}
if (this.hasSessionSupport && session) {
if (
session.clusterTime &&
clusterTime &&
session.clusterTime.clusterTime.greaterThan(clusterTime.clusterTime)
) {
clusterTime = session.clusterTime;
}
const sessionError = applySession(session, cmd, options);
if (sessionError) throw sessionError;
} else if (session?.explicit) {
throw new MongoCompatibilityError('Current topology does not support sessions');
}
// if we have a known cluster time, gossip it
if (clusterTime) {
cmd.$clusterTime = clusterTime;
}
// For standalone, drivers MUST NOT set $readPreference.
if (this.description.type !== ServerType.Standalone) {
if (
!isSharded(this) &&
!this.description.loadBalanced &&
this.supportsOpMsg &&
options.directConnection === true &&
readPreference?.mode === 'primary'
) {
// For mongos and load balancers with 'primary' mode, drivers MUST NOT set $readPreference.
// For all other types with a direct connection, if the read preference is 'primary'
// (driver sets 'primary' as default if no read preference is configured),
// the $readPreference MUST be set to 'primaryPreferred'
// to ensure that any server type can handle the request.
cmd.$readPreference = ReadPreference.primaryPreferred.toJSON();
} else if (isSharded(this) && !this.supportsOpMsg && readPreference?.mode !== 'primary') {
// When sending a read operation via OP_QUERY and the $readPreference modifier,
// the query MUST be provided using the $query modifier.
cmd = {
$query: cmd,
$readPreference: readPreference.toJSON()
};
} else if (readPreference?.mode !== 'primary') {
// For mode 'primary', drivers MUST NOT set $readPreference.
// For all other read preference modes (i.e. 'secondary', 'primaryPreferred', ...),
// drivers MUST set $readPreference
cmd.$readPreference = readPreference.toJSON();
}
}
const commandOptions = {
numberToSkip: 0,
numberToReturn: -1,
checkKeys: false,
// This value is not overridable
secondaryOk: readPreference.secondaryOk(),
...options
};
options.timeoutContext?.addMaxTimeMSToCommand(cmd, options);
const message = this.supportsOpMsg
? new OpMsgRequest(db, cmd, commandOptions)
: new OpQueryRequest(db, cmd, commandOptions);
return message;
}
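// --- Illustrative sketch (not driver code) ---------------------------------------------------
// A reduced model of the read-preference decoration above, using plain objects. It covers only
// the two non-'primary' branches (legacy OP_QUERY to mongos vs. attaching $readPreference
// directly) and omits the standalone / directConnection special cases handled by prepareCommand.
// The command and mode values are placeholders.
type ExampleDoc = Record<string, unknown>;

function decorateReadPreference(
  cmd: ExampleDoc,
  mode: string,
  topology: { sharded: boolean; supportsOpMsg: boolean }
): ExampleDoc {
  if (mode === 'primary') return cmd; // drivers MUST NOT send $readPreference for 'primary'
  if (topology.sharded && !topology.supportsOpMsg) {
    // Legacy OP_QUERY to mongos: the query itself moves under $query.
    return { $query: cmd, $readPreference: { mode } };
  }
  // Otherwise the modifier rides alongside the command fields.
  return { ...cmd, $readPreference: { mode } };
}

// decorateReadPreference({ find: 'coll' }, 'secondaryPreferred', { sharded: true, supportsOpMsg: false })
// -> { $query: { find: 'coll' }, $readPreference: { mode: 'secondaryPreferred' } }
// ----------------------------------------------------------------------------------------------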
private async *sendWire(
message: WriteProtocolMessageType,
options: CommandOptions,
responseType?: MongoDBResponseConstructor
): AsyncGenerator<MongoDBResponse> {
this.throwIfAborted();
const timeout =
options.socketTimeoutMS ??
options?.timeoutContext?.getSocketTimeoutMS() ??
this.socketTimeoutMS;
this.socket.setTimeout(timeout);
try {
await this.writeCommand(message, {
agreedCompressor: this.description.compressor ?? 'none',
zlibCompressionLevel: this.description.zlibCompressionLevel,
timeoutContext: options.timeoutContext
});
if (options.noResponse || message.moreToCome) {
yield MongoDBResponse.empty;
return;
}
this.throwIfAborted();
if (
options.timeoutContext?.csotEnabled() &&
options.timeoutContext.minRoundTripTime != null &&
options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime
) {
throw new MongoOperationTimeoutError(
'Server roundtrip time is greater than the time remaining'
);
}
for await (const response of this.readMany({ timeoutContext: options.timeoutContext })) {
this.socket.setTimeout(0);
const bson = response.parse();
const document = (responseType ?? MongoDBResponse).make(bson);
yield document;
this.throwIfAborted();
this.socket.setTimeout(timeout);
}
} finally {
this.socket.setTimeout(0);
}
}
private async *sendCommand(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions,
responseType?: MongoDBResponseConstructor
) {
const message = this.prepareCommand(ns.db, command, options);
let started = 0;
if (this.shouldEmitAndLogCommand) {
started = now();
this.emitAndLogCommand(
this.monitorCommands,
Connection.COMMAND_STARTED,
message.databaseName,
this.established,
new CommandStartedEvent(this, message, this.description.serverConnectionId)
);
}
// If `documentsReturnedIn` is not set or raw is not enabled, use input bson options
// Otherwise, support raw flag. Raw only works for cursors that hardcode firstBatch/nextBatch fields
const bsonOptions: DeserializeOptions =
options.documentsReturnedIn == null || !options.raw
? options
: {
...options,
raw: false,
fieldsAsRaw: { [options.documentsReturnedIn]: true }
};
/** MongoDBResponse instance or subclass */
let document: MongoDBResponse | undefined = undefined;
/** Cached result of a toObject call */
let object: Document | undefined = undefined;
try {
this.throwIfAborted();
for await (document of this.sendWire(message, options, responseType)) {
object = undefined;
if (options.session != null) {
updateSessionFromResponse(options.session, document);
}
if (document.$clusterTime) {
this.clusterTime = document.$clusterTime;
this.emit(Connection.CLUSTER_TIME_RECEIVED, document.$clusterTime);
}
if (document.ok === 0) {
if (options.timeoutContext?.csotEnabled() && document.isMaxTimeExpiredError) {
throw new MongoOperationTimeoutError('Server reported a timeout error', {
cause: new MongoServerError((object ??= document.toObject(bsonOptions)))
});
}
throw new MongoServerError((object ??= document.toObject(bsonOptions)));
}
if (this.shouldEmitAndLogCommand) {
this.emitAndLogCommand(
this.monitorCommands,
Connection.COMMAND_SUCCEEDED,
message.databaseName,
this.established,
new CommandSucceededEvent(
this,
message,
options.noResponse
? undefined
: message.moreToCome
? { ok: 1 }
: (object ??= document.toObject(bsonOptions)),
started,
this.description.serverConnectionId
)
);
}
if (responseType == null) {
yield (object ??= document.toObject(bsonOptions));
} else {
yield document;
}
this.throwIfAborted();
}
} catch (error) {
if (this.shouldEmitAndLogCommand) {
this.emitAndLogCommand(
this.monitorCommands,
Connection.COMMAND_FAILED,
message.databaseName,
this.established,
new CommandFailedEvent(this, message, error, started, this.description.serverConnectionId)
);
}
throw error;
}
}
public async command<T extends MongoDBResponseConstructor>(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions | undefined,
responseType: T
): Promise<InstanceType<T>>;
public async command<T extends MongoDBResponseConstructor>(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions | undefined,
responseType: T | undefined
): Promise<typeof responseType extends undefined ? Document : InstanceType<T>>;
public async command(
ns: MongoDBNamespace,
command: Document,
options?: CommandOptions
): Promise<Document>;
public async command(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions = {},
responseType?: MongoDBResponseConstructor
): Promise<Document> {
this.throwIfAborted();
for await (const document of this.sendCommand(ns, command, options, responseType)) {
if (options.timeoutContext?.csotEnabled()) {
if (MongoDBResponse.is(document)) {
if (document.isMaxTimeExpiredError) {
throw new MongoOperationTimeoutError('Server reported a timeout error', {
cause: new MongoServerError(document.toObject())
});
}
} else {
if (
(Array.isArray(document?.writeErrors) &&
document.writeErrors.some(
error => error?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired
)) ||
document?.writeConcernError?.code === MONGODB_ERROR_CODES.MaxTimeMSExpired
) {
throw new MongoOperationTimeoutError('Server reported a timeout error', {
cause: new MongoServerError(document)
});
}
}
}
return document;
}
throw new MongoUnexpectedServerResponseError('Unable to get response from server');
}
public exhaustCommand(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions,
replyListener: Callback
) {
const exhaustLoop = async () => {
this.throwIfAborted();
for await (const reply of this.sendCommand(ns, command, options)) {
replyListener(undefined, reply);
this.throwIfAborted();
}
throw new MongoUnexpectedServerResponseError('Server ended moreToCome unexpectedly');
};
exhaustLoop().then(undefined, replyListener);
}
private throwIfAborted() {
if (this.error) throw this.error;
}
/**
* @internal
*
* Writes an OP_MSG or OP_QUERY request to the socket, optionally compressing the command. If the
* socket's buffer is full, this method waits until it has drained (the Node.js socket `drain` event has fired).
*/
private async writeCommand(
command: WriteProtocolMessageType,
options: {
agreedCompressor?: CompressorName;
zlibCompressionLevel?: number;
timeoutContext?: TimeoutContext;
}
): Promise<void> {
const finalCommand =
options.agreedCompressor === 'none' || !OpCompressedRequest.canCompress(command)
? command
: new OpCompressedRequest(command, {
agreedCompressor: options.agreedCompressor ?? 'none',
zlibCompressionLevel: options.zlibCompressionLevel ?? 0
});
const buffer = Buffer.concat(await finalCommand.toBin());
if (options.timeoutContext?.csotEnabled()) {
if (
options.timeoutContext.minRoundTripTime != null &&
options.timeoutContext.remainingTimeMS < options.timeoutContext.minRoundTripTime
) {
throw new MongoOperationTimeoutError(
'Server roundtrip time is greater than the time remaining'
);
}
}
if (this.socket.write(buffer)) return;
const drainEvent = once<void>(this.socket, 'drain');
const timeout = options?.timeoutContext?.timeoutForSocketWrite;
if (timeout) {
try {
return await Promise.race([drainEvent, timeout]);
} catch (error) {
let err = error;
if (TimeoutError.is(error)) {
err = new MongoOperationTimeoutError('Timed out at socket write');
this.cleanup(err);
}
throw err;
} finally {
timeout.clear();
}
}
return await drainEvent;
}
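// --- Illustrative sketch (not driver code) ---------------------------------------------------
// The backpressure rule writeCommand relies on: socket.write() returns false when the stream's
// internal buffer is full, and the writer should then wait for the 'drain' event before treating
// the bytes as flushed. This standalone helper uses only built-in Node.js modules; the `once`
// here is Node's events helper, not the driver utility of the same name imported above.
import { once as onceEvent } from 'events';
import type { Writable as ExampleWritable } from 'stream';

async function writeWithBackpressure(stream: ExampleWritable, chunk: Buffer): Promise<void> {
  if (stream.write(chunk)) return;  // fully buffered by the stream, nothing to wait for
  await onceEvent(stream, 'drain'); // otherwise wait until the internal buffer empties
}
// ----------------------------------------------------------------------------------------------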
/**
* @internal
*
* Returns an async generator that yields full wire protocol messages from the underlying socket. This function
* yields messages until `moreToCome` is false or not present in a response, or the caller cancels the request
* by calling `return` on the generator.
*
* Note that `for-await` loops call `return` automatically when the loop is exited.
*/
private async *readMany(options: {
timeoutContext?: TimeoutContext;
}): AsyncGenerator<OpMsgResponse | OpReply> {
try {
this.dataEvents = onData(this.messageStream, options);
this.messageStream.resume();
for await (const message of this.dataEvents) {
const response = await decompressResponse(message);
yield response;
if (!response.moreToCome) {
return;
}
}
} catch (readError) {
if (TimeoutError.is(readError)) {
const error = new MongoOperationTimeoutError(
`Timed out during socket read (${readError.duration}ms)`
);
this.dataEvents = null;
this.onError(error);
throw error;
}
throw readError;
} finally {
this.dataEvents = null;
this.messageStream.pause();
this.throwIfAborted();
}
}
}
/** @internal */
export class SizedMessageTransform extends Transform {
bufferPool: BufferPool;
connection: Connection;
constructor({ connection }: { connection: Connection }) {
super({ writableObjectMode: false, readableObjectMode: true });
this.bufferPool = new BufferPool();
this.connection = connection;
}
override _transform(chunk: Buffer, encoding: unknown, callback: TransformCallback): void {
if (this.connection.delayedTimeoutId != null) {
clearTimeout(this.connection.delayedTimeoutId);
this.connection.delayedTimeoutId = null;
}
this.bufferPool.append(chunk);
const sizeOfMessage = this.bufferPool.getInt32();
if (sizeOfMessage == null) {
return callback();
}
if (sizeOfMessage < 0) {
return callback(new MongoParseError(`Invalid message size: ${sizeOfMessage}, too small`));
}
if (sizeOfMessage > this.bufferPool.length) {
return callback();
}
const message = this.bufferPool.read(sizeOfMessage);
return callback(null, message);
}
}
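// --- Illustrative sketch (not driver code) ---------------------------------------------------
// How the length-prefixed framing above works: every MongoDB wire message starts with a
// little-endian int32 holding the total message length, including those 4 bytes. This
// simplified, standalone splitter uses a plain Buffer instead of BufferPool and loops so that
// several frames arriving in one chunk are all emitted; the payload bytes are placeholders.
import { Transform as ExampleTransform, type TransformCallback as ExampleCallback } from 'stream';

class FrameSplitter extends ExampleTransform {
  private buffered = Buffer.alloc(0);
  constructor() {
    super({ readableObjectMode: true });
  }
  override _transform(chunk: Buffer, _enc: unknown, callback: ExampleCallback): void {
    this.buffered = Buffer.concat([this.buffered, chunk]);
    while (this.buffered.length >= 4) {
      const size = this.buffered.readInt32LE(0);
      if (size < 4) return callback(new Error(`invalid message size: ${size}`));
      if (this.buffered.length < size) break; // wait for the rest of this frame
      this.push(this.buffered.subarray(0, size));
      this.buffered = this.buffered.subarray(size);
    }
    callback();
  }
}

// usage: two frames written in a single chunk still come out as two objects
const splitter = new FrameSplitter();
splitter.on('data', msg => console.log('frame of', (msg as Buffer).length, 'bytes'));
const frame = (payload: Buffer) => {
  const header = Buffer.alloc(4);
  header.writeInt32LE(4 + payload.length, 0);
  return Buffer.concat([header, payload]);
};
splitter.write(Buffer.concat([frame(Buffer.from('ab')), frame(Buffer.from('cdef'))]));
// ----------------------------------------------------------------------------------------------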
/** @internal */
export class CryptoConnection extends Connection {
/** @internal */
autoEncrypter?: AutoEncrypter;
constructor(stream: Stream, options: ConnectionOptions) {
super(stream, options);
this.autoEncrypter = options.autoEncrypter;
}
public override async command<T extends MongoDBResponseConstructor>(
ns: MongoDBNamespace,
command: Document,
options: CommandOptions | undefined,
responseType: T
): Promise<InstanceType<T>>;
public override async command(
ns: MongoDBNamespace,
command: Document,
options?: CommandOptions
): Promise<Document>;
override async command<T extends MongoDBResponseConstructor>(
ns: MongoDBNamespace,
cmd: Document,
options?: CommandOptions,
responseType?: T | undefined
): Promise<Document> {
const { autoEncrypter } = this;
if (!autoEncrypter) {
// TODO(NODE-6065): throw a MongoRuntimeError in Node V7
// @ts-expect-error No cause provided because there is no underlying error.
throw new MongoMissingDependencyError('No AutoEncrypter available for encryption', {
dependencyName: 'n/a'
});
}
const serverWireVersion = maxWireVersion(this);
if (serverWireVersion === 0) {
// This means the initial handshake hasn't happened yet
return await super.command<T>(ns, cmd, options, responseType);
}
if (serverWireVersion < 8) {
throw new MongoCompatibilityError(
'Auto-encryption requires a minimum MongoDB version of 4.2'
);
}
// Save sort or indexKeys based on the command being run
// the encrypt API serializes our JS objects to BSON to pass to the native code layer
// and then deserializes the encrypted result, the protocol level components
// of the command (e.g. sort) are then converted to JS objects, potentially losing
// important key order information. These fields are never encrypted so we can save the values
// from before the encryption and replace them after encryption has been performed
const sort: Map<string, number> | null = cmd.find || cmd.findAndModify ? cmd.sort : null;
const indexKeys: Map<string, number>[] | null = cmd.createIndexes
? cmd.indexes.map((index: { key: Map<string, number> }) => index.key)
: null;
const encrypted = await autoEncrypter.encrypt(ns.toString(), cmd, options);
// Replace the saved values
if (sort != null && (cmd.find || cmd.findAndModify)) {
encrypted.sort = sort;
}
if (indexKeys != null && cmd.createIndexes) {
for (const [offset, index] of indexKeys.entries()) {
// @ts-expect-error `encrypted` is a generic "command", but we've narrowed for only `createIndexes` commands here
encrypted.indexes[offset].key = index;
}
}
const encryptedResponse = await super.command(
ns,
encrypted,
options,
// Eventually we want to require `responseType` which means we would satisfy `T` as the return type.
// In the meantime, we want encryptedResponse to always be _at least_ a MongoDBResponse if not a more specific subclass
// So that we can ensure we have access to the on-demand APIs for decorate response
responseType ?? MongoDBResponse
);
const result = await autoEncrypter.decrypt(encryptedResponse.toBytes(), options);
const decryptedResponse = responseType?.make(result) ?? deserialize(result, options);
if (autoEncrypter[kDecorateResult]) {
if (responseType == null) {
decorateDecryptionResult(decryptedResponse, encryptedResponse.toObject(), true);
} else if (decryptedResponse instanceof CursorResponse) {
decryptedResponse.encryptedResponse = encryptedResponse;
}
}
return decryptedResponse;
}
}

View File

@@ -0,0 +1,851 @@
import { clearTimeout, setTimeout } from 'timers';
import type { ObjectId } from '../bson';
import {
APM_EVENTS,
CONNECTION_CHECK_OUT_FAILED,
CONNECTION_CHECK_OUT_STARTED,
CONNECTION_CHECKED_IN,
CONNECTION_CHECKED_OUT,
CONNECTION_CLOSED,
CONNECTION_CREATED,
CONNECTION_POOL_CLEARED,
CONNECTION_POOL_CLOSED,
CONNECTION_POOL_CREATED,
CONNECTION_POOL_READY,
CONNECTION_READY
} from '../constants';
import {
type AnyError,
type MongoError,
MongoInvalidArgumentError,
MongoMissingCredentialsError,
MongoNetworkError,
MongoOperationTimeoutError,
MongoRuntimeError,
MongoServerError
} from '../error';
import { CancellationToken, TypedEventEmitter } from '../mongo_types';
import type { Server } from '../sdam/server';
import { type TimeoutContext, TimeoutError } from '../timeout';
import { type Callback, List, makeCounter, now, promiseWithResolvers } from '../utils';
import { connect } from './connect';
import { Connection, type ConnectionEvents, type ConnectionOptions } from './connection';
import {
ConnectionCheckedInEvent,
ConnectionCheckedOutEvent,
ConnectionCheckOutFailedEvent,
ConnectionCheckOutStartedEvent,
ConnectionClosedEvent,
ConnectionCreatedEvent,
ConnectionPoolClearedEvent,
ConnectionPoolClosedEvent,
ConnectionPoolCreatedEvent,
ConnectionPoolReadyEvent,
ConnectionReadyEvent
} from './connection_pool_events';
import {
PoolClearedError,
PoolClearedOnNetworkError,
PoolClosedError,
WaitQueueTimeoutError
} from './errors';
import { ConnectionPoolMetrics } from './metrics';
/** @internal */
const kServer = Symbol('server');
/** @internal */
const kConnections = Symbol('connections');
/** @internal */
const kPending = Symbol('pending');
/** @internal */
const kCheckedOut = Symbol('checkedOut');
/** @internal */
const kMinPoolSizeTimer = Symbol('minPoolSizeTimer');
/** @internal */
const kGeneration = Symbol('generation');
/** @internal */
const kServiceGenerations = Symbol('serviceGenerations');
/** @internal */
const kConnectionCounter = Symbol('connectionCounter');
/** @internal */
const kCancellationToken = Symbol('cancellationToken');
/** @internal */
const kWaitQueue = Symbol('waitQueue');
/** @internal */
const kCancelled = Symbol('cancelled');
/** @internal */
const kMetrics = Symbol('metrics');
/** @internal */
const kProcessingWaitQueue = Symbol('processingWaitQueue');
/** @internal */
const kPoolState = Symbol('poolState');
/** @public */
export interface ConnectionPoolOptions extends Omit<ConnectionOptions, 'id' | 'generation'> {
/** The maximum number of connections that may be associated with a pool at a given time. This includes in use and available connections. */
maxPoolSize: number;
/** The minimum number of connections that MUST exist at any moment in a single connection pool. */
minPoolSize: number;
/** The maximum number of connections that may be in the process of being established concurrently by the connection pool. */
maxConnecting: number;
/** The maximum amount of time a connection should remain idle in the connection pool before being marked idle. */
maxIdleTimeMS: number;
/** The maximum amount of time operation execution should wait for a connection to become available. The default is 0 which means there is no limit. */
waitQueueTimeoutMS: number;
/** If we are in load balancer mode. */
loadBalanced: boolean;
/** @internal */
minPoolSizeCheckFrequencyMS?: number;
}
/** @internal */
export interface WaitQueueMember {
resolve: (conn: Connection) => void;
reject: (err: AnyError) => void;
[kCancelled]?: boolean;
checkoutTime: number;
}
/** @internal */
export const PoolState = Object.freeze({
paused: 'paused',
ready: 'ready',
closed: 'closed'
} as const);
/**
* @public
* @deprecated This interface is deprecated and will be removed in a future release as it is not used
* in the driver
*/
export interface CloseOptions {
force?: boolean;
}
/** @public */
export type ConnectionPoolEvents = {
connectionPoolCreated(event: ConnectionPoolCreatedEvent): void;
connectionPoolReady(event: ConnectionPoolReadyEvent): void;
connectionPoolClosed(event: ConnectionPoolClosedEvent): void;
connectionPoolCleared(event: ConnectionPoolClearedEvent): void;
connectionCreated(event: ConnectionCreatedEvent): void;
connectionReady(event: ConnectionReadyEvent): void;
connectionClosed(event: ConnectionClosedEvent): void;
connectionCheckOutStarted(event: ConnectionCheckOutStartedEvent): void;
connectionCheckOutFailed(event: ConnectionCheckOutFailedEvent): void;
connectionCheckedOut(event: ConnectionCheckedOutEvent): void;
connectionCheckedIn(event: ConnectionCheckedInEvent): void;
} & Omit<ConnectionEvents, 'close' | 'message'>;
/**
* A pool of connections which dynamically resizes and emits events related to pool activity
* @internal
*/
export class ConnectionPool extends TypedEventEmitter<ConnectionPoolEvents> {
options: Readonly<ConnectionPoolOptions>;
[kPoolState]: (typeof PoolState)[keyof typeof PoolState];
[kServer]: Server;
[kConnections]: List<Connection>;
[kPending]: number;
[kCheckedOut]: Set<Connection>;
[kMinPoolSizeTimer]?: NodeJS.Timeout;
/**
* An integer representing the SDAM generation of the pool
*/
[kGeneration]: number;
/**
* A map of generations to service ids
*/
[kServiceGenerations]: Map<string, number>;
[kConnectionCounter]: Generator<number>;
[kCancellationToken]: CancellationToken;
[kWaitQueue]: List<WaitQueueMember>;
[kMetrics]: ConnectionPoolMetrics;
[kProcessingWaitQueue]: boolean;
/**
* Emitted when the connection pool is created.
* @event
*/
static readonly CONNECTION_POOL_CREATED = CONNECTION_POOL_CREATED;
/**
* Emitted once when the connection pool is closed
* @event
*/
static readonly CONNECTION_POOL_CLOSED = CONNECTION_POOL_CLOSED;
/**
* Emitted each time the connection pool is cleared and its generation is incremented
* @event
*/
static readonly CONNECTION_POOL_CLEARED = CONNECTION_POOL_CLEARED;
/**
* Emitted each time the connection pool is marked ready
* @event
*/
static readonly CONNECTION_POOL_READY = CONNECTION_POOL_READY;
/**
* Emitted when a connection is created.
* @event
*/
static readonly CONNECTION_CREATED = CONNECTION_CREATED;
/**
* Emitted when a connection becomes established, and is ready to use
* @event
*/
static readonly CONNECTION_READY = CONNECTION_READY;
/**
* Emitted when a connection is closed
* @event
*/
static readonly CONNECTION_CLOSED = CONNECTION_CLOSED;
/**
* Emitted when an attempt to check out a connection begins
* @event
*/
static readonly CONNECTION_CHECK_OUT_STARTED = CONNECTION_CHECK_OUT_STARTED;
/**
* Emitted when an attempt to check out a connection fails
* @event
*/
static readonly CONNECTION_CHECK_OUT_FAILED = CONNECTION_CHECK_OUT_FAILED;
/**
* Emitted each time a connection is successfully checked out of the connection pool
* @event
*/
static readonly CONNECTION_CHECKED_OUT = CONNECTION_CHECKED_OUT;
/**
* Emitted each time a connection is successfully checked into the connection pool
* @event
*/
static readonly CONNECTION_CHECKED_IN = CONNECTION_CHECKED_IN;
constructor(server: Server, options: ConnectionPoolOptions) {
super();
this.options = Object.freeze({
connectionType: Connection,
...options,
maxPoolSize: options.maxPoolSize ?? 100,
minPoolSize: options.minPoolSize ?? 0,
maxConnecting: options.maxConnecting ?? 2,
maxIdleTimeMS: options.maxIdleTimeMS ?? 0,
waitQueueTimeoutMS: options.waitQueueTimeoutMS ?? 0,
minPoolSizeCheckFrequencyMS: options.minPoolSizeCheckFrequencyMS ?? 100,
autoEncrypter: options.autoEncrypter
});
if (this.options.minPoolSize > this.options.maxPoolSize) {
throw new MongoInvalidArgumentError(
'Connection pool minimum size must not be greater than maximum pool size'
);
}
this[kPoolState] = PoolState.paused;
this[kServer] = server;
this[kConnections] = new List();
this[kPending] = 0;
this[kCheckedOut] = new Set();
this[kMinPoolSizeTimer] = undefined;
this[kGeneration] = 0;
this[kServiceGenerations] = new Map();
this[kConnectionCounter] = makeCounter(1);
this[kCancellationToken] = new CancellationToken();
this[kCancellationToken].setMaxListeners(Infinity);
this[kWaitQueue] = new List();
this[kMetrics] = new ConnectionPoolMetrics();
this[kProcessingWaitQueue] = false;
this.mongoLogger = this[kServer].topology.client?.mongoLogger;
this.component = 'connection';
process.nextTick(() => {
this.emitAndLog(ConnectionPool.CONNECTION_POOL_CREATED, new ConnectionPoolCreatedEvent(this));
});
}
/** The address of the endpoint the pool is connected to */
get address(): string {
return this.options.hostAddress.toString();
}
/**
* Check if the pool has been closed
*
* TODO(NODE-3263): We can remove this property once shell no longer needs it
*/
get closed(): boolean {
return this[kPoolState] === PoolState.closed;
}
/** An integer representing the SDAM generation of the pool */
get generation(): number {
return this[kGeneration];
}
/** An integer expressing how many total connections (available + pending + in use) the pool currently has */
get totalConnectionCount(): number {
return (
this.availableConnectionCount + this.pendingConnectionCount + this.currentCheckedOutCount
);
}
/** An integer expressing how many connections are currently available in the pool. */
get availableConnectionCount(): number {
return this[kConnections].length;
}
get pendingConnectionCount(): number {
return this[kPending];
}
get currentCheckedOutCount(): number {
return this[kCheckedOut].size;
}
get waitQueueSize(): number {
return this[kWaitQueue].length;
}
get loadBalanced(): boolean {
return this.options.loadBalanced;
}
get serviceGenerations(): Map<string, number> {
return this[kServiceGenerations];
}
get serverError() {
return this[kServer].description.error;
}
/**
* This is exposed ONLY for use in mongosh, to enable
* killing all connections if a user quits the shell with
* operations in progress.
*
* This property may be removed as a part of NODE-3263.
*/
get checkedOutConnections() {
return this[kCheckedOut];
}
/**
* Get the metrics information for the pool when a wait queue timeout occurs.
*/
private waitQueueErrorMetrics(): string {
return this[kMetrics].info(this.options.maxPoolSize);
}
/**
* Set the pool state to "ready"
*/
ready(): void {
if (this[kPoolState] !== PoolState.paused) {
return;
}
this[kPoolState] = PoolState.ready;
this.emitAndLog(ConnectionPool.CONNECTION_POOL_READY, new ConnectionPoolReadyEvent(this));
clearTimeout(this[kMinPoolSizeTimer]);
this.ensureMinPoolSize();
}
/**
* Check a connection out of this pool. The connection will continue to be tracked, but no reference to it
* will be held by the pool. This means that if a connection is checked out it MUST be checked back in or
* explicitly destroyed by the new owner.
*/
async checkOut(options: { timeoutContext: TimeoutContext }): Promise<Connection> {
const checkoutTime = now();
this.emitAndLog(
ConnectionPool.CONNECTION_CHECK_OUT_STARTED,
new ConnectionCheckOutStartedEvent(this)
);
const { promise, resolve, reject } = promiseWithResolvers<Connection>();
const timeout = options.timeoutContext.connectionCheckoutTimeout;
const waitQueueMember: WaitQueueMember = {
resolve,
reject,
checkoutTime
};
this[kWaitQueue].push(waitQueueMember);
process.nextTick(() => this.processWaitQueue());
try {
timeout?.throwIfExpired();
return await (timeout ? Promise.race([promise, timeout]) : promise);
} catch (error) {
if (TimeoutError.is(error)) {
timeout?.clear();
waitQueueMember[kCancelled] = true;
this.emitAndLog(
ConnectionPool.CONNECTION_CHECK_OUT_FAILED,
new ConnectionCheckOutFailedEvent(this, 'timeout', waitQueueMember.checkoutTime)
);
const timeoutError = new WaitQueueTimeoutError(
this.loadBalanced
? this.waitQueueErrorMetrics()
: 'Timed out while checking out a connection from connection pool',
this.address
);
if (options.timeoutContext.csotEnabled()) {
throw new MongoOperationTimeoutError('Timed out during connection checkout', {
cause: timeoutError
});
}
throw timeoutError;
}
throw error;
} finally {
timeout?.clear();
}
}
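// --- Illustrative sketch (not driver code) ---------------------------------------------------
// The shape of checkOut above in miniature: a waiter is pushed onto a queue, its promise is
// raced against a timer, and on timeout the entry is only flagged as cancelled so whoever
// services the queue can skip it. promiseWithResolvers is hand-rolled here so the sketch stands
// alone; the queue element type and timeout value are placeholders.
interface ExampleWaiter<T> {
  resolve(value: T): void;
  reject(err: Error): void;
  cancelled?: boolean;
}

function exampleWithResolvers<T>() {
  let resolve!: (value: T) => void;
  let reject!: (reason: Error) => void;
  const promise = new Promise<T>((res, rej) => ((resolve = res), (reject = rej)));
  return { promise, resolve, reject };
}

async function checkOutWithTimeout<T>(queue: ExampleWaiter<T>[], timeoutMS: number): Promise<T> {
  const { promise, resolve, reject } = exampleWithResolvers<T>();
  const waiter: ExampleWaiter<T> = { resolve, reject };
  queue.push(waiter);

  const timer = setTimeout(() => {
    waiter.cancelled = true; // left in place; the queue processor skips cancelled members
    reject(new Error(`timed out waiting for a pooled resource after ${timeoutMS}ms`));
  }, timeoutMS);

  try {
    return await promise; // settled by whichever code services the wait queue
  } finally {
    clearTimeout(timer);
  }
}
// ----------------------------------------------------------------------------------------------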
/**
* Check a connection into the pool.
*
* @param connection - The connection to check in
*/
checkIn(connection: Connection): void {
if (!this[kCheckedOut].has(connection)) {
return;
}
const poolClosed = this.closed;
const stale = this.connectionIsStale(connection);
const willDestroy = !!(poolClosed || stale || connection.closed);
if (!willDestroy) {
connection.markAvailable();
this[kConnections].unshift(connection);
}
this[kCheckedOut].delete(connection);
this.emitAndLog(
ConnectionPool.CONNECTION_CHECKED_IN,
new ConnectionCheckedInEvent(this, connection)
);
if (willDestroy) {
const reason = connection.closed ? 'error' : poolClosed ? 'poolClosed' : 'stale';
this.destroyConnection(connection, reason);
}
process.nextTick(() => this.processWaitQueue());
}
/**
* Clear the pool
*
* Pool reset is handled by incrementing the pool's generation count. Any existing connection of a
* previous generation will eventually be pruned during subsequent checkouts.
*/
clear(options: { serviceId?: ObjectId; interruptInUseConnections?: boolean } = {}): void {
if (this.closed) {
return;
}
// handle load balanced case
if (this.loadBalanced) {
const { serviceId } = options;
if (!serviceId) {
throw new MongoRuntimeError(
'ConnectionPool.clear() called in load balanced mode with no serviceId.'
);
}
const sid = serviceId.toHexString();
const generation = this.serviceGenerations.get(sid);
// Only need to worry if the generation exists, since it should
// always be there, but TypeScript needs the check.
if (generation == null) {
throw new MongoRuntimeError('Service generations are required in load balancer mode.');
} else {
// Increment the generation for the service id.
this.serviceGenerations.set(sid, generation + 1);
}
this.emitAndLog(
ConnectionPool.CONNECTION_POOL_CLEARED,
new ConnectionPoolClearedEvent(this, { serviceId })
);
return;
}
// handle non load-balanced case
const interruptInUseConnections = options.interruptInUseConnections ?? false;
const oldGeneration = this[kGeneration];
this[kGeneration] += 1;
const alreadyPaused = this[kPoolState] === PoolState.paused;
this[kPoolState] = PoolState.paused;
this.clearMinPoolSizeTimer();
if (!alreadyPaused) {
this.emitAndLog(
ConnectionPool.CONNECTION_POOL_CLEARED,
new ConnectionPoolClearedEvent(this, {
interruptInUseConnections
})
);
}
if (interruptInUseConnections) {
process.nextTick(() => this.interruptInUseConnections(oldGeneration));
}
this.processWaitQueue();
}
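// --- Illustrative sketch (not driver code) ---------------------------------------------------
// Generation-based invalidation in miniature: clearing bumps the pool's generation, and a
// connection created under an older generation is treated as stale the next time it is
// inspected, mirroring connectionIsStale below. Names here are placeholders.
class ExampleGenerationPool {
  generation = 0;
  clear(): void {
    this.generation += 1; // everything created before this point is now stale
  }
  isStale(conn: { generation: number }): boolean {
    return conn.generation !== this.generation;
  }
}

// const pool = new ExampleGenerationPool();
// const conn = { generation: pool.generation }; // created under generation 0
// pool.clear();                                 // pool moves to generation 1
// pool.isStale(conn);                           // -> true, so it will be pruned on checkout
// ----------------------------------------------------------------------------------------------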
/**
* Closes all stale in-use connections in the pool with a resumable PoolClearedOnNetworkError.
*
* Only connections where `connection.generation <= minGeneration` are killed.
*/
private interruptInUseConnections(minGeneration: number) {
for (const connection of this[kCheckedOut]) {
if (connection.generation <= minGeneration) {
connection.onError(new PoolClearedOnNetworkError(this));
this.checkIn(connection);
}
}
}
/** Close the pool */
close(): void {
if (this.closed) {
return;
}
// immediately cancel any in-flight connections
this[kCancellationToken].emit('cancel');
// end the connection counter
if (typeof this[kConnectionCounter].return === 'function') {
this[kConnectionCounter].return(undefined);
}
this[kPoolState] = PoolState.closed;
this.clearMinPoolSizeTimer();
this.processWaitQueue();
for (const conn of this[kConnections]) {
this.emitAndLog(
ConnectionPool.CONNECTION_CLOSED,
new ConnectionClosedEvent(this, conn, 'poolClosed')
);
conn.destroy();
}
this[kConnections].clear();
this.emitAndLog(ConnectionPool.CONNECTION_POOL_CLOSED, new ConnectionPoolClosedEvent(this));
}
/**
* @internal
* Reauthenticate a connection
*/
async reauthenticate(connection: Connection): Promise<void> {
const authContext = connection.authContext;
if (!authContext) {
throw new MongoRuntimeError('No auth context found on connection.');
}
const credentials = authContext.credentials;
if (!credentials) {
throw new MongoMissingCredentialsError(
'Connection is missing credentials when asked to reauthenticate'
);
}
const resolvedCredentials = credentials.resolveAuthMechanism(connection.hello);
const provider = this[kServer].topology.client.s.authProviders.getOrCreateProvider(
resolvedCredentials.mechanism,
resolvedCredentials.mechanismProperties
);
if (!provider) {
throw new MongoMissingCredentialsError(
`Reauthenticate failed due to no auth provider for ${credentials.mechanism}`
);
}
await provider.reauth(authContext);
return;
}
/** Clear the min pool size timer */
private clearMinPoolSizeTimer(): void {
const minPoolSizeTimer = this[kMinPoolSizeTimer];
if (minPoolSizeTimer) {
clearTimeout(minPoolSizeTimer);
}
}
private destroyConnection(
connection: Connection,
reason: 'error' | 'idle' | 'stale' | 'poolClosed'
) {
this.emitAndLog(
ConnectionPool.CONNECTION_CLOSED,
new ConnectionClosedEvent(this, connection, reason)
);
// destroy the connection
connection.destroy();
}
private connectionIsStale(connection: Connection) {
const serviceId = connection.serviceId;
if (this.loadBalanced && serviceId) {
const sid = serviceId.toHexString();
const generation = this.serviceGenerations.get(sid);
return connection.generation !== generation;
}
return connection.generation !== this[kGeneration];
}
private connectionIsIdle(connection: Connection) {
return !!(this.options.maxIdleTimeMS && connection.idleTime > this.options.maxIdleTimeMS);
}
/**
* Destroys a connection if the connection is perished.
*
* @returns `true` if the connection was destroyed, `false` otherwise.
*/
private destroyConnectionIfPerished(connection: Connection): boolean {
const isStale = this.connectionIsStale(connection);
const isIdle = this.connectionIsIdle(connection);
if (!isStale && !isIdle && !connection.closed) {
return false;
}
const reason = connection.closed ? 'error' : isStale ? 'stale' : 'idle';
this.destroyConnection(connection, reason);
return true;
}
private createConnection(callback: Callback<Connection>) {
const connectOptions: ConnectionOptions = {
...this.options,
id: this[kConnectionCounter].next().value,
generation: this[kGeneration],
cancellationToken: this[kCancellationToken],
mongoLogger: this.mongoLogger,
authProviders: this[kServer].topology.client.s.authProviders
};
this[kPending]++;
// This is our version of a "virtual" no-I/O connection as the spec requires
const connectionCreatedTime = now();
this.emitAndLog(
ConnectionPool.CONNECTION_CREATED,
new ConnectionCreatedEvent(this, { id: connectOptions.id })
);
connect(connectOptions).then(
connection => {
// The pool might have closed since we started trying to create a connection
if (this[kPoolState] !== PoolState.ready) {
this[kPending]--;
connection.destroy();
callback(this.closed ? new PoolClosedError(this) : new PoolClearedError(this));
return;
}
// forward all events from the connection to the pool
for (const event of [...APM_EVENTS, Connection.CLUSTER_TIME_RECEIVED]) {
connection.on(event, (e: any) => this.emit(event, e));
}
if (this.loadBalanced) {
connection.on(Connection.PINNED, pinType => this[kMetrics].markPinned(pinType));
connection.on(Connection.UNPINNED, pinType => this[kMetrics].markUnpinned(pinType));
const serviceId = connection.serviceId;
if (serviceId) {
let generation;
const sid = serviceId.toHexString();
if ((generation = this.serviceGenerations.get(sid))) {
connection.generation = generation;
} else {
this.serviceGenerations.set(sid, 0);
connection.generation = 0;
}
}
}
connection.markAvailable();
this.emitAndLog(
ConnectionPool.CONNECTION_READY,
new ConnectionReadyEvent(this, connection, connectionCreatedTime)
);
this[kPending]--;
callback(undefined, connection);
},
error => {
this[kPending]--;
this[kServer].handleError(error);
this.emitAndLog(
ConnectionPool.CONNECTION_CLOSED,
new ConnectionClosedEvent(
this,
{ id: connectOptions.id, serviceId: undefined },
'error',
// TODO(NODE-5192): Remove this cast
error as MongoError
)
);
if (error instanceof MongoNetworkError || error instanceof MongoServerError) {
error.connectionGeneration = connectOptions.generation;
}
callback(error ?? new MongoRuntimeError('Connection creation failed without error'));
}
);
}
private ensureMinPoolSize() {
const minPoolSize = this.options.minPoolSize;
if (this[kPoolState] !== PoolState.ready || minPoolSize === 0) {
return;
}
this[kConnections].prune(connection => this.destroyConnectionIfPerished(connection));
if (
this.totalConnectionCount < minPoolSize &&
this.pendingConnectionCount < this.options.maxConnecting
) {
// NOTE: ensureMinPoolSize should not try to get all the pending
// connection permits because that potentially delays the availability of
// the connection to a checkout request
this.createConnection((err, connection) => {
if (!err && connection) {
this[kConnections].push(connection);
process.nextTick(() => this.processWaitQueue());
}
if (this[kPoolState] === PoolState.ready) {
clearTimeout(this[kMinPoolSizeTimer]);
this[kMinPoolSizeTimer] = setTimeout(
() => this.ensureMinPoolSize(),
this.options.minPoolSizeCheckFrequencyMS
);
}
});
} else {
clearTimeout(this[kMinPoolSizeTimer]);
this[kMinPoolSizeTimer] = setTimeout(
() => this.ensureMinPoolSize(),
this.options.minPoolSizeCheckFrequencyMS
);
}
}
private processWaitQueue() {
if (this[kProcessingWaitQueue]) {
return;
}
this[kProcessingWaitQueue] = true;
while (this.waitQueueSize) {
const waitQueueMember = this[kWaitQueue].first();
if (!waitQueueMember) {
this[kWaitQueue].shift();
continue;
}
if (waitQueueMember[kCancelled]) {
this[kWaitQueue].shift();
continue;
}
if (this[kPoolState] !== PoolState.ready) {
const reason = this.closed ? 'poolClosed' : 'connectionError';
const error = this.closed ? new PoolClosedError(this) : new PoolClearedError(this);
this.emitAndLog(
ConnectionPool.CONNECTION_CHECK_OUT_FAILED,
new ConnectionCheckOutFailedEvent(this, reason, waitQueueMember.checkoutTime, error)
);
this[kWaitQueue].shift();
waitQueueMember.reject(error);
continue;
}
if (!this.availableConnectionCount) {
break;
}
const connection = this[kConnections].shift();
if (!connection) {
break;
}
if (!this.destroyConnectionIfPerished(connection)) {
this[kCheckedOut].add(connection);
this.emitAndLog(
ConnectionPool.CONNECTION_CHECKED_OUT,
new ConnectionCheckedOutEvent(this, connection, waitQueueMember.checkoutTime)
);
this[kWaitQueue].shift();
waitQueueMember.resolve(connection);
}
}
const { maxPoolSize, maxConnecting } = this.options;
while (
this.waitQueueSize > 0 &&
this.pendingConnectionCount < maxConnecting &&
(maxPoolSize === 0 || this.totalConnectionCount < maxPoolSize)
) {
const waitQueueMember = this[kWaitQueue].shift();
if (!waitQueueMember || waitQueueMember[kCancelled]) {
continue;
}
this.createConnection((err, connection) => {
if (waitQueueMember[kCancelled]) {
if (!err && connection) {
this[kConnections].push(connection);
}
} else {
if (err) {
this.emitAndLog(
ConnectionPool.CONNECTION_CHECK_OUT_FAILED,
// TODO(NODE-5192): Remove this cast
new ConnectionCheckOutFailedEvent(
this,
'connectionError',
waitQueueMember.checkoutTime,
err as MongoError
)
);
waitQueueMember.reject(err);
} else if (connection) {
this[kCheckedOut].add(connection);
this.emitAndLog(
ConnectionPool.CONNECTION_CHECKED_OUT,
new ConnectionCheckedOutEvent(this, connection, waitQueueMember.checkoutTime)
);
waitQueueMember.resolve(connection);
}
}
process.nextTick(() => this.processWaitQueue());
});
}
this[kProcessingWaitQueue] = false;
}
}
/**
* A callback provided to `withConnection`
* @internal
*
* @param error - An error instance representing the error during the execution.
* @param connection - The managed connection which was checked out of the pool.
* @param callback - A function to call back after connection management is complete
*/
export type WithConnectionCallback = (
error: MongoError | undefined,
connection: Connection | undefined,
callback: Callback<Connection>
) => void;

View File

@@ -0,0 +1,300 @@
import type { ObjectId } from '../bson';
import {
CONNECTION_CHECK_OUT_FAILED,
CONNECTION_CHECK_OUT_STARTED,
CONNECTION_CHECKED_IN,
CONNECTION_CHECKED_OUT,
CONNECTION_CLOSED,
CONNECTION_CREATED,
CONNECTION_POOL_CLEARED,
CONNECTION_POOL_CLOSED,
CONNECTION_POOL_CREATED,
CONNECTION_POOL_READY,
CONNECTION_READY
} from '../constants';
import type { MongoError } from '../error';
import { now } from '../utils';
import type { Connection } from './connection';
import type { ConnectionPool, ConnectionPoolOptions } from './connection_pool';
/**
* The base export class for all monitoring events published from the connection pool
* @public
* @category Event
*/
export abstract class ConnectionPoolMonitoringEvent {
/** A timestamp when the event was created */
time: Date;
/** The address (host/port pair) of the pool */
address: string;
/** @internal */
abstract name:
| typeof CONNECTION_CHECK_OUT_FAILED
| typeof CONNECTION_CHECK_OUT_STARTED
| typeof CONNECTION_CHECKED_IN
| typeof CONNECTION_CHECKED_OUT
| typeof CONNECTION_CLOSED
| typeof CONNECTION_CREATED
| typeof CONNECTION_POOL_CLEARED
| typeof CONNECTION_POOL_CLOSED
| typeof CONNECTION_POOL_CREATED
| typeof CONNECTION_POOL_READY
| typeof CONNECTION_READY;
/** @internal */
constructor(pool: ConnectionPool) {
this.time = new Date();
this.address = pool.address;
}
}
/**
* An event published when a connection pool is created
* @public
* @category Event
*/
export class ConnectionPoolCreatedEvent extends ConnectionPoolMonitoringEvent {
/** The options used to create this connection pool */
options: Pick<
ConnectionPoolOptions,
'maxPoolSize' | 'minPoolSize' | 'maxConnecting' | 'maxIdleTimeMS' | 'waitQueueTimeoutMS'
>;
/** @internal */
name = CONNECTION_POOL_CREATED;
/** @internal */
constructor(pool: ConnectionPool) {
super(pool);
const { maxConnecting, maxPoolSize, minPoolSize, maxIdleTimeMS, waitQueueTimeoutMS } =
pool.options;
this.options = { maxConnecting, maxPoolSize, minPoolSize, maxIdleTimeMS, waitQueueTimeoutMS };
}
}
/**
* An event published when a connection pool is ready
* @public
* @category Event
*/
export class ConnectionPoolReadyEvent extends ConnectionPoolMonitoringEvent {
/** @internal */
name = CONNECTION_POOL_READY;
/** @internal */
constructor(pool: ConnectionPool) {
super(pool);
}
}
/**
* An event published when a connection pool is closed
* @public
* @category Event
*/
export class ConnectionPoolClosedEvent extends ConnectionPoolMonitoringEvent {
/** @internal */
name = CONNECTION_POOL_CLOSED;
/** @internal */
constructor(pool: ConnectionPool) {
super(pool);
}
}
/**
* An event published when a connection pool creates a new connection
* @public
* @category Event
*/
export class ConnectionCreatedEvent extends ConnectionPoolMonitoringEvent {
/** A monotonically increasing, per-pool id for the newly created connection */
connectionId: number | '<monitor>';
/** @internal */
name = CONNECTION_CREATED;
/** @internal */
constructor(pool: ConnectionPool, connection: { id: number | '<monitor>' }) {
super(pool);
this.connectionId = connection.id;
}
}
/**
* An event published when a connection is ready for use
* @public
* @category Event
*/
export class ConnectionReadyEvent extends ConnectionPoolMonitoringEvent {
/** The id of the connection */
connectionId: number | '<monitor>';
/**
* The time it took to establish the connection.
* In accordance with the definition of establishment of a connection
* specified by `ConnectionPoolOptions.maxConnecting`,
* it is the time elapsed between emitting a `ConnectionCreatedEvent`
* and emitting this event as part of the same check out.
*
* Naturally, when establishing a connection is part of checking out,
* this duration is not greater than
* `ConnectionCheckedOutEvent.duration`.
*/
durationMS: number;
/** @internal */
name = CONNECTION_READY;
/** @internal */
constructor(pool: ConnectionPool, connection: Connection, connectionCreatedEventTime: number) {
super(pool);
this.durationMS = now() - connectionCreatedEventTime;
this.connectionId = connection.id;
}
}
/**
* An event published when a connection is closed
* @public
* @category Event
*/
export class ConnectionClosedEvent extends ConnectionPoolMonitoringEvent {
/** The id of the connection */
connectionId: number | '<monitor>';
/** The reason the connection was closed */
reason: string;
serviceId?: ObjectId;
/** @internal */
name = CONNECTION_CLOSED;
/** @internal */
error: MongoError | null;
/** @internal */
constructor(
pool: ConnectionPool,
connection: Pick<Connection, 'id' | 'serviceId'>,
reason: 'idle' | 'stale' | 'poolClosed' | 'error',
error?: MongoError
) {
super(pool);
this.connectionId = connection.id;
this.reason = reason;
this.serviceId = connection.serviceId;
this.error = error ?? null;
}
}
/**
* An event published when a request to check a connection out begins
* @public
* @category Event
*/
export class ConnectionCheckOutStartedEvent extends ConnectionPoolMonitoringEvent {
/** @internal */
name = CONNECTION_CHECK_OUT_STARTED;
/** @internal */
constructor(pool: ConnectionPool) {
super(pool);
}
}
/**
* An event published when a request to check a connection out fails
* @public
* @category Event
*/
export class ConnectionCheckOutFailedEvent extends ConnectionPoolMonitoringEvent {
/** The reason the attempt to check out failed */
reason: string;
/** @internal */
error?: MongoError;
/** @internal */
name = CONNECTION_CHECK_OUT_FAILED;
/**
* The time it took to check out the connection.
* More specifically, the time elapsed between
* emitting a `ConnectionCheckOutStartedEvent`
* and emitting this event as part of the same check out.
*/
durationMS: number;
/** @internal */
constructor(
pool: ConnectionPool,
reason: 'poolClosed' | 'timeout' | 'connectionError',
checkoutTime: number,
error?: MongoError
) {
super(pool);
this.durationMS = now() - checkoutTime;
this.reason = reason;
this.error = error;
}
}
/**
* An event published when a connection is checked out of the connection pool
* @public
* @category Event
*/
export class ConnectionCheckedOutEvent extends ConnectionPoolMonitoringEvent {
/** The id of the connection */
connectionId: number | '<monitor>';
/** @internal */
name = CONNECTION_CHECKED_OUT;
/**
* The time it took to check out the connection.
* More specifically, the time elapsed between
* emitting a `ConnectionCheckOutStartedEvent`
   * and emitting this event as part of the same checkout.
*/
durationMS: number;
/** @internal */
constructor(pool: ConnectionPool, connection: Connection, checkoutTime: number) {
super(pool);
this.durationMS = now() - checkoutTime;
this.connectionId = connection.id;
}
}
/**
* An event published when a connection is checked into the connection pool
* @public
* @category Event
*/
export class ConnectionCheckedInEvent extends ConnectionPoolMonitoringEvent {
/** The id of the connection */
connectionId: number | '<monitor>';
/** @internal */
name = CONNECTION_CHECKED_IN;
/** @internal */
constructor(pool: ConnectionPool, connection: Connection) {
super(pool);
this.connectionId = connection.id;
}
}
/**
* An event published when a connection pool is cleared
* @public
* @category Event
*/
export class ConnectionPoolClearedEvent extends ConnectionPoolMonitoringEvent {
/** @internal */
serviceId?: ObjectId;
interruptInUseConnections?: boolean;
/** @internal */
name = CONNECTION_POOL_CLEARED;
/** @internal */
constructor(
pool: ConnectionPool,
options: { serviceId?: ObjectId; interruptInUseConnections?: boolean } = {}
) {
super(pool);
this.serviceId = options.serviceId;
this.interruptInUseConnections = options.interruptInUseConnections;
}
}
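
// Usage sketch (illustrative only, not part of the driver): these events are re-emitted on the
// MongoClient under the same names as the `name` fields above, so an application-level listener
// looks roughly like this. `client` stands in for any EventEmitter-like client object.
function examplePoolListeners(client: {
  on(event: string, listener: (event: ConnectionPoolMonitoringEvent) => void): void;
}): void {
  client.on(CONNECTION_POOL_CREATED, event =>
    console.log('pool created with options', (event as ConnectionPoolCreatedEvent).options)
  );
  client.on(CONNECTION_CHECKED_OUT, event =>
    console.log('checked out connection', (event as ConnectionCheckedOutEvent).connectionId)
  );
}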

backend/node_modules/mongodb/src/cmap/errors.ts generated vendored Normal file

@@ -0,0 +1,119 @@
import { MongoDriverError, MongoErrorLabel, MongoNetworkError } from '../error';
import type { ConnectionPool } from './connection_pool';
/**
* An error indicating a connection pool is closed
* @category Error
*/
export class PoolClosedError extends MongoDriverError {
/** The address of the connection pool */
address: string;
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(pool: ConnectionPool) {
super('Attempted to check out a connection from closed connection pool');
this.address = pool.address;
}
override get name(): string {
return 'MongoPoolClosedError';
}
}
/**
* An error indicating a connection pool is currently paused
* @category Error
*/
export class PoolClearedError extends MongoNetworkError {
/** The address of the connection pool */
address: string;
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(pool: ConnectionPool, message?: string) {
const errorMessage = message
? message
: `Connection pool for ${pool.address} was cleared because another operation failed with: "${pool.serverError?.message}"`;
super(errorMessage, pool.serverError ? { cause: pool.serverError } : undefined);
this.address = pool.address;
this.addErrorLabel(MongoErrorLabel.PoolRequstedRetry);
}
override get name(): string {
return 'MongoPoolClearedError';
}
}
/**
* An error indicating that a connection pool has been cleared after the monitor for that server timed out.
* @category Error
*/
export class PoolClearedOnNetworkError extends PoolClearedError {
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(pool: ConnectionPool) {
super(pool, `Connection to ${pool.address} interrupted due to server monitor timeout`);
}
override get name(): string {
return 'PoolClearedOnNetworkError';
}
}
/**
* An error thrown when a request to check out a connection times out
* @category Error
*/
export class WaitQueueTimeoutError extends MongoDriverError {
/** The address of the connection pool */
address: string;
/**
* **Do not use this constructor!**
*
* Meant for internal use only.
*
* @remarks
* This class is only meant to be constructed within the driver. This constructor is
* not subject to semantic versioning compatibility guarantees and may change at any time.
*
* @public
**/
constructor(message: string, address: string) {
super(message);
this.address = address;
}
override get name(): string {
return 'MongoWaitQueueTimeoutError';
}
}
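
// Handling sketch (illustrative only): these errors surface through ordinary driver calls, so
// callers typically branch on the overridden `name` getters above rather than importing the
// classes. `operation` stands in for any driver operation that returns a promise.
async function exampleRetryOnPoolCleared<T>(operation: () => Promise<T>): Promise<T> {
  try {
    return await operation();
  } catch (error) {
    if (error instanceof Error && error.name === 'MongoPoolClearedError') {
      // A cleared pool is a transient condition; a single retry is often enough.
      return await operation();
    }
    throw error;
  }
}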


@@ -0,0 +1,320 @@
import * as os from 'os';
import * as process from 'process';
import { BSON, type Document, Int32 } from '../../bson';
import { MongoInvalidArgumentError } from '../../error';
import type { MongoOptions } from '../../mongo_client';
import { fileIsAccessible } from '../../utils';
// eslint-disable-next-line @typescript-eslint/no-require-imports
const NODE_DRIVER_VERSION = require('../../../package.json').version;
/**
* @public
* @see https://github.com/mongodb/specifications/blob/master/source/mongodb-handshake/handshake.md#hello-command
*/
export interface ClientMetadata {
driver: {
name: string;
version: string;
};
os: {
type: string;
name?: NodeJS.Platform;
architecture?: string;
version?: string;
};
platform: string;
application?: {
name: string;
};
/** FaaS environment information */
env?: {
name: 'aws.lambda' | 'gcp.func' | 'azure.func' | 'vercel';
timeout_sec?: Int32;
memory_mb?: Int32;
region?: string;
url?: string;
};
}
/** @public */
export interface ClientMetadataOptions {
driverInfo?: {
name?: string;
version?: string;
platform?: string;
};
appName?: string;
}
/** @internal */
export class LimitedSizeDocument {
private document = new Map();
/** BSON overhead: Int32 + Null byte */
private documentSize = 5;
constructor(private maxSize: number) {}
  /** Only adds the key/value pair if adding it keeps the total BSON byte length within `maxSize` */
public ifItFitsItSits(key: string, value: Record<string, any> | string): boolean {
// The BSON byteLength of the new element is the same as serializing it to its own document
// subtracting the document size int32 and the null terminator.
const newElementSize = BSON.serialize(new Map().set(key, value)).byteLength - 5;
if (newElementSize + this.documentSize > this.maxSize) {
return false;
}
this.documentSize += newElementSize;
this.document.set(key, value);
return true;
}
toObject(): Document {
return BSON.deserialize(BSON.serialize(this.document), {
promoteLongs: false,
promoteBuffers: false,
promoteValues: false,
useBigInt64: false
});
}
}
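
// Sketch of the builder above (values are illustrative): keys are appended only while the running
// BSON byte length stays under the cap, so later keys may be silently dropped.
function exampleLimitedSizeDocument(): Document {
  const doc = new LimitedSizeDocument(512);
  doc.ifItFitsItSits('driver', { name: 'nodejs', version: NODE_DRIVER_VERSION }); // true if it fits
  doc.ifItFitsItSits('platform', `Node.js ${process.version}`);
  return doc.toObject();
}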
type MakeClientMetadataOptions = Pick<MongoOptions, 'appName' | 'driverInfo'>;
/**
* From the specs:
* Implementors SHOULD cumulatively update fields in the following order until the document is under the size limit:
* 1. Omit fields from `env` except `env.name`.
* 2. Omit fields from `os` except `os.type`.
* 3. Omit the `env` document entirely.
 * 4. Truncate `platform`. (Unlike the spec, this implementation does not truncate this field.)
*/
export function makeClientMetadata(options: MakeClientMetadataOptions): ClientMetadata {
const metadataDocument = new LimitedSizeDocument(512);
const { appName = '' } = options;
// Add app name first, it must be sent
if (appName.length > 0) {
const name =
Buffer.byteLength(appName, 'utf8') <= 128
? options.appName
: Buffer.from(appName, 'utf8').subarray(0, 128).toString('utf8');
metadataDocument.ifItFitsItSits('application', { name });
}
const { name = '', version = '', platform = '' } = options.driverInfo;
const driverInfo = {
name: name.length > 0 ? `nodejs|${name}` : 'nodejs',
version: version.length > 0 ? `${NODE_DRIVER_VERSION}|${version}` : NODE_DRIVER_VERSION
};
if (!metadataDocument.ifItFitsItSits('driver', driverInfo)) {
throw new MongoInvalidArgumentError(
'Unable to include driverInfo name and version, metadata cannot exceed 512 bytes'
);
}
let runtimeInfo = getRuntimeInfo();
if (platform.length > 0) {
runtimeInfo = `${runtimeInfo}|${platform}`;
}
if (!metadataDocument.ifItFitsItSits('platform', runtimeInfo)) {
throw new MongoInvalidArgumentError(
'Unable to include driverInfo platform, metadata cannot exceed 512 bytes'
);
}
// Note: order matters, os.type is last so it will be removed last if we're at maxSize
const osInfo = new Map()
.set('name', process.platform)
.set('architecture', process.arch)
.set('version', os.release())
.set('type', os.type());
if (!metadataDocument.ifItFitsItSits('os', osInfo)) {
for (const key of osInfo.keys()) {
osInfo.delete(key);
if (osInfo.size === 0) break;
if (metadataDocument.ifItFitsItSits('os', osInfo)) break;
}
}
const faasEnv = getFAASEnv();
if (faasEnv != null) {
if (!metadataDocument.ifItFitsItSits('env', faasEnv)) {
for (const key of faasEnv.keys()) {
faasEnv.delete(key);
if (faasEnv.size === 0) break;
if (metadataDocument.ifItFitsItSits('env', faasEnv)) break;
}
}
}
return metadataDocument.toObject() as ClientMetadata;
}
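
// Illustrative call (app and wrapper names are placeholders): wrapping-library info is appended
// to the driver defaults rather than replacing them, per the logic above.
function exampleMakeClientMetadata(): ClientMetadata {
  // Resulting document: driver.name is 'nodejs|my-wrapper', driver.version ends with '|1.2.3',
  // and platform ends with '|my-platform'.
  return makeClientMetadata({
    appName: 'inventory-service',
    driverInfo: { name: 'my-wrapper', version: '1.2.3', platform: 'my-platform' }
  });
}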
let dockerPromise: Promise<boolean>;
/** @internal */
async function getContainerMetadata() {
const containerMetadata: Record<string, any> = {};
dockerPromise ??= fileIsAccessible('/.dockerenv');
const isDocker = await dockerPromise;
const { KUBERNETES_SERVICE_HOST = '' } = process.env;
const isKubernetes = KUBERNETES_SERVICE_HOST.length > 0 ? true : false;
if (isDocker) containerMetadata.runtime = 'docker';
if (isKubernetes) containerMetadata.orchestrator = 'kubernetes';
return containerMetadata;
}
/**
* @internal
* Re-add each metadata value.
* Attempt to add new env container metadata, but keep old data if it does not fit.
*/
export async function addContainerMetadata(originalMetadata: ClientMetadata) {
const containerMetadata = await getContainerMetadata();
if (Object.keys(containerMetadata).length === 0) return originalMetadata;
const extendedMetadata = new LimitedSizeDocument(512);
const extendedEnvMetadata = { ...originalMetadata?.env, container: containerMetadata };
for (const [key, val] of Object.entries(originalMetadata)) {
if (key !== 'env') {
extendedMetadata.ifItFitsItSits(key, val);
} else {
if (!extendedMetadata.ifItFitsItSits('env', extendedEnvMetadata)) {
// add in old data if newer / extended metadata does not fit
extendedMetadata.ifItFitsItSits('env', val);
}
}
}
if (!('env' in originalMetadata)) {
extendedMetadata.ifItFitsItSits('env', extendedEnvMetadata);
}
return extendedMetadata.toObject();
}
/**
* Collects FaaS metadata.
* - `name` MUST be the last key in the Map returned.
*/
export function getFAASEnv(): Map<string, string | Int32> | null {
const {
AWS_EXECUTION_ENV = '',
AWS_LAMBDA_RUNTIME_API = '',
FUNCTIONS_WORKER_RUNTIME = '',
K_SERVICE = '',
FUNCTION_NAME = '',
VERCEL = '',
AWS_LAMBDA_FUNCTION_MEMORY_SIZE = '',
AWS_REGION = '',
FUNCTION_MEMORY_MB = '',
FUNCTION_REGION = '',
FUNCTION_TIMEOUT_SEC = '',
VERCEL_REGION = ''
} = process.env;
const isAWSFaaS =
AWS_EXECUTION_ENV.startsWith('AWS_Lambda_') || AWS_LAMBDA_RUNTIME_API.length > 0;
const isAzureFaaS = FUNCTIONS_WORKER_RUNTIME.length > 0;
const isGCPFaaS = K_SERVICE.length > 0 || FUNCTION_NAME.length > 0;
const isVercelFaaS = VERCEL.length > 0;
// Note: order matters, name must always be the last key
const faasEnv = new Map();
// When isVercelFaaS is true so is isAWSFaaS; Vercel inherits the AWS env
if (isVercelFaaS && !(isAzureFaaS || isGCPFaaS)) {
if (VERCEL_REGION.length > 0) {
faasEnv.set('region', VERCEL_REGION);
}
faasEnv.set('name', 'vercel');
return faasEnv;
}
if (isAWSFaaS && !(isAzureFaaS || isGCPFaaS || isVercelFaaS)) {
if (AWS_REGION.length > 0) {
faasEnv.set('region', AWS_REGION);
}
if (
AWS_LAMBDA_FUNCTION_MEMORY_SIZE.length > 0 &&
Number.isInteger(+AWS_LAMBDA_FUNCTION_MEMORY_SIZE)
) {
faasEnv.set('memory_mb', new Int32(AWS_LAMBDA_FUNCTION_MEMORY_SIZE));
}
faasEnv.set('name', 'aws.lambda');
return faasEnv;
}
if (isAzureFaaS && !(isGCPFaaS || isAWSFaaS || isVercelFaaS)) {
faasEnv.set('name', 'azure.func');
return faasEnv;
}
if (isGCPFaaS && !(isAzureFaaS || isAWSFaaS || isVercelFaaS)) {
if (FUNCTION_REGION.length > 0) {
faasEnv.set('region', FUNCTION_REGION);
}
if (FUNCTION_MEMORY_MB.length > 0 && Number.isInteger(+FUNCTION_MEMORY_MB)) {
faasEnv.set('memory_mb', new Int32(FUNCTION_MEMORY_MB));
}
if (FUNCTION_TIMEOUT_SEC.length > 0 && Number.isInteger(+FUNCTION_TIMEOUT_SEC)) {
faasEnv.set('timeout_sec', new Int32(FUNCTION_TIMEOUT_SEC));
}
faasEnv.set('name', 'gcp.func');
return faasEnv;
}
return null;
}
/**
* @internal
* This type represents the global Deno object and the minimal type contract we expect it to satisfy.
*/
declare const Deno: { version?: { deno?: string } } | undefined;
/**
* @internal
* This type represents the global Bun object and the minimal type contract we expect it to satisfy.
*/
declare const Bun: { (): void; version?: string } | undefined;
/**
* @internal
* Get current JavaScript runtime platform
*
* NOTE: The version information fetching is intentionally written defensively
* to avoid having a released driver version that becomes incompatible
* with a future change to these global objects.
*/
function getRuntimeInfo(): string {
if ('Deno' in globalThis) {
const version = typeof Deno?.version?.deno === 'string' ? Deno?.version?.deno : '0.0.0-unknown';
return `Deno v${version}, ${os.endianness()}`;
}
if ('Bun' in globalThis) {
const version = typeof Bun?.version === 'string' ? Bun?.version : '0.0.0-unknown';
return `Bun v${version}, ${os.endianness()}`;
}
return `Node.js ${process.version}, ${os.endianness()}`;
}

backend/node_modules/mongodb/src/cmap/metrics.ts generated vendored Normal file

@@ -0,0 +1,58 @@
/** @internal */
export class ConnectionPoolMetrics {
static readonly TXN = 'txn' as const;
static readonly CURSOR = 'cursor' as const;
static readonly OTHER = 'other' as const;
txnConnections = 0;
cursorConnections = 0;
otherConnections = 0;
/**
* Mark a connection as pinned for a specific operation.
*/
markPinned(pinType: string): void {
if (pinType === ConnectionPoolMetrics.TXN) {
this.txnConnections += 1;
} else if (pinType === ConnectionPoolMetrics.CURSOR) {
this.cursorConnections += 1;
} else {
this.otherConnections += 1;
}
}
/**
* Unmark a connection as pinned for an operation.
*/
markUnpinned(pinType: string): void {
if (pinType === ConnectionPoolMetrics.TXN) {
this.txnConnections -= 1;
} else if (pinType === ConnectionPoolMetrics.CURSOR) {
this.cursorConnections -= 1;
} else {
this.otherConnections -= 1;
}
}
/**
* Return information about the cmap metrics as a string.
*/
info(maxPoolSize: number): string {
return (
'Timed out while checking out a connection from connection pool: ' +
`maxPoolSize: ${maxPoolSize}, ` +
`connections in use by cursors: ${this.cursorConnections}, ` +
`connections in use by transactions: ${this.txnConnections}, ` +
`connections in use by other operations: ${this.otherConnections}`
);
}
/**
* Reset the metrics to the initial values.
*/
reset(): void {
this.txnConnections = 0;
this.cursorConnections = 0;
this.otherConnections = 0;
}
}
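
// Sketch of the pinning bookkeeping above: pin types come from the static constants, and `info`
// is used to enrich wait-queue timeout messages with the current counts.
function exampleMetricsUsage(): string {
  const metrics = new ConnectionPoolMetrics();
  metrics.markPinned(ConnectionPoolMetrics.TXN);
  metrics.markPinned(ConnectionPoolMetrics.CURSOR);
  metrics.markUnpinned(ConnectionPoolMetrics.CURSOR);
  return metrics.info(100); // reports 1 transaction-pinned connection out of maxPoolSize 100
}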


@@ -0,0 +1,96 @@
import { type Document, type Double, Long } from '../bson';
import { ServerType } from '../sdam/common';
import { parseServerType } from '../sdam/server_description';
import type { CompressorName } from './wire_protocol/compression';
const RESPONSE_FIELDS = [
'minWireVersion',
'maxWireVersion',
'maxBsonObjectSize',
'maxMessageSizeBytes',
'maxWriteBatchSize',
'logicalSessionTimeoutMinutes'
] as const;
/** @public */
export interface StreamDescriptionOptions {
compressors?: CompressorName[];
logicalSessionTimeoutMinutes?: number;
loadBalanced: boolean;
}
/** @public */
export class StreamDescription {
address: string;
type: ServerType;
minWireVersion?: number;
maxWireVersion?: number;
maxBsonObjectSize: number;
maxMessageSizeBytes: number;
maxWriteBatchSize: number;
compressors: CompressorName[];
compressor?: CompressorName;
logicalSessionTimeoutMinutes?: number;
loadBalanced: boolean;
__nodejs_mock_server__?: boolean;
zlibCompressionLevel?: number;
serverConnectionId: bigint | null;
public hello: Document | null = null;
constructor(address: string, options?: StreamDescriptionOptions) {
this.address = address;
this.type = ServerType.Unknown;
this.minWireVersion = undefined;
this.maxWireVersion = undefined;
this.maxBsonObjectSize = 16777216;
this.maxMessageSizeBytes = 48000000;
this.maxWriteBatchSize = 100000;
this.logicalSessionTimeoutMinutes = options?.logicalSessionTimeoutMinutes;
this.loadBalanced = !!options?.loadBalanced;
this.compressors =
options && options.compressors && Array.isArray(options.compressors)
? options.compressors
: [];
this.serverConnectionId = null;
}
receiveResponse(response: Document | null): void {
if (response == null) {
return;
}
this.hello = response;
this.type = parseServerType(response);
if ('connectionId' in response) {
this.serverConnectionId = this.parseServerConnectionID(response.connectionId);
} else {
this.serverConnectionId = null;
}
for (const field of RESPONSE_FIELDS) {
if (response[field] != null) {
this[field] = response[field];
}
// testing case
if ('__nodejs_mock_server__' in response) {
this.__nodejs_mock_server__ = response['__nodejs_mock_server__'];
}
}
if (response.compression) {
this.compressor = this.compressors.filter(c => response.compression?.includes(c))[0];
}
}
/* @internal */
parseServerConnectionID(serverConnectionId: number | Double | bigint | Long): bigint {
// Connection ids are always integral, so it's safe to coerce doubles as well as
// any integral type.
return Long.isLong(serverConnectionId)
? serverConnectionId.toBigInt()
: // @ts-expect-error: Doubles are coercible to number
BigInt(serverConnectionId);
}
}
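
// Sketch (the hello fields below are illustrative): receiveResponse copies the whitelisted
// RESPONSE_FIELDS off the server's hello reply and records the negotiated compressor.
function exampleStreamDescription(): StreamDescription {
  const description = new StreamDescription('localhost:27017', {
    compressors: ['zlib'],
    loadBalanced: false
  });
  description.receiveResponse({
    isWritablePrimary: true,
    minWireVersion: 0,
    maxWireVersion: 21,
    compression: ['zlib']
  });
  return description; // maxWireVersion is now 21 and compressor is 'zlib'
}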


@@ -0,0 +1,196 @@
import { promisify } from 'util';
import * as zlib from 'zlib';
import { LEGACY_HELLO_COMMAND } from '../../constants';
import { getSnappy, getZstdLibrary, type SnappyLib, type ZStandard } from '../../deps';
import { MongoDecompressionError, MongoInvalidArgumentError } from '../../error';
import {
type MessageHeader,
OpCompressedRequest,
OpMsgResponse,
OpReply,
type WriteProtocolMessageType
} from '../commands';
import { OP_COMPRESSED, OP_MSG } from './constants';
/** @public */
export const Compressor = Object.freeze({
none: 0,
snappy: 1,
zlib: 2,
zstd: 3
} as const);
/** @public */
export type Compressor = (typeof Compressor)[CompressorName];
/** @public */
export type CompressorName = keyof typeof Compressor;
export const uncompressibleCommands = new Set([
LEGACY_HELLO_COMMAND,
'saslStart',
'saslContinue',
'getnonce',
'authenticate',
'createUser',
'updateUser',
'copydbSaslStart',
'copydbgetnonce',
'copydb'
]);
const ZSTD_COMPRESSION_LEVEL = 3;
const zlibInflate = promisify(zlib.inflate.bind(zlib));
const zlibDeflate = promisify(zlib.deflate.bind(zlib));
let zstd: ZStandard;
let Snappy: SnappyLib | null = null;
function loadSnappy() {
if (Snappy == null) {
const snappyImport = getSnappy();
if ('kModuleError' in snappyImport) {
throw snappyImport.kModuleError;
}
Snappy = snappyImport;
}
return Snappy;
}
// Facilitate compressing a message using an agreed compressor
export async function compress(
options: { zlibCompressionLevel: number; agreedCompressor: CompressorName },
dataToBeCompressed: Buffer
): Promise<Buffer> {
const zlibOptions = {} as zlib.ZlibOptions;
switch (options.agreedCompressor) {
case 'snappy': {
Snappy ??= loadSnappy();
return await Snappy.compress(dataToBeCompressed);
}
case 'zstd': {
loadZstd();
if ('kModuleError' in zstd) {
throw zstd['kModuleError'];
}
return await zstd.compress(dataToBeCompressed, ZSTD_COMPRESSION_LEVEL);
}
case 'zlib': {
if (options.zlibCompressionLevel) {
zlibOptions.level = options.zlibCompressionLevel;
}
return await zlibDeflate(dataToBeCompressed, zlibOptions);
}
default: {
throw new MongoInvalidArgumentError(
`Unknown compressor ${options.agreedCompressor} failed to compress`
);
}
}
}
// Decompress a message using the given compressor
export async function decompress(compressorID: number, compressedData: Buffer): Promise<Buffer> {
if (
compressorID !== Compressor.snappy &&
compressorID !== Compressor.zstd &&
compressorID !== Compressor.zlib &&
compressorID !== Compressor.none
) {
throw new MongoDecompressionError(
`Server sent message compressed using an unsupported compressor. (Received compressor ID ${compressorID})`
);
}
switch (compressorID) {
case Compressor.snappy: {
Snappy ??= loadSnappy();
return await Snappy.uncompress(compressedData, { asBuffer: true });
}
case Compressor.zstd: {
loadZstd();
if ('kModuleError' in zstd) {
throw zstd['kModuleError'];
}
return await zstd.decompress(compressedData);
}
case Compressor.zlib: {
return await zlibInflate(compressedData);
}
default: {
return compressedData;
}
}
}
/**
* Load ZStandard if it is not already set.
*/
function loadZstd() {
if (!zstd) {
zstd = getZstdLibrary();
}
}
const MESSAGE_HEADER_SIZE = 16;
/**
* @internal
*
* Compresses an OP_MSG or OP_QUERY message, if compression is configured. This method
* also serializes the command to BSON.
*/
export async function compressCommand(
command: WriteProtocolMessageType,
description: { agreedCompressor?: CompressorName; zlibCompressionLevel?: number }
): Promise<Buffer> {
const finalCommand =
description.agreedCompressor === 'none' || !OpCompressedRequest.canCompress(command)
? command
: new OpCompressedRequest(command, {
agreedCompressor: description.agreedCompressor ?? 'none',
zlibCompressionLevel: description.zlibCompressionLevel ?? 0
});
const data = await finalCommand.toBin();
return Buffer.concat(data);
}
/**
* @internal
*
* Decompresses an OP_MSG or OP_QUERY response from the server, if compression is configured.
*
* This method does not parse the response's BSON.
*/
export async function decompressResponse(message: Buffer): Promise<OpMsgResponse | OpReply> {
const messageHeader: MessageHeader = {
length: message.readInt32LE(0),
requestId: message.readInt32LE(4),
responseTo: message.readInt32LE(8),
opCode: message.readInt32LE(12)
};
if (messageHeader.opCode !== OP_COMPRESSED) {
const ResponseType = messageHeader.opCode === OP_MSG ? OpMsgResponse : OpReply;
const messageBody = message.subarray(MESSAGE_HEADER_SIZE);
return new ResponseType(message, messageHeader, messageBody);
}
const header: MessageHeader = {
...messageHeader,
fromCompressed: true,
opCode: message.readInt32LE(MESSAGE_HEADER_SIZE),
length: message.readInt32LE(MESSAGE_HEADER_SIZE + 4)
};
const compressorID = message[MESSAGE_HEADER_SIZE + 8];
const compressedBuffer = message.slice(MESSAGE_HEADER_SIZE + 9);
// recalculate based on wrapped opcode
const ResponseType = header.opCode === OP_MSG ? OpMsgResponse : OpReply;
const messageBody = await decompress(compressorID, compressedBuffer);
if (messageBody.length !== header.length) {
throw new MongoDecompressionError('Message body and message header must be the same length');
}
return new ResponseType(message, header, messageBody);
}
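
// Round-trip sketch using the helpers above: zlib ships with Node, while snappy and zstd require
// their optional dependencies to be installed.
async function exampleZlibRoundTrip(payload: Buffer): Promise<boolean> {
  const compressed = await compress(
    { agreedCompressor: 'zlib', zlibCompressionLevel: 6 },
    payload
  );
  const restored = await decompress(Compressor.zlib, compressed);
  return restored.equals(payload);
}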


@@ -0,0 +1,13 @@
export const MIN_SUPPORTED_SERVER_VERSION = '4.0';
export const MAX_SUPPORTED_SERVER_VERSION = '8.0';
export const MIN_SUPPORTED_WIRE_VERSION = 7;
export const MAX_SUPPORTED_WIRE_VERSION = 25;
export const MIN_SUPPORTED_QE_WIRE_VERSION = 21;
export const MIN_SUPPORTED_QE_SERVER_VERSION = '7.0';
export const OP_REPLY = 1;
export const OP_UPDATE = 2001;
export const OP_INSERT = 2002;
export const OP_QUERY = 2004;
export const OP_DELETE = 2006;
export const OP_COMPRESSED = 2012;
export const OP_MSG = 2013;


@@ -0,0 +1,128 @@
import { type EventEmitter } from 'events';
import { type TimeoutContext } from '../../timeout';
import { List, promiseWithResolvers } from '../../utils';
/**
* @internal
* An object holding references to a promise's resolve and reject functions.
*/
type PendingPromises = Omit<
ReturnType<typeof promiseWithResolvers<IteratorResult<Buffer>>>,
'promise'
>;
/**
* onData is adapted from Node.js' events.on helper
* https://nodejs.org/api/events.html#eventsonemitter-eventname-options
*
* Returns an AsyncIterator that iterates each 'data' event emitted from emitter.
* It will reject upon an error event.
*/
export function onData(
emitter: EventEmitter,
{ timeoutContext }: { timeoutContext?: TimeoutContext }
) {
// Setup pending events and pending promise lists
/**
* When the caller has not yet called .next(), we store the
* value from the event in this list. Next time they call .next()
* we pull the first value out of this list and resolve a promise with it.
*/
const unconsumedEvents = new List<Buffer>();
/**
* When there has not yet been an event, a new promise will be created
* and implicitly stored in this list. When an event occurs we take the first
* promise in this list and resolve it.
*/
const unconsumedPromises = new List<PendingPromises>();
/**
   * Stores an error created by an error event.
   * This error will turn into a rejection for the subsequent .next() call.
*/
let error: Error | null = null;
/** Set to true only after event listeners have been removed. */
let finished = false;
const iterator: AsyncGenerator<Buffer> = {
next() {
// First, we consume all unread events
const value = unconsumedEvents.shift();
if (value != null) {
return Promise.resolve({ value, done: false });
}
// Then we error, if an error happened
// This happens one time if at all, because after 'error'
// we stop listening
if (error != null) {
const p = Promise.reject(error);
// Only the first element errors
error = null;
return p;
}
// If the iterator is finished, resolve to done
if (finished) return closeHandler();
// Wait until an event happens
const { promise, resolve, reject } = promiseWithResolvers<IteratorResult<Buffer>>();
unconsumedPromises.push({ resolve, reject });
return promise;
},
return() {
return closeHandler();
},
throw(err: Error) {
errorHandler(err);
return Promise.resolve({ value: undefined, done: true });
},
[Symbol.asyncIterator]() {
return this;
}
};
// Adding event handlers
emitter.on('data', eventHandler);
emitter.on('error', errorHandler);
const timeoutForSocketRead = timeoutContext?.timeoutForSocketRead;
timeoutForSocketRead?.throwIfExpired();
timeoutForSocketRead?.then(undefined, errorHandler);
return iterator;
function eventHandler(value: Buffer) {
const promise = unconsumedPromises.shift();
if (promise != null) promise.resolve({ value, done: false });
else unconsumedEvents.push(value);
}
function errorHandler(err: Error) {
const promise = unconsumedPromises.shift();
if (promise != null) promise.reject(err);
else error = err;
void closeHandler();
}
function closeHandler() {
// Adding event handlers
emitter.off('data', eventHandler);
emitter.off('error', errorHandler);
finished = true;
timeoutForSocketRead?.clear();
const doneResult = { value: undefined, done: finished } as const;
for (const promise of unconsumedPromises) {
promise.resolve(doneResult);
}
return Promise.resolve(doneResult);
}
}
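
// Usage sketch: consume 'data' events from an emitter as an async iterator. Breaking out of the
// loop (or returning) invokes the iterator's return() handler, which detaches the listeners.
// `emitter` is any EventEmitter that emits Buffer chunks, e.g. a socket.
async function exampleFirstChunk(emitter: EventEmitter): Promise<Buffer | null> {
  for await (const chunk of onData(emitter, {})) {
    return chunk;
  }
  return null;
}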


@@ -0,0 +1,353 @@
import {
Binary,
type BSONElement,
BSONError,
BSONType,
deserialize,
type DeserializeOptions,
getBigInt64LE,
getFloat64LE,
getInt32LE,
ObjectId,
parseToElementsToArray,
Timestamp,
toUTF8
} from '../../../bson';
// eslint-disable-next-line no-restricted-syntax
const enum BSONElementOffset {
type = 0,
nameOffset = 1,
nameLength = 2,
offset = 3,
length = 4
}
/** @internal */
export type JSTypeOf = {
[BSONType.null]: null;
[BSONType.undefined]: null;
[BSONType.double]: number;
[BSONType.int]: number;
[BSONType.long]: bigint;
[BSONType.timestamp]: Timestamp;
[BSONType.binData]: Binary;
[BSONType.bool]: boolean;
[BSONType.objectId]: ObjectId;
[BSONType.string]: string;
[BSONType.date]: Date;
[BSONType.object]: OnDemandDocument;
[BSONType.array]: OnDemandDocument;
};
/** @internal */
type CachedBSONElement = { element: BSONElement; value: any | undefined };
/**
* @internal
*
* Options for `OnDemandDocument.toObject()`. Validation is required to ensure
* that callers provide utf8 validation options. */
export type OnDemandDocumentDeserializeOptions = Omit<DeserializeOptions, 'validation'> &
Required<Pick<DeserializeOptions, 'validation'>>;
/** @internal */
export class OnDemandDocument {
/**
* Maps JS strings to elements and jsValues for speeding up subsequent lookups.
   * - If `false`, the name does not exist in the BSON document
   * - If a `CachedBSONElement` instance, the name exists
   * - If `cache[name].value == null`, the jsValue has not yet been parsed
* - Null/Undefined values do not get cached because they are zero-length values.
*/
private readonly cache: Record<string, CachedBSONElement | false | undefined> =
Object.create(null);
/** Caches the index of elements that have been named */
private readonly indexFound: Record<number, boolean> = Object.create(null);
/** All bson elements in this document */
private readonly elements: ReadonlyArray<BSONElement>;
constructor(
/** BSON bytes, this document begins at offset */
protected readonly bson: Uint8Array,
/** The start of the document */
private readonly offset = 0,
/** If this is an embedded document, indicates if this was a BSON array */
public readonly isArray = false,
/** If elements was already calculated */
elements?: BSONElement[]
) {
this.elements = elements ?? parseToElementsToArray(this.bson, offset);
}
/** Only supports basic latin strings */
private isElementName(name: string, element: BSONElement): boolean {
const nameLength = element[BSONElementOffset.nameLength];
const nameOffset = element[BSONElementOffset.nameOffset];
if (name.length !== nameLength) return false;
const nameEnd = nameOffset + nameLength;
for (
let byteIndex = nameOffset, charIndex = 0;
charIndex < name.length && byteIndex < nameEnd;
charIndex++, byteIndex++
) {
if (this.bson[byteIndex] !== name.charCodeAt(charIndex)) return false;
}
return true;
}
/**
* Seeks into the elements array for an element matching the given name.
*
* @remarks
* Caching:
* - Caches the existence of a property making subsequent look ups for non-existent properties return immediately
* - Caches names mapped to elements to avoid reiterating the array and comparing the name again
* - Caches the index at which an element has been found to prevent rechecking against elements already determined to belong to another name
*
* @param name - a basic latin string name of a BSON element
* @returns
*/
private getElement(name: string | number): CachedBSONElement | null {
const cachedElement = this.cache[name];
if (cachedElement === false) return null;
if (cachedElement != null) {
return cachedElement;
}
if (typeof name === 'number') {
if (this.isArray) {
if (name < this.elements.length) {
const element = this.elements[name];
const cachedElement = { element, value: undefined };
this.cache[name] = cachedElement;
this.indexFound[name] = true;
return cachedElement;
} else {
return null;
}
} else {
return null;
}
}
for (let index = 0; index < this.elements.length; index++) {
const element = this.elements[index];
// skip this element if it has already been associated with a name
if (!(index in this.indexFound) && this.isElementName(name, element)) {
const cachedElement = { element, value: undefined };
this.cache[name] = cachedElement;
this.indexFound[index] = true;
return cachedElement;
}
}
this.cache[name] = false;
return null;
}
/**
   * Translates BSON bytes into a javascript value. Checking `as` against the BSON element's type,
   * this method returns the small subset of BSON types that the driver needs to function.
*
* @remarks
* - BSONType.null and BSONType.undefined always return null
* - If the type requested does not match this returns null
*
* @param element - The element to revive to a javascript value
* @param as - A type byte expected to be returned
*/
private toJSValue<T extends keyof JSTypeOf>(element: BSONElement, as: T): JSTypeOf[T];
private toJSValue(element: BSONElement, as: keyof JSTypeOf): any {
const type = element[BSONElementOffset.type];
const offset = element[BSONElementOffset.offset];
const length = element[BSONElementOffset.length];
if (as !== type) {
return null;
}
switch (as) {
case BSONType.null:
case BSONType.undefined:
return null;
case BSONType.double:
return getFloat64LE(this.bson, offset);
case BSONType.int:
return getInt32LE(this.bson, offset);
case BSONType.long:
return getBigInt64LE(this.bson, offset);
case BSONType.bool:
return Boolean(this.bson[offset]);
case BSONType.objectId:
return new ObjectId(this.bson.subarray(offset, offset + 12));
case BSONType.timestamp:
return new Timestamp(getBigInt64LE(this.bson, offset));
case BSONType.string:
return toUTF8(this.bson, offset + 4, offset + length - 1, false);
case BSONType.binData: {
const totalBinarySize = getInt32LE(this.bson, offset);
const subType = this.bson[offset + 4];
if (subType === 2) {
const subType2BinarySize = getInt32LE(this.bson, offset + 1 + 4);
if (subType2BinarySize < 0)
throw new BSONError('Negative binary type element size found for subtype 0x02');
if (subType2BinarySize > totalBinarySize - 4)
throw new BSONError('Binary type with subtype 0x02 contains too long binary size');
if (subType2BinarySize < totalBinarySize - 4)
throw new BSONError('Binary type with subtype 0x02 contains too short binary size');
return new Binary(
this.bson.subarray(offset + 1 + 4 + 4, offset + 1 + 4 + 4 + subType2BinarySize),
2
);
}
return new Binary(
this.bson.subarray(offset + 1 + 4, offset + 1 + 4 + totalBinarySize),
subType
);
}
case BSONType.date:
        // BSON dates are an int64 of epoch milliseconds; coercing through Number is acceptable for realistic dates.
return new Date(Number(getBigInt64LE(this.bson, offset)));
case BSONType.object:
return new OnDemandDocument(this.bson, offset);
case BSONType.array:
return new OnDemandDocument(this.bson, offset, true);
default:
throw new BSONError(`Unsupported BSON type: ${as}`);
}
}
/**
* Returns the number of elements in this BSON document
*/
public size() {
return this.elements.length;
}
/**
* Checks for the existence of an element by name.
*
* @remarks
* Uses `getElement` with the expectation that will populate caches such that a `has` call
* followed by a `getElement` call will not repeat the cost paid by the first look up.
*
* @param name - element name
*/
public has(name: string): boolean {
const cachedElement = this.cache[name];
if (cachedElement === false) return false;
if (cachedElement != null) return true;
return this.getElement(name) != null;
}
/**
* Turns BSON element with `name` into a javascript value.
*
* @typeParam T - must be one of the supported BSON types determined by `JSTypeOf` this will determine the return type of this function.
* @param name - the element name
* @param as - the bson type expected
* @param required - whether or not the element is expected to exist, if true this function will throw if it is not present
*/
public get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required?: boolean | undefined
): JSTypeOf[T] | null;
/** `required` will make `get` throw if name does not exist or is null/undefined */
public get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required: true
): JSTypeOf[T];
public get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required?: boolean
): JSTypeOf[T] | null {
const element = this.getElement(name);
if (element == null) {
if (required === true) {
throw new BSONError(`BSON element "${name}" is missing`);
} else {
return null;
}
}
if (element.value == null) {
const value = this.toJSValue(element.element, as);
if (value == null) {
if (required === true) {
throw new BSONError(`BSON element "${name}" is missing`);
} else {
return null;
}
}
// It is important to never store null
element.value = value;
}
return element.value;
}
/**
* Supports returning int, double, long, and bool as javascript numbers
*
* @remarks
* **NOTE:**
* - Use this _only_ when you believe the potential precision loss of an int64 is acceptable
* - This method does not cache the result as Longs or booleans would be stored incorrectly
*
* @param name - element name
* @param required - throws if name does not exist
*/
public getNumber<const Req extends boolean = false>(
name: string,
required?: Req
): Req extends true ? number : number | null;
public getNumber(name: string, required: boolean): number | null {
const maybeBool = this.get(name, BSONType.bool);
const bool = maybeBool == null ? null : maybeBool ? 1 : 0;
const maybeLong = this.get(name, BSONType.long);
const long = maybeLong == null ? null : Number(maybeLong);
const result = bool ?? long ?? this.get(name, BSONType.int) ?? this.get(name, BSONType.double);
if (required === true && result == null) {
throw new BSONError(`BSON element "${name}" is missing`);
}
return result;
}
/**
* Deserialize this object, DOES NOT cache result so avoid multiple invocations
* @param options - BSON deserialization options
*/
public toObject(options?: OnDemandDocumentDeserializeOptions): Record<string, any> {
return deserialize(this.bson, {
...options,
index: this.offset,
allowObjectSmallerThanBufferSize: true
});
}
/** Returns this document's bytes only */
toBytes() {
const size = getInt32LE(this.bson, this.offset);
return this.bson.subarray(this.offset, this.offset + size);
}
}
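
// Sketch (`bsonBytes` stands for any serialized BSON document, e.g. a server reply body):
// lookups are lazy and cached, so repeated reads of the same element parse the bytes once.
function exampleReadReplyStatus(bsonBytes: Uint8Array): { ok: number | null; errmsg: string | null } {
  const doc = new OnDemandDocument(bsonBytes);
  return {
    ok: doc.getNumber('ok'),
    errmsg: doc.get('errmsg', BSONType.string)
  };
}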


@@ -0,0 +1,394 @@
import {
type BSONElement,
type BSONSerializeOptions,
BSONType,
type DeserializeOptions,
type Document,
Long,
parseToElementsToArray,
parseUtf8ValidationOption,
pluckBSONSerializeOptions,
type Timestamp
} from '../../bson';
import { MONGODB_ERROR_CODES, MongoUnexpectedServerResponseError } from '../../error';
import { type ClusterTime } from '../../sdam/common';
import { decorateDecryptionResult, ns } from '../../utils';
import {
type JSTypeOf,
OnDemandDocument,
type OnDemandDocumentDeserializeOptions
} from './on_demand/document';
// eslint-disable-next-line no-restricted-syntax
const enum BSONElementOffset {
type = 0,
nameOffset = 1,
nameLength = 2,
offset = 3,
length = 4
}
/**
 * Accepts a BSON payload and checks for an "ok: 0" element.
* This utility is intended to prevent calling response class constructors
* that expect the result to be a success and demand certain properties to exist.
*
* For example, a cursor response always expects a cursor embedded document.
* In order to write the class such that the properties reflect that assertion (non-null)
* we cannot invoke the subclass constructor if the BSON represents an error.
*
* @param bytes - BSON document returned from the server
*/
export function isErrorResponse(bson: Uint8Array, elements: BSONElement[]): boolean {
for (let eIdx = 0; eIdx < elements.length; eIdx++) {
const element = elements[eIdx];
if (element[BSONElementOffset.nameLength] === 2) {
const nameOffset = element[BSONElementOffset.nameOffset];
// 111 == "o", 107 == "k"
if (bson[nameOffset] === 111 && bson[nameOffset + 1] === 107) {
const valueOffset = element[BSONElementOffset.offset];
const valueLength = element[BSONElementOffset.length];
        // If any byte in the value of the "ok" element (works for any numeric type) is non-zero,
        // then it is considered "ok: 1"
for (let i = valueOffset; i < valueOffset + valueLength; i++) {
if (bson[i] !== 0x00) return false;
}
return true;
}
}
}
return true;
}
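
// Sketch: a caller only needs the raw bytes; the element array is produced the same way
// MongoDBResponse.make does below.
function exampleIsErrorReply(bson: Uint8Array): boolean {
  return isErrorResponse(bson, parseToElementsToArray(bson, 0));
}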
/** @internal */
export type MongoDBResponseConstructor = {
new (bson: Uint8Array, offset?: number, isArray?: boolean): MongoDBResponse;
make(bson: Uint8Array): MongoDBResponse;
};
/** @internal */
export class MongoDBResponse extends OnDemandDocument {
// Wrap error thrown from BSON
public override get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required?: false | undefined
): JSTypeOf[T] | null;
public override get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required: true
): JSTypeOf[T];
public override get<const T extends keyof JSTypeOf>(
name: string | number,
as: T,
required?: boolean | undefined
): JSTypeOf[T] | null {
try {
return super.get(name, as, required);
} catch (cause) {
throw new MongoUnexpectedServerResponseError(cause.message, { cause });
}
}
static is(value: unknown): value is MongoDBResponse {
return value instanceof MongoDBResponse;
}
static make(bson: Uint8Array) {
const elements = parseToElementsToArray(bson, 0);
const isError = isErrorResponse(bson, elements);
return isError
? new MongoDBResponse(bson, 0, false, elements)
: new this(bson, 0, false, elements);
}
// {ok:1}
static empty = new MongoDBResponse(new Uint8Array([13, 0, 0, 0, 16, 111, 107, 0, 1, 0, 0, 0, 0]));
/**
* Returns true iff:
* - ok is 0 and the top-level code === 50
* - ok is 1 and the writeErrors array contains a code === 50
   * - ok is 1 and the writeConcernError object contains a code === 50
*/
get isMaxTimeExpiredError() {
// {ok: 0, code: 50 ... }
const isTopLevel = this.ok === 0 && this.code === MONGODB_ERROR_CODES.MaxTimeMSExpired;
if (isTopLevel) return true;
if (this.ok === 0) return false;
// {ok: 1, writeConcernError: {code: 50 ... }}
const isWriteConcern =
this.get('writeConcernError', BSONType.object)?.getNumber('code') ===
MONGODB_ERROR_CODES.MaxTimeMSExpired;
if (isWriteConcern) return true;
const writeErrors = this.get('writeErrors', BSONType.array);
if (writeErrors?.size()) {
for (let i = 0; i < writeErrors.size(); i++) {
const isWriteError =
writeErrors.get(i, BSONType.object)?.getNumber('code') ===
MONGODB_ERROR_CODES.MaxTimeMSExpired;
// {ok: 1, writeErrors: [{code: 50 ... }]}
if (isWriteError) return true;
}
}
return false;
}
/**
* Drivers can safely assume that the `recoveryToken` field is always a BSON document but drivers MUST NOT modify the
* contents of the document.
*/
get recoveryToken(): Document | null {
return (
this.get('recoveryToken', BSONType.object)?.toObject({
promoteValues: false,
promoteLongs: false,
promoteBuffers: false,
validation: { utf8: true }
}) ?? null
);
}
/**
* The server creates a cursor in response to a snapshot find/aggregate command and reports atClusterTime within the cursor field in the response.
* For the distinct command the server adds a top-level atClusterTime field to the response.
* The atClusterTime field represents the timestamp of the read and is guaranteed to be majority committed.
*/
public get atClusterTime(): Timestamp | null {
return (
this.get('cursor', BSONType.object)?.get('atClusterTime', BSONType.timestamp) ??
this.get('atClusterTime', BSONType.timestamp)
);
}
public get operationTime(): Timestamp | null {
return this.get('operationTime', BSONType.timestamp);
}
/** Normalizes whatever BSON value is "ok" to a JS number 1 or 0. */
public get ok(): 0 | 1 {
return this.getNumber('ok') ? 1 : 0;
}
public get $err(): string | null {
return this.get('$err', BSONType.string);
}
public get errmsg(): string | null {
return this.get('errmsg', BSONType.string);
}
public get code(): number | null {
return this.getNumber('code');
}
private clusterTime?: ClusterTime | null;
public get $clusterTime(): ClusterTime | null {
if (!('clusterTime' in this)) {
const clusterTimeDoc = this.get('$clusterTime', BSONType.object);
if (clusterTimeDoc == null) {
this.clusterTime = null;
return null;
}
const clusterTime = clusterTimeDoc.get('clusterTime', BSONType.timestamp, true);
const signature = clusterTimeDoc.get('signature', BSONType.object)?.toObject();
// @ts-expect-error: `signature` is incorrectly typed. It is public API.
this.clusterTime = { clusterTime, signature };
}
return this.clusterTime ?? null;
}
public override toObject(options?: BSONSerializeOptions): Record<string, any> {
const exactBSONOptions = {
...pluckBSONSerializeOptions(options ?? {}),
validation: parseUtf8ValidationOption(options)
};
return super.toObject(exactBSONOptions);
}
}
/** @internal */
export class CursorResponse extends MongoDBResponse {
/**
* Devtools need to know which keys were encrypted before the driver automatically decrypted them.
* If decorating is enabled (`Symbol.for('@@mdb.decorateDecryptionResult')`), this field will be set,
* storing the original encrypted response from the server, so that we can build an object that has
* the list of BSON keys that were encrypted stored at a well known symbol: `Symbol.for('@@mdb.decryptedKeys')`.
*/
encryptedResponse?: MongoDBResponse;
/**
* This supports a feature of the FindCursor.
* It is an optimization to avoid an extra getMore when the limit has been reached
*/
static emptyGetMore: CursorResponse = {
id: new Long(0),
length: 0,
shift: () => null
} as unknown as CursorResponse;
static override is(value: unknown): value is CursorResponse {
return value instanceof CursorResponse || value === CursorResponse.emptyGetMore;
}
private _batch: OnDemandDocument | null = null;
private iterated = 0;
get cursor() {
return this.get('cursor', BSONType.object, true);
}
public get id(): Long {
try {
return Long.fromBigInt(this.cursor.get('id', BSONType.long, true));
} catch (cause) {
throw new MongoUnexpectedServerResponseError(cause.message, { cause });
}
}
public get ns() {
const namespace = this.cursor.get('ns', BSONType.string);
if (namespace != null) return ns(namespace);
return null;
}
public get length() {
return Math.max(this.batchSize - this.iterated, 0);
}
private _encryptedBatch: OnDemandDocument | null = null;
get encryptedBatch() {
if (this.encryptedResponse == null) return null;
if (this._encryptedBatch != null) return this._encryptedBatch;
const cursor = this.encryptedResponse?.get('cursor', BSONType.object);
if (cursor?.has('firstBatch'))
this._encryptedBatch = cursor.get('firstBatch', BSONType.array, true);
else if (cursor?.has('nextBatch'))
this._encryptedBatch = cursor.get('nextBatch', BSONType.array, true);
else throw new MongoUnexpectedServerResponseError('Cursor document did not contain a batch');
return this._encryptedBatch;
}
private get batch() {
if (this._batch != null) return this._batch;
const cursor = this.cursor;
if (cursor.has('firstBatch')) this._batch = cursor.get('firstBatch', BSONType.array, true);
else if (cursor.has('nextBatch')) this._batch = cursor.get('nextBatch', BSONType.array, true);
else throw new MongoUnexpectedServerResponseError('Cursor document did not contain a batch');
return this._batch;
}
public get batchSize() {
return this.batch?.size();
}
public get postBatchResumeToken() {
return (
this.cursor.get('postBatchResumeToken', BSONType.object)?.toObject({
promoteValues: false,
promoteLongs: false,
promoteBuffers: false,
validation: { utf8: true }
}) ?? null
);
}
public shift(options: OnDemandDocumentDeserializeOptions): any {
if (this.iterated >= this.batchSize) {
return null;
}
const result = this.batch.get(this.iterated, BSONType.object, true) ?? null;
const encryptedResult = this.encryptedBatch?.get(this.iterated, BSONType.object, true) ?? null;
this.iterated += 1;
if (options?.raw) {
return result.toBytes();
} else {
const object = result.toObject(options);
if (encryptedResult) {
decorateDecryptionResult(object, encryptedResult.toObject(options), true);
}
return object;
}
}
public clear() {
this.iterated = this.batchSize;
}
}
/**
 * Explain responses have nothing to do with cursor responses.
 * This class serves to temporarily avoid refactoring how cursors handle
 * explain responses, which is to detect that the response is not cursor-like, return the explain
 * result as the "first and only" document in the "batch", and end the "cursor".
*/
export class ExplainedCursorResponse extends CursorResponse {
isExplain = true;
override get id(): Long {
return Long.fromBigInt(0n);
}
override get batchSize() {
return 0;
}
override get ns() {
return null;
}
_length = 1;
override get length(): number {
return this._length;
}
override shift(options?: DeserializeOptions) {
if (this._length === 0) return null;
this._length -= 1;
return this.toObject(options);
}
}
/**
* Client bulk writes have some extra metadata at the top level that needs to be
* included in the result returned to the user.
*/
export class ClientBulkWriteCursorResponse extends CursorResponse {
get insertedCount() {
return this.get('nInserted', BSONType.int, true);
}
get upsertedCount() {
return this.get('nUpserted', BSONType.int, true);
}
get matchedCount() {
return this.get('nMatched', BSONType.int, true);
}
get modifiedCount() {
return this.get('nModified', BSONType.int, true);
}
get deletedCount() {
return this.get('nDeleted', BSONType.int, true);
}
get writeConcernError() {
return this.get('writeConcernError', BSONType.object, false);
}
}


@@ -0,0 +1,48 @@
import { MongoInvalidArgumentError } from '../../error';
import { ReadPreference, type ReadPreferenceLike } from '../../read_preference';
import { ServerType } from '../../sdam/common';
import type { Server } from '../../sdam/server';
import type { ServerDescription } from '../../sdam/server_description';
import type { Topology } from '../../sdam/topology';
import { TopologyDescription } from '../../sdam/topology_description';
import type { Connection } from '../connection';
export interface ReadPreferenceOption {
readPreference?: ReadPreferenceLike;
}
export function getReadPreference(options?: ReadPreferenceOption): ReadPreference {
// Default to command version of the readPreference.
let readPreference = options?.readPreference ?? ReadPreference.primary;
if (typeof readPreference === 'string') {
readPreference = ReadPreference.fromString(readPreference);
}
if (!(readPreference instanceof ReadPreference)) {
throw new MongoInvalidArgumentError(
'Option "readPreference" must be a ReadPreference instance'
);
}
return readPreference;
}
export function isSharded(topologyOrServer?: Topology | Server | Connection): boolean {
if (topologyOrServer == null) {
return false;
}
if (topologyOrServer.description && topologyOrServer.description.type === ServerType.Mongos) {
return true;
}
// NOTE: This is incredibly inefficient, and should be removed once command construction
// happens based on `Server` not `Topology`.
if (topologyOrServer.description && topologyOrServer.description instanceof TopologyDescription) {
const servers: ServerDescription[] = Array.from(topologyOrServer.description.servers.values());
return servers.some((server: ServerDescription) => server.type === ServerType.Mongos);
}
return false;
}
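
// Sketch: a string read preference is normalized to a ReadPreference instance before use.
function exampleReadPreference(): ReadPreference {
  // Both a string and a ReadPreference instance are accepted; strings are parsed via fromString.
  return getReadPreference({ readPreference: 'secondaryPreferred' });
}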