Commit With Frontend and Backend in MERN
This commit is contained in:
171
backend/node_modules/mongodb/src/operations/aggregate.ts
generated
vendored
Normal file
171
backend/node_modules/mongodb/src/operations/aggregate.ts
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
import type { Document } from '../bson';
|
||||
import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses';
|
||||
import { type CursorTimeoutMode } from '../cursor/abstract_cursor';
|
||||
import { MongoInvalidArgumentError } from '../error';
|
||||
import { type ExplainOptions } from '../explain';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { maxWireVersion, type MongoDBNamespace } from '../utils';
|
||||
import { WriteConcern } from '../write_concern';
|
||||
import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects, type Hint } from './operation';
|
||||
|
||||
/** @internal */
|
||||
// eslint-disable-next-line @typescript-eslint/no-unnecessary-type-assertion
|
||||
export const DB_AGGREGATE_COLLECTION = 1 as const;
|
||||
const MIN_WIRE_VERSION_$OUT_READ_CONCERN_SUPPORT = 8;
|
||||
|
||||
/** @public */
|
||||
export interface AggregateOptions extends Omit<CommandOperationOptions, 'explain'> {
|
||||
/** allowDiskUse lets the server know if it can use disk to store temporary results for the aggregation (requires mongodb 2.6 \>). */
|
||||
allowDiskUse?: boolean;
|
||||
/** The number of documents to return per batch. See [aggregation documentation](https://www.mongodb.com/docs/manual/reference/command/aggregate). */
|
||||
batchSize?: number;
|
||||
/** Allow driver to bypass schema validation. */
|
||||
bypassDocumentValidation?: boolean;
|
||||
/** Return the query as cursor, on 2.6 \> it returns as a real cursor on pre 2.6 it returns as an emulated cursor. */
|
||||
cursor?: Document;
|
||||
/**
|
||||
* Specifies a cumulative time limit in milliseconds for processing operations on the cursor. MongoDB interrupts the operation at the earliest following interrupt point.
|
||||
*/
|
||||
maxTimeMS?: number;
|
||||
/** The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. */
|
||||
maxAwaitTimeMS?: number;
|
||||
/** Specify collation. */
|
||||
collation?: CollationOptions;
|
||||
/** Add an index selection hint to an aggregation command */
|
||||
hint?: Hint;
|
||||
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
|
||||
let?: Document;
|
||||
|
||||
out?: string;
|
||||
|
||||
/**
|
||||
* Specifies the verbosity mode for the explain output.
|
||||
* @deprecated This API is deprecated in favor of `collection.aggregate().explain()`
|
||||
* or `db.aggregate().explain()`.
|
||||
*/
|
||||
explain?: ExplainOptions['explain'];
|
||||
/** @internal */
|
||||
timeoutMode?: CursorTimeoutMode;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
export class AggregateOperation extends CommandOperation<CursorResponse> {
|
||||
override options: AggregateOptions;
|
||||
target: string | typeof DB_AGGREGATE_COLLECTION;
|
||||
pipeline: Document[];
|
||||
hasWriteStage: boolean;
|
||||
|
||||
constructor(ns: MongoDBNamespace, pipeline: Document[], options?: AggregateOptions) {
|
||||
super(undefined, { ...options, dbName: ns.db });
|
||||
|
||||
this.options = { ...options };
|
||||
|
||||
// Covers when ns.collection is null, undefined or the empty string, use DB_AGGREGATE_COLLECTION
|
||||
this.target = ns.collection || DB_AGGREGATE_COLLECTION;
|
||||
|
||||
this.pipeline = pipeline;
|
||||
|
||||
// determine if we have a write stage, override read preference if so
|
||||
this.hasWriteStage = false;
|
||||
if (typeof options?.out === 'string') {
|
||||
this.pipeline = this.pipeline.concat({ $out: options.out });
|
||||
this.hasWriteStage = true;
|
||||
} else if (pipeline.length > 0) {
|
||||
const finalStage = pipeline[pipeline.length - 1];
|
||||
if (finalStage.$out || finalStage.$merge) {
|
||||
this.hasWriteStage = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (this.hasWriteStage) {
|
||||
this.trySecondaryWrite = true;
|
||||
} else {
|
||||
delete this.options.writeConcern;
|
||||
}
|
||||
|
||||
if (this.explain && this.writeConcern) {
|
||||
throw new MongoInvalidArgumentError(
|
||||
'Option "explain" cannot be used on an aggregate call with writeConcern'
|
||||
);
|
||||
}
|
||||
|
||||
if (options?.cursor != null && typeof options.cursor !== 'object') {
|
||||
throw new MongoInvalidArgumentError('Cursor options must be an object');
|
||||
}
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'aggregate' as const;
|
||||
}
|
||||
|
||||
override get canRetryRead(): boolean {
|
||||
return !this.hasWriteStage;
|
||||
}
|
||||
|
||||
addToPipeline(stage: Document): void {
|
||||
this.pipeline.push(stage);
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<CursorResponse> {
|
||||
const options: AggregateOptions = this.options;
|
||||
const serverWireVersion = maxWireVersion(server);
|
||||
const command: Document = { aggregate: this.target, pipeline: this.pipeline };
|
||||
|
||||
if (this.hasWriteStage && serverWireVersion < MIN_WIRE_VERSION_$OUT_READ_CONCERN_SUPPORT) {
|
||||
this.readConcern = undefined;
|
||||
}
|
||||
|
||||
if (this.hasWriteStage && this.writeConcern) {
|
||||
WriteConcern.apply(command, this.writeConcern);
|
||||
}
|
||||
|
||||
if (options.bypassDocumentValidation === true) {
|
||||
command.bypassDocumentValidation = options.bypassDocumentValidation;
|
||||
}
|
||||
|
||||
if (typeof options.allowDiskUse === 'boolean') {
|
||||
command.allowDiskUse = options.allowDiskUse;
|
||||
}
|
||||
|
||||
if (options.hint) {
|
||||
command.hint = options.hint;
|
||||
}
|
||||
|
||||
if (options.let) {
|
||||
command.let = options.let;
|
||||
}
|
||||
|
||||
// we check for undefined specifically here to allow falsy values
|
||||
// eslint-disable-next-line no-restricted-syntax
|
||||
if (options.comment !== undefined) {
|
||||
command.comment = options.comment;
|
||||
}
|
||||
|
||||
command.cursor = options.cursor || {};
|
||||
if (options.batchSize && !this.hasWriteStage) {
|
||||
command.cursor.batchSize = options.batchSize;
|
||||
}
|
||||
|
||||
return await super.executeCommand(
|
||||
server,
|
||||
session,
|
||||
command,
|
||||
timeoutContext,
|
||||
this.explain ? ExplainedCursorResponse : CursorResponse
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
defineAspects(AggregateOperation, [
|
||||
Aspect.READ_OPERATION,
|
||||
Aspect.RETRYABLE,
|
||||
Aspect.EXPLAINABLE,
|
||||
Aspect.CURSOR_CREATING
|
||||
]);
|
||||
64
backend/node_modules/mongodb/src/operations/bulk_write.ts
generated
vendored
Normal file
64
backend/node_modules/mongodb/src/operations/bulk_write.ts
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
import type {
|
||||
AnyBulkWriteOperation,
|
||||
BulkOperationBase,
|
||||
BulkWriteOptions,
|
||||
BulkWriteResult
|
||||
} from '../bulk/common';
|
||||
import type { Collection } from '../collection';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { AbstractOperation, Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @internal */
|
||||
export class BulkWriteOperation extends AbstractOperation<BulkWriteResult> {
|
||||
override options: BulkWriteOptions;
|
||||
collection: Collection;
|
||||
operations: ReadonlyArray<AnyBulkWriteOperation>;
|
||||
|
||||
constructor(
|
||||
collection: Collection,
|
||||
operations: ReadonlyArray<AnyBulkWriteOperation>,
|
||||
options: BulkWriteOptions
|
||||
) {
|
||||
super(options);
|
||||
this.options = options;
|
||||
this.collection = collection;
|
||||
this.operations = operations;
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'bulkWrite' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<BulkWriteResult> {
|
||||
const coll = this.collection;
|
||||
const operations = this.operations;
|
||||
const options = {
|
||||
...this.options,
|
||||
...this.bsonOptions,
|
||||
readPreference: this.readPreference,
|
||||
timeoutContext
|
||||
};
|
||||
|
||||
// Create the bulk operation
|
||||
const bulk: BulkOperationBase =
|
||||
options.ordered === false
|
||||
? coll.initializeUnorderedBulkOp(options)
|
||||
: coll.initializeOrderedBulkOp(options);
|
||||
|
||||
// for each op go through and add to the bulk
|
||||
for (let i = 0; i < operations.length; i++) {
|
||||
bulk.raw(operations[i]);
|
||||
}
|
||||
|
||||
// Execute the bulk
|
||||
return await bulk.execute({ ...options, session });
|
||||
}
|
||||
}
|
||||
|
||||
defineAspects(BulkWriteOperation, [Aspect.WRITE_OPERATION]);
|
||||
115
backend/node_modules/mongodb/src/operations/client_bulk_write/client_bulk_write.ts
generated
vendored
Normal file
115
backend/node_modules/mongodb/src/operations/client_bulk_write/client_bulk_write.ts
generated
vendored
Normal file
@@ -0,0 +1,115 @@
|
||||
import { MongoClientBulkWriteExecutionError, ServerType } from '../../beta';
|
||||
import { ClientBulkWriteCursorResponse } from '../../cmap/wire_protocol/responses';
|
||||
import type { Server } from '../../sdam/server';
|
||||
import type { ClientSession } from '../../sessions';
|
||||
import { type TimeoutContext } from '../../timeout';
|
||||
import { MongoDBNamespace } from '../../utils';
|
||||
import { CommandOperation } from '../command';
|
||||
import { Aspect, defineAspects } from '../operation';
|
||||
import { type ClientBulkWriteCommandBuilder } from './command_builder';
|
||||
import { type ClientBulkWriteOptions } from './common';
|
||||
|
||||
/**
|
||||
* Executes a single client bulk write operation within a potential batch.
|
||||
* @internal
|
||||
*/
|
||||
export class ClientBulkWriteOperation extends CommandOperation<ClientBulkWriteCursorResponse> {
|
||||
commandBuilder: ClientBulkWriteCommandBuilder;
|
||||
override options: ClientBulkWriteOptions;
|
||||
|
||||
override get commandName() {
|
||||
return 'bulkWrite' as const;
|
||||
}
|
||||
|
||||
constructor(commandBuilder: ClientBulkWriteCommandBuilder, options: ClientBulkWriteOptions) {
|
||||
super(undefined, options);
|
||||
this.commandBuilder = commandBuilder;
|
||||
this.options = options;
|
||||
this.ns = new MongoDBNamespace('admin', '$cmd');
|
||||
}
|
||||
|
||||
override resetBatch(): boolean {
|
||||
return this.commandBuilder.resetBatch();
|
||||
}
|
||||
|
||||
override get canRetryWrite(): boolean {
|
||||
return this.commandBuilder.isBatchRetryable;
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute the command. Superclass will handle write concern, etc.
|
||||
* @param server - The server.
|
||||
* @param session - The session.
|
||||
* @returns The response.
|
||||
*/
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<ClientBulkWriteCursorResponse> {
|
||||
let command;
|
||||
|
||||
if (server.description.type === ServerType.LoadBalancer) {
|
||||
if (session) {
|
||||
let connection;
|
||||
if (!session.pinnedConnection) {
|
||||
// Checkout a connection to build the command.
|
||||
connection = await server.pool.checkOut({ timeoutContext });
|
||||
// Pin the connection to the session so it get used to execute the command and we do not
|
||||
// perform a double check-in/check-out.
|
||||
session.pin(connection);
|
||||
} else {
|
||||
connection = session.pinnedConnection;
|
||||
}
|
||||
command = this.commandBuilder.buildBatch(
|
||||
connection.hello?.maxMessageSizeBytes,
|
||||
connection.hello?.maxWriteBatchSize,
|
||||
connection.hello?.maxBsonObjectSize
|
||||
);
|
||||
} else {
|
||||
throw new MongoClientBulkWriteExecutionError(
|
||||
'Session provided to the client bulk write operation must be present.'
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// At this point we have a server and the auto connect code has already
|
||||
// run in executeOperation, so the server description will be populated.
|
||||
// We can use that to build the command.
|
||||
if (
|
||||
!server.description.maxWriteBatchSize ||
|
||||
!server.description.maxMessageSizeBytes ||
|
||||
!server.description.maxBsonObjectSize
|
||||
) {
|
||||
throw new MongoClientBulkWriteExecutionError(
|
||||
'In order to execute a client bulk write, both maxWriteBatchSize, maxMessageSizeBytes and maxBsonObjectSize must be provided by the servers hello response.'
|
||||
);
|
||||
}
|
||||
command = this.commandBuilder.buildBatch(
|
||||
server.description.maxMessageSizeBytes,
|
||||
server.description.maxWriteBatchSize,
|
||||
server.description.maxBsonObjectSize
|
||||
);
|
||||
}
|
||||
|
||||
// Check after the batch is built if we cannot retry it and override the option.
|
||||
if (!this.canRetryWrite) {
|
||||
this.options.willRetryWrite = false;
|
||||
}
|
||||
return await super.executeCommand(
|
||||
server,
|
||||
session,
|
||||
command,
|
||||
timeoutContext,
|
||||
ClientBulkWriteCursorResponse
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Skipping the collation as it goes on the individual ops.
|
||||
defineAspects(ClientBulkWriteOperation, [
|
||||
Aspect.WRITE_OPERATION,
|
||||
Aspect.SKIP_COLLATION,
|
||||
Aspect.CURSOR_CREATING,
|
||||
Aspect.RETRYABLE,
|
||||
Aspect.COMMAND_BATCHING
|
||||
]);
|
||||
469
backend/node_modules/mongodb/src/operations/client_bulk_write/command_builder.ts
generated
vendored
Normal file
469
backend/node_modules/mongodb/src/operations/client_bulk_write/command_builder.ts
generated
vendored
Normal file
@@ -0,0 +1,469 @@
|
||||
import { BSON, type Document } from '../../bson';
|
||||
import { DocumentSequence } from '../../cmap/commands';
|
||||
import { MongoAPIError, MongoInvalidArgumentError } from '../../error';
|
||||
import { type PkFactory } from '../../mongo_client';
|
||||
import type { Filter, OptionalId, UpdateFilter, WithoutId } from '../../mongo_types';
|
||||
import { DEFAULT_PK_FACTORY, hasAtomicOperators } from '../../utils';
|
||||
import { type CollationOptions } from '../command';
|
||||
import { type Hint } from '../operation';
|
||||
import type {
|
||||
AnyClientBulkWriteModel,
|
||||
ClientBulkWriteOptions,
|
||||
ClientDeleteManyModel,
|
||||
ClientDeleteOneModel,
|
||||
ClientInsertOneModel,
|
||||
ClientReplaceOneModel,
|
||||
ClientUpdateManyModel,
|
||||
ClientUpdateOneModel
|
||||
} from './common';
|
||||
|
||||
/** @internal */
|
||||
export interface ClientBulkWriteCommand {
|
||||
bulkWrite: 1;
|
||||
errorsOnly: boolean;
|
||||
ordered: boolean;
|
||||
ops: DocumentSequence;
|
||||
nsInfo: DocumentSequence;
|
||||
bypassDocumentValidation?: boolean;
|
||||
let?: Document;
|
||||
comment?: any;
|
||||
}
|
||||
|
||||
/**
|
||||
* The bytes overhead for the extra fields added post command generation.
|
||||
*/
|
||||
const MESSAGE_OVERHEAD_BYTES = 1000;
|
||||
|
||||
/** @internal */
|
||||
export class ClientBulkWriteCommandBuilder {
|
||||
models: ReadonlyArray<AnyClientBulkWriteModel<Document>>;
|
||||
options: ClientBulkWriteOptions;
|
||||
pkFactory: PkFactory;
|
||||
/** The current index in the models array that is being processed. */
|
||||
currentModelIndex: number;
|
||||
/** The model index that the builder was on when it finished the previous batch. Used for resets when retrying. */
|
||||
previousModelIndex: number;
|
||||
/** The last array of operations that were created. Used by the results merger for indexing results. */
|
||||
lastOperations: Document[];
|
||||
/** Returns true if the current batch being created has no multi-updates. */
|
||||
isBatchRetryable: boolean;
|
||||
|
||||
/**
|
||||
* Create the command builder.
|
||||
* @param models - The client write models.
|
||||
*/
|
||||
constructor(
|
||||
models: ReadonlyArray<AnyClientBulkWriteModel<Document>>,
|
||||
options: ClientBulkWriteOptions,
|
||||
pkFactory?: PkFactory
|
||||
) {
|
||||
this.models = models;
|
||||
this.options = options;
|
||||
this.pkFactory = pkFactory ?? DEFAULT_PK_FACTORY;
|
||||
this.currentModelIndex = 0;
|
||||
this.previousModelIndex = 0;
|
||||
this.lastOperations = [];
|
||||
this.isBatchRetryable = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the errorsOnly value for the command, which is the inverse of the
|
||||
* user provided verboseResults option. Defaults to true.
|
||||
*/
|
||||
get errorsOnly(): boolean {
|
||||
if ('verboseResults' in this.options) {
|
||||
return !this.options.verboseResults;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if there is another batch to process.
|
||||
* @returns True if not all batches have been built.
|
||||
*/
|
||||
hasNextBatch(): boolean {
|
||||
return this.currentModelIndex < this.models.length;
|
||||
}
|
||||
|
||||
/**
|
||||
* When we need to retry a command we need to set the current
|
||||
* model index back to its previous value.
|
||||
*/
|
||||
resetBatch(): boolean {
|
||||
this.currentModelIndex = this.previousModelIndex;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a single batch of a client bulk write command.
|
||||
* @param maxMessageSizeBytes - The max message size in bytes.
|
||||
* @param maxWriteBatchSize - The max write batch size.
|
||||
* @returns The client bulk write command.
|
||||
*/
|
||||
buildBatch(
|
||||
maxMessageSizeBytes: number,
|
||||
maxWriteBatchSize: number,
|
||||
maxBsonObjectSize: number
|
||||
): ClientBulkWriteCommand {
|
||||
// We start by assuming the batch has no multi-updates, so it is retryable
|
||||
// until we find them.
|
||||
this.isBatchRetryable = true;
|
||||
let commandLength = 0;
|
||||
let currentNamespaceIndex = 0;
|
||||
const command: ClientBulkWriteCommand = this.baseCommand();
|
||||
const namespaces = new Map<string, number>();
|
||||
// In the case of retries we need to mark where we started this batch.
|
||||
this.previousModelIndex = this.currentModelIndex;
|
||||
|
||||
while (this.currentModelIndex < this.models.length) {
|
||||
const model = this.models[this.currentModelIndex];
|
||||
const ns = model.namespace;
|
||||
const nsIndex = namespaces.get(ns);
|
||||
|
||||
// Multi updates are not retryable.
|
||||
if (model.name === 'deleteMany' || model.name === 'updateMany') {
|
||||
this.isBatchRetryable = false;
|
||||
}
|
||||
|
||||
if (nsIndex != null) {
|
||||
// Build the operation and serialize it to get the bytes buffer.
|
||||
const operation = buildOperation(model, nsIndex, this.pkFactory);
|
||||
let operationBuffer;
|
||||
try {
|
||||
operationBuffer = BSON.serialize(operation);
|
||||
} catch (cause) {
|
||||
throw new MongoInvalidArgumentError(`Could not serialize operation to BSON`, { cause });
|
||||
}
|
||||
|
||||
validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
|
||||
|
||||
// Check if the operation buffer can fit in the command. If it can,
|
||||
// then add the operation to the document sequence and increment the
|
||||
// current length as long as the ops don't exceed the maxWriteBatchSize.
|
||||
if (
|
||||
commandLength + operationBuffer.length < maxMessageSizeBytes &&
|
||||
command.ops.documents.length < maxWriteBatchSize
|
||||
) {
|
||||
// Pushing to the ops document sequence returns the total byte length of the document sequence.
|
||||
commandLength = MESSAGE_OVERHEAD_BYTES + command.ops.push(operation, operationBuffer);
|
||||
// Increment the builder's current model index.
|
||||
this.currentModelIndex++;
|
||||
} else {
|
||||
// The operation cannot fit in the current command and will need to
|
||||
// go in the next batch. Exit the loop.
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
// The namespace is not already in the nsInfo so we will set it in the map, and
|
||||
// construct our nsInfo and ops documents and buffers.
|
||||
namespaces.set(ns, currentNamespaceIndex);
|
||||
const nsInfo = { ns: ns };
|
||||
const operation = buildOperation(model, currentNamespaceIndex, this.pkFactory);
|
||||
let nsInfoBuffer;
|
||||
let operationBuffer;
|
||||
try {
|
||||
nsInfoBuffer = BSON.serialize(nsInfo);
|
||||
operationBuffer = BSON.serialize(operation);
|
||||
} catch (cause) {
|
||||
throw new MongoInvalidArgumentError(`Could not serialize ns info to BSON`, { cause });
|
||||
}
|
||||
|
||||
validateBufferSize('nsInfo', nsInfoBuffer, maxBsonObjectSize);
|
||||
validateBufferSize('ops', operationBuffer, maxBsonObjectSize);
|
||||
|
||||
// Check if the operation and nsInfo buffers can fit in the command. If they
|
||||
// can, then add the operation and nsInfo to their respective document
|
||||
// sequences and increment the current length as long as the ops don't exceed
|
||||
// the maxWriteBatchSize.
|
||||
if (
|
||||
commandLength + nsInfoBuffer.length + operationBuffer.length < maxMessageSizeBytes &&
|
||||
command.ops.documents.length < maxWriteBatchSize
|
||||
) {
|
||||
// Pushing to the ops document sequence returns the total byte length of the document sequence.
|
||||
commandLength =
|
||||
MESSAGE_OVERHEAD_BYTES +
|
||||
command.nsInfo.push(nsInfo, nsInfoBuffer) +
|
||||
command.ops.push(operation, operationBuffer);
|
||||
// We've added a new namespace, increment the namespace index.
|
||||
currentNamespaceIndex++;
|
||||
// Increment the builder's current model index.
|
||||
this.currentModelIndex++;
|
||||
} else {
|
||||
// The operation cannot fit in the current command and will need to
|
||||
// go in the next batch. Exit the loop.
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Set the last operations and return the command.
|
||||
this.lastOperations = command.ops.documents;
|
||||
return command;
|
||||
}
|
||||
|
||||
private baseCommand(): ClientBulkWriteCommand {
|
||||
const command: ClientBulkWriteCommand = {
|
||||
bulkWrite: 1,
|
||||
errorsOnly: this.errorsOnly,
|
||||
ordered: this.options.ordered ?? true,
|
||||
ops: new DocumentSequence('ops'),
|
||||
nsInfo: new DocumentSequence('nsInfo')
|
||||
};
|
||||
// Add bypassDocumentValidation if it was present in the options.
|
||||
if (this.options.bypassDocumentValidation != null) {
|
||||
command.bypassDocumentValidation = this.options.bypassDocumentValidation;
|
||||
}
|
||||
// Add let if it was present in the options.
|
||||
if (this.options.let) {
|
||||
command.let = this.options.let;
|
||||
}
|
||||
|
||||
// we check for undefined specifically here to allow falsy values
|
||||
// eslint-disable-next-line no-restricted-syntax
|
||||
if (this.options.comment !== undefined) {
|
||||
command.comment = this.options.comment;
|
||||
}
|
||||
|
||||
return command;
|
||||
}
|
||||
}
|
||||
|
||||
function validateBufferSize(name: string, buffer: Uint8Array, maxBsonObjectSize: number) {
|
||||
if (buffer.length > maxBsonObjectSize) {
|
||||
throw new MongoInvalidArgumentError(
|
||||
`Client bulk write operation ${name} of length ${buffer.length} exceeds the max bson object size of ${maxBsonObjectSize}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
interface ClientInsertOperation {
|
||||
insert: number;
|
||||
document: OptionalId<Document>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the insert one operation.
|
||||
* @param model - The insert one model.
|
||||
* @param index - The namespace index.
|
||||
* @returns the operation.
|
||||
*/
|
||||
export const buildInsertOneOperation = (
|
||||
model: ClientInsertOneModel<Document>,
|
||||
index: number,
|
||||
pkFactory: PkFactory
|
||||
): ClientInsertOperation => {
|
||||
const document: ClientInsertOperation = {
|
||||
insert: index,
|
||||
document: model.document
|
||||
};
|
||||
document.document._id = model.document._id ?? pkFactory.createPk();
|
||||
return document;
|
||||
};
|
||||
|
||||
/** @internal */
|
||||
export interface ClientDeleteOperation {
|
||||
delete: number;
|
||||
multi: boolean;
|
||||
filter: Filter<Document>;
|
||||
hint?: Hint;
|
||||
collation?: CollationOptions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the delete one operation.
|
||||
* @param model - The insert many model.
|
||||
* @param index - The namespace index.
|
||||
* @returns the operation.
|
||||
*/
|
||||
export const buildDeleteOneOperation = (
|
||||
model: ClientDeleteOneModel<Document>,
|
||||
index: number
|
||||
): Document => {
|
||||
return createDeleteOperation(model, index, false);
|
||||
};
|
||||
|
||||
/**
|
||||
* Build the delete many operation.
|
||||
* @param model - The delete many model.
|
||||
* @param index - The namespace index.
|
||||
* @returns the operation.
|
||||
*/
|
||||
export const buildDeleteManyOperation = (
|
||||
model: ClientDeleteManyModel<Document>,
|
||||
index: number
|
||||
): Document => {
|
||||
return createDeleteOperation(model, index, true);
|
||||
};
|
||||
|
||||
/**
|
||||
* Creates a delete operation based on the parameters.
|
||||
*/
|
||||
function createDeleteOperation(
|
||||
model: ClientDeleteOneModel<Document> | ClientDeleteManyModel<Document>,
|
||||
index: number,
|
||||
multi: boolean
|
||||
): ClientDeleteOperation {
|
||||
const document: ClientDeleteOperation = {
|
||||
delete: index,
|
||||
multi: multi,
|
||||
filter: model.filter
|
||||
};
|
||||
if (model.hint) {
|
||||
document.hint = model.hint;
|
||||
}
|
||||
if (model.collation) {
|
||||
document.collation = model.collation;
|
||||
}
|
||||
return document;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
export interface ClientUpdateOperation {
|
||||
update: number;
|
||||
multi: boolean;
|
||||
filter: Filter<Document>;
|
||||
updateMods: UpdateFilter<Document> | Document[];
|
||||
hint?: Hint;
|
||||
upsert?: boolean;
|
||||
arrayFilters?: Document[];
|
||||
collation?: CollationOptions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the update one operation.
|
||||
* @param model - The update one model.
|
||||
* @param index - The namespace index.
|
||||
* @returns the operation.
|
||||
*/
|
||||
export const buildUpdateOneOperation = (
|
||||
model: ClientUpdateOneModel<Document>,
|
||||
index: number
|
||||
): ClientUpdateOperation => {
|
||||
return createUpdateOperation(model, index, false);
|
||||
};
|
||||
|
||||
/**
|
||||
* Build the update many operation.
|
||||
* @param model - The update many model.
|
||||
* @param index - The namespace index.
|
||||
* @returns the operation.
|
||||
*/
|
||||
export const buildUpdateManyOperation = (
|
||||
model: ClientUpdateManyModel<Document>,
|
||||
index: number
|
||||
): ClientUpdateOperation => {
|
||||
return createUpdateOperation(model, index, true);
|
||||
};
|
||||
|
||||
/**
|
||||
* Validate the update document.
|
||||
* @param update - The update document.
|
||||
*/
|
||||
function validateUpdate(update: Document) {
|
||||
if (!hasAtomicOperators(update)) {
|
||||
throw new MongoAPIError(
|
||||
'Client bulk write update models must only contain atomic modifiers (start with $) and must not be empty.'
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a delete operation based on the parameters.
|
||||
*/
|
||||
function createUpdateOperation(
|
||||
model: ClientUpdateOneModel<Document> | ClientUpdateManyModel<Document>,
|
||||
index: number,
|
||||
multi: boolean
|
||||
): ClientUpdateOperation {
|
||||
// Update documents provided in UpdateOne and UpdateMany write models are
|
||||
// required only to contain atomic modifiers (i.e. keys that start with "$").
|
||||
// Drivers MUST throw an error if an update document is empty or if the
|
||||
// document's first key does not start with "$".
|
||||
validateUpdate(model.update);
|
||||
const document: ClientUpdateOperation = {
|
||||
update: index,
|
||||
multi: multi,
|
||||
filter: model.filter,
|
||||
updateMods: model.update
|
||||
};
|
||||
if (model.hint) {
|
||||
document.hint = model.hint;
|
||||
}
|
||||
if (model.upsert) {
|
||||
document.upsert = model.upsert;
|
||||
}
|
||||
if (model.arrayFilters) {
|
||||
document.arrayFilters = model.arrayFilters;
|
||||
}
|
||||
if (model.collation) {
|
||||
document.collation = model.collation;
|
||||
}
|
||||
return document;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
export interface ClientReplaceOneOperation {
|
||||
update: number;
|
||||
multi: boolean;
|
||||
filter: Filter<Document>;
|
||||
updateMods: WithoutId<Document>;
|
||||
hint?: Hint;
|
||||
upsert?: boolean;
|
||||
collation?: CollationOptions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the replace one operation.
|
||||
* @param model - The replace one model.
|
||||
* @param index - The namespace index.
|
||||
* @returns the operation.
|
||||
*/
|
||||
export const buildReplaceOneOperation = (
|
||||
model: ClientReplaceOneModel<Document>,
|
||||
index: number
|
||||
): ClientReplaceOneOperation => {
|
||||
if (hasAtomicOperators(model.replacement)) {
|
||||
throw new MongoAPIError(
|
||||
'Client bulk write replace models must not contain atomic modifiers (start with $) and must not be empty.'
|
||||
);
|
||||
}
|
||||
|
||||
const document: ClientReplaceOneOperation = {
|
||||
update: index,
|
||||
multi: false,
|
||||
filter: model.filter,
|
||||
updateMods: model.replacement
|
||||
};
|
||||
if (model.hint) {
|
||||
document.hint = model.hint;
|
||||
}
|
||||
if (model.upsert) {
|
||||
document.upsert = model.upsert;
|
||||
}
|
||||
if (model.collation) {
|
||||
document.collation = model.collation;
|
||||
}
|
||||
return document;
|
||||
};
|
||||
|
||||
/** @internal */
|
||||
export function buildOperation(
|
||||
model: AnyClientBulkWriteModel<Document>,
|
||||
index: number,
|
||||
pkFactory: PkFactory
|
||||
): Document {
|
||||
switch (model.name) {
|
||||
case 'insertOne':
|
||||
return buildInsertOneOperation(model, index, pkFactory);
|
||||
case 'deleteOne':
|
||||
return buildDeleteOneOperation(model, index);
|
||||
case 'deleteMany':
|
||||
return buildDeleteManyOperation(model, index);
|
||||
case 'updateOne':
|
||||
return buildUpdateOneOperation(model, index);
|
||||
case 'updateMany':
|
||||
return buildUpdateManyOperation(model, index);
|
||||
case 'replaceOne':
|
||||
return buildReplaceOneOperation(model, index);
|
||||
}
|
||||
}
|
||||
271
backend/node_modules/mongodb/src/operations/client_bulk_write/common.ts
generated
vendored
Normal file
271
backend/node_modules/mongodb/src/operations/client_bulk_write/common.ts
generated
vendored
Normal file
@@ -0,0 +1,271 @@
|
||||
import { type Document } from '../../bson';
|
||||
import type { Filter, OptionalId, UpdateFilter, WithoutId } from '../../mongo_types';
|
||||
import type { CollationOptions, CommandOperationOptions } from '../../operations/command';
|
||||
import type { Hint } from '../../operations/operation';
|
||||
|
||||
/** @public */
|
||||
export interface ClientBulkWriteOptions extends CommandOperationOptions {
|
||||
/**
|
||||
* If true, when an insert fails, don't execute the remaining writes.
|
||||
* If false, continue with remaining inserts when one fails.
|
||||
* @defaultValue `true` - inserts are ordered by default
|
||||
*/
|
||||
ordered?: boolean;
|
||||
/**
|
||||
* Allow driver to bypass schema validation.
|
||||
* @defaultValue `false` - documents will be validated by default
|
||||
**/
|
||||
bypassDocumentValidation?: boolean;
|
||||
/** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
|
||||
let?: Document;
|
||||
/**
|
||||
* Whether detailed results for each successful operation should be included in the returned
|
||||
* BulkWriteResult.
|
||||
*/
|
||||
verboseResults?: boolean;
|
||||
}
|
||||
|
||||
/**
 * Base shape shared by all client bulk write models; each concrete model
 * extends this with a `name` discriminant and operation-specific fields.
 * @public
 */
export interface ClientWriteModel {
  /**
   * The namespace for the write.
   *
   * A namespace is a combination of the database name and the name of the collection: `<database-name>.<collection>`.
   * All documents belong to a namespace.
   *
   * @see https://www.mongodb.com/docs/manual/reference/limits/#std-label-faq-dev-namespace
   */
  namespace: string;
}
|
||||
|
||||
/** @public */
export interface ClientInsertOneModel<TSchema> extends ClientWriteModel {
  /** Discriminant selecting the insertOne operation when the command is built. */
  name: 'insertOne';
  /** The document to insert. */
  document: OptionalId<TSchema>;
}
|
||||
|
||||
/** @public */
export interface ClientDeleteOneModel<TSchema> extends ClientWriteModel {
  /** Discriminant selecting the deleteOne operation when the command is built. */
  name: 'deleteOne';
  /**
   * The filter used to determine if a document should be deleted.
   * For a deleteOne operation, the first match is removed.
   */
  filter: Filter<TSchema>;
  /** Specifies a collation. */
  collation?: CollationOptions;
  /** The index to use. If specified, then the query system will only consider plans using the hinted index. */
  hint?: Hint;
}
|
||||
|
||||
/** @public */
export interface ClientDeleteManyModel<TSchema> extends ClientWriteModel {
  /** Discriminant selecting the deleteMany operation when the command is built. */
  name: 'deleteMany';
  /**
   * The filter used to determine if a document should be deleted.
   * For a deleteMany operation, all matches are removed.
   */
  filter: Filter<TSchema>;
  /** Specifies a collation. */
  collation?: CollationOptions;
  /** The index to use. If specified, then the query system will only consider plans using the hinted index. */
  hint?: Hint;
}
|
||||
|
||||
/** @public */
export interface ClientReplaceOneModel<TSchema> extends ClientWriteModel {
  /** Discriminant selecting the replaceOne operation when the command is built. */
  name: 'replaceOne';
  /**
   * The filter used to determine if a document should be replaced.
   * For a replaceOne operation, the first match is replaced.
   */
  filter: Filter<TSchema>;
  /** The document with which to replace the matched document. */
  replacement: WithoutId<TSchema>;
  /** Specifies a collation. */
  collation?: CollationOptions;
  /** The index to use. If specified, then the query system will only consider plans using the hinted index. */
  hint?: Hint;
  /** When true, creates a new document if no document matches the query. */
  upsert?: boolean;
}
|
||||
|
||||
/** @public */
export interface ClientUpdateOneModel<TSchema> extends ClientWriteModel {
  /** Discriminant selecting the updateOne operation when the command is built. */
  name: 'updateOne';
  /**
   * The filter used to determine if a document should be updated.
   * For an updateOne operation, the first match is updated.
   */
  filter: Filter<TSchema>;
  /**
   * The modifications to apply. The value can be either:
   * UpdateFilter<Document> - A document that contains update operator expressions,
   * Document[] - an aggregation pipeline.
   */
  update: UpdateFilter<TSchema> | Document[];
  /** A set of filters specifying to which array elements an update should apply. */
  arrayFilters?: Document[];
  /** Specifies a collation. */
  collation?: CollationOptions;
  /** The index to use. If specified, then the query system will only consider plans using the hinted index. */
  hint?: Hint;
  /** When true, creates a new document if no document matches the query. */
  upsert?: boolean;
}
|
||||
|
||||
/** @public */
export interface ClientUpdateManyModel<TSchema> extends ClientWriteModel {
  /** Discriminant selecting the updateMany operation when the command is built. */
  name: 'updateMany';
  /**
   * The filter used to determine if a document should be updated.
   * For an updateMany operation, all matches are updated.
   */
  filter: Filter<TSchema>;
  /**
   * The modifications to apply. The value can be either:
   * UpdateFilter<Document> - A document that contains update operator expressions,
   * Document[] - an aggregation pipeline.
   */
  update: UpdateFilter<TSchema> | Document[];
  /** A set of filters specifying to which array elements an update should apply. */
  arrayFilters?: Document[];
  /** Specifies a collation. */
  collation?: CollationOptions;
  /** The index to use. If specified, then the query system will only consider plans using the hinted index. */
  hint?: Hint;
  /** When true, creates a new document if no document matches the query. */
  upsert?: boolean;
}
|
||||
|
||||
/**
 * Used to represent any of the client bulk write models that can be passed as an array
 * to MongoClient#bulkWrite.
 *
 * This is a discriminated union on the `name` field of each model.
 * @public
 */
export type AnyClientBulkWriteModel<TSchema extends Document> =
  | ClientInsertOneModel<TSchema>
  | ClientReplaceOneModel<TSchema>
  | ClientUpdateOneModel<TSchema>
  | ClientUpdateManyModel<TSchema>
  | ClientDeleteOneModel<TSchema>
  | ClientDeleteManyModel<TSchema>;
|
||||
|
||||
/**
 * A mapping of namespace strings to collections schemas.
 * @public
 *
 * @example
 * ```ts
 * type MongoDBSchemas = {
 *   'db.books': Book;
 *   'db.authors': Author;
 * }
 *
 * const model: ClientBulkWriteModel<MongoDBSchemas> = {
 *   namespace: 'db.books'
 *   name: 'insertOne',
 *   document: { title: 'Practical MongoDB Aggregations', authorName: 3 } // error `authorName` cannot be number
 * };
 * ```
 *
 * The type of the `namespace` field narrows other parts of the BulkWriteModel to use the correct schema for type assertions.
 *
 */
export type ClientBulkWriteModel<
  SchemaMap extends Record<string, Document> = Record<string, Document>
> = {
  // For each namespace key, pair the namespace literal with a model typed
  // against that namespace's schema, then collapse to a union over all keys.
  [Namespace in keyof SchemaMap]: AnyClientBulkWriteModel<SchemaMap[Namespace]> & {
    namespace: Namespace;
  };
}[keyof SchemaMap];
|
||||
|
||||
/** @public */
export interface ClientBulkWriteResult {
  /**
   * Whether the bulk write was acknowledged.
   */
  readonly acknowledged: boolean;
  /**
   * The total number of documents inserted across all insert operations.
   */
  readonly insertedCount: number;
  /**
   * The total number of documents upserted across all update operations.
   */
  readonly upsertedCount: number;
  /**
   * The total number of documents matched across all update operations.
   */
  readonly matchedCount: number;
  /**
   * The total number of documents modified across all update operations.
   */
  readonly modifiedCount: number;
  /**
   * The total number of documents deleted across all delete operations.
   */
  readonly deletedCount: number;
  /**
   * The results of each individual insert operation that was successfully performed.
   * Keyed by the index of the model in the original models array.
   * Only present when verboseResults was requested.
   */
  readonly insertResults?: ReadonlyMap<number, ClientInsertOneResult>;
  /**
   * The results of each individual update operation that was successfully performed.
   * Keyed by the index of the model in the original models array.
   * Only present when verboseResults was requested.
   */
  readonly updateResults?: ReadonlyMap<number, ClientUpdateResult>;
  /**
   * The results of each individual delete operation that was successfully performed.
   * Keyed by the index of the model in the original models array.
   * Only present when verboseResults was requested.
   */
  readonly deleteResults?: ReadonlyMap<number, ClientDeleteResult>;
}
|
||||
|
||||
/** @public */
export interface ClientBulkWriteError {
  /** The server error code for the failed write. */
  code: number;
  /** The server error message (errmsg) for the failed write. */
  message: string;
}
|
||||
|
||||
/** @public */
export interface ClientInsertOneResult {
  /**
   * The _id of the inserted document.
   */
  insertedId: any;
}
|
||||
|
||||
/** @public */
export interface ClientUpdateResult {
  /**
   * The number of documents that matched the filter.
   */
  matchedCount: number;

  /**
   * The number of documents that were modified.
   */
  modifiedCount: number;

  /**
   * The _id field of the upserted document if an upsert occurred.
   *
   * It MUST be possible to discern between a BSON Null upserted ID value and this field being
   * unset. If necessary, drivers MAY add a didUpsert boolean field to differentiate between
   * these two cases.
   */
  upsertedId?: any;

  /**
   * Determines if the upsert did include an _id, which includes the case of the _id being null.
   */
  didUpsert: boolean;
}
|
||||
|
||||
/** @public */
export interface ClientDeleteResult {
  /**
   * The number of documents that were deleted.
   */
  deletedCount: number;
}
|
||||
149
backend/node_modules/mongodb/src/operations/client_bulk_write/executor.ts
generated
vendored
Normal file
149
backend/node_modules/mongodb/src/operations/client_bulk_write/executor.ts
generated
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
import { type Document } from '../../bson';
|
||||
import { CursorTimeoutContext, CursorTimeoutMode } from '../../cursor/abstract_cursor';
|
||||
import { ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor';
|
||||
import {
|
||||
MongoClientBulkWriteError,
|
||||
MongoClientBulkWriteExecutionError,
|
||||
MongoInvalidArgumentError,
|
||||
MongoServerError
|
||||
} from '../../error';
|
||||
import { type MongoClient } from '../../mongo_client';
|
||||
import { TimeoutContext } from '../../timeout';
|
||||
import { resolveTimeoutOptions } from '../../utils';
|
||||
import { WriteConcern } from '../../write_concern';
|
||||
import { executeOperation } from '../execute_operation';
|
||||
import { ClientBulkWriteOperation } from './client_bulk_write';
|
||||
import { ClientBulkWriteCommandBuilder } from './command_builder';
|
||||
import {
|
||||
type AnyClientBulkWriteModel,
|
||||
type ClientBulkWriteOptions,
|
||||
type ClientBulkWriteResult
|
||||
} from './common';
|
||||
import { ClientBulkWriteResultsMerger } from './results_merger';
|
||||
|
||||
/**
 * Responsible for executing a client bulk write.
 * @internal
 */
export class ClientBulkWriteExecutor {
  /** The client the bulk write executes against. */
  private readonly client: MongoClient;
  /** Options with ordered/bypassDocumentValidation/verboseResults defaulted and write concern resolved. */
  private readonly options: ClientBulkWriteOptions;
  /** The user supplied write models, in execution order. */
  private readonly operations: ReadonlyArray<AnyClientBulkWriteModel<Document>>;

  /**
   * Instantiate the executor.
   * @param client - The mongo client.
   * @param operations - The user supplied bulk write models.
   * @param options - The bulk write options.
   * @throws MongoClientBulkWriteExecutionError - If no models are provided.
   * @throws MongoInvalidArgumentError - If an unacknowledged write concern (w: 0) is combined
   * with verbose results or ordered writes.
   */
  constructor(
    client: MongoClient,
    operations: ReadonlyArray<AnyClientBulkWriteModel<Document>>,
    options?: ClientBulkWriteOptions
  ) {
    if (operations.length === 0) {
      throw new MongoClientBulkWriteExecutionError('No client bulk write models were provided.');
    }

    this.client = client;
    this.operations = operations;
    this.options = {
      ordered: true,
      bypassDocumentValidation: false,
      verboseResults: false,
      ...options
    };

    // If no write concern was provided, we inherit one from the client.
    if (!this.options.writeConcern) {
      this.options.writeConcern = WriteConcern.fromOptions(this.client.s.options);
    }

    // w: 0 means the server sends no per-write results, so verbose results and
    // ordered execution cannot be honored and must be rejected up front.
    if (this.options.writeConcern?.w === 0) {
      if (this.options.verboseResults) {
        throw new MongoInvalidArgumentError(
          'Cannot request unacknowledged write concern and verbose results'
        );
      }

      if (this.options.ordered) {
        throw new MongoInvalidArgumentError(
          'Cannot request unacknowledged write concern and ordered writes'
        );
      }
    }
  }

  /**
   * Execute the client bulk write. Will split commands into batches and exhaust the cursors
   * for each, then merge the results into one.
   * @returns The result.
   * @throws MongoClientBulkWriteError - Wrapping server errors, or when write concern errors /
   * unordered write errors were observed during execution.
   */
  async execute(): Promise<ClientBulkWriteResult> {
    // The command builder will take the user provided models and potentially split the batch
    // into multiple commands due to size.
    const pkFactory = this.client.s.options.pkFactory;
    const commandBuilder = new ClientBulkWriteCommandBuilder(
      this.operations,
      this.options,
      pkFactory
    );
    // Unacknowledged writes need to execute all batches and return { ok: 1 }
    const resolvedOptions = resolveTimeoutOptions(this.client, this.options);
    const context = TimeoutContext.create(resolvedOptions);

    if (this.options.writeConcern?.w === 0) {
      // Unacknowledged path: fire every batch, no cursors or result merging.
      while (commandBuilder.hasNextBatch()) {
        const operation = new ClientBulkWriteOperation(commandBuilder, this.options);
        await executeOperation(this.client, operation, context);
      }
      return ClientBulkWriteResultsMerger.unacknowledged();
    } else {
      const resultsMerger = new ClientBulkWriteResultsMerger(this.options);
      // For each command we will create and exhaust a cursor for the results.
      while (commandBuilder.hasNextBatch()) {
        const cursorContext = new CursorTimeoutContext(context, Symbol());
        const options = {
          ...this.options,
          timeoutContext: cursorContext,
          ...(resolvedOptions.timeoutMS != null && { timeoutMode: CursorTimeoutMode.LIFETIME })
        };
        const cursor = new ClientBulkWriteCursor(this.client, commandBuilder, options);
        try {
          await resultsMerger.merge(cursor);
        } catch (error) {
          // Write concern errors are recorded in the writeConcernErrors field on MongoClientBulkWriteError.
          // When a write concern error is encountered, it should not terminate execution of the bulk write
          // for either ordered or unordered bulk writes. However, drivers MUST throw an exception at the end
          // of execution if any write concern errors were observed.
          if (error instanceof MongoServerError && !(error instanceof MongoClientBulkWriteError)) {
            // Server side errors need to be wrapped inside a MongoClientBulkWriteError, where the root
            // cause is the error property and a partial result is to be included.
            const bulkWriteError = new MongoClientBulkWriteError({
              message: 'Mongo client bulk write encountered an error during execution'
            });
            bulkWriteError.cause = error;
            bulkWriteError.partialResult = resultsMerger.bulkWriteResult;
            throw bulkWriteError;
          } else {
            // Client side errors are just thrown.
            throw error;
          }
        }
      }

      // If we have write concern errors or unordered write errors at the end we throw.
      if (resultsMerger.writeConcernErrors.length > 0 || resultsMerger.writeErrors.size > 0) {
        const error = new MongoClientBulkWriteError({
          message: 'Mongo client bulk write encountered errors during execution.'
        });
        error.writeConcernErrors = resultsMerger.writeConcernErrors;
        error.writeErrors = resultsMerger.writeErrors;
        error.partialResult = resultsMerger.bulkWriteResult;
        throw error;
      }

      return resultsMerger.bulkWriteResult;
    }
  }
}
|
||||
260
backend/node_modules/mongodb/src/operations/client_bulk_write/results_merger.ts
generated
vendored
Normal file
260
backend/node_modules/mongodb/src/operations/client_bulk_write/results_merger.ts
generated
vendored
Normal file
@@ -0,0 +1,260 @@
|
||||
import { MongoWriteConcernError } from '../..';
|
||||
import { type Document } from '../../bson';
|
||||
import { type ClientBulkWriteCursor } from '../../cursor/client_bulk_write_cursor';
|
||||
import { MongoClientBulkWriteError } from '../../error';
|
||||
import {
|
||||
type ClientBulkWriteError,
|
||||
type ClientBulkWriteOptions,
|
||||
type ClientBulkWriteResult,
|
||||
type ClientDeleteResult,
|
||||
type ClientInsertOneResult,
|
||||
type ClientUpdateResult
|
||||
} from './common';
|
||||
|
||||
/**
 * Unacknowledged bulk writes are always the same.
 * Returned by ClientBulkWriteResultsMerger.unacknowledged(); all counts are
 * zero and the per-operation result maps are absent.
 */
const UNACKNOWLEDGED = {
  acknowledged: false,
  insertedCount: 0,
  upsertedCount: 0,
  matchedCount: 0,
  modifiedCount: 0,
  deletedCount: 0,
  insertResults: undefined,
  updateResults: undefined,
  deleteResults: undefined
};
|
||||
|
||||
/**
 * Mutable accumulator used internally by the results merger; mirrors
 * ClientBulkWriteResult but with writable Maps so batches can be merged in.
 */
interface ClientBulkWriteResultAccumulation {
  /**
   * Whether the bulk write was acknowledged.
   */
  acknowledged: boolean;
  /**
   * The total number of documents inserted across all insert operations.
   */
  insertedCount: number;
  /**
   * The total number of documents upserted across all update operations.
   */
  upsertedCount: number;
  /**
   * The total number of documents matched across all update operations.
   */
  matchedCount: number;
  /**
   * The total number of documents modified across all update operations.
   */
  modifiedCount: number;
  /**
   * The total number of documents deleted across all delete operations.
   */
  deletedCount: number;
  /**
   * The results of each individual insert operation that was successfully performed.
   */
  insertResults?: Map<number, ClientInsertOneResult>;
  /**
   * The results of each individual update operation that was successfully performed.
   */
  updateResults?: Map<number, ClientUpdateResult>;
  /**
   * The results of each individual delete operation that was successfully performed.
   */
  deleteResults?: Map<number, ClientDeleteResult>;
}
|
||||
|
||||
/**
 * Merges client bulk write cursor responses together into a single result.
 * @internal
 */
export class ClientBulkWriteResultsMerger {
  /** Accumulated counts and (optionally) per-operation result maps. */
  private result: ClientBulkWriteResultAccumulation;
  /** The bulk write options (ordered / verboseResults drive merge behavior). */
  private options: ClientBulkWriteOptions;
  /** Offset of the current batch's first model within the original models array. */
  private currentBatchOffset: number;
  /** All write concern errors observed across batches. */
  writeConcernErrors: Document[];
  /** Unordered write errors, keyed by the index of the model in the original models array. */
  writeErrors: Map<number, ClientBulkWriteError>;

  /**
   * @returns The standard unacknowledged bulk write result.
   */
  static unacknowledged(): ClientBulkWriteResult {
    return UNACKNOWLEDGED;
  }

  /**
   * Instantiate the merger.
   * @param options - The options.
   */
  constructor(options: ClientBulkWriteOptions) {
    this.options = options;
    this.currentBatchOffset = 0;
    this.writeConcernErrors = [];
    this.writeErrors = new Map();
    this.result = {
      acknowledged: true,
      insertedCount: 0,
      upsertedCount: 0,
      matchedCount: 0,
      modifiedCount: 0,
      deletedCount: 0,
      insertResults: undefined,
      updateResults: undefined,
      deleteResults: undefined
    };

    // Per-operation result maps are only tracked when verbose results were requested.
    if (options.verboseResults) {
      this.result.insertResults = new Map<number, ClientInsertOneResult>();
      this.result.updateResults = new Map<number, ClientUpdateResult>();
      this.result.deleteResults = new Map<number, ClientDeleteResult>();
    }
  }

  /**
   * Get the bulk write result object.
   * Returns a fresh snapshot of the accumulated state.
   */
  get bulkWriteResult(): ClientBulkWriteResult {
    return {
      acknowledged: this.result.acknowledged,
      insertedCount: this.result.insertedCount,
      upsertedCount: this.result.upsertedCount,
      matchedCount: this.result.matchedCount,
      modifiedCount: this.result.modifiedCount,
      deletedCount: this.result.deletedCount,
      insertResults: this.result.insertResults,
      updateResults: this.result.updateResults,
      deleteResults: this.result.deleteResults
    };
  }

  /**
   * Merge the results in the cursor to the existing result.
   * @param cursor - The client bulk write cursor for one batch; it is iterated to
   * exhaustion and its response counts and operations are folded into this merger.
   * @returns The current result.
   * @throws MongoClientBulkWriteError - On an individual write error during an ordered bulk write.
   */
  async merge(cursor: ClientBulkWriteCursor): Promise<ClientBulkWriteResult> {
    let writeConcernErrorResult;
    try {
      for await (const document of cursor) {
        // Only add to maps if ok: 1
        if (document.ok === 1) {
          if (this.options.verboseResults) {
            this.processDocument(cursor, document);
          }
        } else {
          // If an individual write error is encountered during an ordered bulk write, drivers MUST
          // record the error in writeErrors and immediately throw the exception. Otherwise, drivers
          // MUST continue to iterate the results cursor and execute any further bulkWrite batches.
          if (this.options.ordered) {
            const error = new MongoClientBulkWriteError({
              message: 'Mongo client ordered bulk write encountered a write error.'
            });
            error.writeErrors.set(document.idx + this.currentBatchOffset, {
              code: document.code,
              message: document.errmsg
            });
            error.partialResult = this.result;
            throw error;
          } else {
            this.writeErrors.set(document.idx + this.currentBatchOffset, {
              code: document.code,
              message: document.errmsg
            });
          }
        }
      }
    } catch (error) {
      if (error instanceof MongoWriteConcernError) {
        // Capture the partial counts from the write concern error's result; they are
        // folded into the totals after the finally block below.
        const result = error.result;
        writeConcernErrorResult = {
          insertedCount: result.nInserted,
          upsertedCount: result.nUpserted,
          matchedCount: result.nMatched,
          modifiedCount: result.nModified,
          deletedCount: result.nDeleted,
          writeConcernError: result.writeConcernError
        };
        if (this.options.verboseResults && result.cursor.firstBatch) {
          for (const document of result.cursor.firstBatch) {
            if (document.ok === 1) {
              this.processDocument(cursor, document);
            }
          }
        }
      } else {
        throw error;
      }
    } finally {
      // Update the counts from the cursor response.
      if (cursor.response) {
        const response = cursor.response;
        this.incrementCounts(response);
      }

      // Increment the batch offset so indexes from the next batch map back to
      // positions in the original models array.
      this.currentBatchOffset += cursor.operations.length;
    }

    // If we have write concern errors ensure they are added.
    if (writeConcernErrorResult) {
      const writeConcernError = writeConcernErrorResult.writeConcernError as Document;
      this.incrementCounts(writeConcernErrorResult);
      this.writeConcernErrors.push({
        code: writeConcernError.code,
        message: writeConcernError.errmsg
      });
    }

    return this.result;
  }

  /**
   * Process an individual document in the results.
   * Routes the document to the insert/update/delete result map based on which
   * key is present on the corresponding command operation.
   * @param cursor - The cursor.
   * @param document - The document to process.
   */
  private processDocument(cursor: ClientBulkWriteCursor, document: Document) {
    // Get the corresponding operation from the command.
    const operation = cursor.operations[document.idx];
    // Handle insert results.
    if ('insert' in operation) {
      this.result.insertResults?.set(document.idx + this.currentBatchOffset, {
        insertedId: operation.document._id
      });
    }
    // Handle update results.
    if ('update' in operation) {
      const result: ClientUpdateResult = {
        matchedCount: document.n,
        modifiedCount: document.nModified ?? 0,
        // Check if the bulk did actually upsert.
        didUpsert: document.upserted != null
      };
      if (document.upserted) {
        result.upsertedId = document.upserted._id;
      }
      this.result.updateResults?.set(document.idx + this.currentBatchOffset, result);
    }
    // Handle delete results.
    if ('delete' in operation) {
      this.result.deleteResults?.set(document.idx + this.currentBatchOffset, {
        deletedCount: document.n
      });
    }
  }

  /**
   * Increment the result counts.
   * @param document - The document with the results.
   */
  private incrementCounts(document: Document) {
    this.result.insertedCount += document.insertedCount;
    this.result.upsertedCount += document.upsertedCount;
    this.result.matchedCount += document.matchedCount;
    this.result.modifiedCount += document.modifiedCount;
    this.result.deletedCount += document.deletedCount;
  }
}
|
||||
47
backend/node_modules/mongodb/src/operations/collections.ts
generated
vendored
Normal file
47
backend/node_modules/mongodb/src/operations/collections.ts
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
import { Collection } from '../collection';
|
||||
import type { Db } from '../db';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { AbstractOperation, type OperationOptions } from './operation';
|
||||
|
||||
/** Options for the collections operation. */
export interface CollectionsOptions extends OperationOptions {
  // NOTE(review): execute() always passes nameOnly: true to listCollections,
  // so this option appears to be overridden — confirm intent.
  nameOnly?: boolean;
}
|
||||
|
||||
/**
 * Lists the collections of a database (via listCollections, names only) and
 * materializes each as a Collection instance.
 * @internal
 */
export class CollectionsOperation extends AbstractOperation<Collection[]> {
  override options: CollectionsOptions;
  /** The database whose collections are listed. */
  db: Db;

  constructor(db: Db, options: CollectionsOptions) {
    super(options);
    this.options = options;
    this.db = db;
  }

  override get commandName() {
    return 'listCollections' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined
  ): Promise<Collection[]> {
    // Let's get the collection names
    const documents = await this.db
      .listCollections(
        {},
        { ...this.options, nameOnly: true, readPreference: this.readPreference, session }
      )
      .toArray();
    const collections: Collection[] = [];
    for (const { name } of documents) {
      if (!name.includes('$')) {
        // Filter collections removing any illegal ones (names containing '$')
        collections.push(new Collection(this.db, name, this.db.s.options));
      }
    }
    // Return the collection objects
    return collections;
  }
}
|
||||
181
backend/node_modules/mongodb/src/operations/command.ts
generated
vendored
Normal file
181
backend/node_modules/mongodb/src/operations/command.ts
generated
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
import type { BSONSerializeOptions, Document } from '../bson';
|
||||
import { type MongoDBResponseConstructor } from '../cmap/wire_protocol/responses';
|
||||
import { MongoInvalidArgumentError } from '../error';
|
||||
import {
|
||||
decorateWithExplain,
|
||||
Explain,
|
||||
type ExplainOptions,
|
||||
validateExplainTimeoutOptions
|
||||
} from '../explain';
|
||||
import { ReadConcern } from '../read_concern';
|
||||
import type { ReadPreference } from '../read_preference';
|
||||
import type { Server } from '../sdam/server';
|
||||
import { MIN_SECONDARY_WRITE_WIRE_VERSION } from '../sdam/server_selection';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { commandSupportsReadConcern, maxWireVersion, MongoDBNamespace } from '../utils';
|
||||
import { WriteConcern, type WriteConcernOptions } from '../write_concern';
|
||||
import type { ReadConcernLike } from './../read_concern';
|
||||
import { AbstractOperation, Aspect, type OperationOptions } from './operation';
|
||||
|
||||
/**
 * Collation settings forwarded to the server with a command.
 * @see https://www.mongodb.com/docs/manual/reference/collation/
 * @public
 */
export interface CollationOptions {
  locale: string;
  caseLevel?: boolean;
  caseFirst?: string;
  strength?: number;
  numericOrdering?: boolean;
  alternate?: string;
  maxVariable?: string;
  backwards?: boolean;
  normalization?: boolean;
}
|
||||
|
||||
/** @public */
export interface CommandOperationOptions
  extends OperationOptions,
    WriteConcernOptions,
    ExplainOptions {
  /** Specify a read concern and level for the collection. (only MongoDB 3.2 or higher supported) */
  readConcern?: ReadConcernLike;
  /** Collation */
  collation?: CollationOptions;
  /**
   * maxTimeMS is a server-side time limit in milliseconds for processing an operation.
   */
  maxTimeMS?: number;
  /**
   * Comment to apply to the operation.
   *
   * In server versions pre-4.4, 'comment' must be string. A server
   * error will be thrown if any other type is provided.
   *
   * In server versions 4.4 and above, 'comment' can be any valid BSON type.
   */
  comment?: unknown;
  /** Should retry failed writes */
  retryWrites?: boolean;

  // Admin command overrides. dbName/authdb redirect the command to another
  // database's $cmd namespace (see the CommandOperation constructor).
  dbName?: string;
  authdb?: string;
  noResponse?: boolean;
}
|
||||
|
||||
/**
 * The shape a parent (e.g. a Db or Collection) must expose so a command
 * operation can inherit its namespace, concerns, and BSON options.
 * @internal
 */
export interface OperationParent {
  s: { namespace: MongoDBNamespace };
  readConcern?: ReadConcern;
  writeConcern?: WriteConcern;
  readPreference?: ReadPreference;
  bsonOptions?: BSONSerializeOptions;
  timeoutMS?: number;
}
|
||||
|
||||
/**
 * Base class for operations that run a database command: resolves the target
 * $cmd namespace, read/write concerns, and explain settings, and decorates
 * the command document before sending it to a server.
 * @internal
 */
export abstract class CommandOperation<T> extends AbstractOperation<T> {
  override options: CommandOperationOptions;
  readConcern?: ReadConcern;
  writeConcern?: WriteConcern;
  explain?: Explain;

  /**
   * @param parent - Optional parent (e.g. Db/Collection) supplying the namespace.
   * @param options - Command options; dbName/authdb override the target database.
   * @throws MongoInvalidArgumentError - If `explain` is given but this operation
   * does not have the EXPLAINABLE aspect.
   */
  constructor(parent?: OperationParent, options?: CommandOperationOptions) {
    super(options);
    this.options = options ?? {};

    // NOTE: this was explicitly added for the add/remove user operations, it's likely
    // something we'd want to reconsider. Perhaps those commands can use `Admin`
    // as a parent?
    const dbNameOverride = options?.dbName || options?.authdb;
    if (dbNameOverride) {
      this.ns = new MongoDBNamespace(dbNameOverride, '$cmd');
    } else {
      // Fall back to the parent's database, or 'admin' when there is no parent.
      this.ns = parent
        ? parent.s.namespace.withCollection('$cmd')
        : new MongoDBNamespace('admin', '$cmd');
    }

    this.readConcern = ReadConcern.fromOptions(options);
    this.writeConcern = WriteConcern.fromOptions(options);

    if (this.hasAspect(Aspect.EXPLAINABLE)) {
      this.explain = Explain.fromOptions(options);
      if (this.explain) validateExplainTimeoutOptions(this.options, this.explain);
    } else if (options?.explain != null) {
      throw new MongoInvalidArgumentError(`Option "explain" is not supported on this command`);
    }
  }

  // Explained commands are not retryable writes.
  override get canRetryWrite(): boolean {
    if (this.hasAspect(Aspect.EXPLAINABLE)) {
      return this.explain == null;
    }
    return super.canRetryWrite;
  }

  public async executeCommand<T extends MongoDBResponseConstructor>(
    server: Server,
    session: ClientSession | undefined,
    cmd: Document,
    timeoutContext: TimeoutContext,
    responseType: T | undefined
  ): Promise<typeof responseType extends undefined ? Document : InstanceType<T>>;

  public async executeCommand(
    server: Server,
    session: ClientSession | undefined,
    cmd: Document,
    timeoutContext: TimeoutContext
  ): Promise<Document>;

  /**
   * Decorate the command with readConcern/writeConcern/collation/maxTimeMS/explain
   * as applicable, then run it against the given server.
   */
  async executeCommand(
    server: Server,
    session: ClientSession | undefined,
    cmd: Document,
    timeoutContext: TimeoutContext,
    responseType?: MongoDBResponseConstructor
  ): Promise<Document> {
    this.server = server;

    const options = {
      ...this.options,
      ...this.bsonOptions,
      timeoutContext,
      readPreference: this.readPreference,
      session
    };

    const serverWireVersion = maxWireVersion(server);
    const inTransaction = this.session && this.session.inTransaction();

    // Read concern is never sent for commands inside a transaction.
    if (this.readConcern && commandSupportsReadConcern(cmd) && !inTransaction) {
      Object.assign(cmd, { readConcern: this.readConcern });
    }

    if (this.trySecondaryWrite && serverWireVersion < MIN_SECONDARY_WRITE_WIRE_VERSION) {
      options.omitReadPreference = true;
    }

    // Write concern is never sent for commands inside a transaction.
    if (this.writeConcern && this.hasAspect(Aspect.WRITE_OPERATION) && !inTransaction) {
      WriteConcern.apply(cmd, this.writeConcern);
    }

    if (
      options.collation &&
      typeof options.collation === 'object' &&
      !this.hasAspect(Aspect.SKIP_COLLATION)
    ) {
      Object.assign(cmd, { collation: options.collation });
    }

    if (typeof options.maxTimeMS === 'number') {
      cmd.maxTimeMS = options.maxTimeMS;
    }

    // Wrapping in explain must happen last so the fully-decorated command is explained.
    if (this.hasAspect(Aspect.EXPLAINABLE) && this.explain) {
      cmd = decorateWithExplain(cmd, this.explain);
    }

    return await server.command(this.ns, cmd, options, responseType);
  }
}
|
||||
74
backend/node_modules/mongodb/src/operations/count.ts
generated
vendored
Normal file
74
backend/node_modules/mongodb/src/operations/count.ts
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
import type { Document } from '../bson';
|
||||
import type { Collection } from '../collection';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import type { MongoDBNamespace } from '../utils';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @public */
export interface CountOptions extends CommandOperationOptions {
  /** The number of documents to skip. */
  skip?: number;
  /** The maximum number of documents to count before aborting. */
  limit?: number;
  /**
   * Number of milliseconds to wait before aborting the query.
   */
  maxTimeMS?: number;
  /** An index name hint for the query. */
  hint?: string | Document;
}
|
||||
|
||||
/** @internal */
|
||||
export class CountOperation extends CommandOperation<number> {
|
||||
override options: CountOptions;
|
||||
collectionName?: string;
|
||||
query: Document;
|
||||
|
||||
constructor(namespace: MongoDBNamespace, filter: Document, options: CountOptions) {
|
||||
super({ s: { namespace: namespace } } as unknown as Collection, options);
|
||||
|
||||
this.options = options;
|
||||
this.collectionName = namespace.collection;
|
||||
this.query = filter;
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'count' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<number> {
|
||||
const options = this.options;
|
||||
const cmd: Document = {
|
||||
count: this.collectionName,
|
||||
query: this.query
|
||||
};
|
||||
|
||||
if (typeof options.limit === 'number') {
|
||||
cmd.limit = options.limit;
|
||||
}
|
||||
|
||||
if (typeof options.skip === 'number') {
|
||||
cmd.skip = options.skip;
|
||||
}
|
||||
|
||||
if (options.hint != null) {
|
||||
cmd.hint = options.hint;
|
||||
}
|
||||
|
||||
if (typeof options.maxTimeMS === 'number') {
|
||||
cmd.maxTimeMS = options.maxTimeMS;
|
||||
}
|
||||
|
||||
const result = await super.executeCommand(server, session, cmd, timeoutContext);
|
||||
return result ? result.n : 0;
|
||||
}
|
||||
}
|
||||
|
||||
defineAspects(CountOperation, [Aspect.READ_OPERATION, Aspect.RETRYABLE]);
|
||||
213
backend/node_modules/mongodb/src/operations/create_collection.ts
generated
vendored
Normal file
213
backend/node_modules/mongodb/src/operations/create_collection.ts
generated
vendored
Normal file
@@ -0,0 +1,213 @@
|
||||
import type { Document } from '../bson';
|
||||
import {
|
||||
MIN_SUPPORTED_QE_SERVER_VERSION,
|
||||
MIN_SUPPORTED_QE_WIRE_VERSION
|
||||
} from '../cmap/wire_protocol/constants';
|
||||
import { Collection } from '../collection';
|
||||
import type { Db } from '../db';
|
||||
import { MongoCompatibilityError } from '../error';
|
||||
import type { PkFactory } from '../mongo_client';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { CreateIndexesOperation } from './indexes';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
const ILLEGAL_COMMAND_FIELDS = new Set([
|
||||
'w',
|
||||
'wtimeout',
|
||||
'timeoutMS',
|
||||
'j',
|
||||
'fsync',
|
||||
'autoIndexId',
|
||||
'pkFactory',
|
||||
'raw',
|
||||
'readPreference',
|
||||
'session',
|
||||
'readConcern',
|
||||
'writeConcern',
|
||||
'raw',
|
||||
'fieldsAsRaw',
|
||||
'useBigInt64',
|
||||
'promoteLongs',
|
||||
'promoteValues',
|
||||
'promoteBuffers',
|
||||
'bsonRegExp',
|
||||
'serializeFunctions',
|
||||
'ignoreUndefined',
|
||||
'enableUtf8Validation'
|
||||
]);
|
||||
|
||||
/** @public
 * Configuration options for timeseries collections
 * @see https://www.mongodb.com/docs/manual/core/timeseries-collections/
 */
export interface TimeSeriesCollectionOptions extends Document {
  /** Name of the field containing the date in each timeseries document. */
  timeField: string;
  /** Optional field whose value groups related series together. */
  metaField?: string;
  /** Coarse bucketing granularity; mutually exclusive with the bucket*Seconds pair. */
  granularity?: 'seconds' | 'minutes' | 'hours' | string;
  bucketMaxSpanSeconds?: number;
  bucketRoundingSeconds?: number;
}
|
||||
|
||||
/** @public
 * Configuration options for clustered collections
 * @see https://www.mongodb.com/docs/manual/core/clustered-collections/
 */
export interface ClusteredCollectionOptions extends Document {
  /** Optional name for the clustered index. */
  name?: string;
  /** The clustered index key, e.g. `{ _id: 1 }`. */
  key: Document;
  unique: boolean;
}
|
||||
|
||||
/** @public */
export interface CreateCollectionOptions extends CommandOperationOptions {
  /** Create a capped collection */
  capped?: boolean;
  /** @deprecated Create an index on the _id field of the document. This option is deprecated in MongoDB 3.2+ and will be removed once no longer supported by the server. */
  autoIndexId?: boolean;
  /** The size of the capped collection in bytes */
  size?: number;
  /** The maximum number of documents in the capped collection */
  max?: number;
  /** Available for the MMAPv1 storage engine only to set the usePowerOf2Sizes and the noPadding flag */
  flags?: number;
  /** Allows users to specify configuration to the storage engine on a per-collection basis when creating a collection */
  storageEngine?: Document;
  /** Allows users to specify validation rules or expressions for the collection. For more information, see Document Validation */
  validator?: Document;
  /** Determines how strictly MongoDB applies the validation rules to existing documents during an update */
  validationLevel?: string;
  /** Determines whether to error on invalid documents or just warn about the violations but allow invalid documents to be inserted */
  validationAction?: string;
  /** Allows users to specify a default configuration for indexes when creating a collection */
  indexOptionDefaults?: Document;
  /** The name of the source collection or view from which to create the view. The name is not the full namespace of the collection or view (i.e., does not include the database name and implies the same database as the view to create) */
  viewOn?: string;
  /** An array that consists of the aggregation pipeline stage. Creates the view by applying the specified pipeline to the viewOn collection or view */
  pipeline?: Document[];
  /** A primary key factory function for generation of custom _id keys. */
  pkFactory?: PkFactory;
  /** A document specifying configuration options for timeseries collections. */
  timeseries?: TimeSeriesCollectionOptions;
  /** A document specifying configuration options for clustered collections. For MongoDB 5.3 and above. */
  clusteredIndex?: ClusteredCollectionOptions;
  /** The number of seconds after which a document in a timeseries or clustered collection expires. */
  expireAfterSeconds?: number;
  /** @experimental */
  encryptedFields?: Document;
  /**
   * If set, enables pre-update and post-update document events to be included for any
   * change streams that listen on this collection.
   */
  changeStreamPreAndPostImages?: { enabled: boolean };
}
|
||||
|
||||
/** @internal Error text used when Queryable Encryption is requested against a server that is too old. */
const INVALID_QE_VERSION =
  'Driver support of Queryable Encryption is incompatible with server. Upgrade server to use Queryable Encryption.';
|
||||
|
||||
/**
 * Creates a collection, including the Queryable Encryption (QE) auxiliary
 * collections and `__safeContent__` index when `encryptedFields` applies.
 * @internal
 */
export class CreateCollectionOperation extends CommandOperation<Collection> {
  override options: CreateCollectionOptions;
  db: Db;
  name: string;

  constructor(db: Db, name: string, options: CreateCollectionOptions = {}) {
    super(db, options);

    this.options = options;
    this.db = db;
    this.name = name;
  }

  override get commandName() {
    return 'create' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<Collection> {
    const db = this.db;
    const name = this.name;
    const options = this.options;

    // encryptedFields may come from the options or from the client-level
    // autoEncryption encryptedFieldsMap, keyed by "<db>.<collection>".
    const encryptedFields: Document | undefined =
      options.encryptedFields ??
      db.client.s.options.autoEncryption?.encryptedFieldsMap?.[`${db.databaseName}.${name}`];

    if (encryptedFields) {
      // Creating a QE collection required min server of 7.0.0
      // TODO(NODE-5353): Get wire version information from connection.
      if (
        !server.loadBalanced &&
        server.description.maxWireVersion < MIN_SUPPORTED_QE_WIRE_VERSION
      ) {
        throw new MongoCompatibilityError(
          `${INVALID_QE_VERSION} The minimum server version required is ${MIN_SUPPORTED_QE_SERVER_VERSION}`
        );
      }
      // Create auxilliary collections for queryable encryption support.
      // They are clustered on _id and created BEFORE the main collection.
      const escCollection = encryptedFields.escCollection ?? `enxcol_.${name}.esc`;
      const ecocCollection = encryptedFields.ecocCollection ?? `enxcol_.${name}.ecoc`;

      for (const collectionName of [escCollection, ecocCollection]) {
        const createOp = new CreateCollectionOperation(db, collectionName, {
          clusteredIndex: {
            key: { _id: 1 },
            unique: true
          }
        });
        await createOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext);
      }

      // Persist encryptedFields derived from the client map so the main
      // `create` command below includes them.
      if (!options.encryptedFields) {
        this.options = { ...this.options, encryptedFields };
      }
    }

    const coll = await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext);

    if (encryptedFields) {
      // Create the required index for queryable encryption support.
      const createIndexOp = CreateIndexesOperation.fromIndexSpecification(
        db,
        name,
        { __safeContent__: 1 },
        {}
      );
      await createIndexOp.execute(server, session, timeoutContext);
    }

    return coll;
  }

  /** Builds and runs the raw `create` command, skipping all QE handling. */
  private async executeWithoutEncryptedFieldsCheck(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<Collection> {
    const db = this.db;
    const name = this.name;
    const options = this.options;

    const cmd: Document = { create: name };
    // Copy every set, non-function option except driver-level fields that the
    // server would reject (see ILLEGAL_COMMAND_FIELDS).
    for (const n in options) {
      if (
        (options as any)[n] != null &&
        typeof (options as any)[n] !== 'function' &&
        !ILLEGAL_COMMAND_FIELDS.has(n)
      ) {
        cmd[n] = (options as any)[n];
      }
    }
    // otherwise just execute the command
    await super.executeCommand(server, session, cmd, timeoutContext);
    return new Collection(db, name, options);
  }
}

defineAspects(CreateCollectionOperation, [Aspect.WRITE_OPERATION]);
|
||||
184
backend/node_modules/mongodb/src/operations/delete.ts
generated
vendored
Normal file
184
backend/node_modules/mongodb/src/operations/delete.ts
generated
vendored
Normal file
@@ -0,0 +1,184 @@
|
||||
import type { Document } from '../bson';
|
||||
import type { Collection } from '../collection';
|
||||
import { MongoCompatibilityError, MongoServerError } from '../error';
|
||||
import { type TODO_NODE_3286 } from '../mongo_types';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { type MongoDBNamespace } from '../utils';
|
||||
import { type WriteConcernOptions } from '../write_concern';
|
||||
import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects, type Hint } from './operation';
|
||||
|
||||
/** @public */
export interface DeleteOptions extends CommandOperationOptions, WriteConcernOptions {
  /** If true, when a delete fails, don't execute the remaining writes. If false, continue with remaining deletes when one fails. */
  ordered?: boolean;
  /** Specifies the collation to use for the operation */
  collation?: CollationOptions;
  /** Specify that the delete query should only consider plans using the hinted index */
  hint?: string | Document;
  /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
  let?: Document;
}

/** @public */
export interface DeleteResult {
  /** Indicates whether this write result was acknowledged. If not, then all other members of this result will be undefined. */
  acknowledged: boolean;
  /** The number of documents that were deleted */
  deletedCount: number;
}

/** @public
 * One entry of the `deletes` array in the wire-format `delete` command.
 */
export interface DeleteStatement {
  /** The query that matches documents to delete. */
  q: Document;
  /** The number of matching documents to delete. */
  limit: number;
  /** Specifies the collation to use for the operation. */
  collation?: CollationOptions;
  /** A document or string that specifies the index to use to support the query predicate. */
  hint?: Hint;
}
|
||||
|
||||
/**
 * Runs the server `delete` command for one or more delete statements and
 * returns the raw server reply.
 * @internal
 */
export class DeleteOperation extends CommandOperation<DeleteResult> {
  override options: DeleteOptions;
  statements: DeleteStatement[];

  constructor(ns: MongoDBNamespace, statements: DeleteStatement[], options: DeleteOptions) {
    super(undefined, options);
    this.options = options;
    this.ns = ns;
    this.statements = statements;
  }

  override get commandName() {
    return 'delete' as const;
  }

  /** Retryable only when every statement targets a single document (limit > 0). */
  override get canRetryWrite(): boolean {
    if (super.canRetryWrite === false) {
      return false;
    }

    return this.statements.every(op => (op.limit != null ? op.limit > 0 : true));
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<DeleteResult> {
    const options = this.options ?? {};
    // Ordered execution is the server default; preserve it unless explicitly disabled.
    const ordered = typeof options.ordered === 'boolean' ? options.ordered : true;
    const command: Document = {
      delete: this.ns.collection,
      deletes: this.statements,
      ordered
    };

    if (options.let) {
      command.let = options.let;
    }

    // we check for undefined specifically here to allow falsy values
    // eslint-disable-next-line no-restricted-syntax
    if (options.comment !== undefined) {
      command.comment = options.comment;
    }

    // hint cannot be combined with w:0 — the server could not report a failure.
    const unacknowledgedWrite = this.writeConcern && this.writeConcern.w === 0;
    if (unacknowledgedWrite) {
      if (this.statements.find((o: Document) => o.hint)) {
        // TODO(NODE-3541): fix error for hint with unacknowledged writes
        throw new MongoCompatibilityError(`hint is not supported with unacknowledged writes`);
      }
    }

    const res: TODO_NODE_3286 = await super.executeCommand(
      server,
      session,
      command,
      timeoutContext
    );
    return res;
  }
}
|
||||
|
||||
export class DeleteOneOperation extends DeleteOperation {
|
||||
constructor(collection: Collection, filter: Document, options: DeleteOptions) {
|
||||
super(collection.s.namespace, [makeDeleteStatement(filter, { ...options, limit: 1 })], options);
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<DeleteResult> {
|
||||
const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext);
|
||||
if (this.explain) return res;
|
||||
if (res.code) throw new MongoServerError(res);
|
||||
if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]);
|
||||
|
||||
return {
|
||||
acknowledged: this.writeConcern?.w !== 0,
|
||||
deletedCount: res.n
|
||||
};
|
||||
}
|
||||
}
|
||||
export class DeleteManyOperation extends DeleteOperation {
|
||||
constructor(collection: Collection, filter: Document, options: DeleteOptions) {
|
||||
super(collection.s.namespace, [makeDeleteStatement(filter, options)], options);
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<DeleteResult> {
|
||||
const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext);
|
||||
if (this.explain) return res;
|
||||
if (res.code) throw new MongoServerError(res);
|
||||
if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]);
|
||||
|
||||
return {
|
||||
acknowledged: this.writeConcern?.w !== 0,
|
||||
deletedCount: res.n
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export function makeDeleteStatement(
|
||||
filter: Document,
|
||||
options: DeleteOptions & { limit?: number }
|
||||
): DeleteStatement {
|
||||
const op: DeleteStatement = {
|
||||
q: filter,
|
||||
limit: typeof options.limit === 'number' ? options.limit : 0
|
||||
};
|
||||
|
||||
if (options.collation) {
|
||||
op.collation = options.collation;
|
||||
}
|
||||
|
||||
if (options.hint) {
|
||||
op.hint = options.hint;
|
||||
}
|
||||
|
||||
return op;
|
||||
}
|
||||
|
||||
defineAspects(DeleteOperation, [Aspect.RETRYABLE, Aspect.WRITE_OPERATION]);
defineAspects(DeleteOneOperation, [
  Aspect.RETRYABLE,
  Aspect.WRITE_OPERATION,
  Aspect.EXPLAINABLE,
  Aspect.SKIP_COLLATION
]);
// DeleteMany is deliberately NOT marked RETRYABLE: multi-document deletes
// (limit: 0) are not retryable writes.
defineAspects(DeleteManyOperation, [
  Aspect.WRITE_OPERATION,
  Aspect.EXPLAINABLE,
  Aspect.SKIP_COLLATION
]);
|
||||
86
backend/node_modules/mongodb/src/operations/distinct.ts
generated
vendored
Normal file
86
backend/node_modules/mongodb/src/operations/distinct.ts
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
import type { Document } from '../bson';
|
||||
import type { Collection } from '../collection';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { decorateWithCollation, decorateWithReadConcern } from '../utils';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @public */
export type DistinctOptions = CommandOperationOptions;

/**
 * Return a list of distinct values for the given key across a collection.
 * @internal
 */
export class DistinctOperation extends CommandOperation<any[]> {
  override options: DistinctOptions;
  collection: Collection;
  /** Field of the document to find distinct values for. */
  key: string;
  /** The query for filtering the set of documents to which we apply the distinct filter. */
  query: Document;

  /**
   * Construct a Distinct operation.
   *
   * @param collection - Collection instance.
   * @param key - Field of the document to find distinct values for.
   * @param query - The query for filtering the set of documents to which we apply the distinct filter.
   * @param options - Optional settings. See Collection.prototype.distinct for a list of options.
   */
  constructor(collection: Collection, key: string, query: Document, options?: DistinctOptions) {
    super(collection, options);

    this.options = options ?? {};
    this.collection = collection;
    this.key = key;
    this.query = query;
  }

  override get commandName() {
    return 'distinct' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<any[]> {
    const coll = this.collection;
    const key = this.key;
    const query = this.query;
    const options = this.options;

    // Distinct command
    const cmd: Document = {
      distinct: coll.collectionName,
      key: key,
      query: query
    };

    // Add maxTimeMS if defined
    if (typeof options.maxTimeMS === 'number') {
      cmd.maxTimeMS = options.maxTimeMS;
    }

    // we check for undefined specifically here to allow falsy values
    // eslint-disable-next-line no-restricted-syntax
    if (typeof options.comment !== 'undefined') {
      cmd.comment = options.comment;
    }

    // Do we have a readConcern specified
    decorateWithReadConcern(cmd, coll, options);

    // Have we specified collation
    decorateWithCollation(cmd, coll, options);

    const result = await super.executeCommand(server, session, cmd, timeoutContext);

    // Explain output is returned as-is; otherwise the reply's `values` array.
    return this.explain ? result : result.values;
  }
}

defineAspects(DistinctOperation, [Aspect.READ_OPERATION, Aspect.RETRYABLE, Aspect.EXPLAINABLE]);
|
||||
116
backend/node_modules/mongodb/src/operations/drop.ts
generated
vendored
Normal file
116
backend/node_modules/mongodb/src/operations/drop.ts
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
import type { Document } from '../bson';
|
||||
import type { Db } from '../db';
|
||||
import { MONGODB_ERROR_CODES, MongoServerError } from '../error';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @public */
export interface DropCollectionOptions extends CommandOperationOptions {
  /** @experimental Queryable Encryption configuration; triggers dropping the QE auxiliary collections as well. */
  encryptedFields?: Document;
}
|
||||
|
||||
/**
 * Drops a collection, including the Queryable Encryption auxiliary
 * collections when `encryptedFields` applies.
 * @internal
 */
export class DropCollectionOperation extends CommandOperation<boolean> {
  override options: DropCollectionOptions;
  db: Db;
  name: string;

  constructor(db: Db, name: string, options: DropCollectionOptions = {}) {
    super(db, options);
    this.db = db;
    this.options = options;
    this.name = name;
  }

  override get commandName() {
    return 'drop' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<boolean> {
    const db = this.db;
    const options = this.options;
    const name = this.name;

    const encryptedFieldsMap = db.client.s.options.autoEncryption?.encryptedFieldsMap;
    let encryptedFields: Document | undefined =
      options.encryptedFields ?? encryptedFieldsMap?.[`${db.databaseName}.${name}`];

    if (!encryptedFields && encryptedFieldsMap) {
      // If the MongoClient was configured with an encryptedFieldsMap,
      // and no encryptedFields config was available in it or explicitly
      // passed as an argument, the spec tells us to look one up using
      // listCollections().
      const listCollectionsResult = await db
        .listCollections({ name }, { nameOnly: false })
        .toArray();
      encryptedFields = listCollectionsResult?.[0]?.options?.encryptedFields;
    }

    if (encryptedFields) {
      const escCollection = encryptedFields.escCollection || `enxcol_.${name}.esc`;
      const ecocCollection = encryptedFields.ecocCollection || `enxcol_.${name}.ecoc`;

      for (const collectionName of [escCollection, ecocCollection]) {
        // Drop auxilliary collections, ignoring potential NamespaceNotFound errors.
        const dropOp = new DropCollectionOperation(db, collectionName);
        try {
          await dropOp.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext);
        } catch (err) {
          if (
            !(err instanceof MongoServerError) ||
            err.code !== MONGODB_ERROR_CODES.NamespaceNotFound
          ) {
            throw err;
          }
        }
      }
    }

    return await this.executeWithoutEncryptedFieldsCheck(server, session, timeoutContext);
  }

  /** Runs the bare `drop` command with no QE handling. */
  private async executeWithoutEncryptedFieldsCheck(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<boolean> {
    await super.executeCommand(server, session, { drop: this.name }, timeoutContext);
    return true;
  }
}
|
||||
|
||||
/** @public */
export type DropDatabaseOptions = CommandOperationOptions;

/**
 * Drops the entire database this operation is bound to.
 * @internal
 */
export class DropDatabaseOperation extends CommandOperation<boolean> {
  override options: DropDatabaseOptions;

  constructor(db: Db, options: DropDatabaseOptions) {
    super(db, options);
    this.options = options;
  }
  override get commandName() {
    return 'dropDatabase' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<boolean> {
    await super.executeCommand(server, session, { dropDatabase: 1 }, timeoutContext);
    return true;
  }
}

defineAspects(DropCollectionOperation, [Aspect.WRITE_OPERATION]);
defineAspects(DropDatabaseOperation, [Aspect.WRITE_OPERATION]);
|
||||
61
backend/node_modules/mongodb/src/operations/estimated_document_count.ts
generated
vendored
Normal file
61
backend/node_modules/mongodb/src/operations/estimated_document_count.ts
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
import type { Document } from '../bson';
|
||||
import type { Collection } from '../collection';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @public */
export interface EstimatedDocumentCountOptions extends CommandOperationOptions {
  /**
   * The maximum amount of time to allow the operation to run.
   *
   * This option is sent only if the caller explicitly provides a value. The default is to not send a value.
   */
  maxTimeMS?: number;
}

/**
 * Estimates the document count using the bare `count` command (collection
 * metadata), without a filter.
 * @internal
 */
export class EstimatedDocumentCountOperation extends CommandOperation<number> {
  override options: EstimatedDocumentCountOptions;
  collectionName: string;

  constructor(collection: Collection, options: EstimatedDocumentCountOptions = {}) {
    super(collection, options);
    this.options = options;
    this.collectionName = collection.collectionName;
  }

  override get commandName() {
    return 'count' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<number> {
    const cmd: Document = { count: this.collectionName };

    if (typeof this.options.maxTimeMS === 'number') {
      cmd.maxTimeMS = this.options.maxTimeMS;
    }

    // we check for undefined specifically here to allow falsy values
    // eslint-disable-next-line no-restricted-syntax
    if (this.options.comment !== undefined) {
      cmd.comment = this.options.comment;
    }

    const response = await super.executeCommand(server, session, cmd, timeoutContext);

    return response?.n || 0;
  }
}

defineAspects(EstimatedDocumentCountOperation, [
  Aspect.READ_OPERATION,
  Aspect.RETRYABLE,
  Aspect.CURSOR_CREATING
]);
|
||||
299
backend/node_modules/mongodb/src/operations/execute_operation.ts
generated
vendored
Normal file
299
backend/node_modules/mongodb/src/operations/execute_operation.ts
generated
vendored
Normal file
@@ -0,0 +1,299 @@
|
||||
import {
|
||||
isRetryableReadError,
|
||||
isRetryableWriteError,
|
||||
MongoCompatibilityError,
|
||||
MONGODB_ERROR_CODES,
|
||||
MongoError,
|
||||
MongoErrorLabel,
|
||||
MongoExpiredSessionError,
|
||||
MongoInvalidArgumentError,
|
||||
MongoNetworkError,
|
||||
MongoNotConnectedError,
|
||||
MongoRuntimeError,
|
||||
MongoServerError,
|
||||
MongoTransactionError,
|
||||
MongoUnexpectedServerResponseError
|
||||
} from '../error';
|
||||
import type { MongoClient } from '../mongo_client';
|
||||
import { ReadPreference } from '../read_preference';
|
||||
import type { ServerDescription } from '../sdam/server_description';
|
||||
import {
|
||||
sameServerSelector,
|
||||
secondaryWritableServerSelector,
|
||||
type ServerSelector
|
||||
} from '../sdam/server_selection';
|
||||
import type { Topology } from '../sdam/topology';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { TimeoutContext } from '../timeout';
|
||||
import { supportsRetryableWrites } from '../utils';
|
||||
import { AbstractOperation, Aspect } from './operation';
|
||||
|
||||
// NOTE(review): named for the MMAPv1 storage engine, which rejects retryable
// writes with IllegalOperation; presumably consumed by the retry logic below
// to rewrite that failure into an actionable message — confirm at the use site.
const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation;
const MMAPv1_RETRY_WRITES_ERROR_MESSAGE =
  'This MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string.';

// Extracts the result type K from an AbstractOperation<K>.
type ResultTypeFromOperation<TOperation> =
  TOperation extends AbstractOperation<infer K> ? K : never;
|
||||
|
||||
/**
 * Executes the given operation with provided arguments.
 * @internal
 *
 * @remarks
 * Allows for a single point of entry to provide features such as implicit sessions, which
 * are required by the Driver Sessions specification in the event that a ClientSession is
 * not provided.
 *
 * The expectation is that this function:
 * - Connects the MongoClient if it has not already been connected, see {@link autoConnect}
 * - Creates a session if none is provided and cleans up the session it creates
 * - Tries an operation and retries under certain conditions, see {@link tryOperation}
 *
 * @typeParam T - The operation's type
 * @typeParam TResult - The type of the operation's result, calculated from T
 *
 * @param client - The MongoClient to execute this operation with
 * @param operation - The operation to execute
 */
export async function executeOperation<
  T extends AbstractOperation<TResult>,
  TResult = ResultTypeFromOperation<T>
>(client: MongoClient, operation: T, timeoutContext?: TimeoutContext | null): Promise<TResult> {
  if (!(operation instanceof AbstractOperation)) {
    // TODO(NODE-3483): Extend MongoRuntimeError
    throw new MongoRuntimeError('This method requires a valid operation instance');
  }

  const topology = await autoConnect(client);

  // The driver sessions spec mandates that we implicitly create sessions for operations
  // that are not explicitly provided with a session.
  let session = operation.session;
  // `owner` is only set when we create the session ourselves; the finally block
  // below uses it to decide whether this function is responsible for ending it.
  let owner: symbol | undefined;

  if (session == null) {
    owner = Symbol();
    session = client.startSession({ owner, explicit: false });
  } else if (session.hasEnded) {
    throw new MongoExpiredSessionError('Use of expired sessions is not permitted');
  } else if (session.snapshotEnabled && !topology.capabilities.supportsSnapshotReads) {
    throw new MongoCompatibilityError('Snapshot reads require MongoDB 5.0 or later');
  } else if (session.client !== client) {
    // Sessions are bound to the client that created them.
    throw new MongoInvalidArgumentError('ClientSession must be from the same MongoClient');
  }

  const readPreference = operation.readPreference ?? ReadPreference.primary;
  const inTransaction = !!session?.inTransaction();

  const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION);

  // Reads inside a transaction must target the primary (runCommand included).
  if (
    inTransaction &&
    !readPreference.equals(ReadPreference.primary) &&
    (hasReadAspect || operation.commandName === 'runCommand')
  ) {
    throw new MongoTransactionError(
      `Read preference in a transaction must be primary, not: ${readPreference.mode}`
    );
  }

  // Once a transaction has committed, release the pinned server/connection unless
  // the operation (e.g. commit/abort retries) explicitly opts out of the check.
  if (session?.isPinned && session.transaction.isCommitted && !operation.bypassPinningCheck) {
    session.unpin();
  }

  // Build a timeout context (CSOT) unless the caller already supplied one.
  timeoutContext ??= TimeoutContext.create({
    session,
    serverSelectionTimeoutMS: client.s.options.serverSelectionTimeoutMS,
    waitQueueTimeoutMS: client.s.options.waitQueueTimeoutMS,
    timeoutMS: operation.options.timeoutMS
  });

  try {
    return await tryOperation(operation, {
      topology,
      timeoutContext,
      session,
      readPreference
    });
  } finally {
    // Only end sessions we implicitly created; caller-provided sessions are theirs.
    if (session?.owner != null && session.owner === owner) {
      await session.endSession();
    }
  }
}
|
||||
|
||||
/**
|
||||
* Connects a client if it has not yet been connected
|
||||
* @internal
|
||||
*/
|
||||
async function autoConnect(client: MongoClient): Promise<Topology> {
|
||||
if (client.topology == null) {
|
||||
if (client.s.hasBeenClosed) {
|
||||
throw new MongoNotConnectedError('Client must be connected before running operations');
|
||||
}
|
||||
client.s.options[Symbol.for('@@mdb.skipPingOnConnect')] = true;
|
||||
try {
|
||||
await client.connect();
|
||||
if (client.topology == null) {
|
||||
throw new MongoRuntimeError(
|
||||
'client.connect did not create a topology but also did not throw'
|
||||
);
|
||||
}
|
||||
return client.topology;
|
||||
} finally {
|
||||
delete client.s.options[Symbol.for('@@mdb.skipPingOnConnect')];
|
||||
}
|
||||
}
|
||||
return client.topology;
|
||||
}
|
||||
|
||||
/** @internal Context bundle threaded from executeOperation into tryOperation. */
type RetryOptions = {
  /** Session in use, if any (implicit or caller-provided). */
  session: ClientSession | undefined;
  /** Effective read preference used for server selection. */
  readPreference: ReadPreference;
  /** Connected topology to select servers from. */
  topology: Topology;
  /** Timeout (CSOT) state shared across retry attempts. */
  timeoutContext: TimeoutContext;
};
|
||||
|
||||
/**
 * Executes an operation and retries as appropriate
 * @internal
 *
 * @remarks
 * Implements behaviour described in [Retryable Reads](https://github.com/mongodb/specifications/blob/master/source/retryable-reads/retryable-reads.md) and [Retryable
 * Writes](https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.md) specification
 *
 * This function:
 * - performs initial server selection
 * - attempts to execute an operation
 * - retries the operation if it meets the criteria for a retryable read or a retryable write
 *
 * @typeParam T - The operation's type
 * @typeParam TResult - The type of the operation's result, calculated from T
 *
 * @param operation - The operation to execute
 * */
async function tryOperation<
  T extends AbstractOperation<TResult>,
  TResult = ResultTypeFromOperation<T>
>(
  operation: T,
  { topology, timeoutContext, session, readPreference }: RetryOptions
): Promise<TResult> {
  let selector: ReadPreference | ServerSelector;

  if (operation.hasAspect(Aspect.MUST_SELECT_SAME_SERVER)) {
    // GetMore and KillCursor operations must always select the same server, but run through
    // server selection to potentially force monitor checks if the server is
    // in an unknown state.
    selector = sameServerSelector(operation.server?.description);
  } else if (operation.trySecondaryWrite) {
    // If operation should try to write to secondary use the custom server selector
    // otherwise provide the read preference.
    selector = secondaryWritableServerSelector(topology.commonWireVersion, readPreference);
  } else {
    selector = readPreference;
  }

  let server = await topology.selectServer(selector, {
    session,
    operationName: operation.commandName,
    timeoutContext
  });

  const hasReadAspect = operation.hasAspect(Aspect.READ_OPERATION);
  const hasWriteAspect = operation.hasAspect(Aspect.WRITE_OPERATION);
  const inTransaction = session?.inTransaction() ?? false;

  // Retries are disabled inside transactions and require the matching
  // retryReads/retryWrites client options plus operation support.
  const willRetryRead = topology.s.options.retryReads && !inTransaction && operation.canRetryRead;

  const willRetryWrite =
    topology.s.options.retryWrites &&
    !inTransaction &&
    supportsRetryableWrites(server) &&
    operation.canRetryWrite;

  const willRetry =
    operation.hasAspect(Aspect.RETRYABLE) &&
    session != null &&
    ((hasReadAspect && willRetryRead) || (hasWriteAspect && willRetryWrite));

  if (hasWriteAspect && willRetryWrite && session != null) {
    // Mark the attempt retryable and bump txnNumber so the server can
    // de-duplicate the write if both attempts arrive.
    operation.options.willRetryWrite = true;
    session.incrementTransactionNumber();
  }

  // Under CSOT the overall timeout bounds retries, so we may loop indefinitely;
  // otherwise the spec allows exactly one retry (two attempts total).
  const maxTries = willRetry ? (timeoutContext.csotEnabled() ? Infinity : 2) : 1;
  let previousOperationError: MongoError | undefined;
  let previousServer: ServerDescription | undefined;

  for (let tries = 0; tries < maxTries; tries++) {
    // Everything in this branch only runs on retry attempts (tries > 0).
    if (previousOperationError) {
      if (hasWriteAspect && previousOperationError.code === MMAPv1_RETRY_WRITES_ERROR_CODE) {
        throw new MongoServerError({
          message: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
          errmsg: MMAPv1_RETRY_WRITES_ERROR_MESSAGE,
          originalError: previousOperationError
        });
      }

      if (operation.hasAspect(Aspect.COMMAND_BATCHING) && !operation.canRetryWrite) {
        throw previousOperationError;
      }

      // Non-retryable errors propagate immediately.
      if (hasWriteAspect && !isRetryableWriteError(previousOperationError))
        throw previousOperationError;

      if (hasReadAspect && !isRetryableReadError(previousOperationError))
        throw previousOperationError;

      // A network error on a cursor-creating op outside a transaction invalidates
      // the pinned connection; unpin before re-selecting a server.
      if (
        previousOperationError instanceof MongoNetworkError &&
        operation.hasAspect(Aspect.CURSOR_CREATING) &&
        session != null &&
        session.isPinned &&
        !session.inTransaction()
      ) {
        session.unpin({ force: true, forceClear: true });
      }

      // Re-select, preferring a different server than the one that just failed.
      server = await topology.selectServer(selector, {
        session,
        operationName: operation.commandName,
        previousServer
      });

      if (hasWriteAspect && !supportsRetryableWrites(server)) {
        throw new MongoUnexpectedServerResponseError(
          'Selected server does not support retryable writes'
        );
      }
    }

    try {
      // If tries > 0 and we are command batching we need to reset the batch.
      if (tries > 0 && operation.hasAspect(Aspect.COMMAND_BATCHING)) {
        operation.resetBatch();
      }
      return await operation.execute(server, session, timeoutContext);
    } catch (operationError) {
      if (!(operationError instanceof MongoError)) throw operationError;
      // If the retry performed no writes, the first error is the meaningful one.
      if (
        previousOperationError != null &&
        operationError.hasErrorLabel(MongoErrorLabel.NoWritesPerformed)
      ) {
        throw previousOperationError;
      }
      previousServer = server.description;
      previousOperationError = operationError;

      // Reset timeouts
      timeoutContext.clear();
    }
  }

  throw (
    previousOperationError ??
    new MongoRuntimeError('Tried to propagate retryability error, but no error was found.')
  );
}
|
||||
279
backend/node_modules/mongodb/src/operations/find.ts
generated
vendored
Normal file
279
backend/node_modules/mongodb/src/operations/find.ts
generated
vendored
Normal file
@@ -0,0 +1,279 @@
|
||||
import type { Document } from '../bson';
|
||||
import { CursorResponse, ExplainedCursorResponse } from '../cmap/wire_protocol/responses';
|
||||
import { type AbstractCursorOptions, type CursorTimeoutMode } from '../cursor/abstract_cursor';
|
||||
import { MongoInvalidArgumentError } from '../error';
|
||||
import {
|
||||
decorateWithExplain,
|
||||
type ExplainOptions,
|
||||
validateExplainTimeoutOptions
|
||||
} from '../explain';
|
||||
import { ReadConcern } from '../read_concern';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { formatSort, type Sort } from '../sort';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { type MongoDBNamespace, normalizeHintField } from '../utils';
|
||||
import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects, type Hint } from './operation';
|
||||
|
||||
/**
 * Options accepted when issuing a `find` command; translated onto the wire
 * command by `makeFindCommand`.
 * @public
 * @typeParam TSchema - Unused schema definition, deprecated usage, only specify `FindOptions` with no generic
 */
// eslint-disable-next-line @typescript-eslint/no-unused-vars
export interface FindOptions<TSchema extends Document = Document>
  extends Omit<CommandOperationOptions, 'writeConcern' | 'explain'>,
    AbstractCursorOptions {
  /** Sets the limit of documents returned in the query. A negative value requests at most |limit| documents in a single batch and closes the cursor. */
  limit?: number;
  /** Set to sort the documents coming back from the query. Array of indexes, `[['a', 1]]` etc. */
  sort?: Sort;
  /** The fields to return in the query. Object of fields to either include or exclude (one of, not both), `{'a':1, 'b': 1}` **or** `{'a': 0, 'b': 0}` */
  projection?: Document;
  /** Set to skip N documents ahead in your query (useful for pagination). */
  skip?: number;
  /** Tell the query to use specific indexes in the query. Object of indexes to use, `{'_id':1}` */
  hint?: Hint;
  /** Specify if the cursor can timeout. Legacy flag: when set, `noCursorTimeout` is derived as its negation and takes precedence. */
  timeout?: boolean;
  /** Specify if the cursor is tailable. */
  tailable?: boolean;
  /** Specify if the cursor is a tailable-await cursor. Requires `tailable` to be true */
  awaitData?: boolean;
  /** Set the batchSize for the getMoreCommand when iterating over the query results. A negative value implies a single batch. */
  batchSize?: number;
  /** If true, returns only the index keys in the resulting documents. */
  returnKey?: boolean;
  /** The inclusive lower bound for a specific index */
  min?: Document;
  /** The exclusive upper bound for a specific index */
  max?: Document;
  /** Number of milliseconds to wait before aborting the query. */
  maxTimeMS?: number;
  /** The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. Requires `tailable` and `awaitData` to be true */
  maxAwaitTimeMS?: number;
  /** The server normally times out idle cursors after an inactivity period (10 minutes) to prevent excess memory use. Set this option to prevent that. */
  noCursorTimeout?: boolean;
  /** Specify collation (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields). */
  collation?: CollationOptions;
  /** Allows disk use for blocking sort operations exceeding 100MB memory. (MongoDB 3.2 or higher) */
  allowDiskUse?: boolean;
  /** Determines whether to close the cursor after the first batch. Defaults to false. Overrides the value derived from a negative `limit`/`batchSize`. */
  singleBatch?: boolean;
  /** For queries against a sharded collection, allows the command (or subsequent getMore commands) to return partial results, rather than an error, if one or more queried shards are unavailable. */
  allowPartialResults?: boolean;
  /** Determines whether to return the record identifier for each document. If true, adds a field $recordId to the returned documents. */
  showRecordId?: boolean;
  /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
  let?: Document;
  /**
   * Option to enable an optimized code path for queries looking for a particular range of `ts` values in the oplog. Requires `tailable` to be true.
   * @deprecated Starting from MongoDB 4.4 this flag is not needed and will be ignored.
   */
  oplogReplay?: boolean;

  /**
   * Specifies the verbosity mode for the explain output.
   * @deprecated This API is deprecated in favor of `collection.find().explain()`.
   */
  explain?: ExplainOptions['explain'];
  /** @internal*/
  timeoutMode?: CursorTimeoutMode;
}
|
||||
|
||||
/** @internal */
|
||||
export class FindOperation extends CommandOperation<CursorResponse> {
|
||||
/**
|
||||
* @remarks WriteConcern can still be present on the options because
|
||||
* we inherit options from the client/db/collection. The
|
||||
* key must be present on the options in order to delete it.
|
||||
* This allows typescript to delete the key but will
|
||||
* not allow a writeConcern to be assigned as a property on options.
|
||||
*/
|
||||
override options: FindOptions & { writeConcern?: never };
|
||||
filter: Document;
|
||||
|
||||
constructor(ns: MongoDBNamespace, filter: Document = {}, options: FindOptions = {}) {
|
||||
super(undefined, options);
|
||||
|
||||
this.options = { ...options };
|
||||
delete this.options.writeConcern;
|
||||
this.ns = ns;
|
||||
|
||||
if (typeof filter !== 'object' || Array.isArray(filter)) {
|
||||
throw new MongoInvalidArgumentError('Query filter must be a plain object or ObjectId');
|
||||
}
|
||||
|
||||
// special case passing in an ObjectId as a filter
|
||||
this.filter = filter != null && filter._bsontype === 'ObjectId' ? { _id: filter } : filter;
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'find' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<CursorResponse> {
|
||||
this.server = server;
|
||||
|
||||
const options = this.options;
|
||||
|
||||
let findCommand = makeFindCommand(this.ns, this.filter, options);
|
||||
if (this.explain) {
|
||||
validateExplainTimeoutOptions(this.options, this.explain);
|
||||
findCommand = decorateWithExplain(findCommand, this.explain);
|
||||
}
|
||||
|
||||
return await server.command(
|
||||
this.ns,
|
||||
findCommand,
|
||||
{
|
||||
...this.options,
|
||||
...this.bsonOptions,
|
||||
documentsReturnedIn: 'firstBatch',
|
||||
session,
|
||||
timeoutContext
|
||||
},
|
||||
this.explain ? ExplainedCursorResponse : CursorResponse
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Translates a namespace, filter, and FindOptions into the wire-format `find`
 * command document. Only options that were explicitly provided are copied onto
 * the command; `find` (collection name) and `filter` always come first.
 */
function makeFindCommand(ns: MongoDBNamespace, filter: Document, options: FindOptions): Document {
  const findCommand: Document = {
    find: ns.collection,
    filter
  };

  if (options.sort) {
    findCommand.sort = formatSort(options.sort);
  }

  if (options.projection) {
    let projection = options.projection;
    // An array of field names is shorthand for an inclusion projection;
    // an empty array projects only _id.
    if (projection && Array.isArray(projection)) {
      projection = projection.length
        ? projection.reduce((result, field) => {
            result[field] = 1;
            return result;
          }, {})
        : { _id: 1 };
    }

    findCommand.projection = projection;
  }

  if (options.hint) {
    findCommand.hint = normalizeHintField(options.hint);
  }

  if (typeof options.skip === 'number') {
    findCommand.skip = options.skip;
  }

  if (typeof options.limit === 'number') {
    // Negative limit mirrors legacy wire-protocol semantics: return at most
    // |limit| documents in one batch, then close the cursor.
    if (options.limit < 0) {
      findCommand.limit = -options.limit;
      findCommand.singleBatch = true;
    } else {
      findCommand.limit = options.limit;
    }
  }

  if (typeof options.batchSize === 'number') {
    if (options.batchSize < 0) {
      // A negative batchSize also implies a single batch; it may only tighten
      // an already-requested limit, never loosen it.
      if (
        options.limit &&
        options.limit !== 0 &&
        Math.abs(options.batchSize) < Math.abs(options.limit)
      ) {
        findCommand.limit = -options.batchSize;
      }

      findCommand.singleBatch = true;
    } else {
      findCommand.batchSize = options.batchSize;
    }
  }

  if (typeof options.singleBatch === 'boolean') {
    // An explicit singleBatch option overrides the values derived above.
    findCommand.singleBatch = options.singleBatch;
  }

  // we check for undefined specifically here to allow falsy values
  // eslint-disable-next-line no-restricted-syntax
  if (options.comment !== undefined) {
    findCommand.comment = options.comment;
  }

  if (typeof options.maxTimeMS === 'number') {
    findCommand.maxTimeMS = options.maxTimeMS;
  }

  const readConcern = ReadConcern.fromOptions(options);
  if (readConcern) {
    findCommand.readConcern = readConcern.toJSON();
  }

  if (options.max) {
    findCommand.max = options.max;
  }

  if (options.min) {
    findCommand.min = options.min;
  }

  if (typeof options.returnKey === 'boolean') {
    findCommand.returnKey = options.returnKey;
  }

  if (typeof options.showRecordId === 'boolean') {
    findCommand.showRecordId = options.showRecordId;
  }

  if (typeof options.tailable === 'boolean') {
    findCommand.tailable = options.tailable;
  }

  if (typeof options.oplogReplay === 'boolean') {
    findCommand.oplogReplay = options.oplogReplay;
  }

  // Legacy `timeout` flag is the inverse of noCursorTimeout and takes precedence.
  if (typeof options.timeout === 'boolean') {
    findCommand.noCursorTimeout = !options.timeout;
  } else if (typeof options.noCursorTimeout === 'boolean') {
    findCommand.noCursorTimeout = options.noCursorTimeout;
  }

  if (typeof options.awaitData === 'boolean') {
    findCommand.awaitData = options.awaitData;
  }

  if (typeof options.allowPartialResults === 'boolean') {
    findCommand.allowPartialResults = options.allowPartialResults;
  }

  if (options.collation) {
    findCommand.collation = options.collation;
  }

  if (typeof options.allowDiskUse === 'boolean') {
    findCommand.allowDiskUse = options.allowDiskUse;
  }

  if (options.let) {
    findCommand.let = options.let;
  }

  return findCommand;
}
|
||||
|
||||
// Aspects drive executeOperation/tryOperation dispatch: `find` is a retryable,
// explainable read that creates a cursor.
defineAspects(FindOperation, [
  Aspect.READ_OPERATION,
  Aspect.RETRYABLE,
  Aspect.EXPLAINABLE,
  Aspect.CURSOR_CREATING
]);
|
||||
294
backend/node_modules/mongodb/src/operations/find_and_modify.ts
generated
vendored
Normal file
294
backend/node_modules/mongodb/src/operations/find_and_modify.ts
generated
vendored
Normal file
@@ -0,0 +1,294 @@
|
||||
import type { Document } from '../bson';
|
||||
import type { Collection } from '../collection';
|
||||
import { MongoCompatibilityError, MongoInvalidArgumentError } from '../error';
|
||||
import { ReadPreference } from '../read_preference';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { formatSort, type Sort, type SortForCmd } from '../sort';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { decorateWithCollation, hasAtomicOperators, maxWireVersion } from '../utils';
|
||||
import { type WriteConcern, type WriteConcernSettings } from '../write_concern';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @public Which version of the document findOneAndReplace/Update should return. */
export const ReturnDocument = Object.freeze({
  BEFORE: 'before',
  AFTER: 'after'
} as const);

/** @public Union of the ReturnDocument values ('before' | 'after'); shares the constant's name. */
export type ReturnDocument = (typeof ReturnDocument)[keyof typeof ReturnDocument];
|
||||
|
||||
/**
 * Options for the findOneAndDelete helper (findAndModify with `remove: true`).
 * @public
 */
export interface FindOneAndDeleteOptions extends CommandOperationOptions {
  /** An optional hint for query optimization. See the {@link https://www.mongodb.com/docs/manual/reference/command/update/#update-command-hint|update command} reference for more information.*/
  hint?: Document;
  /** Limits the fields to return for all matching documents. */
  projection?: Document;
  /** Determines which document the operation modifies if the query selects multiple documents. */
  sort?: Sort;
  /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
  let?: Document;
  /**
   * Return the ModifyResult instead of the modified document. Defaults to false
   */
  includeResultMetadata?: boolean;
}
|
||||
|
||||
/**
 * Options for the findOneAndReplace helper (findAndModify with a full replacement document).
 * @public
 */
export interface FindOneAndReplaceOptions extends CommandOperationOptions {
  /** Allow driver to bypass schema validation. */
  bypassDocumentValidation?: boolean;
  /** An optional hint for query optimization. See the {@link https://www.mongodb.com/docs/manual/reference/command/update/#update-command-hint|update command} reference for more information.*/
  hint?: Document;
  /** Limits the fields to return for all matching documents. */
  projection?: Document;
  /** When set to 'after', returns the updated document rather than the original. The default is 'before'. */
  returnDocument?: ReturnDocument;
  /** Determines which document the operation modifies if the query selects multiple documents. */
  sort?: Sort;
  /** Upsert the document if it does not exist. */
  upsert?: boolean;
  /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
  let?: Document;
  /**
   * Return the ModifyResult instead of the modified document. Defaults to false
   */
  includeResultMetadata?: boolean;
}
|
||||
|
||||
/**
 * Options for the findOneAndUpdate helper (findAndModify with an atomic update document).
 * @public
 */
export interface FindOneAndUpdateOptions extends CommandOperationOptions {
  /** Optional list of array filters referenced in filtered positional operators */
  arrayFilters?: Document[];
  /** Allow driver to bypass schema validation. */
  bypassDocumentValidation?: boolean;
  /** An optional hint for query optimization. See the {@link https://www.mongodb.com/docs/manual/reference/command/update/#update-command-hint|update command} reference for more information.*/
  hint?: Document;
  /** Limits the fields to return for all matching documents. */
  projection?: Document;
  /** When set to 'after', returns the updated document rather than the original. The default is 'before'. */
  returnDocument?: ReturnDocument;
  /** Determines which document the operation modifies if the query selects multiple documents. */
  sort?: Sort;
  /** Upsert the document if it does not exist. */
  upsert?: boolean;
  /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
  let?: Document;
  /**
   * Return the ModifyResult instead of the modified document. Defaults to false
   */
  includeResultMetadata?: boolean;
}
|
||||
|
||||
/** @internal Shared shape of the wire-format findAndModify command fields. */
interface FindAndModifyCmdBase {
  /** True to delete the matched document (findOneAndDelete). */
  remove: boolean;
  /** True to return the post-modification document ('after'); false returns the original. */
  new: boolean;
  /** True to insert when no document matches. */
  upsert: boolean;
  /** Replacement or atomic-update document (unset for remove). */
  update?: Document;
  sort?: SortForCmd;
  /** Server-side projection; findAndModify calls it `fields`. */
  fields?: Document;
  bypassDocumentValidation?: boolean;
  arrayFilters?: Document[];
  maxTimeMS?: number;
  let?: Document;
  writeConcern?: WriteConcern | WriteConcernSettings;
  /**
   * Comment to apply to the operation.
   *
   * In server versions pre-4.4, 'comment' must be string. A server
   * error will be thrown if any other type is provided.
   *
   * In server versions 4.4 and above, 'comment' can be any valid BSON type.
   */
  comment?: unknown;
}
|
||||
|
||||
function configureFindAndModifyCmdBaseUpdateOpts(
|
||||
cmdBase: FindAndModifyCmdBase,
|
||||
options: FindOneAndReplaceOptions | FindOneAndUpdateOptions
|
||||
): FindAndModifyCmdBase {
|
||||
cmdBase.new = options.returnDocument === ReturnDocument.AFTER;
|
||||
cmdBase.upsert = options.upsert === true;
|
||||
|
||||
if (options.bypassDocumentValidation === true) {
|
||||
cmdBase.bypassDocumentValidation = options.bypassDocumentValidation;
|
||||
}
|
||||
return cmdBase;
|
||||
}
|
||||
|
||||
/**
 * Base operation for the findAndModify command; subclasses configure cmdBase
 * for delete, replace, or update behavior.
 * @internal
 */
export class FindAndModifyOperation extends CommandOperation<Document> {
  override options: FindOneAndReplaceOptions | FindOneAndUpdateOptions | FindOneAndDeleteOptions;
  // Accumulated wire-format fields for the findAndModify command.
  cmdBase: FindAndModifyCmdBase;
  collection: Collection;
  query: Document;
  doc?: Document;

  constructor(
    collection: Collection,
    query: Document,
    options: FindOneAndReplaceOptions | FindOneAndUpdateOptions | FindOneAndDeleteOptions
  ) {
    super(collection, options);
    this.options = options ?? {};
    this.cmdBase = {
      remove: false,
      new: false,
      upsert: false
    };

    // Default: return the bare document rather than the full ModifyResult.
    options.includeResultMetadata ??= false;

    const sort = formatSort(options.sort);
    if (sort) {
      this.cmdBase.sort = sort;
    }

    // findAndModify names its projection option `fields`.
    if (options.projection) {
      this.cmdBase.fields = options.projection;
    }

    if (options.maxTimeMS) {
      this.cmdBase.maxTimeMS = options.maxTimeMS;
    }

    // Decorate the findAndModify command with the write Concern
    if (options.writeConcern) {
      this.cmdBase.writeConcern = options.writeConcern;
    }

    if (options.let) {
      this.cmdBase.let = options.let;
    }

    // we check for undefined specifically here to allow falsy values
    // eslint-disable-next-line no-restricted-syntax
    if (options.comment !== undefined) {
      this.cmdBase.comment = options.comment;
    }

    // force primary read preference
    this.readPreference = ReadPreference.primary;

    this.collection = collection;
    this.query = query;
  }

  override get commandName() {
    return 'findAndModify' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<Document> {
    const coll = this.collection;
    const query = this.query;
    const options = { ...this.options, ...this.bsonOptions };

    // Create findAndModify command object
    const cmd: Document = {
      findAndModify: coll.collectionName,
      query: query,
      ...this.cmdBase
    };

    decorateWithCollation(cmd, coll, options);

    if (options.hint) {
      // TODO: once this method becomes a CommandOperation we will have the server
      // in place to check.
      // hint with an unacknowledged write, or a server older than wire version 8,
      // is unsupported.
      const unacknowledgedWrite = this.writeConcern?.w === 0;
      if (unacknowledgedWrite || maxWireVersion(server) < 8) {
        throw new MongoCompatibilityError(
          'The current topology does not support a hint on findAndModify commands'
        );
      }

      cmd.hint = options.hint;
    }

    // Execute the command
    const result = await super.executeCommand(server, session, cmd, timeoutContext);
    // By default unwrap to the document (or null); metadata only when requested.
    return options.includeResultMetadata ? result : (result.value ?? null);
  }
}
|
||||
|
||||
/** @internal */
|
||||
export class FindOneAndDeleteOperation extends FindAndModifyOperation {
|
||||
constructor(collection: Collection, filter: Document, options: FindOneAndDeleteOptions) {
|
||||
// Basic validation
|
||||
if (filter == null || typeof filter !== 'object') {
|
||||
throw new MongoInvalidArgumentError('Argument "filter" must be an object');
|
||||
}
|
||||
|
||||
super(collection, filter, options);
|
||||
this.cmdBase.remove = true;
|
||||
}
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
export class FindOneAndReplaceOperation extends FindAndModifyOperation {
|
||||
constructor(
|
||||
collection: Collection,
|
||||
filter: Document,
|
||||
replacement: Document,
|
||||
options: FindOneAndReplaceOptions
|
||||
) {
|
||||
if (filter == null || typeof filter !== 'object') {
|
||||
throw new MongoInvalidArgumentError('Argument "filter" must be an object');
|
||||
}
|
||||
|
||||
if (replacement == null || typeof replacement !== 'object') {
|
||||
throw new MongoInvalidArgumentError('Argument "replacement" must be an object');
|
||||
}
|
||||
|
||||
if (hasAtomicOperators(replacement)) {
|
||||
throw new MongoInvalidArgumentError('Replacement document must not contain atomic operators');
|
||||
}
|
||||
|
||||
super(collection, filter, options);
|
||||
this.cmdBase.update = replacement;
|
||||
configureFindAndModifyCmdBaseUpdateOpts(this.cmdBase, options);
|
||||
}
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
export class FindOneAndUpdateOperation extends FindAndModifyOperation {
|
||||
constructor(
|
||||
collection: Collection,
|
||||
filter: Document,
|
||||
update: Document,
|
||||
options: FindOneAndUpdateOptions
|
||||
) {
|
||||
if (filter == null || typeof filter !== 'object') {
|
||||
throw new MongoInvalidArgumentError('Argument "filter" must be an object');
|
||||
}
|
||||
|
||||
if (update == null || typeof update !== 'object') {
|
||||
throw new MongoInvalidArgumentError('Argument "update" must be an object');
|
||||
}
|
||||
|
||||
if (!hasAtomicOperators(update)) {
|
||||
throw new MongoInvalidArgumentError('Update document requires atomic operators');
|
||||
}
|
||||
|
||||
super(collection, filter, options);
|
||||
this.cmdBase.update = update;
|
||||
configureFindAndModifyCmdBaseUpdateOpts(this.cmdBase, options);
|
||||
|
||||
if (options.arrayFilters) {
|
||||
this.cmdBase.arrayFilters = options.arrayFilters;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// findAndModify is a retryable, explainable write; all three subclasses inherit
// these aspects from the base operation.
defineAspects(FindAndModifyOperation, [
  Aspect.WRITE_OPERATION,
  Aspect.RETRYABLE,
  Aspect.EXPLAINABLE
]);
|
||||
110
backend/node_modules/mongodb/src/operations/get_more.ts
generated
vendored
Normal file
110
backend/node_modules/mongodb/src/operations/get_more.ts
generated
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
import type { Long } from '../bson';
|
||||
import { CursorResponse } from '../cmap/wire_protocol/responses';
|
||||
import { MongoRuntimeError } from '../error';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { maxWireVersion, type MongoDBNamespace } from '../utils';
|
||||
import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation';
|
||||
|
||||
/** @internal */
export interface GetMoreOptions extends OperationOptions {
  /** Set the batchSize for the getMoreCommand when iterating over the query results. */
  batchSize?: number;
  /**
   * Comment to apply to the operation.
   *
   * getMore only supports 'comment' in server versions 4.4 and above.
   */
  comment?: unknown;
  /** Number of milliseconds to wait before aborting the query. */
  maxTimeMS?: number;
  /** TODO(NODE-4413): Address bug with maxAwaitTimeMS not being passed in from the cursor correctly */
  maxAwaitTimeMS?: number;
}
|
||||
|
||||
/**
 * GetMore command: https://www.mongodb.com/docs/manual/reference/command/getMore/
 * @internal
 */
export interface GetMoreCommand {
  // server-side cursor id to continue; must be the first key of the command
  getMore: Long;
  collection: string;
  batchSize?: number;
  maxTimeMS?: number;
  /** Only supported on wire versions 10 or greater */
  comment?: unknown;
}
|
||||
|
||||
/** @internal */
export class GetMoreOperation extends AbstractOperation {
  // id of the server-side cursor this operation continues
  cursorId: Long;
  override options: GetMoreOptions;

  constructor(ns: MongoDBNamespace, cursorId: Long, server: Server, options: GetMoreOptions) {
    super(options);

    this.options = options;
    this.ns = ns;
    this.cursorId = cursorId;
    // Pinned at construction: getMore is only valid against the server that
    // created the cursor (enforced again in execute()).
    this.server = server;
  }

  override get commandName() {
    return 'getMore' as const;
  }
  /**
   * Although there is a server already associated with the get more operation, the signature
   * for execute passes a server so we will just use that one.
   */
  override async execute(
    server: Server,
    _session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<CursorResponse> {
    if (server !== this.server) {
      throw new MongoRuntimeError('Getmore must run on the same server operation began on');
    }

    // A missing or zero cursor id means there is nothing left to iterate.
    if (this.cursorId == null || this.cursorId.isZero()) {
      throw new MongoRuntimeError('Unable to iterate cursor with no id');
    }

    const collection = this.ns.collection;
    if (collection == null) {
      // Cursors should have adopted the namespace returned by MongoDB
      // which should always defined a collection name (even a pseudo one, ex. db.aggregate())
      throw new MongoRuntimeError('A collection name must be determined before getMore');
    }

    const getMoreCmd: GetMoreCommand = {
      getMore: this.cursorId,
      collection
    };

    if (typeof this.options.batchSize === 'number') {
      getMoreCmd.batchSize = Math.abs(this.options.batchSize);
    }

    // NOTE(review): maxAwaitTimeMS is mapped onto the command's maxTimeMS
    // field here — see TODO(NODE-4413) on GetMoreOptions.
    if (typeof this.options.maxAwaitTimeMS === 'number') {
      getMoreCmd.maxTimeMS = this.options.maxAwaitTimeMS;
    }

    // we check for undefined specifically here to allow falsy values
    // eslint-disable-next-line no-restricted-syntax
    if (this.options.comment !== undefined && maxWireVersion(server) >= 9) {
      getMoreCmd.comment = this.options.comment;
    }

    const commandOptions = {
      returnFieldSelector: null,
      documentsReturnedIn: 'nextBatch',
      timeoutContext,
      ...this.options
    };

    return await server.command(this.ns, getMoreCmd, commandOptions, CursorResponse);
  }
}
|
||||
|
||||
// getMore is a read and must be routed to the server that owns the cursor.
defineAspects(GetMoreOperation, [Aspect.READ_OPERATION, Aspect.MUST_SELECT_SAME_SERVER]);
|
||||
419
backend/node_modules/mongodb/src/operations/indexes.ts
generated
vendored
Normal file
419
backend/node_modules/mongodb/src/operations/indexes.ts
generated
vendored
Normal file
@@ -0,0 +1,419 @@
|
||||
import type { Document } from '../bson';
|
||||
import { CursorResponse } from '../cmap/wire_protocol/responses';
|
||||
import type { Collection } from '../collection';
|
||||
import { type AbstractCursorOptions } from '../cursor/abstract_cursor';
|
||||
import { MongoCompatibilityError } from '../error';
|
||||
import { type OneOrMore } from '../mongo_types';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { isObject, maxWireVersion, type MongoDBNamespace } from '../utils';
|
||||
import {
|
||||
type CollationOptions,
|
||||
CommandOperation,
|
||||
type CommandOperationOptions,
|
||||
type OperationParent
|
||||
} from './command';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
// Allow-list of per-index options accepted by createIndexes; anything else on
// a user-supplied description is stripped by resolveIndexDescription.
const VALID_INDEX_OPTIONS = new Set([
  'background',
  'unique',
  'name',
  'partialFilterExpression',
  'sparse',
  'hidden',
  'expireAfterSeconds',
  'storageEngine',
  'collation',
  'version',

  // text indexes
  'weights',
  'default_language',
  'language_override',
  'textIndexVersion',

  // 2d-sphere indexes
  '2dsphereIndexVersion',

  // 2d indexes
  'bits',
  'min',
  'max',

  // geoHaystack Indexes
  'bucketSize',

  // wildcard indexes
  'wildcardProjection'
]);
|
||||
|
||||
/** @public */
|
||||
export type IndexDirection =
|
||||
| -1
|
||||
| 1
|
||||
| '2d'
|
||||
| '2dsphere'
|
||||
| 'text'
|
||||
| 'geoHaystack'
|
||||
| 'hashed'
|
||||
| number;
|
||||
|
||||
function isIndexDirection(x: unknown): x is IndexDirection {
|
||||
return (
|
||||
typeof x === 'number' || x === '2d' || x === '2dsphere' || x === 'text' || x === 'geoHaystack'
|
||||
);
|
||||
}
|
||||
/**
 * @public
 * A single index key or a list of keys: field names (ascending), [name, direction]
 * tuples, plain objects, or Maps (Maps preserve key order).
 */
export type IndexSpecification = OneOrMore<
  | string
  | [string, IndexDirection]
  | { [key: string]: IndexDirection }
  | Map<string, IndexDirection>
>;
|
||||
|
||||
/** @public */
export interface IndexInformationOptions extends ListIndexesOptions {
  /**
   * When `true`, an array of index descriptions is returned.
   * When `false`, the driver returns an object that with keys corresponding to index names with values
   * corresponding to the entries of the indexes' key.
   *
   * For example, the given the following indexes:
   * ```
   * [ { name: 'a_1', key: { a: 1 } }, { name: 'b_1_c_1' , key: { b: 1, c: 1 } }]
   * ```
   *
   * When `full` is `true`, the above array is returned. When `full` is `false`, the following is returned:
   * ```
   * {
   *   'a_1': [['a', 1]],
   *   'b_1_c_1': [['b', 1], ['c', 1]],
   * }
   * ```
   */
  full?: boolean;
}
|
||||
|
||||
/**
 * @public
 * A single index to create: its key (order-preserving when a Map is used),
 * an optional name, and the subset of createIndexes options that apply
 * per-index rather than to the command as a whole.
 */
export interface IndexDescription
  extends Pick<
    CreateIndexesOptions,
    | 'background'
    | 'unique'
    | 'partialFilterExpression'
    | 'sparse'
    | 'hidden'
    | 'expireAfterSeconds'
    | 'storageEngine'
    | 'version'
    | 'weights'
    | 'default_language'
    | 'language_override'
    | 'textIndexVersion'
    | '2dsphereIndexVersion'
    | 'bits'
    | 'min'
    | 'max'
    | 'bucketSize'
    | 'wildcardProjection'
  > {
  collation?: CollationOptions;
  name?: string;
  key: { [key: string]: IndexDirection } | Map<string, IndexDirection>;
}
|
||||
|
||||
/** @public */
export interface CreateIndexesOptions extends Omit<CommandOperationOptions, 'writeConcern'> {
  /** Creates the index in the background, yielding whenever possible. */
  background?: boolean;
  /** Creates an unique index. */
  unique?: boolean;
  /** Override the autogenerated index name (useful if the resulting name is larger than 128 bytes) */
  name?: string;
  /** Creates a partial index based on the given filter object (MongoDB 3.2 or higher) */
  partialFilterExpression?: Document;
  /** Creates a sparse index. */
  sparse?: boolean;
  /** Allows you to expire data on indexes applied to a data (MongoDB 2.2 or higher) */
  expireAfterSeconds?: number;
  /** Allows users to configure the storage engine on a per-index basis when creating an index. (MongoDB 3.0 or higher) */
  storageEngine?: Document;
  /** (MongoDB 4.4. or higher) Specifies how many data-bearing members of a replica set, including the primary, must complete the index builds successfully before the primary marks the indexes as ready. This option accepts the same values for the "w" field in a write concern plus "votingMembers", which indicates all voting data-bearing nodes. */
  commitQuorum?: number | string;
  /** Specifies the index version number, either 0 or 1. */
  version?: number;
  // text indexes
  weights?: Document;
  default_language?: string;
  language_override?: string;
  textIndexVersion?: number;
  // 2d-sphere indexes
  '2dsphereIndexVersion'?: number;
  // 2d indexes
  bits?: number;
  /** For geospatial indexes set the lower bound for the co-ordinates. */
  min?: number;
  /** For geospatial indexes set the high bound for the co-ordinates. */
  max?: number;
  // geoHaystack Indexes
  bucketSize?: number;
  // wildcard indexes
  wildcardProjection?: Document;
  /** Specifies that the index should exist on the target collection but should not be used by the query planner when executing operations. (MongoDB 4.4 or higher) */
  hidden?: boolean;
}
|
||||
|
||||
function isSingleIndexTuple(t: unknown): t is [string, IndexDirection] {
|
||||
return Array.isArray(t) && t.length === 2 && isIndexDirection(t[1]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts an `IndexSpecification`, which can be specified in multiple formats, into a
|
||||
* valid `key` for the createIndexes command.
|
||||
*/
|
||||
function constructIndexDescriptionMap(indexSpec: IndexSpecification): Map<string, IndexDirection> {
|
||||
const key: Map<string, IndexDirection> = new Map();
|
||||
|
||||
const indexSpecs =
|
||||
!Array.isArray(indexSpec) || isSingleIndexTuple(indexSpec) ? [indexSpec] : indexSpec;
|
||||
|
||||
// Iterate through array and handle different types
|
||||
for (const spec of indexSpecs) {
|
||||
if (typeof spec === 'string') {
|
||||
key.set(spec, 1);
|
||||
} else if (Array.isArray(spec)) {
|
||||
key.set(spec[0], spec[1] ?? 1);
|
||||
} else if (spec instanceof Map) {
|
||||
for (const [property, value] of spec) {
|
||||
key.set(property, value);
|
||||
}
|
||||
} else if (isObject(spec)) {
|
||||
for (const [property, value] of Object.entries(spec)) {
|
||||
key.set(property, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return key;
|
||||
}
|
||||
|
||||
/**
|
||||
* Receives an index description and returns a modified index description which has had invalid options removed
|
||||
* from the description and has mapped the `version` option to the `v` option.
|
||||
*/
|
||||
function resolveIndexDescription(
|
||||
description: IndexDescription
|
||||
): Omit<ResolvedIndexDescription, 'key'> {
|
||||
const validProvidedOptions = Object.entries(description).filter(([optionName]) =>
|
||||
VALID_INDEX_OPTIONS.has(optionName)
|
||||
);
|
||||
|
||||
return Object.fromEntries(
|
||||
// we support the `version` option, but the `createIndexes` command expects it to be the `v`
|
||||
validProvidedOptions.map(([name, value]) => (name === 'version' ? ['v', value] : [name, value]))
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * @public
 * The index information returned by the listIndexes command. https://www.mongodb.com/docs/manual/reference/command/listIndexes/#mongodb-dbcommand-dbcmd.listIndexes
 */
export type IndexDescriptionInfo = Omit<IndexDescription, 'key' | 'version'> & {
  key: { [key: string]: IndexDirection };
  v?: IndexDescription['version'];
} & Document;

/** @public */
export type IndexDescriptionCompact = Record<string, [name: string, direction: IndexDirection][]>;

/**
 * @internal
 *
 * Internally, the driver represents index description keys with `Map`s to preserve key ordering.
 * We don't require users to specify maps, so we transform user provided descriptions into
 * "resolved" by converting the `key` into a JS `Map`, if it isn't already a map.
 *
 * Additionally, we support the `version` option, but the `createIndexes` command uses the field `v`
 * to specify the index version so we map the value of `version` to `v`, if provided.
 */
type ResolvedIndexDescription = Omit<IndexDescription, 'key' | 'version'> & {
  key: Map<string, IndexDirection>;
  v?: IndexDescription['version'];
};
|
||||
|
||||
/** @internal */
export class CreateIndexesOperation extends CommandOperation<string[]> {
  override options: CreateIndexesOptions;
  collectionName: string;
  // Descriptions are normalized at construction: Map keys, resolved names,
  // invalid options stripped.
  indexes: ReadonlyArray<ResolvedIndexDescription>;

  // private: instances are built via the static factories below.
  private constructor(
    parent: OperationParent,
    collectionName: string,
    indexes: IndexDescription[],
    options?: CreateIndexesOptions
  ) {
    super(parent, options);

    this.options = options ?? {};
    this.collectionName = collectionName;
    this.indexes = indexes.map((userIndex: IndexDescription): ResolvedIndexDescription => {
      // Ensure the key is a Map to preserve index key ordering
      const key =
        userIndex.key instanceof Map ? userIndex.key : new Map(Object.entries(userIndex.key));
      // Default index name: key parts joined by '_', e.g. { a: 1 } -> 'a_1'
      const name = userIndex.name ?? Array.from(key).flat().join('_');
      const validIndexOptions = resolveIndexDescription(userIndex);
      return {
        ...validIndexOptions,
        name,
        key
      };
    });
  }

  // Factory for callers that already have full index descriptions.
  static fromIndexDescriptionArray(
    parent: OperationParent,
    collectionName: string,
    indexes: IndexDescription[],
    options?: CreateIndexesOptions
  ): CreateIndexesOperation {
    return new CreateIndexesOperation(parent, collectionName, indexes, options);
  }

  // Factory for callers providing a loose IndexSpecification; it is converted
  // into a single normalized description.
  static fromIndexSpecification(
    parent: OperationParent,
    collectionName: string,
    indexSpec: IndexSpecification,
    options: CreateIndexesOptions = {}
  ): CreateIndexesOperation {
    const key = constructIndexDescriptionMap(indexSpec);
    const description: IndexDescription = { ...options, key };
    return new CreateIndexesOperation(parent, collectionName, [description], options);
  }

  override get commandName() {
    return 'createIndexes';
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<string[]> {
    const options = this.options;
    const indexes = this.indexes;

    const serverWireVersion = maxWireVersion(server);

    const cmd: Document = { createIndexes: this.collectionName, indexes };

    if (options.commitQuorum != null) {
      // wire version 9 corresponds to server 4.4, the first to support commitQuorum
      if (serverWireVersion < 9) {
        throw new MongoCompatibilityError(
          'Option `commitQuorum` for `createIndexes` not supported on servers < 4.4'
        );
      }
      cmd.commitQuorum = options.commitQuorum;
    }

    // collation is set on each index, it should not be defined at the root
    this.options.collation = undefined;

    await super.executeCommand(server, session, cmd, timeoutContext);

    // Return the (possibly auto-generated) names of the created indexes.
    const indexNames = indexes.map(index => index.name || '');
    return indexNames;
  }
}
|
||||
|
||||
/** @public */
|
||||
export type DropIndexesOptions = CommandOperationOptions;
|
||||
|
||||
/** @internal */
|
||||
export class DropIndexOperation extends CommandOperation<Document> {
|
||||
override options: DropIndexesOptions;
|
||||
collection: Collection;
|
||||
indexName: string;
|
||||
|
||||
constructor(collection: Collection, indexName: string, options?: DropIndexesOptions) {
|
||||
super(collection, options);
|
||||
|
||||
this.options = options ?? {};
|
||||
this.collection = collection;
|
||||
this.indexName = indexName;
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'dropIndexes' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<Document> {
|
||||
const cmd = { dropIndexes: this.collection.collectionName, index: this.indexName };
|
||||
return await super.executeCommand(server, session, cmd, timeoutContext);
|
||||
}
|
||||
}
|
||||
|
||||
/** @public */
export type ListIndexesOptions = AbstractCursorOptions & {
  /** @internal */
  omitMaxTimeMS?: boolean;
};
|
||||
|
||||
/** @internal */
export class ListIndexesOperation extends CommandOperation<CursorResponse> {
  /**
   * @remarks WriteConcern can still be present on the options because
   * we inherit options from the client/db/collection. The
   * key must be present on the options in order to delete it.
   * This allows typescript to delete the key but will
   * not allow a writeConcern to be assigned as a property on options.
   */
  override options: ListIndexesOptions & { writeConcern?: never };
  collectionNamespace: MongoDBNamespace;

  constructor(collection: Collection, options?: ListIndexesOptions) {
    super(collection, options);

    this.options = { ...options };
    // listIndexes is a read command; an inherited writeConcern must be removed
    delete this.options.writeConcern;
    this.collectionNamespace = collection.s.namespace;
  }

  override get commandName() {
    return 'listIndexes' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<CursorResponse> {
    const serverWireVersion = maxWireVersion(server);

    // batchSize is passed inside the `cursor` sub-document of the command
    const cursor = this.options.batchSize ? { batchSize: this.options.batchSize } : {};

    const command: Document = { listIndexes: this.collectionNamespace.collection, cursor };

    // we check for undefined specifically here to allow falsy values
    // eslint-disable-next-line no-restricted-syntax
    if (serverWireVersion >= 9 && this.options.comment !== undefined) {
      command.comment = this.options.comment;
    }

    return await super.executeCommand(server, session, command, timeoutContext, CursorResponse);
  }
}
|
||||
|
||||
// listIndexes is a retryable, cursor-creating read; create/drop are writes.
defineAspects(ListIndexesOperation, [
  Aspect.READ_OPERATION,
  Aspect.RETRYABLE,
  Aspect.CURSOR_CREATING
]);
defineAspects(CreateIndexesOperation, [Aspect.WRITE_OPERATION]);
defineAspects(DropIndexOperation, [Aspect.WRITE_OPERATION]);
|
||||
166
backend/node_modules/mongodb/src/operations/insert.ts
generated
vendored
Normal file
166
backend/node_modules/mongodb/src/operations/insert.ts
generated
vendored
Normal file
@@ -0,0 +1,166 @@
|
||||
import type { Document } from '../bson';
|
||||
import type { BulkWriteOptions } from '../bulk/common';
|
||||
import type { Collection } from '../collection';
|
||||
import { MongoInvalidArgumentError, MongoServerError } from '../error';
|
||||
import type { InferIdType } from '../mongo_types';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { maybeAddIdToDocuments, type MongoDBNamespace } from '../utils';
|
||||
import { WriteConcern } from '../write_concern';
|
||||
import { BulkWriteOperation } from './bulk_write';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { AbstractOperation, Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @internal */
export class InsertOperation extends CommandOperation<Document> {
  override options: BulkWriteOptions;
  documents: Document[];

  constructor(ns: MongoDBNamespace, documents: Document[], options: BulkWriteOptions) {
    super(undefined, options);
    // checkKeys defaults to false unless the caller explicitly enables it
    this.options = { ...options, checkKeys: options.checkKeys ?? false };
    this.ns = ns;
    this.documents = documents;
  }

  override get commandName() {
    return 'insert' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<Document> {
    const options = this.options ?? {};
    // inserts are ordered unless the caller opts out with { ordered: false }
    const ordered = typeof options.ordered === 'boolean' ? options.ordered : true;
    const command: Document = {
      insert: this.ns.collection,
      documents: this.documents,
      ordered
    };

    if (typeof options.bypassDocumentValidation === 'boolean') {
      command.bypassDocumentValidation = options.bypassDocumentValidation;
    }

    // we check for undefined specifically here to allow falsy values
    // eslint-disable-next-line no-restricted-syntax
    if (options.comment !== undefined) {
      command.comment = options.comment;
    }

    return await super.executeCommand(server, session, command, timeoutContext);
  }
}
|
||||
|
||||
/** @public */
export interface InsertOneOptions extends CommandOperationOptions {
  /** Allow driver to bypass schema validation. */
  bypassDocumentValidation?: boolean;
  /** Force server to assign _id values instead of driver. */
  forceServerObjectId?: boolean;
}

/** @public */
export interface InsertOneResult<TSchema = Document> {
  /** Indicates whether this write result was acknowledged. If not, then all other members of this result will be undefined */
  acknowledged: boolean;
  /** The identifier that was inserted. If the server generated the identifier, this value will be null as the driver does not have access to that data */
  insertedId: InferIdType<TSchema>;
}
|
||||
|
||||
export class InsertOneOperation extends InsertOperation {
  constructor(collection: Collection, doc: Document, options: InsertOneOptions) {
    // NOTE(review): maybeAddIdToDocuments presumably assigns a driver-generated
    // _id when one is missing — confirm against utils before relying on it here.
    super(collection.s.namespace, maybeAddIdToDocuments(collection, [doc], options), options);
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<InsertOneResult> {
    const res = await super.execute(server, session, timeoutContext);
    // top-level error code -> surface the whole response as a server error
    if (res.code) throw new MongoServerError(res);
    if (res.writeErrors) {
      // This should be a WriteError but we can't change it now because of error hierarchy
      throw new MongoServerError(res.writeErrors[0]);
    }

    return {
      // w: 0 writes are fire-and-forget, i.e. unacknowledged
      acknowledged: this.writeConcern?.w !== 0,
      insertedId: this.documents[0]._id
    };
  }
}
|
||||
|
||||
/** @public */
export interface InsertManyResult<TSchema = Document> {
  /** Indicates whether this write result was acknowledged. If not, then all other members of this result will be undefined */
  acknowledged: boolean;
  /** The number of inserted documents for this operations */
  insertedCount: number;
  /** Map of the index of the inserted document to the id of the inserted document */
  insertedIds: { [key: number]: InferIdType<TSchema> };
}
|
||||
|
||||
/** @internal */
export class InsertManyOperation extends AbstractOperation<InsertManyResult> {
  override options: BulkWriteOptions;
  collection: Collection;
  docs: ReadonlyArray<Document>;

  constructor(collection: Collection, docs: ReadonlyArray<Document>, options: BulkWriteOptions) {
    super(options);

    if (!Array.isArray(docs)) {
      throw new MongoInvalidArgumentError('Argument "docs" must be an array of documents');
    }

    this.options = options;
    this.collection = collection;
    this.docs = docs;
  }

  override get commandName() {
    return 'insert' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<InsertManyResult> {
    const coll = this.collection;
    const options = { ...this.options, ...this.bsonOptions, readPreference: this.readPreference };
    const writeConcern = WriteConcern.fromOptions(options);
    // insertMany is implemented as a bulk write of insertOne models
    const bulkWriteOperation = new BulkWriteOperation(
      coll,
      this.docs.map(document => ({
        insertOne: { document }
      })),
      options
    );

    try {
      const res = await bulkWriteOperation.execute(server, session, timeoutContext);
      return {
        acknowledged: writeConcern?.w !== 0,
        insertedCount: res.insertedCount,
        insertedIds: res.insertedIds
      };
    } catch (err) {
      // Translate the bulk API's null-entry error into an insertMany-specific message
      if (err && err.message === 'Operation must be an object with an operation key') {
        throw new MongoInvalidArgumentError(
          'Collection.insertMany() cannot be called with an array that has null/undefined values'
        );
      }
      throw err;
    }
  }
}
|
||||
|
||||
// Single-document inserts are retryable writes; insertMany delegates to the
// bulk API and is not marked retryable itself.
defineAspects(InsertOperation, [Aspect.RETRYABLE, Aspect.WRITE_OPERATION]);
defineAspects(InsertOneOperation, [Aspect.RETRYABLE, Aspect.WRITE_OPERATION]);
defineAspects(InsertManyOperation, [Aspect.WRITE_OPERATION]);
|
||||
35
backend/node_modules/mongodb/src/operations/is_capped.ts
generated
vendored
Normal file
35
backend/node_modules/mongodb/src/operations/is_capped.ts
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
import type { Collection } from '../collection';
|
||||
import { MongoAPIError } from '../error';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { AbstractOperation, type OperationOptions } from './operation';
|
||||
|
||||
/** @internal */
export class IsCappedOperation extends AbstractOperation<boolean> {
  override options: OperationOptions;
  collection: Collection;

  constructor(collection: Collection, options: OperationOptions) {
    super(options);
    this.options = options;
    this.collection = collection;
  }

  override get commandName() {
    // implemented in terms of listCollections rather than a dedicated command
    return 'listCollections' as const;
  }

  override async execute(server: Server, session: ClientSession | undefined): Promise<boolean> {
    const coll = this.collection;
    // nameOnly: false so the response includes collection options (which
    // carry the `capped` flag)
    const [collection] = await coll.s.db
      .listCollections(
        { name: coll.collectionName },
        { ...this.options, nameOnly: false, readPreference: this.readPreference, session }
      )
      .toArray();
    if (collection == null || collection.options == null) {
      throw new MongoAPIError(`collection ${coll.namespace} not found`);
    }
    return !!collection.options?.capped;
  }
}
|
||||
65
backend/node_modules/mongodb/src/operations/kill_cursors.ts
generated
vendored
Normal file
65
backend/node_modules/mongodb/src/operations/kill_cursors.ts
generated
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
import type { Long } from '../bson';
|
||||
import { MongoRuntimeError } from '../error';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { type MongoDBNamespace, squashError } from '../utils';
|
||||
import { AbstractOperation, Aspect, defineAspects, type OperationOptions } from './operation';
|
||||
|
||||
/**
 * https://www.mongodb.com/docs/manual/reference/command/killCursors/
 * @internal
 */
interface KillCursorsCommand {
  // collection name; must be the first key of the command document
  killCursors: string;
  cursors: Long[];
  comment?: unknown;
}
|
||||
|
||||
export class KillCursorsOperation extends AbstractOperation {
  // id of the server-side cursor to kill
  cursorId: Long;

  constructor(cursorId: Long, ns: MongoDBNamespace, server: Server, options: OperationOptions) {
    super(options);
    this.ns = ns;
    this.cursorId = cursorId;
    // killCursors must target the server that owns the cursor
    this.server = server;
  }

  override get commandName() {
    return 'killCursors' as const;
  }

  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<void> {
    if (server !== this.server) {
      throw new MongoRuntimeError('Killcursor must run on the same server operation began on');
    }

    const killCursors = this.ns.collection;
    if (killCursors == null) {
      // Cursors should have adopted the namespace returned by MongoDB
      // which should always defined a collection name (even a pseudo one, ex. db.aggregate())
      throw new MongoRuntimeError('A collection name must be determined before killCursors');
    }

    const killCursorsCommand: KillCursorsCommand = {
      killCursors,
      cursors: [this.cursorId]
    };
    try {
      await server.command(this.ns, killCursorsCommand, {
        session,
        timeoutContext
      });
    } catch (error) {
      // The driver should never emit errors from killCursors, this is spec-ed behavior
      squashError(error);
    }
  }
}
|
||||
|
||||
// killCursors must be routed to the server that owns the cursor.
defineAspects(KillCursorsOperation, [Aspect.MUST_SELECT_SAME_SERVER]);
|
||||
112
backend/node_modules/mongodb/src/operations/list_collections.ts
generated
vendored
Normal file
112
backend/node_modules/mongodb/src/operations/list_collections.ts
generated
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
import type { Binary, Document } from '../bson';
|
||||
import { CursorResponse } from '../cmap/wire_protocol/responses';
|
||||
import { type CursorTimeoutContext, type CursorTimeoutMode } from '../cursor/abstract_cursor';
|
||||
import type { Db } from '../db';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { maxWireVersion } from '../utils';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @public */
export interface ListCollectionsOptions extends Omit<CommandOperationOptions, 'writeConcern'> {
  /** Since 4.0: If true, will only return the collection name in the response, and will omit additional info */
  nameOnly?: boolean;
  /** Since 4.0: If true and nameOnly is true, allows a user without the required privilege (i.e. listCollections action on the database) to run the command when access control is enforced. */
  authorizedCollections?: boolean;
  /** The batchSize for the returned command cursor or if pre 2.8 the systems batch collection */
  batchSize?: number;
  /** @internal */
  timeoutMode?: CursorTimeoutMode;

  /** @internal */
  timeoutContext?: CursorTimeoutContext;
}
|
||||
|
||||
/** @internal */
|
||||
export class ListCollectionsOperation extends CommandOperation<CursorResponse> {
|
||||
/**
|
||||
* @remarks WriteConcern can still be present on the options because
|
||||
* we inherit options from the client/db/collection. The
|
||||
* key must be present on the options in order to delete it.
|
||||
* This allows typescript to delete the key but will
|
||||
* not allow a writeConcern to be assigned as a property on options.
|
||||
*/
|
||||
override options: ListCollectionsOptions & { writeConcern?: never };
|
||||
db: Db;
|
||||
filter: Document;
|
||||
nameOnly: boolean;
|
||||
authorizedCollections: boolean;
|
||||
batchSize?: number;
|
||||
|
||||
constructor(db: Db, filter: Document, options?: ListCollectionsOptions) {
|
||||
super(db, options);
|
||||
|
||||
this.options = { ...options };
|
||||
delete this.options.writeConcern;
|
||||
this.db = db;
|
||||
this.filter = filter;
|
||||
this.nameOnly = !!this.options.nameOnly;
|
||||
this.authorizedCollections = !!this.options.authorizedCollections;
|
||||
|
||||
if (typeof this.options.batchSize === 'number') {
|
||||
this.batchSize = this.options.batchSize;
|
||||
}
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'listCollections' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<CursorResponse> {
|
||||
return await super.executeCommand(
|
||||
server,
|
||||
session,
|
||||
this.generateCommand(maxWireVersion(server)),
|
||||
timeoutContext,
|
||||
CursorResponse
|
||||
);
|
||||
}
|
||||
|
||||
/* This is here for the purpose of unit testing the final command that gets sent. */
|
||||
generateCommand(wireVersion: number): Document {
|
||||
const command: Document = {
|
||||
listCollections: 1,
|
||||
filter: this.filter,
|
||||
cursor: this.batchSize ? { batchSize: this.batchSize } : {},
|
||||
nameOnly: this.nameOnly,
|
||||
authorizedCollections: this.authorizedCollections
|
||||
};
|
||||
|
||||
// we check for undefined specifically here to allow falsy values
|
||||
// eslint-disable-next-line no-restricted-syntax
|
||||
if (wireVersion >= 9 && this.options.comment !== undefined) {
|
||||
command.comment = this.options.comment;
|
||||
}
|
||||
|
||||
return command;
|
||||
}
|
||||
}
|
||||
|
||||
/** @public */
|
||||
export interface CollectionInfo extends Document {
|
||||
name: string;
|
||||
type?: string;
|
||||
options?: Document;
|
||||
info?: {
|
||||
readOnly?: false;
|
||||
uuid?: Binary;
|
||||
};
|
||||
idIndex?: Document;
|
||||
}
|
||||
|
||||
defineAspects(ListCollectionsOperation, [
|
||||
Aspect.READ_OPERATION,
|
||||
Aspect.RETRYABLE,
|
||||
Aspect.CURSOR_CREATING
|
||||
]);
|
||||
77
backend/node_modules/mongodb/src/operations/list_databases.ts
generated
vendored
Normal file
77
backend/node_modules/mongodb/src/operations/list_databases.ts
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
import type { Document } from '../bson';
|
||||
import type { Db } from '../db';
|
||||
import { type TODO_NODE_3286 } from '../mongo_types';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { maxWireVersion, MongoDBNamespace } from '../utils';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @public */
|
||||
export interface ListDatabasesResult {
|
||||
databases: ({ name: string; sizeOnDisk?: number; empty?: boolean } & Document)[];
|
||||
totalSize?: number;
|
||||
totalSizeMb?: number;
|
||||
ok: 1 | 0;
|
||||
}
|
||||
|
||||
/** @public */
|
||||
export interface ListDatabasesOptions extends CommandOperationOptions {
|
||||
/** A query predicate that determines which databases are listed */
|
||||
filter?: Document;
|
||||
/** A flag to indicate whether the command should return just the database names, or return both database names and size information */
|
||||
nameOnly?: boolean;
|
||||
/** A flag that determines which databases are returned based on the user privileges when access control is enabled */
|
||||
authorizedDatabases?: boolean;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
export class ListDatabasesOperation extends CommandOperation<ListDatabasesResult> {
|
||||
override options: ListDatabasesOptions;
|
||||
|
||||
constructor(db: Db, options?: ListDatabasesOptions) {
|
||||
super(db, options);
|
||||
this.options = options ?? {};
|
||||
this.ns = new MongoDBNamespace('admin', '$cmd');
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'listDatabases' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<ListDatabasesResult> {
|
||||
const cmd: Document = { listDatabases: 1 };
|
||||
|
||||
if (typeof this.options.nameOnly === 'boolean') {
|
||||
cmd.nameOnly = this.options.nameOnly;
|
||||
}
|
||||
|
||||
if (this.options.filter) {
|
||||
cmd.filter = this.options.filter;
|
||||
}
|
||||
|
||||
if (typeof this.options.authorizedDatabases === 'boolean') {
|
||||
cmd.authorizedDatabases = this.options.authorizedDatabases;
|
||||
}
|
||||
|
||||
// we check for undefined specifically here to allow falsy values
|
||||
// eslint-disable-next-line no-restricted-syntax
|
||||
if (maxWireVersion(server) >= 9 && this.options.comment !== undefined) {
|
||||
cmd.comment = this.options.comment;
|
||||
}
|
||||
|
||||
return await (super.executeCommand(
|
||||
server,
|
||||
session,
|
||||
cmd,
|
||||
timeoutContext
|
||||
) as Promise<TODO_NODE_3286>);
|
||||
}
|
||||
}
|
||||
|
||||
defineAspects(ListDatabasesOperation, [Aspect.READ_OPERATION, Aspect.RETRYABLE]);
|
||||
144
backend/node_modules/mongodb/src/operations/operation.ts
generated
vendored
Normal file
144
backend/node_modules/mongodb/src/operations/operation.ts
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
import { type BSONSerializeOptions, type Document, resolveBSONOptions } from '../bson';
|
||||
import { ReadPreference, type ReadPreferenceLike } from '../read_preference';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import type { MongoDBNamespace } from '../utils';
|
||||
|
||||
export const Aspect = {
|
||||
READ_OPERATION: Symbol('READ_OPERATION'),
|
||||
WRITE_OPERATION: Symbol('WRITE_OPERATION'),
|
||||
RETRYABLE: Symbol('RETRYABLE'),
|
||||
EXPLAINABLE: Symbol('EXPLAINABLE'),
|
||||
SKIP_COLLATION: Symbol('SKIP_COLLATION'),
|
||||
CURSOR_CREATING: Symbol('CURSOR_CREATING'),
|
||||
MUST_SELECT_SAME_SERVER: Symbol('MUST_SELECT_SAME_SERVER'),
|
||||
COMMAND_BATCHING: Symbol('COMMAND_BATCHING')
|
||||
} as const;
|
||||
|
||||
/** @public */
|
||||
export type Hint = string | Document;
|
||||
|
||||
/** @public */
|
||||
export interface OperationOptions extends BSONSerializeOptions {
|
||||
/** Specify ClientSession for this command */
|
||||
session?: ClientSession;
|
||||
willRetryWrite?: boolean;
|
||||
|
||||
/** The preferred read preference (ReadPreference.primary, ReadPreference.primary_preferred, ReadPreference.secondary, ReadPreference.secondary_preferred, ReadPreference.nearest). */
|
||||
readPreference?: ReadPreferenceLike;
|
||||
|
||||
/** @internal Hints to `executeOperation` that this operation should not unpin on an ended transaction */
|
||||
bypassPinningCheck?: boolean;
|
||||
omitReadPreference?: boolean;
|
||||
|
||||
/** @internal Hint to `executeOperation` to omit maxTimeMS */
|
||||
omitMaxTimeMS?: boolean;
|
||||
|
||||
/**
|
||||
* @experimental
|
||||
* Specifies the time an operation will run until it throws a timeout error
|
||||
*/
|
||||
timeoutMS?: number;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
const kSession = Symbol('session');
|
||||
|
||||
/**
|
||||
* This class acts as a parent class for any operation and is responsible for setting this.options,
|
||||
* as well as setting and getting a session.
|
||||
* Additionally, this class implements `hasAspect`, which determines whether an operation has
|
||||
* a specific aspect.
|
||||
* @internal
|
||||
*/
|
||||
export abstract class AbstractOperation<TResult = any> {
|
||||
ns!: MongoDBNamespace;
|
||||
readPreference: ReadPreference;
|
||||
server!: Server;
|
||||
bypassPinningCheck: boolean;
|
||||
trySecondaryWrite: boolean;
|
||||
|
||||
// BSON serialization options
|
||||
bsonOptions?: BSONSerializeOptions;
|
||||
|
||||
options: OperationOptions;
|
||||
|
||||
/** Specifies the time an operation will run until it throws a timeout error. */
|
||||
timeoutMS?: number;
|
||||
|
||||
[kSession]: ClientSession | undefined;
|
||||
|
||||
static aspects?: Set<symbol>;
|
||||
|
||||
constructor(options: OperationOptions = {}) {
|
||||
this.readPreference = this.hasAspect(Aspect.WRITE_OPERATION)
|
||||
? ReadPreference.primary
|
||||
: (ReadPreference.fromOptions(options) ?? ReadPreference.primary);
|
||||
|
||||
// Pull the BSON serialize options from the already-resolved options
|
||||
this.bsonOptions = resolveBSONOptions(options);
|
||||
|
||||
this[kSession] = options.session != null ? options.session : undefined;
|
||||
|
||||
this.options = options;
|
||||
this.bypassPinningCheck = !!options.bypassPinningCheck;
|
||||
this.trySecondaryWrite = false;
|
||||
}
|
||||
|
||||
/** Must match the first key of the command object sent to the server.
|
||||
Command name should be stateless (should not use 'this' keyword) */
|
||||
abstract get commandName(): string;
|
||||
|
||||
abstract execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<TResult>;
|
||||
|
||||
hasAspect(aspect: symbol): boolean {
|
||||
const ctor = this.constructor as { aspects?: Set<symbol> };
|
||||
if (ctor.aspects == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return ctor.aspects.has(aspect);
|
||||
}
|
||||
|
||||
get session(): ClientSession | undefined {
|
||||
return this[kSession];
|
||||
}
|
||||
|
||||
clearSession() {
|
||||
this[kSession] = undefined;
|
||||
}
|
||||
|
||||
resetBatch(): boolean {
|
||||
return true;
|
||||
}
|
||||
|
||||
get canRetryRead(): boolean {
|
||||
return this.hasAspect(Aspect.RETRYABLE) && this.hasAspect(Aspect.READ_OPERATION);
|
||||
}
|
||||
|
||||
get canRetryWrite(): boolean {
|
||||
return this.hasAspect(Aspect.RETRYABLE) && this.hasAspect(Aspect.WRITE_OPERATION);
|
||||
}
|
||||
}
|
||||
|
||||
export function defineAspects(
|
||||
operation: { aspects?: Set<symbol> },
|
||||
aspects: symbol | symbol[] | Set<symbol>
|
||||
): Set<symbol> {
|
||||
if (!Array.isArray(aspects) && !(aspects instanceof Set)) {
|
||||
aspects = [aspects];
|
||||
}
|
||||
|
||||
aspects = new Set(aspects);
|
||||
Object.defineProperty(operation, 'aspects', {
|
||||
value: aspects,
|
||||
writable: false
|
||||
});
|
||||
|
||||
return aspects;
|
||||
}
|
||||
35
backend/node_modules/mongodb/src/operations/options_operation.ts
generated
vendored
Normal file
35
backend/node_modules/mongodb/src/operations/options_operation.ts
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
import type { Document } from '../bson';
|
||||
import type { Collection } from '../collection';
|
||||
import { MongoAPIError } from '../error';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { AbstractOperation, type OperationOptions } from './operation';
|
||||
|
||||
/** @internal */
|
||||
export class OptionsOperation extends AbstractOperation<Document> {
|
||||
override options: OperationOptions;
|
||||
collection: Collection;
|
||||
|
||||
constructor(collection: Collection, options: OperationOptions) {
|
||||
super(options);
|
||||
this.options = options;
|
||||
this.collection = collection;
|
||||
}
|
||||
override get commandName() {
|
||||
return 'listCollections' as const;
|
||||
}
|
||||
|
||||
override async execute(server: Server, session: ClientSession | undefined): Promise<Document> {
|
||||
const coll = this.collection;
|
||||
const [collection] = await coll.s.db
|
||||
.listCollections(
|
||||
{ name: coll.collectionName },
|
||||
{ ...this.options, nameOnly: false, readPreference: this.readPreference, session }
|
||||
)
|
||||
.toArray();
|
||||
if (collection == null || collection.options == null) {
|
||||
throw new MongoAPIError(`collection ${coll.namespace} not found`);
|
||||
}
|
||||
return collection.options;
|
||||
}
|
||||
}
|
||||
40
backend/node_modules/mongodb/src/operations/profiling_level.ts
generated
vendored
Normal file
40
backend/node_modules/mongodb/src/operations/profiling_level.ts
generated
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
import type { Db } from '../db';
|
||||
import { MongoUnexpectedServerResponseError } from '../error';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
|
||||
/** @public */
|
||||
export type ProfilingLevelOptions = CommandOperationOptions;
|
||||
|
||||
/** @internal */
|
||||
export class ProfilingLevelOperation extends CommandOperation<string> {
|
||||
override options: ProfilingLevelOptions;
|
||||
|
||||
constructor(db: Db, options: ProfilingLevelOptions) {
|
||||
super(db, options);
|
||||
this.options = options;
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'profile' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<string> {
|
||||
const doc = await super.executeCommand(server, session, { profile: -1 }, timeoutContext);
|
||||
if (doc.ok === 1) {
|
||||
const was = doc.was;
|
||||
if (was === 0) return 'off';
|
||||
if (was === 1) return 'slow_only';
|
||||
if (was === 2) return 'all';
|
||||
throw new MongoUnexpectedServerResponseError(`Illegal profiling level value ${was}`);
|
||||
} else {
|
||||
throw new MongoUnexpectedServerResponseError('Error with profile command');
|
||||
}
|
||||
}
|
||||
}
|
||||
36
backend/node_modules/mongodb/src/operations/remove_user.ts
generated
vendored
Normal file
36
backend/node_modules/mongodb/src/operations/remove_user.ts
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
import type { Db } from '../db';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @public */
|
||||
export type RemoveUserOptions = CommandOperationOptions;
|
||||
|
||||
/** @internal */
|
||||
export class RemoveUserOperation extends CommandOperation<boolean> {
|
||||
override options: RemoveUserOptions;
|
||||
username: string;
|
||||
|
||||
constructor(db: Db, username: string, options: RemoveUserOptions) {
|
||||
super(db, options);
|
||||
this.options = options;
|
||||
this.username = username;
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'dropUser' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<boolean> {
|
||||
await super.executeCommand(server, session, { dropUser: this.username }, timeoutContext);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
defineAspects(RemoveUserOperation, [Aspect.WRITE_OPERATION]);
|
||||
55
backend/node_modules/mongodb/src/operations/rename.ts
generated
vendored
Normal file
55
backend/node_modules/mongodb/src/operations/rename.ts
generated
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
import type { Document } from '../bson';
|
||||
import { Collection } from '../collection';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { MongoDBNamespace } from '../utils';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @public */
|
||||
export interface RenameOptions extends CommandOperationOptions {
|
||||
/** Drop the target name collection if it previously exists. */
|
||||
dropTarget?: boolean;
|
||||
/** Unclear */
|
||||
new_collection?: boolean;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
export class RenameOperation extends CommandOperation<Document> {
|
||||
constructor(
|
||||
public collection: Collection,
|
||||
public newName: string,
|
||||
public override options: RenameOptions
|
||||
) {
|
||||
super(collection, options);
|
||||
this.ns = new MongoDBNamespace('admin', '$cmd');
|
||||
}
|
||||
|
||||
override get commandName(): string {
|
||||
return 'renameCollection' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<Collection> {
|
||||
// Build the command
|
||||
const renameCollection = this.collection.namespace;
|
||||
const toCollection = this.collection.s.namespace.withCollection(this.newName).toString();
|
||||
const dropTarget =
|
||||
typeof this.options.dropTarget === 'boolean' ? this.options.dropTarget : false;
|
||||
|
||||
const command = {
|
||||
renameCollection: renameCollection,
|
||||
to: toCollection,
|
||||
dropTarget: dropTarget
|
||||
};
|
||||
|
||||
await super.executeCommand(server, session, command, timeoutContext);
|
||||
return new Collection(this.collection.s.db, this.newName, this.collection.s.options);
|
||||
}
|
||||
}
|
||||
|
||||
defineAspects(RenameOperation, [Aspect.WRITE_OPERATION]);
|
||||
94
backend/node_modules/mongodb/src/operations/run_command.ts
generated
vendored
Normal file
94
backend/node_modules/mongodb/src/operations/run_command.ts
generated
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
import type { BSONSerializeOptions, Document } from '../bson';
|
||||
import { type MongoDBResponseConstructor } from '../cmap/wire_protocol/responses';
|
||||
import { type Db } from '../db';
|
||||
import { type TODO_NODE_3286 } from '../mongo_types';
|
||||
import type { ReadPreferenceLike } from '../read_preference';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { MongoDBNamespace } from '../utils';
|
||||
import { AbstractOperation } from './operation';
|
||||
|
||||
/** @public */
|
||||
export type RunCommandOptions = {
|
||||
/** Specify ClientSession for this command */
|
||||
session?: ClientSession;
|
||||
/** The read preference */
|
||||
readPreference?: ReadPreferenceLike;
|
||||
/**
|
||||
* @experimental
|
||||
* Specifies the time an operation will run until it throws a timeout error
|
||||
*/
|
||||
timeoutMS?: number;
|
||||
/** @internal */
|
||||
omitMaxTimeMS?: boolean;
|
||||
} & BSONSerializeOptions;
|
||||
|
||||
/** @internal */
|
||||
export class RunCommandOperation<T = Document> extends AbstractOperation<T> {
|
||||
constructor(
|
||||
parent: Db,
|
||||
public command: Document,
|
||||
public override options: RunCommandOptions & { responseType?: MongoDBResponseConstructor }
|
||||
) {
|
||||
super(options);
|
||||
this.ns = parent.s.namespace.withCollection('$cmd');
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'runCommand' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<T> {
|
||||
this.server = server;
|
||||
const res: TODO_NODE_3286 = await server.command(
|
||||
this.ns,
|
||||
this.command,
|
||||
{
|
||||
...this.options,
|
||||
readPreference: this.readPreference,
|
||||
session,
|
||||
timeoutContext
|
||||
},
|
||||
this.options.responseType
|
||||
);
|
||||
|
||||
return res;
|
||||
}
|
||||
}
|
||||
|
||||
export class RunAdminCommandOperation<T = Document> extends AbstractOperation<T> {
|
||||
constructor(
|
||||
public command: Document,
|
||||
public override options: RunCommandOptions & {
|
||||
noResponse?: boolean;
|
||||
bypassPinningCheck?: boolean;
|
||||
}
|
||||
) {
|
||||
super(options);
|
||||
this.ns = new MongoDBNamespace('admin', '$cmd');
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'runCommand' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<T> {
|
||||
this.server = server;
|
||||
const res: TODO_NODE_3286 = await server.command(this.ns, this.command, {
|
||||
...this.options,
|
||||
readPreference: this.readPreference,
|
||||
session,
|
||||
timeoutContext
|
||||
});
|
||||
return res;
|
||||
}
|
||||
}
|
||||
54
backend/node_modules/mongodb/src/operations/search_indexes/create.ts
generated
vendored
Normal file
54
backend/node_modules/mongodb/src/operations/search_indexes/create.ts
generated
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
import type { Document } from '../../bson';
|
||||
import type { Collection } from '../../collection';
|
||||
import type { Server } from '../../sdam/server';
|
||||
import type { ClientSession } from '../../sessions';
|
||||
import { type TimeoutContext } from '../../timeout';
|
||||
import { AbstractOperation } from '../operation';
|
||||
|
||||
/**
|
||||
* @public
|
||||
*/
|
||||
export interface SearchIndexDescription extends Document {
|
||||
/** The name of the index. */
|
||||
name?: string;
|
||||
|
||||
/** The index definition. */
|
||||
definition: Document;
|
||||
|
||||
/** The type of the index. Currently `search` or `vectorSearch` are supported. */
|
||||
type?: string;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
export class CreateSearchIndexesOperation extends AbstractOperation<string[]> {
|
||||
constructor(
|
||||
private readonly collection: Collection,
|
||||
private readonly descriptions: ReadonlyArray<SearchIndexDescription>
|
||||
) {
|
||||
super();
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'createSearchIndexes' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<string[]> {
|
||||
const namespace = this.collection.fullNamespace;
|
||||
const command = {
|
||||
createSearchIndexes: namespace.collection,
|
||||
indexes: this.descriptions
|
||||
};
|
||||
|
||||
const res = await server.command(namespace, command, {
|
||||
session,
|
||||
timeoutContext
|
||||
});
|
||||
|
||||
const indexesCreated: Array<{ name: string }> = res?.indexesCreated ?? [];
|
||||
return indexesCreated.map(({ name }) => name);
|
||||
}
|
||||
}
|
||||
47
backend/node_modules/mongodb/src/operations/search_indexes/drop.ts
generated
vendored
Normal file
47
backend/node_modules/mongodb/src/operations/search_indexes/drop.ts
generated
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
import type { Document } from '../../bson';
|
||||
import type { Collection } from '../../collection';
|
||||
import { MONGODB_ERROR_CODES, MongoServerError } from '../../error';
|
||||
import type { Server } from '../../sdam/server';
|
||||
import type { ClientSession } from '../../sessions';
|
||||
import { type TimeoutContext } from '../../timeout';
|
||||
import { AbstractOperation } from '../operation';
|
||||
|
||||
/** @internal */
|
||||
export class DropSearchIndexOperation extends AbstractOperation<void> {
|
||||
constructor(
|
||||
private readonly collection: Collection,
|
||||
private readonly name: string
|
||||
) {
|
||||
super();
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'dropSearchIndex' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<void> {
|
||||
const namespace = this.collection.fullNamespace;
|
||||
|
||||
const command: Document = {
|
||||
dropSearchIndex: namespace.collection
|
||||
};
|
||||
|
||||
if (typeof this.name === 'string') {
|
||||
command.name = this.name;
|
||||
}
|
||||
|
||||
try {
|
||||
await server.command(namespace, command, { session, timeoutContext });
|
||||
} catch (error) {
|
||||
const isNamespaceNotFoundError =
|
||||
error instanceof MongoServerError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound;
|
||||
if (!isNamespaceNotFoundError) {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
37
backend/node_modules/mongodb/src/operations/search_indexes/update.ts
generated
vendored
Normal file
37
backend/node_modules/mongodb/src/operations/search_indexes/update.ts
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
import type { Document } from '../../bson';
|
||||
import type { Collection } from '../../collection';
|
||||
import type { Server } from '../../sdam/server';
|
||||
import type { ClientSession } from '../../sessions';
|
||||
import { type TimeoutContext } from '../../timeout';
|
||||
import { AbstractOperation } from '../operation';
|
||||
|
||||
/** @internal */
|
||||
export class UpdateSearchIndexOperation extends AbstractOperation<void> {
|
||||
constructor(
|
||||
private readonly collection: Collection,
|
||||
private readonly name: string,
|
||||
private readonly definition: Document
|
||||
) {
|
||||
super();
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'updateSearchIndex' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<void> {
|
||||
const namespace = this.collection.fullNamespace;
|
||||
const command = {
|
||||
updateSearchIndex: namespace.collection,
|
||||
name: this.name,
|
||||
definition: this.definition
|
||||
};
|
||||
|
||||
await server.command(namespace, command, { session, timeoutContext });
|
||||
return;
|
||||
}
|
||||
}
|
||||
72
backend/node_modules/mongodb/src/operations/set_profiling_level.ts
generated
vendored
Normal file
72
backend/node_modules/mongodb/src/operations/set_profiling_level.ts
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
import type { Db } from '../db';
|
||||
import { MongoInvalidArgumentError } from '../error';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { enumToString } from '../utils';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
|
||||
const levelValues = new Set(['off', 'slow_only', 'all']);
|
||||
|
||||
/** @public */
|
||||
export const ProfilingLevel = Object.freeze({
|
||||
off: 'off',
|
||||
slowOnly: 'slow_only',
|
||||
all: 'all'
|
||||
} as const);
|
||||
|
||||
/** @public */
|
||||
export type ProfilingLevel = (typeof ProfilingLevel)[keyof typeof ProfilingLevel];
|
||||
|
||||
/** @public */
|
||||
export type SetProfilingLevelOptions = CommandOperationOptions;
|
||||
|
||||
/** @internal */
|
||||
export class SetProfilingLevelOperation extends CommandOperation<ProfilingLevel> {
|
||||
override options: SetProfilingLevelOptions;
|
||||
level: ProfilingLevel;
|
||||
profile: 0 | 1 | 2;
|
||||
|
||||
constructor(db: Db, level: ProfilingLevel, options: SetProfilingLevelOptions) {
|
||||
super(db, options);
|
||||
this.options = options;
|
||||
switch (level) {
|
||||
case ProfilingLevel.off:
|
||||
this.profile = 0;
|
||||
break;
|
||||
case ProfilingLevel.slowOnly:
|
||||
this.profile = 1;
|
||||
break;
|
||||
case ProfilingLevel.all:
|
||||
this.profile = 2;
|
||||
break;
|
||||
default:
|
||||
this.profile = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
this.level = level;
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'profile' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<ProfilingLevel> {
|
||||
const level = this.level;
|
||||
|
||||
if (!levelValues.has(level)) {
|
||||
throw new MongoInvalidArgumentError(
|
||||
`Profiling level must be one of "${enumToString(ProfilingLevel)}"`
|
||||
);
|
||||
}
|
||||
|
||||
// TODO(NODE-3483): Determine error to put here
|
||||
await super.executeCommand(server, session, { profile: this.profile }, timeoutContext);
|
||||
return level;
|
||||
}
|
||||
}
|
||||
42
backend/node_modules/mongodb/src/operations/stats.ts
generated
vendored
Normal file
42
backend/node_modules/mongodb/src/operations/stats.ts
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
import type { Document } from '../bson';
|
||||
import type { Db } from '../db';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects } from './operation';
|
||||
|
||||
/** @public */
|
||||
export interface DbStatsOptions extends CommandOperationOptions {
|
||||
/** Divide the returned sizes by scale value. */
|
||||
scale?: number;
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
export class DbStatsOperation extends CommandOperation<Document> {
|
||||
override options: DbStatsOptions;
|
||||
|
||||
constructor(db: Db, options: DbStatsOptions) {
|
||||
super(db, options);
|
||||
this.options = options;
|
||||
}
|
||||
|
||||
override get commandName() {
|
||||
return 'dbStats' as const;
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<Document> {
|
||||
const command: Document = { dbStats: true };
|
||||
if (this.options.scale != null) {
|
||||
command.scale = this.options.scale;
|
||||
}
|
||||
|
||||
return await super.executeCommand(server, session, command, timeoutContext);
|
||||
}
|
||||
}
|
||||
|
||||
defineAspects(DbStatsOperation, [Aspect.READ_OPERATION]);
|
||||
312
backend/node_modules/mongodb/src/operations/update.ts
generated
vendored
Normal file
312
backend/node_modules/mongodb/src/operations/update.ts
generated
vendored
Normal file
@@ -0,0 +1,312 @@
|
||||
import type { Document } from '../bson';
|
||||
import type { Collection } from '../collection';
|
||||
import { MongoCompatibilityError, MongoInvalidArgumentError, MongoServerError } from '../error';
|
||||
import type { InferIdType, TODO_NODE_3286 } from '../mongo_types';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { hasAtomicOperators, type MongoDBNamespace } from '../utils';
|
||||
import { type CollationOptions, CommandOperation, type CommandOperationOptions } from './command';
|
||||
import { Aspect, defineAspects, type Hint } from './operation';
|
||||
|
||||
/**
 * @public
 * Options for update operations. The per-statement fields (`arrayFilters`,
 * `collation`, `hint`, `upsert`) are copied onto each UpdateStatement by
 * `makeUpdateStatement`; the rest become top-level command fields.
 */
export interface UpdateOptions extends CommandOperationOptions {
  /** A set of filters specifying to which array elements an update should apply */
  arrayFilters?: Document[];
  /** If true, allows the write to opt-out of document level validation */
  bypassDocumentValidation?: boolean;
  /** Specifies a collation */
  collation?: CollationOptions;
  /** Specify that the update query should only consider plans using the hinted index */
  hint?: Hint;
  /** When true, creates a new document if no document matches the query */
  upsert?: boolean;
  /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
  let?: Document;
}
|
||||
|
||||
/**
 * @public
 * `TSchema` is the schema of the collection
 */
export interface UpdateResult<TSchema extends Document = Document> {
  /** Indicates whether this write result was acknowledged. If not, then all other members of this result will be undefined */
  acknowledged: boolean;
  /** The number of documents that matched the filter (reported as 0 when the write resulted in an upsert) */
  matchedCount: number;
  /** The number of documents that were modified */
  modifiedCount: number;
  /** The number of documents that were upserted */
  upsertedCount: number;
  /** The identifier of the inserted document if an upsert took place */
  upsertedId: InferIdType<TSchema> | null;
}
|
||||
|
||||
/**
 * @public
 * A single entry of the `updates` array sent in the server `update` command
 * (see UpdateOperation.execute). Field names mirror the wire format.
 */
export interface UpdateStatement {
  /** The query that matches documents to update. */
  q: Document;
  /** The modifications to apply. */
  u: Document | Document[];
  /** If true, perform an insert if no documents match the query. */
  upsert?: boolean;
  /** If true, updates all documents that meet the query criteria. */
  multi?: boolean;
  /** Specifies the collation to use for the operation. */
  collation?: CollationOptions;
  /** An array of filter documents that determines which array elements to modify for an update operation on an array field. */
  arrayFilters?: Document[];
  /** A document or string that specifies the index to use to support the query predicate. */
  hint?: Hint;
}
|
||||
|
||||
/**
 * @internal
 * UpdateOperation is used in bulk write, while UpdateOneOperation and UpdateManyOperation are only used in the collections API
 */
export class UpdateOperation extends CommandOperation<Document> {
  override options: UpdateOptions & { ordered?: boolean };
  // Pre-built statements that become the command's `updates` array.
  statements: UpdateStatement[];

  constructor(
    ns: MongoDBNamespace,
    statements: UpdateStatement[],
    options: UpdateOptions & { ordered?: boolean }
  ) {
    // No parent Collection/Db is supplied; the target namespace is assigned explicitly below.
    super(undefined, options);
    this.options = options;
    this.ns = ns;

    this.statements = statements;
  }

  override get commandName() {
    return 'update' as const;
  }

  // Retryable only when the base operation allows it AND no statement is a
  // multi-update: statements with `multi: true` disable retryability.
  override get canRetryWrite(): boolean {
    if (super.canRetryWrite === false) {
      return false;
    }

    return this.statements.every(op => op.multi == null || op.multi === false);
  }

  /** Assembles the `update` command from options/statements and runs it. */
  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<Document> {
    const options = this.options ?? {};
    // Ordered execution is the default unless the caller explicitly opts out.
    const ordered = typeof options.ordered === 'boolean' ? options.ordered : true;
    const command: Document = {
      update: this.ns.collection,
      updates: this.statements,
      ordered
    };

    if (typeof options.bypassDocumentValidation === 'boolean') {
      command.bypassDocumentValidation = options.bypassDocumentValidation;
    }

    if (options.let) {
      command.let = options.let;
    }

    // we check for undefined specifically here to allow falsy values
    // eslint-disable-next-line no-restricted-syntax
    if (options.comment !== undefined) {
      command.comment = options.comment;
    }

    // Reject `hint` combined with an unacknowledged (w: 0) write concern.
    const unacknowledgedWrite = this.writeConcern && this.writeConcern.w === 0;
    if (unacknowledgedWrite) {
      if (this.statements.find((o: Document) => o.hint)) {
        // TODO(NODE-3541): fix error for hint with unacknowledged writes
        throw new MongoCompatibilityError(`hint is not supported with unacknowledged writes`);
      }
    }

    const res = await super.executeCommand(server, session, command, timeoutContext);
    return res;
  }
}
|
||||
|
||||
/** @internal */
|
||||
export class UpdateOneOperation extends UpdateOperation {
|
||||
constructor(collection: Collection, filter: Document, update: Document, options: UpdateOptions) {
|
||||
super(
|
||||
collection.s.namespace,
|
||||
[makeUpdateStatement(filter, update, { ...options, multi: false })],
|
||||
options
|
||||
);
|
||||
|
||||
if (!hasAtomicOperators(update)) {
|
||||
throw new MongoInvalidArgumentError('Update document requires atomic operators');
|
||||
}
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<UpdateResult> {
|
||||
const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext);
|
||||
if (this.explain != null) return res;
|
||||
if (res.code) throw new MongoServerError(res);
|
||||
if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]);
|
||||
|
||||
return {
|
||||
acknowledged: this.writeConcern?.w !== 0,
|
||||
modifiedCount: res.nModified ?? res.n,
|
||||
upsertedId:
|
||||
Array.isArray(res.upserted) && res.upserted.length > 0 ? res.upserted[0]._id : null,
|
||||
upsertedCount: Array.isArray(res.upserted) && res.upserted.length ? res.upserted.length : 0,
|
||||
matchedCount: Array.isArray(res.upserted) && res.upserted.length > 0 ? 0 : res.n
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/** @internal */
|
||||
export class UpdateManyOperation extends UpdateOperation {
|
||||
constructor(collection: Collection, filter: Document, update: Document, options: UpdateOptions) {
|
||||
super(
|
||||
collection.s.namespace,
|
||||
[makeUpdateStatement(filter, update, { ...options, multi: true })],
|
||||
options
|
||||
);
|
||||
|
||||
if (!hasAtomicOperators(update)) {
|
||||
throw new MongoInvalidArgumentError('Update document requires atomic operators');
|
||||
}
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<UpdateResult> {
|
||||
const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext);
|
||||
if (this.explain != null) return res;
|
||||
if (res.code) throw new MongoServerError(res);
|
||||
if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]);
|
||||
|
||||
return {
|
||||
acknowledged: this.writeConcern?.w !== 0,
|
||||
modifiedCount: res.nModified ?? res.n,
|
||||
upsertedId:
|
||||
Array.isArray(res.upserted) && res.upserted.length > 0 ? res.upserted[0]._id : null,
|
||||
upsertedCount: Array.isArray(res.upserted) && res.upserted.length ? res.upserted.length : 0,
|
||||
matchedCount: Array.isArray(res.upserted) && res.upserted.length > 0 ? 0 : res.n
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * @public
 * Options for replaceOne. Unlike UpdateOptions there is no `arrayFilters`,
 * since a replacement substitutes the whole document.
 */
export interface ReplaceOptions extends CommandOperationOptions {
  /** If true, allows the write to opt-out of document level validation */
  bypassDocumentValidation?: boolean;
  /** Specifies a collation */
  collation?: CollationOptions;
  /** Specify that the update query should only consider plans using the hinted index */
  hint?: string | Document;
  /** When true, creates a new document if no document matches the query */
  upsert?: boolean;
  /** Map of parameter names and values that can be accessed using $$var (requires MongoDB 5.0). */
  let?: Document;
}
|
||||
|
||||
/** @internal */
|
||||
export class ReplaceOneOperation extends UpdateOperation {
|
||||
constructor(
|
||||
collection: Collection,
|
||||
filter: Document,
|
||||
replacement: Document,
|
||||
options: ReplaceOptions
|
||||
) {
|
||||
super(
|
||||
collection.s.namespace,
|
||||
[makeUpdateStatement(filter, replacement, { ...options, multi: false })],
|
||||
options
|
||||
);
|
||||
|
||||
if (hasAtomicOperators(replacement)) {
|
||||
throw new MongoInvalidArgumentError('Replacement document must not contain atomic operators');
|
||||
}
|
||||
}
|
||||
|
||||
override async execute(
|
||||
server: Server,
|
||||
session: ClientSession | undefined,
|
||||
timeoutContext: TimeoutContext
|
||||
): Promise<UpdateResult> {
|
||||
const res: TODO_NODE_3286 = await super.execute(server, session, timeoutContext);
|
||||
if (this.explain != null) return res;
|
||||
if (res.code) throw new MongoServerError(res);
|
||||
if (res.writeErrors) throw new MongoServerError(res.writeErrors[0]);
|
||||
|
||||
return {
|
||||
acknowledged: this.writeConcern?.w !== 0,
|
||||
modifiedCount: res.nModified ?? res.n,
|
||||
upsertedId:
|
||||
Array.isArray(res.upserted) && res.upserted.length > 0 ? res.upserted[0]._id : null,
|
||||
upsertedCount: Array.isArray(res.upserted) && res.upserted.length ? res.upserted.length : 0,
|
||||
matchedCount: Array.isArray(res.upserted) && res.upserted.length > 0 ? 0 : res.n
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
export function makeUpdateStatement(
|
||||
filter: Document,
|
||||
update: Document | Document[],
|
||||
options: UpdateOptions & { multi?: boolean }
|
||||
): UpdateStatement {
|
||||
if (filter == null || typeof filter !== 'object') {
|
||||
throw new MongoInvalidArgumentError('Selector must be a valid JavaScript object');
|
||||
}
|
||||
|
||||
if (update == null || typeof update !== 'object') {
|
||||
throw new MongoInvalidArgumentError('Document must be a valid JavaScript object');
|
||||
}
|
||||
|
||||
const op: UpdateStatement = { q: filter, u: update };
|
||||
if (typeof options.upsert === 'boolean') {
|
||||
op.upsert = options.upsert;
|
||||
}
|
||||
|
||||
if (options.multi) {
|
||||
op.multi = options.multi;
|
||||
}
|
||||
|
||||
if (options.hint) {
|
||||
op.hint = options.hint;
|
||||
}
|
||||
|
||||
if (options.arrayFilters) {
|
||||
op.arrayFilters = options.arrayFilters;
|
||||
}
|
||||
|
||||
if (options.collation) {
|
||||
op.collation = options.collation;
|
||||
}
|
||||
|
||||
return op;
|
||||
}
|
||||
|
||||
// Aspect registration. UpdateOperation and the single-document variants carry
// Aspect.RETRYABLE (still subject to the canRetryWrite check above), while
// UpdateManyOperation does not, since it always issues multi:true statements.
defineAspects(UpdateOperation, [Aspect.RETRYABLE, Aspect.WRITE_OPERATION, Aspect.SKIP_COLLATION]);
defineAspects(UpdateOneOperation, [
  Aspect.RETRYABLE,
  Aspect.WRITE_OPERATION,
  Aspect.EXPLAINABLE,
  Aspect.SKIP_COLLATION
]);
defineAspects(UpdateManyOperation, [
  Aspect.WRITE_OPERATION,
  Aspect.EXPLAINABLE,
  Aspect.SKIP_COLLATION
]);
defineAspects(ReplaceOneOperation, [
  Aspect.RETRYABLE,
  Aspect.WRITE_OPERATION,
  Aspect.SKIP_COLLATION
]);
|
||||
58
backend/node_modules/mongodb/src/operations/validate_collection.ts
generated
vendored
Normal file
58
backend/node_modules/mongodb/src/operations/validate_collection.ts
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
import type { Admin } from '../admin';
|
||||
import type { Document } from '../bson';
|
||||
import { MongoUnexpectedServerResponseError } from '../error';
|
||||
import type { Server } from '../sdam/server';
|
||||
import type { ClientSession } from '../sessions';
|
||||
import { type TimeoutContext } from '../timeout';
|
||||
import { CommandOperation, type CommandOperationOptions } from './command';
|
||||
|
||||
/** @public */
export interface ValidateCollectionOptions extends CommandOperationOptions {
  /** Validates a collection in the background, without interrupting read or write traffic (only in MongoDB 4.4+) */
  background?: boolean;
}
|
||||
|
||||
/**
 * @internal
 * Runs the server `validate` command against a single collection and throws
 * when the reply indicates the collection is invalid or corrupt.
 */
export class ValidateCollectionOperation extends CommandOperation<Document> {
  override options: ValidateCollectionOptions;
  collectionName: string;
  // Fully-assembled `validate` command, built in the constructor.
  command: Document;

  constructor(admin: Admin, collectionName: string, options: ValidateCollectionOptions) {
    // Decorate command with extra options
    const command: Document = { validate: collectionName };
    const keys = Object.keys(options);
    // Copy every own option (except the driver-level `session`) onto the
    // command verbatim. This runs before super() and is legal because it
    // never touches `this`.
    for (let i = 0; i < keys.length; i++) {
      if (Object.prototype.hasOwnProperty.call(options, keys[i]) && keys[i] !== 'session') {
        command[keys[i]] = (options as Document)[keys[i]];
      }
    }

    super(admin.s.db, options);
    this.options = options;
    this.command = command;
    this.collectionName = collectionName;
  }

  override get commandName() {
    return 'validate' as const;
  }

  /**
   * Executes the pre-built command and inspects the reply:
   * a non-string `result`, a `result` mentioning exception/corrupt, or an
   * explicit `valid: false` each raise MongoUnexpectedServerResponseError.
   */
  override async execute(
    server: Server,
    session: ClientSession | undefined,
    timeoutContext: TimeoutContext
  ): Promise<Document> {
    const collectionName = this.collectionName;

    const doc = await super.executeCommand(server, session, this.command, timeoutContext);
    if (doc.result != null && typeof doc.result !== 'string')
      throw new MongoUnexpectedServerResponseError('Error with validation data');
    if (doc.result != null && doc.result.match(/exception|corrupt/) != null)
      throw new MongoUnexpectedServerResponseError(`Invalid collection ${collectionName}`);
    if (doc.valid != null && !doc.valid)
      throw new MongoUnexpectedServerResponseError(`Invalid collection ${collectionName}`);

    return doc;
  }
}
|
||||
Reference in New Issue
Block a user