forked from CSI-KJSCE/BOS-React-
Commit With Frontend and Backend in MERN
487
backend/node_modules/mongodb/src/gridfs/download.ts
generated
vendored
Normal file
@@ -0,0 +1,487 @@
import { Readable } from 'stream';

import type { Document, ObjectId } from '../bson';
import type { Collection } from '../collection';
import { CursorTimeoutMode } from '../cursor/abstract_cursor';
import type { FindCursor } from '../cursor/find_cursor';
import {
  MongoGridFSChunkError,
  MongoGridFSStreamError,
  MongoInvalidArgumentError,
  MongoRuntimeError
} from '../error';
import type { FindOptions } from '../operations/find';
import type { ReadPreference } from '../read_preference';
import type { Sort } from '../sort';
import { CSOTTimeoutContext } from '../timeout';
import type { Callback } from '../utils';
import type { GridFSChunk } from './upload';

/** @public */
export interface GridFSBucketReadStreamOptions {
  sort?: Sort;
  skip?: number;
  /**
   * 0-indexed non-negative byte offset from the beginning of the file
   */
  start?: number;
  /**
   * 0-indexed non-negative byte offset to the end of the file contents
   * to be returned by the stream. `end` is non-inclusive
   */
  end?: number;
  /**
   * @experimental
   * Specifies the time an operation will run until it throws a timeout error
   */
  timeoutMS?: number;
}

/** @public */
export interface GridFSBucketReadStreamOptionsWithRevision extends GridFSBucketReadStreamOptions {
  /** The revision number relative to the oldest file with the given filename. 0
   * gets you the oldest file, 1 gets you the 2nd oldest, -1 gets you the
   * newest. */
  revision?: number;
}

/** @public */
export interface GridFSFile {
  _id: ObjectId;
  length: number;
  chunkSize: number;
  filename: string;
  metadata?: Document;
  uploadDate: Date;
  /** @deprecated Will be removed in the next major version. */
  contentType?: string;
  /** @deprecated Will be removed in the next major version. */
  aliases?: string[];
}

/** @internal */
export interface GridFSBucketReadStreamPrivate {
  /**
   * The running total number of bytes read from the chunks collection.
   */
  bytesRead: number;
  /**
   * The number of bytes to remove from the last chunk read in the file. This is non-zero
   * if `end` is not equal to the length of the document and `end` is not a multiple
   * of the chunkSize.
   */
  bytesToTrim: number;

  /**
   * The number of bytes to remove from the first chunk read in the file. This is non-zero
   * if `start` is not equal to 0 and `start` is not a multiple
   * of the chunkSize.
   */
  bytesToSkip: number;

  files: Collection<GridFSFile>;
  chunks: Collection<GridFSChunk>;
  cursor?: FindCursor<GridFSChunk>;

  /** The running total number of chunks read from the chunks collection. */
  expected: number;

  /**
   * The filter used to search in the _files_ collection (i.e., `{ _id: <> }`)
   * This is not the same filter used when reading chunks from the chunks collection.
   */
  filter: Document;

  /** Indicates whether or not download has started. */
  init: boolean;

  /** The expected number of chunks to read, calculated from start, end, chunkSize and file length. */
  expectedEnd: number;
  file?: GridFSFile;
  options: {
    sort?: Sort;
    skip?: number;
    start: number;
    end: number;
    timeoutMS?: number;
  };
  readPreference?: ReadPreference;
  timeoutContext?: CSOTTimeoutContext;
}

/**
 * A readable stream that enables you to read buffers from GridFS.
 *
 * Do not instantiate this class directly. Use `openDownloadStream()` instead.
 * @public
 */
export class GridFSBucketReadStream extends Readable {
  /** @internal */
  s: GridFSBucketReadStreamPrivate;

  /**
   * Fires when the stream loaded the file document corresponding to the provided id.
   * @event
   */
  static readonly FILE = 'file' as const;

  /**
   * @param chunks - Handle for chunks collection
   * @param files - Handle for files collection
   * @param readPreference - The read preference to use
   * @param filter - The filter to use to find the file document
   * @internal
   */
  constructor(
    chunks: Collection<GridFSChunk>,
    files: Collection<GridFSFile>,
    readPreference: ReadPreference | undefined,
    filter: Document,
    options?: GridFSBucketReadStreamOptions
  ) {
    super({ emitClose: true });
    this.s = {
      bytesToTrim: 0,
      bytesToSkip: 0,
      bytesRead: 0,
      chunks,
      expected: 0,
      files,
      filter,
      init: false,
      expectedEnd: 0,
      options: {
        start: 0,
        end: 0,
        ...options
      },
      readPreference,
      timeoutContext:
        options?.timeoutMS != null
          ? new CSOTTimeoutContext({ timeoutMS: options.timeoutMS, serverSelectionTimeoutMS: 0 })
          : undefined
    };
  }

  /**
   * Reads from the cursor and pushes to the stream.
   * Private implementation, do not call directly.
   * @internal
   */
  override _read(): void {
    if (this.destroyed) return;
    waitForFile(this, () => doRead(this));
  }

  /**
   * Sets the 0-based offset in bytes to start streaming from. Throws
   * an error if this stream has entered flowing mode
   * (e.g. if you've already called `on('data')`)
   *
   * @param start - 0-based offset in bytes to start streaming from
   */
  start(start = 0): this {
    throwIfInitialized(this);
    this.s.options.start = start;
    return this;
  }

  /**
   * Sets the 0-based offset in bytes to stop streaming at. Throws
   * an error if this stream has entered flowing mode
   * (e.g. if you've already called `on('data')`)
   *
   * @param end - Offset in bytes to stop reading at
   */
  end(end = 0): this {
    throwIfInitialized(this);
    this.s.options.end = end;
    return this;
  }

  /**
   * Marks this stream as aborted (will never push another `data` event)
   * and kills the underlying cursor. Will emit the 'end' event, and then
   * the 'close' event once the cursor is successfully killed.
   */
  async abort(): Promise<void> {
    this.push(null);
    this.destroy();
    const remainingTimeMS = this.s.timeoutContext?.getRemainingTimeMSOrThrow();
    await this.s.cursor?.close({ timeoutMS: remainingTimeMS });
  }
}
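
// Editor's note — usage sketch, not part of the vendored driver source. A ranged
// download using the options above (assumes `bucket` is an existing GridFSBucket
// and `id` is the ObjectId of an uploaded file):
//
//   const stream = bucket.openDownloadStream(id, { start: 0, end: 1024 });
//   stream.on('file', file => console.log('downloading', file.filename));
//   stream.pipe(process.stdout);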

function throwIfInitialized(stream: GridFSBucketReadStream): void {
  if (stream.s.init) {
    throw new MongoGridFSStreamError('Options cannot be changed after the stream is initialized');
  }
}

function doRead(stream: GridFSBucketReadStream): void {
  if (stream.destroyed) return;
  if (!stream.s.cursor) return;
  if (!stream.s.file) return;

  const handleReadResult = (doc: Document | null) => {
    if (stream.destroyed) return;

    if (!doc) {
      stream.push(null);

      stream.s.cursor?.close().then(undefined, error => stream.destroy(error));
      return;
    }

    if (!stream.s.file) return;

    const bytesRemaining = stream.s.file.length - stream.s.bytesRead;
    const expectedN = stream.s.expected++;
    const expectedLength = Math.min(stream.s.file.chunkSize, bytesRemaining);
    if (doc.n > expectedN) {
      return stream.destroy(
        new MongoGridFSChunkError(
          `ChunkIsMissing: Got unexpected n: ${doc.n}, expected: ${expectedN}`
        )
      );
    }

    if (doc.n < expectedN) {
      return stream.destroy(
        new MongoGridFSChunkError(`ExtraChunk: Got unexpected n: ${doc.n}, expected: ${expectedN}`)
      );
    }

    let buf = Buffer.isBuffer(doc.data) ? doc.data : doc.data.buffer;

    if (buf.byteLength !== expectedLength) {
      if (bytesRemaining <= 0) {
        return stream.destroy(
          new MongoGridFSChunkError(
            `ExtraChunk: Got unexpected n: ${doc.n}, expected file length ${stream.s.file.length} bytes but already read ${stream.s.bytesRead} bytes`
          )
        );
      }

      return stream.destroy(
        new MongoGridFSChunkError(
          `ChunkIsWrongSize: Got unexpected length: ${buf.byteLength}, expected: ${expectedLength}`
        )
      );
    }

    stream.s.bytesRead += buf.byteLength;

    if (buf.byteLength === 0) {
      return stream.push(null);
    }

    let sliceStart = null;
    let sliceEnd = null;

    if (stream.s.bytesToSkip != null) {
      sliceStart = stream.s.bytesToSkip;
      stream.s.bytesToSkip = 0;
    }

    const atEndOfStream = expectedN === stream.s.expectedEnd - 1;
    const bytesLeftToRead = stream.s.options.end - stream.s.bytesToSkip;
    if (atEndOfStream && stream.s.bytesToTrim != null) {
      sliceEnd = stream.s.file.chunkSize - stream.s.bytesToTrim;
    } else if (stream.s.options.end && bytesLeftToRead < doc.data.byteLength) {
      sliceEnd = bytesLeftToRead;
    }

    if (sliceStart != null || sliceEnd != null) {
      buf = buf.slice(sliceStart || 0, sliceEnd || buf.byteLength);
    }

    stream.push(buf);
    return;
  };

  stream.s.cursor.next().then(handleReadResult, error => {
    if (stream.destroyed) return;
    stream.destroy(error);
  });
}

function init(stream: GridFSBucketReadStream): void {
  const findOneOptions: FindOptions = {};
  if (stream.s.readPreference) {
    findOneOptions.readPreference = stream.s.readPreference;
  }
  if (stream.s.options && stream.s.options.sort) {
    findOneOptions.sort = stream.s.options.sort;
  }
  if (stream.s.options && stream.s.options.skip) {
    findOneOptions.skip = stream.s.options.skip;
  }

  const handleReadResult = (doc: Document | null) => {
    if (stream.destroyed) return;

    if (!doc) {
      const identifier = stream.s.filter._id
        ? stream.s.filter._id.toString()
        : stream.s.filter.filename;
      const errmsg = `FileNotFound: file ${identifier} was not found`;
      // TODO(NODE-3483)
      const err = new MongoRuntimeError(errmsg);
      err.code = 'ENOENT'; // TODO: NODE-3338 set property as part of constructor
      return stream.destroy(err);
    }

    // If document is empty, kill the stream immediately and don't
    // execute any reads
    if (doc.length <= 0) {
      stream.push(null);
      return;
    }

    if (stream.destroyed) {
      // If user destroys the stream before we have a cursor, wait
      // until the query is done to say we're 'closed' because we can't
      // cancel a query.
      stream.destroy();
      return;
    }

    try {
      stream.s.bytesToSkip = handleStartOption(stream, doc, stream.s.options);
    } catch (error) {
      return stream.destroy(error);
    }

    const filter: Document = { files_id: doc._id };

    // Currently (MongoDB 3.4.4) the skip function does not use the index;
    // it retrieves all the documents first and then skips them. (CS-25811)
    // As a workaround we use $gte on the "n" field.
    if (stream.s.options && stream.s.options.start != null) {
      const skip = Math.floor(stream.s.options.start / doc.chunkSize);
      if (skip > 0) {
        filter['n'] = { $gte: skip };
      }
    }

    let remainingTimeMS: number | undefined;
    try {
      remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow(
        `Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms`
      );
    } catch (error) {
      return stream.destroy(error);
    }

    stream.s.cursor = stream.s.chunks
      .find(filter, {
        timeoutMode: stream.s.options.timeoutMS != null ? CursorTimeoutMode.LIFETIME : undefined,
        timeoutMS: remainingTimeMS
      })
      .sort({ n: 1 });

    if (stream.s.readPreference) {
      stream.s.cursor.withReadPreference(stream.s.readPreference);
    }

    stream.s.expectedEnd = Math.ceil(doc.length / doc.chunkSize);
    stream.s.file = doc as GridFSFile;

    try {
      stream.s.bytesToTrim = handleEndOption(stream, doc, stream.s.cursor, stream.s.options);
    } catch (error) {
      return stream.destroy(error);
    }

    stream.emit(GridFSBucketReadStream.FILE, doc);
    return;
  };

  let remainingTimeMS: number | undefined;
  try {
    remainingTimeMS = stream.s.timeoutContext?.getRemainingTimeMSOrThrow(
      `Download timed out after ${stream.s.timeoutContext?.timeoutMS}ms`
    );
  } catch (error) {
    if (!stream.destroyed) stream.destroy(error);
    return;
  }

  findOneOptions.timeoutMS = remainingTimeMS;

  stream.s.files.findOne(stream.s.filter, findOneOptions).then(handleReadResult, error => {
    if (stream.destroyed) return;
    stream.destroy(error);
  });
}

function waitForFile(stream: GridFSBucketReadStream, callback: Callback): void {
  if (stream.s.file) {
    return callback();
  }

  if (!stream.s.init) {
    init(stream);
    stream.s.init = true;
  }

  stream.once('file', () => {
    callback();
  });
}

function handleStartOption(
  stream: GridFSBucketReadStream,
  doc: Document,
  options: GridFSBucketReadStreamOptions
): number {
  if (options && options.start != null) {
    if (options.start > doc.length) {
      throw new MongoInvalidArgumentError(
        `Stream start (${options.start}) must not be more than the length of the file (${doc.length})`
      );
    }
    if (options.start < 0) {
      throw new MongoInvalidArgumentError(`Stream start (${options.start}) must not be negative`);
    }
    if (options.end != null && options.end < options.start) {
      throw new MongoInvalidArgumentError(
        `Stream start (${options.start}) must not be greater than stream end (${options.end})`
      );
    }

    stream.s.bytesRead = Math.floor(options.start / doc.chunkSize) * doc.chunkSize;
    stream.s.expected = Math.floor(options.start / doc.chunkSize);

    return options.start - stream.s.bytesRead;
  }
  throw new MongoInvalidArgumentError('Start option must be defined');
}
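
// Editor's note — worked example of handleStartOption, not driver source: with
// chunkSize = 255 * 1024 = 261120 and start = 300000, the stream begins at chunk
// n = floor(300000 / 261120) = 1, so bytesRead starts at 261120 and
// bytesToSkip = 300000 - 261120 = 38880 bytes are trimmed from the front of the
// first chunk that is read.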

function handleEndOption(
  stream: GridFSBucketReadStream,
  doc: Document,
  cursor: FindCursor<GridFSChunk>,
  options: GridFSBucketReadStreamOptions
) {
  if (options && options.end != null) {
    if (options.end > doc.length) {
      throw new MongoInvalidArgumentError(
        `Stream end (${options.end}) must not be more than the length of the file (${doc.length})`
      );
    }
    if (options.end < 0) {
      throw new MongoInvalidArgumentError(`Stream end (${options.end}) must not be negative`);
    }

    const start = options.start != null ? Math.floor(options.start / doc.chunkSize) : 0;

    cursor.limit(Math.ceil(options.end / doc.chunkSize) - start);

    stream.s.expectedEnd = Math.ceil(options.end / doc.chunkSize);

    return Math.ceil(options.end / doc.chunkSize) * doc.chunkSize - options.end;
  }
  throw new MongoInvalidArgumentError('End option must be defined');
}
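
// Editor's note — worked example of handleEndOption, not driver source: with
// chunkSize = 261120 and end = 300000, the cursor is limited to
// ceil(300000 / 261120) = 2 chunks, expectedEnd = 2, and
// bytesToTrim = 2 * 261120 - 300000 = 222240 bytes are dropped from the tail of
// the last chunk.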
263
backend/node_modules/mongodb/src/gridfs/index.ts
generated
vendored
Normal file
@@ -0,0 +1,263 @@
import type { ObjectId } from '../bson';
import type { Collection } from '../collection';
import type { FindCursor } from '../cursor/find_cursor';
import type { Db } from '../db';
import { MongoOperationTimeoutError, MongoRuntimeError } from '../error';
import { type Filter, TypedEventEmitter } from '../mongo_types';
import type { ReadPreference } from '../read_preference';
import type { Sort } from '../sort';
import { CSOTTimeoutContext } from '../timeout';
import { resolveOptions } from '../utils';
import { WriteConcern, type WriteConcernOptions } from '../write_concern';
import type { FindOptions } from './../operations/find';
import {
  GridFSBucketReadStream,
  type GridFSBucketReadStreamOptions,
  type GridFSBucketReadStreamOptionsWithRevision,
  type GridFSFile
} from './download';
import {
  GridFSBucketWriteStream,
  type GridFSBucketWriteStreamOptions,
  type GridFSChunk
} from './upload';

const DEFAULT_GRIDFS_BUCKET_OPTIONS: {
  bucketName: string;
  chunkSizeBytes: number;
} = {
  bucketName: 'fs',
  chunkSizeBytes: 255 * 1024
};

/** @public */
export interface GridFSBucketOptions extends WriteConcernOptions {
  /** The 'files' and 'chunks' collections will be prefixed with the bucket name followed by a dot. */
  bucketName?: string;
  /** Number of bytes stored in each chunk. Defaults to 255KB */
  chunkSizeBytes?: number;
  /** Read preference to be passed to read operations */
  readPreference?: ReadPreference;
  /**
   * @experimental
   * Specifies the lifetime duration of a GridFS stream. If any async operations are in progress
   * when this timeout expires, the stream will throw a timeout error.
   */
  timeoutMS?: number;
}

/** @internal */
export interface GridFSBucketPrivate {
  db: Db;
  options: {
    bucketName: string;
    chunkSizeBytes: number;
    readPreference?: ReadPreference;
    writeConcern: WriteConcern | undefined;
    timeoutMS?: number;
  };
  _chunksCollection: Collection<GridFSChunk>;
  _filesCollection: Collection<GridFSFile>;
  checkedIndexes: boolean;
  calledOpenUploadStream: boolean;
}

/** @public */
export type GridFSBucketEvents = {
  index(): void;
};

/**
 * Constructor for a streaming GridFS interface
 * @public
 */
export class GridFSBucket extends TypedEventEmitter<GridFSBucketEvents> {
  /** @internal */
  s: GridFSBucketPrivate;

  /**
   * When the first call to openUploadStream is made, the upload stream will
   * check to see if it needs to create the proper indexes on the chunks and
   * files collections. This event is fired either when 1) it determines that
   * no index creation is necessary, or 2) when it successfully creates the
   * necessary indexes.
   * @event
   */
  static readonly INDEX = 'index' as const;

  constructor(db: Db, options?: GridFSBucketOptions) {
    super();
    this.setMaxListeners(0);
    const privateOptions = resolveOptions(db, {
      ...DEFAULT_GRIDFS_BUCKET_OPTIONS,
      ...options,
      writeConcern: WriteConcern.fromOptions(options)
    });
    this.s = {
      db,
      options: privateOptions,
      _chunksCollection: db.collection<GridFSChunk>(privateOptions.bucketName + '.chunks'),
      _filesCollection: db.collection<GridFSFile>(privateOptions.bucketName + '.files'),
      checkedIndexes: false,
      calledOpenUploadStream: false
    };
  }
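
  // Editor's note — usage sketch, not part of the vendored driver source. Creating a
  // bucket over an existing `db` handle (the bucket name below is illustrative):
  //
  //   const bucket = new GridFSBucket(db, { bucketName: 'uploads', chunkSizeBytes: 255 * 1024 });
  //   fs.createReadStream('avatar.png').pipe(bucket.openUploadStream('avatar.png'));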

  /**
   * Returns a writable stream (GridFSBucketWriteStream) for writing
   * buffers to GridFS. The stream's 'id' property contains the resulting
   * file's id.
   *
   * @param filename - The value of the 'filename' key in the files doc
   * @param options - Optional settings.
   */
  openUploadStream(
    filename: string,
    options?: GridFSBucketWriteStreamOptions
  ): GridFSBucketWriteStream {
    return new GridFSBucketWriteStream(this, filename, {
      timeoutMS: this.s.options.timeoutMS,
      ...options
    });
  }

  /**
   * Returns a writable stream (GridFSBucketWriteStream) for writing
   * buffers to GridFS for a custom file id. The stream's 'id' property contains the resulting
   * file's id.
   */
  openUploadStreamWithId(
    id: ObjectId,
    filename: string,
    options?: GridFSBucketWriteStreamOptions
  ): GridFSBucketWriteStream {
    return new GridFSBucketWriteStream(this, filename, {
      timeoutMS: this.s.options.timeoutMS,
      ...options,
      id
    });
  }

  /** Returns a readable stream (GridFSBucketReadStream) for streaming file data from GridFS. */
  openDownloadStream(
    id: ObjectId,
    options?: GridFSBucketReadStreamOptions
  ): GridFSBucketReadStream {
    return new GridFSBucketReadStream(
      this.s._chunksCollection,
      this.s._filesCollection,
      this.s.options.readPreference,
      { _id: id },
      { timeoutMS: this.s.options.timeoutMS, ...options }
    );
  }

  /**
   * Deletes a file with the given id
   *
   * @param id - The id of the file doc
   */
  async delete(id: ObjectId, options?: { timeoutMS: number }): Promise<void> {
    const { timeoutMS } = resolveOptions(this.s.db, options);
    let timeoutContext: CSOTTimeoutContext | undefined = undefined;

    if (timeoutMS) {
      timeoutContext = new CSOTTimeoutContext({
        timeoutMS,
        serverSelectionTimeoutMS: this.s.db.client.s.options.serverSelectionTimeoutMS
      });
    }

    const { deletedCount } = await this.s._filesCollection.deleteOne(
      { _id: id },
      { timeoutMS: timeoutContext?.remainingTimeMS }
    );

    const remainingTimeMS = timeoutContext?.remainingTimeMS;
    if (remainingTimeMS != null && remainingTimeMS <= 0)
      throw new MongoOperationTimeoutError(`Timed out after ${timeoutMS}ms`);
    // Delete orphaned chunks before returning FileNotFound
    await this.s._chunksCollection.deleteMany({ files_id: id }, { timeoutMS: remainingTimeMS });

    if (deletedCount === 0) {
      // TODO(NODE-3483): Replace with more appropriate error
      // Consider creating new error MongoGridFSFileNotFoundError
      throw new MongoRuntimeError(`File not found for id ${id}`);
    }
  }

  /** Convenience wrapper around find on the files collection */
  find(filter: Filter<GridFSFile> = {}, options: FindOptions = {}): FindCursor<GridFSFile> {
    return this.s._filesCollection.find(filter, options);
  }

  /**
   * Returns a readable stream (GridFSBucketReadStream) for streaming the
   * file with the given name from GridFS. If there are multiple files with
   * the same name, this will stream the most recent file with the given name
   * (as determined by the `uploadDate` field). You can set the `revision`
   * option to change this behavior.
   */
  openDownloadStreamByName(
    filename: string,
    options?: GridFSBucketReadStreamOptionsWithRevision
  ): GridFSBucketReadStream {
    let sort: Sort = { uploadDate: -1 };
    let skip = undefined;
    if (options && options.revision != null) {
      if (options.revision >= 0) {
        sort = { uploadDate: 1 };
        skip = options.revision;
      } else {
        skip = -options.revision - 1;
      }
    }
    return new GridFSBucketReadStream(
      this.s._chunksCollection,
      this.s._filesCollection,
      this.s.options.readPreference,
      { filename },
      { timeoutMS: this.s.options.timeoutMS, ...options, sort, skip }
    );
  }
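
  // Editor's note — illustrative only: given several files named 'report.pdf',
  // `openDownloadStreamByName('report.pdf')` streams the newest upload.
  // `{ revision: 0 }` streams the oldest (sort { uploadDate: 1 }, skip 0), and
  // `{ revision: -1 }` the newest (sort { uploadDate: -1 }, skip 0).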

  /**
   * Renames the file with the given _id to the given string
   *
   * @param id - the id of the file to rename
   * @param filename - new name for the file
   */
  async rename(id: ObjectId, filename: string, options?: { timeoutMS: number }): Promise<void> {
    const filter = { _id: id };
    const update = { $set: { filename } };
    const { matchedCount } = await this.s._filesCollection.updateOne(filter, update, options);
    if (matchedCount === 0) {
      throw new MongoRuntimeError(`File with id ${id} not found`);
    }
  }

  /** Removes this bucket's files collection, followed by its chunks collection. */
  async drop(options?: { timeoutMS: number }): Promise<void> {
    const { timeoutMS } = resolveOptions(this.s.db, options);
    let timeoutContext: CSOTTimeoutContext | undefined = undefined;

    if (timeoutMS) {
      timeoutContext = new CSOTTimeoutContext({
        timeoutMS,
        serverSelectionTimeoutMS: this.s.db.client.s.options.serverSelectionTimeoutMS
      });
    }

    if (timeoutContext) {
      await this.s._filesCollection.drop({ timeoutMS: timeoutContext.remainingTimeMS });
      const remainingTimeMS = timeoutContext.getRemainingTimeMSOrThrow(
        `Timed out after ${timeoutMS}ms`
      );
      await this.s._chunksCollection.drop({ timeoutMS: remainingTimeMS });
    } else {
      await this.s._filesCollection.drop();
      await this.s._chunksCollection.drop();
    }
  }
}
559
backend/node_modules/mongodb/src/gridfs/upload.ts
generated
vendored
Normal file
@@ -0,0 +1,559 @@
import { Writable } from 'stream';

import { type Document, ObjectId } from '../bson';
import type { Collection } from '../collection';
import { CursorTimeoutMode } from '../cursor/abstract_cursor';
import {
  MongoAPIError,
  MONGODB_ERROR_CODES,
  MongoError,
  MongoOperationTimeoutError
} from '../error';
import { CSOTTimeoutContext } from '../timeout';
import { type Callback, resolveTimeoutOptions, squashError } from '../utils';
import type { WriteConcernOptions } from '../write_concern';
import { WriteConcern } from './../write_concern';
import type { GridFSFile } from './download';
import type { GridFSBucket } from './index';

/** @public */
export interface GridFSChunk {
  _id: ObjectId;
  files_id: ObjectId;
  n: number;
  data: Buffer | Uint8Array;
}

/** @public */
export interface GridFSBucketWriteStreamOptions extends WriteConcernOptions {
  /** Overwrite this bucket's chunkSizeBytes for this file */
  chunkSizeBytes?: number;
  /** Custom file id for the GridFS file. */
  id?: ObjectId;
  /** Object to store in the file document's `metadata` field */
  metadata?: Document;
  /**
   * String to store in the file document's `contentType` field.
   * @deprecated Will be removed in the next major version. Add a contentType field to the metadata document instead.
   */
  contentType?: string;
  /**
   * Array of strings to store in the file document's `aliases` field.
   * @deprecated Will be removed in the next major version. Add an aliases field to the metadata document instead.
   */
  aliases?: string[];
  /**
   * @experimental
   * Specifies the time an operation will run until it throws a timeout error
   */
  timeoutMS?: number;
}

/**
 * A writable stream that enables you to write buffers to GridFS.
 *
 * Do not instantiate this class directly. Use `openUploadStream()` instead.
 * @public
 */
export class GridFSBucketWriteStream extends Writable {
  bucket: GridFSBucket;
  /** A Collection instance where the file's chunks are stored */
  chunks: Collection<GridFSChunk>;
  /** A Collection instance where the file's GridFSFile document is stored */
  files: Collection<GridFSFile>;
  /** The name of the file */
  filename: string;
  /** Options controlling the metadata inserted along with the file */
  options: GridFSBucketWriteStreamOptions;
  /** Indicates the stream is finished uploading */
  done: boolean;
  /** The ObjectId used for the `_id` field on the GridFSFile document */
  id: ObjectId;
  /** The number of bytes that each chunk will be limited to */
  chunkSizeBytes: number;
  /** Space used to store a chunk currently being inserted */
  bufToStore: Buffer;
  /** Accumulates the number of bytes inserted as the stream uploads chunks */
  length: number;
  /** Accumulates the number of chunks inserted as the stream uploads file contents */
  n: number;
  /** Tracks the current offset into the buffered bytes being uploaded */
  pos: number;
  /** Contains a number of properties indicating the current state of the stream */
  state: {
    /** If set the stream has ended */
    streamEnd: boolean;
    /** Indicates the number of chunks that still need to be inserted to exhaust the current buffered data */
    outstandingRequests: number;
    /** If set an error occurred during insertion */
    errored: boolean;
    /** If set the stream was intentionally aborted */
    aborted: boolean;
  };
  /** The write concern setting to be used with every insert operation */
  writeConcern?: WriteConcern;
  /**
   * The document containing information about the inserted file.
   * This property is defined _after_ the finish event has been emitted.
   * It will remain `null` if an error occurs.
   *
   * @example
   * ```ts
   * fs.createReadStream('file.txt')
   *   .pipe(bucket.openUploadStream('file.txt'))
   *   .on('finish', function () {
   *     console.log(this.gridFSFile)
   *   })
   * ```
   */
  gridFSFile: GridFSFile | null = null;
  /** @internal */
  timeoutContext?: CSOTTimeoutContext;

  /**
   * @param bucket - Handle for this stream's corresponding bucket
   * @param filename - The value of the 'filename' key in the files doc
   * @param options - Optional settings.
   * @internal
   */
  constructor(bucket: GridFSBucket, filename: string, options?: GridFSBucketWriteStreamOptions) {
    super();

    options = options ?? {};
    this.bucket = bucket;
    this.chunks = bucket.s._chunksCollection;
    this.filename = filename;
    this.files = bucket.s._filesCollection;
    this.options = options;
    this.writeConcern = WriteConcern.fromOptions(options) || bucket.s.options.writeConcern;
    // Signals the write is all done
    this.done = false;

    this.id = options.id ? options.id : new ObjectId();
    // Properly inherit the default chunk size from the parent bucket
    this.chunkSizeBytes = options.chunkSizeBytes || this.bucket.s.options.chunkSizeBytes;
    this.bufToStore = Buffer.alloc(this.chunkSizeBytes);
    this.length = 0;
    this.n = 0;
    this.pos = 0;
    this.state = {
      streamEnd: false,
      outstandingRequests: 0,
      errored: false,
      aborted: false
    };

    if (options.timeoutMS != null)
      this.timeoutContext = new CSOTTimeoutContext({
        timeoutMS: options.timeoutMS,
        serverSelectionTimeoutMS: resolveTimeoutOptions(this.bucket.s.db.client, {})
          .serverSelectionTimeoutMS
      });
  }

  /**
   * @internal
   *
   * The stream is considered constructed when the indexes are done being created
   */
  override _construct(callback: (error?: Error | null) => void): void {
    if (!this.bucket.s.calledOpenUploadStream) {
      this.bucket.s.calledOpenUploadStream = true;

      checkIndexes(this).then(
        () => {
          this.bucket.s.checkedIndexes = true;
          this.bucket.emit('index');
          callback();
        },
        error => {
          if (error instanceof MongoOperationTimeoutError) {
            return handleError(this, error, callback);
          }
          squashError(error);
          callback();
        }
      );
    } else {
      return process.nextTick(callback);
    }
  }

  /**
   * @internal
   * Write a buffer to the stream.
   *
   * @param chunk - Buffer to write
   * @param encoding - Optional encoding for the buffer
   * @param callback - Function to call once the chunk has been added to the buffer, or once the entire chunk has been persisted to MongoDB if this chunk caused a flush.
   */
  override _write(
    chunk: Buffer | string,
    encoding: BufferEncoding,
    callback: Callback<void>
  ): void {
    doWrite(this, chunk, encoding, callback);
  }

  /** @internal */
  override _final(callback: (error?: Error | null) => void): void {
    if (this.state.streamEnd) {
      return process.nextTick(callback);
    }
    this.state.streamEnd = true;
    writeRemnant(this, callback);
  }

  /**
   * Places this write stream into an aborted state (all future writes fail)
   * and deletes all chunks that have already been written.
   */
  async abort(): Promise<void> {
    if (this.state.streamEnd) {
      // TODO(NODE-3485): Replace with MongoGridFSStreamClosed
      throw new MongoAPIError('Cannot abort a stream that has already completed');
    }

    if (this.state.aborted) {
      // TODO(NODE-3485): Replace with MongoGridFSStreamClosed
      throw new MongoAPIError('Cannot call abort() on a stream twice');
    }

    this.state.aborted = true;
    const remainingTimeMS = this.timeoutContext?.getRemainingTimeMSOrThrow(
      `Upload timed out after ${this.timeoutContext?.timeoutMS}ms`
    );

    await this.chunks.deleteMany({ files_id: this.id }, { timeoutMS: remainingTimeMS });
  }
}
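
// Editor's note — usage sketch, not part of the vendored driver source. Cancelling an
// in-progress upload removes any chunks already inserted (assumes `bucket` exists):
//
//   const upload = bucket.openUploadStream('big.bin');
//   // ...some writes later, the caller changes its mind:
//   await upload.abort(); // future writes now fail; written chunks are deleted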

function handleError(stream: GridFSBucketWriteStream, error: Error, callback: Callback): void {
  if (stream.state.errored) {
    process.nextTick(callback);
    return;
  }
  stream.state.errored = true;
  process.nextTick(callback, error);
}

function createChunkDoc(filesId: ObjectId, n: number, data: Buffer): GridFSChunk {
  return {
    _id: new ObjectId(),
    files_id: filesId,
    n,
    data
  };
}

async function checkChunksIndex(stream: GridFSBucketWriteStream): Promise<void> {
  const index = { files_id: 1, n: 1 };

  let remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
    `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
  );

  let indexes;
  try {
    indexes = await stream.chunks
      .listIndexes({
        timeoutMode: remainingTimeMS != null ? CursorTimeoutMode.LIFETIME : undefined,
        timeoutMS: remainingTimeMS
      })
      .toArray();
  } catch (error) {
    if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) {
      indexes = [];
    } else {
      throw error;
    }
  }

  const hasChunksIndex = !!indexes.find(index => {
    const keys = Object.keys(index.key);
    if (keys.length === 2 && index.key.files_id === 1 && index.key.n === 1) {
      return true;
    }
    return false;
  });

  if (!hasChunksIndex) {
    remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
      `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
    );
    await stream.chunks.createIndex(index, {
      ...stream.writeConcern,
      background: true,
      unique: true,
      timeoutMS: remainingTimeMS
    });
  }
}
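
// Editor's note — for reference only: the index ensured above is the standard GridFS
// chunks index; created by hand it would be the equivalent of
//
//   await db.collection('fs.chunks').createIndex({ files_id: 1, n: 1 }, { unique: true });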

function checkDone(stream: GridFSBucketWriteStream, callback: Callback): void {
  if (stream.done) {
    return process.nextTick(callback);
  }

  if (stream.state.streamEnd && stream.state.outstandingRequests === 0 && !stream.state.errored) {
    // Set done so we do not trigger duplicate createFilesDoc
    stream.done = true;
    // Create a new files doc
    const gridFSFile = createFilesDoc(
      stream.id,
      stream.length,
      stream.chunkSizeBytes,
      stream.filename,
      stream.options.contentType,
      stream.options.aliases,
      stream.options.metadata
    );

    if (isAborted(stream, callback)) {
      return;
    }

    const remainingTimeMS = stream.timeoutContext?.remainingTimeMS;
    if (remainingTimeMS != null && remainingTimeMS <= 0) {
      return handleError(
        stream,
        new MongoOperationTimeoutError(
          `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
        ),
        callback
      );
    }

    stream.files
      .insertOne(gridFSFile, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS })
      .then(
        () => {
          stream.gridFSFile = gridFSFile;
          callback();
        },
        error => {
          return handleError(stream, error, callback);
        }
      );
    return;
  }

  process.nextTick(callback);
}

async function checkIndexes(stream: GridFSBucketWriteStream): Promise<void> {
  let remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
    `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
  );
  const doc = await stream.files.findOne(
    {},
    {
      projection: { _id: 1 },
      timeoutMS: remainingTimeMS
    }
  );
  if (doc != null) {
    // If at least one document exists, assume the collection has the required index
    return;
  }

  const index = { filename: 1, uploadDate: 1 };

  let indexes;
  remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
    `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
  );
  const listIndexesOptions = {
    timeoutMode: remainingTimeMS != null ? CursorTimeoutMode.LIFETIME : undefined,
    timeoutMS: remainingTimeMS
  };
  try {
    indexes = await stream.files.listIndexes(listIndexesOptions).toArray();
  } catch (error) {
    if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) {
      indexes = [];
    } else {
      throw error;
    }
  }

  const hasFileIndex = !!indexes.find(index => {
    const keys = Object.keys(index.key);
    if (keys.length === 2 && index.key.filename === 1 && index.key.uploadDate === 1) {
      return true;
    }
    return false;
  });

  if (!hasFileIndex) {
    remainingTimeMS = stream.timeoutContext?.getRemainingTimeMSOrThrow(
      `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
    );

    await stream.files.createIndex(index, { background: false, timeoutMS: remainingTimeMS });
  }

  await checkChunksIndex(stream);
}

function createFilesDoc(
  _id: ObjectId,
  length: number,
  chunkSize: number,
  filename: string,
  contentType?: string,
  aliases?: string[],
  metadata?: Document
): GridFSFile {
  const ret: GridFSFile = {
    _id,
    length,
    chunkSize,
    uploadDate: new Date(),
    filename
  };

  if (contentType) {
    ret.contentType = contentType;
  }

  if (aliases) {
    ret.aliases = aliases;
  }

  if (metadata) {
    ret.metadata = metadata;
  }

  return ret;
}

function doWrite(
  stream: GridFSBucketWriteStream,
  chunk: Buffer | string,
  encoding: BufferEncoding,
  callback: Callback<void>
): void {
  if (isAborted(stream, callback)) {
    return;
  }

  const inputBuf = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk, encoding);

  stream.length += inputBuf.length;

  // Input is small enough to fit in our buffer
  if (stream.pos + inputBuf.length < stream.chunkSizeBytes) {
    inputBuf.copy(stream.bufToStore, stream.pos);
    stream.pos += inputBuf.length;
    process.nextTick(callback);
    return;
  }

  // Otherwise, buffer is too big for current chunk, so we need to flush
  // to MongoDB.
  let inputBufRemaining = inputBuf.length;
  let spaceRemaining: number = stream.chunkSizeBytes - stream.pos;
  let numToCopy = Math.min(spaceRemaining, inputBuf.length);
  let outstandingRequests = 0;
  while (inputBufRemaining > 0) {
    const inputBufPos = inputBuf.length - inputBufRemaining;
    inputBuf.copy(stream.bufToStore, stream.pos, inputBufPos, inputBufPos + numToCopy);
    stream.pos += numToCopy;
    spaceRemaining -= numToCopy;
    let doc: GridFSChunk;
    if (spaceRemaining === 0) {
      doc = createChunkDoc(stream.id, stream.n, Buffer.from(stream.bufToStore));

      const remainingTimeMS = stream.timeoutContext?.remainingTimeMS;
      if (remainingTimeMS != null && remainingTimeMS <= 0) {
        return handleError(
          stream,
          new MongoOperationTimeoutError(
            `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
          ),
          callback
        );
      }

      ++stream.state.outstandingRequests;
      ++outstandingRequests;

      if (isAborted(stream, callback)) {
        return;
      }

      stream.chunks
        .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS })
        .then(
          () => {
            --stream.state.outstandingRequests;
            --outstandingRequests;

            if (!outstandingRequests) {
              checkDone(stream, callback);
            }
          },
          error => {
            return handleError(stream, error, callback);
          }
        );

      spaceRemaining = stream.chunkSizeBytes;
      stream.pos = 0;
      ++stream.n;
    }
    inputBufRemaining -= numToCopy;
    numToCopy = Math.min(spaceRemaining, inputBufRemaining);
  }
}
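
// Editor's note — worked example of the flush logic above, not driver source: with the
// default chunkSizeBytes of 255 * 1024 = 261120, writing a single 600 KiB (614400-byte)
// buffer inserts two full chunks (n = 0 and n = 1, 522240 bytes total) inside the while
// loop, then leaves pos = 614400 - 522240 = 92160 buffered bytes for writeRemnant
// (below) to insert as the final, smaller chunk when the stream ends.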

function writeRemnant(stream: GridFSBucketWriteStream, callback: Callback): void {
  // Buffer is empty, so don't bother to insert
  if (stream.pos === 0) {
    return checkDone(stream, callback);
  }

  // Create a new buffer to make sure the buffer isn't bigger than it needs
  // to be.
  const remnant = Buffer.alloc(stream.pos);
  stream.bufToStore.copy(remnant, 0, 0, stream.pos);
  const doc = createChunkDoc(stream.id, stream.n, remnant);

  // If the stream was aborted, do not write remnant
  if (isAborted(stream, callback)) {
    return;
  }

  const remainingTimeMS = stream.timeoutContext?.remainingTimeMS;
  if (remainingTimeMS != null && remainingTimeMS <= 0) {
    return handleError(
      stream,
      new MongoOperationTimeoutError(
        `Upload timed out after ${stream.timeoutContext?.timeoutMS}ms`
      ),
      callback
    );
  }
  ++stream.state.outstandingRequests;
  stream.chunks
    .insertOne(doc, { writeConcern: stream.writeConcern, timeoutMS: remainingTimeMS })
    .then(
      () => {
        --stream.state.outstandingRequests;
        checkDone(stream, callback);
      },
      error => {
        return handleError(stream, error, callback);
      }
    );
}

function isAborted(stream: GridFSBucketWriteStream, callback: Callback<void>): boolean {
  if (stream.state.aborted) {
    process.nextTick(callback, new MongoAPIError('Stream has been aborted'));
    return true;
  }
  return false;
}