  1. "use strict";
  2. Object.defineProperty(exports, "__esModule", { value: true });
  3. exports.GridFSBucketReadStream = void 0;
  4. const stream_1 = require("stream");
  5. const error_1 = require("../error");
  6. /**
  7. * A readable stream that enables you to read buffers from GridFS.
  8. *
  9. * Do not instantiate this class directly. Use `openDownloadStream()` instead.
  10. * @public
  11. */
  12. class GridFSBucketReadStream extends stream_1.Readable {
  13. /**
  14. * @param chunks - Handle for chunks collection
  15. * @param files - Handle for files collection
  16. * @param readPreference - The read preference to use
  17. * @param filter - The filter to use to find the file document
  18. * @internal
  19. */
  20. constructor(chunks, files, readPreference, filter, options) {
  21. super({ emitClose: true });
  22. this.s = {
  23. bytesToTrim: 0,
  24. bytesToSkip: 0,
  25. bytesRead: 0,
  26. chunks,
  27. expected: 0,
  28. files,
  29. filter,
  30. init: false,
  31. expectedEnd: 0,
  32. options: {
  33. start: 0,
  34. end: 0,
  35. ...options
  36. },
  37. readPreference
  38. };
  39. }
  40. /**
  41. * Reads from the cursor and pushes to the stream.
  42. * Private Impl, do not call directly
  43. * @internal
  44. */
  45. _read() {
  46. if (this.destroyed)
  47. return;
  48. waitForFile(this, () => doRead(this));
  49. }
  50. /**
  51. * Sets the 0-based offset in bytes to start streaming from. Throws
  52. * an error if this stream has entered flowing mode
  53. * (e.g. if you've already called `on('data')`)
  54. *
  55. * @param start - 0-based offset in bytes to start streaming from
  56. */
  57. start(start = 0) {
  58. throwIfInitialized(this);
  59. this.s.options.start = start;
  60. return this;
  61. }
  62. /**
  63. * Sets the 0-based offset in bytes to start streaming from. Throws
  64. * an error if this stream has entered flowing mode
  65. * (e.g. if you've already called `on('data')`)
  66. *
  67. * @param end - Offset in bytes to stop reading at
  68. */
  69. end(end = 0) {
  70. throwIfInitialized(this);
  71. this.s.options.end = end;
  72. return this;
  73. }
  74. /**
  75. * Marks this stream as aborted (will never push another `data` event)
  76. * and kills the underlying cursor. Will emit the 'end' event, and then
  77. * the 'close' event once the cursor is successfully killed.
  78. */
  79. async abort() {
  80. this.push(null);
  81. this.destroy();
  82. await this.s.cursor?.close();
  83. }
  84. }
  85. /**
  86. * Fires when the stream loaded the file document corresponding to the provided id.
  87. * @event
  88. */
  89. GridFSBucketReadStream.FILE = 'file';
  90. exports.GridFSBucketReadStream = GridFSBucketReadStream;
  91. function throwIfInitialized(stream) {
  92. if (stream.s.init) {
  93. throw new error_1.MongoGridFSStreamError('Options cannot be changed after the stream is initialized');
  94. }
  95. }
/**
 * Pulls the next chunk document from the cursor, validates its sequence
 * number (`n`) and size, and pushes a (possibly sliced) buffer onto the
 * stream. Only meaningful after `init` has populated `s.file` and `s.cursor`.
 */
function doRead(stream) {
    if (stream.destroyed)
        return;
    if (!stream.s.cursor)
        return;
    if (!stream.s.file)
        return;
    const handleReadResult = ({ error, doc }) => {
        if (stream.destroyed) {
            return;
        }
        if (error) {
            stream.destroy(error);
            return;
        }
        if (!doc) {
            // Cursor exhausted: signal EOF, then close the cursor in the background.
            stream.push(null);
            stream.s.cursor?.close().then(() => null, error => stream.destroy(error));
            return;
        }
        if (!stream.s.file)
            return;
        const bytesRemaining = stream.s.file.length - stream.s.bytesRead;
        // Post-increment: `expectedN` is this chunk's required index.
        const expectedN = stream.s.expected++;
        const expectedLength = Math.min(stream.s.file.chunkSize, bytesRemaining);
        // Chunks must arrive strictly in order; a gap or a repeat means the
        // stored file is corrupt.
        if (doc.n > expectedN) {
            return stream.destroy(new error_1.MongoGridFSChunkError(`ChunkIsMissing: Got unexpected n: ${doc.n}, expected: ${expectedN}`));
        }
        if (doc.n < expectedN) {
            return stream.destroy(new error_1.MongoGridFSChunkError(`ExtraChunk: Got unexpected n: ${doc.n}, expected: ${expectedN}`));
        }
        // `doc.data` may be a BSON Binary wrapper; unwrap to a raw Buffer.
        let buf = Buffer.isBuffer(doc.data) ? doc.data : doc.data.buffer;
        if (buf.byteLength !== expectedLength) {
            if (bytesRemaining <= 0) {
                return stream.destroy(new error_1.MongoGridFSChunkError(`ExtraChunk: Got unexpected n: ${doc.n}, expected file length ${stream.s.file.length} bytes but already read ${stream.s.bytesRead} bytes`));
            }
            return stream.destroy(new error_1.MongoGridFSChunkError(`ChunkIsWrongSize: Got unexpected length: ${buf.byteLength}, expected: ${expectedLength}`));
        }
        stream.s.bytesRead += buf.byteLength;
        if (buf.byteLength === 0) {
            return stream.push(null);
        }
        let sliceStart = null;
        let sliceEnd = null;
        // Honor a `start` offset that falls inside the first chunk read; the
        // skip is consumed once and then zeroed for subsequent chunks.
        if (stream.s.bytesToSkip != null) {
            sliceStart = stream.s.bytesToSkip;
            stream.s.bytesToSkip = 0;
        }
        const atEndOfStream = expectedN === stream.s.expectedEnd - 1;
        // NOTE(review): `bytesToSkip` was just reset to 0 above, so this is
        // effectively `options.end` — presumably intentional; confirm upstream.
        const bytesLeftToRead = stream.s.options.end - stream.s.bytesToSkip;
        if (atEndOfStream && stream.s.bytesToTrim != null) {
            // Trim the tail of the final chunk down to the requested `end`.
            sliceEnd = stream.s.file.chunkSize - stream.s.bytesToTrim;
        }
        else if (stream.s.options.end && bytesLeftToRead < doc.data.byteLength) {
            sliceEnd = bytesLeftToRead;
        }
        if (sliceStart != null || sliceEnd != null) {
            buf = buf.slice(sliceStart || 0, sliceEnd || buf.byteLength);
        }
        stream.push(buf);
        return;
    };
    stream.s.cursor.next().then(doc => handleReadResult({ error: null, doc }), error => handleReadResult({ error, doc: null }));
}
/**
 * Loads the file document matching `s.filter`, applies the start/end
 * options, builds the sorted chunk cursor, and emits the 'file' event
 * that unblocks reads parked in `waitForFile`.
 */
function init(stream) {
    const findOneOptions = {};
    if (stream.s.readPreference) {
        findOneOptions.readPreference = stream.s.readPreference;
    }
    if (stream.s.options && stream.s.options.sort) {
        findOneOptions.sort = stream.s.options.sort;
    }
    if (stream.s.options && stream.s.options.skip) {
        findOneOptions.skip = stream.s.options.skip;
    }
    const handleReadResult = ({ error, doc }) => {
        if (error) {
            return stream.destroy(error);
        }
        if (!doc) {
            // No matching file: surface an ENOENT-style error identifying the
            // file by _id when available, otherwise by filename.
            const identifier = stream.s.filter._id
                ? stream.s.filter._id.toString()
                : stream.s.filter.filename;
            const errmsg = `FileNotFound: file ${identifier} was not found`;
            // TODO(NODE-3483)
            const err = new error_1.MongoRuntimeError(errmsg);
            err.code = 'ENOENT'; // TODO: NODE-3338 set property as part of constructor
            return stream.destroy(err);
        }
        // If document is empty, kill the stream immediately and don't
        // execute any reads
        if (doc.length <= 0) {
            stream.push(null);
            return;
        }
        if (stream.destroyed) {
            // If user destroys the stream before we have a cursor, wait
            // until the query is done to say we're 'closed' because we can't
            // cancel a query.
            stream.destroy();
            return;
        }
        try {
            stream.s.bytesToSkip = handleStartOption(stream, doc, stream.s.options);
        }
        catch (error) {
            return stream.destroy(error);
        }
        const filter = { files_id: doc._id };
        // Currently (MongoDB 3.4.4) skip function does not support the index,
        // it needs to retrieve all the documents first and then skip them. (CS-25811)
        // As work around we use $gte on the "n" field.
        if (stream.s.options && stream.s.options.start != null) {
            const skip = Math.floor(stream.s.options.start / doc.chunkSize);
            if (skip > 0) {
                filter['n'] = { $gte: skip };
            }
        }
        stream.s.cursor = stream.s.chunks.find(filter).sort({ n: 1 });
        if (stream.s.readPreference) {
            stream.s.cursor.withReadPreference(stream.s.readPreference);
        }
        stream.s.expectedEnd = Math.ceil(doc.length / doc.chunkSize);
        stream.s.file = doc;
        try {
            // May shrink `expectedEnd` and limit the cursor when `end` is set.
            stream.s.bytesToTrim = handleEndOption(stream, doc, stream.s.cursor, stream.s.options);
        }
        catch (error) {
            return stream.destroy(error);
        }
        // Wakes any 'file' listeners registered by `waitForFile`.
        stream.emit(GridFSBucketReadStream.FILE, doc);
        return;
    };
    stream.s.files.findOne(stream.s.filter, findOneOptions).then(doc => handleReadResult({ error: null, doc }), error => handleReadResult({ error, doc: null }));
}
  231. function waitForFile(stream, callback) {
  232. if (stream.s.file) {
  233. return callback();
  234. }
  235. if (!stream.s.init) {
  236. init(stream);
  237. stream.s.init = true;
  238. }
  239. stream.once('file', () => {
  240. callback();
  241. });
  242. }
  243. function handleStartOption(stream, doc, options) {
  244. if (options && options.start != null) {
  245. if (options.start > doc.length) {
  246. throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be more than the length of the file (${doc.length})`);
  247. }
  248. if (options.start < 0) {
  249. throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be negative`);
  250. }
  251. if (options.end != null && options.end < options.start) {
  252. throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be greater than stream end (${options.end})`);
  253. }
  254. stream.s.bytesRead = Math.floor(options.start / doc.chunkSize) * doc.chunkSize;
  255. stream.s.expected = Math.floor(options.start / doc.chunkSize);
  256. return options.start - stream.s.bytesRead;
  257. }
  258. throw new error_1.MongoInvalidArgumentError('Start option must be defined');
  259. }
  260. function handleEndOption(stream, doc, cursor, options) {
  261. if (options && options.end != null) {
  262. if (options.end > doc.length) {
  263. throw new error_1.MongoInvalidArgumentError(`Stream end (${options.end}) must not be more than the length of the file (${doc.length})`);
  264. }
  265. if (options.start == null || options.start < 0) {
  266. throw new error_1.MongoInvalidArgumentError(`Stream end (${options.end}) must not be negative`);
  267. }
  268. const start = options.start != null ? Math.floor(options.start / doc.chunkSize) : 0;
  269. cursor.limit(Math.ceil(options.end / doc.chunkSize) - start);
  270. stream.s.expectedEnd = Math.ceil(options.end / doc.chunkSize);
  271. return Math.ceil(options.end / doc.chunkSize) * doc.chunkSize - options.end;
  272. }
  273. throw new error_1.MongoInvalidArgumentError('End option must be defined');
  274. }
  275. //# sourceMappingURL=download.js.map