// download.js
"use strict";
// Mark this module as transpiled-ESM for CommonJS interop and declare the
// named export (assigned at the bottom of the file).
Object.defineProperty(exports, "__esModule", { value: true });
exports.GridFSBucketReadStream = void 0;
const stream_1 = require("stream");
const error_1 = require("../error");
  6. /**
  7. * A readable stream that enables you to read buffers from GridFS.
  8. *
  9. * Do not instantiate this class directly. Use `openDownloadStream()` instead.
  10. * @public
  11. */
  12. class GridFSBucketReadStream extends stream_1.Readable {
  13. /**
  14. * @param chunks - Handle for chunks collection
  15. * @param files - Handle for files collection
  16. * @param readPreference - The read preference to use
  17. * @param filter - The filter to use to find the file document
  18. * @internal
  19. */
  20. constructor(chunks, files, readPreference, filter, options) {
  21. super();
  22. this.s = {
  23. bytesToTrim: 0,
  24. bytesToSkip: 0,
  25. bytesRead: 0,
  26. chunks,
  27. expected: 0,
  28. files,
  29. filter,
  30. init: false,
  31. expectedEnd: 0,
  32. options: {
  33. start: 0,
  34. end: 0,
  35. ...options
  36. },
  37. readPreference
  38. };
  39. }
  40. /**
  41. * Reads from the cursor and pushes to the stream.
  42. * Private Impl, do not call directly
  43. * @internal
  44. */
  45. _read() {
  46. if (this.destroyed)
  47. return;
  48. waitForFile(this, () => doRead(this));
  49. }
  50. /**
  51. * Sets the 0-based offset in bytes to start streaming from. Throws
  52. * an error if this stream has entered flowing mode
  53. * (e.g. if you've already called `on('data')`)
  54. *
  55. * @param start - 0-based offset in bytes to start streaming from
  56. */
  57. start(start = 0) {
  58. throwIfInitialized(this);
  59. this.s.options.start = start;
  60. return this;
  61. }
  62. /**
  63. * Sets the 0-based offset in bytes to start streaming from. Throws
  64. * an error if this stream has entered flowing mode
  65. * (e.g. if you've already called `on('data')`)
  66. *
  67. * @param end - Offset in bytes to stop reading at
  68. */
  69. end(end = 0) {
  70. throwIfInitialized(this);
  71. this.s.options.end = end;
  72. return this;
  73. }
  74. /**
  75. * Marks this stream as aborted (will never push another `data` event)
  76. * and kills the underlying cursor. Will emit the 'end' event, and then
  77. * the 'close' event once the cursor is successfully killed.
  78. */
  79. async abort() {
  80. this.push(null);
  81. this.destroyed = true;
  82. if (this.s.cursor) {
  83. try {
  84. await this.s.cursor.close();
  85. }
  86. finally {
  87. this.emit(GridFSBucketReadStream.CLOSE);
  88. }
  89. }
  90. else {
  91. if (!this.s.init) {
  92. // If not initialized, fire close event because we will never
  93. // get a cursor
  94. this.emit(GridFSBucketReadStream.CLOSE);
  95. }
  96. }
  97. }
  98. }
/**
 * An error occurred
 * @event
 */
GridFSBucketReadStream.ERROR = 'error';
/**
 * Fires when the stream loaded the file document corresponding to the provided id.
 * @event
 */
GridFSBucketReadStream.FILE = 'file';
/**
 * Emitted when a chunk of data is available to be consumed.
 * @event
 */
GridFSBucketReadStream.DATA = 'data';
/**
 * Fired when the stream is exhausted (no more data events).
 * @event
 */
GridFSBucketReadStream.END = 'end';
/**
 * Fired when the stream is exhausted and the underlying cursor is killed
 * @event
 */
GridFSBucketReadStream.CLOSE = 'close';
// Populate the named export declared in the module prelude.
exports.GridFSBucketReadStream = GridFSBucketReadStream;
  125. function throwIfInitialized(stream) {
  126. if (stream.s.init) {
  127. throw new error_1.MongoGridFSStreamError('Options cannot be changed after the stream is initialized');
  128. }
  129. }
/**
 * Pulls the next chunk document from the chunks cursor, validates its
 * sequence number (`n`) and size, applies start/end byte slicing, and pushes
 * the resulting buffer into the stream. Private implementation behind
 * `_read()`; returns early if the stream is destroyed or init() has not yet
 * produced a cursor and file document.
 * @internal
 */
function doRead(stream) {
    if (stream.destroyed)
        return;
    if (!stream.s.cursor)
        return;
    if (!stream.s.file)
        return;
    // Shared continuation for both the fulfilled and rejected cursor results.
    const handleReadResult = ({ error, doc }) => {
        if (stream.destroyed) {
            return;
        }
        if (error) {
            stream.emit(GridFSBucketReadStream.ERROR, error);
            return;
        }
        if (!doc) {
            // Cursor exhausted: signal EOF, then close the cursor and emit
            // 'close' (or 'error' if closing fails).
            stream.push(null);
            stream.s.cursor?.close().then(() => {
                stream.emit(GridFSBucketReadStream.CLOSE);
            }, error => {
                stream.emit(GridFSBucketReadStream.ERROR, error);
            });
            return;
        }
        if (!stream.s.file)
            return;
        const bytesRemaining = stream.s.file.length - stream.s.bytesRead;
        // `expected` is the 0-based sequence number of the chunk we should
        // receive next; the post-increment reserves it for this read.
        const expectedN = stream.s.expected++;
        // Every chunk is chunkSize bytes, except possibly the final one.
        const expectedLength = Math.min(stream.s.file.chunkSize, bytesRemaining);
        if (doc.n > expectedN) {
            // A chunk is missing from the sequence.
            return stream.emit(GridFSBucketReadStream.ERROR, new error_1.MongoGridFSChunkError(`ChunkIsMissing: Got unexpected n: ${doc.n}, expected: ${expectedN}`));
        }
        if (doc.n < expectedN) {
            // A chunk arrived twice / out of order.
            return stream.emit(GridFSBucketReadStream.ERROR, new error_1.MongoGridFSChunkError(`ExtraChunk: Got unexpected n: ${doc.n}, expected: ${expectedN}`));
        }
        // `data` is either a Buffer or a BSON Binary (use its backing buffer).
        let buf = Buffer.isBuffer(doc.data) ? doc.data : doc.data.buffer;
        if (buf.byteLength !== expectedLength) {
            if (bytesRemaining <= 0) {
                // We already consumed the advertised file length, yet chunks keep coming.
                return stream.emit(GridFSBucketReadStream.ERROR, new error_1.MongoGridFSChunkError(`ExtraChunk: Got unexpected n: ${doc.n}, expected file length ${stream.s.file.length} bytes but already read ${stream.s.bytesRead} bytes`));
            }
            return stream.emit(GridFSBucketReadStream.ERROR, new error_1.MongoGridFSChunkError(`ChunkIsWrongSize: Got unexpected length: ${buf.byteLength}, expected: ${expectedLength}`));
        }
        stream.s.bytesRead += buf.byteLength;
        if (buf.byteLength === 0) {
            return stream.push(null);
        }
        let sliceStart = null;
        let sliceEnd = null;
        // Apply any not-yet-consumed `start` offset; it is zeroed after use so
        // subsequent chunks slice from 0.
        if (stream.s.bytesToSkip != null) {
            sliceStart = stream.s.bytesToSkip;
            stream.s.bytesToSkip = 0;
        }
        const atEndOfStream = expectedN === stream.s.expectedEnd - 1;
        // NOTE(review): s.bytesToSkip was zeroed just above whenever it was
        // non-null, so this usually equals options.end. This mirrors the
        // original code; confirm intent before changing.
        const bytesLeftToRead = stream.s.options.end - stream.s.bytesToSkip;
        if (atEndOfStream && stream.s.bytesToTrim != null) {
            // Final chunk of the requested range: trim its tail down to `end`.
            sliceEnd = stream.s.file.chunkSize - stream.s.bytesToTrim;
        }
        else if (stream.s.options.end && bytesLeftToRead < doc.data.byteLength) {
            sliceEnd = bytesLeftToRead;
        }
        if (sliceStart != null || sliceEnd != null) {
            buf = buf.slice(sliceStart || 0, sliceEnd || buf.byteLength);
        }
        stream.push(buf);
        return;
    };
    stream.s.cursor.next().then(doc => handleReadResult({ error: null, doc }), error => handleReadResult({ error, doc: null }));
}
/**
 * Looks up the file document matching `stream.s.filter` in the files
 * collection, validates and applies the start/end options, and creates the
 * chunks cursor. Emits 'file' on success (waking reads parked in
 * waitForFile) or 'error' on failure.
 * @internal
 */
function init(stream) {
    const findOneOptions = {};
    if (stream.s.readPreference) {
        findOneOptions.readPreference = stream.s.readPreference;
    }
    if (stream.s.options && stream.s.options.sort) {
        findOneOptions.sort = stream.s.options.sort;
    }
    if (stream.s.options && stream.s.options.skip) {
        findOneOptions.skip = stream.s.options.skip;
    }
    // Shared continuation for both the fulfilled and rejected findOne results.
    const handleReadResult = ({ error, doc }) => {
        if (error) {
            return stream.emit(GridFSBucketReadStream.ERROR, error);
        }
        if (!doc) {
            // Prefer the _id for the message; fall back to the filename.
            const identifier = stream.s.filter._id
                ? stream.s.filter._id.toString()
                : stream.s.filter.filename;
            const errmsg = `FileNotFound: file ${identifier} was not found`;
            // TODO(NODE-3483)
            const err = new error_1.MongoRuntimeError(errmsg);
            err.code = 'ENOENT'; // TODO: NODE-3338 set property as part of constructor
            return stream.emit(GridFSBucketReadStream.ERROR, err);
        }
        // If document is empty, kill the stream immediately and don't
        // execute any reads
        if (doc.length <= 0) {
            stream.push(null);
            return;
        }
        if (stream.destroyed) {
            // If user destroys the stream before we have a cursor, wait
            // until the query is done to say we're 'closed' because we can't
            // cancel a query.
            stream.emit(GridFSBucketReadStream.CLOSE);
            return;
        }
        try {
            // Bytes to drop from the front of the first chunk we read.
            stream.s.bytesToSkip = handleStartOption(stream, doc, stream.s.options);
        }
        catch (error) {
            return stream.emit(GridFSBucketReadStream.ERROR, error);
        }
        const filter = { files_id: doc._id };
        // Currently (MongoDB 3.4.4) skip function does not support the index,
        // it needs to retrieve all the documents first and then skip them. (CS-25811)
        // As work around we use $gte on the "n" field.
        if (stream.s.options && stream.s.options.start != null) {
            const skip = Math.floor(stream.s.options.start / doc.chunkSize);
            if (skip > 0) {
                filter['n'] = { $gte: skip };
            }
        }
        stream.s.cursor = stream.s.chunks.find(filter).sort({ n: 1 });
        if (stream.s.readPreference) {
            stream.s.cursor.withReadPreference(stream.s.readPreference);
        }
        // Number of chunks the whole file spans; handleEndOption may lower it.
        stream.s.expectedEnd = Math.ceil(doc.length / doc.chunkSize);
        stream.s.file = doc;
        try {
            // Bytes to trim from the tail of the last chunk we read.
            stream.s.bytesToTrim = handleEndOption(stream, doc, stream.s.cursor, stream.s.options);
        }
        catch (error) {
            return stream.emit(GridFSBucketReadStream.ERROR, error);
        }
        stream.emit(GridFSBucketReadStream.FILE, doc);
        return;
    };
    stream.s.files.findOne(stream.s.filter, findOneOptions).then(doc => handleReadResult({ error: null, doc }), error => handleReadResult({ error, doc: null }));
}
  269. function waitForFile(stream, callback) {
  270. if (stream.s.file) {
  271. return callback();
  272. }
  273. if (!stream.s.init) {
  274. init(stream);
  275. stream.s.init = true;
  276. }
  277. stream.once('file', () => {
  278. callback();
  279. });
  280. }
  281. function handleStartOption(stream, doc, options) {
  282. if (options && options.start != null) {
  283. if (options.start > doc.length) {
  284. throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be more than the length of the file (${doc.length})`);
  285. }
  286. if (options.start < 0) {
  287. throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be negative`);
  288. }
  289. if (options.end != null && options.end < options.start) {
  290. throw new error_1.MongoInvalidArgumentError(`Stream start (${options.start}) must not be greater than stream end (${options.end})`);
  291. }
  292. stream.s.bytesRead = Math.floor(options.start / doc.chunkSize) * doc.chunkSize;
  293. stream.s.expected = Math.floor(options.start / doc.chunkSize);
  294. return options.start - stream.s.bytesRead;
  295. }
  296. throw new error_1.MongoInvalidArgumentError('Start option must be defined');
  297. }
  298. function handleEndOption(stream, doc, cursor, options) {
  299. if (options && options.end != null) {
  300. if (options.end > doc.length) {
  301. throw new error_1.MongoInvalidArgumentError(`Stream end (${options.end}) must not be more than the length of the file (${doc.length})`);
  302. }
  303. if (options.start == null || options.start < 0) {
  304. throw new error_1.MongoInvalidArgumentError(`Stream end (${options.end}) must not be negative`);
  305. }
  306. const start = options.start != null ? Math.floor(options.start / doc.chunkSize) : 0;
  307. cursor.limit(Math.ceil(options.end / doc.chunkSize) - start);
  308. stream.s.expectedEnd = Math.ceil(options.end / doc.chunkSize);
  309. return Math.ceil(options.end / doc.chunkSize) * doc.chunkSize - options.end;
  310. }
  311. throw new error_1.MongoInvalidArgumentError('End option must be defined');
  312. }
//# sourceMappingURL=download.js.map