diff --git a/benchmark/fs/bench-filehandle-pipetosync.js b/benchmark/fs/bench-filehandle-pipetosync.js new file mode 100644 index 00000000000000..475b0d0240e5b9 --- /dev/null +++ b/benchmark/fs/bench-filehandle-pipetosync.js @@ -0,0 +1,104 @@ +// Flags: --experimental-stream-iter +// Benchmark: pipeToSync with sync compression transforms. +// Measures fully synchronous file-to-file pipeline (no threadpool, no promises). +'use strict'; + +const common = require('../common.js'); +const fs = require('fs'); +const { openSync, closeSync, writeSync, readFileSync, unlinkSync } = fs; + +const tmpdir = require('../../test/common/tmpdir'); +tmpdir.refresh(); +const srcFile = tmpdir.resolve(`.removeme-sync-bench-src-${process.pid}`); +const dstFile = tmpdir.resolve(`.removeme-sync-bench-dst-${process.pid}`); + +const bench = common.createBenchmark(main, { + compression: ['gzip', 'deflate', 'brotli', 'zstd'], + filesize: [1024 * 1024, 16 * 1024 * 1024, 64 * 1024 * 1024], + n: [5], +}); + +function main({ compression, filesize, n }) { + // Create the fixture file with repeating lowercase ASCII + const chunk = Buffer.alloc(Math.min(filesize, 64 * 1024), 'abcdefghij'); + const fd = openSync(srcFile, 'w'); + let remaining = filesize; + while (remaining > 0) { + const toWrite = Math.min(remaining, chunk.length); + writeSync(fd, chunk, 0, toWrite); + remaining -= toWrite; + } + closeSync(fd); + + const { + pipeToSync, + compressGzipSync, + compressDeflateSync, + compressBrotliSync, + compressZstdSync, + } = require('stream/iter'); + const { open } = fs.promises; + + const compressFactory = { + gzip: compressGzipSync, + deflate: compressDeflateSync, + brotli: compressBrotliSync, + zstd: compressZstdSync, + }[compression]; + + // Stateless uppercase transform (sync) + const upper = (chunks) => { + if (chunks === null) return null; + const out = new Array(chunks.length); + for (let j = 0; j < chunks.length; j++) { + const src = chunks[j]; + const buf = Buffer.allocUnsafe(src.length); + 
for (let i = 0; i < src.length; i++) { + const b = src[i]; + buf[i] = (b >= 0x61 && b <= 0x7a) ? b - 0x20 : b; + } + out[j] = buf; + } + return out; + }; + + // Use a synchronous wrapper since pipeToSync is fully sync. + // We need FileHandle for pullSync/writer, so open async then run sync. + (async () => { + const srcFh = await open(srcFile, 'r'); + const dstFh = await open(dstFile, 'w'); + + // Warm up + runSync(srcFh, dstFh, upper, compressFactory, pipeToSync); + + // Reset file positions for the benchmark + await srcFh.close(); + await dstFh.close(); + + bench.start(); + let totalBytes = 0; + for (let i = 0; i < n; i++) { + const src = await open(srcFile, 'r'); + const dst = await open(dstFile, 'w'); + totalBytes += runSync(src, dst, upper, compressFactory, pipeToSync); + await src.close(); + await dst.close(); + } + bench.end(totalBytes / (1024 * 1024)); + + cleanup(); + })(); +} + +function runSync(srcFh, dstFh, upper, compressFactory, pipeToSync) { + const w = dstFh.writer(); + pipeToSync(srcFh.pullSync(upper, compressFactory()), w); + + // Read back compressed size + return readFileSync(dstFile).length; +} + +function cleanup() { + try { unlinkSync(srcFile); } catch { /* Ignore */ } + try { unlinkSync(dstFile); } catch { /* Ignore */ } +} diff --git a/benchmark/fs/bench-filehandle-pull-vs-webstream.js b/benchmark/fs/bench-filehandle-pull-vs-webstream.js index 5d1bd56e441cbf..6fe850d1286255 100644 --- a/benchmark/fs/bench-filehandle-pull-vs-webstream.js +++ b/benchmark/fs/bench-filehandle-pull-vs-webstream.js @@ -1,6 +1,6 @@ // Flags: --experimental-stream-iter // Compare FileHandle.createReadStream() vs readableWebStream() vs pull() -// reading a large file through two transforms: uppercase then gzip compress. +// reading a large file through two transforms: uppercase then compress. 
'use strict'; const common = require('../common.js'); @@ -14,11 +14,20 @@ const filename = tmpdir.resolve(`.removeme-benchmark-garbage-${process.pid}`); const bench = common.createBenchmark(main, { api: ['classic', 'webstream', 'pull'], + compression: ['gzip', 'deflate', 'brotli', 'zstd'], filesize: [1024 * 1024, 16 * 1024 * 1024, 64 * 1024 * 1024], n: [5], +}, { + // Classic and webstream only support gzip (native zlib / CompressionStream). + // Brotli, deflate, zstd are pull-only via stream/iter transforms. + combinationFilter({ api, compression }) { + if (api === 'classic' && compression !== 'gzip') return false; + if (api === 'webstream' && compression !== 'gzip') return false; + return true; + }, }); -function main({ api, filesize, n }) { +function main({ api, compression, filesize, n }) { // Create the fixture file with repeating lowercase ASCII const chunk = Buffer.alloc(Math.min(filesize, 64 * 1024), 'abcdefghij'); const fd = fs.openSync(filename, 'w'); @@ -35,7 +44,7 @@ function main({ api, filesize, n }) { } else if (api === 'webstream') { benchWebStream(n, filesize).then(() => cleanup()); } else { - benchPull(n, filesize).then(() => cleanup()); + benchPull(n, filesize, compression).then(() => cleanup()); } } @@ -43,11 +52,20 @@ function cleanup() { try { fs.unlinkSync(filename); } catch { /* ignore */ } } +// Stateless uppercase transform (shared by all paths) +function uppercaseChunk(chunk) { + const buf = Buffer.allocUnsafe(chunk.length); + for (let i = 0; i < chunk.length; i++) { + const b = chunk[i]; + buf[i] = (b >= 0x61 && b <= 0x7a) ? 
b - 0x20 : b; + } + return buf; +} + // --------------------------------------------------------------------------- // Classic streams path: createReadStream -> Transform (upper) -> createGzip // --------------------------------------------------------------------------- async function benchClassic(n, filesize) { - // Warm up await runClassic(); bench.start(); @@ -62,22 +80,14 @@ function runClassic() { return new Promise((resolve, reject) => { const rs = fs.createReadStream(filename); - // Transform 1: uppercase const upper = new Transform({ transform(chunk, encoding, callback) { - const buf = Buffer.allocUnsafe(chunk.length); - for (let i = 0; i < chunk.length; i++) { - const b = chunk[i]; - buf[i] = (b >= 0x61 && b <= 0x7a) ? b - 0x20 : b; - } - callback(null, buf); + callback(null, uppercaseChunk(chunk)); }, }); - // Transform 2: gzip const gz = zlib.createGzip(); - // Sink: count compressed bytes let totalBytes = 0; const sink = new Writable({ write(chunk, encoding, callback) { @@ -97,7 +107,6 @@ function runClassic() { // WebStream path: readableWebStream -> TransformStream (upper) -> CompressionStream // --------------------------------------------------------------------------- async function benchWebStream(n, filesize) { - // Warm up await runWebStream(); bench.start(); @@ -113,22 +122,18 @@ async function runWebStream() { try { const rs = fh.readableWebStream(); - // Transform 1: uppercase const upper = new TransformStream({ transform(chunk, controller) { const buf = new Uint8Array(chunk.length); for (let i = 0; i < chunk.length; i++) { const b = chunk[i]; - // a-z (0x61-0x7a) -> A-Z (0x41-0x5a) buf[i] = (b >= 0x61 && b <= 0x7a) ? 
b - 0x20 : b; } controller.enqueue(buf); }, }); - // Transform 2: gzip via CompressionStream const compress = new CompressionStream('gzip'); - const output = rs.pipeThrough(upper).pipeThrough(compress); const reader = output.getReader(); @@ -145,23 +150,30 @@ async function runWebStream() { } // --------------------------------------------------------------------------- -// New streams path: pull() with uppercase transform + gzip transform +// Pull/iter path: pull() with uppercase transform + selected compression // --------------------------------------------------------------------------- -async function benchPull(n, filesize) { - const { pull, compressGzip } = require('stream/iter'); +async function benchPull(n, filesize, compression) { + const iter = require('stream/iter'); + + const compressFactory = { + gzip: iter.compressGzip, + deflate: iter.compressDeflate, + brotli: iter.compressBrotli, + zstd: iter.compressZstd, + }[compression]; // Warm up - await runPull(pull, compressGzip); + await runPull(compressFactory); bench.start(); let totalBytes = 0; for (let i = 0; i < n; i++) { - totalBytes += await runPull(pull, compressGzip); + totalBytes += await runPull(compressFactory); } bench.end(totalBytes / (1024 * 1024)); } -async function runPull(pull, compressGzip) { +async function runPull(compressFactory) { const fh = await fs.promises.open(filename, 'r'); try { // Stateless transform: uppercase each chunk in the batch @@ -169,21 +181,13 @@ async function runPull(pull, compressGzip) { if (chunks === null) return null; const out = new Array(chunks.length); for (let j = 0; j < chunks.length; j++) { - const src = chunks[j]; - const buf = new Uint8Array(src.length); - for (let i = 0; i < src.length; i++) { - const b = src[i]; - buf[i] = (b >= 0x61 && b <= 0x7a) ? 
b - 0x20 : b; - } - out[j] = buf; + out[j] = uppercaseChunk(chunks[j]); } return out; }; - const readable = fh.pull(upper, compressGzip()); + const readable = fh.pull(upper, compressFactory()); - // Count bytes symmetrically with the classic path (no final - // concatenation into a single buffer). let totalBytes = 0; for await (const chunks of readable) { for (let i = 0; i < chunks.length; i++) { diff --git a/doc/api/fs.md b/doc/api/fs.md index 5cc8229f3eb941..8952f9a67c76e9 100644 --- a/doc/api/fs.md +++ b/doc/api/fs.md @@ -391,14 +391,23 @@ added: REPLACEME * `signal` {AbortSignal} * `autoClose` {boolean} Close the file handle when the stream ends. **Default:** `false`. + * `start` {number} Byte offset to begin reading from. When specified, + reads use explicit positioning (`pread` semantics). **Default:** current + file position. + * `limit` {number} Maximum number of bytes to read before ending the + iterator. Reads stop when `limit` bytes have been delivered or EOF is + reached, whichever comes first. **Default:** read until EOF. + * `chunkSize` {number} Size in bytes of the buffer allocated for each + read operation. **Default:** `131072` (128 KB). * Returns: {AsyncIterable\} Return the file contents as an async iterable using the -[`node:stream/iter`][] pull model. Reads are performed in 64 KB chunks. -If transforms are provided, they are applied via [`stream/iter pull()`][]. +[`node:stream/iter`][] pull model. Reads are performed in `chunkSize`-byte +chunks (default 128 KB). If transforms are provided, they are applied +via [`stream/iter pull()`][]. The file handle is locked while the iterable is being consumed and unlocked -when iteration completes. +when iteration completes, an error occurs, or the consumer breaks. This function is only available when the `--experimental-stream-iter` flag is enabled. 
@@ -412,9 +421,13 @@ const fh = await open('input.txt', 'r'); // Read as text console.log(await text(fh.pull({ autoClose: true }))); -// Read with compression +// Read 1 KB starting at byte 100 const fh2 = await open('input.txt', 'r'); -const compressed = fh2.pull(compressGzip(), { autoClose: true }); +console.log(await text(fh2.pull({ start: 100, limit: 1024, autoClose: true }))); + +// Read with compression +const fh3 = await open('input.txt', 'r'); +const compressed = fh3.pull(compressGzip(), { autoClose: true }); ``` ```cjs @@ -427,9 +440,86 @@ async function run() { // Read as text console.log(await text(fh.pull({ autoClose: true }))); - // Read with compression + // Read 1 KB starting at byte 100 const fh2 = await open('input.txt', 'r'); - const compressed = fh2.pull(compressGzip(), { autoClose: true }); + console.log(await text(fh2.pull({ start: 100, limit: 1024, autoClose: true }))); + + // Read with compression + const fh3 = await open('input.txt', 'r'); + const compressed = fh3.pull(compressGzip(), { autoClose: true }); +} + +run().catch(console.error); +``` + +#### `filehandle.pullSync([...transforms][, options])` + + + +> Stability: 1 - Experimental + +* `...transforms` {Function|Object} Optional transforms to apply via + [`stream/iter pullSync()`][]. +* `options` {Object} + * `autoClose` {boolean} Close the file handle when the stream ends. + **Default:** `false`. + * `start` {number} Byte offset to begin reading from. When specified, + reads use explicit positioning. **Default:** current file position. + * `limit` {number} Maximum number of bytes to read before ending the + iterator. **Default:** read until EOF. + * `chunkSize` {number} Size in bytes of the buffer allocated for each + read operation. **Default:** `131072` (128 KB). +* Returns: {Iterable\} + +Synchronous counterpart of [`filehandle.pull()`][]. Returns a sync iterable +that reads the file using synchronous I/O on the main thread. 
Reads are +performed in `chunkSize`-byte chunks (default 128 KB). + +The file handle is locked while the iterable is being consumed. Unlike the +async `pull()`, this method does not support `AbortSignal` since all +operations are synchronous. + +This function is only available when the `--experimental-stream-iter` flag is +enabled. + +```mjs +import { open } from 'node:fs/promises'; +import { + textSync, pipeToSync, compressGzipSync, decompressGzipSync, +} from 'node:stream/iter'; + +const fh = await open('input.txt', 'r'); + +// Read as text (sync) +console.log(textSync(fh.pullSync({ autoClose: true }))); + +// Sync compress pipeline: file -> gzip -> file +const src = await open('input.txt', 'r'); +const dst = await open('output.gz', 'w'); +pipeToSync(src.pullSync(compressGzipSync(), { autoClose: true }), dst.writer({ autoClose: true })); +``` + +```cjs +const { open } = require('node:fs/promises'); +const { + textSync, pipeToSync, compressGzipSync, decompressGzipSync, +} = require('node:stream/iter'); + +async function run() { + const fh = await open('input.txt', 'r'); + + // Read as text (sync) + console.log(textSync(fh.pullSync({ autoClose: true }))); + + // Sync compress pipeline: file -> gzip -> file + const src = await open('input.txt', 'r'); + const dst = await open('output.gz', 'w'); + pipeToSync( + src.pullSync(compressGzipSync(), { autoClose: true }), + dst.writer({ autoClose: true }), + ); } run().catch(console.error); @@ -926,33 +1016,68 @@ added: REPLACEME > Stability: 1 - Experimental * `options` {Object} - * `autoClose` {boolean} Close the file handle when the writer ends. - **Default:** `false`. - * `start` {number} Byte offset to start writing at. **Default:** current - position (append). + * `autoClose` {boolean} Close the file handle when the writer ends or + fails. **Default:** `false`. + * `start` {number} Byte offset to start writing at. When specified, + writes use explicit positioning. **Default:** current file position. 
+ * `limit` {number} Maximum number of bytes the writer will accept. + Async writes (`write()`, `writev()`) that would exceed the limit reject + with `ERR_OUT_OF_RANGE`. Sync writes (`writeSync()`, `writevSync()`) + return `false`. **Default:** no limit. + * `chunkSize` {number} Maximum chunk size in bytes for synchronous write + operations. Writes larger than this threshold fall back to async I/O. + Set this to match the reader's `chunkSize` for optimal `pipeTo()` + performance. **Default:** `131072` (128 KB). * Returns: {Object} * `write(chunk[, options])` {Function} Returns {Promise\}. - * `chunk` {Buffer|TypedArray|DataView} + Accepts `Uint8Array`, `Buffer`, or string (UTF-8 encoded). + * `chunk` {Buffer|TypedArray|DataView|string} * `options` {Object} * `signal` {AbortSignal} If the signal is already aborted, the write rejects with `AbortError` without performing I/O. * `writev(chunks[, options])` {Function} Returns {Promise\}. Uses - scatter/gather I/O via a single `writev()` syscall. - * `chunks` {Buffer\[]|TypedArray\[]|DataView\[]} + scatter/gather I/O via a single `writev()` syscall. Accepts mixed + `Uint8Array`/string arrays. + * `chunks` {Array\} * `options` {Object} * `signal` {AbortSignal} If the signal is already aborted, the write rejects with `AbortError` without performing I/O. - * `end([options])` {Function} Returns {Promise\} total bytes written. + * `writeSync(chunk)` {Function} Returns {boolean}. Attempts a synchronous + write. Returns `true` if the write succeeded, `false` if the caller + should fall back to async `write()`. Returns `false` when: the writer + is closed/errored, an async operation is in flight, the chunk exceeds + `chunkSize`, or the write would exceed `limit`. + * `chunk` {Buffer|TypedArray|DataView|string} + * `writevSync(chunks)` {Function} Returns {boolean}. Synchronous batch + write. Same fallback semantics as `writeSync()`. + * `chunks` {Array\} + * `end([options])` {Function} Returns {Promise\} total bytes + written. 
Idempotent: returns `totalBytesWritten` if already closed, + returns the pending promise if already closing. Rejects if the writer + is in an errored state. * `options` {Object} * `signal` {AbortSignal} If the signal is already aborted, `end()` rejects with `AbortError` and the writer remains open. + * `endSync()` {Function} Returns {number|number} total bytes written on + success, `-1` if the writer is errored or an async operation is in + flight. Idempotent when already closed. * `fail(reason)` {Function} Puts the writer into a terminal error state. - Synchronous. If the writer is already closed or errored, this is a no-op. + Synchronous. If the writer is already closed or errored, this is a + no-op. If `autoClose` is true, closes the file handle synchronously. Return a [`node:stream/iter`][] writer backed by this file handle. -The writer supports `Symbol.asyncDispose`, so it can be used with -`await using`. +The writer supports both `Symbol.asyncDispose` and `Symbol.dispose`: + +* `await using w = fh.writer()` — if the writer is still open (no `end()` + called), `asyncDispose` calls `fail()`. If `end()` is pending, it waits + for it to complete. +* `using w = fh.writer()` — calls `fail()` unconditionally. + +The `writeSync()` and `writevSync()` methods enable the try-sync fast path +used by [`stream/iter pipeTo()`][]. When the reader's chunk size matches the +writer's `chunkSize`, all writes in a `pipeTo()` pipeline complete +synchronously with zero promise overhead. This function is only available when the `--experimental-stream-iter` flag is enabled. @@ -961,10 +1086,17 @@ enabled. 
import { open } from 'node:fs/promises'; import { from, pipeTo, compressGzip } from 'node:stream/iter'; +// Async pipeline const fh = await open('output.gz', 'w'); -const w = fh.writer({ autoClose: true }); -await pipeTo(from('Hello!'), compressGzip(), w); +await pipeTo(from('Hello!'), compressGzip(), fh.writer({ autoClose: true })); + +// Sync pipeline with limit +const src = await open('input.txt', 'r'); +const dst = await open('output.txt', 'w'); +const w = dst.writer({ limit: 1024 * 1024 }); // Max 1 MB +await pipeTo(src.pull({ autoClose: true }), w); await w.end(); +await dst.close(); ``` ```cjs @@ -972,10 +1104,17 @@ const { open } = require('node:fs/promises'); const { from, pipeTo, compressGzip } = require('node:stream/iter'); async function run() { + // Async pipeline const fh = await open('output.gz', 'w'); - const w = fh.writer({ autoClose: true }); - await pipeTo(from('Hello!'), compressGzip(), w); + await pipeTo(from('Hello!'), compressGzip(), fh.writer({ autoClose: true })); + + // Sync pipeline with limit + const src = await open('input.txt', 'r'); + const dst = await open('output.txt', 'w'); + const w = dst.writer({ limit: 1024 * 1024 }); // Max 1 MB + await pipeTo(src.pull({ autoClose: true }), w); await w.end(); + await dst.close(); } run().catch(console.error); @@ -8859,6 +8998,7 @@ the file contents. [`event ports`]: https://illumos.org/man/port_create [`filehandle.createReadStream()`]: #filehandlecreatereadstreamoptions [`filehandle.createWriteStream()`]: #filehandlecreatewritestreamoptions +[`filehandle.pull()`]: #filehandlepulltransforms-options [`filehandle.writeFile()`]: #filehandlewritefiledata-options [`fs.access()`]: #fsaccesspath-mode-callback [`fs.accessSync()`]: #fsaccesssyncpath-mode @@ -8910,7 +9050,9 @@ the file contents. 
[`kqueue(2)`]: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 [`minimatch`]: https://github.com/isaacs/minimatch [`node:stream/iter`]: stream_iter.md +[`stream/iter pipeTo()`]: stream_iter.md#pipetosource-transforms-writer [`stream/iter pull()`]: stream_iter.md#pullsource-transforms-options +[`stream/iter pullSync()`]: stream_iter.md#pullsyncsource-transforms [`util.promisify()`]: util.md#utilpromisifyoriginal [bigints]: https://tc39.github.io/proposal-bigint [caveats]: #caveats diff --git a/doc/api/stream_iter.md b/doc/api/stream_iter.md index ef74110bc1a5ec..6eb372a8110b1a 100644 --- a/doc/api/stream_iter.md +++ b/doc/api/stream_iter.md @@ -1414,26 +1414,45 @@ added: REPLACEME ## Compression and decompression transforms -These transforms apply zlib, Brotli, and Zstd compression transforms. +These transforms apply zlib, Brotli, and Zstd compression and decompression. +Each algorithm has both an async variant (stateful async generator, for use +with `pull()` and `pipeTo()`) and a sync variant (stateful sync generator, +for use with `pullSync()` and `pipeToSync()`). + +The async transforms run compression on the libuv threadpool, overlapping +I/O with JavaScript execution. The sync transforms run compression directly +on the main thread. + +> Note: The defaults for these transforms are tuned for streaming throughput, +> and differ from the defaults in `node:zlib`. In particular, gzip/deflate +> default to level 4 (not 6) and memLevel 9 (not 8), and Brotli defaults to +> quality 6 (not 11). These choices match common HTTP server configurations +> and provide significantly faster compression with only a small reduction in +> compression ratio. All defaults can be overridden via options. ### `compressBrotli([options])` +### `compressBrotliSync([options])` + * `options` {Object} - * `chunkSize` {number} **Default:** `16384`. + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). 
* `params` {Object} Key-value object where keys and values are `zlib.constants` entries. The most important compressor parameters are: * `BROTLI_PARAM_MODE` -- `BROTLI_MODE_GENERIC` (default), `BROTLI_MODE_TEXT`, or `BROTLI_MODE_FONT`. * `BROTLI_PARAM_QUALITY` -- ranges from `BROTLI_MIN_QUALITY` to - `BROTLI_MAX_QUALITY`. **Default:** `BROTLI_DEFAULT_QUALITY`. + `BROTLI_MAX_QUALITY`. **Default:** `6` (not `BROTLI_DEFAULT_QUALITY` + which is 11). Quality 6 is appropriate for streaming; quality 11 is + intended for offline/build-time compression. * `BROTLI_PARAM_SIZE_HINT` -- expected input size. **Default:** `0` (unknown). - * `BROTLI_PARAM_LGWIN` -- window size (log2). Ranges from - `BROTLI_MIN_WINDOW_BITS` to `BROTLI_MAX_WINDOW_BITS`. + * `BROTLI_PARAM_LGWIN` -- window size (log2). **Default:** `20` (1 MB). + The Brotli library default is 22 (4 MB); the reduced default saves + memory without significant compression impact for streaming workloads. * `BROTLI_PARAM_LGBLOCK` -- input block size (log2). See the [Brotli compressor options][] in the zlib documentation for the full list. @@ -1441,47 +1460,52 @@ added: REPLACEME * Returns: {Object} A stateful transform. Create a Brotli compression transform. Output is compatible with -`zlib.brotliDecompress()` and `decompressBrotli()`. +`zlib.brotliDecompress()` and `decompressBrotli()`/`decompressBrotliSync()`. ### `compressDeflate([options])` +### `compressDeflateSync([options])` + * `options` {Object} - * `chunkSize` {number} Output buffer size. **Default:** `16384`. - * `level` {number} Compression level (`0`-`9`). **Default:** `Z_DEFAULT_COMPRESSION`. - * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS`. - * `memLevel` {number} **Default:** `Z_DEFAULT_MEMLEVEL`. + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). + * `level` {number} Compression level (`0`-`9`). **Default:** `4`. + * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS` (15). 
+ * `memLevel` {number} **Default:** `9`. * `strategy` {number} **Default:** `Z_DEFAULT_STRATEGY`. * `dictionary` {Buffer|TypedArray|DataView} * Returns: {Object} A stateful transform. Create a deflate compression transform. Output is compatible with -`zlib.inflate()` and `decompressDeflate()`. +`zlib.inflate()` and `decompressDeflate()`/`decompressDeflateSync()`. ### `compressGzip([options])` +### `compressGzipSync([options])` + * `options` {Object} - * `chunkSize` {number} Output buffer size. **Default:** `16384`. - * `level` {number} Compression level (`0`-`9`). **Default:** `Z_DEFAULT_COMPRESSION`. - * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS`. - * `memLevel` {number} **Default:** `Z_DEFAULT_MEMLEVEL`. + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). + * `level` {number} Compression level (`0`-`9`). **Default:** `4`. + * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS` (15). + * `memLevel` {number} **Default:** `9`. * `strategy` {number} **Default:** `Z_DEFAULT_STRATEGY`. * `dictionary` {Buffer|TypedArray|DataView} * Returns: {Object} A stateful transform. Create a gzip compression transform. Output is compatible with `zlib.gunzip()` -and `decompressGzip()`. +and `decompressGzip()`/`decompressGzipSync()`. 
```mjs import { from, pull, bytes, text, compressGzip, decompressGzip } from 'node:stream/iter'; +// Async round-trip const compressed = await bytes(pull(from('hello'), compressGzip())); const original = await text(pull(from(compressed), decompressGzip())); console.log(original); // 'hello' @@ -1499,14 +1523,33 @@ async function run() { run().catch(console.error); ``` +```mjs +import { fromSync, pullSync, textSync, compressGzipSync, decompressGzipSync } from 'node:stream/iter'; + +// Sync round-trip +const compressed = pullSync(fromSync('hello'), compressGzipSync()); +const original = textSync(pullSync(compressed, decompressGzipSync())); +console.log(original); // 'hello' +``` + +```cjs +const { fromSync, pullSync, textSync, compressGzipSync, decompressGzipSync } = require('node:stream/iter'); + +const compressed = pullSync(fromSync('hello'), compressGzipSync()); +const original = textSync(pullSync(compressed, decompressGzipSync())); +console.log(original); // 'hello' +``` + ### `compressZstd([options])` +### `compressZstdSync([options])` + * `options` {Object} - * `chunkSize` {number} **Default:** `16384`. + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). * `params` {Object} Key-value object where keys and values are `zlib.constants` entries. The most important compressor parameters are: * `ZSTD_c_compressionLevel` -- **Default:** `ZSTD_CLEVEL_DEFAULT` (3). @@ -1522,16 +1565,18 @@ added: REPLACEME * Returns: {Object} A stateful transform. Create a Zstandard compression transform. Output is compatible with -`zlib.zstdDecompress()` and `decompressZstd()`. +`zlib.zstdDecompress()` and `decompressZstd()`/`decompressZstdSync()`. ### `decompressBrotli([options])` +### `decompressBrotliSync([options])` + * `options` {Object} - * `chunkSize` {number} **Default:** `16384`. + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). * `params` {Object} Key-value object where keys and values are `zlib.constants` entries. 
Available decompressor parameters: * `BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION` -- boolean @@ -1547,13 +1592,15 @@ Create a Brotli decompression transform. ### `decompressDeflate([options])` +### `decompressDeflateSync([options])` + * `options` {Object} - * `chunkSize` {number} Output buffer size. **Default:** `16384`. - * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS`. + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). + * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS` (15). * `dictionary` {Buffer|TypedArray|DataView} * Returns: {Object} A stateful transform. @@ -1561,13 +1608,15 @@ Create a deflate decompression transform. ### `decompressGzip([options])` +### `decompressGzipSync([options])` + * `options` {Object} - * `chunkSize` {number} Output buffer size. **Default:** `16384`. - * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS`. + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). + * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS` (15). * `dictionary` {Buffer|TypedArray|DataView} * Returns: {Object} A stateful transform. @@ -1575,12 +1624,14 @@ Create a gzip decompression transform. ### `decompressZstd([options])` +### `decompressZstdSync([options])` + * `options` {Object} - * `chunkSize` {number} **Default:** `16384`. + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). * `params` {Object} Key-value object where keys and values are `zlib.constants` entries. 
Available decompressor parameters: * `ZSTD_d_windowLogMax` -- maximum window size (log2) the decompressor diff --git a/lib/internal/fs/promises.js b/lib/internal/fs/promises.js index 842272b31706d0..982ee89c3b17c8 100644 --- a/lib/internal/fs/promises.js +++ b/lib/internal/fs/promises.js @@ -17,6 +17,8 @@ const { Symbol, SymbolAsyncDispose, SymbolAsyncIterator, + SymbolDispose, + SymbolIterator, Uint8Array, } = primordials; @@ -42,6 +44,7 @@ const { ERR_INVALID_STATE, ERR_METHOD_NOT_IMPLEMENTED, ERR_OPERATION_FAILED, + ERR_OUT_OF_RANGE, }, } = require('internal/errors'); const { isArrayBufferView } = require('internal/util/types'); @@ -64,6 +67,7 @@ const { stringToFlags, stringToSymlinkType, toUnixTimestamp, + handleErrorFromBinding: handleSyncErrorFromBinding, validateBufferArray, validateCpOptions, validateOffsetLengthRead, @@ -115,6 +119,7 @@ const kCloseReject = Symbol('kCloseReject'); const kRef = Symbol('kRef'); const kUnref = Symbol('kUnref'); const kLocked = Symbol('kLocked'); +const kCloseSync = Symbol('kCloseSync'); const { kUsePromises } = binding; const { Interface } = require('internal/readline/interface'); @@ -144,12 +149,19 @@ const lazyReadableStream = getLazy(() => // Lazy loaded to avoid circular dependency with new streams. 
let newStreamsPull; +let newStreamsPullSync; let newStreamsParsePullArgs; +let newStreamsToUint8Array; +let newStreamsConvertChunks; function lazyNewStreams() { if (newStreamsPull === undefined) { - newStreamsPull = require('internal/streams/iter/pull').pull; - newStreamsParsePullArgs = - require('internal/streams/iter/utils').parsePullArgs; + const pullModule = require('internal/streams/iter/pull'); + newStreamsPull = pullModule.pull; + newStreamsPullSync = pullModule.pullSync; + const utils = require('internal/streams/iter/utils'); + newStreamsParsePullArgs = utils.parsePullArgs; + newStreamsToUint8Array = utils.toUint8Array; + newStreamsConvertChunks = utils.convertChunks; } } @@ -280,6 +292,16 @@ class FileHandle extends EventEmitter { return this[kClosePromise]; }; + [kCloseSync]() { + if (this[kFd] === -1) return; + if (this[kClosePromise]) { + throw new ERR_INVALID_STATE('The FileHandle is closing'); + } + this[kFd] = -1; + this[kHandle].closeSync(); + this.emit('close'); + } + async [SymbolAsyncDispose]() { await this.close(); } @@ -436,6 +458,9 @@ class FileHandle extends EventEmitter { } if (getOptionValue('--experimental-stream-iter')) { + const kNullPrototo = { __proto__: null }; + const kDefaultChunkSize = 131072; + const kNone = -1; /** * Return the file contents as an AsyncIterable using the * new streams pull model. 
Optional transforms and options (including @@ -445,68 +470,95 @@ if (getOptionValue('--experimental-stream-iter')) { * @returns {AsyncIterable} */ FileHandle.prototype.pull = function pull(...args) { - if (this[kFd] === -1) + if (this[kFd] === kNone) throw new ERR_INVALID_STATE('The FileHandle is closed'); if (this[kClosePromise]) throw new ERR_INVALID_STATE('The FileHandle is closing'); if (this[kLocked]) throw new ERR_INVALID_STATE('The FileHandle is locked'); - this[kLocked] = true; lazyNewStreams(); - const { transforms, options } = newStreamsParsePullArgs(args); + const { transforms, options = kNullPrototo } = newStreamsParsePullArgs(args); + + const { + autoClose = false, + chunkSize: readSize = kDefaultChunkSize, + signal, + } = options; + let { + start: pos = kNone, + limit: remaining = kNone, + } = options; const handle = this; const fd = this[kFd]; - const autoClose = options?.autoClose ?? false; - const signal = options?.signal; + + validateBoolean(autoClose, 'options.autoClose'); + + if (pos !== kNone) { + validateInteger(pos, 'options.start', 0); + } + if (remaining !== kNone) { + validateInteger(remaining, 'options.limit', 1); + } + if (readSize !== undefined) { + validateInteger(readSize, 'options.chunkSize', 1); + } + if (signal !== undefined) { + validateAbortSignal(signal, 'options.signal'); + } + + this[kLocked] = true; const source = { __proto__: null, async *[SymbolAsyncIterator]() { handle[kRef](); - const readSize = 65536; try { if (signal) { // Signal-aware path - while (true) { + while (remaining !== 0) { if (signal.aborted) { throw signal.reason ?? lazyDOMException('The operation was aborted', 'AbortError'); } - // Allocate a fresh buffer each iteration. At 64 KiB this - // bypasses the slab pool, so there is no reuse benefit. - // Yielding the buffer directly avoids the per-chunk copy - // that was needed when a single buffer was reused. - const buf = Buffer.allocUnsafe(readSize); + const toRead = remaining > 0 ? 
+ MathMin(readSize, remaining) : readSize; + const buf = Buffer.allocUnsafe(toRead); let bytesRead; try { bytesRead = (await binding.read(fd, buf, 0, - readSize, -1, kUsePromises)) || 0; + toRead, pos, kUsePromises)) || 0; } catch (err) { ErrorCaptureStackTrace(err, handleErrorFromBinding); throw err; } if (bytesRead === 0) break; - yield [bytesRead < readSize ? buf.subarray(0, bytesRead) : buf]; + if (pos >= 0) pos += bytesRead; + if (remaining > 0) remaining -= bytesRead; + yield [bytesRead < toRead ? buf.subarray(0, bytesRead) : buf]; } } else { // Fast path - no signal check per iteration - while (true) { - const buf = Buffer.allocUnsafe(readSize); + while (remaining !== 0) { + const toRead = remaining > 0 ? + MathMin(readSize, remaining) : readSize; + const buf = Buffer.allocUnsafe(toRead); let bytesRead; try { bytesRead = (await binding.read(fd, buf, 0, - readSize, -1, kUsePromises)) || 0; + toRead, pos, kUsePromises)) || 0; } catch (err) { ErrorCaptureStackTrace(err, handleErrorFromBinding); throw err; } if (bytesRead === 0) break; - yield [bytesRead < readSize ? buf.subarray(0, bytesRead) : buf]; + if (pos >= 0) pos += bytesRead; + if (remaining > 0) remaining -= bytesRead; + yield [bytesRead < toRead ? buf.subarray(0, bytesRead) : buf]; } } } finally { @@ -530,6 +582,113 @@ if (getOptionValue('--experimental-stream-iter')) { return source; }; + /** + * Return the file contents as an Iterable using synchronous + * reads. Optional transforms and options may be provided as trailing + * arguments, mirroring the Stream.pullSync() signature. 
+ * @param {...(Function|object)} args - Optional transforms and/or options + * @returns {Iterable} + */ + FileHandle.prototype.pullSync = function pullSync(...args) { + if (this[kFd] === kNone) + throw new ERR_INVALID_STATE('The FileHandle is closed'); + if (this[kClosePromise]) + throw new ERR_INVALID_STATE('The FileHandle is closing'); + if (this[kLocked]) + throw new ERR_INVALID_STATE('The FileHandle is locked'); + + lazyNewStreams(); + const { transforms, options = kNullPrototo } = newStreamsParsePullArgs(args); + + const { + autoClose = false, + chunkSize: readSize = kDefaultChunkSize, + } = options; + let { + start: pos = kNone, + limit: remaining = kNone, + } = options; + + const handle = this; + const fd = this[kFd]; + + validateBoolean(autoClose, 'options.autoClose'); + + if (pos !== kNone) { + validateInteger(pos, 'options.start', 0); + } + if (remaining !== kNone) { + validateInteger(remaining, 'options.limit', 1); + } + if (readSize !== undefined) { + validateInteger(readSize, 'options.chunkSize', 1); + } + + this[kLocked] = true; + + handle[kRef](); + + function cleanup() { + handle[kLocked] = false; + handle[kUnref](); + if (autoClose) { + handle[kCloseSync](); + } + } + + const source = { + __proto__: null, + [SymbolIterator]() { + let done = false; + return { + __proto__: null, + next() { + if (done || remaining === 0) { + if (!done) { + done = true; + cleanup(); + } + return { value: undefined, done: true }; + } + const toRead = remaining > 0 ? + MathMin(readSize, remaining) : readSize; + const buf = Buffer.allocUnsafe(toRead); + let bytesRead; + try { + bytesRead = binding.read(fd, buf, 0, toRead, pos) || 0; + } catch (err) { + done = true; + cleanup(); + throw err; + } + if (bytesRead === 0) { + done = true; + cleanup(); + return { value: undefined, done: true }; + } + if (pos >= 0) pos += bytesRead; + if (remaining > 0) remaining -= bytesRead; + const chunk = bytesRead < toRead ? 
+ buf.subarray(0, bytesRead) : buf; + return { value: [chunk], done: false }; + }, + return() { + if (!done) { + done = true; + cleanup(); + } + return { value: undefined, done: true }; + }, + }; + }, + }; + + if (transforms.length > 0) { + return newStreamsPullSync(source, ...transforms); + } + return source; + }; + /** * Return a new-streams Writer backed by this file handle. * The writer uses direct binding.writeBuffer / binding.writeBuffers @@ -543,91 +702,154 @@ if (getOptionValue('--experimental-stream-iter')) { * }} [options] * @returns {{ write, writev, end, fail }} */ - FileHandle.prototype.writer = function writer(options) { - if (this[kFd] === -1) + FileHandle.prototype.writer = function writer(options = kNullPrototo) { + if (this[kFd] === kNone) throw new ERR_INVALID_STATE('The FileHandle is closed'); if (this[kClosePromise]) throw new ERR_INVALID_STATE('The FileHandle is closing'); if (this[kLocked]) throw new ERR_INVALID_STATE('The FileHandle is locked'); - this[kLocked] = true; + + lazyNewStreams(); + + validateObject(options, 'options'); + const { + autoClose = false, + chunkSize: syncWriteThreshold = kDefaultChunkSize, + } = options; + let { + start: pos = kNone, + limit: bytesRemaining = kNone, + } = options; const handle = this; const fd = this[kFd]; - const autoClose = options?.autoClose ?? false; - let pos = options?.start ?? -1; let totalBytesWritten = 0; let closed = false; + let closing = false; + let pendingEndPromise = null; let error = null; + let asyncPending = false; + + validateBoolean(autoClose, 'options.autoClose'); - if (pos !== -1) { + if (pos !== kNone) { validateInteger(pos, 'options.start', 0); } + if (bytesRemaining !== kNone) { + validateInteger(bytesRemaining, 'options.limit', 1); + } + if (syncWriteThreshold !== undefined) { + validateInteger(syncWriteThreshold, 'options.chunkSize', 1); + } + this[kLocked] = true; handle[kRef](); // Write a single buffer with EAGAIN retry (up to 5 retries). 
- async function writeAll(buf, offset, length, position) { - let retries = 0; - while (length > 0) { - const bytesWritten = (await PromisePrototypeThen( - binding.writeBuffer(fd, buf, offset, length, position, - kUsePromises), - undefined, - handleErrorFromBinding, - )) || 0; - - if (bytesWritten === 0) { - if (++retries > 5) { - throw new ERR_OPERATION_FAILED('write failed after retries'); + async function writeAll(buf, offset, length, position, signal) { + asyncPending = true; + try { + let retries = 0; + while (length > 0) { + const bytesWritten = (await PromisePrototypeThen( + binding.writeBuffer(fd, buf, offset, length, position, + kUsePromises), + undefined, + handleErrorFromBinding, + )) || 0; + + signal?.throwIfAborted(); + + if (bytesWritten === 0) { + if (++retries > 5) { + throw new ERR_OPERATION_FAILED('write failed after retries'); + } + } else { + retries = 0; } - } else { - retries = 0; - } - totalBytesWritten += bytesWritten; - offset += bytesWritten; - length -= bytesWritten; - if (position >= 0) position += bytesWritten; + totalBytesWritten += bytesWritten; + offset += bytesWritten; + length -= bytesWritten; + if (position >= 0) position += bytesWritten; + } + } finally { + asyncPending = false; } } // Writev with EAGAIN retry. On partial write, concatenates remaining // buffers and falls back to writeAll (same approach as WriteStream). 
- async function writevAll(buffers, position) { - let totalSize = 0; - for (let i = 0; i < buffers.length; i++) { - totalSize += buffers[i].byteLength; + async function writevAll(buffers, position, signal) { + asyncPending = true; + try { + let totalSize = 0; + for (let i = 0; i < buffers.length; i++) { + totalSize += buffers[i].byteLength; + } + + let retries = 0; + while (totalSize > 0) { + const bytesWritten = (await PromisePrototypeThen( + binding.writeBuffers(fd, buffers, position, kUsePromises), + undefined, + handleErrorFromBinding, + )) || 0; + + signal?.throwIfAborted(); + + if (bytesWritten === 0) { + if (++retries > 5) { + throw new ERR_OPERATION_FAILED('writev failed after retries'); + } + } else { + retries = 0; + } + + totalBytesWritten += bytesWritten; + totalSize -= bytesWritten; + if (position >= 0) position += bytesWritten; + + if (totalSize > 0) { + // Partial write - concatenate remaining and use writeAll. + const remaining = Buffer.concat(buffers); + const wrote = bytesWritten; + // writeAll is already inside asyncPending = true, but + // writeAll sets it again - that's fine (idempotent). + await writeAll(remaining, wrote, remaining.length - wrote, + position, signal); + return; + } + } + } finally { + asyncPending = false; } + } + // Synchronous write with EAGAIN retry. Throws on I/O error. + // Used by writeSync for the full write, and by writevSync for + // completing a partial writev. 
+ function writeSyncAll(buf, offset, length, position) { let retries = 0; - while (totalSize > 0) { - const bytesWritten = (await PromisePrototypeThen( - binding.writeBuffers(fd, buffers, position, kUsePromises), - undefined, - handleErrorFromBinding, - )) || 0; - + while (length > 0) { + const ctx = {}; + const bytesWritten = binding.writeBuffer( + fd, buf, offset, length, position, undefined, ctx) || 0; + if (ctx.errno !== undefined) { + handleSyncErrorFromBinding(ctx); + } if (bytesWritten === 0) { if (++retries > 5) { - throw new ERR_OPERATION_FAILED('writev failed after retries'); + throw new ERR_OPERATION_FAILED('write failed after retries'); } } else { retries = 0; } - totalBytesWritten += bytesWritten; - totalSize -= bytesWritten; + offset += bytesWritten; + length -= bytesWritten; if (position >= 0) position += bytesWritten; - - if (totalSize > 0) { - // Partial write - concatenate remaining and use writeAll. - const remaining = Buffer.concat(buffers); - const wrote = bytesWritten; - await writeAll(remaining, wrote, remaining.length - wrote, - position); - return; - } } } @@ -643,7 +865,7 @@ if (getOptionValue('--experimental-stream-iter')) { return { __proto__: null, - write(chunk, options) { + write(chunk, options = kNullPrototo) { if (error) { return PromiseReject(error); } @@ -651,16 +873,29 @@ if (getOptionValue('--experimental-stream-iter')) { return PromiseReject( new ERR_INVALID_STATE.TypeError('The writer is closed')); } - if (options?.signal?.aborted) { + validateObject(options, 'options'); + const { + signal, + } = options; + if (signal !== undefined) { + validateAbortSignal(signal, 'options.signal'); + if (signal.aborted) { + return PromiseReject(signal.reason); + } + } + chunk = newStreamsToUint8Array(chunk); + if (bytesRemaining >= 0 && chunk.byteLength > bytesRemaining) { return PromiseReject( - new AbortError(undefined, { cause: options.signal.reason })); + new ERR_OUT_OF_RANGE('write', `<= ${bytesRemaining} bytes`, + chunk.byteLength)); } 
+ if (bytesRemaining > 0) bytesRemaining -= chunk.byteLength; const position = pos; if (pos >= 0) pos += chunk.byteLength; - return writeAll(chunk, 0, chunk.byteLength, position); + return writeAll(chunk, 0, chunk.byteLength, position, signal); }, - writev(chunks, options) { + writev(chunks, options = kNullPrototo) { if (error) { return PromiseReject(error); } @@ -668,24 +903,135 @@ if (getOptionValue('--experimental-stream-iter')) { return PromiseReject( new ERR_INVALID_STATE.TypeError('The writer is closed')); } - if (options?.signal?.aborted) { + validateObject(options, 'options'); + const { + signal, + } = options; + if (signal !== undefined) { + validateAbortSignal(signal, 'options.signal'); + if (signal?.aborted) { + return PromiseReject(signal.reason); + } + } + chunks = newStreamsConvertChunks(chunks); + let totalSize = 0; + for (let i = 0; i < chunks.length; i++) { + totalSize += chunks[i].byteLength; + } + if (bytesRemaining >= 0 && totalSize > bytesRemaining) { return PromiseReject( - new AbortError(undefined, { cause: options.signal.reason })); + new ERR_OUT_OF_RANGE('writev', `<= ${bytesRemaining} bytes`, + totalSize)); + } + if (bytesRemaining > 0) bytesRemaining -= totalSize; + const position = pos; + if (pos >= 0) pos += totalSize; + return writevAll(chunks, position, signal); + }, + + writeSync(chunk) { + if (error || closed || asyncPending) return false; + chunk = newStreamsToUint8Array(chunk); + const length = chunk.byteLength; + if (length > syncWriteThreshold) return false; + if (length === 0) return true; + if (bytesRemaining >= 0 && length > bytesRemaining) return false; + const position = pos; + // First attempt - if this fails with zero bytes written, + // return false so pipeTo can fall back to async write(). 
+ const ctx = {}; + const bytesWritten = binding.writeBuffer( + fd, chunk, 0, length, position, undefined, ctx) || 0; + if (ctx.errno !== undefined) return false; + totalBytesWritten += bytesWritten; + if (position >= 0) { + pos = position + bytesWritten; } + if (bytesWritten === length) { + if (bytesRemaining > 0) bytesRemaining -= length; + return true; + } + // Partial write - bytes are on disk. Must complete or throw. + // Cannot return false here because pipeTo would re-send the + // full chunk, causing duplicate data on disk. + writeSyncAll(chunk, bytesWritten, length - bytesWritten, + position >= 0 ? position + bytesWritten : -1); + if (bytesRemaining > 0) bytesRemaining -= length; + return true; + }, + + writevSync(chunks) { + if (error || closed || asyncPending) return false; + chunks = newStreamsConvertChunks(chunks); + let totalSize = 0; + for (let i = 0; i < chunks.length; i++) { + totalSize += chunks[i].byteLength; + } + if (totalSize > syncWriteThreshold) return false; + if (totalSize === 0) return true; + if (bytesRemaining >= 0 && totalSize > bytesRemaining) return false; const position = pos; - if (pos >= 0) { - for (let i = 0; i < chunks.length; i++) { - pos += chunks[i].byteLength; + // writeBuffers throws on error (zero bytes written) - safe + // to catch and return false for async fallback. + let bytesWritten; + try { + bytesWritten = binding.writeBuffers(fd, chunks, position) || 0; + } catch { + return false; + } + totalBytesWritten += bytesWritten; + if (position >= 0) { + pos = position + bytesWritten; + } + if (bytesWritten === totalSize) { + if (bytesRemaining > 0) bytesRemaining -= totalSize; + return true; + } + // Partial writev - bytes are on disk. Must complete or throw. + const rest = Buffer.concat(chunks); + writeSyncAll(rest, bytesWritten, + rest.byteLength - bytesWritten, + position >= 0 ? 
position + bytesWritten : -1); + if (bytesRemaining > 0) bytesRemaining -= totalSize; + return true; + }, + + end(options = kNullPrototo) { + if (error) { + return PromiseReject(error); + } + if (closed) { + return PromiseResolve(totalBytesWritten); + } + if (closing) { + return pendingEndPromise; + } + validateObject(options, 'options'); + const { + signal, + } = options; + if (signal !== undefined) { + validateAbortSignal(signal, 'options.signal'); + if (signal.aborted) { + return PromiseReject(signal.reason); } } - return writevAll(chunks, position); + closing = true; + pendingEndPromise = PromisePrototypeThen( + cleanup(), () => totalBytesWritten); + return pendingEndPromise; }, - async end(options) { - if (options?.signal?.aborted) { - throw new AbortError(undefined, { cause: options.signal.reason }); + endSync() { + if (error) return -1; + if (closed) return totalBytesWritten; + if (asyncPending) return -1; + closed = true; + handle[kLocked] = false; + handle[kUnref](); + if (autoClose) { + handle[kCloseSync](); } - await cleanup(); return totalBytesWritten; }, @@ -696,16 +1042,22 @@ if (getOptionValue('--experimental-stream-iter')) { handle[kLocked] = false; handle[kUnref](); if (autoClose) { - // The close call will return a promise. On the off chance - // the rejects we'll end up with an unhandled rejection. - // TODO(@jasnell): Add a closeSync method to the underlying - // C++ FileHandle handle. - handle.close(); + handle[kCloseSync](); + } + }, + + [SymbolAsyncDispose]() { + if (closing) { + return pendingEndPromise ?? 
PromiseResolve(); + } + if (!closed && !error) { + this.fail(); } + return PromiseResolve(); }, - async [SymbolAsyncDispose]() { - await cleanup(); + [SymbolDispose]() { + this.fail(); }, }; }; diff --git a/lib/internal/streams/iter/pull.js b/lib/internal/streams/iter/pull.js index f233d5db716263..8c49941ddb54ed 100644 --- a/lib/internal/streams/iter/pull.js +++ b/lib/internal/streams/iter/pull.js @@ -50,6 +50,10 @@ const { wrapError, } = require('internal/streams/iter/utils'); +const { + kTrustedTransform, +} = require('internal/streams/iter/types'); + // ============================================================================= // Type Guards and Helpers // ============================================================================= @@ -559,6 +563,25 @@ async function* applyStatefulAsyncTransform(source, transform, options) { } } +/** + * Fast path for trusted stateful transforms (e.g. compression). + * Skips withFlushAsync (transform handles done internally) and + * skips isUint8ArrayBatch validation (transform guarantees valid output). + * @yields {Uint8Array[]} + */ +async function* applyTrustedStatefulAsyncTransform(source, transform, options) { + const output = transform(source, options); + for await (const batch of output) { + if (batch.length > 0) { + yield batch; + } + } + // Check abort after the transform completes - without the + // withFlushAsync wrapper there is no extra yield to give + // the outer pipeline a chance to see the abort. + options.signal?.throwIfAborted(); +} + /** * Create an async pipeline from source through transforms. 
* @yields {Uint8Array[]} @@ -615,9 +638,14 @@ async function* createAsyncPipeline(source, transforms, signal) { transformSignal); statelessRun = []; } - current = applyStatefulAsyncTransform(current, transform.transform, - { __proto__: null, - signal: transformSignal }); + const opts = { __proto__: null, signal: transformSignal }; + if (transform[kTrustedTransform]) { + current = applyTrustedStatefulAsyncTransform( + current, transform.transform, opts); + } else { + current = applyStatefulAsyncTransform( + current, transform.transform, opts); + } } else { statelessRun.push(transform); } diff --git a/lib/internal/streams/iter/transform.js b/lib/internal/streams/iter/transform.js index 64d1098920b8c8..4cb417ed98ce32 100644 --- a/lib/internal/streams/iter/transform.js +++ b/lib/internal/streams/iter/transform.js @@ -12,7 +12,6 @@ const { ArrayPrototypeMap, ArrayPrototypePush, ArrayPrototypeShift, - ArrayPrototypeSplice, MathMax, NumberIsNaN, ObjectEntries, @@ -38,6 +37,7 @@ const { } = require('internal/errors'); const { lazyDOMException } = require('internal/util'); const { isArrayBufferView, isAnyArrayBuffer } = require('internal/util/types'); +const { kTrustedTransform } = require('internal/streams/iter/types'); const { checkRangesOrGetDefault, validateFiniteNumber, @@ -54,8 +54,8 @@ const { // Zlib flush Z_NO_FLUSH, Z_FINISH, // Zlib defaults - Z_DEFAULT_WINDOWBITS, Z_DEFAULT_COMPRESSION, - Z_DEFAULT_MEMLEVEL, Z_DEFAULT_STRATEGY, Z_DEFAULT_CHUNK, + Z_DEFAULT_WINDOWBITS, + Z_DEFAULT_STRATEGY, // Brotli flush BROTLI_OPERATION_PROCESS, BROTLI_OPERATION_FINISH, // Zlib ranges @@ -71,10 +71,22 @@ const { // Option validation helpers (matching lib/zlib.js validation patterns) // --------------------------------------------------------------------------- +// Default output buffer size for compression transforms. Larger than +// Z_DEFAULT_CHUNK (16KB) to reduce the number of threadpool re-entries +// when the engine has more output than fits in one buffer. 
64KB matches +// BATCH_HWM and the typical input chunk size from pull(). +const DEFAULT_OUTPUT_SIZE = 64 * 1024; + +// Batch high water mark - yield output in chunks of approximately this size. +const BATCH_HWM = DEFAULT_OUTPUT_SIZE; + +// Pre-allocated empty buffer for flush/finalize calls. +const kEmpty = Buffer.alloc(0); + function validateChunkSize(options) { let chunkSize = options.chunkSize; if (!validateFiniteNumber(chunkSize, 'options.chunkSize')) { - chunkSize = Z_DEFAULT_CHUNK; + chunkSize = DEFAULT_OUTPUT_SIZE; } else if (chunkSize < Z_MIN_CHUNK) { throw new ERR_OUT_OF_RANGE('options.chunkSize', `>= ${Z_MIN_CHUNK}`, chunkSize); @@ -111,14 +123,6 @@ function validateParams(params, maxParam, errClass) { } } -// --------------------------------------------------------------------------- -// Batch high water mark - yield output in chunks of approximately this size. -// --------------------------------------------------------------------------- -const BATCH_HWM = 64 * 1024; - -// Pre-allocated empty buffer for flush/finalize calls. -const kEmpty = Buffer.alloc(0); - // --------------------------------------------------------------------------- // Brotli / Zstd parameter arrays (computed once, reused per init call). // Mirrors the pattern in lib/zlib.js. @@ -166,12 +170,17 @@ function createZlibHandle(mode, options, processCallback, onError) { const windowBits = checkRangesOrGetDefault( options.windowBits, 'options.windowBits', Z_MIN_WINDOWBITS, Z_MAX_WINDOWBITS, Z_DEFAULT_WINDOWBITS); + // Default compression level 4 (not Z_DEFAULT_COMPRESSION which maps to + // level 6). Level 4 is ~1.5x faster with only ~5-10% worse compression + // ratio - the sweet spot for streaming and HTTP content-encoding. const level = checkRangesOrGetDefault( options.level, 'options.level', - Z_MIN_LEVEL, Z_MAX_LEVEL, Z_DEFAULT_COMPRESSION); + Z_MIN_LEVEL, Z_MAX_LEVEL, 4); + // memLevel 9 uses ~128KB more memory than 8 but provides faster hash + // lookups during compression. 
Negligible memory cost for the speed gain. const memLevel = checkRangesOrGetDefault( options.memLevel, 'options.memLevel', - Z_MIN_MEMLEVEL, Z_MAX_MEMLEVEL, Z_DEFAULT_MEMLEVEL); + Z_MIN_MEMLEVEL, Z_MAX_MEMLEVEL, 9); const strategy = checkRangesOrGetDefault( options.strategy, 'options.strategy', Z_DEFAULT_STRATEGY, Z_FIXED, Z_DEFAULT_STRATEGY); @@ -204,7 +213,17 @@ function createBrotliHandle(mode, options, processCallback, onError) { const writeState = new Uint32Array(2); TypedArrayPrototypeFill(brotliInitParamsArray, -1); + // Streaming-appropriate defaults: quality 6 (not 11) and lgwin 20 (1MB, + // not 4MB). Quality 11 is intended for offline/build-time compression + // and allocates ~400MB of internal state. Quality 6 is ~10x faster with + // only ~10-15% worse compression ratio - the standard for dynamic HTTP + // content-encoding (nginx, Caddy, Cloudflare all use 4-6). + if (mode === BROTLI_ENCODE) { + brotliInitParamsArray[constants.BROTLI_PARAM_QUALITY] = 6; + brotliInitParamsArray[constants.BROTLI_PARAM_LGWIN] = 20; + } if (options.params) { + // User-supplied params override the defaults above. const params = options.params; const keys = ObjectKeys(params); for (let i = 0; i < keys.length; i++) { @@ -287,6 +306,7 @@ function createZstdHandle(mode, options, processCallback, onError) { function makeZlibTransform(createHandleFn, processFlag, finishFlag) { return { __proto__: null, + [kTrustedTransform]: true, transform: async function*(source, options) { const { signal } = options; @@ -297,7 +317,7 @@ function makeZlibTransform(createHandleFn, processFlag, finishFlag) { let outBuf; let outOffset = 0; let chunkSize; - const pending = []; + let pending = []; let pendingBytes = 0; // Current write operation state (read by the callback for looping). 
@@ -313,16 +333,30 @@ function makeZlibTransform(createHandleFn, processFlag, finishFlag) { const availOut = writeState[0]; const availInAfter = writeState[1]; const have = writeAvailOutBefore - availOut; + const bufferExhausted = availOut === 0 || outOffset + have >= chunkSize; if (have > 0) { - ArrayPrototypePush(pending, - TypedArrayPrototypeSlice(outBuf, outOffset, outOffset + have)); + if (bufferExhausted && outOffset === 0) { + // Entire buffer filled from start - yield directly, no copy. + ArrayPrototypePush(pending, outBuf); + } else if (bufferExhausted) { + // Tail of buffer filled and buffer is being replaced - + // subarray is safe since outBuf reference is overwritten below. + ArrayPrototypePush(pending, + outBuf.subarray(outOffset, outOffset + have)); + } else { + // Partial fill, buffer will be reused - must copy. + ArrayPrototypePush(pending, + TypedArrayPrototypeSlice(outBuf, + outOffset, + outOffset + have)); + } pendingBytes += have; outOffset += have; } // Reallocate output buffer if exhausted. - if (availOut === 0 || outOffset >= chunkSize) { + if (bufferExhausted) { outBuf = Buffer.allocUnsafe(chunkSize); outOffset = 0; } @@ -397,14 +431,16 @@ function makeZlibTransform(createHandleFn, processFlag, finishFlag) { handle.buffer = input; handle.write(flushFlag, - input, 0, TypedArrayPrototypeGetByteLength(input), + input, 0, writeAvailIn, outBuf, outOffset, writeAvailOutBefore); return promise; } function drainBatch() { if (pendingBytes <= BATCH_HWM) { - const batch = ArrayPrototypeSplice(pending, 0, pending.length); + // Swap instead of splice - avoids copying the array. 
+ const batch = pending; + pending = []; pendingBytes = 0; return batch; } @@ -413,8 +449,9 @@ function makeZlibTransform(createHandleFn, processFlag, finishFlag) { while (pending.length > 0 && batchBytes < BATCH_HWM) { const buf = ArrayPrototypeShift(pending); ArrayPrototypePush(batch, buf); - batchBytes += TypedArrayPrototypeGetByteLength(buf); - pendingBytes -= TypedArrayPrototypeGetByteLength(buf); + const len = TypedArrayPrototypeGetByteLength(buf); + batchBytes += len; + pendingBytes -= len; } return batch; } @@ -488,7 +525,152 @@ function makeZlibTransform(createHandleFn, processFlag, finishFlag) { // Compression factories // --------------------------------------------------------------------------- -function compressGzip(options = { __proto__: null }) { +// --------------------------------------------------------------------------- +// Core: makeZlibTransformSync +// +// Synchronous counterpart to makeZlibTransform. Uses handle.writeSync() +// which runs compression directly on the main thread (no threadpool). +// Returns a stateful sync transform (generator function). +// --------------------------------------------------------------------------- +function makeZlibTransformSync(createHandleFn, processFlag, finishFlag) { + return { + __proto__: null, + transform: function*(source) { + // The processCallback is never called in sync mode, but handle.init() + // requires it. Pass a no-op. 
+ let error = null; + function onError(message, errno, code) { + error = genericNodeError(message, { __proto__: null, errno, code }); + error.errno = errno; + error.code = code; + } + + const result = createHandleFn(() => {}, onError); + const handle = result.handle; + const writeState = result.writeState; + const chunkSize = result.chunkSize; + let outBuf = Buffer.allocUnsafe(chunkSize); + let outOffset = 0; + let pending = []; + let pendingBytes = 0; + + function processSyncInput(input, flushFlag) { + let inOff = 0; + let availIn = TypedArrayPrototypeGetByteLength(input); + let availOutBefore = chunkSize - outOffset; + + handle.writeSync(flushFlag, + input, inOff, availIn, + outBuf, outOffset, availOutBefore); + if (error) throw error; + + while (true) { + const availOut = writeState[0]; + const availInAfter = writeState[1]; + const have = availOutBefore - availOut; + const bufferExhausted = availOut === 0 || + outOffset + have >= chunkSize; + + if (have > 0) { + if (bufferExhausted && outOffset === 0) { + // Entire buffer filled - yield directly, no copy. + ArrayPrototypePush(pending, outBuf); + } else if (bufferExhausted) { + // Tail filled, buffer being replaced - subarray is safe. + ArrayPrototypePush(pending, + outBuf.subarray(outOffset, outOffset + have)); + } else { + // Partial fill, buffer reused - must copy. + ArrayPrototypePush(pending, + TypedArrayPrototypeSlice(outBuf, + outOffset, + outOffset + have)); + } + pendingBytes += have; + outOffset += have; + } + + if (bufferExhausted) { + outBuf = Buffer.allocUnsafe(chunkSize); + outOffset = 0; + } + + if (availOut === 0) { + // Engine has more output - loop. + const consumed = availIn - availInAfter; + inOff += consumed; + availIn = availInAfter; + availOutBefore = chunkSize - outOffset; + + handle.writeSync(flushFlag, + input, inOff, availIn, + outBuf, outOffset, availOutBefore); + if (error) throw error; + continue; + } + + // All input consumed. 
+ break; + } + } + + function drainBatch() { + if (pendingBytes <= BATCH_HWM) { + const batch = pending; + pending = []; + pendingBytes = 0; + return batch; + } + const batch = []; + let batchBytes = 0; + while (pending.length > 0 && batchBytes < BATCH_HWM) { + const buf = ArrayPrototypeShift(pending); + const len = TypedArrayPrototypeGetByteLength(buf); + ArrayPrototypePush(batch, buf); + batchBytes += len; + pendingBytes -= len; + } + return batch; + } + + try { + for (const batch of source) { + if (batch === null) { + // Flush signal - finalize the engine. + processSyncInput(Buffer.alloc(0), finishFlag); + while (pending.length > 0) { + yield drainBatch(); + } + continue; + } + + for (let i = 0; i < batch.length; i++) { + processSyncInput(batch[i], processFlag); + } + + if (pendingBytes >= BATCH_HWM) { + while (pending.length > 0 && pendingBytes >= BATCH_HWM) { + yield drainBatch(); + } + } + if (pending.length > 0) { + yield drainBatch(); + } + } + } finally { + handle.close(); + } + }, + }; +} + +// --------------------------------------------------------------------------- +// Async compression factories +// --------------------------------------------------------------------------- + +const kNullPrototype = { __proto__: null }; + +function compressGzip(options = kNullPrototype) { validateObject(options, 'options'); return makeZlibTransform( (cb, onErr) => createZlibHandle(GZIP, options, cb, onErr), @@ -496,7 +678,7 @@ function compressGzip(options = { __proto__: null }) { ); } -function compressDeflate(options = { __proto__: null }) { +function compressDeflate(options = kNullPrototype) { validateObject(options, 'options'); return makeZlibTransform( (cb, onErr) => createZlibHandle(DEFLATE, options, cb, onErr), @@ -504,7 +686,7 @@ function compressDeflate(options = { __proto__: null }) { ); } -function compressBrotli(options = { __proto__: null }) { +function compressBrotli(options = kNullPrototype) { validateObject(options, 'options'); return 
makeZlibTransform( (cb, onErr) => createBrotliHandle(BROTLI_ENCODE, options, cb, onErr), @@ -512,7 +694,7 @@ function compressBrotli(options = { __proto__: null }) { ); } -function compressZstd(options = { __proto__: null }) { +function compressZstd(options = kNullPrototype) { validateObject(options, 'options'); return makeZlibTransform( (cb, onErr) => createZstdHandle(ZSTD_COMPRESS, options, cb, onErr), @@ -524,7 +706,7 @@ function compressZstd(options = { __proto__: null }) { // Decompression factories // --------------------------------------------------------------------------- -function decompressGzip(options = { __proto__: null }) { +function decompressGzip(options = kNullPrototype) { validateObject(options, 'options'); return makeZlibTransform( (cb, onErr) => createZlibHandle(GUNZIP, options, cb, onErr), @@ -532,7 +714,7 @@ function decompressGzip(options = { __proto__: null }) { ); } -function decompressDeflate(options = { __proto__: null }) { +function decompressDeflate(options = kNullPrototype) { validateObject(options, 'options'); return makeZlibTransform( (cb, onErr) => createZlibHandle(INFLATE, options, cb, onErr), @@ -540,7 +722,7 @@ function decompressDeflate(options = { __proto__: null }) { ); } -function decompressBrotli(options = { __proto__: null }) { +function decompressBrotli(options = kNullPrototype) { validateObject(options, 'options'); return makeZlibTransform( (cb, onErr) => createBrotliHandle(BROTLI_DECODE, options, cb, onErr), @@ -548,7 +730,7 @@ function decompressBrotli(options = { __proto__: null }) { ); } -function decompressZstd(options = { __proto__: null }) { +function decompressZstd(options = kNullPrototype) { validateObject(options, 'options'); return makeZlibTransform( (cb, onErr) => createZstdHandle(ZSTD_DECOMPRESS, options, cb, onErr), @@ -556,13 +738,93 @@ function decompressZstd(options = { __proto__: null }) { ); } +// --------------------------------------------------------------------------- +// Sync compression factories 
+// --------------------------------------------------------------------------- + +function compressGzipSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZlibHandle(GZIP, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, + ); +} + +function compressDeflateSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZlibHandle(DEFLATE, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, + ); +} + +function compressBrotliSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createBrotliHandle(BROTLI_ENCODE, options, cb, onErr), + BROTLI_OPERATION_PROCESS, BROTLI_OPERATION_FINISH, + ); +} + +function compressZstdSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZstdHandle(ZSTD_COMPRESS, options, cb, onErr), + ZSTD_e_continue, ZSTD_e_end, + ); +} + +// --------------------------------------------------------------------------- +// Sync decompression factories +// --------------------------------------------------------------------------- + +function decompressGzipSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZlibHandle(GUNZIP, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, + ); +} + +function decompressDeflateSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZlibHandle(INFLATE, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, + ); +} + +function decompressBrotliSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createBrotliHandle(BROTLI_DECODE, options, cb, onErr), + BROTLI_OPERATION_PROCESS, BROTLI_OPERATION_FINISH, + ); +} + +function decompressZstdSync(options = kNullPrototype) { + 
validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZstdHandle(ZSTD_DECOMPRESS, options, cb, onErr), + ZSTD_e_continue, ZSTD_e_end, + ); +} + module.exports = { compressBrotli, + compressBrotliSync, compressDeflate, + compressDeflateSync, compressGzip, + compressGzipSync, compressZstd, + compressZstdSync, decompressBrotli, + decompressBrotliSync, decompressDeflate, + decompressDeflateSync, decompressGzip, + decompressGzipSync, decompressZstd, + decompressZstdSync, }; diff --git a/lib/internal/streams/iter/types.js b/lib/internal/streams/iter/types.js index 3f936a37a69d1d..c205db00e3782a 100644 --- a/lib/internal/streams/iter/types.js +++ b/lib/internal/streams/iter/types.js @@ -1,6 +1,7 @@ 'use strict'; const { + Symbol, SymbolFor, } = primordials; @@ -43,9 +44,21 @@ const shareSyncProtocol = SymbolFor('Stream.shareSyncProtocol'); */ const drainableProtocol = SymbolFor('Stream.drainableProtocol'); +/** + * Internal sentinel for trusted stateful transforms. A transform object + * with [kTrustedTransform] = true signals that: + * 1. It handles source exhaustion (done) internally - no withFlushAsync + * wrapper needed. + * 2. It always yields valid Uint8Array[] batches - no isUint8ArrayBatch + * validation needed on each yield. + * This is NOT a public protocol symbol - it uses Symbol() not Symbol.for(). 
+ */ +const kTrustedTransform = Symbol('kTrustedTransform'); + module.exports = { broadcastProtocol, drainableProtocol, + kTrustedTransform, shareProtocol, shareSyncProtocol, toAsyncStreamable, diff --git a/lib/stream/iter.js b/lib/stream/iter.js index 8d25a7eb3a50ab..7ed8c858394c90 100644 --- a/lib/stream/iter.js +++ b/lib/stream/iter.js @@ -53,13 +53,21 @@ const { // Transforms const { compressGzip, + compressGzipSync, compressDeflate, + compressDeflateSync, compressBrotli, + compressBrotliSync, compressZstd, + compressZstdSync, decompressGzip, + decompressGzipSync, decompressDeflate, + decompressDeflateSync, decompressBrotli, + decompressBrotliSync, decompressZstd, + decompressZstdSync, } = require('internal/streams/iter/transform'); // Multi-consumer @@ -127,7 +135,7 @@ const Stream = ObjectFreeze({ // Drain utility for event source integration ondrain, - // Compression / decompression transforms + // Compression / decompression transforms (async) compressGzip, compressDeflate, compressBrotli, @@ -137,6 +145,16 @@ const Stream = ObjectFreeze({ decompressBrotli, decompressZstd, + // Compression / decompression transforms (sync) + compressGzipSync, + compressDeflateSync, + compressBrotliSync, + compressZstdSync, + decompressGzipSync, + decompressDeflateSync, + decompressBrotliSync, + decompressZstdSync, + // Protocol symbols toStreamable, toAsyncStreamable, @@ -200,7 +218,7 @@ module.exports = { tapSync, ondrain, - // Compression / decompression transforms + // Compression / decompression transforms (async) compressGzip, compressDeflate, compressBrotli, @@ -209,4 +227,14 @@ module.exports = { decompressDeflate, decompressBrotli, decompressZstd, + + // Compression / decompression transforms (sync) + compressGzipSync, + compressDeflateSync, + compressBrotliSync, + compressZstdSync, + decompressGzipSync, + decompressDeflateSync, + decompressBrotliSync, + decompressZstdSync, }; diff --git a/src/node_file.cc b/src/node_file.cc index 0fe01e8b08127c..79e047438170a3 
100644 --- a/src/node_file.cc +++ b/src/node_file.cc @@ -49,7 +49,7 @@ #include #if defined(__MINGW32__) || defined(_MSC_VER) -# include +#include #endif #ifdef _WIN32 @@ -88,7 +88,7 @@ using v8::Undefined; using v8::Value; #ifndef S_ISDIR -# define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR) +#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR) #endif #ifdef __POSIX__ @@ -203,8 +203,7 @@ static const char* get_fs_func_name_by_type(uv_fs_type req_type) { // We sometimes need to convert a C++ lambda function to a raw C-style function. // This is helpful, because ReqWrap::Dispatch() does not recognize lambda // functions, and thus does not wrap them properly. -typedef void(*uv_fs_callback_t)(uv_fs_t*); - +typedef void (*uv_fs_callback_t)(uv_fs_t*); void FSContinuationData::MemoryInfo(MemoryTracker* tracker) const { tracker->TrackField("paths", paths_); @@ -336,7 +335,7 @@ BaseObjectPtr FileHandle::TransferData::Deserialize( int fd = fd_; fd_ = -1; - return BaseObjectPtr { FileHandle::New(bd, fd) }; + return BaseObjectPtr{FileHandle::New(bd, fd)}; } // Throw an exception if the file handle has not yet been closed. @@ -431,7 +430,7 @@ FileHandle::CloseReq::CloseReq(Environment* env, Local obj, Local promise, Local ref) - : ReqWrap(env, obj, AsyncWrap::PROVIDER_FILEHANDLECLOSEREQ) { + : ReqWrap(env, obj, AsyncWrap::PROVIDER_FILEHANDLECLOSEREQ) { promise_.Reset(env->isolate(), promise); ref_.Reset(env->isolate(), ref); } @@ -447,8 +446,6 @@ void FileHandle::CloseReq::MemoryInfo(MemoryTracker* tracker) const { tracker->TrackField("ref", ref_); } - - // Closes this FileHandle asynchronously and returns a Promise that will be // resolved when the callback is invoked, or rejects with a UVException if // there was a problem closing the fd. 
This is the preferred mechanism for @@ -476,8 +473,10 @@ MaybeLocal FileHandle::ClosePromise() { Local promise = resolver.As(); Local close_req_obj; - if (!env()->fdclose_constructor_template() - ->NewInstance(env()->context()).ToLocal(&close_req_obj)) { + if (!env() + ->fdclose_constructor_template() + ->NewInstance(env()->context()) + .ToLocal(&close_req_obj)) { return MaybeLocal(); } closing_ = true; @@ -520,6 +519,27 @@ void FileHandle::Close(const FunctionCallbackInfo& args) { args.GetReturnValue().Set(ret); } +void FileHandle::CloseSync(const FunctionCallbackInfo& args) { + FileHandle* fd; + ASSIGN_OR_RETURN_UNWRAP(&fd, args.This()); + + // Already closed or closing - no-op. + if (fd->closed_ || fd->closing_) return; + + uv_fs_t req; + CHECK_NE(fd->fd_, -1); + FS_SYNC_TRACE_BEGIN(close); + int ret = uv_fs_close(fd->env()->event_loop(), &req, fd->fd_, nullptr); + FS_SYNC_TRACE_END(close); + uv_fs_req_cleanup(&req); + + fd->AfterClose(); + + if (ret < 0) { + Environment* env = fd->env(); + env->ThrowUVException(ret, "close"); + } +} void FileHandle::ReleaseFD(const FunctionCallbackInfo& args) { FileHandle* fd; @@ -538,8 +558,7 @@ void FileHandle::AfterClose() { closing_ = false; closed_ = true; fd_ = -1; - if (reading_ && !persistent().IsEmpty()) - EmitRead(UV_EOF); + if (reading_ && !persistent().IsEmpty()) EmitRead(UV_EOF); } void FileHandleReadWrap::MemoryInfo(MemoryTracker* tracker) const { @@ -548,17 +567,15 @@ void FileHandleReadWrap::MemoryInfo(MemoryTracker* tracker) const { } FileHandleReadWrap::FileHandleReadWrap(FileHandle* handle, Local obj) - : ReqWrap(handle->env(), obj, AsyncWrap::PROVIDER_FSREQCALLBACK), - file_handle_(handle) {} + : ReqWrap(handle->env(), obj, AsyncWrap::PROVIDER_FSREQCALLBACK), + file_handle_(handle) {} int FileHandle::ReadStart() { - if (!IsAlive() || IsClosing()) - return UV_EOF; + if (!IsAlive() || IsClosing()) return UV_EOF; reading_ = true; - if (current_read_) - return 0; + if (current_read_) return 0; BaseObjectPtr 
read_wrap; @@ -604,67 +621,65 @@ int FileHandle::ReadStart() { current_read_ = std::move(read_wrap); FS_ASYNC_TRACE_BEGIN0(UV_FS_READ, current_read_.get()) - current_read_->Dispatch(uv_fs_read, - fd_, - ¤t_read_->buffer_, - 1, - read_offset_, - uv_fs_callback_t{[](uv_fs_t* req) { - FileHandle* handle; - { - FileHandleReadWrap* req_wrap = FileHandleReadWrap::from_req(req); - FS_ASYNC_TRACE_END1( - req->fs_type, req_wrap, "result", static_cast(req->result)) - handle = req_wrap->file_handle_; - CHECK_EQ(handle->current_read_.get(), req_wrap); - } - - // ReadStart() checks whether current_read_ is set to determine whether - // a read is in progress. Moving it into a local variable makes sure that - // the ReadStart() call below doesn't think we're still actively reading. - BaseObjectPtr read_wrap = - std::move(handle->current_read_); - - ssize_t result = req->result; - uv_buf_t buffer = read_wrap->buffer_; - - uv_fs_req_cleanup(req); + current_read_->Dispatch( + uv_fs_read, + fd_, + ¤t_read_->buffer_, + 1, + read_offset_, + uv_fs_callback_t{[](uv_fs_t* req) { + FileHandle* handle; + { + FileHandleReadWrap* req_wrap = FileHandleReadWrap::from_req(req); + FS_ASYNC_TRACE_END1( + req->fs_type, req_wrap, "result", static_cast(req->result)) + handle = req_wrap->file_handle_; + CHECK_EQ(handle->current_read_.get(), req_wrap); + } - // Push the read wrap back to the freelist, or let it be destroyed - // once we’re exiting the current scope. - constexpr size_t kWantedFreelistFill = 100; - auto& freelist = handle->binding_data_->file_handle_read_wrap_freelist; - if (freelist.size() < kWantedFreelistFill) { - read_wrap->Reset(); - freelist.emplace_back(std::move(read_wrap)); - } + // ReadStart() checks whether current_read_ is set to determine whether + // a read is in progress. Moving it into a local variable makes sure + // that the ReadStart() call below doesn't think we're still actively + // reading. 
+ BaseObjectPtr read_wrap = + std::move(handle->current_read_); + + ssize_t result = req->result; + uv_buf_t buffer = read_wrap->buffer_; + + uv_fs_req_cleanup(req); + + // Push the read wrap back to the freelist, or let it be destroyed + // once we’re exiting the current scope. + constexpr size_t kWantedFreelistFill = 100; + auto& freelist = handle->binding_data_->file_handle_read_wrap_freelist; + if (freelist.size() < kWantedFreelistFill) { + read_wrap->Reset(); + freelist.emplace_back(std::move(read_wrap)); + } - if (result >= 0) { - // Read at most as many bytes as we originally planned to. - if (handle->read_length_ >= 0 && handle->read_length_ < result) - result = handle->read_length_; + if (result >= 0) { + // Read at most as many bytes as we originally planned to. + if (handle->read_length_ >= 0 && handle->read_length_ < result) + result = handle->read_length_; - // If we read data and we have an expected length, decrease it by - // how much we have read. - if (handle->read_length_ >= 0) - handle->read_length_ -= result; + // If we read data and we have an expected length, decrease it by + // how much we have read. + if (handle->read_length_ >= 0) handle->read_length_ -= result; - // If we have an offset, increase it by how much we have read. - if (handle->read_offset_ >= 0) - handle->read_offset_ += result; - } + // If we have an offset, increase it by how much we have read. + if (handle->read_offset_ >= 0) handle->read_offset_ += result; + } - // Reading 0 bytes from a file always means EOF, or that we reached - // the end of the requested range. - if (result == 0) - result = UV_EOF; + // Reading 0 bytes from a file always means EOF, or that we reached + // the end of the requested range. + if (result == 0) result = UV_EOF; - handle->EmitRead(result, buffer); + handle->EmitRead(result, buffer); - // Start over, if EmitRead() didn’t tell us to stop. - if (handle->reading_) - handle->ReadStart(); - }}); + // Start over, if EmitRead() didn’t tell us to stop. 
+ if (handle->reading_) handle->ReadStart(); + }}); return 0; } @@ -689,23 +704,23 @@ int FileHandle::DoShutdown(ShutdownWrap* req_wrap) { closing_ = true; CHECK_NE(fd_, -1); FS_ASYNC_TRACE_BEGIN0(UV_FS_CLOSE, wrap) - wrap->Dispatch(uv_fs_close, fd_, uv_fs_callback_t{[](uv_fs_t* req) { - FileHandleCloseWrap* wrap = static_cast( - FileHandleCloseWrap::from_req(req)); - FS_ASYNC_TRACE_END1( - req->fs_type, wrap, "result", static_cast(req->result)) - FileHandle* handle = static_cast(wrap->stream()); - handle->AfterClose(); - - int result = static_cast(req->result); - uv_fs_req_cleanup(req); - wrap->Done(result); - }}); + wrap->Dispatch( + uv_fs_close, fd_, uv_fs_callback_t{[](uv_fs_t* req) { + FileHandleCloseWrap* wrap = static_cast( + FileHandleCloseWrap::from_req(req)); + FS_ASYNC_TRACE_END1( + req->fs_type, wrap, "result", static_cast(req->result)) + FileHandle* handle = static_cast(wrap->stream()); + handle->AfterClose(); + + int result = static_cast(req->result); + uv_fs_req_cleanup(req); + wrap->Done(result); + }}); return 0; } - void FSReqCallback::Reject(Local reject) { MakeCallback(env()->oncomplete_string(), 1, &reject); } @@ -719,10 +734,7 @@ void FSReqCallback::ResolveStatFs(const uv_statfs_t* stat) { } void FSReqCallback::Resolve(Local value) { - Local argv[2] { - Null(env()->isolate()), - value - }; + Local argv[2]{Null(env()->isolate()), value}; MakeCallback(env()->oncomplete_string(), value->IsUndefined() ? 1 : arraysize(argv), argv); @@ -768,7 +780,7 @@ void FSReqAfterScope::Clear() { // which is also why the errors should have been constructed // in JS for more flexibility. 
void FSReqAfterScope::Reject(uv_fs_t* req) { - BaseObjectPtr wrap { wrap_ }; + BaseObjectPtr wrap{wrap_}; Local exception = UVException(wrap_->env()->isolate(), static_cast(req->result), wrap_->syscall(), @@ -796,8 +808,7 @@ void AfterNoArgs(uv_fs_t* req) { FSReqAfterScope after(req_wrap, req); FS_ASYNC_TRACE_END1( req->fs_type, req_wrap, "result", static_cast(req->result)) - if (after.Proceed()) - req_wrap->Resolve(Undefined(req_wrap->env()->isolate())); + if (after.Proceed()) req_wrap->Resolve(Undefined(req_wrap->env()->isolate())); } void AfterStat(uv_fs_t* req) { @@ -949,8 +960,7 @@ void AfterScanDir(uv_fs_t* req) { uv_dirent_t ent; r = uv_fs_scandir_next(req, &ent); - if (r == UV_EOF) - break; + if (r == UV_EOF) break; if (r != 0) { return req_wrap->Reject( UVException(isolate, r, nullptr, req_wrap->syscall(), req->path)); @@ -1005,8 +1015,15 @@ void Access(const FunctionCallbackInfo& args) { path.ToStringView()); FS_ASYNC_TRACE_BEGIN1( UV_FS_ACCESS, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "access", UTF8, AfterNoArgs, - uv_fs_access, *path, mode); + AsyncCall(env, + req_wrap_async, + args, + "access", + UTF8, + AfterNoArgs, + uv_fs_access, + *path, + mode); } else { // access(path, mode) THROW_IF_INSUFFICIENT_PERMISSIONS( env, permission::PermissionScope::kFileSystemRead, path.ToStringView()); @@ -1033,8 +1050,8 @@ void Close(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 1); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_CLOSE, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "close", UTF8, AfterNoArgs, - uv_fs_close, fd); + AsyncCall( + env, req_wrap_async, args, "close", UTF8, AfterNoArgs, uv_fs_close, fd); } else { // close(fd) FSReqWrapSync req_wrap_sync("close"); FS_SYNC_TRACE_BEGIN(close); @@ -1172,7 +1189,9 @@ static void Stat(const FunctionCallbackInfo& args) { if (is_uv_error(result)) { return; } - Local arr = FillGlobalStatsArray(binding_data, 
use_bigint, + Local arr = FillGlobalStatsArray( + binding_data, + use_bigint, static_cast(req_wrap_sync.req.ptr)); args.GetReturnValue().Set(arr); } @@ -1196,8 +1215,14 @@ static void LStat(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_LSTAT, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "lstat", UTF8, AfterStat, - uv_fs_lstat, *path); + AsyncCall(env, + req_wrap_async, + args, + "lstat", + UTF8, + AfterStat, + uv_fs_lstat, + *path); } else { // lstat(path, use_bigint, undefined, throw_if_no_entry) bool do_not_throw_if_no_entry = args[3]->IsFalse(); FSReqWrapSync req_wrap_sync("lstat", *path); @@ -1214,7 +1239,9 @@ static void LStat(const FunctionCallbackInfo& args) { return; } - Local arr = FillGlobalStatsArray(binding_data, use_bigint, + Local arr = FillGlobalStatsArray( + binding_data, + use_bigint, static_cast(req_wrap_sync.req.ptr)); args.GetReturnValue().Set(arr); } @@ -1238,8 +1265,8 @@ static void FStat(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 2, use_bigint); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FSTAT, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "fstat", UTF8, AfterStat, - uv_fs_fstat, fd); + AsyncCall( + env, req_wrap_async, args, "fstat", UTF8, AfterStat, uv_fs_fstat, fd); } else { // fstat(fd, use_bigint, undefined, do_not_throw_error) bool do_not_throw_error = args[2]->IsTrue(); const auto should_throw = [do_not_throw_error](int result) { @@ -1254,7 +1281,9 @@ static void FStat(const FunctionCallbackInfo& args) { return; } - Local arr = FillGlobalStatsArray(binding_data, use_bigint, + Local arr = FillGlobalStatsArray( + binding_data, + use_bigint, static_cast(req_wrap_sync.req.ptr)); args.GetReturnValue().Set(arr); } @@ -1345,8 +1374,18 @@ static void Symlink(const FunctionCallbackInfo& args) { TRACE_STR_COPY(*target), "path", TRACE_STR_COPY(*path)) - AsyncDestCall(env, req_wrap_async, args, 
"symlink", *path, path.length(), - UTF8, AfterNoArgs, uv_fs_symlink, *target, *path, flags); + AsyncDestCall(env, + req_wrap_async, + args, + "symlink", + *path, + path.length(), + UTF8, + AfterNoArgs, + uv_fs_symlink, + *target, + *path, + flags); } else { // symlink(target, path, flags, undefined, ctx) FSReqWrapSync req_wrap_sync("symlink", *target, *path); FS_SYNC_TRACE_BEGIN(symlink); @@ -1401,8 +1440,17 @@ static void Link(const FunctionCallbackInfo& args) { TRACE_STR_COPY(*src), "dest", TRACE_STR_COPY(*dest)) - AsyncDestCall(env, req_wrap_async, args, "link", *dest, dest.length(), UTF8, - AfterNoArgs, uv_fs_link, *src, *dest); + AsyncDestCall(env, + req_wrap_async, + args, + "link", + *dest, + dest.length(), + UTF8, + AfterNoArgs, + uv_fs_link, + *src, + *dest); } else { // link(src, dest) // To avoid bypass the link target should be allowed to read and write THROW_IF_INSUFFICIENT_PERMISSIONS( @@ -1439,8 +1487,14 @@ static void ReadLink(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_READLINK, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "readlink", encoding, AfterStringPtr, - uv_fs_readlink, *path); + AsyncCall(env, + req_wrap_async, + args, + "readlink", + encoding, + AfterStringPtr, + uv_fs_readlink, + *path); } else { // readlink(path, encoding) FSReqWrapSync req_wrap_sync("readlink", *path); FS_SYNC_TRACE_BEGIN(readlink); @@ -1499,9 +1553,17 @@ static void Rename(const FunctionCallbackInfo& args) { TRACE_STR_COPY(*old_path), "new_path", TRACE_STR_COPY(*new_path)) - AsyncDestCall(env, req_wrap_async, args, "rename", *new_path, - new_path.length(), UTF8, AfterNoArgs, uv_fs_rename, - *old_path, *new_path); + AsyncDestCall(env, + req_wrap_async, + args, + "rename", + *new_path, + new_path.length(), + UTF8, + AfterNoArgs, + uv_fs_rename, + *old_path, + *new_path); } else { // rename(old_path, new_path) THROW_IF_INSUFFICIENT_PERMISSIONS( env, 
permission::PermissionScope::kFileSystemRead, view_old_path); @@ -1537,8 +1599,15 @@ static void FTruncate(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 2); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FTRUNCATE, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "ftruncate", UTF8, AfterNoArgs, - uv_fs_ftruncate, fd, len); + AsyncCall(env, + req_wrap_async, + args, + "ftruncate", + UTF8, + AfterNoArgs, + uv_fs_ftruncate, + fd, + len); } else { // ftruncate(fd, len) FSReqWrapSync req_wrap_sync("ftruncate"); FS_SYNC_TRACE_BEGIN(ftruncate); @@ -1562,8 +1631,14 @@ static void Fdatasync(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 1); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FDATASYNC, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "fdatasync", UTF8, AfterNoArgs, - uv_fs_fdatasync, fd); + AsyncCall(env, + req_wrap_async, + args, + "fdatasync", + UTF8, + AfterNoArgs, + uv_fs_fdatasync, + fd); } else { // fdatasync(fd) FSReqWrapSync req_wrap_sync("fdatasync"); FS_SYNC_TRACE_BEGIN(fdatasync); @@ -1587,8 +1662,8 @@ static void Fsync(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 1); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FSYNC, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "fsync", UTF8, AfterNoArgs, - uv_fs_fsync, fd); + AsyncCall( + env, req_wrap_async, args, "fsync", UTF8, AfterNoArgs, uv_fs_fsync, fd); } else { FSReqWrapSync req_wrap_sync("fsync"); FS_SYNC_TRACE_BEGIN(fsync); @@ -1617,8 +1692,14 @@ static void Unlink(const FunctionCallbackInfo& args) { path.ToStringView()); FS_ASYNC_TRACE_BEGIN1( UV_FS_UNLINK, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "unlink", UTF8, AfterNoArgs, - uv_fs_unlink, *path); + AsyncCall(env, + req_wrap_async, + args, + "unlink", + UTF8, + AfterNoArgs, + uv_fs_unlink, + *path); } else { // unlink(path) 
THROW_IF_INSUFFICIENT_PERMISSIONS( env, @@ -1648,8 +1729,14 @@ static void RMDir(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_RMDIR, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "rmdir", UTF8, AfterNoArgs, - uv_fs_rmdir, *path); + AsyncCall(env, + req_wrap_async, + args, + "rmdir", + UTF8, + AfterNoArgs, + uv_fs_rmdir, + *path); } else { // rmdir(path) FSReqWrapSync req_wrap_sync("rmdir", *path); FS_SYNC_TRACE_BEGIN(rmdir); @@ -1809,7 +1896,7 @@ int MKDirpSync(uv_loop_t* loop, if (err == 0 && !S_ISDIR(req->statbuf.st_mode)) { uv_fs_req_cleanup(req); if (orig_err == UV_EEXIST && - req_wrap->continuation_data()->paths().size() > 0) { + req_wrap->continuation_data()->paths().size() > 0) { return UV_ENOTDIR; } return UV_EEXIST; @@ -1825,11 +1912,8 @@ int MKDirpSync(uv_loop_t* loop, return 0; } -int MKDirpAsync(uv_loop_t* loop, - uv_fs_t* req, - const char* path, - int mode, - uv_fs_cb cb) { +int MKDirpAsync( + uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, uv_fs_cb cb) { FSReqBase* req_wrap = FSReqBase::from_req(req); // on the first iteration of algorithm, stash state information. if (req_wrap->continuation_data() == nullptr) { @@ -1840,83 +1924,93 @@ int MKDirpAsync(uv_loop_t* loop, // on each iteration of algorithm, mkdir directory on top of stack. 
std::string next_path = req_wrap->continuation_data()->PopPath(); - int err = uv_fs_mkdir(loop, req, next_path.c_str(), mode, - uv_fs_callback_t{[](uv_fs_t* req) { - FSReqBase* req_wrap = FSReqBase::from_req(req); - Environment* env = req_wrap->env(); - uv_loop_t* loop = env->event_loop(); - std::string path = req->path; - int err = static_cast(req->result); - - while (true) { - switch (err) { - // Note: uv_fs_req_cleanup in terminal paths will be called by - // FSReqAfterScope::~FSReqAfterScope() - case 0: { - if (req_wrap->continuation_data()->paths().empty()) { - req_wrap->continuation_data()->MaybeSetFirstPath(path); - req_wrap->continuation_data()->Done(0); - } else { - req_wrap->continuation_data()->MaybeSetFirstPath(path); - uv_fs_req_cleanup(req); - MKDirpAsync(loop, req, path.c_str(), - req_wrap->continuation_data()->mode(), nullptr); - } - break; - } - case UV_EACCES: - case UV_ENOSPC: - case UV_ENOTDIR: - case UV_EPERM: { - req_wrap->continuation_data()->Done(err); - break; - } - case UV_ENOENT: { - std::string dirname = - path.substr(0, path.find_last_of(kPathSeparator)); - if (dirname != path) { - req_wrap->continuation_data()->PushPath(path); - req_wrap->continuation_data()->PushPath(std::move(dirname)); - } else if (req_wrap->continuation_data()->paths().empty()) { - err = UV_EEXIST; - continue; - } - uv_fs_req_cleanup(req); - MKDirpAsync(loop, req, path.c_str(), - req_wrap->continuation_data()->mode(), nullptr); - break; - } - default: - uv_fs_req_cleanup(req); - // Stash err for use in the callback. 
- req->data = reinterpret_cast(static_cast(err)); - int err = uv_fs_stat(loop, req, path.c_str(), - uv_fs_callback_t{[](uv_fs_t* req) { - FSReqBase* req_wrap = FSReqBase::from_req(req); - int err = static_cast(req->result); - if (reinterpret_cast(req->data) == UV_EEXIST && - req_wrap->continuation_data()->paths().size() > 0) { - if (err == 0 && S_ISDIR(req->statbuf.st_mode)) { - Environment* env = req_wrap->env(); - uv_loop_t* loop = env->event_loop(); - std::string path = req->path; + int err = uv_fs_mkdir( + loop, req, next_path.c_str(), mode, uv_fs_callback_t{[](uv_fs_t* req) { + FSReqBase* req_wrap = FSReqBase::from_req(req); + Environment* env = req_wrap->env(); + uv_loop_t* loop = env->event_loop(); + std::string path = req->path; + int err = static_cast(req->result); + + while (true) { + switch (err) { + // Note: uv_fs_req_cleanup in terminal paths will be called by + // FSReqAfterScope::~FSReqAfterScope() + case 0: { + if (req_wrap->continuation_data()->paths().empty()) { + req_wrap->continuation_data()->MaybeSetFirstPath(path); + req_wrap->continuation_data()->Done(0); + } else { + req_wrap->continuation_data()->MaybeSetFirstPath(path); uv_fs_req_cleanup(req); - MKDirpAsync(loop, req, path.c_str(), - req_wrap->continuation_data()->mode(), nullptr); - return; + MKDirpAsync(loop, + req, + path.c_str(), + req_wrap->continuation_data()->mode(), + nullptr); + } + break; + } + case UV_EACCES: + case UV_ENOSPC: + case UV_ENOTDIR: + case UV_EPERM: { + req_wrap->continuation_data()->Done(err); + break; + } + case UV_ENOENT: { + std::string dirname = + path.substr(0, path.find_last_of(kPathSeparator)); + if (dirname != path) { + req_wrap->continuation_data()->PushPath(path); + req_wrap->continuation_data()->PushPath(std::move(dirname)); + } else if (req_wrap->continuation_data()->paths().empty()) { + err = UV_EEXIST; + continue; } - err = UV_ENOTDIR; + uv_fs_req_cleanup(req); + MKDirpAsync(loop, + req, + path.c_str(), + req_wrap->continuation_data()->mode(), + 
nullptr); + break; } - // verify that the path pointed to is actually a directory. - if (err == 0 && !S_ISDIR(req->statbuf.st_mode)) err = UV_EEXIST; - req_wrap->continuation_data()->Done(err); - }}); - if (err < 0) req_wrap->continuation_data()->Done(err); + default: + uv_fs_req_cleanup(req); + // Stash err for use in the callback. + req->data = reinterpret_cast(static_cast(err)); + int err = uv_fs_stat( + loop, req, path.c_str(), uv_fs_callback_t{[](uv_fs_t* req) { + FSReqBase* req_wrap = FSReqBase::from_req(req); + int err = static_cast(req->result); + if (reinterpret_cast(req->data) == UV_EEXIST && + req_wrap->continuation_data()->paths().size() > 0) { + if (err == 0 && S_ISDIR(req->statbuf.st_mode)) { + Environment* env = req_wrap->env(); + uv_loop_t* loop = env->event_loop(); + std::string path = req->path; + uv_fs_req_cleanup(req); + MKDirpAsync(loop, + req, + path.c_str(), + req_wrap->continuation_data()->mode(), + nullptr); + return; + } + err = UV_ENOTDIR; + } + // verify that the path pointed to is actually a directory. + if (err == 0 && !S_ISDIR(req->statbuf.st_mode)) + err = UV_EEXIST; + req_wrap->continuation_data()->Done(err); + }}); + if (err < 0) req_wrap->continuation_data()->Done(err); + break; + } break; - } - break; - } - }}); + } + }}); return err; } @@ -1945,9 +2039,15 @@ static void MKDir(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_UNLINK, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "mkdir", UTF8, + AsyncCall(env, + req_wrap_async, + args, + "mkdir", + UTF8, mkdirp ? AfterMkdirp : AfterNoArgs, - mkdirp ? MKDirpAsync : uv_fs_mkdir, *path, mode); + mkdirp ? 
MKDirpAsync : uv_fs_mkdir, + *path, + mode); } else { // mkdir(path, mode, recursive) FSReqWrapSync req_wrap_sync("mkdir", *path); FS_SYNC_TRACE_BEGIN(mkdir); @@ -1992,8 +2092,14 @@ static void RealPath(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_REALPATH, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "realpath", encoding, AfterStringPtr, - uv_fs_realpath, *path); + AsyncCall(env, + req_wrap_async, + args, + "realpath", + encoding, + AfterStringPtr, + uv_fs_realpath, + *path); } else { // realpath(path, encoding, undefined, ctx) FSReqWrapSync req_wrap_sync("realpath", *path); FS_SYNC_TRACE_BEGIN(realpath); @@ -2089,8 +2195,7 @@ static void ReadDir(const FunctionCallbackInfo& args) { uv_dirent_t ent; r = uv_fs_scandir_next(&(req_wrap_sync.req), &ent); - if (r == UV_EOF) - break; + if (r == UV_EOF) break; if (is_uv_error(r)) { env->ThrowUVException(r, "scandir", nullptr, *path); return; @@ -2108,13 +2213,10 @@ static void ReadDir(const FunctionCallbackInfo& args) { } } - Local names = Array::New(isolate, name_v.data(), name_v.size()); if (with_types) { Local result[] = { - names, - Array::New(isolate, type_v.data(), type_v.size()) - }; + names, Array::New(isolate, type_v.data(), type_v.size())}; args.GetReturnValue().Set(Array::New(isolate, result, arraysize(result))); } else { args.GetReturnValue().Set(names); @@ -2209,8 +2311,16 @@ static void Open(const FunctionCallbackInfo& args) { req_wrap_async->set_is_plain_open(true); FS_ASYNC_TRACE_BEGIN1( UV_FS_OPEN, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "open", UTF8, AfterInteger, - uv_fs_open, *path, flags, mode); + AsyncCall(env, + req_wrap_async, + args, + "open", + UTF8, + AfterInteger, + uv_fs_open, + *path, + flags, + mode); } else { // open(path, flags, mode) if (CheckOpenPermissions(env, path, flags).IsNothing()) return; FSReqWrapSync req_wrap_sync("open", *path); @@ -2248,8 
+2358,16 @@ static void OpenFileHandle(const FunctionCallbackInfo& args) { if (req_wrap_async != nullptr) { // openFileHandle(path, flags, mode, req) FS_ASYNC_TRACE_BEGIN1( UV_FS_OPEN, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "open", UTF8, AfterOpenFileHandle, - uv_fs_open, *path, flags, mode); + AsyncCall(env, + req_wrap_async, + args, + "open", + UTF8, + AfterOpenFileHandle, + uv_fs_open, + *path, + flags, + mode); } else { // openFileHandle(path, flags, mode, undefined, ctx) CHECK_EQ(argc, 5); FSReqWrapSync req_wrap_sync; @@ -2316,9 +2434,18 @@ static void CopyFile(const FunctionCallbackInfo& args) { TRACE_STR_COPY(*src), "dest", TRACE_STR_COPY(*dest)) - AsyncDestCall(env, req_wrap_async, args, "copyfile", - *dest, dest.length(), UTF8, AfterNoArgs, - uv_fs_copyfile, *src, *dest, flags); + AsyncDestCall(env, + req_wrap_async, + args, + "copyfile", + *dest, + dest.length(), + UTF8, + AfterNoArgs, + uv_fs_copyfile, + *src, + *dest, + flags); } else { // copyFile(src, dest, flags) THROW_IF_INSUFFICIENT_PERMISSIONS( env, permission::PermissionScope::kFileSystemRead, src.ToStringView()); @@ -2379,8 +2506,17 @@ static void WriteBuffer(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 5); if (req_wrap_async != nullptr) { // write(fd, buffer, off, len, pos, req) FS_ASYNC_TRACE_BEGIN0(UV_FS_WRITE, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "write", UTF8, AfterInteger, - uv_fs_write, fd, &uvbuf, 1, pos); + AsyncCall(env, + req_wrap_async, + args, + "write", + UTF8, + AfterInteger, + uv_fs_write, + fd, + &uvbuf, + 1, + pos); } else { // write(fd, buffer, off, len, pos, undefined, ctx) CHECK_EQ(argc, 7); FSReqWrapSync req_wrap_sync; @@ -2404,7 +2540,6 @@ static void WriteBuffer(const FunctionCallbackInfo& args) { } } - // Wrapper for writev(2). 
// // bytesWritten = writev(fd, chunks, position, callback) @@ -2465,7 +2600,6 @@ static void WriteBuffers(const FunctionCallbackInfo& args) { } } - // Wrapper for write(2). // // bytesWritten = write(fd, string, position, enc, callback) @@ -2529,12 +2663,8 @@ static void WriteString(const FunctionCallbackInfo& args) { stack_buffer.SetLengthAndZeroTerminate(len); uv_buf_t uvbuf = uv_buf_init(*stack_buffer, len); FS_ASYNC_TRACE_BEGIN0(UV_FS_WRITE, req_wrap_async) - int err = req_wrap_async->Dispatch(uv_fs_write, - fd, - &uvbuf, - 1, - pos, - AfterInteger); + int err = + req_wrap_async->Dispatch(uv_fs_write, fd, &uvbuf, 1, pos, AfterInteger); if (err < 0) { uv_fs_t* uv_req = req_wrap_async->req(); uv_req->result = err; @@ -2546,13 +2676,11 @@ static void WriteString(const FunctionCallbackInfo& args) { CHECK_EQ(argc, 6); FSReqBase::FSReqBuffer stack_buffer; if (buf == nullptr) { - if (!StringBytes::StorageSize(isolate, value, enc).To(&len)) - return; + if (!StringBytes::StorageSize(isolate, value, enc).To(&len)) return; stack_buffer.AllocateSufficientStorage(len + 1); // StorageSize may return too large a char, so correct the actual length // by the write size - len = StringBytes::Write(isolate, *stack_buffer, - len, args[1], enc); + len = StringBytes::Write(isolate, *stack_buffer, len, args[1], enc); stack_buffer.SetLengthAndZeroTerminate(len); buf = *stack_buffer; } @@ -2696,9 +2824,8 @@ static void Read(const FunctionCallbackInfo& args) { CHECK(Buffer::IsWithinBounds(off, len, buffer_length)); CHECK(IsSafeJsInt(args[4]) || args[4]->IsBigInt()); - const int64_t pos = args[4]->IsNumber() ? - args[4].As()->Value() : - args[4].As()->Int64Value(); + const int64_t pos = args[4]->IsNumber() ? 
args[4].As()->Value() + : args[4].As()->Int64Value(); char* buf = buffer_data + off; uv_buf_t uvbuf = uv_buf_init(buf, len); @@ -2707,8 +2834,17 @@ static void Read(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 5); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_READ, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "read", UTF8, AfterInteger, - uv_fs_read, fd, &uvbuf, 1, pos); + AsyncCall(env, + req_wrap_async, + args, + "read", + UTF8, + AfterInteger, + uv_fs_read, + fd, + &uvbuf, + 1, + pos); } else { // read(fd, buffer, offset, len, pos) FSReqWrapSync req_wrap_sync("read"); FS_SYNC_TRACE_BEGIN(read); @@ -2832,8 +2968,17 @@ static void ReadBuffers(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 3); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_READ, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "read", UTF8, AfterInteger, - uv_fs_read, fd, *iovs, iovs.length(), pos); + AsyncCall(env, + req_wrap_async, + args, + "read", + UTF8, + AfterInteger, + uv_fs_read, + fd, + *iovs, + iovs.length(), + pos); } else { // readBuffers(fd, buffers, undefined, ctx) FSReqWrapSync req_wrap_sync("read"); FS_SYNC_TRACE_BEGIN(read); @@ -2847,7 +2992,6 @@ static void ReadBuffers(const FunctionCallbackInfo& args) { } } - /* fs.chmod(path, mode); * Wrapper for chmod(1) / EIO_CHMOD */ @@ -2871,8 +3015,15 @@ static void Chmod(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_CHMOD, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "chmod", UTF8, AfterNoArgs, - uv_fs_chmod, *path, mode); + AsyncCall(env, + req_wrap_async, + args, + "chmod", + UTF8, + AfterNoArgs, + uv_fs_chmod, + *path, + mode); } else { // chmod(path, mode) FSReqWrapSync req_wrap_sync("chmod", *path); FS_SYNC_TRACE_BEGIN(chmod); @@ -2881,7 +3032,6 @@ static void Chmod(const FunctionCallbackInfo& args) { } } - /* fs.fchmod(fd, mode); * 
Wrapper for fchmod(1) / EIO_FCHMOD */ @@ -2903,8 +3053,15 @@ static void FChmod(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 2); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FCHMOD, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "fchmod", UTF8, AfterNoArgs, - uv_fs_fchmod, fd, mode); + AsyncCall(env, + req_wrap_async, + args, + "fchmod", + UTF8, + AfterNoArgs, + uv_fs_fchmod, + fd, + mode); } else { // fchmod(fd, mode) FSReqWrapSync req_wrap_sync("fchmod"); FS_SYNC_TRACE_BEGIN(fchmod); @@ -2942,8 +3099,16 @@ static void Chown(const FunctionCallbackInfo& args) { path.ToStringView()); FS_ASYNC_TRACE_BEGIN1( UV_FS_CHOWN, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "chown", UTF8, AfterNoArgs, - uv_fs_chown, *path, uid, gid); + AsyncCall(env, + req_wrap_async, + args, + "chown", + UTF8, + AfterNoArgs, + uv_fs_chown, + *path, + uid, + gid); } else { // chown(path, uid, gid) THROW_IF_INSUFFICIENT_PERMISSIONS( env, @@ -2956,7 +3121,6 @@ static void Chown(const FunctionCallbackInfo& args) { } } - /* fs.fchown(fd, uid, gid); * Wrapper for fchown(1) / EIO_FCHOWN */ @@ -2981,8 +3145,16 @@ static void FChown(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 3); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FCHOWN, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "fchown", UTF8, AfterNoArgs, - uv_fs_fchown, fd, uid, gid); + AsyncCall(env, + req_wrap_async, + args, + "fchown", + UTF8, + AfterNoArgs, + uv_fs_fchown, + fd, + uid, + gid); } else { // fchown(fd, uid, gid) FSReqWrapSync req_wrap_sync("fchown"); FS_SYNC_TRACE_BEGIN(fchown); @@ -2991,7 +3163,6 @@ static void FChown(const FunctionCallbackInfo& args) { } } - static void LChown(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); @@ -3018,8 +3189,16 @@ static void LChown(const FunctionCallbackInfo& args) { path.ToStringView()); 
FS_ASYNC_TRACE_BEGIN1( UV_FS_LCHOWN, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "lchown", UTF8, AfterNoArgs, - uv_fs_lchown, *path, uid, gid); + AsyncCall(env, + req_wrap_async, + args, + "lchown", + UTF8, + AfterNoArgs, + uv_fs_lchown, + *path, + uid, + gid); } else { // lchown(path, uid, gid) THROW_IF_INSUFFICIENT_PERMISSIONS( env, @@ -3032,7 +3211,6 @@ static void LChown(const FunctionCallbackInfo& args) { } } - static void UTimes(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); @@ -3056,8 +3234,16 @@ static void UTimes(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_UTIME, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "utime", UTF8, AfterNoArgs, - uv_fs_utime, *path, atime, mtime); + AsyncCall(env, + req_wrap_async, + args, + "utime", + UTF8, + AfterNoArgs, + uv_fs_utime, + *path, + atime, + mtime); } else { // utimes(path, atime, mtime) FSReqWrapSync req_wrap_sync("utime", *path); FS_SYNC_TRACE_BEGIN(utimes); @@ -3088,8 +3274,16 @@ static void FUTimes(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 3); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FUTIME, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "futime", UTF8, AfterNoArgs, - uv_fs_futime, fd, atime, mtime); + AsyncCall(env, + req_wrap_async, + args, + "futime", + UTF8, + AfterNoArgs, + uv_fs_futime, + fd, + atime, + mtime); } else { // futimes(fd, atime, mtime) FSReqWrapSync req_wrap_sync("futime"); FS_SYNC_TRACE_BEGIN(futimes); @@ -3122,8 +3316,16 @@ static void LUTimes(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_LUTIME, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "lutime", UTF8, AfterNoArgs, - uv_fs_lutime, *path, atime, mtime); + AsyncCall(env, + req_wrap_async, + args, + "lutime", + 
UTF8, + AfterNoArgs, + uv_fs_lutime, + *path, + atime, + mtime); } else { // lutimes(path, atime, mtime) FSReqWrapSync req_wrap_sync("lutime", *path); FS_SYNC_TRACE_BEGIN(lutimes); @@ -3160,8 +3362,14 @@ static void Mkdtemp(const FunctionCallbackInfo& args) { tmpl.ToStringView()); FS_ASYNC_TRACE_BEGIN1( UV_FS_MKDTEMP, req_wrap_async, "path", TRACE_STR_COPY(*tmpl)) - AsyncCall(env, req_wrap_async, args, "mkdtemp", encoding, AfterStringPath, - uv_fs_mkdtemp, *tmpl); + AsyncCall(env, + req_wrap_async, + args, + "mkdtemp", + encoding, + AfterStringPath, + uv_fs_mkdtemp, + *tmpl); } else { // mkdtemp(tmpl, encoding) THROW_IF_INSUFFICIENT_PERMISSIONS( env, @@ -4008,8 +4216,7 @@ static void CreatePerIsolateProperties(IsolateData* isolate_data, // Create Function Template for FSReqPromise Local fpt = FunctionTemplate::New(isolate); fpt->Inherit(AsyncWrap::GetConstructorTemplate(isolate_data)); - Local promiseString = - FIXED_ONE_BYTE_STRING(isolate, "FSReqPromise"); + Local promiseString = FIXED_ONE_BYTE_STRING(isolate, "FSReqPromise"); fpt->SetClassName(promiseString); Local fpo = fpt->InstanceTemplate(); fpo->SetInternalFieldCount(FSReqBase::kInternalFieldCount); @@ -4019,6 +4226,7 @@ static void CreatePerIsolateProperties(IsolateData* isolate_data, Local fd = NewFunctionTemplate(isolate, FileHandle::New); fd->Inherit(AsyncWrap::GetConstructorTemplate(isolate_data)); SetProtoMethod(isolate, fd, "close", FileHandle::Close); + SetProtoMethod(isolate, fd, "closeSync", FileHandle::CloseSync); SetProtoMethod(isolate, fd, "releaseFD", FileHandle::ReleaseFD); Local fdt = fd->InstanceTemplate(); fdt->SetInternalFieldCount(FileHandle::kInternalFieldCount); @@ -4028,8 +4236,7 @@ static void CreatePerIsolateProperties(IsolateData* isolate_data, // Create FunctionTemplate for FileHandle::CloseReq Local fdclose = FunctionTemplate::New(isolate); - fdclose->SetClassName(FIXED_ONE_BYTE_STRING(isolate, - "FileHandleCloseReq")); + fdclose->SetClassName(FIXED_ONE_BYTE_STRING(isolate, 
"FileHandleCloseReq")); fdclose->Inherit(AsyncWrap::GetConstructorTemplate(isolate_data)); Local fdcloset = fdclose->InstanceTemplate(); fdcloset->SetInternalFieldCount(FSReqBase::kInternalFieldCount); @@ -4107,6 +4314,7 @@ void RegisterExternalReferences(ExternalReferenceRegistry* registry) { registry->Register(FileHandle::New); registry->Register(FileHandle::Close); + registry->Register(FileHandle::CloseSync); registry->Register(FileHandle::ReleaseFD); StreamBase::RegisterExternalReferences(registry); } diff --git a/src/node_file.h b/src/node_file.h index 2213d590659595..5d04a7d4dd6af3 100644 --- a/src/node_file.h +++ b/src/node_file.h @@ -80,8 +80,7 @@ class BindingData : public SnapshotableObject { AliasedFloat64Array statfs_field_array; AliasedBigInt64Array statfs_field_bigint_array; - std::vector> - file_handle_read_wrap_freelist; + std::vector> file_handle_read_wrap_freelist; SERIALIZABLE_OBJECT_METHODS() SET_BINDING_ID(fs_binding_data) @@ -146,7 +145,8 @@ class FSReqBase : public ReqWrap { const char* data, size_t len, enum encoding encoding); - inline FSReqBuffer& Init(const char* syscall, size_t len, + inline FSReqBuffer& Init(const char* syscall, + size_t len, enum encoding encoding); virtual void Reject(v8::Local reject) = 0; @@ -240,8 +240,7 @@ inline v8::Local FillGlobalStatFsArray(BindingData* binding_data, template class FSReqPromise final : public FSReqBase { public: - static inline FSReqPromise* New(BindingData* binding_data, - bool use_bigint); + static inline FSReqPromise* New(BindingData* binding_data, bool use_bigint); inline ~FSReqPromise() override; inline void Reject(v8::Local reject) override; @@ -345,6 +344,9 @@ class FileHandle final : public AsyncWrap, public StreamBase { // be resolved once closing is complete. static void Close(const v8::FunctionCallbackInfo& args); + // Synchronously closes the FD. Throws on error. + static void CloseSync(const v8::FunctionCallbackInfo& args); + // Releases ownership of the FD. 
static void ReleaseFD(const v8::FunctionCallbackInfo& args); @@ -499,19 +501,27 @@ inline FSReqBase* GetReqWrap(const v8::FunctionCallbackInfo& args, // Returns nullptr if the operation fails from the start. template -inline FSReqBase* AsyncDestCall(Environment* env, FSReqBase* req_wrap, +inline FSReqBase* AsyncDestCall(Environment* env, + FSReqBase* req_wrap, const v8::FunctionCallbackInfo& args, - const char* syscall, const char* dest, - size_t len, enum encoding enc, uv_fs_cb after, - Func fn, Args... fn_args); + const char* syscall, + const char* dest, + size_t len, + enum encoding enc, + uv_fs_cb after, + Func fn, + Args... fn_args); // Returns nullptr if the operation fails from the start. template inline FSReqBase* AsyncCall(Environment* env, FSReqBase* req_wrap, const v8::FunctionCallbackInfo& args, - const char* syscall, enum encoding enc, - uv_fs_cb after, Func fn, Args... fn_args); + const char* syscall, + enum encoding enc, + uv_fs_cb after, + Func fn, + Args... fn_args); // Template counterpart of SYNC_CALL, except that it only puts // the error number and the syscall in the context instead of diff --git a/test/parallel/test-fs-promises-file-handle-pull.js b/test/parallel/test-fs-promises-file-handle-pull.js index 5dd78e53dbfc6e..63351d3639875f 100644 --- a/test/parallel/test-fs-promises-file-handle-pull.js +++ b/test/parallel/test-fs-promises-file-handle-pull.js @@ -241,6 +241,182 @@ async function testPullIterateBatches() { } } +// ============================================================================= +// pull() with start option - read from specific position +// ============================================================================= + +async function testPullStart() { + const filePath = path.join(tmpDir, 'pull-start.txt'); + fs.writeFileSync(filePath, 'AAABBBCCC'); + + const fh = await open(filePath, 'r'); + try { + // Read from offset 3 + const data = await text(fh.pull({ start: 3 })); + assert.strictEqual(data, 'BBBCCC'); + } finally 
{ + await fh.close(); + } +} + +// ============================================================================= +// pull() with limit option - read at most N bytes +// ============================================================================= + +async function testPullLimit() { + const filePath = path.join(tmpDir, 'pull-limit.txt'); + fs.writeFileSync(filePath, 'Hello, World! Extra data here.'); + + const fh = await open(filePath, 'r'); + try { + const data = await text(fh.pull({ limit: 13 })); + assert.strictEqual(data, 'Hello, World!'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with start + limit - read a slice +// ============================================================================= + +async function testPullStartAndLimit() { + const filePath = path.join(tmpDir, 'pull-start-limit.txt'); + fs.writeFileSync(filePath, 'AAABBBCCCDDD'); + + const fh = await open(filePath, 'r'); + try { + // Read 3 bytes starting at offset 3 + const data = await text(fh.pull({ start: 3, limit: 3 })); + assert.strictEqual(data, 'BBB'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with limit larger than file - reads whole file +// ============================================================================= + +async function testPullLimitLargerThanFile() { + const filePath = path.join(tmpDir, 'pull-limit-large.txt'); + fs.writeFileSync(filePath, 'short'); + + const fh = await open(filePath, 'r'); + try { + const data = await text(fh.pull({ limit: 1000000 })); + assert.strictEqual(data, 'short'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with limit spanning multiple chunks +// ============================================================================= + +async function testPullLimitMultiChunk() { + 
const filePath = path.join(tmpDir, 'pull-limit-multi.bin'); + // 300KB file - spans multiple 128KB reads + const input = Buffer.alloc(300 * 1024, 'x'); + fs.writeFileSync(filePath, input); + + const fh = await open(filePath, 'r'); + try { + // Read exactly 200KB from offset 50KB + const data = await bytes(fh.pull({ start: 50 * 1024, limit: 200 * 1024 })); + assert.strictEqual(data.byteLength, 200 * 1024); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with start + limit + transforms +// ============================================================================= + +async function testPullStartLimitWithTransforms() { + const filePath = path.join(tmpDir, 'pull-start-limit-transform.txt'); + fs.writeFileSync(filePath, 'aaabbbcccddd'); + + const fh = await open(filePath, 'r'); + try { + const { compressGzip, decompressGzip } = require('stream/iter'); + const compressed = fh.pull(compressGzip(), { start: 3, limit: 6 }); + const decompressed = await text( + require('stream/iter').pull(compressed, decompressGzip())); + assert.strictEqual(decompressed, 'bbbccc'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with chunkSize option +// ============================================================================= + +async function testPullChunkSize() { + const filePath = path.join(tmpDir, 'pull-chunksize.bin'); + // Write 64KB of data + const input = Buffer.alloc(64 * 1024, 'z'); + fs.writeFileSync(filePath, input); + + const fh = await open(filePath, 'r'); + try { + // Use 16KB chunks - should produce 4 batches + let batchCount = 0; + for await (const batch of fh.pull({ chunkSize: 16 * 1024 })) { + batchCount++; + for (const chunk of batch) { + assert.ok(chunk.byteLength <= 16 * 1024, + `Chunk ${chunk.byteLength} should be <= 16384`); + } + } + assert.strictEqual(batchCount, 4); + } finally { + 
await fh.close(); + } +} + +async function testPullChunkSizeSmall() { + const filePath = path.join(tmpDir, 'pull-chunksize-small.txt'); + fs.writeFileSync(filePath, 'hello'); + + const fh = await open(filePath, 'r'); + try { + // 1-byte chunks + let totalBytes = 0; + let batchCount = 0; + for await (const batch of fh.pull({ chunkSize: 1 })) { + batchCount++; + for (const chunk of batch) totalBytes += chunk.byteLength; + } + assert.strictEqual(totalBytes, 5); + assert.strictEqual(batchCount, 5); + } finally { + await fh.close(); + } +} + +async function testPullSyncArgumentValidation() { + const filePath = path.join(tmpDir, 'pull-arg-validation.txt'); + fs.writeFileSync(filePath, 'data'); + + const fh = await open(filePath, 'r'); + try { + assert.throws(() => fh.pull({ autoClose: 'no' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pull({ start: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pull({ limit: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pull({ chunkSize: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pull({ signal: {} }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pull({ start: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.pull({ limit: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.pull({ chunkSize: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + } finally { + await fh.close(); + } +} + Promise.all([ testBasicPull(), testPullBinary(), @@ -252,4 +428,13 @@ Promise.all([ testPullClosedHandle(), testPullAbortSignal(), testPullIterateBatches(), + testPullStart(), + testPullLimit(), + testPullStartAndLimit(), + testPullLimitLargerThanFile(), + testPullLimitMultiChunk(), + testPullStartLimitWithTransforms(), + testPullChunkSize(), + testPullChunkSizeSmall(), + testPullSyncArgumentValidation(), ]).then(common.mustCall()); diff --git a/test/parallel/test-fs-promises-file-handle-pullsync.js 
b/test/parallel/test-fs-promises-file-handle-pullsync.js new file mode 100644 index 00000000000000..6deed283e6a594 --- /dev/null +++ b/test/parallel/test-fs-promises-file-handle-pullsync.js @@ -0,0 +1,496 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const fs = require('fs'); +const { open } = fs.promises; +const path = require('path'); +const tmpdir = require('../common/tmpdir'); +const { + textSync, + bytesSync, + pipeToSync, + pullSync, + compressGzipSync, + decompressGzipSync, +} = require('stream/iter'); + +tmpdir.refresh(); + +const tmpDir = tmpdir.path; + +// ============================================================================= +// Basic pullSync() +// ============================================================================= + +async function testBasicPullSync() { + const filePath = path.join(tmpDir, 'pullsync-basic.txt'); + fs.writeFileSync(filePath, 'hello from sync file read'); + + const fh = await open(filePath, 'r'); + try { + const data = textSync(fh.pullSync()); + assert.strictEqual(data, 'hello from sync file read'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// Large file (multi-chunk) +// ============================================================================= + +async function testLargeFile() { + const filePath = path.join(tmpDir, 'pullsync-large.txt'); + const input = 'sync large data test. 
'.repeat(10000); + fs.writeFileSync(filePath, input); + + const fh = await open(filePath, 'r'); + try { + const data = textSync(fh.pullSync()); + assert.strictEqual(data, input); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// Binary data round-trip +// ============================================================================= + +async function testBinaryData() { + const filePath = path.join(tmpDir, 'pullsync-binary.bin'); + const input = Buffer.alloc(200000); + for (let i = 0; i < input.length; i++) input[i] = i & 0xff; + fs.writeFileSync(filePath, input); + + const fh = await open(filePath, 'r'); + try { + const data = bytesSync(fh.pullSync()); + assert.deepStrictEqual(Buffer.from(data), input); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync with sync compression transform round-trip +// ============================================================================= + +async function testPullSyncWithCompression() { + const filePath = path.join(tmpDir, 'pullsync-compress-src.txt'); + const dstPath = path.join(tmpDir, 'pullsync-compress-dst.gz'); + const input = 'compress via sync pullSync. 
'.repeat(1000); + fs.writeFileSync(filePath, input); + + // Compress: pullSync -> compressGzipSync -> write to file + const srcFh = await open(filePath, 'r'); + const dstFh = await open(dstPath, 'w'); + try { + const w = dstFh.writer(); + pipeToSync(srcFh.pullSync(compressGzipSync()), w); + } finally { + await srcFh.close(); + await dstFh.close(); + } + + // Verify compressed file is smaller + const compressedSize = fs.statSync(dstPath).size; + assert.ok(compressedSize < Buffer.byteLength(input), + `Compressed ${compressedSize} should be < original ` + + `${Buffer.byteLength(input)}`); + + // Decompress and verify + const readFh = await open(dstPath, 'r'); + try { + const result = textSync(readFh.pullSync(decompressGzipSync())); + assert.strictEqual(result, input); + } finally { + await readFh.close(); + } +} + +// ============================================================================= +// pullSync with stateless transform +// ============================================================================= + +async function testPullSyncWithStatelessTransform() { + const filePath = path.join(tmpDir, 'pullsync-upper.txt'); + fs.writeFileSync(filePath, 'hello world'); + + const upper = (chunks) => { + if (chunks === null) return null; + const out = new Array(chunks.length); + for (let j = 0; j < chunks.length; j++) { + const src = chunks[j]; + const buf = Buffer.allocUnsafe(src.length); + for (let i = 0; i < src.length; i++) { + const b = src[i]; + buf[i] = (b >= 0x61 && b <= 0x7a) ? 
b - 0x20 : b; + } + out[j] = buf; + } + return out; + }; + + const fh = await open(filePath, 'r'); + try { + const data = textSync(fh.pullSync(upper)); + assert.strictEqual(data, 'HELLO WORLD'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync with mixed stateless + stateful transforms +// ============================================================================= + +async function testPullSyncMixedTransforms() { + const filePath = path.join(tmpDir, 'pullsync-mixed.txt'); + const input = 'mixed transform test '.repeat(500); + fs.writeFileSync(filePath, input); + + const upper = (chunks) => { + if (chunks === null) return null; + const out = new Array(chunks.length); + for (let j = 0; j < chunks.length; j++) { + const src = chunks[j]; + const buf = Buffer.allocUnsafe(src.length); + for (let i = 0; i < src.length; i++) { + const b = src[i]; + buf[i] = (b >= 0x61 && b <= 0x7a) ? b - 0x20 : b; + } + out[j] = buf; + } + return out; + }; + + const fh = await open(filePath, 'r'); + try { + // Upper + compress + decompress + const data = textSync( + pullSync(fh.pullSync(upper, compressGzipSync()), decompressGzipSync()), + ); + assert.strictEqual(data, input.toUpperCase()); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// autoClose: true - handle closed after iteration completes +// ============================================================================= + +async function testAutoClose() { + const filePath = path.join(tmpDir, 'pullsync-autoclose.txt'); + fs.writeFileSync(filePath, 'auto close test'); + + const fh = await open(filePath, 'r'); + const data = textSync(fh.pullSync({ autoClose: true })); + assert.strictEqual(data, 'auto close test'); + + // Handle should be closed + await assert.rejects(fh.stat(), { code: 'EBADF' }); +} + +// 
============================================================================= +// autoClose: true with early break +// ============================================================================= + +async function testAutoCloseEarlyBreak() { + const filePath = path.join(tmpDir, 'pullsync-autoclose-break.txt'); + fs.writeFileSync(filePath, 'x'.repeat(1000000)); + + const fh = await open(filePath, 'r'); + // eslint-disable-next-line no-unused-vars + for (const batch of fh.pullSync({ autoClose: true })) { + break; // Early exit + } + + // Handle should be closed by autoClose + await assert.rejects(fh.stat(), { code: 'EBADF' }); +} + +// ============================================================================= +// autoClose: false (default) - handle stays open +// ============================================================================= + +async function testNoAutoClose() { + const filePath = path.join(tmpDir, 'pullsync-no-autoclose.txt'); + fs.writeFileSync(filePath, 'still open'); + + const fh = await open(filePath, 'r'); + const data = textSync(fh.pullSync()); + assert.strictEqual(data, 'still open'); + + // Handle should still be open and reusable + const stat = await fh.stat(); + assert.ok(stat.size > 0); + await fh.close(); +} + +// ============================================================================= +// Lock semantics - pullSync locks the handle +// ============================================================================= + +async function testLocked() { + const filePath = path.join(tmpDir, 'pullsync-locked.txt'); + fs.writeFileSync(filePath, 'lock test'); + + const fh = await open(filePath, 'r'); + const iter = fh.pullSync()[Symbol.iterator](); + iter.next(); // Start iteration, handle is locked + + assert.throws(() => fh.pullSync(), { + code: 'ERR_INVALID_STATE', + }); + + assert.throws(() => fh.pull(), { + code: 'ERR_INVALID_STATE', + }); + + // Finish iteration to unlock + while (!iter.next().done) { /* drain */ } + await fh.close(); +} 
+ +// ============================================================================= +// Empty file +// ============================================================================= + +async function testEmptyFile() { + const filePath = path.join(tmpDir, 'pullsync-empty.txt'); + fs.writeFileSync(filePath, ''); + + const fh = await open(filePath, 'r'); + try { + const data = textSync(fh.pullSync()); + assert.strictEqual(data, ''); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pipeToSync: file-to-file sync pipeline +// ============================================================================= + +async function testPipeToSync() { + const srcPath = path.join(tmpDir, 'pullsync-pipeto-src.txt'); + const dstPath = path.join(tmpDir, 'pullsync-pipeto-dst.txt'); + const input = 'pipeToSync test data '.repeat(200); + fs.writeFileSync(srcPath, input); + + const srcFh = await open(srcPath, 'r'); + const dstFh = await open(dstPath, 'w'); + try { + const w = dstFh.writer(); + pipeToSync(srcFh.pullSync(), w); + } finally { + await srcFh.close(); + await dstFh.close(); + } + + assert.strictEqual(fs.readFileSync(dstPath, 'utf8'), input); +} + +// ============================================================================= +// pullSync() with start option +// ============================================================================= + +async function testPullSyncStart() { + const filePath = path.join(tmpDir, 'pullsync-start.txt'); + fs.writeFileSync(filePath, 'AAABBBCCC'); + + const fh = await open(filePath, 'r'); + try { + const data = textSync(fh.pullSync({ start: 3 })); + assert.strictEqual(data, 'BBBCCC'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync() with limit option +// ============================================================================= + +async function testPullSyncLimit() { + const 
filePath = path.join(tmpDir, 'pullsync-limit.txt'); + fs.writeFileSync(filePath, 'Hello, World! Extra data here.'); + + const fh = await open(filePath, 'r'); + try { + const data = textSync(fh.pullSync({ limit: 13 })); + assert.strictEqual(data, 'Hello, World!'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync() with start + limit +// ============================================================================= + +async function testPullSyncStartAndLimit() { + const filePath = path.join(tmpDir, 'pullsync-start-limit.txt'); + fs.writeFileSync(filePath, 'AAABBBCCCDDD'); + + const fh = await open(filePath, 'r'); + try { + const data = textSync(fh.pullSync({ start: 3, limit: 3 })); + assert.strictEqual(data, 'BBB'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync() with limit spanning multiple chunks +// ============================================================================= + +async function testPullSyncLimitMultiChunk() { + const filePath = path.join(tmpDir, 'pullsync-limit-multi.bin'); + const input = Buffer.alloc(300 * 1024, 'x'); + fs.writeFileSync(filePath, input); + + const fh = await open(filePath, 'r'); + try { + const data = bytesSync(fh.pullSync({ start: 50 * 1024, limit: 200 * 1024 })); + assert.strictEqual(data.byteLength, 200 * 1024); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync() with start + limit + compression transform +// ============================================================================= + +async function testPullSyncStartLimitWithTransforms() { + const filePath = path.join(tmpDir, 'pullsync-start-limit-transform.txt'); + fs.writeFileSync(filePath, 'aaabbbcccddd'); + + const fh = await open(filePath, 'r'); + try { + const compressed = fh.pullSync(compressGzipSync(), 
+ { start: 3, limit: 6 }); + const decompressed = textSync(pullSync(compressed, decompressGzipSync())); + assert.strictEqual(decompressed, 'bbbccc'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync() with start + autoClose +// ============================================================================= + +async function testPullSyncStartAutoClose() { + const filePath = path.join(tmpDir, 'pullsync-start-autoclose.txt'); + fs.writeFileSync(filePath, 'AAABBBCCC'); + + const fh = await open(filePath, 'r'); + const data = textSync(fh.pullSync({ start: 3, autoClose: true })); + assert.strictEqual(data, 'BBBCCC'); + + // Handle should be closed + await assert.rejects(fh.stat(), { code: 'EBADF' }); +} + +// ============================================================================= +// pullSync() with chunkSize option +// ============================================================================= + +async function testPullSyncChunkSize() { + const filePath = path.join(tmpDir, 'pullsync-chunksize.bin'); + const input = Buffer.alloc(64 * 1024, 'z'); + fs.writeFileSync(filePath, input); + + const fh = await open(filePath, 'r'); + try { + let batchCount = 0; + for (const batch of fh.pullSync({ chunkSize: 16 * 1024 })) { + batchCount++; + for (const chunk of batch) { + assert.ok(chunk.byteLength <= 16 * 1024, + `Chunk ${chunk.byteLength} should be <= 16384`); + } + } + assert.strictEqual(batchCount, 4); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// writer() with chunkSize option (sync write threshold) +// ============================================================================= + +async function testWriterChunkSize() { + const filePath = path.join(tmpDir, 'pullsync-writer-chunksize.txt'); + const fh = await open(filePath, 'w'); + // Set chunkSize to 1024 - writes larger than this should fall back to 
async + const w = fh.writer({ chunkSize: 1024 }); + + // Small write should succeed sync + assert.strictEqual(w.writeSync(Buffer.alloc(512, 'a')), true); + + // Write larger than chunkSize should return false + assert.strictEqual(w.writeSync(Buffer.alloc(2048, 'b')), false); + + await w.end(); + await fh.close(); +} + +// ============================================================================= +// Argument validation +// ============================================================================= + +async function testPullArgumentValidation() { + const filePath = path.join(tmpDir, 'pull-arg-validation.txt'); + fs.writeFileSync(filePath, 'data'); + + const fh = await open(filePath, 'r'); + try { + assert.throws(() => fh.pullSync({ autoClose: 'no' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pullSync({ start: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pullSync({ limit: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pullSync({ chunkSize: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pullSync({ start: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.pullSync({ limit: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.pullSync({ chunkSize: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// Run all tests +// ============================================================================= + +Promise.all([ + testBasicPullSync(), + testLargeFile(), + testBinaryData(), + testPullSyncWithCompression(), + testPullSyncWithStatelessTransform(), + testPullSyncMixedTransforms(), + testAutoClose(), + testAutoCloseEarlyBreak(), + testNoAutoClose(), + testLocked(), + testEmptyFile(), + testPipeToSync(), + testPullSyncStart(), + testPullSyncLimit(), + testPullSyncStartAndLimit(), + testPullSyncLimitMultiChunk(), + testPullSyncStartLimitWithTransforms(), 
+ testPullSyncStartAutoClose(), + testPullSyncChunkSize(), + testWriterChunkSize(), + testPullArgumentValidation(), +]).then(common.mustCall()); diff --git a/test/parallel/test-fs-promises-file-handle-writer.js b/test/parallel/test-fs-promises-file-handle-writer.js index 20851bf2b691ef..c9828939db89ef 100644 --- a/test/parallel/test-fs-promises-file-handle-writer.js +++ b/test/parallel/test-fs-promises-file-handle-writer.js @@ -515,6 +515,558 @@ async function testEndWithAbortedSignalRejects() { assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'data'); } +// ============================================================================= +// write() with string input (UTF-8 encoding) +// ============================================================================= + +async function testWriteString() { + const filePath = path.join(tmpDir, 'writer-string.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + await w.write('Hello '); + await w.write('World!'); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 12); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'Hello World!'); +} + +// ============================================================================= +// write() with string containing multi-byte UTF-8 characters +// ============================================================================= + +async function testWriteStringMultibyte() { + const filePath = path.join(tmpDir, 'writer-string-multibyte.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + const input = 'café ☕ 日本語'; + await w.write(input); + const totalBytes = await w.end(); + await fh.close(); + + const expected = Buffer.from(input, 'utf8'); + assert.strictEqual(totalBytes, expected.byteLength); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), input); +} + +// ============================================================================= +// writev() with string chunks (UTF-8 encoding) +// 
============================================================================= + +async function testWritevStrings() { + const filePath = path.join(tmpDir, 'writer-writev-strings.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + await w.writev(['aaa', 'bbb', 'ccc']); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 9); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'aaabbbccc'); +} + +// ============================================================================= +// writev() with mixed string and Uint8Array chunks +// ============================================================================= + +async function testWritevMixed() { + const filePath = path.join(tmpDir, 'writer-writev-mixed.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + await w.writev(['hello', Buffer.from(' '), 'world']); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 11); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'hello world'); +} + +// ============================================================================= +// Symbol.dispose calls fail() +// ============================================================================= + +async function testSyncDispose() { + const filePath = path.join(tmpDir, 'writer-sync-dispose.txt'); + const fh = await open(filePath, 'w'); + + { + using w = fh.writer(); + await w.write(Buffer.from('before dispose')); + } + // Symbol.dispose calls fail(), which unlocks the handle. + // The handle should be reusable. 
+ const w2 = fh.writer(); + await w2.write(Buffer.from('after dispose')); + await w2.end(); + await fh.close(); + + const content = fs.readFileSync(filePath, 'utf8'); + assert.ok(content.includes('after dispose'), + `Expected 'after dispose' in ${JSON.stringify(content)}`); +} + +// ============================================================================= +// Symbol.dispose on error unwind +// ============================================================================= + +async function testSyncDisposeOnError() { + const filePath = path.join(tmpDir, 'writer-sync-dispose-error.txt'); + const fh = await open(filePath, 'w'); + + try { + using w = fh.writer(); + await w.write(Buffer.from('data')); + throw new Error('intentional'); + } catch (e) { + assert.strictEqual(e.message, 'intentional'); + } + + // Handle should be unlocked and reusable after sync dispose + const w2 = fh.writer(); + await w2.write(Buffer.from('recovered')); + await w2.end(); + await fh.close(); + + const content = fs.readFileSync(filePath, 'utf8'); + assert.ok(content.includes('recovered'), + `Expected 'recovered' in ${JSON.stringify(content)}`); +} + +// ============================================================================= +// writeSync() basic +// ============================================================================= + +async function testWriteSyncBasic() { + const filePath = path.join(tmpDir, 'writer-writesync-basic.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + assert.strictEqual(w.writeSync('Hello '), true); + assert.strictEqual(w.writeSync(Buffer.from('World!')), true); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 12); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'Hello World!'); +} + +// ============================================================================= +// writevSync() basic +// ============================================================================= + +async function 
testWritevSyncBasic() { + const filePath = path.join(tmpDir, 'writer-writevsync-basic.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + assert.strictEqual(w.writevSync(['aaa', Buffer.from('bbb'), 'ccc']), true); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 9); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'aaabbbccc'); +} + +// ============================================================================= +// writeSync() returns false for large chunks +// ============================================================================= + +async function testWriteSyncLargeChunk() { + const filePath = path.join(tmpDir, 'writer-writesync-large.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + // Chunk larger than 131072 should return false + const bigChunk = Buffer.alloc(131073, 'x'); + assert.strictEqual(w.writeSync(bigChunk), false); + + // Chunk at exactly 131072 should succeed + const exactChunk = Buffer.alloc(131072, 'y'); + assert.strictEqual(w.writeSync(exactChunk), true); + + await w.end(); + await fh.close(); + + // Only the exact chunk should have been written + const content = fs.readFileSync(filePath); + assert.strictEqual(content.length, 131072); +} + +// ============================================================================= +// writeSync() returns false when async op is in flight +// ============================================================================= + +async function testWriteSyncReturnsFalseDuringAsync() { + const filePath = path.join(tmpDir, 'writer-writesync-async.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + // Start an async write but don't await yet + const p = w.write(Buffer.from('async')); + + // Sync write should return false because async is in flight + assert.strictEqual(w.writeSync(Buffer.from('sync')), false); + + await p; + + // After async completes, sync should work again + 
assert.strictEqual(w.writeSync(Buffer.from(' then sync')), true); + + await w.end(); + await fh.close(); + + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'async then sync'); +} + +// ============================================================================= +// writeSync() returns false on closed/errored writer +// ============================================================================= + +async function testWriteSyncClosedErrored() { + const filePath = path.join(tmpDir, 'writer-writesync-closed.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + await w.end(); + + // Should return false after end() + assert.strictEqual(w.writeSync(Buffer.from('data')), false); + await fh.close(); + + // Test errored state + const fh2 = await open(filePath, 'w'); + const w2 = fh2.writer(); + w2.fail(new Error('test')); + assert.strictEqual(w2.writeSync(Buffer.from('data')), false); + await fh2.close(); +} + +// ============================================================================= +// endSync() basic +// ============================================================================= + +async function testEndSyncBasic() { + const filePath = path.join(tmpDir, 'writer-endsync-basic.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + w.writeSync(Buffer.from('hello')); + const totalBytes = w.endSync(); + await fh.close(); + + assert.strictEqual(totalBytes, 5); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'hello'); +} + +// ============================================================================= +// endSync() returns -1 when async op is in flight +// ============================================================================= + +async function testEndSyncReturnsFalseDuringAsync() { + const filePath = path.join(tmpDir, 'writer-endsync-async.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + const p = w.write(Buffer.from('data')); + assert.strictEqual(w.endSync(), -1); + + await 
p; + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 4); +} + +// ============================================================================= +// endSync() idempotent on closed writer +// ============================================================================= + +async function testEndSyncIdempotent() { + const filePath = path.join(tmpDir, 'writer-endsync-idempotent.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + w.writeSync(Buffer.from('data')); + const first = w.endSync(); + const second = w.endSync(); + + assert.strictEqual(first, 4); + assert.strictEqual(second, 4); // Idempotent + await fh.close(); +} + +// ============================================================================= +// endSync() with autoClose fires handle.close() +// ============================================================================= + +async function testEndSyncAutoClose() { + const filePath = path.join(tmpDir, 'writer-endsync-autoclose.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ autoClose: true }); + + w.writeSync(Buffer.from('auto')); + const totalBytes = w.endSync(); + + assert.strictEqual(totalBytes, 4); + + // Handle should be closed synchronously + await assert.rejects(fh.stat(), { code: 'EBADF' }); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'auto'); +} + +// ============================================================================= +// Full sync pipeline: writeSync + endSync (no async at all) +// ============================================================================= + +async function testFullSyncPipeline() { + const filePath = path.join(tmpDir, 'writer-full-sync.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + // Entirely synchronous write pipeline + w.writeSync('line 1\n'); + w.writeSync('line 2\n'); + w.writevSync(['line 3\n', 'line 4\n']); + const totalBytes = w.endSync(); + await fh.close(); + + 
assert.strictEqual(totalBytes, 28); + assert.strictEqual( + fs.readFileSync(filePath, 'utf8'), + 'line 1\nline 2\nline 3\nline 4\n', + ); +} + +// ============================================================================= +// end() rejects on errored writer +// ============================================================================= + +async function testEndRejectsOnErrored() { + const filePath = path.join(tmpDir, 'writer-end-errored.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + await w.write(Buffer.from('data')); + w.fail(new Error('test error')); + + await assert.rejects( + w.end(), + { message: 'test error' }, + ); + await fh.close(); +} + +// ============================================================================= +// end() is idempotent when closing/closed +// ============================================================================= + +async function testEndIdempotent() { + const filePath = path.join(tmpDir, 'writer-end-idempotent.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + await w.write(Buffer.from('data')); + + // Call end() twice concurrently - both calls must resolve to the same count + const p1 = w.end(); + const p2 = w.end(); + const [bytes1, bytes2] = await Promise.all([p1, p2]); + + assert.strictEqual(bytes1, 4); + assert.strictEqual(bytes2, 4); + + // After closed, calling end() again returns totalBytesWritten + const bytes3 = await w.end(); + assert.strictEqual(bytes3, 4); + + await fh.close(); +} + +// ============================================================================= +// asyncDispose waits for pending end() when closing +// ============================================================================= + +async function testAsyncDisposeWhileClosing() { + const filePath = path.join(tmpDir, 'writer-dispose-closing.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ autoClose: true }); + + await w.write(Buffer.from('closing test')); + + // Start end() 
but don't await - writer is now "closing" + const endPromise = w.end(); + + // asyncDispose should wait for the pending end, not call fail() + await w[Symbol.asyncDispose](); + await endPromise; + + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'closing test'); +} + +// ============================================================================= +// asyncDispose calls fail() on open writer (not graceful cleanup) +// ============================================================================= + +async function testAsyncDisposeCallsFail() { + const filePath = path.join(tmpDir, 'writer-dispose-fails.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + await w.write(Buffer.from('some data')); + + // Dispose without end() - should call fail(), not graceful cleanup + await w[Symbol.asyncDispose](); + + // Writer should be in errored state - write should reject + await assert.rejects( + w.write(Buffer.from('more')), + (err) => err instanceof Error, + ); + + // Handle should be unlocked and reusable + const w2 = fh.writer(); + await w2.end(); + await fh.close(); +} + +// ============================================================================= +// writer() with limit - async write within limit succeeds +// ============================================================================= + +async function testWriterLimit() { + const filePath = path.join(tmpDir, 'writer-limit.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ limit: 10 }); + + await w.write(Buffer.from('12345')); // 5 bytes, 5 remaining + await w.write(Buffer.from('67890')); // 5 bytes, 0 remaining + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 10); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), '1234567890'); +} + +// ============================================================================= +// writer() with limit - async write exceeding limit rejects +// 
============================================================================= + +async function testWriterLimitExceeded() { + const filePath = path.join(tmpDir, 'writer-limit-exceeded.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ limit: 5 }); + + await w.write(Buffer.from('123')); // 3 bytes, 2 remaining + + await assert.rejects( + w.write(Buffer.from('45678')), // 5 bytes > 2 remaining + { code: 'ERR_OUT_OF_RANGE' }, + ); + + await w.end(); + await fh.close(); +} + +// ============================================================================= +// writer() with limit - writev exceeding limit rejects +// ============================================================================= + +async function testWriterLimitWritev() { + const filePath = path.join(tmpDir, 'writer-limit-writev.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ limit: 6 }); + + await w.writev([Buffer.from('ab'), Buffer.from('cd')]); // 4 bytes + + await assert.rejects( + w.writev([Buffer.from('ef'), Buffer.from('gh')]), // 4 bytes > 2 remaining + { code: 'ERR_OUT_OF_RANGE' }, + ); + + await w.end(); + await fh.close(); +} + +// ============================================================================= +// writer() with limit - writeSync returns false when exceeding limit +// ============================================================================= + +async function testWriterLimitWriteSync() { + const filePath = path.join(tmpDir, 'writer-limit-writesync.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ limit: 10 }); + + assert.strictEqual(w.writeSync(Buffer.from('12345')), true); // 5 ok + assert.strictEqual(w.writeSync(Buffer.from('678')), true); // 3 ok + assert.strictEqual(w.writeSync(Buffer.from('901')), false); // 3 > 2 remaining + + const totalBytes = w.endSync(); + await fh.close(); + + assert.strictEqual(totalBytes, 8); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), '12345678'); +} + +// 
============================================================================= +// writer() with limit - writevSync returns false when exceeding limit +// ============================================================================= + +async function testWriterLimitWritevSync() { + const filePath = path.join(tmpDir, 'writer-limit-writevsync.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ limit: 5 }); + + assert.strictEqual(w.writevSync([Buffer.from('ab')]), true); + // 4 bytes > 3 remaining + assert.strictEqual( + w.writevSync([Buffer.from('cd'), Buffer.from('ef')]), false); + + w.endSync(); + await fh.close(); +} + +// ============================================================================= +// writer() with limit + start +// ============================================================================= + +async function testWriterLimitAndStart() { + const filePath = path.join(tmpDir, 'writer-limit-start.txt'); + // Pre-fill file with dots + fs.writeFileSync(filePath, '...........'); // 11 dots + + const fh = await open(filePath, 'r+'); + const w = fh.writer({ start: 3, limit: 5 }); + + await w.write(Buffer.from('HELLO')); // Write at offset 3 + await w.end(); + await fh.close(); + + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), '...HELLO...'); +} + +// ============================================================================= +// Argument validation +// ============================================================================= + +async function testWriterArgumentValidation() { + const filePath = path.join(tmpDir, 'pull-arg-validation.txt'); + fs.writeFileSync(filePath, 'data'); + + const fh = await open(filePath, 'r'); + try { + assert.throws(() => fh.writer({ autoClose: 'no' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.writer({ start: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.writer({ limit: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.writer({ chunkSize: 
'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.writer({ start: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.writer({ limit: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.writer({ chunkSize: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + } finally { + await fh.close(); + } +} + // ============================================================================= // Run all tests // ============================================================================= @@ -542,4 +1094,31 @@ Promise.all([ testWriteWithAbortedSignalRejects(), testWritevWithAbortedSignalRejects(), testEndWithAbortedSignalRejects(), + testWriteString(), + testWriteStringMultibyte(), + testWritevStrings(), + testWritevMixed(), + testSyncDispose(), + testSyncDisposeOnError(), + testWriteSyncBasic(), + testWritevSyncBasic(), + testWriteSyncLargeChunk(), + testWriteSyncReturnsFalseDuringAsync(), + testWriteSyncClosedErrored(), + testEndSyncBasic(), + testEndSyncReturnsFalseDuringAsync(), + testEndSyncIdempotent(), + testEndSyncAutoClose(), + testFullSyncPipeline(), + testEndRejectsOnErrored(), + testEndIdempotent(), + testAsyncDisposeWhileClosing(), + testAsyncDisposeCallsFail(), + testWriterLimit(), + testWriterLimitExceeded(), + testWriterLimitWritev(), + testWriterLimitWriteSync(), + testWriterLimitWritevSync(), + testWriterLimitAndStart(), + testWriterArgumentValidation(), ]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-transform-sync.js b/test/parallel/test-stream-iter-transform-sync.js new file mode 100644 index 00000000000000..a7e238beb4e5a8 --- /dev/null +++ b/test/parallel/test-stream-iter-transform-sync.js @@ -0,0 +1,225 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + fromSync, + pullSync, + bytesSync, + textSync, + compressGzipSync, + compressDeflateSync, + compressBrotliSync, + compressZstdSync, + decompressGzipSync, + 
decompressDeflateSync, + decompressBrotliSync, + decompressZstdSync, +} = require('stream/iter'); + +// ============================================================================= +// Helper: sync compress then decompress, verify round-trip equality +// ============================================================================= + +function roundTrip(input, compress, decompress) { + return textSync(pullSync(pullSync(fromSync(input), compress), decompress)); +} + +function roundTripBytes(inputBuf, compress, decompress) { + return bytesSync(pullSync(pullSync(fromSync(inputBuf), compress), decompress)); +} + +// ============================================================================= +// Gzip sync round-trip tests +// ============================================================================= + +function testGzipRoundTrip() { + const input = 'Hello, sync gzip compression!'; + const result = roundTrip(input, compressGzipSync(), decompressGzipSync()); + assert.strictEqual(result, input); +} + +function testGzipLargeData() { + const input = 'gzip sync large data test. '.repeat(5000); + const result = roundTrip(input, compressGzipSync(), decompressGzipSync()); + assert.strictEqual(result, input); +} + +function testGzipActuallyCompresses() { + const input = 'Repeated data compresses well. 
'.repeat(1000); + const inputBuf = Buffer.from(input); + const compressed = bytesSync(pullSync(fromSync(inputBuf), + compressGzipSync())); + assert.ok(compressed.byteLength < inputBuf.byteLength, + `Compressed ${compressed.byteLength} should be < ` + + `original ${inputBuf.byteLength}`); +} + +function testGzipBinaryData() { + const inputBuf = Buffer.alloc(10000); + for (let i = 0; i < inputBuf.length; i++) inputBuf[i] = i & 0xff; + const result = roundTripBytes(inputBuf, compressGzipSync(), + decompressGzipSync()); + assert.deepStrictEqual(result, inputBuf); +} + +// ============================================================================= +// Deflate sync round-trip tests +// ============================================================================= + +function testDeflateRoundTrip() { + const input = 'Hello, sync deflate compression!'; + const result = roundTrip(input, compressDeflateSync(), + decompressDeflateSync()); + assert.strictEqual(result, input); +} + +function testDeflateLargeData() { + const input = 'deflate sync large data test. '.repeat(5000); + const result = roundTrip(input, compressDeflateSync(), + decompressDeflateSync()); + assert.strictEqual(result, input); +} + +// ============================================================================= +// Brotli sync round-trip tests +// ============================================================================= + +function testBrotliRoundTrip() { + const input = 'Hello, sync brotli compression!'; + const result = roundTrip(input, compressBrotliSync(), + decompressBrotliSync()); + assert.strictEqual(result, input); +} + +function testBrotliLargeData() { + const input = 'brotli sync large data test. 
'.repeat(5000); + const result = roundTrip(input, + compressBrotliSync(), + decompressBrotliSync()); + assert.strictEqual(result, input); +} + +// ============================================================================= +// Zstd sync round-trip tests +// ============================================================================= + +function testZstdRoundTrip() { + const input = 'Hello, sync zstd compression!'; + const result = roundTrip(input, compressZstdSync(), decompressZstdSync()); + assert.strictEqual(result, input); +} + +function testZstdLargeData() { + const input = 'zstd sync large data test. '.repeat(5000); + const result = roundTrip(input, compressZstdSync(), decompressZstdSync()); + assert.strictEqual(result, input); +} + +// ============================================================================= +// Options pass-through: sync transforms accept the same per-algorithm option +// objects as their async counterparts and still round-trip correctly +// ============================================================================= + +function testGzipWithOptions() { + const input = 'options test data '.repeat(100); + const result = roundTrip(input, + compressGzipSync({ level: 1 }), + decompressGzipSync()); + assert.strictEqual(result, input); +} + +function testBrotliWithOptions() { + const zlib = require('zlib'); + const input = 'brotli options test data '.repeat(100); + const result = roundTrip(input, + compressBrotliSync({ + params: { + [zlib.constants.BROTLI_PARAM_QUALITY]: 3, + }, + }), + decompressBrotliSync()); + assert.strictEqual(result, input); +} + +// ============================================================================= +// Stateless + stateful sync transform pipeline +// ============================================================================= + +function testMixedStatelessAndStateful() { + // Uppercase stateless transform + gzip stateful transform + const upper = (chunks) => { + if (chunks === null) return 
null; + const out = new Array(chunks.length); + for (let j = 0; j < chunks.length; j++) { + const src = chunks[j]; + const buf = Buffer.allocUnsafe(src.length); + for (let i = 0; i < src.length; i++) { + const b = src[i]; + buf[i] = (b >= 0x61 && b <= 0x7a) ? b - 0x20 : b; + } + out[j] = buf; + } + return out; + }; + + const input = 'hello world '.repeat(100); + const result = textSync( + pullSync( + pullSync(fromSync(input), upper, compressGzipSync()), + decompressGzipSync(), + ), + ); + assert.strictEqual(result, input.toUpperCase()); +} + +// ============================================================================= +// Early consumer exit (break from for-of) triggers cleanup +// ============================================================================= + +function testEarlyExit() { + const input = 'y'.repeat(100_000); + const compressed = pullSync(fromSync(input), compressGzipSync()); + + // eslint-disable-next-line no-unused-vars + for (const batch of compressed) { + break; // Early exit - should trigger finally block cleanup + } + // If we get here without crashing, cleanup worked +} + +// ============================================================================= +// Empty input +// ============================================================================= + +function testEmptyInput() { + const result = textSync( + pullSync( + pullSync(fromSync(''), compressGzipSync()), + decompressGzipSync(), + ), + ); + assert.strictEqual(result, ''); +} + +// ============================================================================= +// Run all tests +// ============================================================================= + +testGzipRoundTrip(); +testGzipLargeData(); +testGzipActuallyCompresses(); +testGzipBinaryData(); +testDeflateRoundTrip(); +testDeflateLargeData(); +testBrotliRoundTrip(); +testBrotliLargeData(); +testZstdRoundTrip(); +testZstdLargeData(); +testGzipWithOptions(); +testBrotliWithOptions(); +testMixedStatelessAndStateful(); 
+testEarlyExit(); +testEmptyInput(); + +common.mustCall()();