diff --git a/benchmark/fs/bench-filehandle-pipetosync.js b/benchmark/fs/bench-filehandle-pipetosync.js new file mode 100644 index 00000000000000..e62ade89472341 --- /dev/null +++ b/benchmark/fs/bench-filehandle-pipetosync.js @@ -0,0 +1,103 @@ +// Benchmark: pipeToSync with sync compression transforms. +// Measures fully synchronous file-to-file pipeline (no threadpool, no promises). +'use strict'; + +const common = require('../common.js'); +const fs = require('fs'); +const { openSync, closeSync, writeSync, unlinkSync } = fs; + +const tmpdir = require('../../test/common/tmpdir'); +tmpdir.refresh(); +const srcFile = tmpdir.resolve(`.removeme-sync-bench-src-${process.pid}`); +const dstFile = tmpdir.resolve(`.removeme-sync-bench-dst-${process.pid}`); + +const bench = common.createBenchmark(main, { + compression: ['gzip', 'deflate', 'brotli', 'zstd'], + filesize: [1024 * 1024, 16 * 1024 * 1024, 64 * 1024 * 1024], + n: [5], +}, { + flags: ['--experimental-stream-iter'], +}); + +function main({ compression, filesize, n }) { + // Create the fixture file with repeating lowercase ASCII + const chunk = Buffer.alloc(Math.min(filesize, 64 * 1024), 'abcdefghij'); + const fd = openSync(srcFile, 'w'); + let remaining = filesize; + while (remaining > 0) { + const toWrite = Math.min(remaining, chunk.length); + writeSync(fd, chunk, 0, toWrite); + remaining -= toWrite; + } + closeSync(fd); + + const { pipeToSync } = require('stream/iter'); + const { + compressGzipSync, + compressDeflateSync, + compressBrotliSync, + compressZstdSync, + } = require('zlib/iter'); + const { open } = fs.promises; + + const compressFactory = { + gzip: compressGzipSync, + deflate: compressDeflateSync, + brotli: compressBrotliSync, + zstd: compressZstdSync, + }[compression]; + + // Stateless uppercase transform (sync) + const upper = (chunks) => { + if (chunks === null) return null; + const out = new Array(chunks.length); + for (let j = 0; j < chunks.length; j++) { + const src = chunks[j]; + const buf = 
Buffer.allocUnsafe(src.length); + for (let i = 0; i < src.length; i++) { + const b = src[i]; + buf[i] = (b >= 0x61 && b <= 0x7a) ? b - 0x20 : b; + } + out[j] = buf; + } + return out; + }; + + // Use a synchronous wrapper since pipeToSync is fully sync. + // We need FileHandle for pullSync/writer, so open async then run sync. + (async () => { + const srcFh = await open(srcFile, 'r'); + const dstFh = await open(dstFile, 'w'); + + // Warm up + runSync(srcFh, dstFh, upper, compressFactory, pipeToSync); + + // Reset file positions for the benchmark + await srcFh.close(); + await dstFh.close(); + + bench.start(); + let totalBytes = 0; + for (let i = 0; i < n; i++) { + const src = await open(srcFile, 'r'); + const dst = await open(dstFile, 'w'); + totalBytes += runSync(src, dst, upper, compressFactory, pipeToSync); + await src.close(); + await dst.close(); + } + bench.end(totalBytes / (1024 * 1024)); + + cleanup(); + })(); +} + +function runSync(srcFh, dstFh, upper, compressFactory, pipeToSync) { + const w = dstFh.writer(); + pipeToSync(srcFh.pullSync(upper, compressFactory()), w); + return w.endSync(); +} + +function cleanup() { + try { unlinkSync(srcFile); } catch { /* Ignore */ } + try { unlinkSync(dstFile); } catch { /* Ignore */ } +} diff --git a/benchmark/fs/bench-filehandle-pull-vs-webstream.js b/benchmark/fs/bench-filehandle-pull-vs-webstream.js new file mode 100644 index 00000000000000..9ced02e817db4d --- /dev/null +++ b/benchmark/fs/bench-filehandle-pull-vs-webstream.js @@ -0,0 +1,201 @@ +// Compare FileHandle.createReadStream() vs readableWebStream() vs pull() +// reading a large file through two transforms: uppercase then compress. 
+'use strict'; + +const common = require('../common.js'); +const fs = require('fs'); +const zlib = require('zlib'); +const { Transform, Writable, pipeline } = require('stream'); + +const tmpdir = require('../../test/common/tmpdir'); +tmpdir.refresh(); +const filename = tmpdir.resolve(`.removeme-benchmark-garbage-${process.pid}`); + +const bench = common.createBenchmark(main, { + api: ['classic', 'webstream', 'pull'], + compression: ['gzip', 'deflate', 'brotli', 'zstd'], + filesize: [1024 * 1024, 16 * 1024 * 1024, 64 * 1024 * 1024], + n: [5], +}, { + flags: ['--experimental-stream-iter'], + // Classic and webstream only support gzip (native zlib / CompressionStream). + // Brotli, deflate, zstd are pull-only via stream/iter transforms. + combinationFilter({ api, compression }) { + if (api === 'classic' && compression !== 'gzip') return false; + if (api === 'webstream' && compression !== 'gzip') return false; + return true; + }, +}); + +function main({ api, compression, filesize, n }) { + // Create the fixture file with repeating lowercase ASCII + const chunk = Buffer.alloc(Math.min(filesize, 64 * 1024), 'abcdefghij'); + const fd = fs.openSync(filename, 'w'); + let remaining = filesize; + while (remaining > 0) { + const toWrite = Math.min(remaining, chunk.length); + fs.writeSync(fd, chunk, 0, toWrite); + remaining -= toWrite; + } + fs.closeSync(fd); + + if (api === 'classic') { + benchClassic(n, filesize).then(() => cleanup()); + } else if (api === 'webstream') { + benchWebStream(n, filesize).then(() => cleanup()); + } else { + benchPull(n, filesize, compression).then(() => cleanup()); + } +} + +function cleanup() { + try { fs.unlinkSync(filename); } catch { /* ignore */ } +} + +// Stateless uppercase transform (shared by all paths) +function uppercaseChunk(chunk) { + const buf = Buffer.allocUnsafe(chunk.length); + for (let i = 0; i < chunk.length; i++) { + const b = chunk[i]; + buf[i] = (b >= 0x61 && b <= 0x7a) ? 
b - 0x20 : b; + } + return buf; +} + +// --------------------------------------------------------------------------- +// Classic streams path: createReadStream -> Transform (upper) -> createGzip +// --------------------------------------------------------------------------- +async function benchClassic(n, filesize) { + await runClassic(); + + bench.start(); + let totalBytes = 0; + for (let i = 0; i < n; i++) { + totalBytes += await runClassic(); + } + bench.end(totalBytes / (1024 * 1024)); +} + +function runClassic() { + return new Promise((resolve, reject) => { + const rs = fs.createReadStream(filename); + + const upper = new Transform({ + transform(chunk, encoding, callback) { + callback(null, uppercaseChunk(chunk)); + }, + }); + + const gz = zlib.createGzip(); + + let totalBytes = 0; + const sink = new Writable({ + write(chunk, encoding, callback) { + totalBytes += chunk.length; + callback(); + }, + }); + + pipeline(rs, upper, gz, sink, (err) => { + if (err) reject(err); + else resolve(totalBytes); + }); + }); +} + +// --------------------------------------------------------------------------- +// WebStream path: readableWebStream -> TransformStream (upper) -> CompressionStream +// --------------------------------------------------------------------------- +async function benchWebStream(n, filesize) { + await runWebStream(); + + bench.start(); + let totalBytes = 0; + for (let i = 0; i < n; i++) { + totalBytes += await runWebStream(); + } + bench.end(totalBytes / (1024 * 1024)); +} + +async function runWebStream() { + const fh = await fs.promises.open(filename, 'r'); + try { + const rs = fh.readableWebStream(); + + const upper = new TransformStream({ + transform(chunk, controller) { + const buf = new Uint8Array(chunk.length); + for (let i = 0; i < chunk.length; i++) { + const b = chunk[i]; + buf[i] = (b >= 0x61 && b <= 0x7a) ? 
b - 0x20 : b; + } + controller.enqueue(buf); + }, + }); + + const compress = new CompressionStream('gzip'); + const output = rs.pipeThrough(upper).pipeThrough(compress); + const reader = output.getReader(); + + let totalBytes = 0; + while (true) { + const { done, value } = await reader.read(); + if (done) break; + totalBytes += value.byteLength; + } + return totalBytes; + } finally { + await fh.close(); + } +} + +// --------------------------------------------------------------------------- +// Pull/iter path: pull() with uppercase transform + selected compression +// --------------------------------------------------------------------------- +async function benchPull(n, filesize, compression) { + const iter = require('zlib/iter'); + + const compressFactory = { + gzip: iter.compressGzip, + deflate: iter.compressDeflate, + brotli: iter.compressBrotli, + zstd: iter.compressZstd, + }[compression]; + + // Warm up + await runPull(compressFactory); + + bench.start(); + let totalBytes = 0; + for (let i = 0; i < n; i++) { + totalBytes += await runPull(compressFactory); + } + bench.end(totalBytes / (1024 * 1024)); +} + +async function runPull(compressFactory) { + const fh = await fs.promises.open(filename, 'r'); + try { + // Stateless transform: uppercase each chunk in the batch + const upper = (chunks) => { + if (chunks === null) return null; + const out = new Array(chunks.length); + for (let j = 0; j < chunks.length; j++) { + out[j] = uppercaseChunk(chunks[j]); + } + return out; + }; + + const readable = fh.pull(upper, compressFactory()); + + let totalBytes = 0; + for await (const chunks of readable) { + for (let i = 0; i < chunks.length; i++) { + totalBytes += chunks[i].byteLength; + } + } + return totalBytes; + } finally { + await fh.close(); + } +} diff --git a/benchmark/streams/iter-creation.js b/benchmark/streams/iter-creation.js new file mode 100644 index 00000000000000..d5fa59d239688a --- /dev/null +++ b/benchmark/streams/iter-creation.js @@ -0,0 +1,92 @@ +// 
Object creation overhead benchmark. +// Measures the cost of constructing stream infrastructure (no data flow). +'use strict'; + +const common = require('../common.js'); +const { Readable, Writable, Transform, PassThrough } = require('stream'); + +const bench = common.createBenchmark(main, { + api: ['classic', 'webstream', 'iter'], + type: ['readable', 'writable', 'transform', 'pair'], + n: [1e5], +}, { + flags: ['--experimental-stream-iter'], + // Iter has no standalone Transform class; transforms are plain functions. + combinationFilter: ({ api, type }) => + !(api === 'iter' && type === 'transform'), +}); + +function main({ api, type, n }) { + switch (api) { + case 'classic': + return benchClassic(type, n); + case 'webstream': + return benchWebStream(type, n); + case 'iter': + return benchIter(type, n); + } +} + +function benchClassic(type, n) { + bench.start(); + switch (type) { + case 'readable': + for (let i = 0; i < n; i++) new Readable({ read() {} }); + break; + case 'writable': + for (let i = 0; i < n; i++) new Writable({ write(c, e, cb) { cb(); } }); + break; + case 'transform': + for (let i = 0; i < n; i++) new Transform({ + transform(c, e, cb) { cb(null, c); }, + }); + break; + case 'pair': + for (let i = 0; i < n; i++) new PassThrough(); + break; + } + bench.end(n); +} + +function benchWebStream(type, n) { + bench.start(); + switch (type) { + case 'readable': + for (let i = 0; i < n; i++) new ReadableStream({ pull() {} }); + break; + case 'writable': + for (let i = 0; i < n; i++) new WritableStream({ write() {} }); + break; + case 'transform': + for (let i = 0; i < n; i++) new TransformStream({ + transform(c, controller) { controller.enqueue(c); }, + }); + break; + case 'pair': { + // TransformStream gives a readable+writable pair + for (let i = 0; i < n; i++) new TransformStream(); + break; + } + } + bench.end(n); +} + +function benchIter(type, n) { + const { push, from, duplex } = require('stream/iter'); + + bench.start(); + switch (type) { + case 
'readable': + for (let i = 0; i < n; i++) from('x'); + break; + case 'writable': + // push() creates a writer+readable pair + for (let i = 0; i < n; i++) push(); + break; + case 'pair': + // duplex() creates a bidirectional channel pair + for (let i = 0; i < n; i++) duplex(); + break; + } + bench.end(n); +} diff --git a/benchmark/streams/iter-file-read.js b/benchmark/streams/iter-file-read.js new file mode 100644 index 00000000000000..6d8139dc6de399 --- /dev/null +++ b/benchmark/streams/iter-file-read.js @@ -0,0 +1,107 @@ +// File reading throughput benchmark. +// Reads a real file through the three stream APIs. +'use strict'; + +const common = require('../common.js'); +const fs = require('fs'); +const { Writable, pipeline } = require('stream'); +const tmpdir = require('../../test/common/tmpdir'); + +tmpdir.refresh(); +const filename = tmpdir.resolve(`.removeme-bench-file-read-${process.pid}`); + +const bench = common.createBenchmark(main, { + api: ['classic', 'webstream', 'iter'], + filesize: [1024 * 1024, 16 * 1024 * 1024, 64 * 1024 * 1024], + n: [5], +}, { + flags: ['--experimental-stream-iter'], +}); + +function main({ api, filesize, n }) { + // Create fixture file + const chunk = Buffer.alloc(Math.min(filesize, 64 * 1024), 'abcdefghij'); + const fd = fs.openSync(filename, 'w'); + let remaining = filesize; + while (remaining > 0) { + const size = Math.min(remaining, chunk.length); + fs.writeSync(fd, chunk, 0, size); + remaining -= size; + } + fs.closeSync(fd); + + const totalOps = (filesize * n) / (1024 * 1024); + + switch (api) { + case 'classic': + return benchClassic(filesize, n, totalOps); + case 'webstream': + return benchWebStream(filesize, n, totalOps); + case 'iter': + return benchIter(filesize, n, totalOps); + } +} + +function benchClassic(filesize, n, totalOps) { + function run(cb) { + const r = fs.createReadStream(filename); + const w = new Writable({ write(data, enc, cb) { cb(); } }); + pipeline(r, w, cb); + } + + // Warmup + run(() => { + let i = 
0; + bench.start(); + (function next() { + if (i++ >= n) { + fs.unlinkSync(filename); + return bench.end(totalOps); + } + run(next); + })(); + }); +} + +function benchWebStream(filesize, n, totalOps) { + const fsp = require('fs/promises'); + + async function run() { + const fh = await fsp.open(filename, 'r'); + const rs = fh.readableWebStream(); + const ws = new WritableStream({ write() {} }); + await rs.pipeTo(ws); + await fh.close(); + } + + (async () => { + // Warmup + await run(); + + bench.start(); + for (let i = 0; i < n; i++) await run(); + fs.unlinkSync(filename); + bench.end(totalOps); + })(); +} + +function benchIter(filesize, n, totalOps) { + const fsp = require('fs/promises'); + const { pipeTo } = require('stream/iter'); + + async function run() { + const fh = await fsp.open(filename, 'r'); + await pipeTo(fh.pull(), { write() {} }); + await fh.close(); + } + + (async () => { + // Warmup + await run(); + + bench.start(); + for (let i = 0; i < n; i++) await run(); + fs.unlinkSync(filename); + bench.end(totalOps); + })(); +} diff --git a/benchmark/streams/iter-throughput-broadcast.js b/benchmark/streams/iter-throughput-broadcast.js new file mode 100644 index 00000000000000..459d78e7c75f25 --- /dev/null +++ b/benchmark/streams/iter-throughput-broadcast.js @@ -0,0 +1,145 @@ +// Throughput benchmark: fan-out data to N consumers simultaneously. +// Classic streams use PassThrough + pipe, Web Streams use tee() chains, +// stream/iter uses broadcast() with push() consumers. 
+'use strict'; + +const common = require('../common.js'); +const { PassThrough, Writable } = require('stream'); + +const bench = common.createBenchmark(main, { + api: ['classic', 'webstream', 'iter'], + consumers: [1, 2, 4], + datasize: [1024 * 1024, 16 * 1024 * 1024], + n: [5], +}, { + flags: ['--experimental-stream-iter'], +}); + +const CHUNK_SIZE = 64 * 1024; + +function main({ api, consumers, datasize, n }) { + const chunk = Buffer.alloc(CHUNK_SIZE, 'abcdefghij'); + const totalOps = (datasize * n) / (1024 * 1024); + + switch (api) { + case 'classic': + return benchClassic(chunk, consumers, datasize, n, totalOps); + case 'webstream': + return benchWebStream(chunk, consumers, datasize, n, totalOps); + case 'iter': + return benchIter(chunk, consumers, datasize, n, totalOps); + } +} + +function benchClassic(chunk, numConsumers, datasize, n, totalOps) { + function run(cb) { + const source = new PassThrough(); + const sinks = []; + let finished = 0; + + for (let c = 0; c < numConsumers; c++) { + const w = new Writable({ write(data, enc, cb) { cb(); } }); + source.pipe(w); + w.on('finish', () => { if (++finished === numConsumers) cb(); }); + sinks.push(w); + } + + let remaining = datasize; + function write() { + let ok = true; + while (ok && remaining > 0) { + const size = Math.min(remaining, chunk.length); + remaining -= size; + const buf = size === chunk.length ? 
chunk : chunk.subarray(0, size); + ok = source.write(buf); + } + if (remaining > 0) { + source.once('drain', write); + } else { + source.end(); + } + } + write(); + } + + let i = 0; + bench.start(); + (function next() { + if (i++ >= n) return bench.end(totalOps); + run(next); + })(); +} + +function benchWebStream(chunk, numConsumers, datasize, n, totalOps) { + async function run() { + let remaining = datasize; + const rs = new ReadableStream({ + pull(controller) { + if (remaining <= 0) { controller.close(); return; } + const size = Math.min(remaining, chunk.length); + remaining -= size; + controller.enqueue( + size === chunk.length ? chunk : chunk.subarray(0, size)); + }, + }); + + // Chain tee() calls to get numConsumers branches. + // tee() gives 2; for 4 we tee twice at each level. + const branches = []; + if (numConsumers === 1) { + branches.push(rs); + } else { + const pending = [rs]; + while (branches.length + pending.length < numConsumers) { + const stream = pending.shift(); + const [a, b] = stream.tee(); + pending.push(a, b); + } + branches.push(...pending); + } + + const ws = () => new WritableStream({ write() {} }); + await Promise.all(branches.map((b) => b.pipeTo(ws()))); + } + + (async () => { + bench.start(); + for (let i = 0; i < n; i++) await run(); + bench.end(totalOps); + })(); +} + +function benchIter(chunk, numConsumers, datasize, n, totalOps) { + const { broadcast } = require('stream/iter'); + + // No-op consumer: drain all batches without collecting + async function drain(source) { + // eslint-disable-next-line no-unused-vars + for await (const _ of source) { /* drain */ } + } + + async function run() { + const { writer, broadcast: bc } = broadcast(); + const consumers = []; + for (let c = 0; c < numConsumers; c++) { + consumers.push(drain(bc.push())); + } + + let remaining = datasize; + while (remaining > 0) { + const size = Math.min(remaining, chunk.length); + remaining -= size; + const buf = size === chunk.length ? 
chunk : chunk.subarray(0, size); + writer.writeSync(buf); + } + writer.endSync(); + + await Promise.all(consumers); + } + + (async () => { + bench.start(); + for (let i = 0; i < n; i++) await run(); + bench.end(totalOps); + })(); +} diff --git a/benchmark/streams/iter-throughput-compression.js b/benchmark/streams/iter-throughput-compression.js new file mode 100644 index 00000000000000..0c32ddc9afca98 --- /dev/null +++ b/benchmark/streams/iter-throughput-compression.js @@ -0,0 +1,104 @@ +// Throughput benchmark: gzip compress then decompress round-trip. +// Tests real-world compression performance across stream APIs. +'use strict'; + +const common = require('../common.js'); +const { Readable, Writable, pipeline } = require('stream'); +const zlib = require('zlib'); + +const bench = common.createBenchmark(main, { + api: ['classic', 'webstream', 'iter'], + datasize: [1024 * 1024, 16 * 1024 * 1024, 64 * 1024 * 1024], + n: [5], +}, { + flags: ['--experimental-stream-iter'], +}); + +const CHUNK_SIZE = 64 * 1024; + +function main({ api, datasize, n }) { + const chunk = Buffer.alloc(CHUNK_SIZE, 'abcdefghij'); + const totalOps = (datasize * n) / (1024 * 1024); + + switch (api) { + case 'classic': + return benchClassic(chunk, datasize, n, totalOps); + case 'webstream': + return benchWebStream(chunk, datasize, n, totalOps); + case 'iter': + return benchIter(chunk, datasize, n, totalOps); + } +} + +function benchClassic(chunk, datasize, n, totalOps) { + function run(cb) { + let remaining = datasize; + const r = new Readable({ + read() { + if (remaining <= 0) { this.push(null); return; } + const size = Math.min(remaining, chunk.length); + remaining -= size; + this.push(size === chunk.length ? 
chunk : chunk.subarray(0, size)); + }, + }); + const w = new Writable({ write(data, enc, cb) { cb(); } }); + pipeline(r, zlib.createGzip(), zlib.createGunzip(), w, cb); + } + + let i = 0; + bench.start(); + (function next() { + if (i++ >= n) return bench.end(totalOps); + run(next); + })(); +} + +function benchWebStream(chunk, datasize, n, totalOps) { + async function run() { + let remaining = datasize; + const rs = new ReadableStream({ + pull(controller) { + if (remaining <= 0) { controller.close(); return; } + const size = Math.min(remaining, chunk.length); + remaining -= size; + controller.enqueue( + size === chunk.length ? chunk : chunk.subarray(0, size)); + }, + }); + const ws = new WritableStream({ write() {} }); + await rs + .pipeThrough(new CompressionStream('gzip')) + .pipeThrough(new DecompressionStream('gzip')) + .pipeTo(ws); + } + + (async () => { + bench.start(); + for (let i = 0; i < n; i++) await run(); + bench.end(totalOps); + })(); +} + +function benchIter(chunk, datasize, n, totalOps) { + const { pipeTo } = require('stream/iter'); + const { compressGzip, decompressGzip } = require('zlib/iter'); + + async function run() { + let remaining = datasize; + async function* source() { + while (remaining > 0) { + const size = Math.min(remaining, chunk.length); + remaining -= size; + yield [size === chunk.length ? chunk : chunk.subarray(0, size)]; + } + } + await pipeTo(source(), compressGzip(), decompressGzip(), + { write() {}, writeSync() { return true; } }); + } + + (async () => { + bench.start(); + for (let i = 0; i < n; i++) await run(); + bench.end(totalOps); + })(); +} diff --git a/benchmark/streams/iter-throughput-identity.js b/benchmark/streams/iter-throughput-identity.js new file mode 100644 index 00000000000000..42640cf0f9d857 --- /dev/null +++ b/benchmark/streams/iter-throughput-identity.js @@ -0,0 +1,132 @@ +// Throughput benchmark: raw data flow from source to consumer, no transforms. 
+// Compares Node.js classic streams, Web Streams, and stream/iter. +'use strict'; + +const common = require('../common.js'); +const { Readable, Writable, pipeline } = require('stream'); + +const bench = common.createBenchmark(main, { + api: ['classic', 'webstream', 'iter', 'iter-sync'], + datasize: [1024 * 1024, 16 * 1024 * 1024, 64 * 1024 * 1024], + n: [5], +}, { + flags: ['--experimental-stream-iter'], +}); + +const CHUNK_SIZE = 64 * 1024; + +function main({ api, datasize, n }) { + const chunk = Buffer.alloc(CHUNK_SIZE, 'abcdefghij'); + const totalOps = (datasize * n) / (1024 * 1024); // MB + + switch (api) { + case 'classic': + return benchClassic(chunk, datasize, n, totalOps); + case 'webstream': + return benchWebStream(chunk, datasize, n, totalOps); + case 'iter': + return benchIter(chunk, datasize, n, totalOps); + case 'iter-sync': + return benchIterSync(chunk, datasize, n, totalOps); + } +} + +function benchClassic(chunk, datasize, n, totalOps) { + let remaining = 0; + + function run(cb) { + remaining = datasize; + const r = new Readable({ + read() { + if (remaining <= 0) { + this.push(null); + return; + } + const size = Math.min(remaining, chunk.length); + remaining -= size; + this.push(size === chunk.length ? chunk : chunk.subarray(0, size)); + }, + }); + const w = new Writable({ + write(data, enc, cb) { cb(); }, + }); + pipeline(r, w, cb); + } + + let i = 0; + bench.start(); + (function next() { + if (i++ >= n) return bench.end(totalOps); + run(next); + })(); +} + +function benchWebStream(chunk, datasize, n, totalOps) { + async function run() { + let remaining = datasize; + const rs = new ReadableStream({ + pull(controller) { + if (remaining <= 0) { + controller.close(); + return; + } + const size = Math.min(remaining, chunk.length); + remaining -= size; + controller.enqueue( + size === chunk.length ? 
chunk : chunk.subarray(0, size)); + }, + }); + const ws = new WritableStream({ + write() {}, + }); + await rs.pipeTo(ws); + } + + (async () => { + bench.start(); + for (let i = 0; i < n; i++) await run(); + bench.end(totalOps); + })(); +} + +function benchIter(chunk, datasize, n, totalOps) { + const { pipeTo } = require('stream/iter'); + + async function run() { + let remaining = datasize; + async function* source() { + while (remaining > 0) { + const size = Math.min(remaining, chunk.length); + remaining -= size; + yield [size === chunk.length ? chunk : chunk.subarray(0, size)]; + } + } + // Drain to no-op sink, matching classic/webstream behavior + await pipeTo(source(), { write() {}, writeSync() { return true; } }); + } + + (async () => { + bench.start(); + for (let i = 0; i < n; i++) await run(); + bench.end(totalOps); + })(); +} + +function benchIterSync(chunk, datasize, n, totalOps) { + const { pipeToSync } = require('stream/iter'); + + bench.start(); + for (let i = 0; i < n; i++) { + let remaining = datasize; + function* source() { + while (remaining > 0) { + const size = Math.min(remaining, chunk.length); + remaining -= size; + yield [size === chunk.length ? chunk : chunk.subarray(0, size)]; + } + } + // Drain to no-op sink, matching other benchmarks + pipeToSync(source(), { writeSync() {} }); + } + bench.end(totalOps); +} diff --git a/benchmark/streams/iter-throughput-pipeto.js b/benchmark/streams/iter-throughput-pipeto.js new file mode 100644 index 00000000000000..117a78aead1088 --- /dev/null +++ b/benchmark/streams/iter-throughput-pipeto.js @@ -0,0 +1,121 @@ +// Throughput benchmark: pipe source to a no-op sink (write-only destination). +// Measures pure pipe throughput without consumer-side collection overhead. 
+'use strict'; + +const common = require('../common.js'); +const { Readable, Writable, pipeline } = require('stream'); + +const bench = common.createBenchmark(main, { + api: ['classic', 'webstream', 'iter', 'iter-sync'], + datasize: [1024 * 1024, 16 * 1024 * 1024, 64 * 1024 * 1024], + n: [5], +}, { + flags: ['--experimental-stream-iter'], +}); + +const CHUNK_SIZE = 64 * 1024; + +function main({ api, datasize, n }) { + const chunk = Buffer.alloc(CHUNK_SIZE, 'abcdefghij'); + const totalOps = (datasize * n) / (1024 * 1024); + + switch (api) { + case 'classic': + return benchClassic(chunk, datasize, n, totalOps); + case 'webstream': + return benchWebStream(chunk, datasize, n, totalOps); + case 'iter': + return benchIter(chunk, datasize, n, totalOps); + case 'iter-sync': + return benchIterSync(chunk, datasize, n, totalOps); + } +} + +function benchClassic(chunk, datasize, n, totalOps) { + function run(cb) { + let remaining = datasize; + const r = new Readable({ + read() { + if (remaining <= 0) { this.push(null); return; } + const size = Math.min(remaining, chunk.length); + remaining -= size; + this.push(size === chunk.length ? chunk : chunk.subarray(0, size)); + }, + }); + const w = new Writable({ write(data, enc, cb) { cb(); } }); + pipeline(r, w, cb); + } + + let i = 0; + bench.start(); + (function next() { + if (i++ >= n) return bench.end(totalOps); + run(next); + })(); +} + +function benchWebStream(chunk, datasize, n, totalOps) { + async function run() { + let remaining = datasize; + const rs = new ReadableStream({ + pull(controller) { + if (remaining <= 0) { controller.close(); return; } + const size = Math.min(remaining, chunk.length); + remaining -= size; + controller.enqueue( + size === chunk.length ? 
chunk : chunk.subarray(0, size)); + }, + }); + const ws = new WritableStream({ write() {} }); + await rs.pipeTo(ws); + } + + (async () => { + bench.start(); + for (let i = 0; i < n; i++) await run(); + bench.end(totalOps); + })(); +} + +function benchIter(chunk, datasize, n, totalOps) { + const { pipeTo } = require('stream/iter'); + + async function run() { + let remaining = datasize; + async function* source() { + while (remaining > 0) { + const size = Math.min(remaining, chunk.length); + remaining -= size; + yield [size === chunk.length ? chunk : chunk.subarray(0, size)]; + } + } + // Provide writeSync for the sync fast path in pipeTo + const writer = { write() {}, writeSync() { return true; } }; + await pipeTo(source(), writer); + } + + (async () => { + bench.start(); + for (let i = 0; i < n; i++) await run(); + bench.end(totalOps); + })(); +} + +function benchIterSync(chunk, datasize, n, totalOps) { + const { pipeToSync } = require('stream/iter'); + + bench.start(); + for (let i = 0; i < n; i++) { + let remaining = datasize; + function* source() { + while (remaining > 0) { + const size = Math.min(remaining, chunk.length); + remaining -= size; + yield [size === chunk.length ? chunk : chunk.subarray(0, size)]; + } + } + const writer = { writeSync() {} }; + pipeToSync(source(), writer); + } + bench.end(totalOps); +} diff --git a/benchmark/streams/iter-throughput-transform.js b/benchmark/streams/iter-throughput-transform.js new file mode 100644 index 00000000000000..b251aea4af2752 --- /dev/null +++ b/benchmark/streams/iter-throughput-transform.js @@ -0,0 +1,146 @@ +// Throughput benchmark: data flow through a single stateless transform. +// Uses buffer copy (allocate + memcpy) so pipeline overhead is measurable. 
+'use strict'; + +const common = require('../common.js'); +const { Readable, Transform, Writable, pipeline } = require('stream'); + +const bench = common.createBenchmark(main, { + api: ['classic', 'webstream', 'iter', 'iter-sync'], + datasize: [1024 * 1024, 16 * 1024 * 1024, 64 * 1024 * 1024], + n: [5], +}, { + flags: ['--experimental-stream-iter'], +}); + +const CHUNK_SIZE = 64 * 1024; + +// Buffer copy transform: allocate + memcpy. Cheap enough that pipeline +// overhead is a measurable fraction of total time, but non-trivial (new +// buffer per chunk, so it's a real transform that produces new data). +function copyBuf(buf) { + return Buffer.copyBytesFrom(buf); +} + +function main({ api, datasize, n }) { + const chunk = Buffer.alloc(CHUNK_SIZE, 'abcdefghij'); + const totalOps = (datasize * n) / (1024 * 1024); + + switch (api) { + case 'classic': + return benchClassic(chunk, datasize, n, totalOps); + case 'webstream': + return benchWebStream(chunk, datasize, n, totalOps); + case 'iter': + return benchIter(chunk, datasize, n, totalOps); + case 'iter-sync': + return benchIterSync(chunk, datasize, n, totalOps); + } +} + +function benchClassic(chunk, datasize, n, totalOps) { + function run(cb) { + let remaining = datasize; + const r = new Readable({ + read() { + if (remaining <= 0) { this.push(null); return; } + const size = Math.min(remaining, chunk.length); + remaining -= size; + this.push(size === chunk.length ? 
chunk : chunk.subarray(0, size)); + }, + }); + const t = new Transform({ + transform(data, enc, cb) { + cb(null, copyBuf(data)); + }, + }); + const w = new Writable({ write(data, enc, cb) { cb(); } }); + pipeline(r, t, w, cb); + } + + let i = 0; + bench.start(); + (function next() { + if (i++ >= n) return bench.end(totalOps); + run(next); + })(); +} + +function benchWebStream(chunk, datasize, n, totalOps) { + async function run() { + let remaining = datasize; + const rs = new ReadableStream({ + pull(controller) { + if (remaining <= 0) { controller.close(); return; } + const size = Math.min(remaining, chunk.length); + remaining -= size; + controller.enqueue( + size === chunk.length ? chunk : chunk.subarray(0, size)); + }, + }); + const ts = new TransformStream({ + transform(c, controller) { + controller.enqueue(copyBuf(c)); + }, + }); + const ws = new WritableStream({ write() {} }); + await rs.pipeThrough(ts).pipeTo(ws); + } + + (async () => { + bench.start(); + for (let i = 0; i < n; i++) await run(); + bench.end(totalOps); + })(); +} + +function benchIter(chunk, datasize, n, totalOps) { + const { pipeTo } = require('stream/iter'); + + const upper = (chunks) => { + if (chunks === null) return null; + return chunks.map((c) => copyBuf(c)); + }; + + async function run() { + let remaining = datasize; + async function* source() { + while (remaining > 0) { + const size = Math.min(remaining, chunk.length); + remaining -= size; + yield [size === chunk.length ? 
chunk : chunk.subarray(0, size)]; + } + } + await pipeTo(source(), upper, + { write() {}, writeSync() { return true; } }); + } + + (async () => { + bench.start(); + for (let i = 0; i < n; i++) await run(); + bench.end(totalOps); + })(); +} + +function benchIterSync(chunk, datasize, n, totalOps) { + const { pipeToSync } = require('stream/iter'); + + const upper = (chunks) => { + if (chunks === null) return null; + return chunks.map((c) => copyBuf(c)); + }; + + bench.start(); + for (let i = 0; i < n; i++) { + let remaining = datasize; + function* source() { + while (remaining > 0) { + const size = Math.min(remaining, chunk.length); + remaining -= size; + yield [size === chunk.length ? chunk : chunk.subarray(0, size)]; + } + } + pipeToSync(source(), upper, { writeSync() {} }); + } + bench.end(totalOps); +} diff --git a/doc/api/cli.md b/doc/api/cli.md index b1a0d674ca4ded..34fa321a228dd5 100644 --- a/doc/api/cli.md +++ b/doc/api/cli.md @@ -1277,6 +1277,16 @@ added: Enable experimental support for storage inspection +### `--experimental-stream-iter` + + + +> Stability: 1 - Experimental + +Enable the experimental [`node:stream/iter`][] module. + ### `--experimental-test-coverage` + +> Stability: 1 - Experimental + +* `...transforms` {Function|Object} Optional transforms to apply via + [`stream/iter pull()`][]. +* `options` {Object} + * `signal` {AbortSignal} + * `autoClose` {boolean} Close the file handle when the stream ends. + **Default:** `false`. + * `start` {number} Byte offset to begin reading from. When specified, + reads use explicit positioning (`pread` semantics). **Default:** current + file position. + * `limit` {number} Maximum number of bytes to read before ending the + iterator. Reads stop when `limit` bytes have been delivered or EOF is + reached, whichever comes first. **Default:** read until EOF. + * `chunkSize` {number} Size in bytes of the buffer allocated for each + read operation. **Default:** `131072` (128 KB). 
+* Returns: {AsyncIterable\} + +Return the file contents as an async iterable using the +[`node:stream/iter`][] pull model. Reads are performed in `chunkSize`-byte +chunks (default 128 KB). If transforms are provided, they are applied +via [`stream/iter pull()`][]. + +The file handle is locked while the iterable is being consumed and unlocked +when iteration completes, an error occurs, or the consumer breaks. + +This function is only available when the `--experimental-stream-iter` flag is +enabled. + +```mjs +import { open } from 'node:fs/promises'; +import { text } from 'node:stream/iter'; +import { compressGzip } from 'node:zlib/iter'; + +const fh = await open('input.txt', 'r'); + +// Read as text +console.log(await text(fh.pull({ autoClose: true }))); + +// Read 1 KB starting at byte 100 +const fh2 = await open('input.txt', 'r'); +console.log(await text(fh2.pull({ start: 100, limit: 1024, autoClose: true }))); + +// Read with compression +const fh3 = await open('input.txt', 'r'); +const compressed = fh3.pull(compressGzip(), { autoClose: true }); +``` + +```cjs +const { open } = require('node:fs/promises'); +const { text } = require('node:stream/iter'); +const { compressGzip } = require('node:zlib/iter'); + +async function run() { + const fh = await open('input.txt', 'r'); + + // Read as text + console.log(await text(fh.pull({ autoClose: true }))); + + // Read 1 KB starting at byte 100 + const fh2 = await open('input.txt', 'r'); + console.log(await text(fh2.pull({ start: 100, limit: 1024, autoClose: true }))); + + // Read with compression + const fh3 = await open('input.txt', 'r'); + const compressed = fh3.pull(compressGzip(), { autoClose: true }); +} + +run().catch(console.error); +``` + +#### `filehandle.pullSync([...transforms][, options])` + + + +> Stability: 1 - Experimental + +* `...transforms` {Function|Object} Optional transforms to apply via + [`stream/iter pullSync()`][]. 
+* `options` {Object} + * `autoClose` {boolean} Close the file handle when the stream ends. + **Default:** `false`. + * `start` {number} Byte offset to begin reading from. When specified, + reads use explicit positioning. **Default:** current file position. + * `limit` {number} Maximum number of bytes to read before ending the + iterator. **Default:** read until EOF. + * `chunkSize` {number} Size in bytes of the buffer allocated for each + read operation. **Default:** `131072` (128 KB). +* Returns: {Iterable\} + +Synchronous counterpart of [`filehandle.pull()`][]. Returns a sync iterable +that reads the file using synchronous I/O on the main thread. Reads are +performed in `chunkSize`-byte chunks (default 128 KB). + +The file handle is locked while the iterable is being consumed. Unlike the +async `pull()`, this method does not support `AbortSignal` since all +operations are synchronous. + +This function is only available when the `--experimental-stream-iter` flag is +enabled. + +```mjs +import { open } from 'node:fs/promises'; +import { textSync, pipeToSync } from 'node:stream/iter'; +import { compressGzipSync, decompressGzipSync } from 'node:zlib/iter'; + +const fh = await open('input.txt', 'r'); + +// Read as text (sync) +console.log(textSync(fh.pullSync({ autoClose: true }))); + +// Sync compress pipeline: file -> gzip -> file +const src = await open('input.txt', 'r'); +const dst = await open('output.gz', 'w'); +pipeToSync(src.pullSync(compressGzipSync(), { autoClose: true }), dst.writer({ autoClose: true })); +``` + +```cjs +const { open } = require('node:fs/promises'); +const { textSync, pipeToSync } = require('node:stream/iter'); +const { compressGzipSync, decompressGzipSync } = require('node:zlib/iter'); + +async function run() { + const fh = await open('input.txt', 'r'); + + // Read as text (sync) + console.log(textSync(fh.pullSync({ autoClose: true }))); + + // Sync compress pipeline: file -> gzip -> file + const src = await open('input.txt', 'r'); + 
const dst = await open('output.gz', 'w'); + pipeToSync( + src.pullSync(compressGzipSync(), { autoClose: true }), + dst.writer({ autoClose: true }), + ); +} + +run().catch(console.error); +``` + #### `filehandle.read(buffer, offset, length, position)` + +> Stability: 1 - Experimental + +* `options` {Object} + * `autoClose` {boolean} Close the file handle when the writer ends or + fails. **Default:** `false`. + * `start` {number} Byte offset to start writing at. When specified, + writes use explicit positioning. **Default:** current file position. + * `limit` {number} Maximum number of bytes the writer will accept. + Async writes (`write()`, `writev()`) that would exceed the limit reject + with `ERR_OUT_OF_RANGE`. Sync writes (`writeSync()`, `writevSync()`) + return `false`. **Default:** no limit. + * `chunkSize` {number} Maximum chunk size in bytes for synchronous write + operations. Writes larger than this threshold fall back to async I/O. + Set this to match the reader's `chunkSize` for optimal `pipeTo()` + performance. **Default:** `131072` (128 KB). +* Returns: {Object} + * `write(chunk[, options])` {Function} Returns {Promise\}. + Accepts `Uint8Array`, `Buffer`, or string (UTF-8 encoded). + * `chunk` {Buffer|TypedArray|DataView|string} + * `options` {Object} + * `signal` {AbortSignal} If the signal is already aborted, the write + rejects with `AbortError` without performing I/O. + * `writev(chunks[, options])` {Function} Returns {Promise\}. Uses + scatter/gather I/O via a single `writev()` syscall. Accepts mixed + `Uint8Array`/string arrays. + * `chunks` {Array\} + * `options` {Object} + * `signal` {AbortSignal} If the signal is already aborted, the write + rejects with `AbortError` without performing I/O. + * `writeSync(chunk)` {Function} Returns {boolean}. Attempts a synchronous + write. Returns `true` if the write succeeded, `false` if the caller + should fall back to async `write()`. 
Returns `false` when: the writer + is closed/errored, an async operation is in flight, the chunk exceeds + `chunkSize`, or the write would exceed `limit`. + * `chunk` {Buffer|TypedArray|DataView|string} + * `writevSync(chunks)` {Function} Returns {boolean}. Synchronous batch + write. Same fallback semantics as `writeSync()`. + * `chunks` {Array\} + * `end([options])` {Function} Returns {Promise\} total bytes + written. Idempotent: returns `totalBytesWritten` if already closed, + returns the pending promise if already closing. Rejects if the writer + is in an errored state. + * `options` {Object} + * `signal` {AbortSignal} If the signal is already aborted, `end()` + rejects with `AbortError` and the writer remains open. + * `endSync()` {Function} Returns {number} total bytes written on + success, `-1` if the writer is errored or an async operation is in + flight. Idempotent when already closed. + * `fail(reason)` {Function} Puts the writer into a terminal error state. + Synchronous. If the writer is already closed or errored, this is a + no-op. If `autoClose` is true, closes the file handle synchronously. + +Return a [`node:stream/iter`][] writer backed by this file handle. + +The writer supports both `Symbol.asyncDispose` and `Symbol.dispose`: + +* `await using w = fh.writer()` — if the writer is still open (no `end()` + called), `asyncDispose` calls `fail()`. If `end()` is pending, it waits + for it to complete. +* `using w = fh.writer()` — calls `fail()` unconditionally. + +The `writeSync()` and `writevSync()` methods enable the try-sync fast path +used by [`stream/iter pipeTo()`][]. When the reader's chunk size matches the +writer's `chunkSize`, all writes in a `pipeTo()` pipeline complete +synchronously with zero promise overhead. + +This function is only available when the `--experimental-stream-iter` flag is +enabled. 
+ +```mjs +import { open } from 'node:fs/promises'; +import { from, pipeTo } from 'node:stream/iter'; +import { compressGzip } from 'node:zlib/iter'; + +// Async pipeline +const fh = await open('output.gz', 'w'); +await pipeTo(from('Hello!'), compressGzip(), fh.writer({ autoClose: true })); + +// Sync pipeline with limit +const src = await open('input.txt', 'r'); +const dst = await open('output.txt', 'w'); +const w = dst.writer({ limit: 1024 * 1024 }); // Max 1 MB +await pipeTo(src.pull({ autoClose: true }), w); +await w.end(); +await dst.close(); +``` + +```cjs +const { open } = require('node:fs/promises'); +const { from, pipeTo } = require('node:stream/iter'); +const { compressGzip } = require('node:zlib/iter'); + +async function run() { + // Async pipeline + const fh = await open('output.gz', 'w'); + await pipeTo(from('Hello!'), compressGzip(), fh.writer({ autoClose: true })); + + // Sync pipeline with limit + const src = await open('input.txt', 'r'); + const dst = await open('output.txt', 'w'); + const w = dst.writer({ limit: 1024 * 1024 }); // Max 1 MB + await pipeTo(src.pull({ autoClose: true }), w); + await w.end(); + await dst.close(); +} + +run().catch(console.error); +``` + #### `filehandle[Symbol.asyncDispose]()` + +> Stability: 1 - Experimental + + + +The `node:stream/iter` module provides a streaming API built on iterables +rather than the event-driven `Readable`/`Writable`/`Transform` class hierarchy, +or the Web Streams `ReadableStream`/`WritableStream`/`TransformStream` interfaces. + +This module is available only when the `--experimental-stream-iter` CLI flag +is enabled. + +Streams are represented as `AsyncIterable` (async) or +`Iterable` (sync). There are no base classes to extend -- any +object implementing the iterable protocol can participate. Transforms are plain +functions or objects with a `transform` method. + +Data flows in **batches** (`Uint8Array[]` per iteration) to amortize the cost +of async operations. 
+ +```mjs +import { from, pull, text } from 'node:stream/iter'; +import { compressGzip, decompressGzip } from 'node:zlib/iter'; + +// Compress and decompress a string +const compressed = pull(from('Hello, world!'), compressGzip()); +const result = await text(pull(compressed, decompressGzip())); +console.log(result); // 'Hello, world!' +``` + +```cjs +const { from, pull, text } = require('node:stream/iter'); +const { compressGzip, decompressGzip } = require('node:zlib/iter'); + +async function run() { + // Compress and decompress a string + const compressed = pull(from('Hello, world!'), compressGzip()); + const result = await text(pull(compressed, decompressGzip())); + console.log(result); // 'Hello, world!' +} + +run().catch(console.error); +``` + +```mjs +import { open } from 'node:fs/promises'; +import { text, pipeTo } from 'node:stream/iter'; +import { compressGzip, decompressGzip } from 'node:zlib/iter'; + +// Read a file, compress, write to another file +const src = await open('input.txt', 'r'); +const dst = await open('output.gz', 'w'); +await pipeTo(src.pull(), compressGzip(), dst.writer({ autoClose: true })); +await src.close(); + +// Read it back +const gz = await open('output.gz', 'r'); +console.log(await text(gz.pull(decompressGzip(), { autoClose: true }))); +``` + +```cjs +const { open } = require('node:fs/promises'); +const { text, pipeTo } = require('node:stream/iter'); +const { compressGzip, decompressGzip } = require('node:zlib/iter'); + +async function run() { + // Read a file, compress, write to another file + const src = await open('input.txt', 'r'); + const dst = await open('output.gz', 'w'); + await pipeTo(src.pull(), compressGzip(), dst.writer({ autoClose: true })); + await src.close(); + + // Read it back + const gz = await open('output.gz', 'r'); + console.log(await text(gz.pull(decompressGzip(), { autoClose: true }))); +} + +run().catch(console.error); +``` + +## Concepts + +### Byte streams + +All data in this API is represented as 
`Uint8Array` bytes. Strings +are automatically UTF-8 encoded when passed to `from()`, `push()`, or +`pipeTo()`. This removes ambiguity around encodings and enables zero-copy +transfers between streams and native code. + +### Batching + +Each iteration yields a **batch** -- an array of `Uint8Array` chunks +(`Uint8Array[]`). Batching amortizes the cost of `await` and Promise creation +across multiple chunks. A consumer that processes one chunk at a time can +simply iterate the inner array: + +```mjs +for await (const batch of source) { + for (const chunk of batch) { + handle(chunk); + } +} +``` + +```cjs +async function run() { + for await (const batch of source) { + for (const chunk of batch) { + handle(chunk); + } + } +} +``` + +### Transforms + +Transforms come in two forms: + +* **Stateless** -- a function `(chunks, options) => result` called once per + batch. Receives `Uint8Array[]` (or `null` as the flush signal) and an + `options` object. Returns `Uint8Array[]`, `null`, or an iterable of chunks. + +* **Stateful** -- an object `{ transform(source, options) }` where `transform` + is a generator (sync or async) that receives the entire upstream iterable + and an `options` object, and yields output. This form is used for + compression, encryption, and any transform that needs to buffer across + batches. + +Both forms receive an `options` parameter with the following property: + +* `options.signal` {AbortSignal} An AbortSignal that fires when the pipeline + is cancelled, encounters an error, or the consumer stops reading. Transforms + can check `signal.aborted` or listen for the `'abort'` event to perform + early cleanup. + +The flush signal (`null`) is sent after the source ends, giving transforms +a chance to emit trailing data (e.g., compression footers). 
+ +```js +// Stateless: uppercase transform +const upper = (chunks) => { + if (chunks === null) return null; // flush + return chunks.map((c) => new TextEncoder().encode( + new TextDecoder().decode(c).toUpperCase(), + )); +}; + +// Stateful: line splitter +const lines = { + transform: async function*(source) { + let partial = ''; + for await (const chunks of source) { + if (chunks === null) { + if (partial) yield [new TextEncoder().encode(partial)]; + continue; + } + for (const chunk of chunks) { + const str = partial + new TextDecoder().decode(chunk); + const parts = str.split('\n'); + partial = parts.pop(); + for (const line of parts) { + yield [new TextEncoder().encode(`${line}\n`)]; + } + } + } + }, +}; +``` + +### Pull vs. push + +The API supports two models: + +* **Pull** -- data flows on demand. `pull()` and `pullSync()` create lazy + pipelines that only read from the source when the consumer iterates. + +* **Push** -- data is written explicitly. `push()` creates a writer/readable + pair with backpressure. The writer pushes data in; the readable is consumed + as an async iterable. + +### Backpressure + +Pull streams have natural backpressure -- the consumer drives the pace, so +the source is never read faster than the consumer can process. Push streams +need explicit backpressure because the producer and consumer run +independently. The `highWaterMark` and `backpressure` options on `push()`, +`broadcast()`, and `share()` control how this works. + +#### The two-buffer model + +Push streams use a two-part buffering system. Think of it like a bucket +(slots) being filled through a hose (pending writes), with a float valve +that closes when the bucket is full: + +```text + highWaterMark (e.g., 3) + | + Producer v + | +---------+ + v | | + [ write() ] ----+ +--->| slots |---> Consumer pulls + [ write() ] | | | (bucket)| for await (...) 
+ [ write() ] v | +---------+ + +--------+ ^ + | pending| | + | writes | float valve + | (hose) | (backpressure) + +--------+ + ^ + | + 'strict' mode limits this too! +``` + +* **Slots (the bucket)** -- data ready for the consumer, capped at + `highWaterMark`. When the consumer pulls, it drains all slots at once + into a single batch. + +* **Pending writes (the hose)** -- writes waiting for slot space. After + the consumer drains, pending writes are promoted into the now-empty + slots and their promises resolve. + +How each policy uses these buffers: + +| Policy | Slots limit | Pending writes limit | +| --------------- | --------------- | -------------------- | +| `'strict'` | `highWaterMark` | `highWaterMark` | +| `'block'` | `highWaterMark` | Unbounded | +| `'drop-oldest'` | `highWaterMark` | N/A (never waits) | +| `'drop-newest'` | `highWaterMark` | N/A (never waits) | + +#### Strict (default) + +Strict mode catches "fire-and-forget" patterns where the producer calls +`write()` without awaiting, which would cause unbounded memory growth. +It limits both the slots buffer and the pending writes queue to +`highWaterMark`. + +If you properly await each write, you can only ever have one pending +write at a time (yours), so you never hit the pending writes limit. +Unawaited writes accumulate in the pending queue and throw once it +overflows: + +```mjs +import { push, text } from 'node:stream/iter'; + +const { writer, readable } = push({ highWaterMark: 16 }); + +// Consumer must run concurrently -- without it, the first write +// that fills the buffer blocks the producer forever. +const consuming = text(readable); + +// GOOD: awaited writes. The producer waits for the consumer to +// make room when the buffer is full. 
+for (const item of dataset) { + await writer.write(item); +} +await writer.end(); +console.log(await consuming); +``` + +```cjs +const { push, text } = require('node:stream/iter'); + +async function run() { + const { writer, readable } = push({ highWaterMark: 16 }); + + // Consumer must run concurrently -- without it, the first write + // that fills the buffer blocks the producer forever. + const consuming = text(readable); + + // GOOD: awaited writes. The producer waits for the consumer to + // make room when the buffer is full. + for (const item of dataset) { + await writer.write(item); + } + await writer.end(); + console.log(await consuming); +} + +run().catch(console.error); +``` + +Forgetting to `await` will eventually throw: + +```js +// BAD: fire-and-forget. Strict mode throws once both buffers fill. +for (const item of dataset) { + writer.write(item); // Not awaited -- queues without bound +} +// --> throws "Backpressure violation: too many pending writes" +``` + +#### Block + +Block mode caps slots at `highWaterMark` but places no limit on the +pending writes queue. Awaited writes block until the consumer makes room, +just like strict mode. The difference is that unawaited writes silently +queue forever instead of throwing -- a potential memory leak if the +producer forgets to `await`. + +This is the mode that existing Node.js classic streams and Web Streams +default to. Use it when you control the producer and know it awaits +properly, or when migrating code from those APIs. + +```mjs +import { push, text } from 'node:stream/iter'; + +const { writer, readable } = push({ + highWaterMark: 16, + backpressure: 'block', +}); + +const consuming = text(readable); + +// Safe -- awaited writes block until the consumer reads. 
+for (const item of dataset) { + await writer.write(item); +} +await writer.end(); +console.log(await consuming); +``` + +```cjs +const { push, text } = require('node:stream/iter'); + +async function run() { + const { writer, readable } = push({ + highWaterMark: 16, + backpressure: 'block', + }); + + const consuming = text(readable); + + // Safe -- awaited writes block until the consumer reads. + for (const item of dataset) { + await writer.write(item); + } + await writer.end(); + console.log(await consuming); +} + +run().catch(console.error); +``` + +#### Drop-oldest + +Writes never wait. When the slots buffer is full, the oldest buffered +chunk is evicted to make room for the incoming write. The consumer +always sees the most recent data. Useful for live feeds, telemetry, or +any scenario where stale data is less valuable than current data. + +```mjs +import { push } from 'node:stream/iter'; + +// Keep only the 5 most recent readings +const { writer, readable } = push({ + highWaterMark: 5, + backpressure: 'drop-oldest', +}); +``` + +```cjs +const { push } = require('node:stream/iter'); + +// Keep only the 5 most recent readings +const { writer, readable } = push({ + highWaterMark: 5, + backpressure: 'drop-oldest', +}); +``` + +#### Drop-newest + +Writes never wait. When the slots buffer is full, the incoming write is +silently discarded. The consumer processes what is already buffered +without being overwhelmed by new data. Useful for rate-limiting or +shedding load under pressure. 
+ +```mjs +import { push } from 'node:stream/iter'; + +// Accept up to 10 buffered items; discard anything beyond that +const { writer, readable } = push({ + highWaterMark: 10, + backpressure: 'drop-newest', +}); +``` + +```cjs +const { push } = require('node:stream/iter'); + +// Accept up to 10 buffered items; discard anything beyond that +const { writer, readable } = push({ + highWaterMark: 10, + backpressure: 'drop-newest', +}); +``` + +### Writer interface + +A writer is any object conforming to the Writer interface. Only `write()` is +required; all other methods are optional. + +Each async method has a synchronous `*Sync` counterpart designed for a +try-fallback pattern: attempt the fast synchronous path first, and fall back +to the async version only when the synchronous call indicates it could not +complete: + +```mjs +if (!writer.writeSync(chunk)) await writer.write(chunk); +if (!writer.writevSync(chunks)) await writer.writev(chunks); +if (writer.endSync() < 0) await writer.end(); +writer.fail(err); // Always synchronous, no fallback needed +``` + +### `writer.desiredSize` + +* {number|null} + +The number of buffer slots available before the high water mark is reached. +Returns `null` if the writer is closed or the consumer has disconnected. + +The value is always non-negative. + +### `writer.end([options])` + +* `options` {Object} + * `signal` {AbortSignal} Cancel just this operation. The signal cancels only + the pending `end()` call; it does not fail the writer itself. +* Returns: {Promise\} Total bytes written. + +Signal that no more data will be written. + +### `writer.endSync()` + +* Returns: {number} Total bytes written, or `-1` if the writer is not open. + +Synchronous variant of `writer.end()`. Returns `-1` if the writer is already +closed or errored. 
Can be used as a try-fallback pattern: + +```cjs +const result = writer.endSync(); +if (result < 0) { + writer.end(); +} +``` + +### `writer.fail(reason)` + +* `reason` {any} + +Put the writer into a terminal error state. If the writer is already closed +or errored, this is a no-op. Unlike `write()` and `end()`, `fail()` is +unconditionally synchronous because failing a writer is a pure state +transition with no async work to perform. + +### `writer.write(chunk[, options])` + +* `chunk` {Uint8Array|string} +* `options` {Object} + * `signal` {AbortSignal} Cancel just this write operation. The signal cancels + only the pending `write()` call; it does not fail the writer itself. +* Returns: {Promise\} + +Write a chunk. The promise resolves when buffer space is available. + +### `writer.writeSync(chunk)` + +* `chunk` {Uint8Array|string} +* Returns: {boolean} `true` if the write was accepted, `false` if the + buffer is full. + +Synchronous write. Does not block; returns `false` if backpressure is active. + +### `writer.writev(chunks[, options])` + +* `chunks` {Uint8Array\[]|string\[]} +* `options` {Object} + * `signal` {AbortSignal} Cancel just this write operation. The signal cancels + only the pending `writev()` call; it does not fail the writer itself. +* Returns: {Promise\} + +Write multiple chunks as a single batch. + +### `writer.writevSync(chunks)` + +* `chunks` {Uint8Array\[]|string\[]} +* Returns: {boolean} `true` if the write was accepted, `false` if the + buffer is full. + +Synchronous batch write. 
+ +## The `stream/iter` module + +All functions are available both as named exports and as properties of the +`Stream` namespace object: + +```mjs +// Named exports +import { from, pull, bytes, Stream } from 'node:stream/iter'; + +// Namespace access +Stream.from('hello'); +``` + +```cjs +// Named exports +const { from, pull, bytes, Stream } = require('node:stream/iter'); + +// Namespace access +Stream.from('hello'); +``` + +Including the `node:` prefix on the module specifier is optional. + +## Sources + +### `from(input)` + + + +* `input` {string|ArrayBuffer|ArrayBufferView|Iterable|AsyncIterable|Object} + Must not be `null` or `undefined`. +* Returns: {AsyncIterable\} + +Create an async byte stream from the given input. Strings are UTF-8 encoded. +`ArrayBuffer` and `ArrayBufferView` values are wrapped as `Uint8Array`. Arrays +and iterables are recursively flattened and normalized. + +Objects implementing `Symbol.for('Stream.toAsyncStreamable')` or +`Symbol.for('Stream.toStreamable')` are converted via those protocols. The +`toAsyncStreamable` protocol takes precedence over `toStreamable`, which takes +precedence over the iteration protocols (`Symbol.asyncIterator`, +`Symbol.iterator`). + +```mjs +import { Buffer } from 'node:buffer'; +import { from, text } from 'node:stream/iter'; + +console.log(await text(from('hello'))); // 'hello' +console.log(await text(from(Buffer.from('hello')))); // 'hello' +``` + +```cjs +const { Buffer } = require('node:buffer'); +const { from, text } = require('node:stream/iter'); + +async function run() { + console.log(await text(from('hello'))); // 'hello' + console.log(await text(from(Buffer.from('hello')))); // 'hello' +} + +run().catch(console.error); +``` + +### `fromSync(input)` + + + +* `input` {string|ArrayBuffer|ArrayBufferView|Iterable|Object} + Must not be `null` or `undefined`. +* Returns: {Iterable\} + +Synchronous version of [`from()`][]. Returns a sync iterable. Cannot accept +async iterables or promises. 
Objects implementing +`Symbol.for('Stream.toStreamable')` are converted via that protocol (takes +precedence over `Symbol.iterator`). The `toAsyncStreamable` protocol is +ignored entirely. + +```mjs +import { fromSync, textSync } from 'node:stream/iter'; + +console.log(textSync(fromSync('hello'))); // 'hello' +``` + +```cjs +const { fromSync, textSync } = require('node:stream/iter'); + +console.log(textSync(fromSync('hello'))); // 'hello' +``` + +## Pipelines + +### `pipeTo(source[, ...transforms], writer[, options])` + + + +* `source` {AsyncIterable|Iterable} The data source. +* `...transforms` {Function|Object} Zero or more transforms to apply. +* `writer` {Object} Destination with `write(chunk)` method. +* `options` {Object} + * `signal` {AbortSignal} Abort the pipeline. + * `preventClose` {boolean} If `true`, do not call `writer.end()` when + the source ends. **Default:** `false`. + * `preventFail` {boolean} If `true`, do not call `writer.fail()` on + error. **Default:** `false`. +* Returns: {Promise\} Total bytes written. + +Pipe a source through transforms into a writer. If the writer has a +`writev(chunks)` method, entire batches are passed in a single call (enabling +scatter/gather I/O). + +If the writer implements the optional `*Sync` methods (`writeSync`, `writevSync`, +`endSync`), `pipeTo()` will attempt to use the synchronous methods +first as a fast path, and fall back to the async versions only when the sync +methods indicate they cannot complete (e.g., backpressure or waiting for the +next tick). `fail()` is always called synchronously. 
+ +```mjs +import { from, pipeTo } from 'node:stream/iter'; +import { compressGzip } from 'node:zlib/iter'; +import { open } from 'node:fs/promises'; + +const fh = await open('output.gz', 'w'); +const totalBytes = await pipeTo( + from('Hello, world!'), + compressGzip(), + fh.writer({ autoClose: true }), +); +``` + +```cjs +const { from, pipeTo } = require('node:stream/iter'); +const { compressGzip } = require('node:zlib/iter'); +const { open } = require('node:fs/promises'); + +async function run() { + const fh = await open('output.gz', 'w'); + const totalBytes = await pipeTo( + from('Hello, world!'), + compressGzip(), + fh.writer({ autoClose: true }), + ); +} + +run().catch(console.error); +``` + +### `pipeToSync(source[, ...transforms], writer[, options])` + + + +* `source` {Iterable} The sync data source. +* `...transforms` {Function|Object} Zero or more sync transforms. +* `writer` {Object} Destination with `write(chunk)` method. +* `options` {Object} + * `preventClose` {boolean} **Default:** `false`. + * `preventFail` {boolean} **Default:** `false`. +* Returns: {number} Total bytes written. + +Synchronous version of [`pipeTo()`][]. The `source`, all transforms, and the +`writer` must be synchronous. Cannot accept async iterables or promises. + +The `writer` must have the `*Sync` methods (`writeSync`, `writevSync`, +`endSync`) and `fail()` for this to work. + +### `pull(source[, ...transforms][, options])` + + + +* `source` {AsyncIterable|Iterable} The data source. +* `...transforms` {Function|Object} Zero or more transforms to apply. +* `options` {Object} + * `signal` {AbortSignal} Abort the pipeline. +* Returns: {AsyncIterable\} + +Create a lazy async pipeline. Data is not read from `source` until the +returned iterable is consumed. Transforms are applied in order. 
+ +```mjs +import { from, pull, text } from 'node:stream/iter'; + +const asciiUpper = (chunks) => { + if (chunks === null) return null; + return chunks.map((c) => { + for (let i = 0; i < c.length; i++) { + c[i] -= (c[i] >= 97 && c[i] <= 122) * 32; + } + return c; + }); +}; + +const result = pull(from('hello'), asciiUpper); +console.log(await text(result)); // 'HELLO' +``` + +```cjs +const { from, pull, text } = require('node:stream/iter'); + +const asciiUpper = (chunks) => { + if (chunks === null) return null; + return chunks.map((c) => { + for (let i = 0; i < c.length; i++) { + c[i] -= (c[i] >= 97 && c[i] <= 122) * 32; + } + return c; + }); +}; + +async function run() { + const result = pull(from('hello'), asciiUpper); + console.log(await text(result)); // 'HELLO' +} + +run().catch(console.error); +``` + +Using an `AbortSignal`: + +```mjs +import { pull } from 'node:stream/iter'; + +const ac = new AbortController(); +const result = pull(source, transform, { signal: ac.signal }); +ac.abort(); // Pipeline throws AbortError on next iteration +``` + +```cjs +const { pull } = require('node:stream/iter'); + +const ac = new AbortController(); +const result = pull(source, transform, { signal: ac.signal }); +ac.abort(); // Pipeline throws AbortError on next iteration +``` + +### `pullSync(source[, ...transforms])` + + + +* `source` {Iterable} The sync data source. +* `...transforms` {Function|Object} Zero or more sync transforms. +* Returns: {Iterable\} + +Synchronous version of [`pull()`][]. All transforms must be synchronous. + +## Push streams + +### `push([...transforms][, options])` + + + +* `...transforms` {Function|Object} Optional transforms applied to the + readable side. +* `options` {Object} + * `highWaterMark` {number} Maximum number of buffered slots before + backpressure is applied. Must be >= 1; values below 1 are clamped to 1. + **Default:** `4`. + * `backpressure` {string} Backpressure policy: `'strict'`, `'block'`, + `'drop-oldest'`, or `'drop-newest'`. 
**Default:** `'strict'`. + * `signal` {AbortSignal} Abort the stream. +* Returns: {Object} + * `writer` {PushWriter} The writer side. + * `readable` {AsyncIterable\} The readable side. + +Create a push stream with backpressure. The writer pushes data in; the +readable side is consumed as an async iterable. + +```mjs +import { push, text } from 'node:stream/iter'; + +const { writer, readable } = push(); + +// Producer and consumer must run concurrently. With strict backpressure +// (the default), awaited writes block until the consumer reads. +const producing = (async () => { + await writer.write('hello'); + await writer.write(' world'); + await writer.end(); +})(); + +console.log(await text(readable)); // 'hello world' +await producing; +``` + +```cjs +const { push, text } = require('node:stream/iter'); + +async function run() { + const { writer, readable } = push(); + + // Producer and consumer must run concurrently. With strict backpressure + // (the default), awaited writes block until the consumer reads. + const producing = (async () => { + await writer.write('hello'); + await writer.write(' world'); + await writer.end(); + })(); + + console.log(await text(readable)); // 'hello world' + await producing; +} + +run().catch(console.error); +``` + +The writer returned by `push()` conforms to the \[Writer interface]\[]. + +## Duplex channels + +### `duplex([options])` + + + +* `options` {Object} + * `highWaterMark` {number} Buffer size for both directions. + **Default:** `4`. + * `backpressure` {string} Policy for both directions. + **Default:** `'strict'`. + * `signal` {AbortSignal} Cancellation signal for both channels. + * `a` {Object} Options specific to the A-to-B direction. Overrides + shared options. + * `highWaterMark` {number} + * `backpressure` {string} + * `b` {Object} Options specific to the B-to-A direction. Overrides + shared options. 
+ * `highWaterMark` {number} + * `backpressure` {string} +* Returns: {Array} A pair `[channelA, channelB]` of duplex channels. + +Create a pair of connected duplex channels for bidirectional communication, +similar to `socketpair()`. Data written to one channel's writer appears in +the other channel's readable. + +Each channel has: + +* `writer` — a \[Writer interface]\[] object for sending data to the peer. +* `readable` — an `AsyncIterable` for reading data from + the peer. +* `close()` — close this end of the channel (idempotent). +* `[Symbol.asyncDispose]()` — async dispose support for `await using`. + +```mjs +import { duplex, text } from 'node:stream/iter'; + +const [client, server] = duplex(); + +// Server echoes back +const serving = (async () => { + for await (const chunks of server.readable) { + await server.writer.writev(chunks); + } +})(); + +await client.writer.write('hello'); +await client.writer.end(); + +console.log(await text(server.readable)); // handled by echo +await serving; +``` + +```cjs +const { duplex, text } = require('node:stream/iter'); + +async function run() { + const [client, server] = duplex(); + + // Server echoes back + const serving = (async () => { + for await (const chunks of server.readable) { + await server.writer.writev(chunks); + } + })(); + + await client.writer.write('hello'); + await client.writer.end(); + + console.log(await text(server.readable)); // handled by echo + await serving; +} + +run().catch(console.error); +``` + +## Consumers + +### `array(source[, options])` + + + +* `source` {AsyncIterable\|Iterable\} +* `options` {Object} + * `signal` {AbortSignal} + * `limit` {number} Maximum number of bytes to consume. If the total bytes + collected exceeds limit, an `ERR_OUT_OF_RANGE` error is thrown +* Returns: {Promise\} + +Collect all chunks as an array of `Uint8Array` values (without concatenating). 
+ +### `arrayBuffer(source[, options])` + + + +* `source` {AsyncIterable\|Iterable\} +* `options` {Object} + * `signal` {AbortSignal} + * `limit` {number} Maximum number of bytes to consume. If the total bytes + collected exceeds limit, an `ERR_OUT_OF_RANGE` error is thrown +* Returns: {Promise\} + +Collect all bytes into an `ArrayBuffer`. + +### `arrayBufferSync(source[, options])` + + + +* `source` {Iterable\} +* `options` {Object} + * `limit` {number} Maximum number of bytes to consume. If the total bytes + collected exceeds limit, an `ERR_OUT_OF_RANGE` error is thrown +* Returns: {ArrayBuffer} + +Synchronous version of [`arrayBuffer()`][]. + +### `arraySync(source[, options])` + + + +* `source` {Iterable\} +* `options` {Object} + * `limit` {number} Maximum number of bytes to consume. If the total bytes + collected exceeds limit, an `ERR_OUT_OF_RANGE` error is thrown +* Returns: {Uint8Array\[]} + +Synchronous version of [`array()`][]. + +### `bytes(source[, options])` + + + +* `source` {AsyncIterable\|Iterable\} +* `options` {Object} + * `signal` {AbortSignal} + * `limit` {number} Maximum number of bytes to consume. If the total bytes + collected exceeds limit, an `ERR_OUT_OF_RANGE` error is thrown +* Returns: {Promise\} + +Collect all bytes from a stream into a single `Uint8Array`. + +```mjs +import { from, bytes } from 'node:stream/iter'; + +const data = await bytes(from('hello')); +console.log(data); // Uint8Array(5) [ 104, 101, 108, 108, 111 ] +``` + +```cjs +const { from, bytes } = require('node:stream/iter'); + +async function run() { + const data = await bytes(from('hello')); + console.log(data); // Uint8Array(5) [ 104, 101, 108, 108, 111 ] +} + +run().catch(console.error); +``` + +### `bytesSync(source[, options])` + + + +* `source` {Iterable\} +* `options` {Object} + * `limit` {number} Maximum number of bytes to consume. 
If the total bytes + collected exceeds limit, an `ERR_OUT_OF_RANGE` error is thrown +* Returns: {Uint8Array} + +Synchronous version of [`bytes()`][]. + +### `text(source[, options])` + + + +* `source` {AsyncIterable\|Iterable\} +* `options` {Object} + * `encoding` {string} Text encoding. **Default:** `'utf-8'`. + * `signal` {AbortSignal} + * `limit` {number} Maximum number of bytes to consume. If the total bytes + collected exceeds limit, an `ERR_OUT_OF_RANGE` error is thrown +* Returns: {Promise\} + +Collect all bytes and decode as text. + +```mjs +import { from, text } from 'node:stream/iter'; + +console.log(await text(from('hello'))); // 'hello' +``` + +```cjs +const { from, text } = require('node:stream/iter'); + +async function run() { + console.log(await text(from('hello'))); // 'hello' +} + +run().catch(console.error); +``` + +### `textSync(source[, options])` + + + +* `source` {Iterable\} +* `options` {Object} + * `encoding` {string} **Default:** `'utf-8'`. + * `limit` {number} Maximum number of bytes to consume. If the total bytes + collected exceeds limit, an `ERR_OUT_OF_RANGE` error is thrown +* Returns: {string} + +Synchronous version of [`text()`][]. + +## Utilities + +### `ondrain(drainable)` + + + +* `drainable` {Object} An object implementing the drainable protocol. +* Returns: {Promise\|null} + +Wait for a drainable writer's backpressure to clear. Returns a promise that +resolves to `true` when the writer can accept more data, or `null` if the +object does not implement the drainable protocol. 
+ +```mjs +import { push, ondrain, text } from 'node:stream/iter'; + +const { writer, readable } = push({ highWaterMark: 2 }); +writer.writeSync('a'); +writer.writeSync('b'); + +// Start consuming so the buffer can actually drain +const consuming = text(readable); + +// Buffer is full -- wait for drain +const canWrite = await ondrain(writer); +if (canWrite) { + await writer.write('c'); +} +await writer.end(); +await consuming; +``` + +```cjs +const { push, ondrain, text } = require('node:stream/iter'); + +async function run() { + const { writer, readable } = push({ highWaterMark: 2 }); + writer.writeSync('a'); + writer.writeSync('b'); + + // Start consuming so the buffer can actually drain + const consuming = text(readable); + + // Buffer is full -- wait for drain + const canWrite = await ondrain(writer); + if (canWrite) { + await writer.write('c'); + } + await writer.end(); + await consuming; +} + +run().catch(console.error); +``` + +### `merge(...sources[, options])` + + + +* `...sources` {AsyncIterable\|Iterable\} Two or more iterables. +* `options` {Object} + * `signal` {AbortSignal} +* Returns: {AsyncIterable\} + +Merge multiple async iterables by yielding batches in temporal order +(whichever source produces data first). All sources are consumed +concurrently. + +```mjs +import { from, merge, text } from 'node:stream/iter'; + +const merged = merge(from('hello '), from('world')); +console.log(await text(merged)); // Order depends on timing +``` + +```cjs +const { from, merge, text } = require('node:stream/iter'); + +async function run() { + const merged = merge(from('hello '), from('world')); + console.log(await text(merged)); // Order depends on timing +} + +run().catch(console.error); +``` + +### `tap(callback)` + + + +* `callback` {Function} `(chunks) => void` Called with each batch. +* Returns: {Function} A stateless transform. + +Create a pass-through transform that observes batches without modifying them. +Useful for logging, metrics, or debugging. 
+ +```mjs +import { from, pull, text, tap } from 'node:stream/iter'; + +const result = pull( + from('hello'), + tap((chunks) => console.log('Batch size:', chunks.length)), +); +console.log(await text(result)); +``` + +```cjs +const { from, pull, text, tap } = require('node:stream/iter'); + +async function run() { + const result = pull( + from('hello'), + tap((chunks) => console.log('Batch size:', chunks.length)), + ); + console.log(await text(result)); +} + +run().catch(console.error); +``` + +`tap()` intentionally does not prevent in-place modification of the +chunks by the tapping callback; but return values are ignored. + +### `tapSync(callback)` + + + +* `callback` {Function} +* Returns: {Function} + +Synchronous version of [`tap()`][]. + +## Multi-consumer + +### `broadcast([options])` + + + +* `options` {Object} + * `highWaterMark` {number} Buffer size in slots. Must be >= 1; values + below 1 are clamped to 1. **Default:** `16`. + * `backpressure` {string} `'strict'`, `'block'`, `'drop-oldest'`, or + `'drop-newest'`. **Default:** `'strict'`. + * `signal` {AbortSignal} +* Returns: {Object} + * `writer` {BroadcastWriter} + * `broadcast` {Broadcast} + +Create a push-model multi-consumer broadcast channel. A single writer pushes +data to multiple consumers. Each consumer has an independent cursor into a +shared buffer. + +```mjs +import { broadcast, text } from 'node:stream/iter'; + +const { writer, broadcast: bc } = broadcast(); + +// Create consumers before writing +const c1 = bc.push(); // Consumer 1 +const c2 = bc.push(); // Consumer 2 + +// Producer and consumers must run concurrently. Awaited writes +// block when the buffer fills until consumers read. 
+const producing = (async () => { + await writer.write('hello'); + await writer.end(); +})(); + +const [r1, r2] = await Promise.all([text(c1), text(c2)]); +console.log(r1); // 'hello' +console.log(r2); // 'hello' +await producing; +``` + +```cjs +const { broadcast, text } = require('node:stream/iter'); + +async function run() { + const { writer, broadcast: bc } = broadcast(); + + // Create consumers before writing + const c1 = bc.push(); // Consumer 1 + const c2 = bc.push(); // Consumer 2 + + // Producer and consumers must run concurrently. Awaited writes + // block when the buffer fills until consumers read. + const producing = (async () => { + await writer.write('hello'); + await writer.end(); + })(); + + const [r1, r2] = await Promise.all([text(c1), text(c2)]); + console.log(r1); // 'hello' + console.log(r2); // 'hello' + await producing; +} + +run().catch(console.error); +``` + +#### `broadcast.bufferSize` + +* {number} + +The number of chunks currently buffered. + +#### `broadcast.cancel([reason])` + +* `reason` {Error} + +Cancel the broadcast. All consumers receive an error. + +#### `broadcast.consumerCount` + +* {number} + +The number of active consumers. + +#### `broadcast.push([...transforms][, options])` + +* `...transforms` {Function|Object} +* `options` {Object} + * `signal` {AbortSignal} +* Returns: {AsyncIterable\} + +Create a new consumer. Each consumer receives all data written to the +broadcast from the point of subscription onward. Optional transforms are +applied to this consumer's view of the data. + +#### `broadcast[Symbol.dispose]()` + +Alias for `broadcast.cancel()`. + +### `Broadcast.from(input[, options])` + + + +* `input` {AsyncIterable|Iterable|Broadcastable} +* `options` {Object} Same as `broadcast()`. +* Returns: {Object} `{ writer, broadcast }` + +Create a {Broadcast} from an existing source. The source is consumed +automatically and pushed to all subscribers. 
+ +### `share(source[, options])` + + + +* `source` {AsyncIterable} The source to share. +* `options` {Object} + * `highWaterMark` {number} Buffer size. Must be >= 1; values below 1 + are clamped to 1. **Default:** `16`. + * `backpressure` {string} `'strict'`, `'block'`, `'drop-oldest'`, or + `'drop-newest'`. **Default:** `'strict'`. +* Returns: {Share} + +Create a pull-model multi-consumer shared stream. Unlike `broadcast()`, the +source is only read when a consumer pulls. Multiple consumers share a single +buffer. + +```mjs +import { from, share, text } from 'node:stream/iter'; + +const shared = share(from('hello')); + +const c1 = shared.pull(); +const c2 = shared.pull(); + +// Consume concurrently to avoid deadlock with small buffers. +const [r1, r2] = await Promise.all([text(c1), text(c2)]); +console.log(r1); // 'hello' +console.log(r2); // 'hello' +``` + +```cjs +const { from, share, text } = require('node:stream/iter'); + +async function run() { + const shared = share(from('hello')); + + const c1 = shared.pull(); + const c2 = shared.pull(); + + // Consume concurrently to avoid deadlock with small buffers. + const [r1, r2] = await Promise.all([text(c1), text(c2)]); + console.log(r1); // 'hello' + console.log(r2); // 'hello' +} + +run().catch(console.error); +``` + +#### `share.bufferSize` + +* {number} + +The number of chunks currently buffered. + +#### `share.cancel([reason])` + +* `reason` {Error} + +Cancel the share. All consumers receive an error. + +#### `share.consumerCount` + +* {number} + +The number of active consumers. + +#### `share.pull([...transforms][, options])` + +* `...transforms` {Function|Object} +* `options` {Object} + * `signal` {AbortSignal} +* Returns: {AsyncIterable\} + +Create a new consumer of the shared source. + +#### `share[Symbol.dispose]()` + +Alias for `share.cancel()`. + +### `Share.from(input[, options])` + + + +* `input` {AsyncIterable|Shareable} +* `options` {Object} Same as `share()`. 
+* Returns: {Share} + +Create a {Share} from an existing source. + +### `shareSync(source[, options])` + + + +* `source` {Iterable} The sync source to share. +* `options` {Object} + * `highWaterMark` {number} Must be >= 1; values below 1 are clamped + to 1. **Default:** `16`. + * `backpressure` {string} **Default:** `'strict'`. +* Returns: {SyncShare} + +Synchronous version of [`share()`][]. + +### `SyncShare.fromSync(input[, options])` + + + +* `input` {Iterable|SyncShareable} +* `options` {Object} +* Returns: {SyncShare} + +## Compression and decompression transforms + +Compression and decompression transforms for use with `pull()`, `pullSync()`, +`pipeTo()`, and `pipeToSync()` are available via the [`node:zlib/iter`][] +module. See the [`node:zlib/iter` documentation][] for details. + +## Protocol symbols + +These well-known symbols allow third-party objects to participate in the +streaming protocol without importing from `node:stream/iter` directly. + +### `Stream.broadcastProtocol` + +* Value: `Symbol.for('Stream.broadcastProtocol')` + +The value must be a function. When called by `Broadcast.from()`, it receives +the options passed to `Broadcast.from()` and must return an object conforming +to the {Broadcast} interface. The implementation is fully custom -- it can +manage consumers, buffering, and backpressure however it wants. + +```mjs +import { Broadcast, text } from 'node:stream/iter'; + +// This example defers to the built-in Broadcast, but a custom +// implementation could use any mechanism. 
+class MessageBus { + #broadcast; + #writer; + + constructor() { + const { writer, broadcast } = Broadcast(); + this.#writer = writer; + this.#broadcast = broadcast; + } + + [Symbol.for('Stream.broadcastProtocol')](options) { + return this.#broadcast; + } + + send(data) { + this.#writer.write(new TextEncoder().encode(data)); + } + + close() { + this.#writer.end(); + } +} + +const bus = new MessageBus(); +const { broadcast } = Broadcast.from(bus); +const consumer = broadcast.push(); +bus.send('hello'); +bus.close(); +console.log(await text(consumer)); // 'hello' +``` + +```cjs +const { Broadcast, text } = require('node:stream/iter'); + +// This example defers to the built-in Broadcast, but a custom +// implementation could use any mechanism. +class MessageBus { + #broadcast; + #writer; + + constructor() { + const { writer, broadcast } = Broadcast(); + this.#writer = writer; + this.#broadcast = broadcast; + } + + [Symbol.for('Stream.broadcastProtocol')](options) { + return this.#broadcast; + } + + send(data) { + this.#writer.write(new TextEncoder().encode(data)); + } + + close() { + this.#writer.end(); + } +} + +const bus = new MessageBus(); +const { broadcast } = Broadcast.from(bus); +const consumer = broadcast.push(); +bus.send('hello'); +bus.close(); +text(consumer).then(console.log); // 'hello' +``` + +### `Stream.drainableProtocol` + +* Value: `Symbol.for('Stream.drainableProtocol')` + +Implement to make a writer compatible with `ondrain()`. The method should +return a promise that resolves when backpressure clears, or `null` if no +backpressure. 
+
+```mjs
+import { ondrain } from 'node:stream/iter';
+
+class CustomWriter {
+  #queue = [];
+  #drain = null;
+  #closed = false;
+  [Symbol.for('Stream.drainableProtocol')]() {
+    if (this.#closed) return null;
+    if (this.#queue.length < 3) return Promise.resolve(true);
+    this.#drain ??= Promise.withResolvers();
+    return this.#drain.promise;
+  }
+  write(chunk) {
+    this.#queue.push(chunk);
+  }
+  flush() {
+    this.#queue.length = 0;
+    this.#drain?.resolve(true);
+    this.#drain = null;
+  }
+  close() {
+    this.#closed = true;
+  }
+}
+const writer = new CustomWriter();
+const ready = ondrain(writer);
+console.log(ready); // Promise { true } -- no backpressure
+```
+
+```cjs
+const { ondrain } = require('node:stream/iter');
+
+class CustomWriter {
+  #queue = [];
+  #drain = null;
+  #closed = false;
+
+  [Symbol.for('Stream.drainableProtocol')]() {
+    if (this.#closed) return null;
+    if (this.#queue.length < 3) return Promise.resolve(true);
+    this.#drain ??= Promise.withResolvers();
+    return this.#drain.promise;
+  }
+
+  write(chunk) {
+    this.#queue.push(chunk);
+  }
+
+  flush() {
+    this.#queue.length = 0;
+    this.#drain?.resolve(true);
+    this.#drain = null;
+  }
+
+  close() {
+    this.#closed = true;
+  }
+}
+
+const writer = new CustomWriter();
+const ready = ondrain(writer);
+console.log(ready); // Promise { true } -- no backpressure
+```
+
+### `Stream.shareProtocol`
+
+* Value: `Symbol.for('Stream.shareProtocol')`
+
+The value must be a function. When called by `Share.from()`, it receives the
+options passed to `Share.from()` and must return an object conforming to the
+{Share} interface. The implementation is fully custom -- it can manage the shared
+source, consumers, buffering, and backpressure however it wants.
+
+```mjs
+import { share, Share, text } from 'node:stream/iter';
+
+// This example defers to the built-in share(), but a custom
+// implementation could use any mechanism.
+class DataPool { + #share; + + constructor(source) { + this.#share = share(source); + } + + [Symbol.for('Stream.shareProtocol')](options) { + return this.#share; + } +} + +const pool = new DataPool( + (async function* () { + yield 'hello'; + })(), +); + +const shared = Share.from(pool); +const consumer = shared.pull(); +console.log(await text(consumer)); // 'hello' +``` + +```cjs +const { share, Share, text } = require('node:stream/iter'); + +// This example defers to the built-in share(), but a custom +// implementation could use any mechanism. +class DataPool { + #share; + + constructor(source) { + this.#share = share(source); + } + + [Symbol.for('Stream.shareProtocol')](options) { + return this.#share; + } +} + +const pool = new DataPool( + (async function* () { + yield 'hello'; + })(), +); + +const shared = Share.from(pool); +const consumer = shared.pull(); +text(consumer).then(console.log); // 'hello' +``` + +### `Stream.shareSyncProtocol` + +* Value: `Symbol.for('Stream.shareSyncProtocol')` + +The value must be a function. When called by `SyncShare.fromSync()`, it receives +the options passed to `SyncShare.fromSync()` and must return an object conforming +to the {SyncShare} interface. The implementation is fully custom -- it can manage +the shared source, consumers, and buffering however it wants. + +```mjs +import { shareSync, SyncShare, textSync } from 'node:stream/iter'; + +// This example defers to the built-in shareSync(), but a custom +// implementation could use any mechanism. 
+class SyncDataPool { + #share; + + constructor(source) { + this.#share = shareSync(source); + } + + [Symbol.for('Stream.shareSyncProtocol')](options) { + return this.#share; + } +} + +const encoder = new TextEncoder(); +const pool = new SyncDataPool( + function* () { + yield [encoder.encode('hello')]; + }(), +); + +const shared = SyncShare.fromSync(pool); +const consumer = shared.pull(); +console.log(textSync(consumer)); // 'hello' +``` + +```cjs +const { shareSync, SyncShare, textSync } = require('node:stream/iter'); + +// This example defers to the built-in shareSync(), but a custom +// implementation could use any mechanism. +class SyncDataPool { + #share; + + constructor(source) { + this.#share = shareSync(source); + } + + [Symbol.for('Stream.shareSyncProtocol')](options) { + return this.#share; + } +} + +const encoder = new TextEncoder(); +const pool = new SyncDataPool( + function* () { + yield [encoder.encode('hello')]; + }(), +); + +const shared = SyncShare.fromSync(pool); +const consumer = shared.pull(); +console.log(textSync(consumer)); // 'hello' +``` + +### `Stream.toAsyncStreamable` + +* Value: `Symbol.for('Stream.toAsyncStreamable')` + +The value must be a function that converts the object into a streamable value. +When the object is encountered anywhere in the streaming pipeline (as a source +passed to `from()`, or as a value returned from a transform), this method is +called to produce the actual data. It may return (or resolve to) any streamable +value: a string, `Uint8Array`, `AsyncIterable`, `Iterable`, or another streamable +object. 
+ +```mjs +import { from, text } from 'node:stream/iter'; + +class Greeting { + #name; + + constructor(name) { + this.#name = name; + } + + [Symbol.for('Stream.toAsyncStreamable')]() { + return `hello ${this.#name}`; + } +} + +const stream = from(new Greeting('world')); +console.log(await text(stream)); // 'hello world' +``` + +```cjs +const { from, text } = require('node:stream/iter'); + +class Greeting { + #name; + + constructor(name) { + this.#name = name; + } + + [Symbol.for('Stream.toAsyncStreamable')]() { + return `hello ${this.#name}`; + } +} + +const stream = from(new Greeting('world')); +text(stream).then(console.log); // 'hello world' +``` + +### `Stream.toStreamable` + +* Value: `Symbol.for('Stream.toStreamable')` + +The value must be a function that synchronously converts the object into a +streamable value. When the object is encountered anywhere in the streaming +pipeline (as a source passed to `fromSync()`, or as a value returned from a +sync transform), this method is called to produce the actual data. It must +synchronously return a streamable value: a string, `Uint8Array`, or `Iterable`. 
+ +```mjs +import { fromSync, textSync } from 'node:stream/iter'; + +class Greeting { + #name; + + constructor(name) { + this.#name = name; + } + + [Symbol.for('Stream.toStreamable')]() { + return `hello ${this.#name}`; + } +} + +const stream = fromSync(new Greeting('world')); +console.log(textSync(stream)); // 'hello world' +``` + +```cjs +const { fromSync, textSync } = require('node:stream/iter'); + +class Greeting { + #name; + + constructor(name) { + this.#name = name; + } + + [Symbol.for('Stream.toStreamable')]() { + return `hello ${this.#name}`; + } +} + +const stream = fromSync(new Greeting('world')); +console.log(textSync(stream)); // 'hello world' +``` + +[`array()`]: #arraysource-options +[`arrayBuffer()`]: #arraybuffersource-options +[`bytes()`]: #bytessource-options +[`from()`]: #frominput +[`node:zlib/iter`]: zlib_iter.md +[`node:zlib/iter` documentation]: zlib_iter.md +[`pipeTo()`]: #pipetosource-transforms-writer-options +[`pull()`]: #pullsource-transforms-options +[`share()`]: #sharesource-options +[`tap()`]: #tapcallback +[`text()`]: #textsource-options diff --git a/doc/api/zlib_iter.md b/doc/api/zlib_iter.md new file mode 100644 index 00000000000000..73bdf4f7561c24 --- /dev/null +++ b/doc/api/zlib_iter.md @@ -0,0 +1,254 @@ +# Iterable Compression + + + +> Stability: 1 - Experimental + + + +The `node:zlib/iter` module provides compression and decompression transforms +for use with the [`node:stream/iter`][] iterable streams API. + +This module is available only when the `--experimental-stream-iter` CLI flag +is enabled. + +Each algorithm has both an async variant (stateful async generator, for use +with [`pull()`][] and [`pipeTo()`][]) and a sync variant (stateful sync +generator, for use with `pullSync()` and `pipeToSync()`). + +The async transforms run compression on the libuv threadpool, overlapping +I/O with JavaScript execution. The sync transforms run compression directly +on the main thread. 
+ +> Note: The defaults for these transforms are tuned for streaming throughput, +> and differ from the defaults in `node:zlib`. In particular, gzip/deflate +> default to level 4 (not 6) and memLevel 9 (not 8), and Brotli defaults to +> quality 6 (not 11). These choices match common HTTP server configurations +> and provide significantly faster compression with only a small reduction in +> compression ratio. All defaults can be overridden via options. + +```mjs +import { from, pull, bytes, text } from 'node:stream/iter'; +import { compressGzip, decompressGzip } from 'node:zlib/iter'; + +// Async round-trip +const compressed = await bytes(pull(from('hello'), compressGzip())); +const original = await text(pull(from(compressed), decompressGzip())); +console.log(original); // 'hello' +``` + +```cjs +const { from, pull, bytes, text } = require('node:stream/iter'); +const { compressGzip, decompressGzip } = require('node:zlib/iter'); + +async function run() { + const compressed = await bytes(pull(from('hello'), compressGzip())); + const original = await text(pull(from(compressed), decompressGzip())); + console.log(original); // 'hello' +} + +run().catch(console.error); +``` + +```mjs +import { fromSync, pullSync, textSync } from 'node:stream/iter'; +import { compressGzipSync, decompressGzipSync } from 'node:zlib/iter'; + +// Sync round-trip +const compressed = pullSync(fromSync('hello'), compressGzipSync()); +const original = textSync(pullSync(compressed, decompressGzipSync())); +console.log(original); // 'hello' +``` + +```cjs +const { fromSync, pullSync, textSync } = require('node:stream/iter'); +const { compressGzipSync, decompressGzipSync } = require('node:zlib/iter'); + +const compressed = pullSync(fromSync('hello'), compressGzipSync()); +const original = textSync(pullSync(compressed, decompressGzipSync())); +console.log(original); // 'hello' +``` + +## `compressBrotli([options])` + +## `compressBrotliSync([options])` + + + +* `options` {Object} + * `chunkSize` 
{number} Output buffer size. **Default:** `65536` (64 KB). + * `params` {Object} Key-value object where keys and values are + `zlib.constants` entries. The most important compressor parameters are: + * `BROTLI_PARAM_MODE` -- `BROTLI_MODE_GENERIC` (default), + `BROTLI_MODE_TEXT`, or `BROTLI_MODE_FONT`. + * `BROTLI_PARAM_QUALITY` -- ranges from `BROTLI_MIN_QUALITY` to + `BROTLI_MAX_QUALITY`. **Default:** `6` (not `BROTLI_DEFAULT_QUALITY` + which is 11). Quality 6 is appropriate for streaming; quality 11 is + intended for offline/build-time compression. + * `BROTLI_PARAM_SIZE_HINT` -- expected input size. **Default:** `0` + (unknown). + * `BROTLI_PARAM_LGWIN` -- window size (log2). **Default:** `20` (1 MB). + The Brotli library default is 22 (4 MB); the reduced default saves + memory without significant compression impact for streaming workloads. + * `BROTLI_PARAM_LGBLOCK` -- input block size (log2). + See the [Brotli compressor options][] in the zlib documentation for the + full list. + * `dictionary` {Buffer|TypedArray|DataView} +* Returns: {Object} A stateful transform. + +Create a Brotli compression transform. Output is compatible with +`zlib.brotliDecompress()` and `decompressBrotli()`/`decompressBrotliSync()`. + +## `compressDeflate([options])` + +## `compressDeflateSync([options])` + + + +* `options` {Object} + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). + * `level` {number} Compression level (`0`-`9`). **Default:** `4`. + * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS` (15). + * `memLevel` {number} **Default:** `9`. + * `strategy` {number} **Default:** `Z_DEFAULT_STRATEGY`. + * `dictionary` {Buffer|TypedArray|DataView} +* Returns: {Object} A stateful transform. + +Create a deflate compression transform. Output is compatible with +`zlib.inflate()` and `decompressDeflate()`/`decompressDeflateSync()`. 
+ +## `compressGzip([options])` + +## `compressGzipSync([options])` + + + +* `options` {Object} + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). + * `level` {number} Compression level (`0`-`9`). **Default:** `4`. + * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS` (15). + * `memLevel` {number} **Default:** `9`. + * `strategy` {number} **Default:** `Z_DEFAULT_STRATEGY`. + * `dictionary` {Buffer|TypedArray|DataView} +* Returns: {Object} A stateful transform. + +Create a gzip compression transform. Output is compatible with `zlib.gunzip()` +and `decompressGzip()`/`decompressGzipSync()`. + +## `compressZstd([options])` + +## `compressZstdSync([options])` + + + +* `options` {Object} + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). + * `params` {Object} Key-value object where keys and values are + `zlib.constants` entries. The most important compressor parameters are: + * `ZSTD_c_compressionLevel` -- **Default:** `ZSTD_CLEVEL_DEFAULT` (3). + * `ZSTD_c_checksumFlag` -- generate a checksum. **Default:** `0`. + * `ZSTD_c_strategy` -- compression strategy. Values include + `ZSTD_fast`, `ZSTD_dfast`, `ZSTD_greedy`, `ZSTD_lazy`, + `ZSTD_lazy2`, `ZSTD_btlazy2`, `ZSTD_btopt`, `ZSTD_btultra`, + `ZSTD_btultra2`. + See the [Zstd compressor options][] in the zlib documentation for the + full list. + * `pledgedSrcSize` {number} Expected uncompressed size (optional hint). + * `dictionary` {Buffer|TypedArray|DataView} +* Returns: {Object} A stateful transform. + +Create a Zstandard compression transform. Output is compatible with +`zlib.zstdDecompress()` and `decompressZstd()`/`decompressZstdSync()`. + +## `decompressBrotli([options])` + +## `decompressBrotliSync([options])` + + + +* `options` {Object} + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). + * `params` {Object} Key-value object where keys and values are + `zlib.constants` entries. 
Available decompressor parameters: + * `BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION` -- boolean + flag affecting internal memory allocation. + * `BROTLI_DECODER_PARAM_LARGE_WINDOW` -- boolean flag enabling "Large + Window Brotli" mode (not compatible with [RFC 7932][]). + See the [Brotli decompressor options][] in the zlib documentation for + details. + * `dictionary` {Buffer|TypedArray|DataView} +* Returns: {Object} A stateful transform. + +Create a Brotli decompression transform. + +## `decompressDeflate([options])` + +## `decompressDeflateSync([options])` + + + +* `options` {Object} + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). + * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS` (15). + * `dictionary` {Buffer|TypedArray|DataView} +* Returns: {Object} A stateful transform. + +Create a deflate decompression transform. + +## `decompressGzip([options])` + +## `decompressGzipSync([options])` + + + +* `options` {Object} + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). + * `windowBits` {number} **Default:** `Z_DEFAULT_WINDOWBITS` (15). + * `dictionary` {Buffer|TypedArray|DataView} +* Returns: {Object} A stateful transform. + +Create a gzip decompression transform. + +## `decompressZstd([options])` + +## `decompressZstdSync([options])` + + + +* `options` {Object} + * `chunkSize` {number} Output buffer size. **Default:** `65536` (64 KB). + * `params` {Object} Key-value object where keys and values are + `zlib.constants` entries. Available decompressor parameters: + * `ZSTD_d_windowLogMax` -- maximum window size (log2) the decompressor + will allocate. Limits memory usage against malicious input. + See the [Zstd decompressor options][] in the zlib documentation for + details. + * `dictionary` {Buffer|TypedArray|DataView} +* Returns: {Object} A stateful transform. + +Create a Zstandard decompression transform. 
+ +[Brotli compressor options]: zlib.md#compressor-options +[Brotli decompressor options]: zlib.md#decompressor-options +[RFC 7932]: https://www.rfc-editor.org/rfc/rfc7932 +[Zstd compressor options]: zlib.md#compressor-options-1 +[Zstd decompressor options]: zlib.md#decompressor-options-1 +[`node:stream/iter`]: stream_iter.md +[`pipeTo()`]: stream_iter.md#pipetosource-transforms-writer-options +[`pull()`]: stream_iter.md#pullsource-transforms-options diff --git a/doc/node.1 b/doc/node.1 index e88c005731b40f..d3c6654ecaf391 100644 --- a/doc/node.1 +++ b/doc/node.1 @@ -720,6 +720,11 @@ top-level awaits, and print their location to help users find them. .It Fl -experimental-quic Enable experimental support for the QUIC protocol. . +.It Fl -experimental-stream-iter +Enable the experimental +.Sy node:stream/iter +module. +. .It Fl -experimental-sea-config Use this flag to generate a blob that can be injected into the Node.js binary to produce a single executable application. See the documentation diff --git a/lib/internal/bootstrap/realm.js b/lib/internal/bootstrap/realm.js index f49f0814bbc687..2ccceb493e68bb 100644 --- a/lib/internal/bootstrap/realm.js +++ b/lib/internal/bootstrap/realm.js @@ -131,7 +131,7 @@ const schemelessBlockList = new SafeSet([ 'test/reporters', ]); // Modules that will only be enabled at run time. -const experimentalModuleList = new SafeSet(['sqlite', 'quic']); +const experimentalModuleList = new SafeSet(['sqlite', 'quic', 'stream/iter', 'zlib/iter']); // Set up process.binding() and process._linkedBinding(). 
{ diff --git a/lib/internal/fs/promises.js b/lib/internal/fs/promises.js index 2f95c4b79e17fd..982ee89c3b17c8 100644 --- a/lib/internal/fs/promises.js +++ b/lib/internal/fs/promises.js @@ -16,6 +16,9 @@ const { SafePromisePrototypeFinally, Symbol, SymbolAsyncDispose, + SymbolAsyncIterator, + SymbolDispose, + SymbolIterator, Uint8Array, } = primordials; @@ -40,6 +43,8 @@ const { ERR_INVALID_ARG_VALUE, ERR_INVALID_STATE, ERR_METHOD_NOT_IMPLEMENTED, + ERR_OPERATION_FAILED, + ERR_OUT_OF_RANGE, }, } = require('internal/errors'); const { isArrayBufferView } = require('internal/util/types'); @@ -62,6 +67,7 @@ const { stringToFlags, stringToSymlinkType, toUnixTimestamp, + handleErrorFromBinding: handleSyncErrorFromBinding, validateBufferArray, validateCpOptions, validateOffsetLengthRead, @@ -93,6 +99,7 @@ const { isWindows, isMacOS, } = require('internal/util'); +const { getOptionValue } = require('internal/options'); const EventEmitter = require('events'); const { StringDecoder } = require('string_decoder'); const { kFSWatchStart, watch } = require('internal/fs/watchers'); @@ -112,6 +119,7 @@ const kCloseReject = Symbol('kCloseReject'); const kRef = Symbol('kRef'); const kUnref = Symbol('kUnref'); const kLocked = Symbol('kLocked'); +const kCloseSync = Symbol('kCloseSync'); const { kUsePromises } = binding; const { Interface } = require('internal/readline/interface'); @@ -139,6 +147,24 @@ const lazyReadableStream = getLazy(() => require('internal/webstreams/readablestream').ReadableStream, ); +// Lazy loaded to avoid circular dependency with new streams. 
+let newStreamsPull; +let newStreamsPullSync; +let newStreamsParsePullArgs; +let newStreamsToUint8Array; +let newStreamsConvertChunks; +function lazyNewStreams() { + if (newStreamsPull === undefined) { + const pullModule = require('internal/streams/iter/pull'); + newStreamsPull = pullModule.pull; + newStreamsPullSync = pullModule.pullSync; + const utils = require('internal/streams/iter/utils'); + newStreamsParsePullArgs = utils.parsePullArgs; + newStreamsToUint8Array = utils.toUint8Array; + newStreamsConvertChunks = utils.convertChunks; + } +} + // By the time the C++ land creates an error for a promise rejection (likely from a // libuv callback), there is already no JS frames on the stack. So we need to // wait until V8 resumes execution back to JS land before we have enough information @@ -266,6 +292,16 @@ class FileHandle extends EventEmitter { return this[kClosePromise]; }; + [kCloseSync]() { + if (this[kFd] === -1) return; + if (this[kClosePromise]) { + throw new ERR_INVALID_STATE('The FileHandle is closing'); + } + this[kFd] = -1; + this[kHandle].closeSync(); + this.emit('close'); + } + async [SymbolAsyncDispose]() { await this.close(); } @@ -421,6 +457,612 @@ class FileHandle extends EventEmitter { } } +if (getOptionValue('--experimental-stream-iter')) { + const kNullPrototo = { __proto__: null }; + const kDefaultChunkSize = 131072; + const kNone = -1; + /** + * Return the file contents as an AsyncIterable using the + * new streams pull model. Optional transforms and options (including + * AbortSignal) may be provided as trailing arguments, mirroring the + * Stream.pull() signature. 
+ * @param {...(Function|object)} args - Optional transforms and/or options + * @returns {AsyncIterable} + */ + FileHandle.prototype.pull = function pull(...args) { + if (this[kFd] === kNone) + throw new ERR_INVALID_STATE('The FileHandle is closed'); + if (this[kClosePromise]) + throw new ERR_INVALID_STATE('The FileHandle is closing'); + if (this[kLocked]) + throw new ERR_INVALID_STATE('The FileHandle is locked'); + + lazyNewStreams(); + const { transforms, options = kNullPrototo } = newStreamsParsePullArgs(args); + + const { + autoClose = false, + chunkSize: readSize = kDefaultChunkSize, + signal, + } = options; + let { + start: pos = kNone, + limit: remaining = kNone, + } = options; + + const handle = this; + const fd = this[kFd]; + + validateBoolean(autoClose, 'options.autoClose'); + + if (pos !== kNone) { + validateInteger(pos, 'options.start', 0); + } + if (remaining !== kNone) { + validateInteger(remaining, 'options.limit', 1); + } + if (readSize !== undefined) { + validateInteger(readSize, 'options.chunkSize', 1); + } + if (signal !== undefined) { + validateAbortSignal(signal, 'options.signal'); + } + + this[kLocked] = true; + + const source = { + __proto__: null, + async *[SymbolAsyncIterator]() { + handle[kRef](); + try { + if (signal) { + // Signal-aware path + while (remaining !== 0) { + if (signal.aborted) { + throw signal.reason ?? + lazyDOMException('The operation was aborted', + 'AbortError'); + } + const toRead = remaining > 0 ? + MathMin(readSize, remaining) : readSize; + const buf = Buffer.allocUnsafe(toRead); + let bytesRead; + try { + bytesRead = + (await binding.read(fd, buf, 0, + toRead, pos, kUsePromises)) || 0; + } catch (err) { + ErrorCaptureStackTrace(err, handleErrorFromBinding); + throw err; + } + if (bytesRead === 0) break; + if (pos >= 0) pos += bytesRead; + if (remaining > 0) remaining -= bytesRead; + yield [bytesRead < toRead ? 
buf.subarray(0, bytesRead) : buf]; + } + } else { + // Fast path - no signal check per iteration + while (remaining !== 0) { + const toRead = remaining > 0 ? + MathMin(readSize, remaining) : readSize; + const buf = Buffer.allocUnsafe(toRead); + let bytesRead; + try { + bytesRead = + (await binding.read(fd, buf, 0, + toRead, pos, kUsePromises)) || 0; + } catch (err) { + ErrorCaptureStackTrace(err, handleErrorFromBinding); + throw err; + } + if (bytesRead === 0) break; + if (pos >= 0) pos += bytesRead; + if (remaining > 0) remaining -= bytesRead; + yield [bytesRead < toRead ? buf.subarray(0, bytesRead) : buf]; + } + } + } finally { + handle[kLocked] = false; + handle[kUnref](); + if (autoClose) { + await handle.close(); + } + } + }, + }; + + // If transforms provided, wrap with pull pipeline + if (transforms.length > 0) { + const pullArgs = [...transforms]; + if (options) { + ArrayPrototypePush(pullArgs, options); + } + return newStreamsPull(source, ...pullArgs); + } + return source; + }; + + /** + * Return the file contents as an Iterable using synchronous + * reads. Optional transforms and options may be provided as trailing + * arguments, mirroring the Stream.pullSync() signature. 
+ * @param {...(Function|object)} args - Optional transforms and/or options + * @returns {Iterable} + */ + FileHandle.prototype.pullSync = function pullSync(...args) { + if (this[kFd] === kNone) + throw new ERR_INVALID_STATE('The FileHandle is closed'); + if (this[kClosePromise]) + throw new ERR_INVALID_STATE('The FileHandle is closing'); + if (this[kLocked]) + throw new ERR_INVALID_STATE('The FileHandle is locked'); + + lazyNewStreams(); + const { transforms, options = kNullPrototo } = newStreamsParsePullArgs(args); + + const { + autoClose = false, + chunkSize: readSize = kDefaultChunkSize, + } = options; + let { + start: pos = kNone, + limit: remaining = kNone, + } = options; + + const handle = this; + const fd = this[kFd]; + + validateBoolean(autoClose, 'options.autoClose'); + + if (pos !== kNone) { + validateInteger(pos, 'options.start', 0); + } + if (remaining !== kNone) { + validateInteger(remaining, 'options.limit', 1); + } + if (readSize !== undefined) { + validateInteger(readSize, 'options.chunkSize', 1); + } + + this[kLocked] = true; + + handle[kRef](); + + function cleanup() { + handle[kLocked] = false; + handle[kUnref](); + if (autoClose) { + handle[kCloseSync](); + } + } + + const source = { + __proto__: null, + [SymbolIterator]() { + let done = false; + return { + __proto__: null, + next() { + if (done || remaining === 0) { + if (!done) { + done = true; + cleanup(); + } + return { value: undefined, done: true }; + } + const toRead = remaining > 0 ? + MathMin(readSize, remaining) : readSize; + const buf = Buffer.allocUnsafe(toRead); + let bytesRead; + try { + bytesRead = binding.read(fd, buf, 0, toRead, pos) || 0; + } catch (err) { + done = true; + cleanup(); + throw err; + } + if (bytesRead === 0) { + done = true; + cleanup(); + return { value: undefined, done: true }; + } + if (pos >= 0) pos += bytesRead; + if (remaining > 0) remaining -= bytesRead; + const chunk = bytesRead < toRead ? 
+              buf.subarray(0, bytesRead) : buf;
+            return { value: [chunk], done: false };
+          },
+          return() {
+            if (!done) {
+              done = true;
+              cleanup();
+            }
+            return { value: undefined, done: true };
+          },
+        };
+      },
+    };
+
+    if (transforms.length > 0) {
+      return newStreamsPullSync(source, ...transforms);
+    }
+    return source;
+  };
+
+  /**
+   * Return a new-streams Writer backed by this file handle.
+   * The writer uses direct binding.writeBuffer / binding.writeBuffers
+   * calls, bypassing the FileHandle.write() validation chain.
+   *
+   * Supports writev() for batch writes (single syscall per batch).
+   * Handles EAGAIN with retry (up to 5 attempts), matching WriteStream.
+   * @param {{
+   * autoClose?: boolean; chunkSize?: number;
+   * start?: number; limit?: number;
+   * }} [options]
+   * @returns {{ write, writev, writeSync, writevSync, end, endSync, fail }}
+   */
+  FileHandle.prototype.writer = function writer(options = kNullPrototo) {
+    if (this[kFd] === kNone)
+      throw new ERR_INVALID_STATE('The FileHandle is closed');
+    if (this[kClosePromise])
+      throw new ERR_INVALID_STATE('The FileHandle is closing');
+    if (this[kLocked])
+      throw new ERR_INVALID_STATE('The FileHandle is locked');
+
+    lazyNewStreams();
+
+    validateObject(options, 'options');
+    const {
+      autoClose = false,
+      chunkSize: syncWriteThreshold = kDefaultChunkSize,
+    } = options;
+    let {
+      start: pos = kNone,
+      limit: bytesRemaining = kNone,
+    } = options;
+
+    const handle = this;
+    const fd = this[kFd];
+    let totalBytesWritten = 0;
+    let closed = false;
+    let closing = false;
+    let pendingEndPromise = null;
+    let error = null;
+    let asyncPending = false;
+
+    validateBoolean(autoClose, 'options.autoClose');
+
+    if (pos !== kNone) {
+      validateInteger(pos, 'options.start', 0);
+    }
+    if (bytesRemaining !== kNone) {
+      validateInteger(bytesRemaining, 'options.limit', 1);
+    }
+    if (syncWriteThreshold !== undefined) {
+      validateInteger(syncWriteThreshold, 'options.chunkSize', 1);
+    }
+
+    this[kLocked] = true;
+    handle[kRef]();
+
+    // Write a single buffer with EAGAIN retry (up to 5 retries).
+ async function writeAll(buf, offset, length, position, signal) { + asyncPending = true; + try { + let retries = 0; + while (length > 0) { + const bytesWritten = (await PromisePrototypeThen( + binding.writeBuffer(fd, buf, offset, length, position, + kUsePromises), + undefined, + handleErrorFromBinding, + )) || 0; + + signal?.throwIfAborted(); + + if (bytesWritten === 0) { + if (++retries > 5) { + throw new ERR_OPERATION_FAILED('write failed after retries'); + } + } else { + retries = 0; + } + + totalBytesWritten += bytesWritten; + offset += bytesWritten; + length -= bytesWritten; + if (position >= 0) position += bytesWritten; + } + } finally { + asyncPending = false; + } + } + + // Writev with EAGAIN retry. On partial write, concatenates remaining + // buffers and falls back to writeAll (same approach as WriteStream). + async function writevAll(buffers, position, signal) { + asyncPending = true; + try { + let totalSize = 0; + for (let i = 0; i < buffers.length; i++) { + totalSize += buffers[i].byteLength; + } + + let retries = 0; + while (totalSize > 0) { + const bytesWritten = (await PromisePrototypeThen( + binding.writeBuffers(fd, buffers, position, kUsePromises), + undefined, + handleErrorFromBinding, + )) || 0; + + signal?.throwIfAborted(); + + if (bytesWritten === 0) { + if (++retries > 5) { + throw new ERR_OPERATION_FAILED('writev failed after retries'); + } + } else { + retries = 0; + } + + totalBytesWritten += bytesWritten; + totalSize -= bytesWritten; + if (position >= 0) position += bytesWritten; + + if (totalSize > 0) { + // Partial write - concatenate remaining and use writeAll. + const remaining = Buffer.concat(buffers); + const wrote = bytesWritten; + // writeAll is already inside asyncPending = true, but + // writeAll sets it again - that's fine (idempotent). + await writeAll(remaining, wrote, remaining.length - wrote, + position, signal); + return; + } + } + } finally { + asyncPending = false; + } + } + + // Synchronous write with EAGAIN retry. 
Throws on I/O error. + // Used by writeSync for the full write, and by writevSync for + // completing a partial writev. + function writeSyncAll(buf, offset, length, position) { + let retries = 0; + while (length > 0) { + const ctx = {}; + const bytesWritten = binding.writeBuffer( + fd, buf, offset, length, position, undefined, ctx) || 0; + if (ctx.errno !== undefined) { + handleSyncErrorFromBinding(ctx); + } + if (bytesWritten === 0) { + if (++retries > 5) { + throw new ERR_OPERATION_FAILED('write failed after retries'); + } + } else { + retries = 0; + } + totalBytesWritten += bytesWritten; + offset += bytesWritten; + length -= bytesWritten; + if (position >= 0) position += bytesWritten; + } + } + + async function cleanup() { + if (closed) return; + closed = true; + handle[kLocked] = false; + handle[kUnref](); + if (autoClose) { + await handle.close(); + } + } + + return { + __proto__: null, + write(chunk, options = kNullPrototo) { + if (error) { + return PromiseReject(error); + } + if (closed) { + return PromiseReject( + new ERR_INVALID_STATE.TypeError('The writer is closed')); + } + validateObject(options, 'options'); + const { + signal, + } = options; + if (signal !== undefined) { + validateAbortSignal(signal, 'options.signal'); + if (signal.aborted) { + return PromiseReject(signal.reason); + } + } + chunk = newStreamsToUint8Array(chunk); + if (bytesRemaining >= 0 && chunk.byteLength > bytesRemaining) { + return PromiseReject( + new ERR_OUT_OF_RANGE('write', `<= ${bytesRemaining} bytes`, + chunk.byteLength)); + } + if (bytesRemaining > 0) bytesRemaining -= chunk.byteLength; + const position = pos; + if (pos >= 0) pos += chunk.byteLength; + return writeAll(chunk, 0, chunk.byteLength, position, signal); + }, + + writev(chunks, options = kNullPrototo) { + if (error) { + return PromiseReject(error); + } + if (closed) { + return PromiseReject( + new ERR_INVALID_STATE.TypeError('The writer is closed')); + } + validateObject(options, 'options'); + const { + signal, + } 
= options; + if (signal !== undefined) { + validateAbortSignal(signal, 'options.signal'); + if (signal?.aborted) { + return PromiseReject(signal.reason); + } + } + chunks = newStreamsConvertChunks(chunks); + let totalSize = 0; + for (let i = 0; i < chunks.length; i++) { + totalSize += chunks[i].byteLength; + } + if (bytesRemaining >= 0 && totalSize > bytesRemaining) { + return PromiseReject( + new ERR_OUT_OF_RANGE('writev', `<= ${bytesRemaining} bytes`, + totalSize)); + } + if (bytesRemaining > 0) bytesRemaining -= totalSize; + const position = pos; + if (pos >= 0) pos += totalSize; + return writevAll(chunks, position, signal); + }, + + writeSync(chunk) { + if (error || closed || asyncPending) return false; + chunk = newStreamsToUint8Array(chunk); + const length = chunk.byteLength; + if (length > syncWriteThreshold) return false; + if (length === 0) return true; + if (bytesRemaining >= 0 && length > bytesRemaining) return false; + const position = pos; + // First attempt - if this fails with zero bytes written, + // return false so pipeTo can fall back to async write(). + const ctx = {}; + const bytesWritten = binding.writeBuffer( + fd, chunk, 0, length, position, undefined, ctx) || 0; + if (ctx.errno !== undefined) return false; + totalBytesWritten += bytesWritten; + if (position >= 0) { + pos = position + bytesWritten; + } + if (bytesWritten === length) { + if (bytesRemaining > 0) bytesRemaining -= length; + return true; + } + // Partial write - bytes are on disk. Must complete or throw. + // Cannot return false here because pipeTo would re-send the + // full chunk, causing duplicate data on disk. + writeSyncAll(chunk, bytesWritten, length - bytesWritten, + position >= 0 ? 
position + bytesWritten : -1); + if (bytesRemaining > 0) bytesRemaining -= length; + return true; + }, + + writevSync(chunks) { + if (error || closed || asyncPending) return false; + chunks = newStreamsConvertChunks(chunks); + let totalSize = 0; + for (let i = 0; i < chunks.length; i++) { + totalSize += chunks[i].byteLength; + } + if (totalSize > syncWriteThreshold) return false; + if (totalSize === 0) return true; + if (bytesRemaining >= 0 && totalSize > bytesRemaining) return false; + const position = pos; + // writeBuffers throws on error (zero bytes written) - safe + // to catch and return false for async fallback. + let bytesWritten; + try { + bytesWritten = binding.writeBuffers(fd, chunks, position) || 0; + } catch { + return false; + } + totalBytesWritten += bytesWritten; + if (position >= 0) { + pos = position + bytesWritten; + } + if (bytesWritten === totalSize) { + if (bytesRemaining > 0) bytesRemaining -= totalSize; + return true; + } + // Partial writev - bytes are on disk. Must complete or throw. + const rest = Buffer.concat(chunks); + writeSyncAll(rest, bytesWritten, + rest.byteLength - bytesWritten, + position >= 0 ? 
position + bytesWritten : -1); + if (bytesRemaining > 0) bytesRemaining -= totalSize; + return true; + }, + + end(options = kNullPrototo) { + if (error) { + return PromiseReject(error); + } + if (closed) { + return PromiseResolve(totalBytesWritten); + } + if (closing) { + return pendingEndPromise; + } + validateObject(options, 'options'); + const { + signal, + } = options; + if (signal !== undefined) { + validateAbortSignal(signal, 'options.signal'); + if (signal.aborted) { + return PromiseReject(signal.reason); + } + } + closing = true; + pendingEndPromise = PromisePrototypeThen( + cleanup(), () => totalBytesWritten); + return pendingEndPromise; + }, + + endSync() { + if (error) return -1; + if (closed) return totalBytesWritten; + if (asyncPending) return -1; + closed = true; + handle[kLocked] = false; + handle[kUnref](); + if (autoClose) { + handle[kCloseSync](); + } + return totalBytesWritten; + }, + + fail(reason) { + if (closed || error) return; + error = reason ?? new ERR_INVALID_STATE('Failed'); + closed = true; + handle[kLocked] = false; + handle[kUnref](); + if (autoClose) { + handle[kCloseSync](); + } + }, + + [SymbolAsyncDispose]() { + if (closing) { + return pendingEndPromise ?? 
PromiseResolve(); + } + if (!closed && !error) { + this.fail(); + } + return PromiseResolve(); + }, + + [SymbolDispose]() { + this.fail(); + }, + }; + }; +} + async function handleFdClose(fileOpPromise, closeFunc) { return PromisePrototypeThen( fileOpPromise, diff --git a/lib/internal/process/pre_execution.js b/lib/internal/process/pre_execution.js index b68b4e26d4a7c9..404b29f88d6f70 100644 --- a/lib/internal/process/pre_execution.js +++ b/lib/internal/process/pre_execution.js @@ -115,6 +115,7 @@ function prepareExecution(options) { setupNavigator(); setupWarningHandler(); setupSQLite(); + setupStreamIter(); setupQuic(); setupWebStorage(); setupWebsocket(); @@ -392,6 +393,16 @@ function initializeConfigFileSupport() { } } +function setupStreamIter() { + if (!getOptionValue('--experimental-stream-iter')) { + return; + } + + const { BuiltinModule } = require('internal/bootstrap/realm'); + BuiltinModule.allowRequireByUsers('stream/iter'); + BuiltinModule.allowRequireByUsers('zlib/iter'); +} + function setupQuic() { if (!getOptionValue('--experimental-quic')) { return; diff --git a/lib/internal/streams/iter/broadcast.js b/lib/internal/streams/iter/broadcast.js new file mode 100644 index 00000000000000..769f93b5404c30 --- /dev/null +++ b/lib/internal/streams/iter/broadcast.js @@ -0,0 +1,762 @@ +'use strict'; + +// New Streams API - Broadcast +// +// Push-model multi-consumer streaming. A single writer can push data to +// multiple consumers. Each consumer has an independent cursor into a +// shared buffer. 
+ +const { + ArrayIsArray, + ArrayPrototypePush, + MathMax, + PromisePrototypeThen, + PromiseReject, + PromiseResolve, + PromiseWithResolvers, + SafeSet, + Symbol, + SymbolAsyncDispose, + SymbolAsyncIterator, + SymbolDispose, + TypedArrayPrototypeGetByteLength, +} = primordials; + +const { lazyDOMException } = require('internal/util'); + +const { + codes: { + ERR_INVALID_ARG_TYPE, + ERR_INVALID_RETURN_VALUE, + ERR_INVALID_STATE, + }, +} = require('internal/errors'); +const { + validateAbortSignal, + validateInteger, + validateObject, +} = require('internal/validators'); + +const { + broadcastProtocol, + drainableProtocol, +} = require('internal/streams/iter/types'); + +const { + isAsyncIterable, + isSyncIterable, +} = require('internal/streams/iter/from'); + +const { + pull: pullWithTransforms, +} = require('internal/streams/iter/pull'); + +const { + kMultiConsumerDefaultHWM, + kResolvedPromise, + clampHWM, + convertChunks, + getMinCursor, + hasProtocol, + onSignalAbort, + parsePullArgs, + wrapError, + toUint8Array, + validateBackpressure, +} = require('internal/streams/iter/utils'); + +const { + RingBuffer, +} = require('internal/streams/iter/ringbuffer'); + +const kCancelWriter = Symbol('kCancelWriter'); +const kWrite = Symbol('kWrite'); +const kEnd = Symbol('kEnd'); +const kAbort = Symbol('kAbort'); +const kGetDesiredSize = Symbol('kGetDesiredSize'); +const kCanWrite = Symbol('kCanWrite'); +const kOnBufferDrained = Symbol('kOnBufferDrained'); + +// ============================================================================= +// Broadcast Implementation +// ============================================================================= + +class BroadcastImpl { + #buffer = new RingBuffer(); + #bufferStart = 0; + #consumers = new SafeSet(); + #waiters = []; // Consumers with pending resolve (subset of #consumers) + #ended = false; + #error = null; + #cancelled = false; + #options; + #writer = null; + #cachedMinCursor = 0; + #minCursorDirty = false; + + 
constructor(options) { + this.#options = options; + this[kOnBufferDrained] = null; + } + + setWriter(writer) { + this.#writer = writer; + } + + get backpressurePolicy() { + return this.#options.backpressure; + } + + get highWaterMark() { + return this.#options.highWaterMark; + } + + get consumerCount() { + return this.#consumers.size; + } + + get bufferSize() { + return this.#buffer.length; + } + + push(...args) { + const { transforms, options } = parsePullArgs(args); + const rawConsumer = this.#createRawConsumer(); + + // When transforms are present, delegate to pull() which creates its + // own internal AbortController that follows the external signal. + // When no transforms, return rawConsumer directly (controller elided + // per PULL-02 optimization -- no transforms means no signal recipient). + if (transforms.length > 0) { + const pullArgs = [...transforms]; + if (options?.signal) { + ArrayPrototypePush(pullArgs, + { __proto__: null, signal: options.signal }); + } + return pullWithTransforms(rawConsumer, ...pullArgs); + } + return rawConsumer; + } + + #createRawConsumer() { + const state = { + __proto__: null, + // Start at the oldest buffered entry so late-joining consumers + // can read data already in the buffer. + cursor: this.#bufferStart, + resolve: null, + reject: null, + detached: false, + }; + + this.#consumers.add(state); + // New consumer starts at buffer start; recalculate min cursor + // since this consumer may now be the slowest. 
+ if (this.#consumers.size === 1) { + this.#cachedMinCursor = state.cursor; + this.#minCursorDirty = false; + } else { + this.#minCursorDirty = true; + } + const self = this; + + const kDone = PromiseResolve( + { __proto__: null, done: true, value: undefined }); + + function detach() { + state.detached = true; + state.resolve = null; + state.reject = null; + self.#consumers.delete(state); + self.#minCursorDirty = true; + self.#tryTrimBuffer(); + } + + return { + __proto__: null, + [SymbolAsyncIterator]() { + return { + __proto__: null, + next() { + if (state.detached) { + if (self.#error) return PromiseReject(self.#error); + return kDone; + } + + const bufferIndex = state.cursor - self.#bufferStart; + if (bufferIndex < self.#buffer.length) { + const chunk = self.#buffer.get(bufferIndex); + // If this consumer was at the min cursor, mark dirty + if (state.cursor <= self.#cachedMinCursor) { + self.#minCursorDirty = true; + } + state.cursor++; + self.#tryTrimBuffer(); + return PromiseResolve( + { __proto__: null, done: false, value: chunk }); + } + + if (self.#error) { + state.detached = true; + self.#consumers.delete(state); + return PromiseReject(self.#error); + } + + if (self.#ended || self.#cancelled) { + detach(); + return kDone; + } + + const { promise, resolve, reject } = PromiseWithResolvers(); + state.resolve = resolve; + state.reject = reject; + ArrayPrototypePush(self.#waiters, state); + return promise; + }, + + return() { + detach(); + return kDone; + }, + + throw() { + detach(); + return kDone; + }, + }; + }, + }; + } + + cancel(reason) { + if (this.#cancelled) return; + this.#cancelled = true; + this.#ended = true; // Prevents [kAbort]() from redundantly iterating consumers + + if (reason !== undefined) { + this.#error = reason; + } + + // Reject pending writes on the writer so the pump doesn't hang + this.#writer?.[kCancelWriter](); + + for (const consumer of this.#consumers) { + if (consumer.resolve) { + if (reason !== undefined) { + 
consumer.reject?.(reason); + } else { + consumer.resolve({ __proto__: null, done: true, value: undefined }); + } + consumer.resolve = null; + consumer.reject = null; + } + consumer.detached = true; + } + this.#consumers.clear(); + } + + [SymbolDispose]() { + this.cancel(); + } + + // Methods accessed by BroadcastWriter via symbol keys + + [kWrite](chunk) { + if (this.#ended || this.#cancelled) return false; + + if (this.#buffer.length >= this.#options.highWaterMark) { + switch (this.#options.backpressure) { + case 'strict': + case 'block': + return false; + case 'drop-oldest': + this.#buffer.shift(); + this.#bufferStart++; + for (const consumer of this.#consumers) { + if (consumer.cursor < this.#bufferStart) { + consumer.cursor = this.#bufferStart; + } + } + break; + case 'drop-newest': + return true; + } + } + + this.#buffer.push(chunk); + this.#notifyConsumers(); + return true; + } + + [kEnd]() { + if (this.#ended) return; + this.#ended = true; + + for (const consumer of this.#consumers) { + if (consumer.resolve) { + const bufferIndex = consumer.cursor - this.#bufferStart; + if (bufferIndex < this.#buffer.length) { + const chunk = this.#buffer.get(bufferIndex); + consumer.cursor++; + consumer.resolve({ __proto__: null, done: false, value: chunk }); + } else { + consumer.resolve({ __proto__: null, done: true, value: undefined }); + } + consumer.resolve = null; + consumer.reject = null; + } + } + } + + [kAbort](reason) { + if (this.#ended || this.#error) return; + this.#error = reason; + this.#ended = true; + + // Notify all waiting consumers and detach them + for (const consumer of this.#consumers) { + if (consumer.reject) { + consumer.reject(reason); + consumer.resolve = null; + consumer.reject = null; + } + consumer.detached = true; + } + this.#consumers.clear(); + } + + [kGetDesiredSize]() { + if (this.#ended || this.#cancelled) return null; + return MathMax(0, this.#options.highWaterMark - this.#buffer.length); + } + + [kCanWrite]() { + if (this.#ended || 
this.#cancelled) return false; + if ((this.#options.backpressure === 'strict' || + this.#options.backpressure === 'block') && + this.#buffer.length >= this.#options.highWaterMark) { + return false; + } + return true; + } + + // Private methods + + #recomputeMinCursor() { + this.#cachedMinCursor = getMinCursor( + this.#consumers, this.#bufferStart + this.#buffer.length); + this.#minCursorDirty = false; + } + + #tryTrimBuffer() { + if (this.#minCursorDirty) { + this.#recomputeMinCursor(); + } + const trimCount = this.#cachedMinCursor - this.#bufferStart; + if (trimCount > 0) { + this.#buffer.trimFront(trimCount); + this.#bufferStart = this.#cachedMinCursor; + + if (this[kOnBufferDrained] && + this.#buffer.length < this.#options.highWaterMark) { + this[kOnBufferDrained](); + } + } + } + + #notifyConsumers() { + const waiters = this.#waiters; + if (waiters.length === 0) return; + // Swap out the waiters list so consumers that re-wait during + // resolve don't get processed twice in this cycle. 
+    this.#waiters = [];
+    for (let i = 0; i < waiters.length; i++) {
+      const consumer = waiters[i];
+      if (consumer.resolve) {
+        const bufferIndex = consumer.cursor - this.#bufferStart;
+        if (bufferIndex < this.#buffer.length) {
+          const chunk = this.#buffer.get(bufferIndex);
+          if (consumer.cursor <= this.#cachedMinCursor) {
+            this.#minCursorDirty = true;
+          }
+          consumer.cursor++;
+          const resolve = consumer.resolve;
+          consumer.resolve = null;
+          consumer.reject = null;
+          resolve({ __proto__: null, done: false, value: chunk });
+        } else {
+          // Still waiting -- put back
+          ArrayPrototypePush(this.#waiters, consumer);
+        }
+      }
+    }
+  }
+}
+
+// =============================================================================
+// BroadcastWriter
+// =============================================================================
+
+let getBroadcastPendingWrites;
+
+class BroadcastWriter {
+  #broadcast;
+  #totalBytes = 0;
+  #closed;
+  #aborted = false;
+  #pendingWrites = new RingBuffer();
+  #pendingDrains = [];
+
+  static {
+    // Used in wireBroadcastWriteSignal to ensure the signal listener can be
+    // constructed without closing over the chunk data, which may be large.
+    getBroadcastPendingWrites = (obj) => obj.#pendingWrites;
+  }
+
+  constructor(broadcastImpl) {
+    this.#broadcast = broadcastImpl;
+
+    this.#broadcast[kOnBufferDrained] = () => {
+      this.#resolvePendingWrites();
+      this.#resolvePendingDrains(true);
+    };
+  }
+
+  // The drainable protocol works with Stream.ondrain to provide a notification
+  // when the writer can accept more data after being backpressured.
+ [drainableProtocol]() { + const desired = this.desiredSize; + if (desired === null) return null; + if (desired > 0) return PromiseResolve(true); + const { promise, resolve, reject } = PromiseWithResolvers(); + ArrayPrototypePush(this.#pendingDrains, { __proto__: null, resolve, reject }); + return promise; + } + + #isClosed() { + return this.#closed !== undefined; + } + + #isClosedOrAborted() { + return this.#isClosed() || this.#aborted; + } + + get desiredSize() { + return this.#isClosedOrAborted() ? null : this.#broadcast[kGetDesiredSize](); + } + + #canUseWriteFastPath(options) { + return !options?.signal && !this.#isClosed() && !this.#aborted && + this.#broadcast[kCanWrite](); + } + + write(chunk, options) { + // Fast path: no signal, writer open, buffer has space + if (this.#canUseWriteFastPath(options)) { + const converted = toUint8Array(chunk); + this.#broadcast[kWrite]([converted]); + this.#totalBytes += TypedArrayPrototypeGetByteLength(converted); + return kResolvedPromise; + } + return this.#writevSlow([chunk], options); + } + + writev(chunks, options) { + // Fast path: no signal, writer open, buffer has space + if (this.#canUseWriteFastPath(options)) { + const converted = convertChunks(chunks); + this.#broadcast[kWrite](converted); + for (let i = 0; i < converted.length; i++) { + this.#totalBytes += TypedArrayPrototypeGetByteLength(converted[i]); + } + return kResolvedPromise; + } + return this.#writevSlow(chunks, options); + } + + async #writevSlow(chunks, options) { + const signal = options?.signal; + + // Check for pre-aborted + signal?.throwIfAborted(); + + if (this.#isClosedOrAborted()) { + throw new ERR_INVALID_STATE.TypeError('Writer is closed'); + } + + const converted = convertChunks(chunks); + + if (this.#broadcast[kWrite](converted)) { + for (let i = 0; i < converted.length; i++) { + this.#totalBytes += TypedArrayPrototypeGetByteLength(converted[i]); + } + return; + } + + const policy = this.#broadcast.backpressurePolicy; + const hwm = 
this.#broadcast.highWaterMark; + + if (policy === 'strict') { + if (this.#pendingWrites.length >= hwm) { + throw new ERR_INVALID_STATE.TypeError( + 'Backpressure violation: too many pending writes. ' + + 'Await each write() call to respect backpressure.'); + } + return this.#createPendingWrite(converted, signal); + } + + // 'block' policy + return this.#createPendingWrite(converted, signal); + } + + writeSync(chunk) { + if (this.#isClosedOrAborted()) return false; + if (!this.#broadcast[kCanWrite]()) return false; + const converted = + toUint8Array(chunk); + if (this.#broadcast[kWrite]([converted])) { + this.#totalBytes += TypedArrayPrototypeGetByteLength(converted); + return true; + } + return false; + } + + writevSync(chunks) { + if (this.#isClosedOrAborted()) return false; + if (!this.#broadcast[kCanWrite]()) return false; + const converted = convertChunks(chunks); + if (this.#broadcast[kWrite](converted)) { + for (let i = 0; i < converted.length; i++) { + this.#totalBytes += TypedArrayPrototypeGetByteLength(converted[i]); + } + return true; + } + return false; + } + + // end() is synchronous internally - signal accepted for interface compliance. + end(options) { + if (this.#isClosed()) return this.#closed; + this.#closed = PromiseResolve(this.#totalBytes); + this.#broadcast[kEnd](); + this.#resolvePendingDrains(false); + return this.#closed; + } + + endSync() { + if (this.#closed) return this.#totalBytes; + this.#closed = PromiseResolve(this.#totalBytes); + this.#broadcast[kEnd](); + this.#resolvePendingDrains(false); + return this.#totalBytes; + } + + fail(reason) { + if (this.#isClosedOrAborted()) return; + this.#aborted = true; + this.#closed = PromiseResolve(this.#totalBytes); + const error = reason ?? 
new ERR_INVALID_STATE.TypeError('Failed'); + this.#rejectPendingWrites(error); + this.#rejectPendingDrains(error); + this.#broadcast[kAbort](error); + } + + [SymbolAsyncDispose]() { + this.fail(); + return PromiseResolve(); + } + + [SymbolDispose]() { + this.fail(); + } + + [kCancelWriter]() { + if (this.#isClosed()) return; + this.#closed = PromiseResolve(this.#totalBytes); + this.#rejectPendingWrites( + lazyDOMException('Broadcast cancelled', 'AbortError')); + this.#resolvePendingDrains(false); + } + + /** + * Create a pending write promise, optionally racing against a signal. + * If the signal fires, the entry is removed from pendingWrites and the + * promise rejects. Signal listeners are cleaned up on normal resolution. + * @returns {Promise} + */ + #createPendingWrite(chunk, signal) { + const { promise, resolve, reject } = PromiseWithResolvers(); + const entry = { __proto__: null, chunk, resolve, reject }; + this.#pendingWrites.push(entry); + if (signal) { + wireBroadcastWriteSignal(entry, signal, resolve, reject, this); + } + return promise; + } + + #resolvePendingWrites() { + while (this.#pendingWrites.length > 0 && this.#broadcast[kCanWrite]()) { + const pending = this.#pendingWrites.shift(); + if (this.#broadcast[kWrite](pending.chunk)) { + for (let i = 0; i < pending.chunk.length; i++) { + this.#totalBytes += TypedArrayPrototypeGetByteLength(pending.chunk[i]); + } + pending.resolve(); + } else { + this.#pendingWrites.unshift(pending); + break; + } + } + } + + #rejectPendingWrites(error) { + while (this.#pendingWrites.length > 0) { + this.#pendingWrites.shift().reject(error); + } + } + + #resolvePendingDrains(canWrite) { + const drains = this.#pendingDrains; + this.#pendingDrains = []; + for (let i = 0; i < drains.length; i++) { + drains[i].resolve(canWrite); + } + } + + #rejectPendingDrains(error) { + const drains = this.#pendingDrains; + this.#pendingDrains = []; + for (let i = 0; i < drains.length; i++) { + drains[i].reject(error); + } + } +} + 
+function wireBroadcastWriteSignal(entry, signal, resolve, reject, self) { + const onAbort = () => { + const pendingWrites = getBroadcastPendingWrites(self); + const idx = pendingWrites.indexOf(entry); + if (idx !== -1) pendingWrites.removeAt(idx); + entry.chunk = null; + reject(signal.reason ?? lazyDOMException('Aborted', 'AbortError')); + }; + entry.resolve = function() { + signal.removeEventListener('abort', onAbort); + entry.chunk = null; + resolve(); + }; + entry.reject = function(reason) { + signal.removeEventListener('abort', onAbort); + entry.chunk = null; + reject(reason); + }; + signal.addEventListener('abort', onAbort, { __proto__: null, once: true }); +} + +// ============================================================================= +// Public API +// ============================================================================= + +/** + * Create a broadcast channel for push-model multi-consumer streaming. + * @param {{ highWaterMark?: number, backpressure?: string, signal?: AbortSignal }} [options] + * @returns {{ writer: Writer, broadcast: Broadcast }} + */ +function broadcast(options = { __proto__: null }) { + validateObject(options, 'options'); + const { + highWaterMark = kMultiConsumerDefaultHWM, + backpressure = 'strict', + signal, + } = options; + validateInteger(highWaterMark, 'options.highWaterMark'); + validateBackpressure(backpressure); + if (signal !== undefined) { + validateAbortSignal(signal, 'options.signal'); + } + + const opts = { + __proto__: null, + highWaterMark: clampHWM(highWaterMark), + backpressure, + signal, + }; + + const broadcastImpl = new BroadcastImpl(opts); + const writer = new BroadcastWriter(broadcastImpl); + broadcastImpl.setWriter(writer); + + if (signal) { + onSignalAbort(signal, () => broadcastImpl.cancel()); + } + + return { __proto__: null, writer, broadcast: broadcastImpl }; +} + +function isBroadcastable(value) { + return hasProtocol(value, broadcastProtocol); +} + +const Broadcast = { + __proto__: null, + 
+  from(input, options) {
+    // If the input implements the broadcast protocol, delegate entirely:
+    // it supplies its own broadcast side, so the returned `writer` is an
+    // inert null-prototype placeholder (there is nothing to write to).
+    if (isBroadcastable(input)) {
+      const bc = input[broadcastProtocol](options);
+      if (bc === null || typeof bc !== 'object') {
+        throw new ERR_INVALID_RETURN_VALUE(
+          'an object', '[Symbol.for(\'Stream.broadcastProtocol\')]', bc);
+      }
+      return { __proto__: null, writer: { __proto__: null }, broadcast: bc };
+    }
+
+    if (!isAsyncIterable(input) && !isSyncIterable(input)) {
+      throw new ERR_INVALID_ARG_TYPE(
+        'input', ['Broadcastable', 'AsyncIterable', 'Iterable'], input);
+    }
+
+    const result = broadcast(options);
+    const signal = options?.signal;
+
+    // Detached pump: copies the iterable into the broadcast writer. Errors
+    // are routed into w.fail() below, so consumers observe them; the pump
+    // promise itself is intentionally never surfaced.
+    const pump = async () => {
+      const w = result.writer;
+      try {
+        if (isAsyncIterable(input)) {
+          for await (const chunks of input) {
+            signal?.throwIfAborted();
+            // Arrays are treated as pre-batched chunk vectors; try the
+            // synchronous fast path first and fall back to the awaiting
+            // form only when backpressure rejects the sync write.
+            if (ArrayIsArray(chunks)) {
+              if (!w.writevSync(chunks)) {
+                await w.writev(chunks, signal ? { signal } : undefined);
+              }
+            } else if (!w.writeSync(chunks)) {
+              await w.write(chunks, signal ? { signal } : undefined);
+            }
+          }
+        } else if (isSyncIterable(input)) {
+          // Same write strategy as above, over a synchronous iterable.
+          for (const chunks of input) {
+            signal?.throwIfAborted();
+            if (ArrayIsArray(chunks)) {
+              if (!w.writevSync(chunks)) {
+                await w.writev(chunks, signal ? { signal } : undefined);
+              }
+            } else if (!w.writeSync(chunks)) {
+              await w.write(chunks, signal ? { signal } : undefined);
+            }
+          }
+        }
+        // NOTE(review): BroadcastWriter#endSync returns the accumulated
+        // byte count, which is never negative, so this fallback branch
+        // looks unreachable for writers created by broadcast() -- confirm
+        // whether a future writer implementation can return < 0 here, or
+        // simplify to a bare endSync() call.
+        if (w.endSync() < 0) {
+          await w.end(signal ?
{ signal } : undefined); + } + } catch (error) { + w.fail(wrapError(error)); + } + }; + PromisePrototypeThen(pump(), undefined, () => {}); + + return result; + }, +}; + +module.exports = { + Broadcast, + broadcast, +}; diff --git a/lib/internal/streams/iter/consumers.js b/lib/internal/streams/iter/consumers.js new file mode 100644 index 00000000000000..6e47d3cf9638fd --- /dev/null +++ b/lib/internal/streams/iter/consumers.js @@ -0,0 +1,523 @@ +'use strict'; + +// New Streams API - Consumers & Utilities +// +// bytes(), text(), arrayBuffer() - collect entire stream +// tap(), tapSync() - observe without modifying +// merge() - temporal combining of sources +// ondrain() - backpressure drain utility + +const { + ArrayBufferPrototypeGetByteLength, + ArrayBufferPrototypeSlice, + ArrayPrototypeMap, + ArrayPrototypePush, + ArrayPrototypeSlice, + Promise, + PromisePrototypeThen, + SafePromiseAllReturnVoid, + SymbolAsyncIterator, + TypedArrayPrototypeGetBuffer, + TypedArrayPrototypeGetByteLength, + TypedArrayPrototypeGetByteOffset, +} = primordials; + +const { + codes: { + ERR_INVALID_ARG_TYPE, + ERR_INVALID_ARG_VALUE, + ERR_OUT_OF_RANGE, + }, +} = require('internal/errors'); +const { TextDecoder } = require('internal/encoding'); +const { + validateAbortSignal, + validateFunction, + validateInteger, + validateObject, +} = require('internal/validators'); + +const { + from, + fromSync, + isAsyncIterable, + isSyncIterable, +} = require('internal/streams/iter/from'); + +const { + concatBytes, +} = require('internal/streams/iter/utils'); + +const { + drainableProtocol, +} = require('internal/streams/iter/types'); + +const { + isSharedArrayBuffer, +} = require('internal/util/types'); + +// ============================================================================= +// Type Guards +// ============================================================================= + +function isMergeOptions(value) { + return ( + value !== null && + typeof value === 'object' && + 
!isAsyncIterable(value) && + !isSyncIterable(value) + ); +} + +// ============================================================================= +// Shared chunk collection helpers +// ============================================================================= + +/** + * Collect chunks from a sync source into an array. + * @param {Iterable} source + * @param {number} [limit] + * @returns {Uint8Array[]} + */ +function collectSync(source, limit) { + // Normalize source via fromSync() - accepts strings, ArrayBuffers, protocols, etc. + const normalized = fromSync(source); + const chunks = []; + let totalBytes = 0; + + for (const batch of normalized) { + for (let i = 0; i < batch.length; i++) { + const chunk = batch[i]; + if (limit !== undefined) { + totalBytes += TypedArrayPrototypeGetByteLength(chunk); + if (totalBytes > limit) { + throw new ERR_OUT_OF_RANGE('totalBytes', `<= ${limit}`, totalBytes); + } + } + ArrayPrototypePush(chunks, chunk); + } + } + + return chunks; +} + +/** + * Collect chunks from an async or sync source into an array. + * @param {AsyncIterable|Iterable} source + * @param {AbortSignal} [signal] + * @param {number} [limit] + * @returns {Promise} + */ +async function collectAsync(source, signal, limit) { + signal?.throwIfAborted(); + + // Normalize source via from() - accepts strings, ArrayBuffers, protocols, etc. 
+ const normalized = from(source); + const chunks = []; + + // Fast path: no signal and no limit + if (!signal && limit === undefined) { + for await (const batch of normalized) { + for (let i = 0; i < batch.length; i++) { + ArrayPrototypePush(chunks, batch[i]); + } + } + return chunks; + } + + // Slow path: with signal or limit checks + let totalBytes = 0; + + for await (const batch of normalized) { + signal?.throwIfAborted(); + for (let i = 0; i < batch.length; i++) { + const chunk = batch[i]; + if (limit !== undefined) { + totalBytes += TypedArrayPrototypeGetByteLength(chunk); + if (totalBytes > limit) { + throw new ERR_OUT_OF_RANGE('totalBytes', `<= ${limit}`, totalBytes); + } + } + ArrayPrototypePush(chunks, chunk); + } + } + + return chunks; +} + +/** + * Convert a Uint8Array to its backing ArrayBuffer, slicing if necessary. + * Handles both ArrayBuffer and SharedArrayBuffer backing stores. + * @param {Uint8Array} data + * @returns {ArrayBuffer|SharedArrayBuffer} + */ +function toArrayBuffer(data) { + const byteOffset = TypedArrayPrototypeGetByteOffset(data); + const byteLength = TypedArrayPrototypeGetByteLength(data); + const buffer = TypedArrayPrototypeGetBuffer(data); + // SharedArrayBuffer is not available in primordials, so use + // direct property access for its byteLength and slice. 
+ if (isSharedArrayBuffer(buffer)) { + if (byteOffset === 0 && byteLength === buffer.byteLength) { + return buffer; + } + return buffer.slice(byteOffset, byteOffset + byteLength); + } + if (byteOffset === 0 && + byteLength === ArrayBufferPrototypeGetByteLength(buffer)) { + return buffer; + } + return ArrayBufferPrototypeSlice(buffer, byteOffset, + byteOffset + byteLength); +} + +// ============================================================================= +// Shared option validation +// ============================================================================= + +function validateBaseConsumerOptions(options) { + validateObject(options, 'options'); + if (options.limit !== undefined) { + validateInteger(options.limit, 'options.limit', 0); + } + if (options.encoding !== undefined) { + if (typeof options.encoding !== 'string') { + throw new ERR_INVALID_ARG_TYPE('options.encoding', 'string', + options.encoding); + } + try { + new TextDecoder(options.encoding); + } catch { + throw new ERR_INVALID_ARG_VALUE.RangeError( + 'options.encoding', options.encoding); + } + } +} + +function validateConsumerOptions(options) { + validateBaseConsumerOptions(options); + if (options.signal !== undefined) { + validateAbortSignal(options.signal, 'options.signal'); + } +} + +function validateSyncConsumerOptions(options) { + validateBaseConsumerOptions(options); +} + +// ============================================================================= +// Sync Consumers +// ============================================================================= + +const kNullPrototype = { __proto__: null }; + +/** + * Collect all bytes from a sync source. + * @param {Iterable} source + * @param {{ limit?: number }} [options] + * @returns {Uint8Array} + */ +function bytesSync(source, options = kNullPrototype) { + validateSyncConsumerOptions(options); + return concatBytes(collectSync(source, options.limit)); +} + +/** + * Collect and decode text from a sync source. 
+ * @param {Iterable} source + * @param {{ encoding?: string, limit?: number }} [options] + * @returns {string} + */ +function textSync(source, options = kNullPrototype) { + validateSyncConsumerOptions(options); + const data = concatBytes(collectSync(source, options.limit)); + const decoder = new TextDecoder(options.encoding ?? 'utf-8', { + __proto__: null, + fatal: true, + }); + return decoder.decode(data); +} + +/** + * Collect bytes as ArrayBuffer from a sync source. + * @param {Iterable} source + * @param {{ limit?: number }} [options] + * @returns {ArrayBuffer} + */ +function arrayBufferSync(source, options = kNullPrototype) { + validateSyncConsumerOptions(options); + return toArrayBuffer(concatBytes(collectSync(source, options.limit))); +} + +/** + * Collect all chunks as an array from a sync source. + * @param {Iterable} source + * @param {{ limit?: number }} [options] + * @returns {Uint8Array[]} + */ +function arraySync(source, options = kNullPrototype) { + validateSyncConsumerOptions(options); + return collectSync(source, options.limit); +} + +// ============================================================================= +// Async Consumers +// ============================================================================= + +/** + * Collect all bytes from an async or sync source. + * @param {AsyncIterable|Iterable} source + * @param {{ signal?: AbortSignal, limit?: number }} [options] + * @returns {Promise} + */ +async function bytes(source, options = kNullPrototype) { + validateConsumerOptions(options); + const chunks = await collectAsync(source, options.signal, options.limit); + return concatBytes(chunks); +} + +/** + * Collect and decode text from an async or sync source. 
+ * @param {AsyncIterable|Iterable} source + * @param {{ encoding?: string, signal?: AbortSignal, limit?: number }} [options] + * @returns {Promise} + */ +async function text(source, options = kNullPrototype) { + validateConsumerOptions(options); + const chunks = await collectAsync(source, options.signal, options.limit); + const data = concatBytes(chunks); + const decoder = new TextDecoder(options.encoding ?? 'utf-8', { + __proto__: null, + fatal: true, + }); + return decoder.decode(data); +} + +/** + * Collect bytes as ArrayBuffer from an async or sync source. + * @param {AsyncIterable|Iterable} source + * @param {{ signal?: AbortSignal, limit?: number }} [options] + * @returns {Promise} + */ +async function arrayBuffer(source, options = kNullPrototype) { + validateConsumerOptions(options); + const chunks = await collectAsync(source, options.signal, options.limit); + return toArrayBuffer(concatBytes(chunks)); +} + +/** + * Collect all chunks as an array from an async or sync source. + * @param {AsyncIterable|Iterable} source + * @param {{ signal?: AbortSignal, limit?: number }} [options] + * @returns {Promise} + */ +async function array(source, options = kNullPrototype) { + validateConsumerOptions(options); + return collectAsync(source, options.signal, options.limit); +} + +// ============================================================================= +// Tap Utilities +// ============================================================================= + +/** + * Create a pass-through transform that observes chunks without modifying them. + * @param {Function} callback + * @returns {Function} + */ +function tap(callback) { + validateFunction(callback, 'callback'); + return async (chunks, options) => { + await callback(chunks, options); + return chunks; + }; +} + +/** + * Create a sync pass-through transform that observes chunks. 
+ * @param {Function} callback + * @returns {Function} + */ +function tapSync(callback) { + validateFunction(callback, 'callback'); + return (chunks) => { + callback(chunks); + return chunks; + }; +} + +// ============================================================================= +// Drain Utility +// ============================================================================= + +/** + * Wait for a drainable object's backpressure to clear. + * @param {object} drainable + * @returns {Promise|null} + */ +function ondrain(drainable) { + if ( + drainable === null || + drainable === undefined || + typeof drainable !== 'object' + ) { + return null; + } + + if ( + !(drainableProtocol in drainable) || + typeof drainable[drainableProtocol] !== 'function' + ) { + return null; + } + + return drainable[drainableProtocol](); +} + +// ============================================================================= +// Merge Utility +// ============================================================================= + +/** + * Merge multiple async iterables by yielding values in temporal order. 
+ * @param {...(AsyncIterable|object)} args Sources, optionally followed by an
+ *   options object ({ signal?: AbortSignal }) as the trailing argument.
+ * @returns {AsyncIterable}
+ */
+function merge(...args) {
+  let sources;
+  let options;
+
+  // A trailing plain object (non-iterable) is treated as the options bag.
+  if (args.length > 0 && isMergeOptions(args[args.length - 1])) {
+    options = args[args.length - 1];
+    sources = ArrayPrototypeSlice(args, 0, -1);
+  } else {
+    sources = args;
+  }
+
+  if (options?.signal !== undefined) {
+    validateAbortSignal(options.signal, 'options.signal');
+  }
+
+  // Normalize each source via from()
+  const normalized = ArrayPrototypeMap(sources, (source) => from(source));
+
+  return {
+    __proto__: null,
+    async *[SymbolAsyncIterator]() {
+      const signal = options?.signal;
+
+      signal?.throwIfAborted();
+
+      if (normalized.length === 0) return;
+
+      // Single source: no racing needed, just forward batches.
+      if (normalized.length === 1) {
+        for await (const batch of normalized[0]) {
+          signal?.throwIfAborted();
+          yield batch;
+        }
+        return;
+      }
+
+      // Multiple sources - use a ready queue so that batches that settle
+      // between consumer pulls are drained synchronously without an extra
+      // async tick per batch. Each source has at most one pending .next()
+      // at a time. Every batch from every source is preserved.
+      // NOTE(review): sources keep pumping while the consumer processes a
+      // yielded batch, so `ready` can grow while the consumer is busy --
+      // depth is bounded only by source production between pulls; confirm
+      // this is acceptable for the intended workloads.
+      const ready = [];
+      let activeCount = normalized.length;
+      let waitResolve = null;
+
+      // Called when a source's .next() settles. Pushes the result into
+      // the ready queue and wakes the consumer if it's waiting.
+      const onSettled = (iterator, result) => {
+        if (result.done) {
+          activeCount--;
+        } else {
+          ArrayPrototypePush(ready, result.value);
+          // Immediately request the next value from this source
+          // (at most one pending .next() per source)
+          PromisePrototypeThen(
+            iterator.next(),
+            (r) => onSettled(iterator, r),
+            (err) => {
+              // Rejections travel through the ready queue as sentinel
+              // objects so ordering relative to data batches is preserved.
+              ArrayPrototypePush(ready, { __proto__: null, error: err });
+              if (waitResolve) {
+                waitResolve();
+                waitResolve = null;
+              }
+            },
+          );
+        }
+        if (waitResolve) {
+          waitResolve();
+          waitResolve = null;
+        }
+      };
+
+      // Start one .next() per source
+      const iterators = [];
+      for (let i = 0; i < normalized.length; i++) {
+        const iterator = normalized[i][SymbolAsyncIterator]();
+        ArrayPrototypePush(iterators, iterator);
+        PromisePrototypeThen(
+          iterator.next(),
+          (r) => onSettled(iterator, r),
+          (err) => {
+            ArrayPrototypePush(ready, { __proto__: null, error: err });
+            if (waitResolve) {
+              waitResolve();
+              waitResolve = null;
+            }
+          },
+        );
+      }
+
+      try {
+        while (activeCount > 0 || ready.length > 0) {
+          signal?.throwIfAborted();
+
+          // Drain ready queue synchronously
+          while (ready.length > 0) {
+            const item = ready.shift();
+            // NOTE(review): truthiness check -- a source that rejects with
+            // a falsy reason (e.g. `throw undefined`) produces a sentinel
+            // whose .error is falsy and would be yielded as data instead
+            // of thrown; consider `'error' in item` or a symbol key.
+            if (item?.error) {
+              throw item.error;
+            }
+            yield item;
+          }
+
+          // If sources are still active, wait for the next settlement
+          if (activeCount > 0) {
+            await new Promise((resolve) => {
+              waitResolve = resolve;
+            });
+          }
+        }
+      } finally {
+        // Clean up: return all iterators
+        await SafePromiseAllReturnVoid(iterators, async (iterator) => {
+          if (iterator.return) {
+            try {
+              await iterator.return();
+            } catch {
+              // Ignore return errors
+            }
+          }
+        });
+      }
+    },
+  };
+}
+
+module.exports = {
+  array,
+  arrayBuffer,
+  arrayBufferSync,
+  arraySync,
+  bytes,
+  bytesSync,
+  merge,
+  ondrain,
+  tap,
+  tapSync,
+  text,
+  textSync,
+};
diff --git a/lib/internal/streams/iter/duplex.js b/lib/internal/streams/iter/duplex.js
new file mode 100644
index 00000000000000..591837f70eb4cb
--- /dev/null
+++ b/lib/internal/streams/iter/duplex.js
@@
-0,0 +1,141 @@
+'use strict';
+
+// New Streams API - Duplex Channel
+//
+// Creates a pair of connected channels where data written to one
+// channel's writer appears in the other channel's readable.
+
+const {
+  SymbolAsyncDispose,
+  SymbolAsyncIterator,
+} = primordials;
+
+const {
+  push,
+} = require('internal/streams/iter/push');
+const {
+  validateAbortSignal,
+  validateObject,
+} = require('internal/validators');
+
+/**
+ * Create a pair of connected duplex channels for bidirectional communication.
+ * Per-direction overrides (`a`, `b`) take precedence over the top-level
+ * highWaterMark/backpressure defaults.
+ * @param {{ highWaterMark?: number, backpressure?: string, signal?: AbortSignal,
+ *   a?: object, b?: object }} [options]
+ * @returns {[DuplexChannel, DuplexChannel]}
+ */
+function duplex(options = { __proto__: null }) {
+  validateObject(options, 'options');
+  const { highWaterMark, backpressure, signal, a, b } = options;
+  if (a !== undefined) {
+    validateObject(a, 'options.a');
+  }
+  if (b !== undefined) {
+    validateObject(b, 'options.b');
+  }
+  if (signal !== undefined) {
+    validateAbortSignal(signal, 'options.signal');
+  }
+
+  // Channel A writes to B's readable (A->B direction).
+  // Signal is NOT passed to push() -- we handle abort via close() below.
+  const { writer: aWriter, readable: bReadable } = push({
+    highWaterMark: a?.highWaterMark ?? highWaterMark,
+    backpressure: a?.backpressure ?? backpressure,
+  });
+
+  // Channel B writes to A's readable (B->A direction)
+  const { writer: bWriter, readable: aReadable } = push({
+    highWaterMark: b?.highWaterMark ?? highWaterMark,
+    backpressure: b?.backpressure ?? backpressure,
+  });
+
+  let aClosed = false;
+  let bClosed = false;
+  // Track active iterators so close() can call .return() on them
+  // NOTE(review): only the most recently created iterator per channel is
+  // tracked -- repeated `readable` access leaves earlier iterators
+  // un-returned on close(); confirm the single-consumer assumption.
+  let aReadableIterator = null;
+  let bReadableIterator = null;
+
+  const channelA = {
+    __proto__: null,
+    get writer() { return aWriter; },
+    // Wrap readable to track the iterator for cleanup on close()
+    get readable() {
+      return {
+        __proto__: null,
+        [SymbolAsyncIterator]() {
+          const iter = aReadable[SymbolAsyncIterator]();
+          aReadableIterator = iter;
+          return iter;
+        },
+      };
+    },
+    // Clean shutdown of this endpoint: end our outbound writer, then stop
+    // consuming our inbound readable. Idempotent via aClosed.
+    async close() {
+      if (aClosed) return;
+      aClosed = true;
+      // End the writer (signals end-of-stream to B's readable)
+      // NOTE(review): assumes push()'s writer signals "needs async end"
+      // with a negative endSync() return -- push() is not visible here;
+      // confirm against its contract.
+      if (aWriter.endSync() < 0) {
+        await aWriter.end();
+      }
+      // Stop iteration of this channel's readable
+      if (aReadableIterator?.return) {
+        await aReadableIterator.return();
+        aReadableIterator = null;
+      }
+    },
+    [SymbolAsyncDispose]() {
+      return this.close();
+    },
+  };
+
+  // Mirror of channelA for the opposite direction.
+  const channelB = {
+    __proto__: null,
+    get writer() { return bWriter; },
+    get readable() {
+      return {
+        __proto__: null,
+        [SymbolAsyncIterator]() {
+          const iter = bReadable[SymbolAsyncIterator]();
+          bReadableIterator = iter;
+          return iter;
+        },
+      };
+    },
+    async close() {
+      if (bClosed) return;
+      bClosed = true;
+      if (bWriter.endSync() < 0) {
+        await bWriter.end();
+      }
+      if (bReadableIterator?.return) {
+        await bReadableIterator.return();
+        bReadableIterator = null;
+      }
+    },
+    [SymbolAsyncDispose]() {
+      return this.close();
+    },
+  };
+
+  // Signal handler: fail both writers with the abort reason so consumers
+  // see the error. This is an error-path shutdown, not a clean close.
+ if (signal) { + const abortBoth = () => { + const reason = signal.reason; + aWriter.fail(reason); + bWriter.fail(reason); + }; + if (signal.aborted) { + abortBoth(); + } else { + signal.addEventListener('abort', abortBoth, + { __proto__: null, once: true }); + } + } + + return [channelA, channelB]; +} + +module.exports = { + duplex, +}; diff --git a/lib/internal/streams/iter/from.js b/lib/internal/streams/iter/from.js new file mode 100644 index 00000000000000..d76f430ab0d51e --- /dev/null +++ b/lib/internal/streams/iter/from.js @@ -0,0 +1,577 @@ +'use strict'; + +// New Streams API - from() and fromSync() +// +// Creates normalized byte stream iterables from various input types. +// Handles recursive flattening of nested iterables and protocol conversions. + +const { + ArrayBufferIsView, + ArrayIsArray, + ArrayPrototypeEvery, + ArrayPrototypePush, + ArrayPrototypeSlice, + DataViewPrototypeGetBuffer, + DataViewPrototypeGetByteLength, + DataViewPrototypeGetByteOffset, + FunctionPrototypeCall, + SymbolAsyncIterator, + SymbolIterator, + TypedArrayPrototypeGetBuffer, + TypedArrayPrototypeGetByteLength, + TypedArrayPrototypeGetByteOffset, + Uint8Array, +} = primordials; + +const { + codes: { + ERR_INVALID_ARG_TYPE, + }, +} = require('internal/errors'); + +const { + isAnyArrayBuffer, + isDataView, + isPromise, + isUint8Array, +} = require('internal/util/types'); + +const { + toStreamable, + toAsyncStreamable, +} = require('internal/streams/iter/types'); + +const { + hasProtocol, + toUint8Array, +} = require('internal/streams/iter/utils'); + +// Maximum number of chunks to yield per batch from from(Uint8Array[]). +// Bounds peak memory when arrays flow through transforms, which must +// allocate output for the entire batch at once. 
+const FROM_BATCH_SIZE = 128; + +// ============================================================================= +// Type Guards and Detection +// ============================================================================= + +/** + * Check if value is a primitive chunk (string, ArrayBuffer, or ArrayBufferView). + * @returns {boolean} + */ +function isPrimitiveChunk(value) { + return typeof value === 'string' || isAnyArrayBuffer(value) || ArrayBufferIsView(value); +} + +/** + * Check if value is a sync iterable (has Symbol.iterator). + * @returns {boolean} + */ +function isSyncIterable(value) { + // We do not consider regular strings to be sync iterables in this context. + // We don't care about boxed strings (String objects) since they are uncommon. + return typeof value !== 'string' && + typeof value?.[SymbolIterator] === 'function'; +} + +/** + * Check if value is an async iterable (has Symbol.asyncIterator). + * @returns {boolean} + */ +function isAsyncIterable(value) { + return typeof value?.[SymbolAsyncIterator] === 'function'; +} + +// ============================================================================= +// Primitive Conversion +// ============================================================================= + +/** + * Convert a primitive chunk to Uint8Array. + * - string: UTF-8 encoded + * - ArrayBuffer: wrapped as Uint8Array view (no copy) + * - ArrayBufferView: converted to Uint8Array view of same memory + * @param {string|ArrayBuffer|ArrayBufferView} chunk + * @returns {Uint8Array} + */ +function primitiveToUint8Array(chunk) { + if (typeof chunk === 'string') { + return toUint8Array(chunk); + } + if (isAnyArrayBuffer(chunk)) { + return new Uint8Array(chunk); + } + if (isUint8Array(chunk)) { + return chunk; + } + // Other ArrayBufferView types (Int8Array, DataView, etc.) 
+ if (isDataView(chunk)) { + return new Uint8Array( + DataViewPrototypeGetBuffer(chunk), + DataViewPrototypeGetByteOffset(chunk), + DataViewPrototypeGetByteLength(chunk), + ); + } + return new Uint8Array( + TypedArrayPrototypeGetBuffer(chunk), + TypedArrayPrototypeGetByteOffset(chunk), + TypedArrayPrototypeGetByteLength(chunk), + ); +} + +// ============================================================================= +// Sync Normalization (for fromSync and sync contexts) +// ============================================================================= + +/** + * Normalize a sync streamable yield value to Uint8Array chunks. + * Recursively flattens arrays, iterables, and protocol conversions. + * @yields {Uint8Array} + */ +function* normalizeSyncValue(value) { + // Handle primitives + if (isPrimitiveChunk(value)) { + yield primitiveToUint8Array(value); + return; + } + + // Handle ToStreamable protocol + if (hasProtocol(value, toStreamable)) { + const result = FunctionPrototypeCall(value[toStreamable], value); + yield* normalizeSyncValue(result); + return; + } + + // Handle arrays (which are also iterable, but check first for efficiency) + if (ArrayIsArray(value)) { + for (let i = 0; i < value.length; i++) { + yield* normalizeSyncValue(value[i]); + } + return; + } + + // Handle other sync iterables + if (isSyncIterable(value)) { + for (const item of value) { + yield* normalizeSyncValue(item); + } + return; + } + + // Reject: no valid conversion + throw new ERR_INVALID_ARG_TYPE( + 'value', + ['string', 'ArrayBuffer', 'ArrayBufferView', 'Iterable', 'toStreamable'], + value, + ); +} + +/** + * Check if value is already a Uint8Array[] batch (fast path). 
+ * @returns {boolean} + */ +function isUint8ArrayBatch(value) { + if (!ArrayIsArray(value)) return false; + const len = value.length; + if (len === 0) return true; + // Fast path: single-element batch (most common from transforms) + if (len === 1) return isUint8Array(value[0]); + // Check first and last before iterating all elements + if (!isUint8Array(value[0]) || !isUint8Array(value[len - 1])) return false; + if (len === 2) return true; + for (let i = 1; i < len - 1; i++) { + if (!isUint8Array(value[i])) return false; + } + return true; +} + +/** + * Normalize a sync streamable source, yielding batches of Uint8Array. + * @param {Iterable} source + * @yields {Uint8Array[]} + */ +function* normalizeSyncSource(source) { + for (const value of source) { + // Fast path 1: value is already a Uint8Array[] batch + if (isUint8ArrayBatch(value)) { + if (value.length > 0) { + yield value; + } + continue; + } + // Fast path 2: value is a single Uint8Array (very common) + if (isUint8Array(value)) { + yield [value]; + continue; + } + // Slow path: normalize the value + const batch = []; + for (const chunk of normalizeSyncValue(value)) { + ArrayPrototypePush(batch, chunk); + } + if (batch.length > 0) { + yield batch; + } + } +} + +// ============================================================================= +// Async Normalization (for from and async contexts) +// ============================================================================= + +/** + * Normalize an async streamable yield value to Uint8Array chunks. + * Recursively flattens arrays, iterables, async iterables, promises, + * and protocol conversions. 
+ * @yields {Uint8Array} + */ +async function* normalizeAsyncValue(value) { + // Handle promises first + if (isPromise(value)) { + const resolved = await value; + yield* normalizeAsyncValue(resolved); + return; + } + + // Handle primitives + if (isPrimitiveChunk(value)) { + yield primitiveToUint8Array(value); + return; + } + + // Handle ToAsyncStreamable protocol (check before ToStreamable) + if (hasProtocol(value, toAsyncStreamable)) { + const result = FunctionPrototypeCall(value[toAsyncStreamable], value); + if (isPromise(result)) { + yield* normalizeAsyncValue(await result); + } else { + yield* normalizeAsyncValue(result); + } + return; + } + + // Handle ToStreamable protocol + if (hasProtocol(value, toStreamable)) { + const result = FunctionPrototypeCall(value[toStreamable], value); + yield* normalizeAsyncValue(result); + return; + } + + // Handle arrays (which are also iterable, but check first for efficiency) + if (ArrayIsArray(value)) { + for (let i = 0; i < value.length; i++) { + yield* normalizeAsyncValue(value[i]); + } + return; + } + + // Handle async iterables (check before sync iterables since some objects + // have both) + if (isAsyncIterable(value)) { + for await (const item of value) { + yield* normalizeAsyncValue(item); + } + return; + } + + // Handle sync iterables + if (isSyncIterable(value)) { + for (const item of value) { + yield* normalizeAsyncValue(item); + } + return; + } + + // Reject: no valid conversion + throw new ERR_INVALID_ARG_TYPE( + 'value', + ['string', 'ArrayBuffer', 'ArrayBufferView', 'Iterable', 'AsyncIterable', + 'toStreamable', 'toAsyncStreamable'], + value, + ); +} + +/** + * Normalize an async streamable source, yielding batches of Uint8Array. 
+ * @param {AsyncIterable|Iterable} source + * @yields {Uint8Array[]} + */ +async function* normalizeAsyncSource(source) { + // Prefer async iteration if available + if (isAsyncIterable(source)) { + for await (const value of source) { + // Fast path 1: value is already a Uint8Array[] batch + if (isUint8ArrayBatch(value)) { + if (value.length > 0) { + yield value; + } + continue; + } + // Fast path 2: value is a single Uint8Array (very common) + if (isUint8Array(value)) { + yield [value]; + continue; + } + // Slow path: normalize the value + const batch = []; + for await (const chunk of normalizeAsyncValue(value)) { + ArrayPrototypePush(batch, chunk); + } + if (batch.length > 0) { + yield batch; + } + } + return; + } + + // Fall back to sync iteration - batch all sync values together + if (isSyncIterable(source)) { + const batch = []; + + for (const value of source) { + // Fast path 1: value is already a Uint8Array[] batch + if (isUint8ArrayBatch(value)) { + // Flush any accumulated batch first + if (batch.length > 0) { + yield ArrayPrototypeSlice(batch); + batch.length = 0; + } + if (value.length > 0) { + yield value; + } + continue; + } + // Fast path 2: value is a single Uint8Array (very common) + if (isUint8Array(value)) { + ArrayPrototypePush(batch, value); + continue; + } + // Slow path: normalize the value - must flush and yield individually + if (batch.length > 0) { + yield ArrayPrototypeSlice(batch); + batch.length = 0; + } + const asyncBatch = []; + for await (const chunk of normalizeAsyncValue(value)) { + ArrayPrototypePush(asyncBatch, chunk); + } + if (asyncBatch.length > 0) { + yield asyncBatch; + } + } + + // Yield any remaining batched values + if (batch.length > 0) { + yield batch; + } + return; + } + + throw new ERR_INVALID_ARG_TYPE( + 'source', + ['Iterable', 'AsyncIterable'], + source, + ); +} + +// ============================================================================= +// Public API: from() and fromSync() +// 
============================================================================= + +/** + * Create a SyncByteStreamReadable from a ByteInput or SyncStreamable. + * @param {string|ArrayBuffer|ArrayBufferView|Iterable} input + * @returns {Iterable} + */ +function fromSync(input) { + if (input == null) { + throw new ERR_INVALID_ARG_TYPE('input', 'a non-null value', input); + } + + // Check for primitives first (ByteInput) + if (isPrimitiveChunk(input)) { + const chunk = primitiveToUint8Array(input); + return { + __proto__: null, + *[SymbolIterator]() { + yield [chunk]; + }, + }; + } + + // Fast path: Uint8Array[] - yield in bounded sub-batches. + // Yielding the entire array as one batch forces downstream transforms + // to process all data at once, causing peak memory proportional to total + // data volume. Sub-batching keeps peak memory bounded while preserving + // the throughput benefit of batched processing. + if (ArrayIsArray(input)) { + if (input.length === 0) { + return { + __proto__: null, + *[SymbolIterator]() { + // Empty - yield nothing + }, + }; + } + // Check if it's an array of Uint8Array (common case) + if (isUint8Array(input[0])) { + const allUint8 = ArrayPrototypeEvery(input, isUint8Array); + if (allUint8) { + const batch = input; + return { + __proto__: null, + *[SymbolIterator]() { + if (batch.length <= FROM_BATCH_SIZE) { + yield batch; + } else { + for (let i = 0; i < batch.length; i += FROM_BATCH_SIZE) { + yield ArrayPrototypeSlice(batch, i, i + FROM_BATCH_SIZE); + } + } + }, + }; + } + } + } + + // Check toStreamable protocol (takes precedence over iteration protocols). + // toAsyncStreamable is ignored entirely in fromSync. 
+ if (typeof input[toStreamable] === 'function') { + return fromSync(input[toStreamable]()); + } + + // Reject explicit async inputs + if (isAsyncIterable(input)) { + throw new ERR_INVALID_ARG_TYPE( + 'input', + 'a synchronous input (not AsyncIterable)', + input, + ); + } + if (typeof input === 'object' && input !== null && typeof input.then === 'function') { + throw new ERR_INVALID_ARG_TYPE( + 'input', + 'a synchronous input (not Promise)', + input, + ); + } + + // Must be a SyncStreamable + if (!isSyncIterable(input)) { + throw new ERR_INVALID_ARG_TYPE( + 'input', + ['string', 'ArrayBuffer', 'ArrayBufferView', 'Iterable', 'toStreamable'], + input, + ); + } + + return { + __proto__: null, + *[SymbolIterator]() { + yield* normalizeSyncSource(input); + }, + }; +} + +/** + * Create a ByteStreamReadable from a ByteInput or Streamable. + * @param {string|ArrayBuffer|ArrayBufferView|Iterable|AsyncIterable} input + * @returns {AsyncIterable} + */ +function from(input) { + if (input == null) { + throw new ERR_INVALID_ARG_TYPE('input', 'a non-null value', input); + } + + // Check for primitives first (ByteInput) + if (isPrimitiveChunk(input)) { + const chunk = primitiveToUint8Array(input); + return { + __proto__: null, + async *[SymbolAsyncIterator]() { + yield [chunk]; + }, + }; + } + + // Fast path: Uint8Array[] - yield in bounded sub-batches. + // Yielding the entire array as one batch forces downstream transforms + // to process all data at once, causing peak memory proportional to total + // data volume. Sub-batching keeps peak memory bounded while preserving + // the throughput benefit of batched processing. 
+ if (ArrayIsArray(input)) { + if (input.length === 0) { + return { + __proto__: null, + async *[SymbolAsyncIterator]() { + // Empty - yield nothing + }, + }; + } + if (isUint8Array(input[0])) { + const allUint8 = ArrayPrototypeEvery(input, isUint8Array); + if (allUint8) { + const batch = input; + return { + __proto__: null, + async *[SymbolAsyncIterator]() { + if (batch.length <= FROM_BATCH_SIZE) { + yield batch; + } else { + for (let i = 0; i < batch.length; i += FROM_BATCH_SIZE) { + yield ArrayPrototypeSlice(batch, i, i + FROM_BATCH_SIZE); + } + } + }, + }; + } + } + } + + // Check toAsyncStreamable protocol (takes precedence over toStreamable and + // iteration protocols) + if (typeof input[toAsyncStreamable] === 'function') { + return { + __proto__: null, + async *[SymbolAsyncIterator]() { + const result = await input[toAsyncStreamable](); + yield* from(result)[SymbolAsyncIterator](); + }, + }; + } + + // Check toStreamable protocol (takes precedence over iteration protocols) + if (typeof input[toStreamable] === 'function') { + return from(input[toStreamable]()); + } + + // Must be a Streamable (sync or async iterable) + if (!isSyncIterable(input) && !isAsyncIterable(input)) { + throw new ERR_INVALID_ARG_TYPE( + 'input', + ['string', 'ArrayBuffer', 'ArrayBufferView', 'Iterable', + 'AsyncIterable', 'toStreamable', 'toAsyncStreamable'], + input, + ); + } + + return normalizeAsyncSource(input); +} + +// ============================================================================= +// Exports +// ============================================================================= + +module.exports = { + from, + fromSync, + isAsyncIterable, + isPrimitiveChunk, + isSyncIterable, + isUint8ArrayBatch, + normalizeAsyncSource, + normalizeAsyncValue, + normalizeSyncSource, + normalizeSyncValue, + primitiveToUint8Array, +}; diff --git a/lib/internal/streams/iter/pull.js b/lib/internal/streams/iter/pull.js new file mode 100644 index 00000000000000..8c49941ddb54ed --- /dev/null 
+++ b/lib/internal/streams/iter/pull.js @@ -0,0 +1,935 @@ +'use strict'; + +// New Streams API - Pull Pipeline +// +// pull(), pullSync(), pipeTo(), pipeToSync() +// Pull-through pipelines with transforms. Data flows on-demand from source +// through transforms to consumer. + +const { + ArrayBufferIsView, + ArrayPrototypePush, + ArrayPrototypeSlice, + PromisePrototypeThen, + SymbolAsyncIterator, + SymbolIterator, + TypedArrayPrototypeGetByteLength, + Uint8Array, +} = primordials; + +const { + codes: { + ERR_INVALID_ARG_TYPE, + ERR_INVALID_ARG_VALUE, + }, +} = require('internal/errors'); +const { lazyDOMException } = require('internal/util'); +const { validateAbortSignal } = require('internal/validators'); +const { + isAnyArrayBuffer, + isPromise, + isUint8Array, +} = require('internal/util/types'); +const { AbortController } = require('internal/abort_controller'); + +const { + from, + fromSync, + primitiveToUint8Array, + isSyncIterable, + isAsyncIterable, + isUint8ArrayBatch, +} = require('internal/streams/iter/from'); + +const { + isPullOptions, + isTransform, + isTransformObject, + parsePullArgs, + toUint8Array, + wrapError, +} = require('internal/streams/iter/utils'); + +const { + kTrustedTransform, +} = require('internal/streams/iter/types'); + +// ============================================================================= +// Type Guards and Helpers +// ============================================================================= + +/** + * Check if a value is a Writer (has write method). + * @returns {boolean} + */ +function hasMethod(value, name) { + return typeof value?.[name] === 'function'; +} + +/** + * Parse pipeTo/pipeToSync arguments: [...transforms, writer, options?] 
+ * @param {Array} args + * @param {string} requiredMethod - 'write' for pipeTo, 'writeSync' for pipeToSync + * @returns {{ transforms: Array, writer: object, options: object }} + */ +function parsePipeToArgs(args, requiredMethod) { + if (args.length === 0) { + throw new ERR_INVALID_ARG_VALUE('args', args, 'pipeTo requires a writer argument'); + } + + let options; + let writerIndex = args.length - 1; + + // Check if last arg is options + const last = args[args.length - 1]; + if (isPullOptions(last) && !hasMethod(last, requiredMethod)) { + options = last; + writerIndex = args.length - 2; + } + + if (writerIndex < 0) { + throw new ERR_INVALID_ARG_VALUE('args', args, 'pipeTo requires a writer argument'); + } + + const writer = args[writerIndex]; + if (!hasMethod(writer, requiredMethod)) { + throw new ERR_INVALID_ARG_TYPE( + 'writer', `object with a ${requiredMethod} method`, writer); + } + + const transforms = ArrayPrototypeSlice(args, 0, writerIndex); + for (let i = 0; i < transforms.length; i++) { + if (!isTransform(transforms[i])) { + throw new ERR_INVALID_ARG_TYPE( + `transforms[${i}]`, ['Function', 'Object with transform()'], + transforms[i]); + } + } + + return { + __proto__: null, + transforms, + writer, + options, + }; +} + +// ============================================================================= +// Transform Output Flattening +// ============================================================================= + +/** + * Flatten transform yield to Uint8Array chunks (sync). 
+ * @yields {Uint8Array}
+ */
+function* flattenTransformYieldSync(value) {
+  // Order matters here: Uint8Array must be checked before the generic
+  // ArrayBufferView case (a Uint8Array is also a view), and string before
+  // the iterable case (strings are sync-iterable but must be UTF-8 encoded
+  // as a single unit, not iterated per code point).
+  if (isUint8Array(value)) {
+    yield value;
+    return;
+  }
+  if (typeof value === 'string') {
+    yield toUint8Array(value);
+    return;
+  }
+  if (isAnyArrayBuffer(value)) {
+    yield new Uint8Array(value);
+    return;
+  }
+  if (ArrayBufferIsView(value)) {
+    yield primitiveToUint8Array(value);
+    return;
+  }
+  // Must be Iterable
+  if (isSyncIterable(value)) {
+    // Recurse so arbitrarily nested iterables flatten to one chunk stream.
+    for (const item of value) {
+      yield* flattenTransformYieldSync(item);
+    }
+    return;
+  }
+  throw new ERR_INVALID_ARG_TYPE(
+    'value',
+    ['Uint8Array', 'string', 'ArrayBuffer', 'ArrayBufferView', 'Iterable'],
+    value);
+}
+
+/**
+ * Flatten transform yield to Uint8Array chunks (async).
+ * Same check ordering as the sync variant, with async iterables checked
+ * before sync iterables because some objects implement both protocols.
+ * @yields {Uint8Array}
+ */
+async function* flattenTransformYieldAsync(value) {
+  if (isUint8Array(value)) {
+    yield value;
+    return;
+  }
+  if (typeof value === 'string') {
+    yield toUint8Array(value);
+    return;
+  }
+  if (isAnyArrayBuffer(value)) {
+    yield new Uint8Array(value);
+    return;
+  }
+  if (ArrayBufferIsView(value)) {
+    yield primitiveToUint8Array(value);
+    return;
+  }
+  // Check for async iterable first
+  if (isAsyncIterable(value)) {
+    for await (const item of value) {
+      yield* flattenTransformYieldAsync(item);
+    }
+    return;
+  }
+  // Must be sync Iterable
+  if (isSyncIterable(value)) {
+    for (const item of value) {
+      yield* flattenTransformYieldAsync(item);
+    }
+    return;
+  }
+  throw new ERR_INVALID_ARG_TYPE(
+    'value',
+    ['Uint8Array', 'string', 'ArrayBuffer', 'ArrayBufferView',
+     'Iterable', 'AsyncIterable'],
+    value);
+}
+
+/**
+ * Process transform result (sync).
+ * @yields {Uint8Array[]} + */ +function* processTransformResultSync(result) { + if (result === null) { + return; + } + // Single Uint8Array -> wrap as batch + if (isUint8Array(result)) { + yield [result]; + return; + } + // String -> UTF-8 encode and wrap as batch + if (typeof result === 'string') { + yield [toUint8Array(result)]; + return; + } + // ArrayBuffer / ArrayBufferView -> convert and wrap + if (isAnyArrayBuffer(result)) { + yield [new Uint8Array(result)]; + return; + } + if (ArrayBufferIsView(result)) { + yield [primitiveToUint8Array(result)]; + return; + } + // Uint8Array[] batch + if (isUint8ArrayBatch(result)) { + if (result.length > 0) { + yield result; + } + return; + } + // Iterable or Generator + if (isSyncIterable(result)) { + const batch = []; + for (const item of result) { + for (const chunk of flattenTransformYieldSync(item)) { + ArrayPrototypePush(batch, chunk); + } + } + if (batch.length > 0) { + yield batch; + } + return; + } + throw new ERR_INVALID_ARG_TYPE( + 'result', + ['null', 'Uint8Array', 'string', 'ArrayBuffer', + 'ArrayBufferView', 'Array', 'Iterable'], + result); +} + +/** + * Process transform result (async). 
+ * @yields {Uint8Array[]} + */ +async function* processTransformResultAsync(result) { + // Handle Promise + if (isPromise(result)) { + const resolved = await result; + yield* processTransformResultAsync(resolved); + return; + } + if (result === null) { + return; + } + // Single Uint8Array -> wrap as batch + if (isUint8Array(result)) { + yield [result]; + return; + } + // String -> UTF-8 encode and wrap as batch + if (typeof result === 'string') { + yield [toUint8Array(result)]; + return; + } + // ArrayBuffer / ArrayBufferView -> convert and wrap + if (isAnyArrayBuffer(result)) { + yield [new Uint8Array(result)]; + return; + } + if (ArrayBufferIsView(result)) { + yield [primitiveToUint8Array(result)]; + return; + } + // Uint8Array[] batch + if (isUint8ArrayBatch(result)) { + if (result.length > 0) { + yield result; + } + return; + } + // Check for async iterable/generator first + if (isAsyncIterable(result)) { + const batch = []; + for await (const item of result) { + if (isUint8Array(item)) { + ArrayPrototypePush(batch, item); + continue; + } + for await (const chunk of flattenTransformYieldAsync(item)) { + ArrayPrototypePush(batch, chunk); + } + } + if (batch.length > 0) { + yield batch; + } + return; + } + // Sync Iterable or Generator + if (isSyncIterable(result)) { + const batch = []; + for (const item of result) { + if (isUint8Array(item)) { + ArrayPrototypePush(batch, item); + continue; + } + for await (const chunk of flattenTransformYieldAsync(item)) { + ArrayPrototypePush(batch, chunk); + } + } + if (batch.length > 0) { + yield batch; + } + return; + } + throw new ERR_INVALID_ARG_TYPE( + 'result', + ['null', 'Uint8Array', 'string', 'ArrayBuffer', + 'ArrayBufferView', 'Array', 'Iterable', 'AsyncIterable', 'Promise'], + result); +} + +// ============================================================================= +// Sync Pipeline Implementation +// ============================================================================= + +/** + * Apply a single 
 stateless sync transform to a source.
+ * @yields {Uint8Array[]}
+ */
+/**
+ * Apply a fused run of stateless sync transforms.
+ * The whole run is applied per input batch in one tight loop, so N
+ * stateless transforms cost a single generator layer instead of N.
+ * @param {Iterable} source
+ * @param {Array} run - Array of stateless transform functions
+ * @yields {Uint8Array[]}
+ */
+function* applyFusedStatelessSyncTransforms(source, run) {
+  for (const chunks of source) {
+    let current = chunks;
+    for (let i = 0; i < run.length; i++) {
+      const result = run[i](current);
+      // null means "nothing to emit for this input"; stop the run early.
+      if (result === null) {
+        current = null;
+        break;
+      }
+      current = result;
+    }
+    if (current === null) continue;
+    // Inline normalization with Uint8Array[] batch as the fast path,
+    // matching the async pipeline's check order.
+    if (isUint8ArrayBatch(current)) {
+      if (current.length > 0) yield current;
+    } else if (isUint8Array(current)) {
+      yield [current];
+    } else if (typeof current === 'string') {
+      yield [toUint8Array(current)];
+    } else if (isAnyArrayBuffer(current)) {
+      yield [new Uint8Array(current)];
+    } else if (ArrayBufferIsView(current)) {
+      yield [primitiveToUint8Array(current)];
+    } else {
+      yield* processTransformResultSync(current);
+    }
+  }
+  // Flush: pass a trailing null through the run so transforms holding
+  // buffered state can emit their final output.
+  let current = null;
+  for (let i = 0; i < run.length; i++) {
+    const result = run[i](current);
+    if (result === null) {
+      current = null;
+      continue;
+    }
+    current = result;
+  }
+  // NOTE(review): loose `!= null` also skips `undefined` here, while the
+  // async variant uses a strict `!== null` check - confirm which behavior
+  // is intended for a transform that returns undefined on flush.
+  if (current != null) {
+    yield* processTransformResultSync(current);
+  }
+}
+
+/**
+ * Append a null flush sentinel after a sync source is exhausted, so a
+ * downstream stateful transform knows when to flush.
+ * @yields {Uint8Array[]|null}
+ */
+function* withFlushSync(source) {
+  yield* source;
+  yield null;
+}
+
+/**
+ * Apply a single stateful sync transform to a source, flattening whatever
+ * it yields into Uint8Array[] batches.
+ * @yields {Uint8Array[]}
+ */
+function* applyStatefulSyncTransform(source, transform) {
+  const output = transform(withFlushSync(source));
+  for (const item of output) {
+    const batch = [];
+    for (const chunk of flattenTransformYieldSync(item)) {
+      ArrayPrototypePush(batch, chunk);
+    }
+    if (batch.length > 0) {
+      yield batch;
+    }
+  }
+}
+
+/**
+ * Create a sync pipeline from source through transforms.
+ * @yields {Uint8Array[]} + */ +function* createSyncPipeline(source, transforms) { + let current = source; + + // Apply transforms - fuse consecutive stateless transforms into a single + // generator layer to avoid unnecessary generator ticks. + let statelessRun = []; + + for (let i = 0; i < transforms.length; i++) { + const transform = transforms[i]; + if (isTransformObject(transform)) { + if (statelessRun.length > 0) { + current = applyFusedStatelessSyncTransforms(current, statelessRun); + statelessRun = []; + } + current = applyStatefulSyncTransform(current, transform.transform); + } else { + statelessRun.push(transform); + } + } + if (statelessRun.length > 0) { + current = applyFusedStatelessSyncTransforms(current, statelessRun); + } + + yield* current; +} + +// ============================================================================= +// Async Pipeline Implementation +// ============================================================================= + +/** + * Apply a single stateless async transform to a source. + * @yields {Uint8Array[]} + */ +/** + * Apply a fused run of stateless async transforms to a source. + * All transforms in the run are applied in a tight synchronous loop per batch, + * avoiding the overhead of N async generator ticks for N transforms. + * + * INVARIANT: This function accepts a signal, NOT a pre-built options object. + * A fresh { __proto__: null, signal } options object is created for each + * transform invocation to prevent cross-transform mutation. 
+ * @param {AsyncIterable} source
+ * @param {Array} run - Array of stateless transform functions
+ * @param {AbortSignal} signal - The pipeline's abort signal
+ * @yields {Uint8Array[]}
+ */
+async function* applyFusedStatelessAsyncTransforms(source, run, signal) {
+  for await (const chunks of source) {
+    let current = chunks;
+    for (let i = 0; i < run.length; i++) {
+      // Per the invariant above, each call gets its own fresh options bag.
+      const result = run[i](current, { __proto__: null, signal });
+      // null means "nothing to emit for this input"; stop the run early.
+      if (result === null) {
+        current = null;
+        break;
+      }
+      if (isPromise(result)) {
+        const resolved = await result;
+        if (resolved === null) {
+          current = null;
+          break;
+        }
+        current = resolved;
+      } else {
+        current = result;
+      }
+    }
+    if (current === null) continue;
+    // Normalize the final output
+    if (isUint8ArrayBatch(current)) {
+      if (current.length > 0) yield current;
+    } else if (isUint8Array(current)) {
+      yield [current];
+    } else if (typeof current === 'string') {
+      yield [toUint8Array(current)];
+    } else if (isAnyArrayBuffer(current)) {
+      yield [new Uint8Array(current)];
+    } else if (ArrayBufferIsView(current)) {
+      yield [primitiveToUint8Array(current)];
+    } else {
+      yield* processTransformResultAsync(current);
+    }
+  }
+  // Flush: send null through each transform in order
+  let current = null;
+  for (let i = 0; i < run.length; i++) {
+    const result = run[i](current, { __proto__: null, signal });
+    if (result === null) {
+      current = null;
+      continue;
+    }
+    if (isPromise(result)) {
+      current = await result;
+    } else {
+      current = result;
+    }
+  }
+  if (current !== null) {
+    // The rarer ArrayBuffer/ArrayBufferView flush outputs fall through to
+    // processTransformResultAsync, which handles them.
+    if (isUint8ArrayBatch(current)) {
+      if (current.length > 0) yield current;
+    } else if (isUint8Array(current)) {
+      yield [current];
+    } else if (typeof current === 'string') {
+      yield [toUint8Array(current)];
+    } else {
+      yield* processTransformResultAsync(current);
+    }
+  }
+}
+
+/**
+ * Append a null flush signal after the source is exhausted.
+ * @yields {Uint8Array[]|null}
+ */
+async function* withFlushAsync(source) {
+  for await (const batch of source) {
+    yield batch;
+  }
+  // Trailing null tells a stateful transform to flush its buffered state.
+  yield null;
+}
+
+/**
+ * Apply a single stateful async transform to a source, flattening whatever
+ * it yields into Uint8Array[] batches.
+ * @yields {Uint8Array[]}
+ */
+async function* applyStatefulAsyncTransform(source, transform, options) {
+  const output = transform(withFlushAsync(source), options);
+  for await (const item of output) {
+    // Fast path: item is already a Uint8Array[] batch (e.g. compression transforms)
+    if (isUint8ArrayBatch(item)) {
+      if (item.length > 0) {
+        yield item;
+      }
+      continue;
+    }
+    // Fast path: single Uint8Array
+    if (isUint8Array(item)) {
+      yield [item];
+      continue;
+    }
+    // Slow path: flatten arbitrary transform yield
+    const batch = [];
+    for await (const chunk of flattenTransformYieldAsync(item)) {
+      ArrayPrototypePush(batch, chunk);
+    }
+    if (batch.length > 0) {
+      yield batch;
+    }
+  }
+}
+
+/**
+ * Fast path for trusted stateful transforms (e.g. compression).
+ * Skips withFlushAsync (transform handles done internally) and
+ * skips isUint8ArrayBatch validation (transform guarantees valid output).
+ * @yields {Uint8Array[]}
+ */
+async function* applyTrustedStatefulAsyncTransform(source, transform, options) {
+  const output = transform(source, options);
+  for await (const batch of output) {
+    if (batch.length > 0) {
+      yield batch;
+    }
+  }
+  // Check abort after the transform completes - without the
+  // withFlushAsync wrapper there is no extra yield to give
+  // the outer pipeline a chance to see the abort.
+  options.signal?.throwIfAborted();
+}
+
+/**
+ * Create an async pipeline from source through transforms.
+ * @yields {Uint8Array[]} + */ +async function* createAsyncPipeline(source, transforms, signal) { + // Check for abort + signal?.throwIfAborted(); + + const normalized = source; + + // Fast path: no transforms, just yield normalized source directly + if (transforms.length === 0) { + for await (const batch of normalized) { + signal?.throwIfAborted(); + yield batch; + } + return; + } + + // Create internal controller for transform cancellation. + // Note: if signal was already aborted, we threw above - no need to check here. + const controller = new AbortController(); + let abortHandler; + if (signal) { + abortHandler = () => { + controller.abort(signal.reason ?? + lazyDOMException('Aborted', 'AbortError')); + }; + signal.addEventListener('abort', abortHandler, { __proto__: null, once: true }); + } + + // Apply transforms - fuse consecutive stateless transforms into a single + // generator layer to avoid unnecessary async generator ticks. + // + // INVARIANT: Each transform invocation MUST receive its own fresh options + // object ({ __proto__: null, signal }). Transforms may mutate the options + // object, so sharing a single object across invocations would allow one + // transform to corrupt the options seen by another. The signal is shared + // across calls (mutations to it are acceptable), but the containing options + // object must be unique per call. This is enforced inside + // applyFusedStatelessAsyncTransforms and applyStatefulAsyncTransform, which + // accept the signal directly and create the options object per invocation. + // DO NOT pass a pre-built options object. 
+ let current = normalized; + const transformSignal = controller.signal; + let statelessRun = []; + + for (let i = 0; i < transforms.length; i++) { + const transform = transforms[i]; + if (isTransformObject(transform)) { + // Flush any accumulated stateless run before the stateful transform + if (statelessRun.length > 0) { + current = applyFusedStatelessAsyncTransforms(current, statelessRun, + transformSignal); + statelessRun = []; + } + const opts = { __proto__: null, signal: transformSignal }; + if (transform[kTrustedTransform]) { + current = applyTrustedStatefulAsyncTransform( + current, transform.transform, opts); + } else { + current = applyStatefulAsyncTransform( + current, transform.transform, opts); + } + } else { + statelessRun.push(transform); + } + } + // Flush remaining stateless run + if (statelessRun.length > 0) { + current = applyFusedStatelessAsyncTransforms(current, statelessRun, + transformSignal); + } + + let completed = false; + try { + for await (const batch of current) { + controller.signal.throwIfAborted(); + yield batch; + } + completed = true; + } catch (error) { + if (!controller.signal.aborted) { + controller.abort(wrapError(error)); + } + throw error; + } finally { + if (!completed && !controller.signal.aborted) { + // Consumer stopped early or generator return() was called. + // If a transform listener throws here, let it propagate. + controller.abort(lazyDOMException('Aborted', 'AbortError')); + } + // Clean up user signal listener to prevent holding controller alive + if (signal && abortHandler) { + signal.removeEventListener('abort', abortHandler); + } + } +} + +// ============================================================================= +// Public API: pull() and pullSync() +// ============================================================================= + +/** + * Create a sync pull-through pipeline with transforms. 
+ * @param {Iterable} source - The sync streamable source
+ * @param {...Function} transforms - Variadic transforms
+ * @returns {Iterable}
+ */
+function pullSync(source, ...transforms) {
+  // Validate transforms eagerly at call time; iteration itself is lazy.
+  for (let i = 0; i < transforms.length; i++) {
+    if (!isTransform(transforms[i])) {
+      throw new ERR_INVALID_ARG_TYPE(
+        `transforms[${i}]`, ['Function', 'Object with transform()'],
+        transforms[i]);
+    }
+  }
+  // NOTE(review): each [SymbolIterator]() call re-runs fromSync(source);
+  // whether the pipeline is re-iterable depends on the source - confirm.
+  return {
+    __proto__: null,
+    *[SymbolIterator]() {
+      yield* createSyncPipeline(fromSync(source), transforms);
+    },
+  };
+}
+
+/**
+ * Create an async pull-through pipeline with transforms.
+ * @param {Iterable|AsyncIterable} source - The streamable source
+ * @param {...(Function|object)} args - Transforms, with optional PullOptions
+ * as last argument
+ * @returns {AsyncIterable}
+ */
+function pull(source, ...args) {
+  const { transforms, options } = parsePullArgs(args);
+  const signal = options?.signal;
+  if (signal !== undefined) {
+    validateAbortSignal(signal, 'options.signal');
+    // Eagerly check abort at call time per spec
+    if (signal.aborted) {
+      // Return an iterable that rejects with the abort reason on first pull,
+      // rather than throwing synchronously from pull() itself.
+      return {
+        __proto__: null,
+        // eslint-disable-next-line require-yield
+        async *[SymbolAsyncIterator]() {
+          throw signal.reason;
+        },
+      };
+    }
+  }
+
+  return {
+    __proto__: null,
+    async *[SymbolAsyncIterator]() {
+      yield* createAsyncPipeline(from(source), transforms, signal);
+    },
+  };
+}
+
+// =============================================================================
+// Public API: pipeTo() and pipeToSync()
+// =============================================================================
+
+/**
+ * Write a sync source through transforms to a sync writer.
+ * @param {Iterable} source + * @param {...(Function|object)} args - Transforms, writer, and optional options + * @returns {number} Total bytes written + */ +function pipeToSync(source, ...args) { + const { transforms, writer, options } = parsePipeToArgs(args, 'writeSync'); + + // Handle transform-writer + if (isTransformObject(writer)) { + ArrayPrototypePush(transforms, writer); + } + + // Normalize source and create pipeline + const normalized = fromSync(source); + const pipeline = transforms.length > 0 ? + createSyncPipeline(normalized, transforms) : + normalized; + + let totalBytes = 0; + const hasWriteSync = typeof writer.writeSync === 'function'; + const hasWritevSync = typeof writer.writevSync === 'function'; + const hasEndSync = typeof writer.endSync === 'function'; + + try { + for (const batch of pipeline) { + if (hasWritevSync && batch.length > 1) { + writer.writevSync(batch); + for (let i = 0; i < batch.length; i++) { + totalBytes += TypedArrayPrototypeGetByteLength(batch[i]); + } + } else { + for (let i = 0; i < batch.length; i++) { + const chunk = batch[i]; + if (hasWriteSync) { + writer.writeSync(chunk); + } else { + writer.write(chunk); + } + totalBytes += TypedArrayPrototypeGetByteLength(chunk); + } + } + } + + if (!options?.preventClose) { + if (!hasEndSync || writer.endSync() < 0) { + writer.end?.(); + } + } + } catch (error) { + if (!options?.preventFail) { + writer.fail?.(wrapError(error)); + } + throw error; + } + + return totalBytes; +} + +/** + * Write an async source through transforms to a writer. 
+ * @param {AsyncIterable|Iterable} source + * @param {...(Function|object)} args - Transforms, writer, and optional options + * @returns {Promise} Total bytes written + */ +async function pipeTo(source, ...args) { + const { transforms, writer, options } = parsePipeToArgs(args, 'write'); + if (options?.signal !== undefined) { + validateAbortSignal(options.signal, 'options.signal'); + } + + // Handle transform-writer + if (isTransformObject(writer)) { + ArrayPrototypePush(transforms, writer); + } + + const signal = options?.signal; + + // Check for abort + signal?.throwIfAborted(); + + // Normalize source via from() + const normalized = from(source); + + let totalBytes = 0; + const hasWritev = typeof writer.writev === 'function'; + const hasWriteSync = typeof writer.writeSync === 'function'; + const hasWritevSync = typeof writer.writevSync === 'function'; + const hasEndSync = typeof writer.endSync === 'function'; + // Async fallback for writeBatch when sync write fails partway through. + // Continues writing from batch[startIndex] using async write(). + async function writeBatchAsyncFallback(batch, startIndex) { + for (let i = startIndex; i < batch.length; i++) { + const chunk = batch[i]; + if (hasWriteSync && writer.writeSync(chunk)) { + // Sync retry succeeded + } else { + const result = writer.write( + chunk, signal ? { __proto__: null, signal } : undefined); + if (result !== undefined) { + await result; + } + } + totalBytes += TypedArrayPrototypeGetByteLength(chunk); + } + } + + // Write a batch using try-fallback: sync first, async if needed. + // Returns undefined on sync success, or a Promise when async fallback + // is required. Callers must check: const p = writeBatch(b); if (p) await p; + function writeBatch(batch) { + if (hasWritev && batch.length > 1) { + if (!hasWritevSync || !writer.writevSync(batch)) { + const opts = signal ? 
{ __proto__: null, signal } : undefined; + return PromisePrototypeThen(writer.writev(batch, opts), () => { + for (let i = 0; i < batch.length; i++) { + totalBytes += TypedArrayPrototypeGetByteLength(batch[i]); + } + }); + } + for (let i = 0; i < batch.length; i++) { + totalBytes += TypedArrayPrototypeGetByteLength(batch[i]); + } + return; + } + for (let i = 0; i < batch.length; i++) { + const chunk = batch[i]; + if (!hasWriteSync || !writer.writeSync(chunk)) { + // Sync path failed at index i - fall back to async for the rest. + // Count bytes for chunks already written synchronously (0..i-1). + return writeBatchAsyncFallback(batch, i); + } + totalBytes += TypedArrayPrototypeGetByteLength(chunk); + } + } + + try { + // Fast path: no transforms - iterate normalized source directly + if (transforms.length === 0) { + if (signal) { + for await (const batch of normalized) { + signal.throwIfAborted(); + const p = writeBatch(batch); + if (p) await p; + } + } else { + for await (const batch of normalized) { + const p = writeBatch(batch); + if (p) await p; + } + } + } else { + const pipeline = createAsyncPipeline(normalized, transforms, signal); + + if (signal) { + for await (const batch of pipeline) { + signal.throwIfAborted(); + const p = writeBatch(batch); + if (p) await p; + } + } else { + for await (const batch of pipeline) { + const p = writeBatch(batch); + if (p) await p; + } + } + } + + if (!options?.preventClose) { + if (!hasEndSync || writer.endSync() < 0) { + await writer.end?.(signal ? 
{ __proto__: null, signal } : undefined); + } + } + } catch (error) { + if (!options?.preventFail) { + writer.fail?.(wrapError(error)); + } + throw error; + } + + return totalBytes; +} + +module.exports = { + pipeTo, + pipeToSync, + pull, + pullSync, +}; diff --git a/lib/internal/streams/iter/push.js b/lib/internal/streams/iter/push.js new file mode 100644 index 00000000000000..4c0b3240d45fdb --- /dev/null +++ b/lib/internal/streams/iter/push.js @@ -0,0 +1,721 @@ +'use strict'; + +// New Streams API - Push Stream Implementation +// +// Creates a bonded pair of writer and async iterable for push-based streaming +// with built-in backpressure. + +const { + ArrayIsArray, + ArrayPrototypePush, + MathMax, + PromiseReject, + PromiseResolve, + PromiseWithResolvers, + SymbolAsyncDispose, + SymbolAsyncIterator, + SymbolDispose, + TypedArrayPrototypeGetByteLength, +} = primordials; + +const { + codes: { + ERR_INVALID_ARG_TYPE, + ERR_INVALID_STATE, + }, +} = require('internal/errors'); +const { isError, lazyDOMException } = require('internal/util'); +const { + validateAbortSignal, + validateInteger, +} = require('internal/validators'); + +const { + drainableProtocol, +} = require('internal/streams/iter/types'); + +const { + kPushDefaultHWM, + kResolvedPromise, + clampHWM, + onSignalAbort, + toUint8Array, + convertChunks, + parsePullArgs, + validateBackpressure, +} = require('internal/streams/iter/utils'); + +const { + pull: pullWithTransforms, +} = require('internal/streams/iter/pull'); + +const { + RingBuffer, +} = require('internal/streams/iter/ringbuffer'); + +// ============================================================================= +// PushQueue - Internal Queue with Chunk-Based Backpressure +// ============================================================================= + +class PushQueue { + /** Buffered chunks (each slot is from one write/writev call) */ + #slots = new RingBuffer(); + /** Pending writes waiting for buffer space */ + #pendingWrites = new 
RingBuffer(); + /** Pending reads waiting for data */ + #pendingReads = new RingBuffer(); + /** Pending drains waiting for backpressure to clear */ + #pendingDrains = []; + /** Writer state: 'open' | 'closing' | 'closed' | 'errored' */ + #writerState = 'open'; + /** Consumer state: 'active' | 'returned' | 'thrown' */ + #consumerState = 'active'; + /** Error that closed the stream */ + #error = null; + /** Total bytes written */ + #bytesWritten = 0; + /** Pending end promise (resolves when consumer drains past end sentinel) */ + #pendingEnd = null; + + /** Configuration */ + #highWaterMark; + #backpressure; + #signal; + #abortHandler; + + constructor(options = { __proto__: null }) { + const { + highWaterMark = kPushDefaultHWM, + backpressure = 'strict', + signal, + } = options; + validateInteger(highWaterMark, 'options.highWaterMark'); + validateBackpressure(backpressure); + if (signal !== undefined) { + validateAbortSignal(signal, 'options.signal'); + } + this.#highWaterMark = clampHWM(highWaterMark); + this.#backpressure = backpressure; + this.#signal = signal; + this.#abortHandler = undefined; + + if (this.#signal) { + this.#abortHandler = () => { + this.fail(isError(this.#signal.reason) ? + this.#signal.reason : + lazyDOMException('Aborted', 'AbortError')); + }; + onSignalAbort(this.#signal, this.#abortHandler); + } + } + + // =========================================================================== + // Writer Methods + // =========================================================================== + + /** + * Get slots available before hitting highWaterMark. + * Returns null if writer is closed/errored or consumer has terminated. + * @returns {number | null} + */ + get desiredSize() { + if (this.#writerState !== 'open' || this.#consumerState !== 'active') { + return null; + } + return MathMax(0, this.#highWaterMark - this.#slots.length); + } + + /** + * Check if a sync write would be accepted. 
+ * @returns {boolean} + */ + canWriteSync() { + if (this.#writerState !== 'open') return false; + if (this.#consumerState !== 'active') return false; + if ((this.#backpressure === 'strict' || + this.#backpressure === 'block') && + this.#slots.length >= this.#highWaterMark) { + return false; + } + return true; + } + + /** + * Write chunks synchronously if possible. + * Returns true if write completed, false if buffer is full. + * @returns {boolean} + */ + writeSync(chunks) { + if (this.#writerState !== 'open') return false; + if (this.#consumerState !== 'active') return false; + + if (this.#slots.length >= this.#highWaterMark) { + switch (this.#backpressure) { + case 'strict': + return false; + case 'block': + return false; + case 'drop-oldest': + if (this.#slots.length > 0) { + this.#slots.shift(); + } + break; + case 'drop-newest': + // Discard this write, but return true + for (let i = 0; i < chunks.length; i++) { + this.#bytesWritten += TypedArrayPrototypeGetByteLength(chunks[i]); + } + return true; + } + } + + this.#slots.push(chunks); + for (let i = 0; i < chunks.length; i++) { + this.#bytesWritten += TypedArrayPrototypeGetByteLength(chunks[i]); + } + + this.#resolvePendingReads(); + return true; + } + + /** + * Write chunks asynchronously. + * If signal is provided, a write blocked on backpressure will reject + * immediately when the signal fires. The cancelled write is removed from + * pendingWrites so it does not occupy a slot. The queue itself is NOT put + * into an error state - this is per-operation cancellation, not terminal + * failure. 
+ * @returns {Promise} + */ + async writeAsync(chunks, signal) { + // Check writer state before signal (spec order: state, then signal) + if (this.#writerState === 'closed') { + throw new ERR_INVALID_STATE.TypeError('Writer is closed'); + } + if (this.#writerState === 'closing') { + throw new ERR_INVALID_STATE.TypeError('Writer is closing'); + } + if (this.#writerState === 'errored') { + throw this.#error; + } + if (this.#consumerState !== 'active') { + throw this.#consumerState === 'thrown' && this.#error ? + this.#error : + new ERR_INVALID_STATE.TypeError('Stream closed by consumer'); + } + + // Check for pre-aborted signal (after state checks per spec) + signal?.throwIfAborted(); + + // Try sync first + if (this.writeSync(chunks)) { + return; + } + + // Buffer is full + switch (this.#backpressure) { + case 'strict': + if (this.#pendingWrites.length >= this.#highWaterMark) { + throw new ERR_INVALID_STATE.RangeError( + 'Backpressure violation: too many pending writes. ' + + 'Await each write() call to respect backpressure.'); + } + return this.#createPendingWrite(chunks, signal); + case 'block': + return this.#createPendingWrite(chunks, signal); + default: + throw new ERR_INVALID_STATE( + 'Unexpected: writeSync should have handled non-strict policy'); + } + } + + /** + * Create a pending write promise, optionally racing against a signal. + * If the signal fires, the entry is removed from pendingWrites and the + * promise rejects. Signal listeners are cleaned up on normal resolution. + * @returns {Promise} + */ + #createPendingWrite(chunks, signal) { + const { promise, resolve, reject } = PromiseWithResolvers(); + const entry = { __proto__: null, chunks, resolve, reject }; + this.#pendingWrites.push(entry); + + if (signal) { + const onAbort = () => { + // Remove from queue so it doesn't occupy a slot + const idx = this.#pendingWrites.indexOf(entry); + if (idx !== -1) this.#pendingWrites.removeAt(idx); + reject(signal.reason ?? 
lazyDOMException('Aborted', 'AbortError')); + }; + + // Wrap resolve/reject to clean up signal listener + entry.resolve = function() { + signal.removeEventListener('abort', onAbort); + resolve(); + }; + entry.reject = function(reason) { + signal.removeEventListener('abort', onAbort); + reject(reason); + }; + + signal.addEventListener('abort', onAbort, { __proto__: null, once: true }); + } + + return promise; + } + + /** + * Signal end of stream. Returns total bytes written. + * @returns {number} + */ + end() { + if (this.#writerState === 'errored') { + return -2; // Signal to reject with stored error + } + if (this.#writerState === 'closing' || this.#writerState === 'closed') { + return this.#bytesWritten; // Idempotent + } + + this.#cleanup(); + this.#rejectPendingWrites( + new ERR_INVALID_STATE.TypeError('Writer closed')); + this.#resolvePendingDrains(false); + + // If buffer is empty, close immediately + if (this.#slots.length === 0) { + this.#writerState = 'closed'; + this.#resolvePendingReads(); + return this.#bytesWritten; + } + + // Buffer has data: transition to closing, defer completion until drained + this.#writerState = 'closing'; + return -3; // Signal to PushWriter: create deferred end promise + } + + /** + * Called by the read path when the consumer has drained all data while + * the writer is in the 'closing' state. Transitions to 'closed' and + * resolves the pending end promise. + */ + endDrained() { + if (this.#writerState !== 'closing') return; + this.#writerState = 'closed'; + if (this.#pendingEnd) { + this.#pendingEnd.resolve(this.#bytesWritten); + this.#pendingEnd = null; + } + } + + /** + * Put queue into terminal error state. + * No-op if errored or closed (fully drained). + * If closing (draining), short-circuits the drain. 
+ */ + fail(reason) { + if (this.#writerState === 'errored' || this.#writerState === 'closed') { + return; + } + + const wasClosing = this.#writerState === 'closing'; + this.#writerState = 'errored'; + this.#error = reason ?? new ERR_INVALID_STATE('Failed'); + this.#cleanup(); + this.#rejectPendingReads(this.#error); + this.#rejectPendingDrains(this.#error); + + if (wasClosing) { + // Short-circuit the graceful drain: reject the pending end promise + if (this.#pendingEnd) { + this.#pendingEnd.reject(this.#error); + this.#pendingEnd = null; + } + } else { + this.#rejectPendingWrites(this.#error); + } + } + + get totalBytesWritten() { + return this.#bytesWritten; + } + + get error() { + return this.#error; + } + + get backpressurePolicy() { + return this.#backpressure; + } + + get writerState() { + return this.#writerState; + } + + get pendingEndPromise() { + return this.#pendingEnd?.promise ?? null; + } + + setPendingEnd(pending) { + this.#pendingEnd = pending; + } + + /** + * Force-enqueue chunks into the slots buffer, bypassing capacity checks. + * Used by PushWriter.writeSync() for 'block' policy where the data is + * accepted but false is returned as a backpressure signal. + */ + forceEnqueue(chunks) { + this.#slots.push(chunks); + for (let i = 0; i < chunks.length; i++) { + this.#bytesWritten += TypedArrayPrototypeGetByteLength(chunks[i]); + } + this.#resolvePendingReads(); + } + + /** + * Wait for backpressure to clear (desiredSize > 0). 
+ * @returns {Promise} + */ + waitForDrain() { + const { promise, resolve, reject } = PromiseWithResolvers(); + ArrayPrototypePush(this.#pendingDrains, { __proto__: null, resolve, reject }); + return promise; + } + + // =========================================================================== + // Consumer Methods + // =========================================================================== + + async read() { + // If there's data in the buffer, return it immediately + if (this.#slots.length > 0) { + const result = this.#drain(); + this.#resolvePendingWrites(); + // After draining, check if writer was closing and buffer is now empty + if (this.#writerState === 'closing' && this.#slots.length === 0) { + this.endDrained(); + } + return { __proto__: null, value: result, done: false }; + } + + // Buffer empty and writer closing = drain complete + if (this.#writerState === 'closing') { + this.endDrained(); + return { __proto__: null, value: undefined, done: true }; + } + + if (this.#writerState === 'closed') { + return { __proto__: null, value: undefined, done: true }; + } + + if (this.#writerState === 'errored' && this.#error) { + throw this.#error; + } + + const { promise, resolve, reject } = PromiseWithResolvers(); + this.#pendingReads.push({ __proto__: null, resolve, reject }); + return promise; + } + + consumerReturn() { + if (this.#consumerState !== 'active') return; + this.#consumerState = 'returned'; + this.#cleanup(); + this.#rejectPendingWrites( + new ERR_INVALID_STATE.TypeError('Stream closed by consumer')); + // If closing, reject the pending end promise + if (this.#writerState === 'closing' && this.#pendingEnd) { + this.#pendingEnd.reject( + new ERR_INVALID_STATE.TypeError('Stream closed by consumer')); + this.#pendingEnd = null; + } + // Resolve pending drains with false - no more data will be consumed + this.#resolvePendingDrains(false); + } + + consumerThrow(error) { + if (this.#consumerState !== 'active') return; + this.#consumerState = 'thrown'; + 
this.#error = error; + this.#cleanup(); + this.#rejectPendingWrites(error); + // Reject pending drains - the consumer errored + this.#rejectPendingDrains(error); + } + + // =========================================================================== + // Private Methods + // =========================================================================== + + #drain() { + const result = []; + for (let i = 0; i < this.#slots.length; i++) { + const slot = this.#slots.get(i); + for (let j = 0; j < slot.length; j++) { + ArrayPrototypePush(result, slot[j]); + } + } + this.#slots.clear(); + return result; + } + + #resolvePendingReads() { + while (this.#pendingReads.length > 0) { + if (this.#slots.length > 0) { + const pending = this.#pendingReads.shift(); + const result = this.#drain(); + this.#resolvePendingWrites(); + pending.resolve({ __proto__: null, value: result, done: false }); + } else if (this.#writerState === 'closing' && this.#slots.length === 0) { + this.endDrained(); + const pending = this.#pendingReads.shift(); + pending.resolve({ __proto__: null, value: undefined, done: true }); + } else if (this.#writerState === 'closed') { + const pending = this.#pendingReads.shift(); + pending.resolve({ __proto__: null, value: undefined, done: true }); + } else if (this.#writerState === 'errored' && this.#error) { + const pending = this.#pendingReads.shift(); + pending.reject(this.#error); + } else { + break; + } + } + } + + #resolvePendingWrites() { + while (this.#pendingWrites.length > 0 && + this.#slots.length < this.#highWaterMark) { + const pending = this.#pendingWrites.shift(); + this.#slots.push(pending.chunks); + for (let i = 0; i < pending.chunks.length; i++) { + this.#bytesWritten += TypedArrayPrototypeGetByteLength(pending.chunks[i]); + } + pending.resolve(); + } + + if (this.#slots.length < this.#highWaterMark) { + this.#resolvePendingDrains(true); + } + } + + #resolvePendingDrains(canWrite) { + const drains = this.#pendingDrains; + this.#pendingDrains = []; + for 
(let i = 0; i < drains.length; i++) { + drains[i].resolve(canWrite); + } + } + + #rejectPendingDrains(error) { + const drains = this.#pendingDrains; + this.#pendingDrains = []; + for (let i = 0; i < drains.length; i++) { + drains[i].reject(error); + } + } + + #rejectPendingReads(error) { + while (this.#pendingReads.length > 0) { + this.#pendingReads.shift().reject(error); + } + } + + #rejectPendingWrites(error) { + while (this.#pendingWrites.length > 0) { + this.#pendingWrites.shift().reject(error); + } + } + + #cleanup() { + if (this.#signal && this.#abortHandler) { + this.#signal.removeEventListener('abort', this.#abortHandler); + this.#abortHandler = undefined; + } + } +} + +// ============================================================================= +// PushWriter Implementation +// ============================================================================= + +class PushWriter { + #queue; + + constructor(queue) { + this.#queue = queue; + } + + [drainableProtocol]() { + const desired = this.desiredSize; + if (desired === null) return null; + if (desired > 0) return PromiseResolve(true); + return this.#queue.waitForDrain(); + } + + get desiredSize() { + return this.#queue.desiredSize; + } + + write(chunk, options) { + if (!options?.signal && this.#queue.canWriteSync()) { + const bytes = toUint8Array(chunk); + this.#queue.writeSync([bytes]); + return kResolvedPromise; + } + const bytes = toUint8Array(chunk); + return this.#queue.writeAsync([bytes], options?.signal); + } + + writev(chunks, options) { + if (!ArrayIsArray(chunks)) { + throw new ERR_INVALID_ARG_TYPE('chunks', 'Array', chunks); + } + if (!options?.signal && this.#queue.canWriteSync()) { + const bytes = convertChunks(chunks); + this.#queue.writeSync(bytes); + return kResolvedPromise; + } + const bytes = convertChunks(chunks); + return this.#queue.writeAsync(bytes, options?.signal); + } + + writeSync(chunk) { + const bytes = toUint8Array(chunk); + const result = this.#queue.writeSync([bytes]); + if 
(!result && this.#queue.backpressurePolicy === 'block') { + // Block policy: force-enqueue and return false as backpressure signal. + // Data IS accepted; false tells caller to slow down. + this.#queue.forceEnqueue([bytes]); + return false; + } + return result; + } + + writevSync(chunks) { + if (!ArrayIsArray(chunks)) { + throw new ERR_INVALID_ARG_TYPE('chunks', 'Array', chunks); + } + const bytes = convertChunks(chunks); + const result = this.#queue.writeSync(bytes); + if (!result && this.#queue.backpressurePolicy === 'block') { + this.#queue.forceEnqueue(bytes); + return false; + } + return result; + } + + end(options) { + const result = this.#queue.end(); + if (result === -2) { + // Errored: reject with stored error + return PromiseReject(this.#queue.error); + } + if (result === -3) { + // Closing: buffer has data, create deferred promise that resolves + // when consumer drains past the end sentinel + const { promise, resolve, reject } = PromiseWithResolvers(); + this.#queue.setPendingEnd({ __proto__: null, promise, resolve, reject }); + return promise; + } + // >= 0: byte count (immediate close or idempotent) + return PromiseResolve(result); + } + + endSync() { + const result = this.#queue.end(); + if (result === -2) return -1; // Errored + if (result === -3) return -1; // Buffer not empty, can't wait + return result; + } + + fail(reason) { + this.#queue.fail(reason); + } + + [SymbolAsyncDispose]() { + const state = this.#queue.writerState; + if (state === 'closing') { + // Wait for graceful drain + return this.#queue.pendingEndPromise ?? 
PromiseResolve(); + } + if (state === 'open') { + this.fail(); + } + return PromiseResolve(); + } + + [SymbolDispose]() { + this.fail(); + } +} + +// ============================================================================= +// Readable Implementation +// ============================================================================= + +function createReadable(queue) { + return { + __proto__: null, + [SymbolAsyncIterator]() { + return { + __proto__: null, + async next() { + return queue.read(); + }, + async return() { + queue.consumerReturn(); + return { __proto__: null, value: undefined, done: true }; + }, + async throw(error) { + queue.consumerThrow(error); + return { __proto__: null, value: undefined, done: true }; + }, + }; + }, + }; +} + +// ============================================================================= +// Stream.push() Factory +// ============================================================================= + +function parseArgs(args) { + const result = parsePullArgs(args); + // PushQueue constructor requires a non-undefined options object. + if (result.options === undefined) { + result.options = { __proto__: null }; + } + return result; +} + +/** + * Create a push stream with optional transforms. 
+ * @param {...(Function|object)} args - Transforms, then options (optional) + * @returns {{ writer: Writer, readable: AsyncIterable }} + */ +function push(...args) { + const { transforms, options } = parseArgs(args); + + const queue = new PushQueue(options); + const writer = new PushWriter(queue); + const rawReadable = createReadable(queue); + + // Apply transforms lazily if provided + let readable; + if (transforms.length > 0) { + if (options.signal) { + readable = pullWithTransforms( + rawReadable, ...transforms, { __proto__: null, signal: options.signal }); + } else { + readable = pullWithTransforms(rawReadable, ...transforms); + } + } else { + readable = rawReadable; + } + + return { __proto__: null, writer, readable }; +} + +module.exports = { + push, +}; diff --git a/lib/internal/streams/iter/ringbuffer.js b/lib/internal/streams/iter/ringbuffer.js new file mode 100644 index 00000000000000..a05b7825fb86ae --- /dev/null +++ b/lib/internal/streams/iter/ringbuffer.js @@ -0,0 +1,151 @@ +'use strict'; + +// RingBuffer - O(1) FIFO queue with indexed access. +// +// Replaces plain JS arrays that are used as queues with shift()/push(). +// Array.shift() is O(n) because it copies all remaining elements; +// RingBuffer.shift() is O(1) -- it just advances a head pointer. +// +// Also provides O(1) trimFront(count) to replace Array.splice(0, count). +// +// Capacity is always a power of 2, so modulo is replaced with bitwise AND. + +const { + Array, +} = primordials; + +class RingBuffer { + #backing; + #head = 0; + #size = 0; + #mask; + + constructor(initialCapacity = 16) { + this.#mask = initialCapacity - 1; + this.#backing = new Array(initialCapacity); + } + + get length() { + return this.#size; + } + + /** + * Append an item to the tail. O(1) amortized. + */ + push(item) { + if (this.#size > this.#mask) { + this.#grow(); + } + this.#backing[(this.#head + this.#size) & this.#mask] = item; + this.#size++; + } + + /** + * Prepend an item to the head. O(1) amortized. 
+ */ + unshift(item) { + if (this.#size > this.#mask) { + this.#grow(); + } + this.#head = (this.#head - 1 + this.#mask + 1) & this.#mask; + this.#backing[this.#head] = item; + this.#size++; + } + + /** + * Remove and return the item at the head. O(1). + * @returns {any} + */ + shift() { + if (this.#size === 0) return undefined; + const item = this.#backing[this.#head]; + this.#backing[this.#head] = undefined; // Help GC + this.#head = (this.#head + 1) & this.#mask; + this.#size--; + return item; + } + + /** + * Read item at a logical index (0 = head). O(1). + * Returns undefined if index is out of bounds. + * @returns {any} + */ + get(index) { + if (index < 0 || index >= this.#size) return undefined; + return this.#backing[(this.#head + index) & this.#mask]; + } + + /** + * Remove `count` items from the head without returning them. + * O(count) for GC cleanup. + */ + trimFront(count) { + if (count <= 0) return; + if (count >= this.#size) { + this.clear(); + return; + } + for (let i = 0; i < count; i++) { + this.#backing[(this.#head + i) & this.#mask] = undefined; + } + this.#head = (this.#head + count) & this.#mask; + this.#size -= count; + } + + /** + * Find the logical index of `item` (reference equality). O(n). + * Returns -1 if not found. + * @returns {number} + */ + indexOf(item) { + for (let i = 0; i < this.#size; i++) { + if (this.#backing[(this.#head + i) & this.#mask] === item) { + return i; + } + } + return -1; + } + + /** + * Remove the item at logical `index`, shifting later elements. O(n) worst case. + * Used only on rare abort-signal cancellation path. 
+ */ + removeAt(index) { + if (index < 0 || index >= this.#size) return; + for (let i = index; i < this.#size - 1; i++) { + const from = (this.#head + i + 1) & this.#mask; + const to = (this.#head + i) & this.#mask; + this.#backing[to] = this.#backing[from]; + } + const last = (this.#head + this.#size - 1) & this.#mask; + this.#backing[last] = undefined; + this.#size--; + } + + /** + * Remove all items. O(n) for GC cleanup. + */ + clear() { + for (let i = 0; i < this.#size; i++) { + this.#backing[(this.#head + i) & this.#mask] = undefined; + } + this.#head = 0; + this.#size = 0; + } + + /** + * Double the backing capacity, linearizing the circular layout. + */ + #grow() { + const newCapacity = (this.#mask + 1) * 2; + const newBacking = new Array(newCapacity); + for (let i = 0; i < this.#size; i++) { + newBacking[i] = this.#backing[(this.#head + i) & this.#mask]; + } + this.#backing = newBacking; + this.#head = 0; + this.#mask = newCapacity - 1; + } +} + +module.exports = { RingBuffer }; diff --git a/lib/internal/streams/iter/share.js b/lib/internal/streams/iter/share.js new file mode 100644 index 00000000000000..752c0bfcbcab8f --- /dev/null +++ b/lib/internal/streams/iter/share.js @@ -0,0 +1,651 @@ +'use strict'; + +// New Streams API - Share +// +// Pull-model multi-consumer streaming. Shares a single source among +// multiple consumers with explicit buffering. 
+ +const { + ArrayPrototypePush, + PromisePrototypeThen, + PromiseResolve, + PromiseWithResolvers, + SafeSet, + SymbolAsyncIterator, + SymbolDispose, + SymbolIterator, +} = primordials; + +const { + shareProtocol, + shareSyncProtocol, +} = require('internal/streams/iter/types'); + +const { + from, + fromSync, + isAsyncIterable, + isSyncIterable, +} = require('internal/streams/iter/from'); + +const { + pull: pullWithTransforms, + pullSync: pullSyncWithTransforms, +} = require('internal/streams/iter/pull'); + +const { + kMultiConsumerDefaultHWM, + clampHWM, + getMinCursor, + hasProtocol, + onSignalAbort, + wrapError, + parsePullArgs, + validateBackpressure, +} = require('internal/streams/iter/utils'); + +const { + RingBuffer, +} = require('internal/streams/iter/ringbuffer'); + +const { + codes: { + ERR_INVALID_ARG_TYPE, + ERR_INVALID_RETURN_VALUE, + ERR_OUT_OF_RANGE, + }, +} = require('internal/errors'); +const { + validateAbortSignal, + validateInteger, + validateObject, +} = require('internal/validators'); + +// ============================================================================= +// Async Share Implementation +// ============================================================================= + +class ShareImpl { + #source; + #options; + #buffer = new RingBuffer(); + #bufferStart = 0; + #consumers = new SafeSet(); + #sourceIterator = null; + #sourceExhausted = false; + #sourceError = null; + #cancelled = false; + #pulling = false; + #pullWaiters = []; + + constructor(source, options) { + this.#source = source; + this.#options = options; + } + + get consumerCount() { + return this.#consumers.size; + } + + get bufferSize() { + return this.#buffer.length; + } + + pull(...args) { + const { transforms, options } = parsePullArgs(args); + const rawConsumer = this.#createRawConsumer(); + + if (transforms.length > 0) { + if (options) { + return pullWithTransforms(rawConsumer, ...transforms, options); + } + return pullWithTransforms(rawConsumer, ...transforms); + } + 
return rawConsumer; + } + + #createRawConsumer() { + const state = { + __proto__: null, + cursor: this.#bufferStart, + resolve: null, + reject: null, + detached: false, + }; + + this.#consumers.add(state); + const self = this; + + return { + __proto__: null, + [SymbolAsyncIterator]() { + return { + __proto__: null, + async next() { + if (self.#sourceError) { + state.detached = true; + self.#consumers.delete(state); + throw self.#sourceError; + } + + // Loop until we get data, source is exhausted, or + // consumer is detached. Multiple consumers may be woken + // after a single pull - those that find no data at their + // cursor must re-pull rather than terminating prematurely. + for (;;) { + if (state.detached) { + return { __proto__: null, done: true, value: undefined }; + } + + if (self.#cancelled) { + state.detached = true; + self.#consumers.delete(state); + return { __proto__: null, done: true, value: undefined }; + } + + // Check if data is available in buffer + const bufferIndex = state.cursor - self.#bufferStart; + if (bufferIndex < self.#buffer.length) { + const chunk = self.#buffer.get(bufferIndex); + state.cursor++; + self.#tryTrimBuffer(); + return { __proto__: null, done: false, value: chunk }; + } + + if (self.#sourceExhausted) { + state.detached = true; + self.#consumers.delete(state); + if (self.#sourceError) throw self.#sourceError; + return { __proto__: null, done: true, value: undefined }; + } + + // Need to pull from source - check buffer limit + const canPull = await self.#waitForBufferSpace(); + if (!canPull) { + state.detached = true; + self.#consumers.delete(state); + if (self.#sourceError) throw self.#sourceError; + return { __proto__: null, done: true, value: undefined }; + } + + await self.#pullFromSource(); + } + }, + + async return() { + state.detached = true; + state.resolve = null; + state.reject = null; + self.#consumers.delete(state); + self.#tryTrimBuffer(); + return { __proto__: null, done: true, value: undefined }; + }, + + async 
throw() { + state.detached = true; + state.resolve = null; + state.reject = null; + self.#consumers.delete(state); + self.#tryTrimBuffer(); + return { __proto__: null, done: true, value: undefined }; + }, + }; + }, + }; + } + + cancel(reason) { + if (this.#cancelled) return; + this.#cancelled = true; + + if (reason !== undefined) { + this.#sourceError = reason; + } + + if (this.#sourceIterator?.return) { + PromisePrototypeThen(this.#sourceIterator.return(), undefined, () => {}); + } + + for (const consumer of this.#consumers) { + if (consumer.resolve) { + if (reason !== undefined) { + consumer.reject?.(reason); + } else { + consumer.resolve({ __proto__: null, done: true, value: undefined }); + } + consumer.resolve = null; + consumer.reject = null; + } + consumer.detached = true; + } + this.#consumers.clear(); + + for (let i = 0; i < this.#pullWaiters.length; i++) { + this.#pullWaiters[i](); + } + this.#pullWaiters = []; + } + + [SymbolDispose]() { + this.cancel(); + } + + // Internal methods + + async #waitForBufferSpace() { + while (this.#buffer.length >= this.#options.highWaterMark) { + if (this.#cancelled || this.#sourceError || this.#sourceExhausted) { + return !this.#cancelled; + } + + switch (this.#options.backpressure) { + case 'strict': + throw new ERR_OUT_OF_RANGE( + 'buffer size', `<= ${this.#options.highWaterMark}`, + this.#buffer.length); + case 'block': { + const { promise, resolve } = PromiseWithResolvers(); + ArrayPrototypePush(this.#pullWaiters, resolve); + await promise; + break; + } + case 'drop-oldest': + this.#buffer.shift(); + this.#bufferStart++; + for (const consumer of this.#consumers) { + if (consumer.cursor < this.#bufferStart) { + consumer.cursor = this.#bufferStart; + } + } + return true; + case 'drop-newest': + return true; + } + } + return true; + } + + #pullFromSource() { + if (this.#sourceExhausted || this.#cancelled) { + return PromiseResolve(); + } + + if (this.#pulling) { + const { promise, resolve } = PromiseWithResolvers(); + 
ArrayPrototypePush(this.#pullWaiters, resolve); + return promise; + } + + this.#pulling = true; + + return (async () => { + try { + if (!this.#sourceIterator) { + if (isAsyncIterable(this.#source)) { + this.#sourceIterator = + this.#source[SymbolAsyncIterator](); + } else if (isSyncIterable(this.#source)) { + const syncIterator = + this.#source[SymbolIterator](); + this.#sourceIterator = { + __proto__: null, + async next() { + return syncIterator.next(); + }, + async return() { + return syncIterator.return?.() ?? + { __proto__: null, done: true, value: undefined }; + }, + }; + } else { + throw new ERR_INVALID_ARG_TYPE( + 'source', ['AsyncIterable', 'Iterable'], this.#source); + } + } + + const result = await this.#sourceIterator.next(); + + if (result.done) { + this.#sourceExhausted = true; + } else { + this.#buffer.push(result.value); + } + } catch (error) { + this.#sourceError = wrapError(error); + this.#sourceExhausted = true; + } finally { + this.#pulling = false; + for (let i = 0; i < this.#pullWaiters.length; i++) { + this.#pullWaiters[i](); + } + this.#pullWaiters = []; + } + })(); + } + + #tryTrimBuffer() { + const minCursor = getMinCursor( + this.#consumers, this.#bufferStart + this.#buffer.length); + const trimCount = minCursor - this.#bufferStart; + if (trimCount > 0) { + this.#buffer.trimFront(trimCount); + this.#bufferStart = minCursor; + for (let i = 0; i < this.#pullWaiters.length; i++) { + this.#pullWaiters[i](); + } + this.#pullWaiters = []; + } + } +} + +// ============================================================================= +// Sync Share Implementation +// ============================================================================= + +class SyncShareImpl { + #source; + #options; + #buffer = new RingBuffer(); + #bufferStart = 0; + #consumers = new SafeSet(); + #sourceIterator = null; + #sourceExhausted = false; + #sourceError = null; + #cancelled = false; + + constructor(source, options) { + this.#source = source; + this.#options = 
options; + } + + get consumerCount() { + return this.#consumers.size; + } + + get bufferSize() { + return this.#buffer.length; + } + + pull(...transforms) { + const rawConsumer = this.#createRawConsumer(); + + if (transforms.length > 0) { + return pullSyncWithTransforms(rawConsumer, ...transforms); + } + return rawConsumer; + } + + #createRawConsumer() { + const state = { + __proto__: null, + cursor: this.#bufferStart, + detached: false, + }; + + this.#consumers.add(state); + const self = this; + + return { + __proto__: null, + [SymbolIterator]() { + return { + __proto__: null, + next() { + if (state.detached) { + return { __proto__: null, done: true, value: undefined }; + } + if (self.#sourceError) { + state.detached = true; + self.#consumers.delete(state); + throw self.#sourceError; + } + if (self.#cancelled) { + state.detached = true; + self.#consumers.delete(state); + return { __proto__: null, done: true, value: undefined }; + } + + const bufferIndex = state.cursor - self.#bufferStart; + if (bufferIndex < self.#buffer.length) { + const chunk = self.#buffer.get(bufferIndex); + state.cursor++; + self.#tryTrimBuffer(); + return { __proto__: null, done: false, value: chunk }; + } + + if (self.#sourceExhausted) { + state.detached = true; + self.#consumers.delete(state); + return { __proto__: null, done: true, value: undefined }; + } + + // Check buffer limit + if (self.#buffer.length >= self.#options.highWaterMark) { + switch (self.#options.backpressure) { + case 'strict': + throw new ERR_OUT_OF_RANGE( + 'buffer size', `<= ${self.#options.highWaterMark}`, + self.#buffer.length); + case 'block': + throw new ERR_OUT_OF_RANGE( + 'buffer size', `<= ${self.#options.highWaterMark} ` + + '(blocking not available in sync context)', + self.#buffer.length); + case 'drop-oldest': + self.#buffer.shift(); + self.#bufferStart++; + for (const consumer of self.#consumers) { + if (consumer.cursor < self.#bufferStart) { + consumer.cursor = self.#bufferStart; + } + } + break; + case 
'drop-newest': + state.detached = true; + self.#consumers.delete(state); + return { __proto__: null, done: true, value: undefined }; + } + } + + self.#pullFromSource(); + + if (self.#sourceError) { + state.detached = true; + self.#consumers.delete(state); + throw self.#sourceError; + } + + const newBufferIndex = state.cursor - self.#bufferStart; + if (newBufferIndex < self.#buffer.length) { + const chunk = self.#buffer.get(newBufferIndex); + state.cursor++; + self.#tryTrimBuffer(); + return { __proto__: null, done: false, value: chunk }; + } + + if (self.#sourceExhausted) { + state.detached = true; + self.#consumers.delete(state); + return { __proto__: null, done: true, value: undefined }; + } + + return { __proto__: null, done: true, value: undefined }; + }, + + return() { + state.detached = true; + self.#consumers.delete(state); + self.#tryTrimBuffer(); + return { __proto__: null, done: true, value: undefined }; + }, + + throw() { + state.detached = true; + self.#consumers.delete(state); + self.#tryTrimBuffer(); + return { __proto__: null, done: true, value: undefined }; + }, + }; + }, + }; + } + + cancel(reason) { + if (this.#cancelled) return; + this.#cancelled = true; + + if (reason !== undefined) { + this.#sourceError = reason; + } + + if (this.#sourceIterator?.return) { + this.#sourceIterator.return(); + } + + for (const consumer of this.#consumers) { + consumer.detached = true; + } + this.#consumers.clear(); + } + + [SymbolDispose]() { + this.cancel(); + } + + #pullFromSource() { + if (this.#sourceExhausted || this.#cancelled) return; + + try { + this.#sourceIterator ||= this.#source[SymbolIterator](); + + const result = this.#sourceIterator.next(); + + if (result.done) { + this.#sourceExhausted = true; + } else { + this.#buffer.push(result.value); + } + } catch (error) { + this.#sourceError = wrapError(error); + this.#sourceExhausted = true; + } + } + + #tryTrimBuffer() { + const minCursor = getMinCursor( + this.#consumers, this.#bufferStart + 
this.#buffer.length); + const trimCount = minCursor - this.#bufferStart; + if (trimCount > 0) { + this.#buffer.trimFront(trimCount); + this.#bufferStart = minCursor; + } + } +} + +// ============================================================================= +// Public API +// ============================================================================= + +function share(source, options = { __proto__: null }) { + // Normalize source via from() - accepts strings, ArrayBuffers, protocols, etc. + const normalized = from(source); + validateObject(options, 'options'); + const { + highWaterMark = kMultiConsumerDefaultHWM, + backpressure = 'strict', + signal, + } = options; + validateInteger(highWaterMark, 'options.highWaterMark'); + validateBackpressure(backpressure); + if (signal !== undefined) { + validateAbortSignal(signal, 'options.signal'); + } + + const opts = { + __proto__: null, + highWaterMark: clampHWM(highWaterMark), + backpressure, + signal, + }; + + const shareImpl = new ShareImpl(normalized, opts); + + if (signal) { + onSignalAbort(signal, () => shareImpl.cancel()); + } + + return shareImpl; +} + +function shareSync(source, options = { __proto__: null }) { + // Normalize source via fromSync() - accepts strings, ArrayBuffers, protocols, etc. 
+ const normalized = fromSync(source); + validateObject(options, 'options'); + const { + highWaterMark = kMultiConsumerDefaultHWM, + backpressure = 'strict', + } = options; + validateInteger(highWaterMark, 'options.highWaterMark'); + validateBackpressure(backpressure); + + const opts = { + __proto__: null, + highWaterMark: clampHWM(highWaterMark), + backpressure, + }; + + return new SyncShareImpl(normalized, opts); +} + +function isShareable(value) { + return hasProtocol(value, shareProtocol); +} + +function isSyncShareable(value) { + return hasProtocol(value, shareSyncProtocol); +} + +const Share = { + __proto__: null, + from(input, options) { + if (isShareable(input)) { + const result = input[shareProtocol](options); + if (result === null || typeof result !== 'object') { + throw new ERR_INVALID_RETURN_VALUE( + 'an object', '[Symbol.for(\'Stream.shareProtocol\')]', result); + } + return result; + } + if (isAsyncIterable(input) || isSyncIterable(input)) { + return share(input, options); + } + throw new ERR_INVALID_ARG_TYPE( + 'input', ['Shareable', 'AsyncIterable', 'Iterable'], input); + }, +}; + +const SyncShare = { + __proto__: null, + fromSync(input, options) { + if (isSyncShareable(input)) { + const result = input[shareSyncProtocol](options); + if (result === null || typeof result !== 'object') { + throw new ERR_INVALID_RETURN_VALUE( + 'an object', '[Symbol.for(\'Stream.shareSyncProtocol\')]', result); + } + return result; + } + if (isSyncIterable(input)) { + return shareSync(input, options); + } + throw new ERR_INVALID_ARG_TYPE( + 'input', ['SyncShareable', 'Iterable'], input); + }, +}; + +module.exports = { + Share, + SyncShare, + share, + shareSync, +}; diff --git a/lib/internal/streams/iter/transform.js b/lib/internal/streams/iter/transform.js new file mode 100644 index 00000000000000..4cb417ed98ce32 --- /dev/null +++ b/lib/internal/streams/iter/transform.js @@ -0,0 +1,830 @@ +'use strict'; + +// Compression / Decompression Transforms +// +// Creates bare 
native zlib handles via internalBinding('zlib'), bypassing +// the stream.Transform / ZlibBase / EventEmitter machinery entirely. +// Compression runs on the libuv threadpool via handle.write() (async) so +// I/O and upstream transforms can overlap with compression work. +// Each factory returns a transform descriptor that can be passed to pull(). + +const { + ArrayPrototypeMap, + ArrayPrototypePush, + ArrayPrototypeShift, + MathMax, + NumberIsNaN, + ObjectEntries, + ObjectKeys, + PromiseWithResolvers, + StringPrototypeStartsWith, + SymbolAsyncIterator, + TypedArrayPrototypeFill, + TypedArrayPrototypeGetByteLength, + TypedArrayPrototypeSlice, + Uint32Array, +} = primordials; + +const { Buffer } = require('buffer'); +const { + codes: { + ERR_BROTLI_INVALID_PARAM, + ERR_INVALID_ARG_TYPE, + ERR_OUT_OF_RANGE, + ERR_ZSTD_INVALID_PARAM, + }, + genericNodeError, +} = require('internal/errors'); +const { lazyDOMException } = require('internal/util'); +const { isArrayBufferView, isAnyArrayBuffer } = require('internal/util/types'); +const { kTrustedTransform } = require('internal/streams/iter/types'); +const { + checkRangesOrGetDefault, + validateFiniteNumber, + validateObject, +} = require('internal/validators'); +const binding = internalBinding('zlib'); +const constants = internalBinding('constants').zlib; + +const { + // Zlib modes + DEFLATE, INFLATE, GZIP, GUNZIP, + BROTLI_ENCODE, BROTLI_DECODE, + ZSTD_COMPRESS, ZSTD_DECOMPRESS, + // Zlib flush + Z_NO_FLUSH, Z_FINISH, + // Zlib defaults + Z_DEFAULT_WINDOWBITS, + Z_DEFAULT_STRATEGY, + // Brotli flush + BROTLI_OPERATION_PROCESS, BROTLI_OPERATION_FINISH, + // Zlib ranges + Z_MIN_CHUNK, Z_MIN_WINDOWBITS, Z_MAX_WINDOWBITS, + Z_MIN_LEVEL, Z_MAX_LEVEL, + Z_MIN_MEMLEVEL, Z_MAX_MEMLEVEL, + Z_FIXED, + // Zstd flush + ZSTD_e_continue, ZSTD_e_end, +} = constants; + +// --------------------------------------------------------------------------- +// Option validation helpers (matching lib/zlib.js validation patterns) +// 
--------------------------------------------------------------------------- + +// Default output buffer size for compression transforms. Larger than +// Z_DEFAULT_CHUNK (16KB) to reduce the number of threadpool re-entries +// when the engine has more output than fits in one buffer. 64KB matches +// BATCH_HWM and the typical input chunk size from pull(). +const DEFAULT_OUTPUT_SIZE = 64 * 1024; + +// Batch high water mark - yield output in chunks of approximately this size. +const BATCH_HWM = DEFAULT_OUTPUT_SIZE; + +// Pre-allocated empty buffer for flush/finalize calls. +const kEmpty = Buffer.alloc(0); + +function validateChunkSize(options) { + let chunkSize = options.chunkSize; + if (!validateFiniteNumber(chunkSize, 'options.chunkSize')) { + chunkSize = DEFAULT_OUTPUT_SIZE; + } else if (chunkSize < Z_MIN_CHUNK) { + throw new ERR_OUT_OF_RANGE('options.chunkSize', + `>= ${Z_MIN_CHUNK}`, chunkSize); + } + return chunkSize; +} + +function validateDictionary(dictionary) { + if (dictionary === undefined) return undefined; + if (isArrayBufferView(dictionary)) return dictionary; + if (isAnyArrayBuffer(dictionary)) return Buffer.from(dictionary); + throw new ERR_INVALID_ARG_TYPE( + 'options.dictionary', + ['Buffer', 'TypedArray', 'DataView', 'ArrayBuffer'], + dictionary); +} + +function validateParams(params, maxParam, errClass) { + if (params === undefined) return; + if (typeof params !== 'object' || params === null) { + throw new ERR_INVALID_ARG_TYPE('options.params', 'Object', params); + } + const keys = ObjectKeys(params); + for (let i = 0; i < keys.length; i++) { + const origKey = keys[i]; + const key = +origKey; + if (NumberIsNaN(key) || key < 0 || key > maxParam) { + throw new errClass(origKey); + } + const value = params[origKey]; + if (typeof value !== 'number' && typeof value !== 'boolean') { + throw new ERR_INVALID_ARG_TYPE('options.params[key]', 'number', value); + } + } +} + +// --------------------------------------------------------------------------- +// 
Brotli / Zstd parameter arrays (computed once, reused per init call). +// Mirrors the pattern in lib/zlib.js. +// --------------------------------------------------------------------------- +const kMaxBrotliParam = MathMax( + ...ArrayPrototypeMap( + ObjectEntries(constants), + ({ 0: key, 1: value }) => + (StringPrototypeStartsWith(key, 'BROTLI_PARAM_') ? value : 0), + ), +); +const brotliInitParamsArray = new Uint32Array(kMaxBrotliParam + 1); + +const kMaxZstdCParam = MathMax( + ...ArrayPrototypeMap( + ObjectKeys(constants), + (key) => (StringPrototypeStartsWith(key, 'ZSTD_c_') ? constants[key] : 0), + ), +); +const zstdInitCParamsArray = new Uint32Array(kMaxZstdCParam + 1); + +const kMaxZstdDParam = MathMax( + ...ArrayPrototypeMap( + ObjectKeys(constants), + (key) => (StringPrototypeStartsWith(key, 'ZSTD_d_') ? constants[key] : 0), + ), +); +const zstdInitDParamsArray = new Uint32Array(kMaxZstdDParam + 1); + +// --------------------------------------------------------------------------- +// Handle creation - bare native handles, no Transform/EventEmitter overhead. +// +// Each factory accepts a processCallback (called from the threadpool +// completion path in C++) and an onError handler. +// --------------------------------------------------------------------------- + +/** + * Create a bare Zlib handle (gzip, gunzip, deflate, inflate). + * @returns {{ handle: object, writeState: Uint32Array, chunkSize: number }} + */ +function createZlibHandle(mode, options, processCallback, onError) { + // Validate all options before creating the native handle to avoid + // "close before init" assertion if validation throws. + const chunkSize = validateChunkSize(options); + const windowBits = checkRangesOrGetDefault( + options.windowBits, 'options.windowBits', + Z_MIN_WINDOWBITS, Z_MAX_WINDOWBITS, Z_DEFAULT_WINDOWBITS); + // Default compression level 4 (not Z_DEFAULT_COMPRESSION which maps to + // level 6). 
Level 4 is ~1.5x faster with only ~5-10% worse compression + // ratio - the sweet spot for streaming and HTTP content-encoding. + const level = checkRangesOrGetDefault( + options.level, 'options.level', + Z_MIN_LEVEL, Z_MAX_LEVEL, 4); + // memLevel 9 uses ~128KB more memory than 8 but provides faster hash + // lookups during compression. Negligible memory cost for the speed gain. + const memLevel = checkRangesOrGetDefault( + options.memLevel, 'options.memLevel', + Z_MIN_MEMLEVEL, Z_MAX_MEMLEVEL, 9); + const strategy = checkRangesOrGetDefault( + options.strategy, 'options.strategy', + Z_DEFAULT_STRATEGY, Z_FIXED, Z_DEFAULT_STRATEGY); + const dictionary = validateDictionary(options.dictionary); + + const handle = new binding.Zlib(mode); + const writeState = new Uint32Array(2); + + handle.onerror = onError; + handle.init( + windowBits, level, memLevel, strategy, + writeState, processCallback, dictionary, + ); + + return { __proto__: null, handle, writeState, chunkSize }; +} + +/** + * Create a bare Brotli handle. + * @returns {{ handle: object, writeState: Uint32Array, chunkSize: number }} + */ +function createBrotliHandle(mode, options, processCallback, onError) { + // Validate before creating native handle. + const chunkSize = validateChunkSize(options); + const dictionary = validateDictionary(options.dictionary); + validateParams(options.params, kMaxBrotliParam, ERR_BROTLI_INVALID_PARAM); + + const handle = mode === BROTLI_ENCODE ? + new binding.BrotliEncoder(mode) : new binding.BrotliDecoder(mode); + const writeState = new Uint32Array(2); + + TypedArrayPrototypeFill(brotliInitParamsArray, -1); + // Streaming-appropriate defaults: quality 6 (not 11) and lgwin 20 (1MB, + // not 4MB). Quality 11 is intended for offline/build-time compression + // and allocates ~400MB of internal state. Quality 6 is ~10x faster with + // only ~10-15% worse compression ratio - the standard for dynamic HTTP + // content-encoding (nginx, Caddy, Cloudflare all use 4-6). 
+ if (mode === BROTLI_ENCODE) { + brotliInitParamsArray[constants.BROTLI_PARAM_QUALITY] = 6; + brotliInitParamsArray[constants.BROTLI_PARAM_LGWIN] = 20; + } + if (options.params) { + // User-supplied params override the defaults above. + const params = options.params; + const keys = ObjectKeys(params); + for (let i = 0; i < keys.length; i++) { + const key = +keys[i]; + brotliInitParamsArray[key] = params[keys[i]]; + } + } + + handle.onerror = onError; + handle.init( + brotliInitParamsArray, + writeState, + processCallback, + dictionary, + ); + + return { __proto__: null, handle, writeState, chunkSize }; +} + +/** + * Create a bare Zstd handle. + * @returns {{ handle: object, writeState: Uint32Array, chunkSize: number }} + */ +function createZstdHandle(mode, options, processCallback, onError) { + const isCompress = mode === ZSTD_COMPRESS; + + // Validate before creating native handle. + const chunkSize = validateChunkSize(options); + const dictionary = validateDictionary(options.dictionary); + const maxParam = isCompress ? kMaxZstdCParam : kMaxZstdDParam; + validateParams(options.params, maxParam, ERR_ZSTD_INVALID_PARAM); + + const pledgedSrcSize = options.pledgedSrcSize; + if (pledgedSrcSize !== undefined) { + if (typeof pledgedSrcSize !== 'number' || NumberIsNaN(pledgedSrcSize)) { + throw new ERR_INVALID_ARG_TYPE('options.pledgedSrcSize', 'number', + pledgedSrcSize); + } + if (pledgedSrcSize < 0) { + throw new ERR_OUT_OF_RANGE('options.pledgedSrcSize', '>= 0', + pledgedSrcSize); + } + } + + const handle = isCompress ? + new binding.ZstdCompress() : new binding.ZstdDecompress(); + const writeState = new Uint32Array(2); + + const initArray = isCompress ? 
zstdInitCParamsArray : zstdInitDParamsArray; + TypedArrayPrototypeFill(initArray, -1); + if (options.params) { + const params = options.params; + const keys = ObjectKeys(params); + for (let i = 0; i < keys.length; i++) { + const key = +keys[i]; + initArray[key] = params[keys[i]]; + } + } + + handle.onerror = onError; + handle.init( + initArray, + pledgedSrcSize, + writeState, + processCallback, + dictionary, + ); + + return { __proto__: null, handle, writeState, chunkSize }; +} + +// --------------------------------------------------------------------------- +// Core: makeZlibTransform +// +// Uses async handle.write() so compression runs on the libuv threadpool. +// The generator manually iterates the source with pre-reading: the next +// upstream read+transform is started before awaiting the current compression, +// so I/O and upstream work overlap with threadpool compression. +// --------------------------------------------------------------------------- +function makeZlibTransform(createHandleFn, processFlag, finishFlag) { + return { + __proto__: null, + [kTrustedTransform]: true, + transform: async function*(source, options) { + const { signal } = options; + + // Fail fast if already aborted - don't allocate a native handle. + signal?.throwIfAborted(); + + // ---- Per-invocation state shared with the write callback ---- + let outBuf; + let outOffset = 0; + let chunkSize; + let pending = []; + let pendingBytes = 0; + + // Current write operation state (read by the callback for looping). + let resolveWrite, rejectWrite; + let writeInput, writeFlush; + let writeInOff, writeAvailIn, writeAvailOutBefore; + + // processCallback: called by C++ AfterThreadPoolWork when compression + // on the threadpool completes. Collects output, loops if the engine + // has more output to produce (availOut === 0), then resolves the + // promise when all output for this input chunk is collected. 
+ function onWriteComplete() { + const availOut = writeState[0]; + const availInAfter = writeState[1]; + const have = writeAvailOutBefore - availOut; + const bufferExhausted = availOut === 0 || outOffset + have >= chunkSize; + + if (have > 0) { + if (bufferExhausted && outOffset === 0) { + // Entire buffer filled from start - yield directly, no copy. + ArrayPrototypePush(pending, outBuf); + } else if (bufferExhausted) { + // Tail of buffer filled and buffer is being replaced - + // subarray is safe since outBuf reference is overwritten below. + ArrayPrototypePush(pending, + outBuf.subarray(outOffset, outOffset + have)); + } else { + // Partial fill, buffer will be reused - must copy. + ArrayPrototypePush(pending, + TypedArrayPrototypeSlice(outBuf, + outOffset, + outOffset + have)); + } + pendingBytes += have; + outOffset += have; + } + + // Reallocate output buffer if exhausted. + if (bufferExhausted) { + outBuf = Buffer.allocUnsafe(chunkSize); + outOffset = 0; + } + + if (availOut === 0) { + // Engine has more output - but if aborted, don't loop. + if (!resolveWrite) return; + + const consumed = writeAvailIn - availInAfter; + writeInOff += consumed; + writeAvailIn = availInAfter; + writeAvailOutBefore = chunkSize - outOffset; + + handle.write(writeFlush, + writeInput, writeInOff, writeAvailIn, + outBuf, outOffset, writeAvailOutBefore); + return; // Will call onWriteComplete again. + } + + // All input consumed and output collected. + handle.buffer = null; + const resolve = resolveWrite; + resolveWrite = undefined; + rejectWrite = undefined; + if (resolve) resolve(); + } + + // onError: called by C++ when the engine encounters an error. + // Fires instead of onWriteComplete - reject the promise. 
+ function onError(message, errno, code) { + const error = genericNodeError(message, { __proto__: null, errno, code }); + error.errno = errno; + error.code = code; + const reject = rejectWrite; + resolveWrite = undefined; + rejectWrite = undefined; + if (reject) reject(error); + } + + // ---- Create the handle with our callbacks ---- + const result = createHandleFn(onWriteComplete, onError); + const handle = result.handle; + const writeState = result.writeState; + chunkSize = result.chunkSize; + outBuf = Buffer.allocUnsafe(chunkSize); + + // Abort handler: reject any in-flight threadpool operation so the + // generator doesn't block waiting for compression to finish. + const onAbort = () => { + const reject = rejectWrite; + resolveWrite = undefined; + rejectWrite = undefined; + if (reject) { + reject(signal.reason ?? + lazyDOMException('The operation was aborted', 'AbortError')); + } + }; + signal.addEventListener('abort', onAbort, { __proto__: null, once: true }); + + // Dispatch input to the threadpool and return a promise. + function processInputAsync(input, flushFlag) { + const { promise, resolve, reject } = PromiseWithResolvers(); + resolveWrite = resolve; + rejectWrite = reject; + writeInput = input; + writeFlush = flushFlag; + writeInOff = 0; + writeAvailIn = TypedArrayPrototypeGetByteLength(input); + writeAvailOutBefore = chunkSize - outOffset; + + // Keep input alive while the threadpool references it. + handle.buffer = input; + + handle.write(flushFlag, + input, 0, writeAvailIn, + outBuf, outOffset, writeAvailOutBefore); + return promise; + } + + function drainBatch() { + if (pendingBytes <= BATCH_HWM) { + // Swap instead of splice - avoids copying the array. 
+ const batch = pending; + pending = []; + pendingBytes = 0; + return batch; + } + const batch = []; + let batchBytes = 0; + while (pending.length > 0 && batchBytes < BATCH_HWM) { + const buf = ArrayPrototypeShift(pending); + ArrayPrototypePush(batch, buf); + const len = TypedArrayPrototypeGetByteLength(buf); + batchBytes += len; + pendingBytes -= len; + } + return batch; + } + + let finalized = false; + + const iter = source[SymbolAsyncIterator](); + try { + // Manually iterate the source so we can pre-read: calling + // iter.next() starts the upstream read + transform on libuv + // before we await the current compression on the threadpool. + let nextResult = iter.next(); + + while (true) { + const { value: chunks, done } = await nextResult; + if (done) break; + + signal?.throwIfAborted(); + + if (chunks === null) { + // Flush signal - finalize the engine. + if (!finalized) { + finalized = true; + await processInputAsync(kEmpty, finishFlag); + while (pending.length > 0) { + yield drainBatch(); + } + } + nextResult = iter.next(); + continue; + } + + // Pre-read: start upstream I/O + transform for the NEXT batch + // while we compress the current batch on the threadpool. + nextResult = iter.next(); + + for (let i = 0; i < chunks.length; i++) { + await processInputAsync(chunks[i], processFlag); + } + + if (pendingBytes >= BATCH_HWM) { + while (pending.length > 0 && pendingBytes >= BATCH_HWM) { + yield drainBatch(); + } + } + if (pending.length > 0) { + yield drainBatch(); + } + } + + // Source ended - finalize if not already done by a null signal. + if (!finalized && !signal.aborted) { + finalized = true; + await processInputAsync(kEmpty, finishFlag); + while (pending.length > 0) { + yield drainBatch(); + } + } + } finally { + signal.removeEventListener('abort', onAbort); + handle.close(); + // Close the upstream iterator so its finally blocks run promptly + // rather than waiting for GC. + try { await iter.return?.(); } catch { /* Intentional no-op. 
*/ } + } + }, + }; +} + +// --------------------------------------------------------------------------- +// Compression factories +// --------------------------------------------------------------------------- + +// --------------------------------------------------------------------------- +// Core: makeZlibTransformSync +// +// Synchronous counterpart to makeZlibTransform. Uses handle.writeSync() +// which runs compression directly on the main thread (no threadpool). +// Returns a stateful sync transform (generator function). +// --------------------------------------------------------------------------- +function makeZlibTransformSync(createHandleFn, processFlag, finishFlag) { + return { + __proto__: null, + transform: function*(source) { + // The processCallback is never called in sync mode, but handle.init() + // requires it. Pass a no-op. + let error = null; + function onError(message, errno, code) { + error = genericNodeError(message, { __proto__: null, errno, code }); + error.errno = errno; + error.code = code; + } + + const result = createHandleFn(() => {}, onError); + const handle = result.handle; + const writeState = result.writeState; + const chunkSize = result.chunkSize; + let outBuf = Buffer.allocUnsafe(chunkSize); + let outOffset = 0; + let pending = []; + let pendingBytes = 0; + + function processSyncInput(input, flushFlag) { + let inOff = 0; + let availIn = TypedArrayPrototypeGetByteLength(input); + let availOutBefore = chunkSize - outOffset; + + handle.writeSync(flushFlag, + input, inOff, availIn, + outBuf, outOffset, availOutBefore); + if (error) throw error; + + while (true) { + const availOut = writeState[0]; + const availInAfter = writeState[1]; + const have = availOutBefore - availOut; + const bufferExhausted = availOut === 0 || + outOffset + have >= chunkSize; + + if (have > 0) { + if (bufferExhausted && outOffset === 0) { + // Entire buffer filled - yield directly, no copy. 
+ ArrayPrototypePush(pending, outBuf); + } else if (bufferExhausted) { + // Tail filled, buffer being replaced - subarray is safe. + ArrayPrototypePush(pending, + outBuf.subarray(outOffset, outOffset + have)); + } else { + // Partial fill, buffer reused - must copy. + ArrayPrototypePush(pending, + TypedArrayPrototypeSlice(outBuf, + outOffset, + outOffset + have)); + } + pendingBytes += have; + outOffset += have; + } + + if (bufferExhausted) { + outBuf = Buffer.allocUnsafe(chunkSize); + outOffset = 0; + } + + if (availOut === 0) { + // Engine has more output - loop. + const consumed = availIn - availInAfter; + inOff += consumed; + availIn = availInAfter; + availOutBefore = chunkSize - outOffset; + + handle.writeSync(flushFlag, + input, inOff, availIn, + outBuf, outOffset, availOutBefore); + if (error) throw error; + continue; + } + + // All input consumed. + break; + } + } + + function drainBatch() { + if (pendingBytes <= BATCH_HWM) { + const batch = pending; + pending = []; + pendingBytes = 0; + return batch; + } + const batch = []; + let batchBytes = 0; + while (pending.length > 0 && batchBytes < BATCH_HWM) { + const buf = ArrayPrototypeShift(pending); + const len = TypedArrayPrototypeGetByteLength(buf); + ArrayPrototypePush(batch, buf); + batchBytes += len; + pendingBytes -= len; + } + return batch; + } + + try { + for (const batch of source) { + if (batch === null) { + // Flush signal - finalize the engine. 
+ processSyncInput(Buffer.alloc(0), finishFlag); + while (pending.length > 0) { + yield drainBatch(); + } + continue; + } + + for (let i = 0; i < batch.length; i++) { + processSyncInput(batch[i], processFlag); + } + + if (pendingBytes >= BATCH_HWM) { + while (pending.length > 0 && pendingBytes >= BATCH_HWM) { + yield drainBatch(); + } + } + if (pending.length > 0) { + yield drainBatch(); + } + } + } finally { + handle.close(); + } + }, + }; +} + +// --------------------------------------------------------------------------- +// Async compression factories +// --------------------------------------------------------------------------- + +const kNullPrototype = { __proto__: null }; + +function compressGzip(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransform( + (cb, onErr) => createZlibHandle(GZIP, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, + ); +} + +function compressDeflate(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransform( + (cb, onErr) => createZlibHandle(DEFLATE, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, + ); +} + +function compressBrotli(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransform( + (cb, onErr) => createBrotliHandle(BROTLI_ENCODE, options, cb, onErr), + BROTLI_OPERATION_PROCESS, BROTLI_OPERATION_FINISH, + ); +} + +function compressZstd(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransform( + (cb, onErr) => createZstdHandle(ZSTD_COMPRESS, options, cb, onErr), + ZSTD_e_continue, ZSTD_e_end, + ); +} + +// --------------------------------------------------------------------------- +// Decompression factories +// --------------------------------------------------------------------------- + +function decompressGzip(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransform( + (cb, onErr) => createZlibHandle(GUNZIP, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, 
+ ); +} + +function decompressDeflate(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransform( + (cb, onErr) => createZlibHandle(INFLATE, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, + ); +} + +function decompressBrotli(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransform( + (cb, onErr) => createBrotliHandle(BROTLI_DECODE, options, cb, onErr), + BROTLI_OPERATION_PROCESS, BROTLI_OPERATION_FINISH, + ); +} + +function decompressZstd(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransform( + (cb, onErr) => createZstdHandle(ZSTD_DECOMPRESS, options, cb, onErr), + ZSTD_e_continue, ZSTD_e_end, + ); +} + +// --------------------------------------------------------------------------- +// Sync compression factories +// --------------------------------------------------------------------------- + +function compressGzipSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZlibHandle(GZIP, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, + ); +} + +function compressDeflateSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZlibHandle(DEFLATE, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, + ); +} + +function compressBrotliSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createBrotliHandle(BROTLI_ENCODE, options, cb, onErr), + BROTLI_OPERATION_PROCESS, BROTLI_OPERATION_FINISH, + ); +} + +function compressZstdSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZstdHandle(ZSTD_COMPRESS, options, cb, onErr), + ZSTD_e_continue, ZSTD_e_end, + ); +} + +// --------------------------------------------------------------------------- +// Sync decompression factories +// 
--------------------------------------------------------------------------- + +function decompressGzipSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZlibHandle(GUNZIP, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, + ); +} + +function decompressDeflateSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZlibHandle(INFLATE, options, cb, onErr), + Z_NO_FLUSH, Z_FINISH, + ); +} + +function decompressBrotliSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createBrotliHandle(BROTLI_DECODE, options, cb, onErr), + BROTLI_OPERATION_PROCESS, BROTLI_OPERATION_FINISH, + ); +} + +function decompressZstdSync(options = kNullPrototype) { + validateObject(options, 'options'); + return makeZlibTransformSync( + (cb, onErr) => createZstdHandle(ZSTD_DECOMPRESS, options, cb, onErr), + ZSTD_e_continue, ZSTD_e_end, + ); +} + +module.exports = { + compressBrotli, + compressBrotliSync, + compressDeflate, + compressDeflateSync, + compressGzip, + compressGzipSync, + compressZstd, + compressZstdSync, + decompressBrotli, + decompressBrotliSync, + decompressDeflate, + decompressDeflateSync, + decompressGzip, + decompressGzipSync, + decompressZstd, + decompressZstdSync, +}; diff --git a/lib/internal/streams/iter/types.js b/lib/internal/streams/iter/types.js new file mode 100644 index 00000000000000..c205db00e3782a --- /dev/null +++ b/lib/internal/streams/iter/types.js @@ -0,0 +1,66 @@ +'use strict'; + +const { + Symbol, + SymbolFor, +} = primordials; + +/** + * Symbol for sync value-to-streamable conversion protocol. + * Objects implementing this can be written to streams or yielded + * from generators. Works in both sync and async contexts. + * + * Third-party: [Symbol.for('Stream.toStreamable')]() { ... 
} + */ +const toStreamable = SymbolFor('Stream.toStreamable'); + +/** + * Symbol for async value-to-streamable conversion protocol. + * Objects implementing this can be written to async streams. + * Works in async contexts only. + * + * Third-party: [Symbol.for('Stream.toAsyncStreamable')]() { ... } + */ +const toAsyncStreamable = SymbolFor('Stream.toAsyncStreamable'); + +/** + * Symbol for Broadcastable protocol - object can provide a Broadcast. + */ +const broadcastProtocol = SymbolFor('Stream.broadcastProtocol'); + +/** + * Symbol for Shareable protocol - object can provide a Share. + */ +const shareProtocol = SymbolFor('Stream.shareProtocol'); + +/** + * Symbol for SyncShareable protocol - object can provide a SyncShare. + */ +const shareSyncProtocol = SymbolFor('Stream.shareSyncProtocol'); + +/** + * Symbol for Drainable protocol - object can signal when backpressure + * clears. Used to bridge event-driven sources that need drain notification. + */ +const drainableProtocol = SymbolFor('Stream.drainableProtocol'); + +/** + * Internal sentinel for trusted stateful transforms. A transform object + * with [kTrustedTransform] = true signals that: + * 1. It handles source exhaustion (done) internally - no withFlushAsync + * wrapper needed. + * 2. It always yields valid Uint8Array[] batches - no isUint8ArrayBatch + * validation needed on each yield. + * This is NOT a public protocol symbol - it uses Symbol() not Symbol.for(). 
+ */ +const kTrustedTransform = Symbol('kTrustedTransform'); + +module.exports = { + broadcastProtocol, + drainableProtocol, + kTrustedTransform, + shareProtocol, + shareSyncProtocol, + toAsyncStreamable, + toStreamable, +}; diff --git a/lib/internal/streams/iter/utils.js b/lib/internal/streams/iter/utils.js new file mode 100644 index 00000000000000..2a156d2828a2e0 --- /dev/null +++ b/lib/internal/streams/iter/utils.js @@ -0,0 +1,291 @@ +'use strict'; + +const { + Array, + ArrayBufferPrototypeGetByteLength, + ArrayPrototypeSlice, + MathMax, + MathMin, + NumberMAX_SAFE_INTEGER, + PromiseResolve, + String, + TypedArrayPrototypeGetBuffer, + TypedArrayPrototypeGetByteLength, + TypedArrayPrototypeGetByteOffset, + Uint8Array, +} = primordials; + +const { TextEncoder } = require('internal/encoding'); +const { + codes: { + ERR_INVALID_ARG_TYPE, + ERR_OPERATION_FAILED, + }, +} = require('internal/errors'); +const { isError } = require('internal/util'); + +const { Buffer } = require('buffer'); + +const { isSharedArrayBuffer, isUint8Array } = require('internal/util/types'); + +const { validateOneOf } = require('internal/validators'); + +// Cached resolved promise to avoid allocating a new one on every sync fast-path. +const kResolvedPromise = PromiseResolve(); + +// Shared TextEncoder instance for string conversion. +const encoder = new TextEncoder(); + +// Default high water marks for push and multi-consumer streams. These values +// are somewhat arbitrary but have been tested across various workloads and +// appear to yield the best overall throughput/latency balance. + +/** Default high water mark for push streams (single-consumer). */ +const kPushDefaultHWM = 4; + +/** Default high water mark for broadcast and share streams (multi-consumer). */ +const kMultiConsumerDefaultHWM = 16; + +/** + * Clamp a high water mark to [1, MAX_SAFE_INTEGER]. 
+ * @param {number} value + * @returns {number} + */ +function clampHWM(value) { + return MathMax(1, MathMin(NumberMAX_SAFE_INTEGER, value)); +} + +/** + * Register a handler for an AbortSignal, handling the already-aborted case. + * If the signal is already aborted, calls handler immediately. + * Otherwise, adds a one-time 'abort' listener. + * @param {AbortSignal} signal + * @param {Function} handler + */ +function onSignalAbort(signal, handler) { + if (signal.aborted) { + handler(); + } else { + signal.addEventListener('abort', handler, { __proto__: null, once: true }); + } +} + +/** + * Compute the minimum cursor across a set of consumers. + * Returns fallback if the set is empty. + * @param {Set} consumers - Set of objects with a `cursor` property + * @param {number} fallback - Value to return when set is empty + * @returns {number} + */ +function getMinCursor(consumers, fallback) { + let min = Infinity; + for (const consumer of consumers) { + if (consumer.cursor < min) { + min = consumer.cursor; + } + } + return min === Infinity ? fallback : min; +} + +/** + * Convert a chunk (string or Uint8Array) to Uint8Array. + * Strings are UTF-8 encoded. + * @param {Uint8Array|string} chunk + * @returns {Uint8Array} + */ +function toUint8Array(chunk) { + if (typeof chunk === 'string') { + return encoder.encode(chunk); + } + if (!isUint8Array(chunk)) { + throw new ERR_INVALID_ARG_TYPE('chunk', ['string', 'Uint8Array'], chunk); + } + return chunk; +} + +/** + * Check if all chunks in an array are already Uint8Array (no strings). + * Short-circuits on the first string found. + * @param {Array} chunks + * @returns {boolean} + */ +function allUint8Array(chunks) { + // Ok, well, kind of. This is more a check for "no strings"... + for (let i = 0; i < chunks.length; i++) { + if (typeof chunks[i] === 'string') return false; + } + return true; +} + +/** + * Concatenate multiple Uint8Arrays into a single Uint8Array. 
+ * @param {Uint8Array[]} chunks + * @returns {Uint8Array} + */ +function concatBytes(chunks) { + // Empty stream: return zero-length Uint8Array + if (chunks.length === 0) { + return new Uint8Array(0); + } + // Single chunk: return directly if it covers the entire backing buffer + if (chunks.length === 1) { + const chunk = chunks[0]; + const buf = TypedArrayPrototypeGetBuffer(chunk); + // SharedArrayBuffer is not available in primordials, so use + // direct property access for its byteLength. + const bufByteLength = isSharedArrayBuffer(buf) ? + buf.byteLength : + ArrayBufferPrototypeGetByteLength(buf); + if (TypedArrayPrototypeGetByteOffset(chunk) === 0 && + TypedArrayPrototypeGetByteLength(chunk) === bufByteLength) { + return chunk; + } + } + // Multiple chunks or shared buffer: concatenate + const buf = Buffer.concat(chunks); + return new Uint8Array( + TypedArrayPrototypeGetBuffer(buf), + TypedArrayPrototypeGetByteOffset(buf), + TypedArrayPrototypeGetByteLength(buf)); +} + +/** + * Convert an array of chunks (strings or Uint8Arrays) to a Uint8Array[]. + * Always returns a fresh copy of the array. + * @param {Array} chunks + * @returns {Uint8Array[]} + */ +function convertChunks(chunks) { + if (allUint8Array(chunks)) { + return ArrayPrototypeSlice(chunks); + } + const len = chunks.length; + const result = new Array(len); + for (let i = 0; i < len; i++) { + result[i] = toUint8Array(chunks[i]); + } + return result; +} + +/** + * Wrap a caught value as an Error, converting non-Error values. + * @param {unknown} error + * @returns {Error} + */ +function wrapError(error) { + return isError(error) ? error : new ERR_OPERATION_FAILED(String(error)); +} + +/** + * Check if a value implements a Symbol-keyed protocol (has a function + * at the given symbol key). 
+ * @param {unknown} value + * @param {symbol} symbol + * @returns {boolean} + */ +function hasProtocol(value, symbol) { + return ( + value !== null && + typeof value === 'object' && + symbol in value && + typeof value[symbol] === 'function' + ); +} + +/** + * Check if a value is PullOptions (object without transform or write property). + * @param {unknown} value + * @returns {boolean} + */ +function isPullOptions(value) { + return ( + value !== null && + typeof value === 'object' && + !('transform' in value) && + !('write' in value) + ); +} + +/** + * Check if a value is a stateful transform object (has a transform method). + * @param {unknown} value + * @returns {boolean} + */ +function isTransformObject(value) { + return typeof value?.transform === 'function'; +} + +/** + * Check if a value is a valid transform (function or transform object). + * @param {unknown} value + * @returns {boolean} + */ +function isTransform(value) { + return typeof value === 'function' || isTransformObject(value); +} + +/** + * Parse variadic arguments for pull/pullSync. + * Returns { transforms, options } + * @param {Array} args + * @returns {{ transforms: Array, options: object|undefined }} + */ +function parsePullArgs(args) { + if (args.length === 0) { + return { __proto__: null, transforms: [], options: undefined }; + } + + let transforms; + let options; + const last = args[args.length - 1]; + if (isPullOptions(last)) { + transforms = ArrayPrototypeSlice(args, 0, -1); + options = last; + } else { + transforms = args; + options = undefined; + } + + for (let i = 0; i < transforms.length; i++) { + if (!isTransform(transforms[i])) { + throw new ERR_INVALID_ARG_TYPE( + `transforms[${i}]`, ['Function', 'Object with transform()'], + transforms[i]); + } + } + + return { __proto__: null, transforms, options }; +} + +/** + * Validate backpressure option value. 
+ * @param {string} value + */ +function validateBackpressure(value) { + validateOneOf(value, 'options.backpressure', [ + 'strict', + 'block', + 'drop-oldest', + 'drop-newest', + ]); +} + +module.exports = { + kMultiConsumerDefaultHWM, + kPushDefaultHWM, + kResolvedPromise, + allUint8Array, + clampHWM, + concatBytes, + convertChunks, + getMinCursor, + hasProtocol, + isPullOptions, + isTransform, + isTransformObject, + onSignalAbort, + parsePullArgs, + toUint8Array, + validateBackpressure, + wrapError, +}; diff --git a/lib/stream/iter.js b/lib/stream/iter.js new file mode 100644 index 00000000000000..e77e485a7f2bd5 --- /dev/null +++ b/lib/stream/iter.js @@ -0,0 +1,180 @@ +'use strict'; + +// Public entry point for the iterable streams API. +// Usage: require('stream/iter') or require('node:stream/iter') +// Requires: --experimental-stream-iter + +const { + ObjectFreeze, +} = primordials; + +const { emitExperimentalWarning } = require('internal/util'); +emitExperimentalWarning('stream/iter'); + +// Protocol symbols +const { + toStreamable, + toAsyncStreamable, + broadcastProtocol, + shareProtocol, + shareSyncProtocol, + drainableProtocol, +} = require('internal/streams/iter/types'); + +// Factories +const { push } = require('internal/streams/iter/push'); +const { duplex } = require('internal/streams/iter/duplex'); +const { from, fromSync } = require('internal/streams/iter/from'); + +// Pipelines +const { + pull, + pullSync, + pipeTo, + pipeToSync, +} = require('internal/streams/iter/pull'); + +// Consumers +const { + bytes, + bytesSync, + text, + textSync, + arrayBuffer, + arrayBufferSync, + array, + arraySync, + tap, + tapSync, + merge, + ondrain, +} = require('internal/streams/iter/consumers'); + +// Multi-consumer +const { broadcast, Broadcast } = require('internal/streams/iter/broadcast'); +const { + share, + shareSync, + Share, + SyncShare, +} = require('internal/streams/iter/share'); + +/** + * Stream namespace - unified access to all stream functions. 
+ * @example + * const { Stream } = require('stream/iter'); + * + * const { writer, readable } = Stream.push(); + * await writer.write("hello"); + * await writer.end(); + * + * const output = Stream.pull(readable, transform1, transform2); + * const data = await Stream.bytes(output); + */ +const Stream = ObjectFreeze({ + // Factories + push, + duplex, + from, + fromSync, + + // Pipelines + pull, + pullSync, + + // Pipe to destination + pipeTo, + pipeToSync, + + // Consumers (async) + bytes, + text, + arrayBuffer, + array, + + // Consumers (sync) + bytesSync, + textSync, + arrayBufferSync, + arraySync, + + // Combining + merge, + + // Multi-consumer (push model) + broadcast, + + // Multi-consumer (pull model) + share, + shareSync, + + // Utilities + tap, + tapSync, + + // Drain utility for event source integration + ondrain, + + // Protocol symbols + toStreamable, + toAsyncStreamable, + broadcastProtocol, + shareProtocol, + shareSyncProtocol, + drainableProtocol, +}); + +module.exports = { + // The Stream namespace + Stream, + + // Also export everything individually for destructured imports + + // Protocol symbols + toStreamable, + toAsyncStreamable, + broadcastProtocol, + shareProtocol, + shareSyncProtocol, + drainableProtocol, + + // Factories + push, + duplex, + from, + fromSync, + + // Pipelines + pull, + pullSync, + pipeTo, + pipeToSync, + + // Consumers (async) + bytes, + text, + arrayBuffer, + array, + + // Consumers (sync) + bytesSync, + textSync, + arrayBufferSync, + arraySync, + + // Combining + merge, + + // Multi-consumer + broadcast, + Broadcast, + share, + shareSync, + Share, + SyncShare, + + // Utilities + tap, + tapSync, + ondrain, +}; diff --git a/lib/zlib/iter.js b/lib/zlib/iter.js new file mode 100644 index 00000000000000..7c2d64d75aa63f --- /dev/null +++ b/lib/zlib/iter.js @@ -0,0 +1,53 @@ +'use strict'; + +// Public entry point for the iterable compression/decompression API. 
+// Usage: require('zlib/iter') or require('node:zlib/iter') +// Requires: --experimental-stream-iter + +const { emitExperimentalWarning } = require('internal/util'); +emitExperimentalWarning('zlib/iter'); + +const { + compressGzip, + compressGzipSync, + compressDeflate, + compressDeflateSync, + compressBrotli, + compressBrotliSync, + compressZstd, + compressZstdSync, + decompressGzip, + decompressGzipSync, + decompressDeflate, + decompressDeflateSync, + decompressBrotli, + decompressBrotliSync, + decompressZstd, + decompressZstdSync, +} = require('internal/streams/iter/transform'); + +module.exports = { + // Compression transforms (async) + compressGzip, + compressDeflate, + compressBrotli, + compressZstd, + + // Compression transforms (sync) + compressGzipSync, + compressDeflateSync, + compressBrotliSync, + compressZstdSync, + + // Decompression transforms (async) + decompressGzip, + decompressDeflate, + decompressBrotli, + decompressZstd, + + // Decompression transforms (sync) + decompressGzipSync, + decompressDeflateSync, + decompressBrotliSync, + decompressZstdSync, +}; diff --git a/src/node_builtins.cc b/src/node_builtins.cc index 318ff5158e9c28..81503ae2a1ff85 100644 --- a/src/node_builtins.cc +++ b/src/node_builtins.cc @@ -134,15 +134,17 @@ BuiltinLoader::BuiltinCategories BuiltinLoader::GetBuiltinCategories() const { "internal/tls/wrap", "internal/tls/secure-context", "internal/http2/core", "internal/http2/compat", "internal/streams/lazy_transform", -#endif // !HAVE_OPENSSL +#endif // !HAVE_OPENSSL #ifndef OPENSSL_NO_QUIC "internal/quic/quic", "internal/quic/symbols", "internal/quic/stats", "internal/quic/state", -#endif // !OPENSSL_NO_QUIC - "quic", // Experimental. - "sqlite", // Experimental. - "sys", // Deprecated. - "wasi", // Experimental. +#endif // !OPENSSL_NO_QUIC + "quic", // Experimental. + "sqlite", // Experimental. + "stream/iter", // Experimental. + "zlib/iter", // Experimental. + "sys", // Deprecated. + "wasi", // Experimental. 
#if !HAVE_SQLITE "internal/webstorage", // Experimental. #endif diff --git a/src/node_file.cc b/src/node_file.cc index 0fe01e8b08127c..0b4711b9769b85 100644 --- a/src/node_file.cc +++ b/src/node_file.cc @@ -49,7 +49,7 @@ #include #if defined(__MINGW32__) || defined(_MSC_VER) -# include +#include #endif #ifdef _WIN32 @@ -88,7 +88,7 @@ using v8::Undefined; using v8::Value; #ifndef S_ISDIR -# define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR) +#define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR) #endif #ifdef __POSIX__ @@ -203,8 +203,7 @@ static const char* get_fs_func_name_by_type(uv_fs_type req_type) { // We sometimes need to convert a C++ lambda function to a raw C-style function. // This is helpful, because ReqWrap::Dispatch() does not recognize lambda // functions, and thus does not wrap them properly. -typedef void(*uv_fs_callback_t)(uv_fs_t*); - +typedef void (*uv_fs_callback_t)(uv_fs_t*); void FSContinuationData::MemoryInfo(MemoryTracker* tracker) const { tracker->TrackField("paths", paths_); @@ -336,7 +335,7 @@ BaseObjectPtr FileHandle::TransferData::Deserialize( int fd = fd_; fd_ = -1; - return BaseObjectPtr { FileHandle::New(bd, fd) }; + return BaseObjectPtr{FileHandle::New(bd, fd)}; } // Throw an exception if the file handle has not yet been closed. @@ -431,7 +430,7 @@ FileHandle::CloseReq::CloseReq(Environment* env, Local obj, Local promise, Local ref) - : ReqWrap(env, obj, AsyncWrap::PROVIDER_FILEHANDLECLOSEREQ) { + : ReqWrap(env, obj, AsyncWrap::PROVIDER_FILEHANDLECLOSEREQ) { promise_.Reset(env->isolate(), promise); ref_.Reset(env->isolate(), ref); } @@ -447,8 +446,6 @@ void FileHandle::CloseReq::MemoryInfo(MemoryTracker* tracker) const { tracker->TrackField("ref", ref_); } - - // Closes this FileHandle asynchronously and returns a Promise that will be // resolved when the callback is invoked, or rejects with a UVException if // there was a problem closing the fd. 
This is the preferred mechanism for @@ -476,8 +473,10 @@ MaybeLocal FileHandle::ClosePromise() { Local promise = resolver.As(); Local close_req_obj; - if (!env()->fdclose_constructor_template() - ->NewInstance(env()->context()).ToLocal(&close_req_obj)) { + if (!env() + ->fdclose_constructor_template() + ->NewInstance(env()->context()) + .ToLocal(&close_req_obj)) { return MaybeLocal(); } closing_ = true; @@ -520,6 +519,27 @@ void FileHandle::Close(const FunctionCallbackInfo& args) { args.GetReturnValue().Set(ret); } +void FileHandle::CloseSync(const FunctionCallbackInfo& args) { + FileHandle* fd; + ASSIGN_OR_RETURN_UNWRAP(&fd, args.This()); + + // Already closed or closing - no-op. + if (fd->closed_ || fd->closing_) return; + + uv_fs_t req; + CHECK_NE(fd->fd_, -1); + FS_SYNC_TRACE_BEGIN(close); + int ret = uv_fs_close(fd->env()->event_loop(), &req, fd->fd_, nullptr); + FS_SYNC_TRACE_END(close); + uv_fs_req_cleanup(&req); + + fd->AfterClose(); + + if (ret < 0) { + Environment* env = fd->env(); + env->ThrowUVException(ret, "close"); + } +} void FileHandle::ReleaseFD(const FunctionCallbackInfo& args) { FileHandle* fd; @@ -538,8 +558,7 @@ void FileHandle::AfterClose() { closing_ = false; closed_ = true; fd_ = -1; - if (reading_ && !persistent().IsEmpty()) - EmitRead(UV_EOF); + if (reading_ && !persistent().IsEmpty()) EmitRead(UV_EOF); } void FileHandleReadWrap::MemoryInfo(MemoryTracker* tracker) const { @@ -548,17 +567,15 @@ void FileHandleReadWrap::MemoryInfo(MemoryTracker* tracker) const { } FileHandleReadWrap::FileHandleReadWrap(FileHandle* handle, Local obj) - : ReqWrap(handle->env(), obj, AsyncWrap::PROVIDER_FSREQCALLBACK), - file_handle_(handle) {} + : ReqWrap(handle->env(), obj, AsyncWrap::PROVIDER_FSREQCALLBACK), + file_handle_(handle) {} int FileHandle::ReadStart() { - if (!IsAlive() || IsClosing()) - return UV_EOF; + if (!IsAlive() || IsClosing()) return UV_EOF; reading_ = true; - if (current_read_) - return 0; + if (current_read_) return 0; BaseObjectPtr 
read_wrap; @@ -604,67 +621,65 @@ int FileHandle::ReadStart() { current_read_ = std::move(read_wrap); FS_ASYNC_TRACE_BEGIN0(UV_FS_READ, current_read_.get()) - current_read_->Dispatch(uv_fs_read, - fd_, - ¤t_read_->buffer_, - 1, - read_offset_, - uv_fs_callback_t{[](uv_fs_t* req) { - FileHandle* handle; - { - FileHandleReadWrap* req_wrap = FileHandleReadWrap::from_req(req); - FS_ASYNC_TRACE_END1( - req->fs_type, req_wrap, "result", static_cast(req->result)) - handle = req_wrap->file_handle_; - CHECK_EQ(handle->current_read_.get(), req_wrap); - } - - // ReadStart() checks whether current_read_ is set to determine whether - // a read is in progress. Moving it into a local variable makes sure that - // the ReadStart() call below doesn't think we're still actively reading. - BaseObjectPtr read_wrap = - std::move(handle->current_read_); - - ssize_t result = req->result; - uv_buf_t buffer = read_wrap->buffer_; - - uv_fs_req_cleanup(req); + current_read_->Dispatch( + uv_fs_read, + fd_, + ¤t_read_->buffer_, + 1, + read_offset_, + uv_fs_callback_t{[](uv_fs_t* req) { + FileHandle* handle; + { + FileHandleReadWrap* req_wrap = FileHandleReadWrap::from_req(req); + FS_ASYNC_TRACE_END1( + req->fs_type, req_wrap, "result", static_cast(req->result)) + handle = req_wrap->file_handle_; + CHECK_EQ(handle->current_read_.get(), req_wrap); + } - // Push the read wrap back to the freelist, or let it be destroyed - // once we’re exiting the current scope. - constexpr size_t kWantedFreelistFill = 100; - auto& freelist = handle->binding_data_->file_handle_read_wrap_freelist; - if (freelist.size() < kWantedFreelistFill) { - read_wrap->Reset(); - freelist.emplace_back(std::move(read_wrap)); - } + // ReadStart() checks whether current_read_ is set to determine whether + // a read is in progress. Moving it into a local variable makes sure + // that the ReadStart() call below doesn't think we're still actively + // reading. 
+ BaseObjectPtr read_wrap = + std::move(handle->current_read_); + + ssize_t result = req->result; + uv_buf_t buffer = read_wrap->buffer_; + + uv_fs_req_cleanup(req); + + // Push the read wrap back to the freelist, or let it be destroyed + // once we’re exiting the current scope. + constexpr size_t kWantedFreelistFill = 100; + auto& freelist = handle->binding_data_->file_handle_read_wrap_freelist; + if (freelist.size() < kWantedFreelistFill) { + read_wrap->Reset(); + freelist.emplace_back(std::move(read_wrap)); + } - if (result >= 0) { - // Read at most as many bytes as we originally planned to. - if (handle->read_length_ >= 0 && handle->read_length_ < result) - result = handle->read_length_; + if (result >= 0) { + // Read at most as many bytes as we originally planned to. + if (handle->read_length_ >= 0 && handle->read_length_ < result) + result = handle->read_length_; - // If we read data and we have an expected length, decrease it by - // how much we have read. - if (handle->read_length_ >= 0) - handle->read_length_ -= result; + // If we read data and we have an expected length, decrease it by + // how much we have read. + if (handle->read_length_ >= 0) handle->read_length_ -= result; - // If we have an offset, increase it by how much we have read. - if (handle->read_offset_ >= 0) - handle->read_offset_ += result; - } + // If we have an offset, increase it by how much we have read. + if (handle->read_offset_ >= 0) handle->read_offset_ += result; + } - // Reading 0 bytes from a file always means EOF, or that we reached - // the end of the requested range. - if (result == 0) - result = UV_EOF; + // Reading 0 bytes from a file always means EOF, or that we reached + // the end of the requested range. + if (result == 0) result = UV_EOF; - handle->EmitRead(result, buffer); + handle->EmitRead(result, buffer); - // Start over, if EmitRead() didn’t tell us to stop. - if (handle->reading_) - handle->ReadStart(); - }}); + // Start over, if EmitRead() didn’t tell us to stop. 
+ if (handle->reading_) handle->ReadStart(); + }}); return 0; } @@ -689,23 +704,23 @@ int FileHandle::DoShutdown(ShutdownWrap* req_wrap) { closing_ = true; CHECK_NE(fd_, -1); FS_ASYNC_TRACE_BEGIN0(UV_FS_CLOSE, wrap) - wrap->Dispatch(uv_fs_close, fd_, uv_fs_callback_t{[](uv_fs_t* req) { - FileHandleCloseWrap* wrap = static_cast( - FileHandleCloseWrap::from_req(req)); - FS_ASYNC_TRACE_END1( - req->fs_type, wrap, "result", static_cast(req->result)) - FileHandle* handle = static_cast(wrap->stream()); - handle->AfterClose(); - - int result = static_cast(req->result); - uv_fs_req_cleanup(req); - wrap->Done(result); - }}); + wrap->Dispatch( + uv_fs_close, fd_, uv_fs_callback_t{[](uv_fs_t* req) { + FileHandleCloseWrap* wrap = static_cast( + FileHandleCloseWrap::from_req(req)); + FS_ASYNC_TRACE_END1( + req->fs_type, wrap, "result", static_cast(req->result)) + FileHandle* handle = static_cast(wrap->stream()); + handle->AfterClose(); + + int result = static_cast(req->result); + uv_fs_req_cleanup(req); + wrap->Done(result); + }}); return 0; } - void FSReqCallback::Reject(Local reject) { MakeCallback(env()->oncomplete_string(), 1, &reject); } @@ -719,10 +734,7 @@ void FSReqCallback::ResolveStatFs(const uv_statfs_t* stat) { } void FSReqCallback::Resolve(Local value) { - Local argv[2] { - Null(env()->isolate()), - value - }; + Local argv[2]{Null(env()->isolate()), value}; MakeCallback(env()->oncomplete_string(), value->IsUndefined() ? 1 : arraysize(argv), argv); @@ -768,7 +780,7 @@ void FSReqAfterScope::Clear() { // which is also why the errors should have been constructed // in JS for more flexibility. 
void FSReqAfterScope::Reject(uv_fs_t* req) { - BaseObjectPtr wrap { wrap_ }; + BaseObjectPtr wrap{wrap_}; Local exception = UVException(wrap_->env()->isolate(), static_cast(req->result), wrap_->syscall(), @@ -796,8 +808,7 @@ void AfterNoArgs(uv_fs_t* req) { FSReqAfterScope after(req_wrap, req); FS_ASYNC_TRACE_END1( req->fs_type, req_wrap, "result", static_cast(req->result)) - if (after.Proceed()) - req_wrap->Resolve(Undefined(req_wrap->env()->isolate())); + if (after.Proceed()) req_wrap->Resolve(Undefined(req_wrap->env()->isolate())); } void AfterStat(uv_fs_t* req) { @@ -949,8 +960,7 @@ void AfterScanDir(uv_fs_t* req) { uv_dirent_t ent; r = uv_fs_scandir_next(req, &ent); - if (r == UV_EOF) - break; + if (r == UV_EOF) break; if (r != 0) { return req_wrap->Reject( UVException(isolate, r, nullptr, req_wrap->syscall(), req->path)); @@ -1005,8 +1015,15 @@ void Access(const FunctionCallbackInfo& args) { path.ToStringView()); FS_ASYNC_TRACE_BEGIN1( UV_FS_ACCESS, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "access", UTF8, AfterNoArgs, - uv_fs_access, *path, mode); + AsyncCall(env, + req_wrap_async, + args, + "access", + UTF8, + AfterNoArgs, + uv_fs_access, + *path, + mode); } else { // access(path, mode) THROW_IF_INSUFFICIENT_PERMISSIONS( env, permission::PermissionScope::kFileSystemRead, path.ToStringView()); @@ -1033,8 +1050,8 @@ void Close(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 1); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_CLOSE, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "close", UTF8, AfterNoArgs, - uv_fs_close, fd); + AsyncCall( + env, req_wrap_async, args, "close", UTF8, AfterNoArgs, uv_fs_close, fd); } else { // close(fd) FSReqWrapSync req_wrap_sync("close"); FS_SYNC_TRACE_BEGIN(close); @@ -1172,7 +1189,9 @@ static void Stat(const FunctionCallbackInfo& args) { if (is_uv_error(result)) { return; } - Local arr = FillGlobalStatsArray(binding_data, 
use_bigint, + Local arr = FillGlobalStatsArray( + binding_data, + use_bigint, static_cast(req_wrap_sync.req.ptr)); args.GetReturnValue().Set(arr); } @@ -1196,8 +1215,14 @@ static void LStat(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_LSTAT, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "lstat", UTF8, AfterStat, - uv_fs_lstat, *path); + AsyncCall(env, + req_wrap_async, + args, + "lstat", + UTF8, + AfterStat, + uv_fs_lstat, + *path); } else { // lstat(path, use_bigint, undefined, throw_if_no_entry) bool do_not_throw_if_no_entry = args[3]->IsFalse(); FSReqWrapSync req_wrap_sync("lstat", *path); @@ -1214,7 +1239,9 @@ static void LStat(const FunctionCallbackInfo& args) { return; } - Local arr = FillGlobalStatsArray(binding_data, use_bigint, + Local arr = FillGlobalStatsArray( + binding_data, + use_bigint, static_cast(req_wrap_sync.req.ptr)); args.GetReturnValue().Set(arr); } @@ -1238,8 +1265,8 @@ static void FStat(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 2, use_bigint); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FSTAT, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "fstat", UTF8, AfterStat, - uv_fs_fstat, fd); + AsyncCall( + env, req_wrap_async, args, "fstat", UTF8, AfterStat, uv_fs_fstat, fd); } else { // fstat(fd, use_bigint, undefined, do_not_throw_error) bool do_not_throw_error = args[2]->IsTrue(); const auto should_throw = [do_not_throw_error](int result) { @@ -1254,7 +1281,9 @@ static void FStat(const FunctionCallbackInfo& args) { return; } - Local arr = FillGlobalStatsArray(binding_data, use_bigint, + Local arr = FillGlobalStatsArray( + binding_data, + use_bigint, static_cast(req_wrap_sync.req.ptr)); args.GetReturnValue().Set(arr); } @@ -1345,8 +1374,18 @@ static void Symlink(const FunctionCallbackInfo& args) { TRACE_STR_COPY(*target), "path", TRACE_STR_COPY(*path)) - AsyncDestCall(env, req_wrap_async, args, 
"symlink", *path, path.length(), - UTF8, AfterNoArgs, uv_fs_symlink, *target, *path, flags); + AsyncDestCall(env, + req_wrap_async, + args, + "symlink", + *path, + path.length(), + UTF8, + AfterNoArgs, + uv_fs_symlink, + *target, + *path, + flags); } else { // symlink(target, path, flags, undefined, ctx) FSReqWrapSync req_wrap_sync("symlink", *target, *path); FS_SYNC_TRACE_BEGIN(symlink); @@ -1401,8 +1440,17 @@ static void Link(const FunctionCallbackInfo& args) { TRACE_STR_COPY(*src), "dest", TRACE_STR_COPY(*dest)) - AsyncDestCall(env, req_wrap_async, args, "link", *dest, dest.length(), UTF8, - AfterNoArgs, uv_fs_link, *src, *dest); + AsyncDestCall(env, + req_wrap_async, + args, + "link", + *dest, + dest.length(), + UTF8, + AfterNoArgs, + uv_fs_link, + *src, + *dest); } else { // link(src, dest) // To avoid bypass the link target should be allowed to read and write THROW_IF_INSUFFICIENT_PERMISSIONS( @@ -1439,8 +1487,14 @@ static void ReadLink(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_READLINK, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "readlink", encoding, AfterStringPtr, - uv_fs_readlink, *path); + AsyncCall(env, + req_wrap_async, + args, + "readlink", + encoding, + AfterStringPtr, + uv_fs_readlink, + *path); } else { // readlink(path, encoding) FSReqWrapSync req_wrap_sync("readlink", *path); FS_SYNC_TRACE_BEGIN(readlink); @@ -1499,9 +1553,17 @@ static void Rename(const FunctionCallbackInfo& args) { TRACE_STR_COPY(*old_path), "new_path", TRACE_STR_COPY(*new_path)) - AsyncDestCall(env, req_wrap_async, args, "rename", *new_path, - new_path.length(), UTF8, AfterNoArgs, uv_fs_rename, - *old_path, *new_path); + AsyncDestCall(env, + req_wrap_async, + args, + "rename", + *new_path, + new_path.length(), + UTF8, + AfterNoArgs, + uv_fs_rename, + *old_path, + *new_path); } else { // rename(old_path, new_path) THROW_IF_INSUFFICIENT_PERMISSIONS( env, 
permission::PermissionScope::kFileSystemRead, view_old_path); @@ -1537,8 +1599,15 @@ static void FTruncate(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 2); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FTRUNCATE, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "ftruncate", UTF8, AfterNoArgs, - uv_fs_ftruncate, fd, len); + AsyncCall(env, + req_wrap_async, + args, + "ftruncate", + UTF8, + AfterNoArgs, + uv_fs_ftruncate, + fd, + len); } else { // ftruncate(fd, len) FSReqWrapSync req_wrap_sync("ftruncate"); FS_SYNC_TRACE_BEGIN(ftruncate); @@ -1562,8 +1631,14 @@ static void Fdatasync(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 1); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FDATASYNC, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "fdatasync", UTF8, AfterNoArgs, - uv_fs_fdatasync, fd); + AsyncCall(env, + req_wrap_async, + args, + "fdatasync", + UTF8, + AfterNoArgs, + uv_fs_fdatasync, + fd); } else { // fdatasync(fd) FSReqWrapSync req_wrap_sync("fdatasync"); FS_SYNC_TRACE_BEGIN(fdatasync); @@ -1587,8 +1662,8 @@ static void Fsync(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 1); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FSYNC, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "fsync", UTF8, AfterNoArgs, - uv_fs_fsync, fd); + AsyncCall( + env, req_wrap_async, args, "fsync", UTF8, AfterNoArgs, uv_fs_fsync, fd); } else { FSReqWrapSync req_wrap_sync("fsync"); FS_SYNC_TRACE_BEGIN(fsync); @@ -1617,8 +1692,14 @@ static void Unlink(const FunctionCallbackInfo& args) { path.ToStringView()); FS_ASYNC_TRACE_BEGIN1( UV_FS_UNLINK, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "unlink", UTF8, AfterNoArgs, - uv_fs_unlink, *path); + AsyncCall(env, + req_wrap_async, + args, + "unlink", + UTF8, + AfterNoArgs, + uv_fs_unlink, + *path); } else { // unlink(path) 
THROW_IF_INSUFFICIENT_PERMISSIONS( env, @@ -1648,8 +1729,14 @@ static void RMDir(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_RMDIR, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "rmdir", UTF8, AfterNoArgs, - uv_fs_rmdir, *path); + AsyncCall(env, + req_wrap_async, + args, + "rmdir", + UTF8, + AfterNoArgs, + uv_fs_rmdir, + *path); } else { // rmdir(path) FSReqWrapSync req_wrap_sync("rmdir", *path); FS_SYNC_TRACE_BEGIN(rmdir); @@ -1809,7 +1896,7 @@ int MKDirpSync(uv_loop_t* loop, if (err == 0 && !S_ISDIR(req->statbuf.st_mode)) { uv_fs_req_cleanup(req); if (orig_err == UV_EEXIST && - req_wrap->continuation_data()->paths().size() > 0) { + req_wrap->continuation_data()->paths().size() > 0) { return UV_ENOTDIR; } return UV_EEXIST; @@ -1825,11 +1912,8 @@ int MKDirpSync(uv_loop_t* loop, return 0; } -int MKDirpAsync(uv_loop_t* loop, - uv_fs_t* req, - const char* path, - int mode, - uv_fs_cb cb) { +int MKDirpAsync( + uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, uv_fs_cb cb) { FSReqBase* req_wrap = FSReqBase::from_req(req); // on the first iteration of algorithm, stash state information. if (req_wrap->continuation_data() == nullptr) { @@ -1840,83 +1924,93 @@ int MKDirpAsync(uv_loop_t* loop, // on each iteration of algorithm, mkdir directory on top of stack. 
std::string next_path = req_wrap->continuation_data()->PopPath(); - int err = uv_fs_mkdir(loop, req, next_path.c_str(), mode, - uv_fs_callback_t{[](uv_fs_t* req) { - FSReqBase* req_wrap = FSReqBase::from_req(req); - Environment* env = req_wrap->env(); - uv_loop_t* loop = env->event_loop(); - std::string path = req->path; - int err = static_cast(req->result); - - while (true) { - switch (err) { - // Note: uv_fs_req_cleanup in terminal paths will be called by - // FSReqAfterScope::~FSReqAfterScope() - case 0: { - if (req_wrap->continuation_data()->paths().empty()) { - req_wrap->continuation_data()->MaybeSetFirstPath(path); - req_wrap->continuation_data()->Done(0); - } else { - req_wrap->continuation_data()->MaybeSetFirstPath(path); - uv_fs_req_cleanup(req); - MKDirpAsync(loop, req, path.c_str(), - req_wrap->continuation_data()->mode(), nullptr); - } - break; - } - case UV_EACCES: - case UV_ENOSPC: - case UV_ENOTDIR: - case UV_EPERM: { - req_wrap->continuation_data()->Done(err); - break; - } - case UV_ENOENT: { - std::string dirname = - path.substr(0, path.find_last_of(kPathSeparator)); - if (dirname != path) { - req_wrap->continuation_data()->PushPath(path); - req_wrap->continuation_data()->PushPath(std::move(dirname)); - } else if (req_wrap->continuation_data()->paths().empty()) { - err = UV_EEXIST; - continue; - } - uv_fs_req_cleanup(req); - MKDirpAsync(loop, req, path.c_str(), - req_wrap->continuation_data()->mode(), nullptr); - break; - } - default: - uv_fs_req_cleanup(req); - // Stash err for use in the callback. 
- req->data = reinterpret_cast(static_cast(err)); - int err = uv_fs_stat(loop, req, path.c_str(), - uv_fs_callback_t{[](uv_fs_t* req) { - FSReqBase* req_wrap = FSReqBase::from_req(req); - int err = static_cast(req->result); - if (reinterpret_cast(req->data) == UV_EEXIST && - req_wrap->continuation_data()->paths().size() > 0) { - if (err == 0 && S_ISDIR(req->statbuf.st_mode)) { - Environment* env = req_wrap->env(); - uv_loop_t* loop = env->event_loop(); - std::string path = req->path; + int err = uv_fs_mkdir( + loop, req, next_path.c_str(), mode, uv_fs_callback_t{[](uv_fs_t* req) { + FSReqBase* req_wrap = FSReqBase::from_req(req); + Environment* env = req_wrap->env(); + uv_loop_t* loop = env->event_loop(); + std::string path = req->path; + int err = static_cast(req->result); + + while (true) { + switch (err) { + // Note: uv_fs_req_cleanup in terminal paths will be called by + // FSReqAfterScope::~FSReqAfterScope() + case 0: { + if (req_wrap->continuation_data()->paths().empty()) { + req_wrap->continuation_data()->MaybeSetFirstPath(path); + req_wrap->continuation_data()->Done(0); + } else { + req_wrap->continuation_data()->MaybeSetFirstPath(path); uv_fs_req_cleanup(req); - MKDirpAsync(loop, req, path.c_str(), - req_wrap->continuation_data()->mode(), nullptr); - return; + MKDirpAsync(loop, + req, + path.c_str(), + req_wrap->continuation_data()->mode(), + nullptr); + } + break; + } + case UV_EACCES: + case UV_ENOSPC: + case UV_ENOTDIR: + case UV_EPERM: { + req_wrap->continuation_data()->Done(err); + break; + } + case UV_ENOENT: { + std::string dirname = + path.substr(0, path.find_last_of(kPathSeparator)); + if (dirname != path) { + req_wrap->continuation_data()->PushPath(path); + req_wrap->continuation_data()->PushPath(std::move(dirname)); + } else if (req_wrap->continuation_data()->paths().empty()) { + err = UV_EEXIST; + continue; } - err = UV_ENOTDIR; + uv_fs_req_cleanup(req); + MKDirpAsync(loop, + req, + path.c_str(), + req_wrap->continuation_data()->mode(), + 
nullptr); + break; } - // verify that the path pointed to is actually a directory. - if (err == 0 && !S_ISDIR(req->statbuf.st_mode)) err = UV_EEXIST; - req_wrap->continuation_data()->Done(err); - }}); - if (err < 0) req_wrap->continuation_data()->Done(err); + default: + uv_fs_req_cleanup(req); + // Stash err for use in the callback. + req->data = reinterpret_cast(static_cast(err)); + int err = uv_fs_stat( + loop, req, path.c_str(), uv_fs_callback_t{[](uv_fs_t* req) { + FSReqBase* req_wrap = FSReqBase::from_req(req); + int err = static_cast(req->result); + if (reinterpret_cast(req->data) == UV_EEXIST && + req_wrap->continuation_data()->paths().size() > 0) { + if (err == 0 && S_ISDIR(req->statbuf.st_mode)) { + Environment* env = req_wrap->env(); + uv_loop_t* loop = env->event_loop(); + std::string path = req->path; + uv_fs_req_cleanup(req); + MKDirpAsync(loop, + req, + path.c_str(), + req_wrap->continuation_data()->mode(), + nullptr); + return; + } + err = UV_ENOTDIR; + } + // verify that the path pointed to is actually a directory. + if (err == 0 && !S_ISDIR(req->statbuf.st_mode)) + err = UV_EEXIST; + req_wrap->continuation_data()->Done(err); + }}); + if (err < 0) req_wrap->continuation_data()->Done(err); + break; + } break; - } - break; - } - }}); + } + }}); return err; } @@ -1945,9 +2039,15 @@ static void MKDir(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_UNLINK, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "mkdir", UTF8, + AsyncCall(env, + req_wrap_async, + args, + "mkdir", + UTF8, mkdirp ? AfterMkdirp : AfterNoArgs, - mkdirp ? MKDirpAsync : uv_fs_mkdir, *path, mode); + mkdirp ? 
MKDirpAsync : uv_fs_mkdir, + *path, + mode); } else { // mkdir(path, mode, recursive) FSReqWrapSync req_wrap_sync("mkdir", *path); FS_SYNC_TRACE_BEGIN(mkdir); @@ -1992,8 +2092,14 @@ static void RealPath(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_REALPATH, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "realpath", encoding, AfterStringPtr, - uv_fs_realpath, *path); + AsyncCall(env, + req_wrap_async, + args, + "realpath", + encoding, + AfterStringPtr, + uv_fs_realpath, + *path); } else { // realpath(path, encoding, undefined, ctx) FSReqWrapSync req_wrap_sync("realpath", *path); FS_SYNC_TRACE_BEGIN(realpath); @@ -2089,8 +2195,7 @@ static void ReadDir(const FunctionCallbackInfo& args) { uv_dirent_t ent; r = uv_fs_scandir_next(&(req_wrap_sync.req), &ent); - if (r == UV_EOF) - break; + if (r == UV_EOF) break; if (is_uv_error(r)) { env->ThrowUVException(r, "scandir", nullptr, *path); return; @@ -2108,13 +2213,10 @@ static void ReadDir(const FunctionCallbackInfo& args) { } } - Local names = Array::New(isolate, name_v.data(), name_v.size()); if (with_types) { Local result[] = { - names, - Array::New(isolate, type_v.data(), type_v.size()) - }; + names, Array::New(isolate, type_v.data(), type_v.size())}; args.GetReturnValue().Set(Array::New(isolate, result, arraysize(result))); } else { args.GetReturnValue().Set(names); @@ -2209,8 +2311,16 @@ static void Open(const FunctionCallbackInfo& args) { req_wrap_async->set_is_plain_open(true); FS_ASYNC_TRACE_BEGIN1( UV_FS_OPEN, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "open", UTF8, AfterInteger, - uv_fs_open, *path, flags, mode); + AsyncCall(env, + req_wrap_async, + args, + "open", + UTF8, + AfterInteger, + uv_fs_open, + *path, + flags, + mode); } else { // open(path, flags, mode) if (CheckOpenPermissions(env, path, flags).IsNothing()) return; FSReqWrapSync req_wrap_sync("open", *path); @@ -2248,8 
+2358,16 @@ static void OpenFileHandle(const FunctionCallbackInfo& args) { if (req_wrap_async != nullptr) { // openFileHandle(path, flags, mode, req) FS_ASYNC_TRACE_BEGIN1( UV_FS_OPEN, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "open", UTF8, AfterOpenFileHandle, - uv_fs_open, *path, flags, mode); + AsyncCall(env, + req_wrap_async, + args, + "open", + UTF8, + AfterOpenFileHandle, + uv_fs_open, + *path, + flags, + mode); } else { // openFileHandle(path, flags, mode, undefined, ctx) CHECK_EQ(argc, 5); FSReqWrapSync req_wrap_sync; @@ -2316,9 +2434,18 @@ static void CopyFile(const FunctionCallbackInfo& args) { TRACE_STR_COPY(*src), "dest", TRACE_STR_COPY(*dest)) - AsyncDestCall(env, req_wrap_async, args, "copyfile", - *dest, dest.length(), UTF8, AfterNoArgs, - uv_fs_copyfile, *src, *dest, flags); + AsyncDestCall(env, + req_wrap_async, + args, + "copyfile", + *dest, + dest.length(), + UTF8, + AfterNoArgs, + uv_fs_copyfile, + *src, + *dest, + flags); } else { // copyFile(src, dest, flags) THROW_IF_INSUFFICIENT_PERMISSIONS( env, permission::PermissionScope::kFileSystemRead, src.ToStringView()); @@ -2379,8 +2506,17 @@ static void WriteBuffer(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 5); if (req_wrap_async != nullptr) { // write(fd, buffer, off, len, pos, req) FS_ASYNC_TRACE_BEGIN0(UV_FS_WRITE, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "write", UTF8, AfterInteger, - uv_fs_write, fd, &uvbuf, 1, pos); + AsyncCall(env, + req_wrap_async, + args, + "write", + UTF8, + AfterInteger, + uv_fs_write, + fd, + &uvbuf, + 1, + pos); } else { // write(fd, buffer, off, len, pos, undefined, ctx) CHECK_EQ(argc, 7); FSReqWrapSync req_wrap_sync; @@ -2404,7 +2540,6 @@ static void WriteBuffer(const FunctionCallbackInfo& args) { } } - // Wrapper for writev(2). 
// // bytesWritten = writev(fd, chunks, position, callback) @@ -2465,7 +2600,6 @@ static void WriteBuffers(const FunctionCallbackInfo& args) { } } - // Wrapper for write(2). // // bytesWritten = write(fd, string, position, enc, callback) @@ -2529,12 +2663,8 @@ static void WriteString(const FunctionCallbackInfo& args) { stack_buffer.SetLengthAndZeroTerminate(len); uv_buf_t uvbuf = uv_buf_init(*stack_buffer, len); FS_ASYNC_TRACE_BEGIN0(UV_FS_WRITE, req_wrap_async) - int err = req_wrap_async->Dispatch(uv_fs_write, - fd, - &uvbuf, - 1, - pos, - AfterInteger); + int err = + req_wrap_async->Dispatch(uv_fs_write, fd, &uvbuf, 1, pos, AfterInteger); if (err < 0) { uv_fs_t* uv_req = req_wrap_async->req(); uv_req->result = err; @@ -2546,13 +2676,11 @@ static void WriteString(const FunctionCallbackInfo& args) { CHECK_EQ(argc, 6); FSReqBase::FSReqBuffer stack_buffer; if (buf == nullptr) { - if (!StringBytes::StorageSize(isolate, value, enc).To(&len)) - return; + if (!StringBytes::StorageSize(isolate, value, enc).To(&len)) return; stack_buffer.AllocateSufficientStorage(len + 1); // StorageSize may return too large a char, so correct the actual length // by the write size - len = StringBytes::Write(isolate, *stack_buffer, - len, args[1], enc); + len = StringBytes::Write(isolate, *stack_buffer, len, args[1], enc); stack_buffer.SetLengthAndZeroTerminate(len); buf = *stack_buffer; } @@ -2696,9 +2824,8 @@ static void Read(const FunctionCallbackInfo& args) { CHECK(Buffer::IsWithinBounds(off, len, buffer_length)); CHECK(IsSafeJsInt(args[4]) || args[4]->IsBigInt()); - const int64_t pos = args[4]->IsNumber() ? - args[4].As()->Value() : - args[4].As()->Int64Value(); + const int64_t pos = args[4]->IsNumber() ? 
args[4].As()->Value() + : args[4].As()->Int64Value(); char* buf = buffer_data + off; uv_buf_t uvbuf = uv_buf_init(buf, len); @@ -2707,8 +2834,17 @@ static void Read(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 5); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_READ, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "read", UTF8, AfterInteger, - uv_fs_read, fd, &uvbuf, 1, pos); + AsyncCall(env, + req_wrap_async, + args, + "read", + UTF8, + AfterInteger, + uv_fs_read, + fd, + &uvbuf, + 1, + pos); } else { // read(fd, buffer, offset, len, pos) FSReqWrapSync req_wrap_sync("read"); FS_SYNC_TRACE_BEGIN(read); @@ -2832,8 +2968,17 @@ static void ReadBuffers(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 3); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_READ, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "read", UTF8, AfterInteger, - uv_fs_read, fd, *iovs, iovs.length(), pos); + AsyncCall(env, + req_wrap_async, + args, + "read", + UTF8, + AfterInteger, + uv_fs_read, + fd, + *iovs, + iovs.length(), + pos); } else { // readBuffers(fd, buffers, undefined, ctx) FSReqWrapSync req_wrap_sync("read"); FS_SYNC_TRACE_BEGIN(read); @@ -2847,7 +2992,6 @@ static void ReadBuffers(const FunctionCallbackInfo& args) { } } - /* fs.chmod(path, mode); * Wrapper for chmod(1) / EIO_CHMOD */ @@ -2871,8 +3015,15 @@ static void Chmod(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_CHMOD, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "chmod", UTF8, AfterNoArgs, - uv_fs_chmod, *path, mode); + AsyncCall(env, + req_wrap_async, + args, + "chmod", + UTF8, + AfterNoArgs, + uv_fs_chmod, + *path, + mode); } else { // chmod(path, mode) FSReqWrapSync req_wrap_sync("chmod", *path); FS_SYNC_TRACE_BEGIN(chmod); @@ -2881,7 +3032,6 @@ static void Chmod(const FunctionCallbackInfo& args) { } } - /* fs.fchmod(fd, mode); * 
Wrapper for fchmod(1) / EIO_FCHMOD */ @@ -2903,8 +3053,15 @@ static void FChmod(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 2); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FCHMOD, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "fchmod", UTF8, AfterNoArgs, - uv_fs_fchmod, fd, mode); + AsyncCall(env, + req_wrap_async, + args, + "fchmod", + UTF8, + AfterNoArgs, + uv_fs_fchmod, + fd, + mode); } else { // fchmod(fd, mode) FSReqWrapSync req_wrap_sync("fchmod"); FS_SYNC_TRACE_BEGIN(fchmod); @@ -2942,8 +3099,16 @@ static void Chown(const FunctionCallbackInfo& args) { path.ToStringView()); FS_ASYNC_TRACE_BEGIN1( UV_FS_CHOWN, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "chown", UTF8, AfterNoArgs, - uv_fs_chown, *path, uid, gid); + AsyncCall(env, + req_wrap_async, + args, + "chown", + UTF8, + AfterNoArgs, + uv_fs_chown, + *path, + uid, + gid); } else { // chown(path, uid, gid) THROW_IF_INSUFFICIENT_PERMISSIONS( env, @@ -2956,7 +3121,6 @@ static void Chown(const FunctionCallbackInfo& args) { } } - /* fs.fchown(fd, uid, gid); * Wrapper for fchown(1) / EIO_FCHOWN */ @@ -2981,8 +3145,16 @@ static void FChown(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 3); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FCHOWN, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "fchown", UTF8, AfterNoArgs, - uv_fs_fchown, fd, uid, gid); + AsyncCall(env, + req_wrap_async, + args, + "fchown", + UTF8, + AfterNoArgs, + uv_fs_fchown, + fd, + uid, + gid); } else { // fchown(fd, uid, gid) FSReqWrapSync req_wrap_sync("fchown"); FS_SYNC_TRACE_BEGIN(fchown); @@ -2991,7 +3163,6 @@ static void FChown(const FunctionCallbackInfo& args) { } } - static void LChown(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); @@ -3018,8 +3189,16 @@ static void LChown(const FunctionCallbackInfo& args) { path.ToStringView()); 
FS_ASYNC_TRACE_BEGIN1( UV_FS_LCHOWN, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "lchown", UTF8, AfterNoArgs, - uv_fs_lchown, *path, uid, gid); + AsyncCall(env, + req_wrap_async, + args, + "lchown", + UTF8, + AfterNoArgs, + uv_fs_lchown, + *path, + uid, + gid); } else { // lchown(path, uid, gid) THROW_IF_INSUFFICIENT_PERMISSIONS( env, @@ -3032,7 +3211,6 @@ static void LChown(const FunctionCallbackInfo& args) { } } - static void UTimes(const FunctionCallbackInfo& args) { Environment* env = Environment::GetCurrent(args); @@ -3056,8 +3234,16 @@ static void UTimes(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_UTIME, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "utime", UTF8, AfterNoArgs, - uv_fs_utime, *path, atime, mtime); + AsyncCall(env, + req_wrap_async, + args, + "utime", + UTF8, + AfterNoArgs, + uv_fs_utime, + *path, + atime, + mtime); } else { // utimes(path, atime, mtime) FSReqWrapSync req_wrap_sync("utime", *path); FS_SYNC_TRACE_BEGIN(utimes); @@ -3088,8 +3274,16 @@ static void FUTimes(const FunctionCallbackInfo& args) { FSReqBase* req_wrap_async = GetReqWrap(args, 3); CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN0(UV_FS_FUTIME, req_wrap_async) - AsyncCall(env, req_wrap_async, args, "futime", UTF8, AfterNoArgs, - uv_fs_futime, fd, atime, mtime); + AsyncCall(env, + req_wrap_async, + args, + "futime", + UTF8, + AfterNoArgs, + uv_fs_futime, + fd, + atime, + mtime); } else { // futimes(fd, atime, mtime) FSReqWrapSync req_wrap_sync("futime"); FS_SYNC_TRACE_BEGIN(futimes); @@ -3122,8 +3316,16 @@ static void LUTimes(const FunctionCallbackInfo& args) { CHECK_NOT_NULL(req_wrap_async); FS_ASYNC_TRACE_BEGIN1( UV_FS_LUTIME, req_wrap_async, "path", TRACE_STR_COPY(*path)) - AsyncCall(env, req_wrap_async, args, "lutime", UTF8, AfterNoArgs, - uv_fs_lutime, *path, atime, mtime); + AsyncCall(env, + req_wrap_async, + args, + "lutime", + 
UTF8, + AfterNoArgs, + uv_fs_lutime, + *path, + atime, + mtime); } else { // lutimes(path, atime, mtime) FSReqWrapSync req_wrap_sync("lutime", *path); FS_SYNC_TRACE_BEGIN(lutimes); @@ -3160,8 +3362,14 @@ static void Mkdtemp(const FunctionCallbackInfo& args) { tmpl.ToStringView()); FS_ASYNC_TRACE_BEGIN1( UV_FS_MKDTEMP, req_wrap_async, "path", TRACE_STR_COPY(*tmpl)) - AsyncCall(env, req_wrap_async, args, "mkdtemp", encoding, AfterStringPath, - uv_fs_mkdtemp, *tmpl); + AsyncCall(env, + req_wrap_async, + args, + "mkdtemp", + encoding, + AfterStringPath, + uv_fs_mkdtemp, + *tmpl); } else { // mkdtemp(tmpl, encoding) THROW_IF_INSUFFICIENT_PERMISSIONS( env, @@ -4008,8 +4216,7 @@ static void CreatePerIsolateProperties(IsolateData* isolate_data, // Create Function Template for FSReqPromise Local fpt = FunctionTemplate::New(isolate); fpt->Inherit(AsyncWrap::GetConstructorTemplate(isolate_data)); - Local promiseString = - FIXED_ONE_BYTE_STRING(isolate, "FSReqPromise"); + Local promiseString = FIXED_ONE_BYTE_STRING(isolate, "FSReqPromise"); fpt->SetClassName(promiseString); Local fpo = fpt->InstanceTemplate(); fpo->SetInternalFieldCount(FSReqBase::kInternalFieldCount); @@ -4019,6 +4226,7 @@ static void CreatePerIsolateProperties(IsolateData* isolate_data, Local fd = NewFunctionTemplate(isolate, FileHandle::New); fd->Inherit(AsyncWrap::GetConstructorTemplate(isolate_data)); SetProtoMethod(isolate, fd, "close", FileHandle::Close); + SetProtoMethod(isolate, fd, "closeSync", FileHandle::CloseSync); SetProtoMethod(isolate, fd, "releaseFD", FileHandle::ReleaseFD); Local fdt = fd->InstanceTemplate(); fdt->SetInternalFieldCount(FileHandle::kInternalFieldCount); @@ -4028,8 +4236,7 @@ static void CreatePerIsolateProperties(IsolateData* isolate_data, // Create FunctionTemplate for FileHandle::CloseReq Local fdclose = FunctionTemplate::New(isolate); - fdclose->SetClassName(FIXED_ONE_BYTE_STRING(isolate, - "FileHandleCloseReq")); + fdclose->SetClassName(FIXED_ONE_BYTE_STRING(isolate, 
"FileHandleCloseReq")); fdclose->Inherit(AsyncWrap::GetConstructorTemplate(isolate_data)); Local fdcloset = fdclose->InstanceTemplate(); fdcloset->SetInternalFieldCount(FSReqBase::kInternalFieldCount); @@ -4107,6 +4314,7 @@ void RegisterExternalReferences(ExternalReferenceRegistry* registry) { registry->Register(FileHandle::New); registry->Register(FileHandle::Close); + registry->Register(FileHandle::CloseSync); registry->Register(FileHandle::ReleaseFD); StreamBase::RegisterExternalReferences(registry); } diff --git a/src/node_file.h b/src/node_file.h index 2213d590659595..5d04a7d4dd6af3 100644 --- a/src/node_file.h +++ b/src/node_file.h @@ -80,8 +80,7 @@ class BindingData : public SnapshotableObject { AliasedFloat64Array statfs_field_array; AliasedBigInt64Array statfs_field_bigint_array; - std::vector> - file_handle_read_wrap_freelist; + std::vector> file_handle_read_wrap_freelist; SERIALIZABLE_OBJECT_METHODS() SET_BINDING_ID(fs_binding_data) @@ -146,7 +145,8 @@ class FSReqBase : public ReqWrap { const char* data, size_t len, enum encoding encoding); - inline FSReqBuffer& Init(const char* syscall, size_t len, + inline FSReqBuffer& Init(const char* syscall, + size_t len, enum encoding encoding); virtual void Reject(v8::Local reject) = 0; @@ -240,8 +240,7 @@ inline v8::Local FillGlobalStatFsArray(BindingData* binding_data, template class FSReqPromise final : public FSReqBase { public: - static inline FSReqPromise* New(BindingData* binding_data, - bool use_bigint); + static inline FSReqPromise* New(BindingData* binding_data, bool use_bigint); inline ~FSReqPromise() override; inline void Reject(v8::Local reject) override; @@ -345,6 +344,9 @@ class FileHandle final : public AsyncWrap, public StreamBase { // be resolved once closing is complete. static void Close(const v8::FunctionCallbackInfo& args); + // Synchronously closes the FD. Throws on error. + static void CloseSync(const v8::FunctionCallbackInfo& args); + // Releases ownership of the FD. 
static void ReleaseFD(const v8::FunctionCallbackInfo& args); @@ -499,19 +501,27 @@ inline FSReqBase* GetReqWrap(const v8::FunctionCallbackInfo& args, // Returns nullptr if the operation fails from the start. template -inline FSReqBase* AsyncDestCall(Environment* env, FSReqBase* req_wrap, +inline FSReqBase* AsyncDestCall(Environment* env, + FSReqBase* req_wrap, const v8::FunctionCallbackInfo& args, - const char* syscall, const char* dest, - size_t len, enum encoding enc, uv_fs_cb after, - Func fn, Args... fn_args); + const char* syscall, + const char* dest, + size_t len, + enum encoding enc, + uv_fs_cb after, + Func fn, + Args... fn_args); // Returns nullptr if the operation fails from the start. template inline FSReqBase* AsyncCall(Environment* env, FSReqBase* req_wrap, const v8::FunctionCallbackInfo& args, - const char* syscall, enum encoding enc, - uv_fs_cb after, Func fn, Args... fn_args); + const char* syscall, + enum encoding enc, + uv_fs_cb after, + Func fn, + Args... fn_args); // Template counterpart of SYNC_CALL, except that it only puts // the error number and the syscall in the context instead of diff --git a/src/node_options.cc b/src/node_options.cc index d48641ae3ffe07..55dfeab420844c 100644 --- a/src/node_options.cc +++ b/src/node_options.cc @@ -599,6 +599,10 @@ EnvironmentOptionsParser::EnvironmentOptionsParser() { &EnvironmentOptions::experimental_sqlite, kAllowedInEnvvar, true); + AddOption("--experimental-stream-iter", + "experimental iterable streams API (node:stream/iter)", + &EnvironmentOptions::experimental_stream_iter, + kAllowedInEnvvar); AddOption("--experimental-quic", #ifndef OPENSSL_NO_QUIC "experimental QUIC support", diff --git a/src/node_options.h b/src/node_options.h index 2f0adb5ae491ec..3cbb636c026cb2 100644 --- a/src/node_options.h +++ b/src/node_options.h @@ -127,6 +127,7 @@ class EnvironmentOptions : public Options { bool experimental_fetch = true; bool experimental_websocket = true; bool experimental_sqlite = true; + bool 
experimental_stream_iter = false; bool webstorage = HAVE_SQLITE; #ifndef OPENSSL_NO_QUIC bool experimental_quic = false; diff --git a/test/parallel/test-fs-promises-file-handle-pull.js b/test/parallel/test-fs-promises-file-handle-pull.js new file mode 100644 index 00000000000000..3fc531baf7138b --- /dev/null +++ b/test/parallel/test-fs-promises-file-handle-pull.js @@ -0,0 +1,440 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const fs = require('fs'); +const { open } = fs.promises; +const path = require('path'); +const tmpdir = require('../common/tmpdir'); +const { text, bytes } = require('stream/iter'); + +tmpdir.refresh(); + +const tmpDir = tmpdir.path; + +// ============================================================================= +// Basic pull() +// ============================================================================= + +async function testBasicPull() { + const filePath = path.join(tmpDir, 'pull-basic.txt'); + fs.writeFileSync(filePath, 'hello from file'); + + const fh = await open(filePath, 'r'); + try { + const readable = fh.pull(); + const data = await text(readable); + assert.strictEqual(data, 'hello from file'); + } finally { + await fh.close(); + } +} + +async function testPullBinary() { + const filePath = path.join(tmpDir, 'pull-binary.bin'); + const buf = Buffer.alloc(256); + for (let i = 0; i < 256; i++) buf[i] = i; + fs.writeFileSync(filePath, buf); + + const fh = await open(filePath, 'r'); + try { + const readable = fh.pull(); + const data = await bytes(readable); + assert.strictEqual(data.byteLength, 256); + for (let i = 0; i < 256; i++) { + assert.strictEqual(data[i], i); + } + } finally { + await fh.close(); + } +} + +async function testPullEmptyFile() { + const filePath = path.join(tmpDir, 'pull-empty.txt'); + fs.writeFileSync(filePath, ''); + + const fh = await open(filePath, 'r'); + try { + const readable = fh.pull(); + const data = await 
bytes(readable); + assert.strictEqual(data.byteLength, 0); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// Large file (multi-chunk) +// ============================================================================= + +async function testPullLargeFile() { + const filePath = path.join(tmpDir, 'pull-large.bin'); + // Write 64KB - enough for multiple 16KB read chunks + const size = 64 * 1024; + const buf = Buffer.alloc(size, 0x42); + fs.writeFileSync(filePath, buf); + + const fh = await open(filePath, 'r'); + try { + const readable = fh.pull(); + const data = await bytes(readable); + assert.strictEqual(data.byteLength, size); + // Verify content + for (let i = 0; i < data.byteLength; i++) { + assert.strictEqual(data[i], 0x42); + } + } finally { + await fh.close(); + } +} + +// ============================================================================= +// With transforms +// ============================================================================= + +async function testPullWithTransform() { + const filePath = path.join(tmpDir, 'pull-transform.txt'); + fs.writeFileSync(filePath, 'hello'); + + const fh = await open(filePath, 'r'); + try { + const upper = (chunks) => { + if (chunks === null) return null; + return chunks.map((c) => { + const str = new TextDecoder().decode(c); + return new TextEncoder().encode(str.toUpperCase()); + }); + }; + + const readable = fh.pull(upper); + const data = await text(readable); + assert.strictEqual(data, 'HELLO'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// autoClose option +// ============================================================================= + +async function testPullAutoClose() { + const filePath = path.join(tmpDir, 'pull-autoclose.txt'); + fs.writeFileSync(filePath, 'auto close data'); + + const fh = await open(filePath, 'r'); + const readable = fh.pull({ 
autoClose: true }); + const data = await text(readable); + assert.strictEqual(data, 'auto close data'); + + // After consuming with autoClose, the file handle should be closed + // Trying to read again should throw + await assert.rejects( + async () => { + await fh.stat(); + }, + (err) => err.code === 'ERR_INVALID_STATE' || err.code === 'EBADF', + ); +} + +// ============================================================================= +// Locking +// ============================================================================= + +async function testPullLocking() { + const filePath = path.join(tmpDir, 'pull-lock.txt'); + fs.writeFileSync(filePath, 'lock data'); + + const fh = await open(filePath, 'r'); + try { + // First pull locks the handle + const readable = fh.pull(); + + // Second pull while locked should throw + assert.throws( + () => fh.pull(), + { code: 'ERR_INVALID_STATE' }, + ); + + // Consume the first stream to unlock + await text(readable); + + // Now it should be usable again + const readable2 = fh.pull(); + const data = await text(readable2); + assert.strictEqual(data, ''); // Already read to end + } finally { + await fh.close(); + } +} + +// ============================================================================= +// Closed handle +// ============================================================================= + +async function testPullClosedHandle() { + const filePath = path.join(tmpDir, 'pull-closed.txt'); + fs.writeFileSync(filePath, 'data'); + + const fh = await open(filePath, 'r'); + await fh.close(); + + assert.throws( + () => fh.pull(), + { code: 'ERR_INVALID_STATE' }, + ); +} + +// ============================================================================= +// AbortSignal +// ============================================================================= + +async function testPullAbortSignal() { + const filePath = path.join(tmpDir, 'pull-abort.txt'); + // Write enough data that we can abort mid-stream + fs.writeFileSync(filePath, 
'a'.repeat(1024)); + + const ac = new AbortController(); + const fh = await open(filePath, 'r'); + try { + ac.abort(); + const readable = fh.pull({ signal: ac.signal }); + + await assert.rejects( + async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { + assert.fail('Should not reach here'); + } + }, + (err) => err.name === 'AbortError', + ); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// Iterate batches directly +// ============================================================================= + +async function testPullIterateBatches() { + const filePath = path.join(tmpDir, 'pull-batches.txt'); + fs.writeFileSync(filePath, 'batch data'); + + const fh = await open(filePath, 'r'); + try { + const readable = fh.pull(); + const batches = []; + for await (const batch of readable) { + batches.push(batch); + // Each batch should be an array of Uint8Array + assert.ok(Array.isArray(batch)); + for (const chunk of batch) { + assert.ok(chunk instanceof Uint8Array); + } + } + assert.ok(batches.length > 0); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with start option - read from specific position +// ============================================================================= + +async function testPullStart() { + const filePath = path.join(tmpDir, 'pull-start.txt'); + fs.writeFileSync(filePath, 'AAABBBCCC'); + + const fh = await open(filePath, 'r'); + try { + // Read from offset 3 + const data = await text(fh.pull({ start: 3 })); + assert.strictEqual(data, 'BBBCCC'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with limit option - read at most N bytes +// ============================================================================= + +async function testPullLimit() { + const 
filePath = path.join(tmpDir, 'pull-limit.txt'); + fs.writeFileSync(filePath, 'Hello, World! Extra data here.'); + + const fh = await open(filePath, 'r'); + try { + const data = await text(fh.pull({ limit: 13 })); + assert.strictEqual(data, 'Hello, World!'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with start + limit - read a slice +// ============================================================================= + +async function testPullStartAndLimit() { + const filePath = path.join(tmpDir, 'pull-start-limit.txt'); + fs.writeFileSync(filePath, 'AAABBBCCCDDD'); + + const fh = await open(filePath, 'r'); + try { + // Read 3 bytes starting at offset 3 + const data = await text(fh.pull({ start: 3, limit: 3 })); + assert.strictEqual(data, 'BBB'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with limit larger than file - reads whole file +// ============================================================================= + +async function testPullLimitLargerThanFile() { + const filePath = path.join(tmpDir, 'pull-limit-large.txt'); + fs.writeFileSync(filePath, 'short'); + + const fh = await open(filePath, 'r'); + try { + const data = await text(fh.pull({ limit: 1000000 })); + assert.strictEqual(data, 'short'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with limit spanning multiple chunks +// ============================================================================= + +async function testPullLimitMultiChunk() { + const filePath = path.join(tmpDir, 'pull-limit-multi.bin'); + // 300KB file - spans multiple 128KB reads + const input = Buffer.alloc(300 * 1024, 'x'); + fs.writeFileSync(filePath, input); + + const fh = await open(filePath, 'r'); + try { + // Read exactly 200KB from offset 50KB + const data 
= await bytes(fh.pull({ start: 50 * 1024, limit: 200 * 1024 })); + assert.strictEqual(data.byteLength, 200 * 1024); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with start + limit + transforms +// ============================================================================= + +async function testPullStartLimitWithTransforms() { + const filePath = path.join(tmpDir, 'pull-start-limit-transform.txt'); + fs.writeFileSync(filePath, 'aaabbbcccddd'); + + const fh = await open(filePath, 'r'); + try { + const { compressGzip, decompressGzip } = require('zlib/iter'); + const compressed = fh.pull(compressGzip(), { start: 3, limit: 6 }); + const decompressed = await text( + require('stream/iter').pull(compressed, decompressGzip())); + assert.strictEqual(decompressed, 'bbbccc'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pull() with chunkSize option +// ============================================================================= + +async function testPullChunkSize() { + const filePath = path.join(tmpDir, 'pull-chunksize.bin'); + // Write 64KB of data + const input = Buffer.alloc(64 * 1024, 'z'); + fs.writeFileSync(filePath, input); + + const fh = await open(filePath, 'r'); + try { + // Use 16KB chunks - should produce 4 batches + let batchCount = 0; + for await (const batch of fh.pull({ chunkSize: 16 * 1024 })) { + batchCount++; + for (const chunk of batch) { + assert.ok(chunk.byteLength <= 16 * 1024, + `Chunk ${chunk.byteLength} should be <= 16384`); + } + } + assert.strictEqual(batchCount, 4); + } finally { + await fh.close(); + } +} + +async function testPullChunkSizeSmall() { + const filePath = path.join(tmpDir, 'pull-chunksize-small.txt'); + fs.writeFileSync(filePath, 'hello'); + + const fh = await open(filePath, 'r'); + try { + // 1-byte chunks + let totalBytes = 0; + let batchCount = 0; + for 
await (const batch of fh.pull({ chunkSize: 1 })) { + batchCount++; + for (const chunk of batch) totalBytes += chunk.byteLength; + } + assert.strictEqual(totalBytes, 5); + assert.strictEqual(batchCount, 5); + } finally { + await fh.close(); + } +} + +async function testPullSyncArgumentValidation() { + const filePath = path.join(tmpDir, 'pull-arg-validation.txt'); + fs.writeFileSync(filePath, 'data'); + + const fh = await open(filePath, 'r'); + try { + assert.throws(() => fh.pull({ autoClose: 'no' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pull({ start: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pull({ limit: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pull({ chunkSize: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pull({ signal: {} }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pull({ start: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.pull({ limit: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.pull({ chunkSize: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + } finally { + await fh.close(); + } +} + +Promise.all([ + testBasicPull(), + testPullBinary(), + testPullEmptyFile(), + testPullLargeFile(), + testPullWithTransform(), + testPullAutoClose(), + testPullLocking(), + testPullClosedHandle(), + testPullAbortSignal(), + testPullIterateBatches(), + testPullStart(), + testPullLimit(), + testPullStartAndLimit(), + testPullLimitLargerThanFile(), + testPullLimitMultiChunk(), + testPullStartLimitWithTransforms(), + testPullChunkSize(), + testPullChunkSizeSmall(), + testPullSyncArgumentValidation(), +]).then(common.mustCall()); diff --git a/test/parallel/test-fs-promises-file-handle-pullsync.js b/test/parallel/test-fs-promises-file-handle-pullsync.js new file mode 100644 index 00000000000000..20c429972573d6 --- /dev/null +++ b/test/parallel/test-fs-promises-file-handle-pullsync.js @@ -0,0 +1,498 @@ +// Flags: --experimental-stream-iter 
'use strict';

// Tests for the experimental FileHandle.prototype.pullSync() API
// (enabled via --experimental-stream-iter, see the Flags comment above).
// Each test creates its own fixture file under tmpdir and cleans up the
// FileHandle it opens; handles are closed in `finally` unless the test is
// specifically about autoClose semantics.

const common = require('../common');
const assert = require('assert');
const fs = require('fs');
const { open } = fs.promises;
const path = require('path');
const tmpdir = require('../common/tmpdir');
const {
  textSync,
  bytesSync,
  pipeToSync,
  pullSync,
} = require('stream/iter');
const {
  compressGzipSync,
  decompressGzipSync,
} = require('zlib/iter');

tmpdir.refresh();

const tmpDir = tmpdir.path;

// =============================================================================
// Basic pullSync() - whole-file read collected via textSync()
// =============================================================================

async function testBasicPullSync() {
  const filePath = path.join(tmpDir, 'pullsync-basic.txt');
  fs.writeFileSync(filePath, 'hello from sync file read');

  const fh = await open(filePath, 'r');
  try {
    const data = textSync(fh.pullSync());
    assert.strictEqual(data, 'hello from sync file read');
  } finally {
    await fh.close();
  }
}

// =============================================================================
// Large file (multi-chunk) - input big enough to require several reads
// =============================================================================

async function testLargeFile() {
  const filePath = path.join(tmpDir, 'pullsync-large.txt');
  // ~220KB of text; presumably larger than a single internal read chunk,
  // so this exercises multi-batch iteration - TODO confirm default chunk size.
  const input = 'sync large data test. '.repeat(10000);
  fs.writeFileSync(filePath, input);

  const fh = await open(filePath, 'r');
  try {
    const data = textSync(fh.pullSync());
    assert.strictEqual(data, input);
  } finally {
    await fh.close();
  }
}

// =============================================================================
// Binary data round-trip - every byte value 0x00-0xff must survive
// =============================================================================

async function testBinaryData() {
  const filePath = path.join(tmpDir, 'pullsync-binary.bin');
  // 200KB with a repeating 0..255 byte pattern so any corruption or
  // truncation shows up in the deep comparison below.
  const input = Buffer.alloc(200000);
  for (let i = 0; i < input.length; i++) input[i] = i & 0xff;
  fs.writeFileSync(filePath, input);

  const fh = await open(filePath, 'r');
  try {
    const data = bytesSync(fh.pullSync());
    assert.deepStrictEqual(Buffer.from(data), input);
  } finally {
    await fh.close();
  }
}

// =============================================================================
// pullSync with sync compression transform round-trip
// =============================================================================

async function testPullSyncWithCompression() {
  const filePath = path.join(tmpDir, 'pullsync-compress-src.txt');
  const dstPath = path.join(tmpDir, 'pullsync-compress-dst.gz');
  // Highly repetitive input so gzip is guaranteed to shrink it.
  const input = 'compress via sync pullSync. '.repeat(1000);
  fs.writeFileSync(filePath, input);

  // Compress: pullSync -> compressGzipSync -> write to file
  const srcFh = await open(filePath, 'r');
  const dstFh = await open(dstPath, 'w');
  try {
    const w = dstFh.writer();
    pipeToSync(srcFh.pullSync(compressGzipSync()), w);
  } finally {
    await srcFh.close();
    await dstFh.close();
  }

  // Verify compressed file is smaller
  const compressedSize = fs.statSync(dstPath).size;
  assert.ok(compressedSize < Buffer.byteLength(input),
            `Compressed ${compressedSize} should be < original ` +
            `${Buffer.byteLength(input)}`);

  // Decompress and verify
  const readFh = await open(dstPath, 'r');
  try {
    const result = textSync(readFh.pullSync(decompressGzipSync()));
    assert.strictEqual(result, input);
  } finally {
    await readFh.close();
  }
}

// =============================================================================
// pullSync with stateless transform (chunks-in/chunks-out function)
// =============================================================================

async function testPullSyncWithStatelessTransform() {
  const filePath = path.join(tmpDir, 'pullsync-upper.txt');
  fs.writeFileSync(filePath, 'hello world');

  // Stateless uppercase transform: receives an array of chunks (or null at
  // end-of-stream) and returns the transformed array (or null).
  const upper = (chunks) => {
    if (chunks === null) return null;
    const out = new Array(chunks.length);
    for (let j = 0; j < chunks.length; j++) {
      const src = chunks[j];
      const buf = Buffer.allocUnsafe(src.length);
      for (let i = 0; i < src.length; i++) {
        const b = src[i];
        // ASCII lowercase a-z -> A-Z; everything else passes through.
        buf[i] = (b >= 0x61 && b <= 0x7a) ? b - 0x20 : b;
      }
      out[j] = buf;
    }
    return out;
  };

  const fh = await open(filePath, 'r');
  try {
    const data = textSync(fh.pullSync(upper));
    assert.strictEqual(data, 'HELLO WORLD');
  } finally {
    await fh.close();
  }
}

// =============================================================================
// pullSync with mixed stateless + stateful transforms
// =============================================================================

async function testPullSyncMixedTransforms() {
  const filePath = path.join(tmpDir, 'pullsync-mixed.txt');
  const input = 'mixed transform test '.repeat(500);
  fs.writeFileSync(filePath, input);

  // Same stateless uppercase transform as above (duplicated locally so the
  // test is self-contained).
  const upper = (chunks) => {
    if (chunks === null) return null;
    const out = new Array(chunks.length);
    for (let j = 0; j < chunks.length; j++) {
      const src = chunks[j];
      const buf = Buffer.allocUnsafe(src.length);
      for (let i = 0; i < src.length; i++) {
        const b = src[i];
        buf[i] = (b >= 0x61 && b <= 0x7a) ? b - 0x20 : b;
      }
      out[j] = buf;
    }
    return out;
  };

  const fh = await open(filePath, 'r');
  try {
    // Upper + compress (inside pullSync), then decompress via the
    // free-standing pullSync() helper - round-trips to uppercase input.
    const data = textSync(
      pullSync(fh.pullSync(upper, compressGzipSync()), decompressGzipSync()),
    );
    assert.strictEqual(data, input.toUpperCase());
  } finally {
    await fh.close();
  }
}

// =============================================================================
// autoClose: true - handle closed after iteration completes
// =============================================================================

async function testAutoClose() {
  const filePath = path.join(tmpDir, 'pullsync-autoclose.txt');
  fs.writeFileSync(filePath, 'auto close test');

  // No try/finally on purpose: autoClose must close the handle for us.
  const fh = await open(filePath, 'r');
  const data = textSync(fh.pullSync({ autoClose: true }));
  assert.strictEqual(data, 'auto close test');

  // Handle should be closed; stat() on a closed handle rejects with EBADF.
  await assert.rejects(fh.stat(), { code: 'EBADF' });
}

// =============================================================================
// autoClose: true with early break - close must happen even when the
// consumer abandons the iterator before end-of-file
// =============================================================================

async function testAutoCloseEarlyBreak() {
  const filePath = path.join(tmpDir, 'pullsync-autoclose-break.txt');
  // 1MB fixture guarantees the single batch consumed below is not the
  // final one, so close happens via iterator return, not normal completion.
  fs.writeFileSync(filePath, 'x'.repeat(1000000));

  const fh = await open(filePath, 'r');
  // eslint-disable-next-line no-unused-vars
  for (const batch of fh.pullSync({ autoClose: true })) {
    break; // Early exit
  }

  // Handle should be closed by autoClose
  await assert.rejects(fh.stat(), { code: 'EBADF' });
}

// =============================================================================
// autoClose: false (default) - handle stays open
// =============================================================================

async function testNoAutoClose() {
  const filePath = path.join(tmpDir, 'pullsync-no-autoclose.txt');
  fs.writeFileSync(filePath, 'still open');

  const fh = await open(filePath, 'r');
  const data = textSync(fh.pullSync());
  assert.strictEqual(data, 'still open');

  // Handle should still be open and reusable
  const stat = await fh.stat();
  assert.ok(stat.size > 0);
  await fh.close();
}

// =============================================================================
// Lock semantics - pullSync locks the handle while iteration is in flight;
// both pullSync() and pull() must refuse with ERR_INVALID_STATE until the
// iterator is drained
// =============================================================================

async function testLocked() {
  const filePath = path.join(tmpDir, 'pullsync-locked.txt');
  fs.writeFileSync(filePath, 'lock test');

  const fh = await open(filePath, 'r');
  const iter = fh.pullSync()[Symbol.iterator]();
  iter.next(); // Start iteration, handle is locked

  assert.throws(() => fh.pullSync(), {
    code: 'ERR_INVALID_STATE',
  });

  assert.throws(() => fh.pull(), {
    code: 'ERR_INVALID_STATE',
  });

  // Finish iteration to unlock
  while (!iter.next().done) { /* drain */ }
  await fh.close();
}
+ +// ============================================================================= +// Empty file +// ============================================================================= + +async function testEmptyFile() { + const filePath = path.join(tmpDir, 'pullsync-empty.txt'); + fs.writeFileSync(filePath, ''); + + const fh = await open(filePath, 'r'); + try { + const data = textSync(fh.pullSync()); + assert.strictEqual(data, ''); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pipeToSync: file-to-file sync pipeline +// ============================================================================= + +async function testPipeToSync() { + const srcPath = path.join(tmpDir, 'pullsync-pipeto-src.txt'); + const dstPath = path.join(tmpDir, 'pullsync-pipeto-dst.txt'); + const input = 'pipeToSync test data '.repeat(200); + fs.writeFileSync(srcPath, input); + + const srcFh = await open(srcPath, 'r'); + const dstFh = await open(dstPath, 'w'); + try { + const w = dstFh.writer(); + pipeToSync(srcFh.pullSync(), w); + } finally { + await srcFh.close(); + await dstFh.close(); + } + + assert.strictEqual(fs.readFileSync(dstPath, 'utf8'), input); +} + +// ============================================================================= +// pullSync() with start option +// ============================================================================= + +async function testPullSyncStart() { + const filePath = path.join(tmpDir, 'pullsync-start.txt'); + fs.writeFileSync(filePath, 'AAABBBCCC'); + + const fh = await open(filePath, 'r'); + try { + const data = textSync(fh.pullSync({ start: 3 })); + assert.strictEqual(data, 'BBBCCC'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync() with limit option +// ============================================================================= + +async function testPullSyncLimit() { + const 
filePath = path.join(tmpDir, 'pullsync-limit.txt'); + fs.writeFileSync(filePath, 'Hello, World! Extra data here.'); + + const fh = await open(filePath, 'r'); + try { + const data = textSync(fh.pullSync({ limit: 13 })); + assert.strictEqual(data, 'Hello, World!'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync() with start + limit +// ============================================================================= + +async function testPullSyncStartAndLimit() { + const filePath = path.join(tmpDir, 'pullsync-start-limit.txt'); + fs.writeFileSync(filePath, 'AAABBBCCCDDD'); + + const fh = await open(filePath, 'r'); + try { + const data = textSync(fh.pullSync({ start: 3, limit: 3 })); + assert.strictEqual(data, 'BBB'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync() with limit spanning multiple chunks +// ============================================================================= + +async function testPullSyncLimitMultiChunk() { + const filePath = path.join(tmpDir, 'pullsync-limit-multi.bin'); + const input = Buffer.alloc(300 * 1024, 'x'); + fs.writeFileSync(filePath, input); + + const fh = await open(filePath, 'r'); + try { + const data = bytesSync(fh.pullSync({ start: 50 * 1024, limit: 200 * 1024 })); + assert.strictEqual(data.byteLength, 200 * 1024); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync() with start + limit + compression transform +// ============================================================================= + +async function testPullSyncStartLimitWithTransforms() { + const filePath = path.join(tmpDir, 'pullsync-start-limit-transform.txt'); + fs.writeFileSync(filePath, 'aaabbbcccddd'); + + const fh = await open(filePath, 'r'); + try { + const compressed = fh.pullSync(compressGzipSync(), 
+ { start: 3, limit: 6 }); + const decompressed = textSync(pullSync(compressed, decompressGzipSync())); + assert.strictEqual(decompressed, 'bbbccc'); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// pullSync() with start + autoClose +// ============================================================================= + +async function testPullSyncStartAutoClose() { + const filePath = path.join(tmpDir, 'pullsync-start-autoclose.txt'); + fs.writeFileSync(filePath, 'AAABBBCCC'); + + const fh = await open(filePath, 'r'); + const data = textSync(fh.pullSync({ start: 3, autoClose: true })); + assert.strictEqual(data, 'BBBCCC'); + + // Handle should be closed + await assert.rejects(fh.stat(), { code: 'EBADF' }); +} + +// ============================================================================= +// pullSync() with chunkSize option +// ============================================================================= + +async function testPullSyncChunkSize() { + const filePath = path.join(tmpDir, 'pullsync-chunksize.bin'); + const input = Buffer.alloc(64 * 1024, 'z'); + fs.writeFileSync(filePath, input); + + const fh = await open(filePath, 'r'); + try { + let batchCount = 0; + for (const batch of fh.pullSync({ chunkSize: 16 * 1024 })) { + batchCount++; + for (const chunk of batch) { + assert.ok(chunk.byteLength <= 16 * 1024, + `Chunk ${chunk.byteLength} should be <= 16384`); + } + } + assert.strictEqual(batchCount, 4); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// writer() with chunkSize option (sync write threshold) +// ============================================================================= + +async function testWriterChunkSize() { + const filePath = path.join(tmpDir, 'pullsync-writer-chunksize.txt'); + const fh = await open(filePath, 'w'); + // Set chunkSize to 1024 - writes larger than this should fall back to 
async + const w = fh.writer({ chunkSize: 1024 }); + + // Small write should succeed sync + assert.strictEqual(w.writeSync(Buffer.alloc(512, 'a')), true); + + // Write larger than chunkSize should return false + assert.strictEqual(w.writeSync(Buffer.alloc(2048, 'b')), false); + + await w.end(); + await fh.close(); +} + +// ============================================================================= +// Argument validation +// ============================================================================= + +async function testPullArgumentValidation() { + const filePath = path.join(tmpDir, 'pull-arg-validation.txt'); + fs.writeFileSync(filePath, 'data'); + + const fh = await open(filePath, 'r'); + try { + assert.throws(() => fh.pullSync({ autoClose: 'no' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pullSync({ start: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pullSync({ limit: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pullSync({ chunkSize: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.pullSync({ start: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.pullSync({ limit: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.pullSync({ chunkSize: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// Run all tests +// ============================================================================= + +Promise.all([ + testBasicPullSync(), + testLargeFile(), + testBinaryData(), + testPullSyncWithCompression(), + testPullSyncWithStatelessTransform(), + testPullSyncMixedTransforms(), + testAutoClose(), + testAutoCloseEarlyBreak(), + testNoAutoClose(), + testLocked(), + testEmptyFile(), + testPipeToSync(), + testPullSyncStart(), + testPullSyncLimit(), + testPullSyncStartAndLimit(), + testPullSyncLimitMultiChunk(), + testPullSyncStartLimitWithTransforms(), 
+ testPullSyncStartAutoClose(), + testPullSyncChunkSize(), + testWriterChunkSize(), + testPullArgumentValidation(), +]).then(common.mustCall()); diff --git a/test/parallel/test-fs-promises-file-handle-writer.js b/test/parallel/test-fs-promises-file-handle-writer.js new file mode 100644 index 00000000000000..95ba8756fbe3cd --- /dev/null +++ b/test/parallel/test-fs-promises-file-handle-writer.js @@ -0,0 +1,1126 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const fs = require('fs'); +const { open } = fs.promises; +const path = require('path'); +const tmpdir = require('../common/tmpdir'); +const { + pipeTo, text, +} = require('stream/iter'); +const { + compressGzip, decompressGzip, +} = require('zlib/iter'); + +tmpdir.refresh(); + +const tmpDir = tmpdir.path; + +// ============================================================================= +// Basic write() +// ============================================================================= + +async function testBasicWrite() { + const filePath = path.join(tmpDir, 'writer-basic.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + await w.write(Buffer.from('Hello ')); + await w.write(Buffer.from('World!')); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 12); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'Hello World!'); +} + +// ============================================================================= +// Basic writev() +// ============================================================================= + +async function testBasicWritev() { + const filePath = path.join(tmpDir, 'writer-writev.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + await w.writev([ + Buffer.from('aaa'), + Buffer.from('bbb'), + Buffer.from('ccc'), + ]); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 9); + 
assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'aaabbbccc'); +} + +// ============================================================================= +// Mixed write() and writev() +// ============================================================================= + +async function testMixedWriteAndWritev() { + const filePath = path.join(tmpDir, 'writer-mixed.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + await w.write(Buffer.from('head-')); + await w.writev([Buffer.from('mid1-'), Buffer.from('mid2-')]); + await w.write(Buffer.from('tail')); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 19); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'head-mid1-mid2-tail'); +} + +// ============================================================================= +// end() returns totalBytesWritten +// ============================================================================= + +async function testEndReturnsTotalBytes() { + const filePath = path.join(tmpDir, 'writer-totalbytes.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + // Write some data in various sizes + const sizes = [100, 200, 300, 400, 500]; + let expected = 0; + for (const size of sizes) { + await w.write(Buffer.alloc(size, 0x41)); + expected += size; + } + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, expected); + assert.strictEqual(totalBytes, 1500); + assert.strictEqual(fs.statSync(filePath).size, 1500); +} + +// ============================================================================= +// autoClose: true - handle closed after end() +// ============================================================================= + +async function testAutoCloseOnEnd() { + const filePath = path.join(tmpDir, 'writer-autoclose-end.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ autoClose: true }); + await w.write(Buffer.from('auto close test')); + await 
w.end(); + + // Handle should be closed + await assert.rejects(fh.stat(), { code: 'EBADF' }); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'auto close test'); +} + +// ============================================================================= +// autoClose: true - handle closed after fail() +// ============================================================================= + +async function testAutoCloseOnFail() { + const filePath = path.join(tmpDir, 'writer-autoclose-fail.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ autoClose: true }); + await w.write(Buffer.from('partial')); + w.fail(new Error('test fail')); + + // Handle should be closed + await assert.rejects(fh.stat(), { code: 'EBADF' }); + // Partial data should still be on disk (fail doesn't truncate) + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'partial'); +} + +// ============================================================================= +// start option - write at specified offset +// ============================================================================= + +async function testStartOption() { + const filePath = path.join(tmpDir, 'writer-start.txt'); + // Pre-fill with 10 A's + fs.writeFileSync(filePath, 'AAAAAAAAAA'); + + const fh = await open(filePath, 'r+'); + const w = fh.writer({ start: 3 }); + await w.write(Buffer.from('BBB')); + await w.end(); + await fh.close(); + + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'AAABBBAAAA'); +} + +// ============================================================================= +// start option - sequential writes advance position +// ============================================================================= + +async function testStartSequentialPosition() { + const filePath = path.join(tmpDir, 'writer-start-seq.txt'); + fs.writeFileSync(filePath, 'XXXXXXXXXX'); + + const fh = await open(filePath, 'r+'); + const w = fh.writer({ start: 2 }); + await w.write(Buffer.from('AA')); + await 
w.write(Buffer.from('BB')); + await w.writev([Buffer.from('C'), Buffer.from('D')]); + await w.end(); + await fh.close(); + + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'XXAABBCDXX'); +} + +// ============================================================================= +// Locked state - can't create second writer while active +// ============================================================================= + +async function testLockedState() { + const filePath = path.join(tmpDir, 'writer-locked.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + assert.throws(() => fh.writer(), { + name: 'Error', + message: /locked/, + }); + + // Also can't pull while writer is active + assert.throws(() => fh.pull(), { + name: 'Error', + message: /locked/, + }); + + await w.end(); + await fh.close(); +} + +// ============================================================================= +// Unlock after end - handle reusable +// ============================================================================= + +async function testUnlockAfterEnd() { + const filePath = path.join(tmpDir, 'writer-unlock.txt'); + const fh = await open(filePath, 'w'); + + const w1 = fh.writer(); + await w1.write(Buffer.from('first')); + await w1.end(); + + // Should work - handle is unlocked + const w2 = fh.writer(); + await w2.write(Buffer.from(' second')); + await w2.end(); + await fh.close(); + + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'first second'); +} + +// ============================================================================= +// Unlock after fail - handle reusable +// ============================================================================= + +async function testUnlockAfterFail() { + const filePath = path.join(tmpDir, 'writer-unlock-fail.txt'); + const fh = await open(filePath, 'w'); + + const w1 = fh.writer(); + await w1.write(Buffer.from('failed')); + await w1.fail(new Error('test')); + + // Should work - handle is unlocked + const w2 = 
fh.writer(); + await w2.write(Buffer.from('recovered')); + await w2.end(); + await fh.close(); + + // 'recovered' is appended after 'failed' at current file offset + const content = fs.readFileSync(filePath, 'utf8'); + assert.ok(content.startsWith('failed')); + assert.ok(content.includes('recovered')); +} + +// ============================================================================= +// Write after end/fail rejects +// ============================================================================= + +async function testWriteAfterEndRejects() { + const filePath = path.join(tmpDir, 'writer-closed.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + await w.write(Buffer.from('data')); + await w.end(); + + await assert.rejects(w.write(Buffer.from('more')), { + name: 'TypeError', + message: /closed/, + }); + await assert.rejects(w.writev([Buffer.from('more')]), { + name: 'TypeError', + message: /closed/, + }); + + await fh.close(); +} + +// ============================================================================= +// Closed handle - writer() throws +// ============================================================================= + +async function testClosedHandle() { + const filePath = path.join(tmpDir, 'writer-closed-handle.txt'); + const fh = await open(filePath, 'w'); + await fh.close(); + + assert.throws(() => fh.writer(), { + name: 'Error', + message: /closed/, + }); +} + +// ============================================================================= +// pipeTo() integration - pipe source through writer +// ============================================================================= + +async function testPipeToIntegration() { + const srcPath = path.join(tmpDir, 'writer-pipeto-src.txt'); + const dstPath = path.join(tmpDir, 'writer-pipeto-dst.txt'); + const data = 'The quick brown fox jumps over the lazy dog.\n'.repeat(500); + fs.writeFileSync(srcPath, data); + + const rfh = await open(srcPath, 'r'); + const wfh = await open(dstPath, 
'w'); + const w = wfh.writer(); + + const totalBytes = await pipeTo(rfh.pull(), w); + + await rfh.close(); + await wfh.close(); + + assert.strictEqual(totalBytes, Buffer.byteLength(data)); + assert.strictEqual(fs.readFileSync(dstPath, 'utf8'), data); +} + +// ============================================================================= +// pipeTo() with transforms - uppercase through writer +// ============================================================================= + +async function testPipeToWithTransform() { + const srcPath = path.join(tmpDir, 'writer-transform-src.txt'); + const dstPath = path.join(tmpDir, 'writer-transform-dst.txt'); + const data = 'hello world from transforms test\n'.repeat(200); + fs.writeFileSync(srcPath, data); + + function uppercase(chunks) { + if (chunks === null) return null; + const out = new Array(chunks.length); + for (let i = 0; i < chunks.length; i++) { + const src = chunks[i]; + const buf = Buffer.allocUnsafe(src.length); + for (let j = 0; j < src.length; j++) { + const b = src[j]; + buf[j] = (b >= 0x61 && b <= 0x7a) ? b - 0x20 : b; + } + out[i] = buf; + } + return out; + } + + const rfh = await open(srcPath, 'r'); + const wfh = await open(dstPath, 'w'); + const w = wfh.writer(); + + await pipeTo(rfh.pull(), uppercase, w); + + await rfh.close(); + await wfh.close(); + + assert.strictEqual(fs.readFileSync(dstPath, 'utf8'), data.toUpperCase()); +} + +// ============================================================================= +// Round-trip: pull → compress → writer, pull → decompress → verify +// ============================================================================= + +async function testCompressRoundTrip() { + const srcPath = path.join(tmpDir, 'writer-rt-src.txt'); + const gzPath = path.join(tmpDir, 'writer-rt.gz'); + const original = 'Round trip compression test data. 
'.repeat(2000); + fs.writeFileSync(srcPath, original); + + // Compress: pull → gzip → writer + { + const rfh = await open(srcPath, 'r'); + const wfh = await open(gzPath, 'w'); + const w = wfh.writer({ autoClose: true }); + await pipeTo(rfh.pull(), compressGzip(), w); + await rfh.close(); + } + + // Verify compressed file is smaller + const compressedSize = fs.statSync(gzPath).size; + assert.ok(compressedSize < Buffer.byteLength(original), + `Compressed ${compressedSize} should be < original ${Buffer.byteLength(original)}`); + + // Decompress: pull → gunzip → text → verify + { + const rfh = await open(gzPath, 'r'); + const result = await text(rfh.pull(decompressGzip())); + await rfh.close(); + assert.strictEqual(result, original); + } +} + +// ============================================================================= +// Large file write - write 1MB in 64KB chunks +// ============================================================================= + +async function testLargeFileWrite() { + const filePath = path.join(tmpDir, 'writer-large.bin'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + const chunkSize = 65536; + const totalSize = 1024 * 1024; // 1MB + const chunk = Buffer.alloc(chunkSize, 0x42); + let written = 0; + + while (written < totalSize) { + await w.write(chunk); + written += chunkSize; + } + + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, totalSize); + assert.strictEqual(fs.statSync(filePath).size, totalSize); + + // Verify content + const data = fs.readFileSync(filePath); + for (let i = 0; i < data.length; i++) { + if (data[i] !== 0x42) { + assert.fail(`Byte at offset ${i} is ${data[i]}, expected 0x42`); + } + } +} + +// ============================================================================= +// Symbol.asyncDispose - await using +// ============================================================================= + +async function testAsyncDispose() { + const filePath = 
path.join(tmpDir, 'writer-async-dispose.txt'); + { + await using fh = await open(filePath, 'w'); + await using w = fh.writer({ autoClose: true }); + await w.write(Buffer.from('async dispose')); + } + // Both writer and file handle should be cleaned up + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'async dispose'); + + // Verify the handle is actually closed by trying to open a new one + // (if the old one were still open with a write lock on some OSes, + // this could fail - but it should succeed). + const fh2 = await open(filePath, 'r'); + await fh2.close(); +} + +// ============================================================================= +// Symbol.asyncDispose - cleanup on error (await using unwinds) +// ============================================================================= + +async function testAsyncDisposeOnError() { + const filePath = path.join(tmpDir, 'writer-dispose-error.txt'); + const fh = await open(filePath, 'w'); + + try { + await using w = fh.writer(); + await w.write(Buffer.from('before error')); + throw new Error('intentional'); + } catch (e) { + assert.strictEqual(e.message, 'intentional'); + } + + // If asyncDispose ran, the handle should be unlocked and reusable + const w2 = fh.writer(); + await w2.write(Buffer.from('after error')); + await w2.end(); + await fh.close(); + + const content = fs.readFileSync(filePath, 'utf8'); + assert.ok(content.includes('after error'), + `Expected 'after error' in ${JSON.stringify(content)}`); +} + +// ============================================================================= +// Pre-aborted signal rejects write/writev/end +// ============================================================================= + +async function testWriteWithAbortedSignalRejects() { + const filePath = path.join(tmpDir, 'writer-signal-write.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + const ac = new AbortController(); + ac.abort(); + + await assert.rejects( + 
w.write(Buffer.from('data'), { signal: ac.signal }), + { name: 'AbortError' }, + ); + + // Writer should still be usable after a signal rejection + await w.write(Buffer.from('ok')); + await w.end(); + await fh.close(); + + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'ok'); +} + +async function testWritevWithAbortedSignalRejects() { + const filePath = path.join(tmpDir, 'writer-signal-writev.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + const ac = new AbortController(); + ac.abort(); + + await assert.rejects( + w.writev([Buffer.from('a'), Buffer.from('b')], { signal: ac.signal }), + { name: 'AbortError' }, + ); + + await w.writev([Buffer.from('ok')]); + await w.end(); + await fh.close(); + + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'ok'); +} + +async function testEndWithAbortedSignalRejects() { + const filePath = path.join(tmpDir, 'writer-signal-end.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + await w.write(Buffer.from('data')); + + const ac = new AbortController(); + ac.abort(); + + await assert.rejects( + w.end({ signal: ac.signal }), + { name: 'AbortError' }, + ); + + // end() was rejected so writer is still open - end it cleanly + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 4); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'data'); +} + +// ============================================================================= +// write() with string input (UTF-8 encoding) +// ============================================================================= + +async function testWriteString() { + const filePath = path.join(tmpDir, 'writer-string.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + await w.write('Hello '); + await w.write('World!'); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 12); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'Hello World!'); +} + 
+// ============================================================================= +// write() with string containing multi-byte UTF-8 characters +// ============================================================================= + +async function testWriteStringMultibyte() { + const filePath = path.join(tmpDir, 'writer-string-multibyte.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + const input = 'café ☕ 日本語'; + await w.write(input); + const totalBytes = await w.end(); + await fh.close(); + + const expected = Buffer.from(input, 'utf8'); + assert.strictEqual(totalBytes, expected.byteLength); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), input); +} + +// ============================================================================= +// writev() with string chunks (UTF-8 encoding) +// ============================================================================= + +async function testWritevStrings() { + const filePath = path.join(tmpDir, 'writer-writev-strings.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + await w.writev(['aaa', 'bbb', 'ccc']); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 9); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'aaabbbccc'); +} + +// ============================================================================= +// writev() with mixed string and Uint8Array chunks +// ============================================================================= + +async function testWritevMixed() { + const filePath = path.join(tmpDir, 'writer-writev-mixed.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + await w.writev(['hello', Buffer.from(' '), 'world']); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 11); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'hello world'); +} + +// ============================================================================= +// 
Symbol.dispose calls fail() +// ============================================================================= + +async function testSyncDispose() { + const filePath = path.join(tmpDir, 'writer-sync-dispose.txt'); + const fh = await open(filePath, 'w'); + + { + using w = fh.writer(); + await w.write(Buffer.from('before dispose')); + } + // Symbol.dispose calls fail(), which unlocks the handle. + // The handle should be reusable. + const w2 = fh.writer(); + await w2.write(Buffer.from('after dispose')); + await w2.end(); + await fh.close(); + + const content = fs.readFileSync(filePath, 'utf8'); + assert.ok(content.includes('after dispose'), + `Expected 'after dispose' in ${JSON.stringify(content)}`); +} + +// ============================================================================= +// Symbol.dispose on error unwind +// ============================================================================= + +async function testSyncDisposeOnError() { + const filePath = path.join(tmpDir, 'writer-sync-dispose-error.txt'); + const fh = await open(filePath, 'w'); + + try { + using w = fh.writer(); + await w.write(Buffer.from('data')); + throw new Error('intentional'); + } catch (e) { + assert.strictEqual(e.message, 'intentional'); + } + + // Handle should be unlocked and reusable after sync dispose + const w2 = fh.writer(); + await w2.write(Buffer.from('recovered')); + await w2.end(); + await fh.close(); + + const content = fs.readFileSync(filePath, 'utf8'); + assert.ok(content.includes('recovered'), + `Expected 'recovered' in ${JSON.stringify(content)}`); +} + +// ============================================================================= +// writeSync() basic +// ============================================================================= + +async function testWriteSyncBasic() { + const filePath = path.join(tmpDir, 'writer-writesync-basic.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + assert.strictEqual(w.writeSync('Hello '), true); + 
assert.strictEqual(w.writeSync(Buffer.from('World!')), true); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 12); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'Hello World!'); +} + +// ============================================================================= +// writevSync() basic +// ============================================================================= + +async function testWritevSyncBasic() { + const filePath = path.join(tmpDir, 'writer-writevsync-basic.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + assert.strictEqual(w.writevSync(['aaa', Buffer.from('bbb'), 'ccc']), true); + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 9); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'aaabbbccc'); +} + +// ============================================================================= +// writeSync() returns false for large chunks +// ============================================================================= + +async function testWriteSyncLargeChunk() { + const filePath = path.join(tmpDir, 'writer-writesync-large.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + // Chunk larger than 131072 should return false + const bigChunk = Buffer.alloc(131073, 'x'); + assert.strictEqual(w.writeSync(bigChunk), false); + + // Chunk at exactly 131072 should succeed + const exactChunk = Buffer.alloc(131072, 'y'); + assert.strictEqual(w.writeSync(exactChunk), true); + + await w.end(); + await fh.close(); + + // Only the exact chunk should have been written + const content = fs.readFileSync(filePath); + assert.strictEqual(content.length, 131072); +} + +// ============================================================================= +// writeSync() returns false when async op is in flight +// ============================================================================= + +async function 
testWriteSyncReturnsFalseDuringAsync() { + const filePath = path.join(tmpDir, 'writer-writesync-async.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + // Start an async write but don't await yet + const p = w.write(Buffer.from('async')); + + // Sync write should return false because async is in flight + assert.strictEqual(w.writeSync(Buffer.from('sync')), false); + + await p; + + // After async completes, sync should work again + assert.strictEqual(w.writeSync(Buffer.from(' then sync')), true); + + await w.end(); + await fh.close(); + + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'async then sync'); +} + +// ============================================================================= +// writeSync() returns false on closed/errored writer +// ============================================================================= + +async function testWriteSyncClosedErrored() { + const filePath = path.join(tmpDir, 'writer-writesync-closed.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + await w.end(); + + // Should return false after end() + assert.strictEqual(w.writeSync(Buffer.from('data')), false); + await fh.close(); + + // Test errored state + const fh2 = await open(filePath, 'w'); + const w2 = fh2.writer(); + w2.fail(new Error('test')); + assert.strictEqual(w2.writeSync(Buffer.from('data')), false); + await fh2.close(); +} + +// ============================================================================= +// endSync() basic +// ============================================================================= + +async function testEndSyncBasic() { + const filePath = path.join(tmpDir, 'writer-endsync-basic.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + w.writeSync(Buffer.from('hello')); + const totalBytes = w.endSync(); + await fh.close(); + + assert.strictEqual(totalBytes, 5); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'hello'); +} + +// 
============================================================================= +// endSync() returns -1 when async op is in flight +// ============================================================================= + +async function testEndSyncReturnsFalseDuringAsync() { + const filePath = path.join(tmpDir, 'writer-endsync-async.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + const p = w.write(Buffer.from('data')); + assert.strictEqual(w.endSync(), -1); + + await p; + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 4); +} + +// ============================================================================= +// endSync() idempotent on closed writer +// ============================================================================= + +async function testEndSyncIdempotent() { + const filePath = path.join(tmpDir, 'writer-endsync-idempotent.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + w.writeSync(Buffer.from('data')); + const first = w.endSync(); + const second = w.endSync(); + + assert.strictEqual(first, 4); + assert.strictEqual(second, 4); // Idempotent + await fh.close(); +} + +// ============================================================================= +// endSync() with autoClose fires handle.close() +// ============================================================================= + +async function testEndSyncAutoClose() { + const filePath = path.join(tmpDir, 'writer-endsync-autoclose.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ autoClose: true }); + + w.writeSync(Buffer.from('auto')); + const totalBytes = w.endSync(); + + assert.strictEqual(totalBytes, 4); + + // Handle should be closed synchronously + await assert.rejects(fh.stat(), { code: 'EBADF' }); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'auto'); +} + +// ============================================================================= +// Full sync pipeline: writeSync + 
endSync (no async at all) +// ============================================================================= + +async function testFullSyncPipeline() { + const filePath = path.join(tmpDir, 'writer-full-sync.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + // Entirely synchronous write pipeline + w.writeSync('line 1\n'); + w.writeSync('line 2\n'); + w.writevSync(['line 3\n', 'line 4\n']); + const totalBytes = w.endSync(); + await fh.close(); + + assert.strictEqual(totalBytes, 28); + assert.strictEqual( + fs.readFileSync(filePath, 'utf8'), + 'line 1\nline 2\nline 3\nline 4\n', + ); +} + +// ============================================================================= +// end() rejects on errored writer +// ============================================================================= + +async function testEndRejectsOnErrored() { + const filePath = path.join(tmpDir, 'writer-end-errored.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + await w.write(Buffer.from('data')); + w.fail(new Error('test error')); + + await assert.rejects( + w.end(), + { message: 'test error' }, + ); + await fh.close(); +} + +// ============================================================================= +// end() is idempotent when closing/closed +// ============================================================================= + +async function testEndIdempotent() { + const filePath = path.join(tmpDir, 'writer-end-idempotent.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + await w.write(Buffer.from('data')); + + // Call end() twice concurrently - second should return same promise + const p1 = w.end(); + const p2 = w.end(); + const [bytes1, bytes2] = await Promise.all([p1, p2]); + + assert.strictEqual(bytes1, 4); + assert.strictEqual(bytes2, 4); + + // After closed, calling end() again returns totalBytesWritten + const bytes3 = await w.end(); + assert.strictEqual(bytes3, 4); + + await fh.close(); +} + +// 
============================================================================= +// asyncDispose waits for pending end() when closing +// ============================================================================= + +async function testAsyncDisposeWhileClosing() { + const filePath = path.join(tmpDir, 'writer-dispose-closing.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ autoClose: true }); + + await w.write(Buffer.from('closing test')); + + // Start end() but don't await - writer is now "closing" + const endPromise = w.end(); + + // asyncDispose should wait for the pending end, not call fail() + await w[Symbol.asyncDispose](); + await endPromise; + + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), 'closing test'); +} + +// ============================================================================= +// asyncDispose calls fail() on open writer (not graceful cleanup) +// ============================================================================= + +async function testAsyncDisposeCallsFail() { + const filePath = path.join(tmpDir, 'writer-dispose-fails.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer(); + + await w.write(Buffer.from('some data')); + + // Dispose without end() - should call fail(), not graceful cleanup + await w[Symbol.asyncDispose](); + + // Writer should be in errored state - write should reject + await assert.rejects( + w.write(Buffer.from('more')), + (err) => err instanceof Error, + ); + + // Handle should be unlocked and reusable + const w2 = fh.writer(); + await w2.end(); + await fh.close(); +} + +// ============================================================================= +// writer() with limit - async write within limit succeeds +// ============================================================================= + +async function testWriterLimit() { + const filePath = path.join(tmpDir, 'writer-limit.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ limit: 10 }); + + 
await w.write(Buffer.from('12345')); // 5 bytes, 5 remaining + await w.write(Buffer.from('67890')); // 5 bytes, 0 remaining + const totalBytes = await w.end(); + await fh.close(); + + assert.strictEqual(totalBytes, 10); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), '1234567890'); +} + +// ============================================================================= +// writer() with limit - async write exceeding limit rejects +// ============================================================================= + +async function testWriterLimitExceeded() { + const filePath = path.join(tmpDir, 'writer-limit-exceeded.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ limit: 5 }); + + await w.write(Buffer.from('123')); // 3 bytes, 2 remaining + + await assert.rejects( + w.write(Buffer.from('45678')), // 5 bytes > 2 remaining + { code: 'ERR_OUT_OF_RANGE' }, + ); + + await w.end(); + await fh.close(); +} + +// ============================================================================= +// writer() with limit - writev exceeding limit rejects +// ============================================================================= + +async function testWriterLimitWritev() { + const filePath = path.join(tmpDir, 'writer-limit-writev.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ limit: 6 }); + + await w.writev([Buffer.from('ab'), Buffer.from('cd')]); // 4 bytes + + await assert.rejects( + w.writev([Buffer.from('ef'), Buffer.from('gh')]), // 4 bytes > 2 remaining + { code: 'ERR_OUT_OF_RANGE' }, + ); + + await w.end(); + await fh.close(); +} + +// ============================================================================= +// writer() with limit - writeSync returns false when exceeding limit +// ============================================================================= + +async function testWriterLimitWriteSync() { + const filePath = path.join(tmpDir, 'writer-limit-writesync.txt'); + const fh = await open(filePath, 'w'); + const 
w = fh.writer({ limit: 10 }); + + assert.strictEqual(w.writeSync(Buffer.from('12345')), true); // 5 ok + assert.strictEqual(w.writeSync(Buffer.from('678')), true); // 3 ok + assert.strictEqual(w.writeSync(Buffer.from('901')), false); // 3 > 2 remaining + + const totalBytes = w.endSync(); + await fh.close(); + + assert.strictEqual(totalBytes, 8); + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), '12345678'); +} + +// ============================================================================= +// writer() with limit - writevSync returns false when exceeding limit +// ============================================================================= + +async function testWriterLimitWritevSync() { + const filePath = path.join(tmpDir, 'writer-limit-writevsync.txt'); + const fh = await open(filePath, 'w'); + const w = fh.writer({ limit: 5 }); + + assert.strictEqual(w.writevSync([Buffer.from('ab')]), true); + // 4 bytes > 3 remaining + assert.strictEqual( + w.writevSync([Buffer.from('cd'), Buffer.from('ef')]), false); + + w.endSync(); + await fh.close(); +} + +// ============================================================================= +// writer() with limit + start +// ============================================================================= + +async function testWriterLimitAndStart() { + const filePath = path.join(tmpDir, 'writer-limit-start.txt'); + // Pre-fill file with dots + fs.writeFileSync(filePath, '...........'); // 11 dots + + const fh = await open(filePath, 'r+'); + const w = fh.writer({ start: 3, limit: 5 }); + + await w.write(Buffer.from('HELLO')); // Write at offset 3 + await w.end(); + await fh.close(); + + assert.strictEqual(fs.readFileSync(filePath, 'utf8'), '...HELLO...'); +} + +// ============================================================================= +// Argument validation +// ============================================================================= + +async function testWriterArgumentValidation() { + const filePath = 
path.join(tmpDir, 'pull-arg-validation.txt'); + fs.writeFileSync(filePath, 'data'); + + const fh = await open(filePath, 'r'); + try { + assert.throws(() => fh.writer({ autoClose: 'no' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.writer({ start: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.writer({ limit: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.writer({ chunkSize: 'a' }), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => fh.writer({ start: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.writer({ limit: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + assert.throws(() => fh.writer({ chunkSize: 1.1 }), { code: 'ERR_OUT_OF_RANGE' }); + } finally { + await fh.close(); + } +} + +// ============================================================================= +// Run all tests +// ============================================================================= + +Promise.all([ + testBasicWrite(), + testBasicWritev(), + testMixedWriteAndWritev(), + testEndReturnsTotalBytes(), + testAutoCloseOnEnd(), + testAutoCloseOnFail(), + testStartOption(), + testStartSequentialPosition(), + testLockedState(), + testUnlockAfterEnd(), + testUnlockAfterFail(), + testWriteAfterEndRejects(), + testClosedHandle(), + testPipeToIntegration(), + testPipeToWithTransform(), + testCompressRoundTrip(), + testLargeFileWrite(), + testAsyncDispose(), + testAsyncDisposeOnError(), + testWriteWithAbortedSignalRejects(), + testWritevWithAbortedSignalRejects(), + testEndWithAbortedSignalRejects(), + testWriteString(), + testWriteStringMultibyte(), + testWritevStrings(), + testWritevMixed(), + testSyncDispose(), + testSyncDisposeOnError(), + testWriteSyncBasic(), + testWritevSyncBasic(), + testWriteSyncLargeChunk(), + testWriteSyncReturnsFalseDuringAsync(), + testWriteSyncClosedErrored(), + testEndSyncBasic(), + testEndSyncReturnsFalseDuringAsync(), + testEndSyncIdempotent(), + testEndSyncAutoClose(), + 
testFullSyncPipeline(), + testEndRejectsOnErrored(), + testEndIdempotent(), + testAsyncDisposeWhileClosing(), + testAsyncDisposeCallsFail(), + testWriterLimit(), + testWriterLimitExceeded(), + testWriterLimitWritev(), + testWriterLimitWriteSync(), + testWriterLimitWritevSync(), + testWriterLimitAndStart(), + testWriterArgumentValidation(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-broadcast-backpressure.js b/test/parallel/test-stream-iter-broadcast-backpressure.js new file mode 100644 index 00000000000000..d1e466c3ac44cf --- /dev/null +++ b/test/parallel/test-stream-iter-broadcast-backpressure.js @@ -0,0 +1,138 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { broadcast, text } = require('stream/iter'); + +// ============================================================================= +// Backpressure policies +// ============================================================================= + +async function testDropOldest() { + const { writer, broadcast: bc } = broadcast({ + highWaterMark: 2, + backpressure: 'drop-oldest', + }); + const consumer = bc.push(); + + writer.writeSync('first'); + writer.writeSync('second'); + // This should drop 'first' + writer.writeSync('third'); + writer.endSync(); + + const data = await text(consumer); + assert.strictEqual(data, 'secondthird'); +} + +async function testDropNewest() { + const { writer, broadcast: bc } = broadcast({ + highWaterMark: 1, + backpressure: 'drop-newest', + }); + const consumer = bc.push(); + + writer.writeSync('kept'); + // This should be silently dropped + writer.writeSync('dropped'); + writer.endSync(); + + const data = await text(consumer); + assert.strictEqual(data, 'kept'); +} + +// ============================================================================= +// Block backpressure +// ============================================================================= + +async function 
testBlockBackpressure() { + const { writer, broadcast: bc } = broadcast({ + highWaterMark: 1, + backpressure: 'block', + }); + const consumer = bc.push(); + writer.writeSync('a'); + + // Next write should block + let writeResolved = false; + const writePromise = writer.write('b').then(() => { writeResolved = true; }); + await new Promise(setImmediate); + assert.strictEqual(writeResolved, false); + + // Drain consumer to unblock the pending write + const iter = consumer[Symbol.asyncIterator](); + const first = await iter.next(); + assert.strictEqual(first.done, false); + await new Promise(setImmediate); + assert.strictEqual(writeResolved, true); + + writer.endSync(); + // Drain remaining data and verify completion + const second = await iter.next(); + assert.strictEqual(second.done, false); + await writePromise; +} + +// Verify block backpressure data flows correctly end-to-end +async function testBlockBackpressureContent() { + const { writer, broadcast: bc } = broadcast({ + highWaterMark: 1, + backpressure: 'block', + }); + const consumer = bc.push(); + + writer.writeSync('a'); + const writePromise = writer.write('b'); + await new Promise(setImmediate); + + // Read all and verify content + const iter = consumer[Symbol.asyncIterator](); + const first = await iter.next(); + assert.strictEqual(first.done, false); + const firstStr = new TextDecoder().decode(first.value[0]); + assert.strictEqual(firstStr, 'a'); + + await writePromise; + writer.endSync(); + + const second = await iter.next(); + assert.strictEqual(second.done, false); + const secondStr = new TextDecoder().decode(second.value[0]); + assert.strictEqual(secondStr, 'b'); + + const done = await iter.next(); + assert.strictEqual(done.done, true); +} + +// Writev async path +async function testWritevAsync() { + const { writer, broadcast: bc } = broadcast({ highWaterMark: 10 }); + const consumer = bc.push(); + + await writer.writev(['hello', ' ', 'world']); + await writer.end(); + + const data = await 
text(consumer); + assert.strictEqual(data, 'hello world'); +} + +// endSync returns the total byte count +async function testEndSyncReturnValue() { + const { writer, broadcast: bc } = broadcast({ highWaterMark: 10 }); + bc.push(); // Need a consumer to write to + + writer.writeSync('hello'); // 5 bytes + writer.writeSync(' world'); // 6 bytes + const total = writer.endSync(); + assert.strictEqual(total, 11); +} + +Promise.all([ + testDropOldest(), + testDropNewest(), + testBlockBackpressure(), + testBlockBackpressureContent(), + testWritevAsync(), + testEndSyncReturnValue(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-broadcast-basic.js b/test/parallel/test-stream-iter-broadcast-basic.js new file mode 100644 index 00000000000000..ab2c81304ec2ac --- /dev/null +++ b/test/parallel/test-stream-iter-broadcast-basic.js @@ -0,0 +1,260 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { broadcast, text } = require('stream/iter'); + +// ============================================================================= +// Basic broadcast +// ============================================================================= + +async function testBasicBroadcast() { + const { writer, broadcast: bc } = broadcast(); + + // Create two consumers + const consumer1 = bc.push(); + const consumer2 = bc.push(); + + assert.strictEqual(bc.consumerCount, 2); + + await writer.write('hello'); + await writer.end(); + + const [data1, data2] = await Promise.all([ + text(consumer1), + text(consumer2), + ]); + + assert.strictEqual(data1, 'hello'); + assert.strictEqual(data2, 'hello'); +} + +async function testMultipleWrites() { + const { writer, broadcast: bc } = broadcast({ highWaterMark: 10 }); + + const consumer = bc.push(); + + await writer.write('a'); + await writer.write('b'); + await writer.write('c'); + await writer.end(); + + const data = await text(consumer); + 
assert.strictEqual(data, 'abc'); +} + +async function testConsumerCount() { + const { broadcast: bc } = broadcast(); + + assert.strictEqual(bc.consumerCount, 0); + + const c1 = bc.push(); + assert.strictEqual(bc.consumerCount, 1); + + bc.push(); + assert.strictEqual(bc.consumerCount, 2); + + bc.cancel(); + + // After cancel, consumer count drops to 0 + assert.strictEqual(bc.consumerCount, 0); + + // Consumers are detached and yield nothing + const batches = []; + for await (const batch of c1) { + batches.push(batch); + } + assert.strictEqual(batches.length, 0); +} + +// ============================================================================= +// Writer methods +// ============================================================================= + +async function testWriteSync() { + const { writer, broadcast: bc } = broadcast({ highWaterMark: 2 }); + const consumer = bc.push(); + + assert.strictEqual(writer.writeSync('a'), true); + assert.strictEqual(writer.writeSync('b'), true); + // Buffer full (highWaterMark=2, strict policy) + assert.strictEqual(writer.writeSync('c'), false); + + writer.endSync(); + + const data = await text(consumer); + assert.strictEqual(data, 'ab'); +} + +async function testWritevSync() { + const { writer, broadcast: bc } = broadcast({ highWaterMark: 10 }); + const consumer = bc.push(); + + assert.strictEqual(writer.writevSync(['hello', ' ', 'world']), true); + writer.endSync(); + + const data = await text(consumer); + assert.strictEqual(data, 'hello world'); +} + +async function testWriterEnd() { + const { writer, broadcast: bc } = broadcast(); + const consumer = bc.push(); + + await writer.write('data'); + const totalBytes = await writer.end(); + assert.strictEqual(totalBytes, 4); // 'data' = 4 UTF-8 bytes + + const data = await text(consumer); + assert.strictEqual(data, 'data'); +} + +async function testWriterFail() { + const { writer, broadcast: bc } = broadcast(); + const consumer = bc.push(); + + writer.fail(new Error('test error')); + 
+ await assert.rejects( + async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of consumer) { + assert.fail('Should not reach here'); + } + }, + { message: 'test error' }, + ); +} + +// ============================================================================= +// Cancel +// ============================================================================= + +async function testCancelWithoutReason() { + const { broadcast: bc } = broadcast(); + const consumer = bc.push(); + + bc.cancel(); + + const batches = []; + for await (const batch of consumer) { + batches.push(batch); + } + assert.strictEqual(batches.length, 0); +} + +async function testCancelWithReason() { + const { broadcast: bc } = broadcast(); + + // Start a consumer that is waiting for data (promise pending) + const consumer = bc.push(); + const resultPromise = text(consumer).catch((err) => err); + + // Give the consumer time to enter the waiting state + await new Promise((resolve) => setImmediate(resolve)); + + bc.cancel(new Error('cancelled')); + + const result = await resultPromise; + assert.ok(result instanceof Error); + assert.strictEqual(result.message, 'cancelled'); +} + +// ============================================================================= +// Writer fail detaches consumers +// ============================================================================= + +async function testFailDetachesConsumers() { + const { writer, broadcast: bc } = broadcast(); + const consumer1 = bc.push(); + const consumer2 = bc.push(); + + assert.strictEqual(bc.consumerCount, 2); + + // Write some data, then fail the writer + await writer.write('data'); + await writer.fail(new Error('writer failed')); + + // After fail, consumers are detached + assert.strictEqual(bc.consumerCount, 0); + + // Both consumers should see the error + await assert.rejects( + async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of consumer1) { + assert.fail('Should not reach here'); + 
} + }, + { message: 'writer failed' }, + ); + + await assert.rejects( + async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of consumer2) { + assert.fail('Should not reach here'); + } + }, + { message: 'writer failed' }, + ); +} + +// ============================================================================= +// Writer fail idempotent +// ============================================================================= + +async function testWriterFailIdempotent() { + const { writer, broadcast: bc } = broadcast(); + const consumer = bc.push(); + writer.writeSync('hello'); + writer.fail(new Error('fail!')); + // Second call is a no-op (already errored) + writer.fail(new Error('fail2')); + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of consumer) { /* consume */ } + }, { message: 'fail!' }); +} + +// cancel() with falsy reason (0, "", false) should still treat as error +async function testCancelWithFalsyReason() { + const { broadcast: bc } = broadcast(); + const consumer = bc.push(); + const resultPromise = text(consumer).catch((err) => err); + await new Promise((resolve) => setImmediate(resolve)); + bc.cancel(0); + const result = await resultPromise; + assert.strictEqual(result, 0); +} + +// Late-joining consumer should read from oldest buffered entry +async function testLateJoinerSeesBufferedData() { + const { writer, broadcast: bc } = broadcast({ highWaterMark: 16 }); + + // Write data before any consumer joins + writer.writeSync('before-join'); + writer.endSync(); + + // Consumer joins after data is written + const consumer = bc.push(); + const result = await text(consumer); + assert.strictEqual(result, 'before-join'); +} + +Promise.all([ + testBasicBroadcast(), + testMultipleWrites(), + testConsumerCount(), + testWriteSync(), + testWritevSync(), + testWriterEnd(), + testWriterFail(), + testCancelWithoutReason(), + testCancelWithReason(), + testCancelWithFalsyReason(), + 
testFailDetachesConsumers(), + testWriterFailIdempotent(), + testLateJoinerSeesBufferedData(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-broadcast-coverage.js b/test/parallel/test-stream-iter-broadcast-coverage.js new file mode 100644 index 00000000000000..bc86f44867dcc5 --- /dev/null +++ b/test/parallel/test-stream-iter-broadcast-coverage.js @@ -0,0 +1,115 @@ +// Flags: --experimental-stream-iter +'use strict'; + +// Coverage tests for broadcast.js: signal abort on pending write, +// sync iterable from, ringbuffer grow. + +const common = require('../common'); +const assert = require('assert'); +const { + broadcast, + Broadcast, + text, +} = require('stream/iter'); + +// Signal abort on pending write (covers wireBroadcastWriteSignal + removeAt) +async function testBroadcastWriteAbort() { + const { writer, broadcast: bc } = broadcast({ + highWaterMark: 1, + backpressure: 'block', + }); + const consumer = bc.push(); + + // Fill the buffer to capacity + writer.writeSync(new Uint8Array([1])); + + // Next write will block — pass a signal + const ac = new AbortController(); + const writePromise = writer.write(new Uint8Array([2]), + { signal: ac.signal }); + + // Abort the signal + ac.abort(); + + await assert.rejects(writePromise, { name: 'AbortError' }); + + // Clean up + writer.endSync(); + // Drain the consumer + const result = []; + for await (const batch of consumer) { + result.push(...batch); + } + assert.ok(result.length >= 1); +} + +// Broadcast.from with sync iterable (generator) +async function testBroadcastFromSyncIterable() { + function* source() { + yield [new Uint8Array([10, 20])]; + yield [new Uint8Array([30, 40])]; + } + + const { broadcast: bc } = Broadcast.from(source()); + const consumer = bc.push(); + // Just verify it completes without error and produces data + let count = 0; + for await (const batch of consumer) { + count += batch.length; + } + assert.ok(count > 0); +} + +// Broadcast.from with sync iterable — string 
chunks +async function testBroadcastFromSyncIterableStrings() { + function* source() { + yield 'hello'; + yield ' world'; + } + const { broadcast: bc } = Broadcast.from(source()); + const consumer = bc.push(); + const result = await text(consumer); + assert.strictEqual(result, 'hello world'); +} + +// Ringbuffer grow — push > 16 items without consumer draining +async function testRingbufferGrow() { + const { writer, broadcast: bc } = broadcast({ highWaterMark: 32 }); + const consumer = bc.push(); + + // Push 20 items (exceeds default ringbuffer capacity of 16) + for (let i = 0; i < 20; i++) { + writer.writeSync(new Uint8Array([i])); + } + writer.endSync(); + + // Read all items back and verify order + const items = []; + for await (const batch of consumer) { + for (const chunk of batch) { + items.push(chunk[0]); + } + } + assert.strictEqual(items.length, 20); + for (let i = 0; i < 20; i++) { + assert.strictEqual(items[i], i); + } +} + +// Broadcast drainableProtocol after close returns null +async function testDrainableAfterClose() { + const { drainableProtocol } = require('stream/iter'); + const { writer } = broadcast(); + writer.endSync(); + const result = writer[drainableProtocol](); + // After close, desired should be null + assert.strictEqual(result, null); +} + +Promise.all([ + testBroadcastWriteAbort(), + testBroadcastFromSyncIterable(), + testBroadcastFromSyncIterableStrings(), + testRingbufferGrow(), + testDrainableAfterClose(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-broadcast-from.js b/test/parallel/test-stream-iter-broadcast-from.js new file mode 100644 index 00000000000000..2f17b1a7de92fa --- /dev/null +++ b/test/parallel/test-stream-iter-broadcast-from.js @@ -0,0 +1,192 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { broadcast, Broadcast, from, text } = require('stream/iter'); + +// 
============================================================================= +// Broadcast.from +// ============================================================================= + +async function testBroadcastFromAsyncIterable() { + const source = from('broadcast-from'); + const { broadcast: bc } = Broadcast.from(source); + const consumer = bc.push(); + + const data = await text(consumer); + assert.strictEqual(data, 'broadcast-from'); +} + +async function testBroadcastFromNonArrayChunks() { + // Source that yields single Uint8Array chunks (not arrays) + const enc = new TextEncoder(); + async function* singleChunkSource() { + yield enc.encode('hello'); + yield enc.encode(' world'); + } + const { broadcast: bc } = Broadcast.from(singleChunkSource()); + const consumer = bc.push(); + const data = await text(consumer); + assert.strictEqual(data, 'hello world'); +} + +async function testBroadcastFromStringChunks() { + // Source that yields bare strings (not arrays) + async function* stringSource() { + yield 'foo'; + yield 'bar'; + } + const { broadcast: bc } = Broadcast.from(stringSource()); + const consumer = bc.push(); + const data = await text(consumer); + assert.strictEqual(data, 'foobar'); +} + +async function testBroadcastFromMultipleConsumers() { + const source = from('shared-data'); + const { broadcast: bc } = Broadcast.from(source); + + const c1 = bc.push(); + const c2 = bc.push(); + + const [data1, data2] = await Promise.all([ + text(c1), + text(c2), + ]); + + assert.strictEqual(data1, 'shared-data'); + assert.strictEqual(data2, 'shared-data'); +} + +// ============================================================================= +// AbortSignal +// ============================================================================= + +async function testAbortSignal() { + const ac = new AbortController(); + const { broadcast: bc } = broadcast({ signal: ac.signal }); + const consumer = bc.push(); + + ac.abort(); + + const batches = []; + for await (const batch of 
consumer) { + batches.push(batch); + } + assert.strictEqual(batches.length, 0); +} + +async function testAlreadyAbortedSignal() { + const ac = new AbortController(); + ac.abort(); + + const { broadcast: bc } = broadcast({ signal: ac.signal }); + const consumer = bc.push(); + + const batches = []; + for await (const batch of consumer) { + batches.push(batch); + } + assert.strictEqual(batches.length, 0); +} + +// ============================================================================= +// Broadcast.from() hang fix - cancel while write blocked on backpressure +// ============================================================================= + +async function testBroadcastFromCancelWhileBlocked() { + // Create a slow async source that blocks between yields + let sourceFinished = false; + async function* slowSource() { + const enc = new TextEncoder(); + yield [enc.encode('chunk1')]; + // Simulate a long delay - the cancel should unblock this + await new Promise((resolve) => setTimeout(resolve, 10000)); + yield [enc.encode('chunk2')]; + sourceFinished = true; + } + + const { broadcast: bc } = Broadcast.from(slowSource()); + const consumer = bc.push(); + + // Read the first chunk + const iter = consumer[Symbol.asyncIterator](); + const first = await iter.next(); + assert.strictEqual(first.done, false); + + // Cancel while the source is blocked waiting to yield the next chunk + bc.cancel(); + + // The iteration should complete (not hang) + const next = await iter.next(); + assert.strictEqual(next.done, true); + + // Source should NOT have finished (we cancelled before chunk2) + assert.strictEqual(sourceFinished, false); +} + +// ============================================================================= +// Source error propagation via Broadcast.from() +// ============================================================================= + +async function testBroadcastFromSourceError() { + async function* failingSource() { + yield [new TextEncoder().encode('a')]; + throw 
new Error('broadcast source boom'); + } + const { broadcast: bc } = Broadcast.from(failingSource()); + const consumer = bc.push(); + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of consumer) { /* consume */ } + }, { message: 'broadcast source boom' }); +} + +// ============================================================================= +// Protocol validation +// ============================================================================= + +function testBroadcastProtocolReturnsNull() { + const obj = { + [Symbol.for('Stream.broadcastProtocol')]() { return null; }, + }; + assert.throws( + () => Broadcast.from(obj), + { code: 'ERR_INVALID_RETURN_VALUE' }, + ); +} + +function testBroadcastProtocolReturnsString() { + const obj = { + [Symbol.for('Stream.broadcastProtocol')]() { return 'bad'; }, + }; + assert.throws( + () => Broadcast.from(obj), + { code: 'ERR_INVALID_RETURN_VALUE' }, + ); +} + +function testBroadcastProtocolReturnsUndefined() { + const obj = { + [Symbol.for('Stream.broadcastProtocol')]() { }, + }; + assert.throws( + () => Broadcast.from(obj), + { code: 'ERR_INVALID_RETURN_VALUE' }, + ); +} + +Promise.all([ + testBroadcastFromAsyncIterable(), + testBroadcastFromNonArrayChunks(), + testBroadcastFromStringChunks(), + testBroadcastFromMultipleConsumers(), + testAbortSignal(), + testAlreadyAbortedSignal(), + testBroadcastFromCancelWhileBlocked(), + testBroadcastFromSourceError(), + testBroadcastProtocolReturnsNull(), + testBroadcastProtocolReturnsString(), + testBroadcastProtocolReturnsUndefined(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-consumers-bytes.js b/test/parallel/test-stream-iter-consumers-bytes.js new file mode 100644 index 00000000000000..ebb5dae0ac636e --- /dev/null +++ b/test/parallel/test-stream-iter-consumers-bytes.js @@ -0,0 +1,220 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = 
require('assert'); +const { + from, + fromSync, + bytes, + bytesSync, + text, + textSync, + arrayBuffer, + arrayBufferSync, + array, + arraySync, +} = require('stream/iter'); + +// ============================================================================= +// bytesSync / bytes +// ============================================================================= + +async function testBytesSyncBasic() { + const data = bytesSync(fromSync('hello')); + assert.deepStrictEqual(data, new TextEncoder().encode('hello')); +} + +async function testBytesSyncLimit() { + assert.throws( + () => bytesSync(fromSync('hello world'), { limit: 3 }), + { name: 'RangeError' }, + ); +} + +async function testBytesAsync() { + const data = await bytes(from('hello-async')); + assert.deepStrictEqual(data, new TextEncoder().encode('hello-async')); +} + +async function testBytesAsyncLimit() { + await assert.rejects( + () => bytes(from('hello world'), { limit: 3 }), + { name: 'RangeError' }, + ); +} + +async function testBytesAsyncAbort() { + const ac = new AbortController(); + ac.abort(); + await assert.rejects( + () => bytes(from('data'), { signal: ac.signal }), + { name: 'AbortError' }, + ); +} + +async function testBytesEmpty() { + const data = await bytes(from([])); + assert.ok(data instanceof Uint8Array); + assert.strictEqual(data.byteLength, 0); +} + +// ============================================================================= +// arrayBufferSync / arrayBuffer +// ============================================================================= + +async function testArrayBufferSyncBasic() { + const ab = arrayBufferSync(fromSync(new Uint8Array([1, 2, 3]))); + assert.ok(ab instanceof ArrayBuffer); + assert.strictEqual(ab.byteLength, 3); + const view = new Uint8Array(ab); + assert.deepStrictEqual(view, new Uint8Array([1, 2, 3])); +} + +async function testArrayBufferAsync() { + const ab = await arrayBuffer(from(new Uint8Array([10, 20, 30]))); + assert.ok(ab instanceof ArrayBuffer); + 
assert.strictEqual(ab.byteLength, 3); + const view = new Uint8Array(ab); + assert.deepStrictEqual(view, new Uint8Array([10, 20, 30])); +} + +// ============================================================================= +// arraySync / array +// ============================================================================= + +async function testArraySyncBasic() { + function* gen() { + yield new Uint8Array([1]); + yield new Uint8Array([2]); + yield new Uint8Array([3]); + } + const chunks = arraySync(fromSync(gen())); + assert.strictEqual(chunks.length, 3); + assert.deepStrictEqual(chunks[0], new Uint8Array([1])); + assert.deepStrictEqual(chunks[1], new Uint8Array([2])); + assert.deepStrictEqual(chunks[2], new Uint8Array([3])); +} + +async function testArraySyncLimit() { + function* gen() { + yield new Uint8Array(100); + yield new Uint8Array(100); + } + const source = fromSync(gen()); + assert.throws( + () => arraySync(source, { limit: 50 }), + { name: 'RangeError' }, + ); +} + +async function testArrayAsync() { + async function* gen() { + yield [new Uint8Array([1])]; + yield [new Uint8Array([2])]; + } + const chunks = await array(gen()); + assert.strictEqual(chunks.length, 2); + assert.deepStrictEqual(chunks[0], new Uint8Array([1])); + assert.deepStrictEqual(chunks[1], new Uint8Array([2])); +} + +async function testArrayAsyncLimit() { + async function* gen() { + yield [new Uint8Array(100)]; + yield [new Uint8Array(100)]; + } + await assert.rejects( + () => array(gen(), { limit: 50 }), + { name: 'RangeError' }, + ); +} + +// ============================================================================= +// Non-array batch tolerance +// ============================================================================= + +// Regression test: consumers should tolerate sources that yield raw +// Uint8Array or string values instead of Uint8Array[] batches. 
+async function testConsumersNonArrayBatch() { + const encoder = new TextEncoder(); + + // Source yields raw Uint8Array, not wrapped in an array + async function* rawSource() { + yield encoder.encode('hello'); + yield encoder.encode(' world'); + } + const result = await text(rawSource()); + assert.strictEqual(result, 'hello world'); + + // bytes() with raw chunks + async function* rawSource2() { + yield encoder.encode('ab'); + } + const data = await bytes(rawSource2()); + assert.strictEqual(data.length, 2); + assert.strictEqual(data[0], 97); // 'a' + assert.strictEqual(data[1], 98); // 'b' + + // array() with raw chunks + async function* rawSource3() { + yield encoder.encode('x'); + yield encoder.encode('y'); + } + const arr = await array(rawSource3()); + assert.strictEqual(arr.length, 2); +} + +async function testConsumersNonArrayBatchSync() { + const encoder = new TextEncoder(); + + function* rawSyncSource() { + yield encoder.encode('sync'); + yield encoder.encode('data'); + } + const result = textSync(rawSyncSource()); + assert.strictEqual(result, 'syncdata'); + + const data = bytesSync(rawSyncSource()); + assert.strictEqual(data.length, 8); + + const arr = arraySync(rawSyncSource()); + assert.strictEqual(arr.length, 2); +} + +// Consumers accept string sources directly (normalized via from/fromSync) +async function testBytesStringSource() { + const result = await bytes('hello-bytes'); + assert.strictEqual(new TextDecoder().decode(result), 'hello-bytes'); +} + +function testBytesSyncStringSource() { + const result = bytesSync('hello-sync'); + assert.strictEqual(new TextDecoder().decode(result), 'hello-sync'); +} + +async function testTextStringSource() { + const { text } = require('stream/iter'); + const result = await text('direct-string'); + assert.strictEqual(result, 'direct-string'); +} + +Promise.all([ + testBytesSyncBasic(), + testBytesSyncLimit(), + testBytesAsync(), + testBytesAsyncLimit(), + testBytesAsyncAbort(), + testBytesEmpty(), + 
testArrayBufferSyncBasic(), + testArrayBufferAsync(), + testArraySyncBasic(), + testArraySyncLimit(), + testArrayAsync(), + testArrayAsyncLimit(), + testConsumersNonArrayBatch(), + testConsumersNonArrayBatchSync(), + testBytesStringSource(), + testBytesSyncStringSource(), + testTextStringSource(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-consumers-merge.js b/test/parallel/test-stream-iter-consumers-merge.js new file mode 100644 index 00000000000000..ad047c0ffd7ed4 --- /dev/null +++ b/test/parallel/test-stream-iter-consumers-merge.js @@ -0,0 +1,175 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + from, + fromSync, + push, + merge, + text, +} = require('stream/iter'); + +// ============================================================================= +// merge +// ============================================================================= + +async function testMergeTwoSources() { + const { writer: w1, readable: r1 } = push(); + const { writer: w2, readable: r2 } = push(); + + w1.write('from-a'); + w1.end(); + w2.write('from-b'); + w2.end(); + + const merged = merge(r1, r2); + const chunks = []; + for await (const batch of merged) { + for (const chunk of batch) { + chunks.push(new TextDecoder().decode(chunk)); + } + } + + // Both sources should be present (order is temporal, not guaranteed) + assert.strictEqual(chunks.length, 2); + assert.ok(chunks.includes('from-a')); + assert.ok(chunks.includes('from-b')); +} + +async function testMergeSingleSource() { + const data = await text(merge(from('only-one'))); + assert.strictEqual(data, 'only-one'); +} + +async function testMergeEmpty() { + const merged = merge(); + const batches = []; + for await (const batch of merged) { + batches.push(batch); + } + assert.strictEqual(batches.length, 0); +} + +async function testMergeWithAbortSignal() { + const ac = new AbortController(); + ac.abort(); + + const 
merged = merge(from('data'), { signal: ac.signal }); + + await assert.rejects( + async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of merged) { + assert.fail('Should not reach here'); + } + }, + { name: 'AbortError' }, + ); +} + +// Regression test: merge() with sync iterable sources +async function testMergeSyncSources() { + const s1 = fromSync('abc'); + const s2 = fromSync('def'); + const result = await text(merge(s1, s2)); + // Both sources should be fully consumed; order may vary + assert.strictEqual(result.length, 6); + for (const ch of 'abcdef') { + assert.ok(result.includes(ch), `missing '${ch}' in '${result}'`); + } +} + +// ============================================================================= +// Merge error propagation +// ============================================================================= + +async function testMergeSourceError() { + async function* goodSource() { + const enc = new TextEncoder(); + yield [enc.encode('a')]; + // Slow so the bad source errors first + await new Promise((r) => setTimeout(r, 50)); + yield [enc.encode('b')]; + } + + async function* badSource() { + yield [new TextEncoder().encode('x')]; + throw new Error('merge source boom'); + } + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of merge(goodSource(), badSource())) { /* consume */ } + }, { message: 'merge source boom' }); +} + +async function testMergeConsumerBreak() { + let source1Return = false; + let source2Return = false; + async function* source1() { + try { + while (true) yield [new TextEncoder().encode('a')]; + } finally { + source1Return = true; + } + } + + async function* source2() { + try { + while (true) yield [new TextEncoder().encode('b')]; + } finally { + source2Return = true; + } + } + // eslint-disable-next-line no-unused-vars + for await (const _ of merge(source1(), source2())) { + break; // Break after first batch + } + // Give async cleanup a tick to complete + 
await new Promise(setImmediate); + // Both sources should be cleaned up + assert.strictEqual(source1Return && source2Return, true); +} + +async function testMergeSignalMidIteration() { + const ac = new AbortController(); + async function* slowSource() { + const enc = new TextEncoder(); + yield [enc.encode('a')]; + await new Promise((r) => setTimeout(r, 100)); + yield [enc.encode('b')]; + } + const merged = merge(slowSource(), { signal: ac.signal }); + const iter = merged[Symbol.asyncIterator](); + await iter.next(); // First batch + ac.abort(); + await assert.rejects(() => iter.next(), { name: 'AbortError' }); +} + +// merge() accepts string sources (normalized via from()) +async function testMergeStringSources() { + const batches = []; + for await (const batch of merge('hello', 'world')) { + batches.push(batch); + } + // Each string becomes a single-batch source + assert.strictEqual(batches.length >= 2, true); + const combined = new TextDecoder().decode( + Buffer.concat(batches.flat())); + // Both strings should appear (order may vary) + assert.ok(combined.includes('hello')); + assert.ok(combined.includes('world')); +} + +Promise.all([ + testMergeTwoSources(), + testMergeSingleSource(), + testMergeEmpty(), + testMergeWithAbortSignal(), + testMergeSyncSources(), + testMergeSourceError(), + testMergeConsumerBreak(), + testMergeSignalMidIteration(), + testMergeStringSources(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-consumers-tap.js b/test/parallel/test-stream-iter-consumers-tap.js new file mode 100644 index 00000000000000..b93f93eb242b2c --- /dev/null +++ b/test/parallel/test-stream-iter-consumers-tap.js @@ -0,0 +1,130 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + from, + fromSync, + pull, + pullSync, + push, + tap, + tapSync, + text, + textSync, +} = require('stream/iter'); + +// 
============================================================================= +// tap / tapSync +// ============================================================================= + +async function testTapSync() { + const observed = []; + const observer = tapSync((chunks) => { + if (chunks !== null) { + observed.push(chunks.length); + } + }); + + // tapSync returns a function transform + assert.strictEqual(typeof observer, 'function'); + + // Test that it passes data through unchanged + const input = [new Uint8Array([1]), new Uint8Array([2])]; + const result = observer(input); + assert.deepStrictEqual(result, input); + assert.deepStrictEqual(observed, [2]); + + // null (flush) passes through + const flushResult = observer(null); + assert.strictEqual(flushResult, null); +} + +async function testTapAsync() { + const observed = []; + const observer = tap(async (chunks) => { + if (chunks !== null) { + observed.push(chunks.length); + } + }); + + assert.strictEqual(typeof observer, 'function'); + + const input = [new Uint8Array([1])]; + const result = await observer(input); + assert.deepStrictEqual(result, input); + assert.deepStrictEqual(observed, [1]); +} + +async function testTapInPipeline() { + const { writer, readable } = push(); + const seen = []; + + const observer = tap(async (chunks) => { + if (chunks !== null) { + for (const chunk of chunks) { + seen.push(new TextDecoder().decode(chunk)); + } + } + }); + + writer.write('hello'); + writer.end(); + + // Use pull with tap as a transform + const result = pull(readable, observer); + const data = await text(result); + + assert.strictEqual(data, 'hello'); + assert.strictEqual(seen.length, 1); + assert.strictEqual(seen[0], 'hello'); +} + +// Tap callback error propagates through async pipeline +async function testTapAsyncErrorPropagation() { + const badTap = tap(() => { throw new Error('tap error'); }); + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of 
pull(from('hello'), badTap)) { /* consume */ } + }, { message: 'tap error' }); +} + +// TapSync callback error propagates through sync pipeline +function testTapSyncErrorPropagation() { + const badTap = tapSync(() => { throw new Error('tapSync error'); }); + assert.throws(() => { + // eslint-disable-next-line no-unused-vars + for (const _ of pullSync(fromSync('hello'), badTap)) { /* consume */ } + }, { message: 'tapSync error' }); +} + +// TapSync in a pullSync pipeline passes through data and flush +function testTapSyncInPipeline() { + const seen = []; + let sawFlush = false; + const observer = tapSync((chunks) => { + if (chunks === null) { + sawFlush = true; + } else { + for (const chunk of chunks) { + seen.push(new TextDecoder().decode(chunk)); + } + } + }); + + const data = textSync(pullSync(fromSync('hello'), observer)); + assert.strictEqual(data, 'hello'); + assert.strictEqual(seen.length, 1); + assert.strictEqual(seen[0], 'hello'); + assert.strictEqual(sawFlush, true); +} + +Promise.all([ + testTapSync(), + testTapAsync(), + testTapInPipeline(), + testTapAsyncErrorPropagation(), + testTapSyncErrorPropagation(), + testTapSyncInPipeline(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-consumers-text.js b/test/parallel/test-stream-iter-consumers-text.js new file mode 100644 index 00000000000000..8bfa7c3320981c --- /dev/null +++ b/test/parallel/test-stream-iter-consumers-text.js @@ -0,0 +1,164 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + from, + fromSync, + text, + textSync, +} = require('stream/iter'); + +// ============================================================================= +// textSync / text +// ============================================================================= + +async function testTextSyncBasic() { + const data = textSync(fromSync('hello text')); + assert.strictEqual(data, 'hello text'); +} + +async function 
testTextAsync() { + const data = await text(from('hello async text')); + assert.strictEqual(data, 'hello async text'); +} + +async function testTextEncoding() { + // Default encoding is utf-8 + const data = await text(from('café')); + assert.strictEqual(data, 'café'); +} + +// ============================================================================= +// Text encoding tests +// ============================================================================= + +async function testTextNonUtf8Encoding() { + // Latin-1 encoding + const latin1Bytes = new Uint8Array([0xE9, 0xE8, 0xEA]); // é, è, ê in latin1 + const result = await text(from(latin1Bytes), { encoding: 'iso-8859-1' }); + assert.strictEqual(result, 'éèê'); +} + +async function testTextSyncNonUtf8Encoding() { + const latin1Bytes = new Uint8Array([0xE9, 0xE8, 0xEA]); + const result = textSync(fromSync(latin1Bytes), { encoding: 'iso-8859-1' }); + assert.strictEqual(result, 'éèê'); +} + +async function testTextInvalidUtf8() { + // Invalid UTF-8 sequence with fatal: true should throw + const invalid = new Uint8Array([0xFF, 0xFE]); + await assert.rejects( + () => text(from(invalid)), + { name: 'TypeError' }, // TextDecoder fatal throws TypeError + ); +} + +async function testTextWithLimit() { + // Limit caps total bytes; exceeding throws ERR_OUT_OF_RANGE + await assert.rejects( + () => text(from('hello world'), { limit: 5 }), + { code: 'ERR_OUT_OF_RANGE' }, + ); + // Within limit should succeed + const result = await text(from('hello'), { limit: 10 }); + assert.strictEqual(result, 'hello'); + + // Exact boundary: 'hello' is 5 UTF-8 bytes, limit: 5 should succeed + // (source uses > not >=) + const exact = await text(from('hello'), { limit: 5 }); + assert.strictEqual(exact, 'hello'); +} + +async function testTextSyncWithLimit() { + // Sync version of limit testing + assert.throws( + () => textSync(fromSync('hello world'), { limit: 5 }), + { code: 'ERR_OUT_OF_RANGE' }, + ); + const result = 
textSync(fromSync('hello'), { limit: 10 }); + assert.strictEqual(result, 'hello'); + + // Exact boundary + const exact = textSync(fromSync('hello'), { limit: 5 }); + assert.strictEqual(exact, 'hello'); +} + +async function testTextEmpty() { + const result = await text(from('')); + assert.strictEqual(result, ''); + + const syncResult = textSync(fromSync('')); + assert.strictEqual(syncResult, ''); +} + +// text() with abort signal +async function testTextWithSignal() { + const ac = new AbortController(); + ac.abort(); + await assert.rejects( + () => text(from('data'), { signal: ac.signal }), + { name: 'AbortError' }, + ); +} + +// Multi-chunk source with a multi-byte UTF-8 character split across chunks +async function testTextMultiChunkSplitCodepoint() { + // '€' is U+20AC, encoded as 3 UTF-8 bytes: 0xE2, 0x82, 0xAC + // Split these bytes across two chunks to test proper re-assembly + async function* splitSource() { + yield [new Uint8Array([0xE2, 0x82])]; // First 2 bytes of '€' + yield [new Uint8Array([0xAC])]; // Last byte of '€' + } + const result = await text(splitSource()); + assert.strictEqual(result, '€'); +} + +// BOM should be stripped (ignoreBOM defaults to false per spec) +async function testTextBOMStripped() { + // UTF-8 BOM: 0xEF, 0xBB, 0xBF followed by 'hi' + const withBOM = new Uint8Array([0xEF, 0xBB, 0xBF, 0x68, 0x69]); + const result = await text(from(withBOM)); + assert.strictEqual(result, 'hi'); +} + +async function testTextSyncBOMStripped() { + const withBOM = new Uint8Array([0xEF, 0xBB, 0xBF, 0x68, 0x69]); + const result = textSync(fromSync(withBOM)); + assert.strictEqual(result, 'hi'); +} + +// Unsupported encoding throws RangeError +async function testTextUnsupportedEncodingThrowsRangeError() { + await assert.rejects( + () => text(from('hello'), { encoding: 'not-a-real-encoding' }), + { name: 'RangeError' }, + ); +} + +function testTextSyncUnsupportedEncodingThrowsRangeError() { + assert.throws( + () => textSync(fromSync('hello'), { encoding: 
'not-a-real-encoding' }), + { name: 'RangeError' }, + ); +} + +Promise.all([ + testTextSyncBasic(), + testTextAsync(), + testTextEncoding(), + testTextNonUtf8Encoding(), + testTextSyncNonUtf8Encoding(), + testTextInvalidUtf8(), + testTextWithLimit(), + testTextSyncWithLimit(), + testTextEmpty(), + testTextWithSignal(), + testTextMultiChunkSplitCodepoint(), + testTextBOMStripped(), + testTextSyncBOMStripped(), + testTextUnsupportedEncodingThrowsRangeError(), + testTextSyncUnsupportedEncodingThrowsRangeError(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-cross-realm.js b/test/parallel/test-stream-iter-cross-realm.js new file mode 100644 index 00000000000000..6a6e92179253ec --- /dev/null +++ b/test/parallel/test-stream-iter-cross-realm.js @@ -0,0 +1,132 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const vm = require('vm'); +const { from, fromSync, pull, text, bytesSync } = require('stream/iter'); + +// Cross-realm objects are created in a different VM context. +// They have different prototypes, so `instanceof` checks fail. +// These tests verify that stream/iter correctly handles cross-realm types. + +// Helper: compare Uint8Array content regardless of realm. 
+function assertBytes(actual, expected) {
+  assert.strictEqual(actual.length, expected.length,
+                     `length mismatch: ${actual.length} !== ${expected.length}`);
+  for (let i = 0; i < expected.length; i++) {
+    assert.strictEqual(actual[i], expected[i], `byte mismatch at index ${i}`);
+  }
+}
+
+// =============================================================================
+// from() / fromSync() with cross-realm Uint8Array
+// =============================================================================
+
+async function testFromSyncCrossRealmUint8Array() {
+  const crossRealm = vm.runInNewContext('new Uint8Array([1, 2, 3])');
+  const data = bytesSync(fromSync(crossRealm));
+  assertBytes(data, new Uint8Array([1, 2, 3]));
+}
+
+async function testFromCrossRealmUint8Array() {
+  const crossRealm = vm.runInNewContext('new Uint8Array([4, 5, 6])');
+  const result = await text(from(crossRealm));
+  assert.strictEqual(result, '\x04\x05\x06');
+}
+
+// =============================================================================
+// from() / fromSync() with cross-realm ArrayBuffer
+// =============================================================================
+
+async function testFromSyncCrossRealmArrayBuffer() {
+  const crossRealm = vm.runInNewContext(
+    'new Uint8Array([7, 8, 9]).buffer',
+  );
+  const data = bytesSync(fromSync(crossRealm));
+  assertBytes(data, new Uint8Array([7, 8, 9]));
+}
+
+async function testFromCrossRealmArrayBuffer() {
+  const crossRealm = vm.runInNewContext(
+    'new Uint8Array([10, 11, 12]).buffer',
+  );
+  const result = await text(from(crossRealm));
+  assert.strictEqual(result, '\x0a\x0b\x0c');
+}
+
+// =============================================================================
+// from() / fromSync() with cross-realm Uint8Array[]
+// =============================================================================
+
+async function testFromSyncCrossRealmUint8ArrayArray() {
+  const crossRealm = vm.runInNewContext(
+    '[new Uint8Array([1, 2]), new Uint8Array([3, 4])]',
+  );
+  const data = bytesSync(fromSync(crossRealm));
+  assertBytes(data, new Uint8Array([1, 2, 3, 4]));
+}
+
+async function testFromCrossRealmUint8ArrayArray() {
+  const crossRealm = vm.runInNewContext(
+    '[new Uint8Array([5, 6]), new Uint8Array([7, 8])]',
+  );
+  const result = await text(from(crossRealm));
+  assert.strictEqual(result, '\x05\x06\x07\x08');
+}
+
+// =============================================================================
+// pull() with cross-realm Uint8Array from transforms
+// =============================================================================
+
+async function testPullCrossRealmTransformOutput() {
+  // Transform that returns cross-realm Uint8Array[] batches
+  const crossRealmTransform = (chunks) => {
+    if (chunks === null) return null;
+    // Re-encode each chunk as cross-realm Uint8Array
+    return vm.runInNewContext(
+      `[new Uint8Array([${[...chunks[0]]}])]`,
+    );
+  };
+  const output = await text(pull(from('hello'), crossRealmTransform));
+  assert.strictEqual(output, 'hello');
+}
+
+// =============================================================================
+// from() with cross-realm Promise
+// =============================================================================
+
+async function testFromCrossRealmPromise() {
+  const crossRealmPromise = vm.runInNewContext(
+    'Promise.resolve("promised-data")',
+  );
+  async function* gen() {
+    yield crossRealmPromise;
+  }
+  const result = await text(from(gen()));
+  assert.strictEqual(result, 'promised-data');
+}
+
+// =============================================================================
+// from() with cross-realm typed arrays (non-Uint8Array views)
+// =============================================================================
+
+async function testFromSyncCrossRealmInt32Array() {
+  const crossRealm = vm.runInNewContext('new Int32Array([1])');
+  const data = bytesSync(fromSync(crossRealm));
+  // Int32Array([1]) = 4 bytes, endianness varies by platform
+  assert.strictEqual(data.length, 4);
+  assert.strictEqual(new Int32Array(data.buffer, data.byteOffset, 1)[0], 1);
+}
+
+Promise.all([
+  testFromSyncCrossRealmUint8Array(),
+  testFromCrossRealmUint8Array(),
+  testFromSyncCrossRealmArrayBuffer(),
+  testFromCrossRealmArrayBuffer(),
+  testFromSyncCrossRealmUint8ArrayArray(),
+  testFromCrossRealmUint8ArrayArray(),
+  testPullCrossRealmTransformOutput(),
+  testFromCrossRealmPromise(),
+  testFromSyncCrossRealmInt32Array(),
+]).then(common.mustCall());
diff --git a/test/parallel/test-stream-iter-disabled.js b/test/parallel/test-stream-iter-disabled.js
new file mode 100644
index 00000000000000..8c8538ffae9268
--- /dev/null
+++ b/test/parallel/test-stream-iter-disabled.js
@@ -0,0 +1,34 @@
+'use strict';
+const common = require('../common');
+const assert = require('assert');
+const { spawnPromisified } = common;
+
+async function testRequireNodeStreamIterWithoutFlag() {
+  const { stderr, code } = await spawnPromisified(process.execPath, [
+    '-e', 'require("node:stream/iter")',
+  ]);
+  assert.match(stderr, /No such built-in module: node:stream\/iter/);
+  assert.notStrictEqual(code, 0);
+}
+
+async function testRequireStreamIterWithoutFlag() {
+  const { stderr, code } = await spawnPromisified(process.execPath, [
+    '-e', 'require("stream/iter")',
+  ]);
+  assert.match(stderr, /Cannot find module/);
+  assert.notStrictEqual(code, 0);
+}
+
+async function testRequireWithFlag() {
+  const { code } = await spawnPromisified(process.execPath, [
+    '--experimental-stream-iter',
+    '-e', 'require("node:stream/iter")',
+  ]);
+  assert.strictEqual(code, 0);
+}
+
+Promise.all([
+  testRequireNodeStreamIterWithoutFlag(),
+  testRequireStreamIterWithoutFlag(),
+  testRequireWithFlag(),
+]).then(common.mustCall());
diff --git a/test/parallel/test-stream-iter-duplex.js b/test/parallel/test-stream-iter-duplex.js
new file mode 100644
index 00000000000000..83c85d7be00816
--- /dev/null
+++ b/test/parallel/test-stream-iter-duplex.js
@@ -0,0 +1,188 @@
+// Flags: --experimental-stream-iter
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+const { duplex, text, bytes } = require('stream/iter');
+
+// =============================================================================
+// Basic duplex
+// =============================================================================
+
+async function testBasicDuplex() {
+  const [channelA, channelB] = duplex();
+
+  // A writes, B reads
+  await channelA.writer.write('hello from A');
+  await channelA.close();
+
+  const dataAtB = await text(channelB.readable);
+  assert.strictEqual(dataAtB, 'hello from A');
+}
+
+async function testBidirectional() {
+  const [channelA, channelB] = duplex();
+
+  // A writes to B, B writes to A concurrently
+  const writeA = (async () => {
+    await channelA.writer.write('A to B');
+    await channelA.close();
+  })();
+
+  const writeB = (async () => {
+    await channelB.writer.write('B to A');
+    await channelB.close();
+  })();
+
+  const readAtB = text(channelB.readable);
+  const readAtA = text(channelA.readable);
+
+  await Promise.all([writeA, writeB]);
+
+  const [dataAtA, dataAtB] = await Promise.all([readAtA, readAtB]);
+
+  assert.strictEqual(dataAtB, 'A to B');
+  assert.strictEqual(dataAtA, 'B to A');
+}
+
+async function testMultipleWrites() {
+  const [channelA, channelB] = duplex({ highWaterMark: 10 });
+
+  await channelA.writer.write('one');
+  await channelA.writer.write('two');
+  await channelA.writer.write('three');
+  await channelA.close();
+
+  const data = await text(channelB.readable);
+  assert.strictEqual(data, 'onetwothree');
+}
+
+async function testChannelClose() {
+  const [channelA, channelB] = duplex();
+
+  await channelA.close();
+
+  // Should be able to close twice without error
+  await channelA.close();
+
+  // B's readable should end (A -> B direction is closed)
+  const batches = [];
+  for await (const batch of channelB.readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 0);
+}
+
+async function testWithOptions() {
+  const [channelA, channelB] = duplex({
+    highWaterMark: 2,
+    backpressure: 'strict',
+  });
+
+  await channelA.writer.write('msg');
+  await channelA.close();
+
+  const data = await text(channelB.readable);
+  assert.strictEqual(data, 'msg');
+}
+
+async function testPerChannelOptions() {
+  const [channelA, channelB] = duplex({
+    a: { highWaterMark: 1 },
+    b: { highWaterMark: 4 },
+  });
+
+  // Channel A -> B direction uses A's options
+  // Channel B -> A direction uses B's options
+  await channelA.writer.write('from-a');
+  await channelA.close();
+
+  await channelB.writer.write('from-b');
+  await channelB.close();
+
+  const [dataAtA, dataAtB] = await Promise.all([
+    text(channelA.readable),
+    text(channelB.readable),
+  ]);
+
+  assert.strictEqual(dataAtB, 'from-a');
+  assert.strictEqual(dataAtA, 'from-b');
+}
+
+async function testAbortSignal() {
+  const ac = new AbortController();
+  const [channelA] = duplex({ signal: ac.signal });
+
+  ac.abort();
+
+  // Both directions should error
+  await assert.rejects(
+    async () => {
+      // eslint-disable-next-line no-unused-vars
+      for await (const _ of channelA.readable) {
+        assert.fail('Should not reach here');
+      }
+    },
+    (err) => err.name === 'AbortError',
+  );
+}
+
+async function testEmptyDuplex() {
+  const [channelA, channelB] = duplex();
+
+  // Close without writing
+  await channelA.close();
+  await channelB.close();
+
+  const dataAtA = await bytes(channelA.readable);
+  const dataAtB = await bytes(channelB.readable);
+
+  assert.strictEqual(dataAtA.byteLength, 0);
+  assert.strictEqual(dataAtB.byteLength, 0);
+}
+
+// Channel fail propagation
+async function testChannelFail() {
+  const [a, b] = duplex();
+  a.writer.fail(new Error('channel failed'));
+  await assert.rejects(async () => {
+    // eslint-disable-next-line no-unused-vars
+    for await (const _ of b.readable) { /* consume */ }
+  }, { message: 'channel failed' });
+  await b.close();
+}
+
+// Abort signal affects both channels
+async function testAbortSignalBothChannels() {
+  const ac = new AbortController();
+  const [channelA, channelB] = duplex({ signal: ac.signal });
+
+  ac.abort();
+
+  await assert.rejects(async () => {
+    // eslint-disable-next-line no-unused-vars
+    for await (const _ of channelA.readable) {
+      assert.fail('Should not reach here');
+    }
+  }, (err) => err.name === 'AbortError');
+
+  await assert.rejects(async () => {
+    // eslint-disable-next-line no-unused-vars
+    for await (const _ of channelB.readable) {
+      assert.fail('Should not reach here');
+    }
+  }, (err) => err.name === 'AbortError');
+}
+
+Promise.all([
+  testBasicDuplex(),
+  testBidirectional(),
+  testMultipleWrites(),
+  testChannelClose(),
+  testWithOptions(),
+  testPerChannelOptions(),
+  testAbortSignal(),
+  testEmptyDuplex(),
+  testChannelFail(),
+  testAbortSignalBothChannels(),
+]).then(common.mustCall());
diff --git a/test/parallel/test-stream-iter-from-async.js b/test/parallel/test-stream-iter-from-async.js
new file mode 100644
index 00000000000000..8080b7a5cd86ca
--- /dev/null
+++ b/test/parallel/test-stream-iter-from-async.js
@@ -0,0 +1,251 @@
+// Flags: --experimental-stream-iter
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+const { from, text, Stream } = require('stream/iter');
+
+async function testFromString() {
+  const readable = from('hello-async');
+  const batches = [];
+  for await (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.deepStrictEqual(batches[0][0],
+                         new TextEncoder().encode('hello-async'));
+}
+
+async function testFromAsyncGenerator() {
+  async function* gen() {
+    yield new Uint8Array([10, 20]);
+    yield new Uint8Array([30, 40]);
+  }
+  const readable = from(gen());
+  const batches = [];
+  for await (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 2);
+  assert.deepStrictEqual(batches[0][0], new Uint8Array([10, 20]));
+  assert.deepStrictEqual(batches[1][0], new Uint8Array([30, 40]));
+}
+
+async function testFromSyncIterableAsAsync() {
+  // Sync iterable passed to from() should work
+  function* gen() {
+    yield new Uint8Array([1]);
+    yield new Uint8Array([2]);
+  }
+  const readable = from(gen());
+  const batches = [];
+  for await (const batch of readable) {
+    batches.push(batch);
+  }
+  // Sync iterables get batched together into a single batch
+  assert.strictEqual(batches.length, 1);
+  assert.strictEqual(batches[0].length, 2);
+  assert.deepStrictEqual(batches[0][0], new Uint8Array([1]));
+  assert.deepStrictEqual(batches[0][1], new Uint8Array([2]));
+}
+
+async function testFromToAsyncStreamableProtocol() {
+  const sym = Symbol.for('Stream.toAsyncStreamable');
+  const obj = {
+    [sym]() {
+      return 'async-protocol-data';
+    },
+  };
+  async function* gen() {
+    yield obj;
+  }
+  const readable = from(gen());
+  const batches = [];
+  for await (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.deepStrictEqual(batches[0][0],
+                         new TextEncoder().encode('async-protocol-data'));
+}
+
+function testFromRejectsNonStreamable() {
+  assert.throws(
+    () => from(12345),
+    { code: 'ERR_INVALID_ARG_TYPE' },
+  );
+  assert.throws(
+    () => from(null),
+    { code: 'ERR_INVALID_ARG_TYPE' },
+  );
+}
+
+async function testFromEmptyArray() {
+  const readable = from([]);
+  const batches = [];
+  for await (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 0);
+}
+
+// Also accessible via Stream namespace
+async function testStreamNamespace() {
+  const readable = Stream.from('via-namespace');
+  const batches = [];
+  for await (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.deepStrictEqual(batches[0][0], new TextEncoder().encode('via-namespace'));
+}
+
+async function testCustomToStringInStreamRejects() {
+  // Objects with custom toString but no toStreamable protocol are rejected.
+  // Use toStreamable protocol instead.
+  const obj = { toString() { return 'from toString'; } };
+  async function* source() {
+    yield obj;
+  }
+  await assert.rejects(
+    () => text(from(source())),
+    { code: 'ERR_INVALID_ARG_TYPE' },
+  );
+}
+
+async function testCustomToPrimitiveInStreamRejects() {
+  // Objects with Symbol.toPrimitive but no toStreamable protocol are rejected.
+  const obj = {
+    [Symbol.toPrimitive](hint) {
+      if (hint === 'string') return 'from toPrimitive';
+      return 42;
+    },
+  };
+  async function* source() {
+    yield obj;
+  }
+  await assert.rejects(
+    () => text(from(source())),
+    { code: 'ERR_INVALID_ARG_TYPE' },
+  );
+}
+
+async function testToStreamableProtocolInStream() {
+  // Objects should use toStreamable protocol instead of toString
+  const obj = {
+    [Symbol.for('Stream.toStreamable')]() { return 'from protocol'; },
+  };
+  async function* source() {
+    yield obj;
+  }
+  const result = await text(from(source()));
+  assert.strictEqual(result, 'from protocol');
+}
+
+// Both toAsyncStreamable and toStreamable: async takes precedence
+async function testFromAsyncStreamablePrecedence() {
+  const obj = {
+    [Symbol.for('Stream.toStreamable')]() { return 'sync version'; },
+    [Symbol.for('Stream.toAsyncStreamable')]() { return 'async version'; },
+  };
+  async function* gen() { yield obj; }
+  const result = await text(from(gen()));
+  assert.strictEqual(result, 'async version');
+}
+
+// Top-level toAsyncStreamable protocol on input to from()
+async function testFromTopLevelToAsyncStreamable() {
+  const obj = {
+    [Symbol.for('Stream.toAsyncStreamable')]() {
+      return 'top-level-async';
+    },
+  };
+  const result = await text(from(obj));
+  assert.strictEqual(result, 'top-level-async');
+}
+
+// Top-level toAsyncStreamable returning a Promise
+async function testFromTopLevelToAsyncStreamablePromise() {
+  const obj = {
+    [Symbol.for('Stream.toAsyncStreamable')]() {
+      return Promise.resolve('async-promise');
+    },
+  };
+  const result = await text(from(obj));
+  assert.strictEqual(result, 'async-promise');
+}
+
+// Top-level toStreamable protocol on input to from()
+async function testFromTopLevelToStreamable() {
+  const obj = {
+    [Symbol.for('Stream.toStreamable')]() {
+      return 'top-level-sync';
+    },
+  };
+  const result = await text(from(obj));
+  assert.strictEqual(result, 'top-level-sync');
+}
+
+// Top-level: toAsyncStreamable takes precedence over toStreamable
+async function testFromTopLevelAsyncPrecedence() {
+  const obj = {
+    [Symbol.for('Stream.toStreamable')]() { return 'sync'; },
+    [Symbol.for('Stream.toAsyncStreamable')]() { return 'async'; },
+  };
+  const result = await text(from(obj));
+  assert.strictEqual(result, 'async');
+}
+
+// Top-level: toAsyncStreamable takes precedence over Symbol.asyncIterator
+async function testFromTopLevelProtocolOverIterator() {
+  const obj = {
+    [Symbol.for('Stream.toAsyncStreamable')]() { return 'from-protocol'; },
+    async *[Symbol.asyncIterator]() { yield [new TextEncoder().encode('from-iterator')]; },
+  };
+  const result = await text(from(obj));
+  assert.strictEqual(result, 'from-protocol');
+}
+
+// DataView input should be converted to Uint8Array (zero-copy)
+async function testFromDataView() {
+  const buf = new ArrayBuffer(5);
+  const view = new DataView(buf);
+  // Write "hello" into the DataView
+  view.setUint8(0, 0x68); // h
+  view.setUint8(1, 0x65); // e
+  view.setUint8(2, 0x6c); // l
+  view.setUint8(3, 0x6c); // l
+  view.setUint8(4, 0x6f); // o
+  const result = await text(from(view));
+  assert.strictEqual(result, 'hello');
+}
+
+function testFromNullThrows() {
+  assert.throws(() => from(null), { code: 'ERR_INVALID_ARG_TYPE' });
+}
+
+function testFromUndefinedThrows() {
+  assert.throws(() => from(undefined), { code: 'ERR_INVALID_ARG_TYPE' });
+}
+
+Promise.all([
+  testFromString(),
+  testFromAsyncGenerator(),
+  testFromSyncIterableAsAsync(),
+  testFromToAsyncStreamableProtocol(),
+  testFromRejectsNonStreamable(),
+  testFromEmptyArray(),
+  testStreamNamespace(),
+  testCustomToStringInStreamRejects(),
+  testCustomToPrimitiveInStreamRejects(),
+  testToStreamableProtocolInStream(),
+  testFromAsyncStreamablePrecedence(),
+  testFromNullThrows(),
+  testFromUndefinedThrows(),
+  testFromTopLevelToAsyncStreamable(),
+  testFromTopLevelToAsyncStreamablePromise(),
+  testFromTopLevelToStreamable(),
+  testFromTopLevelAsyncPrecedence(),
+  testFromTopLevelProtocolOverIterator(),
+  testFromDataView(),
+]).then(common.mustCall());
diff --git a/test/parallel/test-stream-iter-from-coverage.js b/test/parallel/test-stream-iter-from-coverage.js
new file mode 100644
index 00000000000000..c4f622a56bd7fa
--- /dev/null
+++ b/test/parallel/test-stream-iter-from-coverage.js
@@ -0,0 +1,144 @@
+// Flags: --experimental-stream-iter
+'use strict';
+
+// Coverage tests for from.js: sub-batching >128, DataView in generator,
+// non-Uint8Array TypedArray normalization.
+
+const common = require('../common');
+const assert = require('assert');
+const {
+  from,
+  fromSync,
+  bytes,
+  bytesSync,
+} = require('stream/iter');
+
+// fromSync: Uint8Array[] with > 128 elements triggers sub-batching
+async function testFromSyncSubBatching() {
+  const bigBatch = Array.from({ length: 200 },
+                              (_, i) => new Uint8Array([i & 0xFF]));
+  const batches = [];
+  for (const batch of fromSync(bigBatch)) {
+    batches.push(batch);
+  }
+  // Should be split into sub-batches: 128 + 72
+  assert.strictEqual(batches.length, 2);
+  assert.strictEqual(batches[0].length, 128);
+  assert.strictEqual(batches[1].length, 72);
+  // Verify no data loss
+  let totalChunks = 0;
+  for (const batch of batches) totalChunks += batch.length;
+  assert.strictEqual(totalChunks, 200);
+}
+
+// from: Uint8Array[] with > 128 elements triggers sub-batching (async)
+async function testFromAsyncSubBatching() {
+  const bigBatch = Array.from({ length: 200 },
+                              (_, i) => new Uint8Array([i & 0xFF]));
+  const batches = [];
+  for await (const batch of from(bigBatch)) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 2);
+  assert.strictEqual(batches[0].length, 128);
+  assert.strictEqual(batches[1].length, 72);
+}
+
+// Exact boundary: 128 elements → single batch (no split)
+async function testFromSubBatchingBoundary() {
+  const exactBatch = Array.from({ length: 128 },
+                                (_, i) => new Uint8Array([i]));
+  const batches = [];
+  for (const batch of fromSync(exactBatch)) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.strictEqual(batches[0].length, 128);
+}
+
+// 129 elements → 2 batches (128 + 1)
+async function testFromSubBatchingBoundaryPlus1() {
+  const batch129 = Array.from({ length: 129 },
+                              (_, i) => new Uint8Array([i & 0xFF]));
+  const batches = [];
+  for await (const batch of from(batch129)) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 2);
+  assert.strictEqual(batches[0].length, 128);
+  assert.strictEqual(batches[1].length, 1);
+}
+
+// DataView yielded from a sync generator → normalizeSyncValue path
+async function testFromSyncDataViewInGenerator() {
+  function* gen() {
+    const buf = new ArrayBuffer(3);
+    const dv = new DataView(buf);
+    dv.setUint8(0, 65);
+    dv.setUint8(1, 66);
+    dv.setUint8(2, 67);
+    yield dv;
+  }
+  const data = bytesSync(fromSync(gen()));
+  assert.deepStrictEqual(data, new Uint8Array([65, 66, 67]));
+}
+
+// DataView yielded from an async generator → normalizeAsyncValue path
+async function testFromAsyncDataViewInGenerator() {
+  async function* gen() {
+    const buf = new ArrayBuffer(3);
+    const dv = new DataView(buf);
+    dv.setUint8(0, 68);
+    dv.setUint8(1, 69);
+    dv.setUint8(2, 70);
+    yield dv;
+  }
+  const data = await bytes(from(gen()));
+  assert.deepStrictEqual(data, new Uint8Array([68, 69, 70]));
+}
+
+// Int16Array yielded from generator → primitiveToUint8Array fallback
+async function testFromSyncInt16ArrayInGenerator() {
+  function* gen() {
+    yield new Int16Array([0x0102, 0x0304]);
+  }
+  const data = bytesSync(fromSync(gen()));
+  assert.strictEqual(data.byteLength, 4); // 2 int16 = 4 bytes
+}
+
+// Float64Array as top-level input to from()
+async function testFromFloat64Array() {
+  const f64 = new Float64Array([1.0]);
+  const batches = [];
+  for await (const batch of from(f64)) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.strictEqual(batches[0][0].byteLength, 8); // 1 float64 = 8 bytes
+}
+
+// Sync generator yielding invalid type → ERR_INVALID_ARG_TYPE
+async function testFromSyncInvalidYield() {
+  function* gen() {
+    yield 42; // Not a valid stream value
+  }
+  assert.throws(
+    () => {
+      // eslint-disable-next-line no-unused-vars
+      for (const batch of fromSync(gen())) { /* consume */ }
+    },
+    { code: 'ERR_INVALID_ARG_TYPE' },
+  );
+}
+
+Promise.all([
+  testFromSyncSubBatching(),
+  testFromAsyncSubBatching(),
+  testFromSubBatchingBoundary(),
+  testFromSubBatchingBoundaryPlus1(),
+  testFromSyncDataViewInGenerator(),
+  testFromAsyncDataViewInGenerator(),
+  testFromSyncInt16ArrayInGenerator(),
+  testFromFloat64Array(),
+  testFromSyncInvalidYield(),
+]).then(common.mustCall());
diff --git a/test/parallel/test-stream-iter-from-sync.js b/test/parallel/test-stream-iter-from-sync.js
new file mode 100644
index 00000000000000..a9ce0bd575abdc
--- /dev/null
+++ b/test/parallel/test-stream-iter-from-sync.js
@@ -0,0 +1,236 @@
+// Flags: --experimental-stream-iter
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+const { fromSync } = require('stream/iter');
+
+function testFromSyncString() {
+  // String input should be UTF-8 encoded
+  const readable = fromSync('hello');
+  const batches = [];
+  for (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.strictEqual(batches[0].length, 1);
+  assert.deepStrictEqual(batches[0][0],
+                         new TextEncoder().encode('hello'));
+}
+
+function testFromSyncUint8Array() {
+  const input = new Uint8Array([1, 2, 3]);
+  const readable = fromSync(input);
+  const batches = [];
+  for (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.strictEqual(batches[0].length, 1);
+  assert.deepStrictEqual(batches[0][0], input);
+}
+
+function testFromSyncArrayBuffer() {
+  const ab = new ArrayBuffer(4);
+  new Uint8Array(ab).set([10, 20, 30, 40]);
+  const readable = fromSync(ab);
+  const batches = [];
+  for (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.deepStrictEqual(batches[0][0], new Uint8Array([10, 20, 30, 40]));
+}
+
+function testFromSyncUint8ArrayArray() {
+  // Array of Uint8Array should yield as a single batch
+  const chunks = [new Uint8Array([1]), new Uint8Array([2])];
+  const readable = fromSync(chunks);
+  const batches = [];
+  for (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.strictEqual(batches[0].length, 2);
+  assert.deepStrictEqual(batches[0][0], new Uint8Array([1]));
+  assert.deepStrictEqual(batches[0][1], new Uint8Array([2]));
+}
+
+function testFromSyncGenerator() {
+  function* gen() {
+    yield new Uint8Array([1, 2]);
+    yield new Uint8Array([3, 4]);
+  }
+  const readable = fromSync(gen());
+  const batches = [];
+  for (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 2);
+  assert.deepStrictEqual(batches[0][0], new Uint8Array([1, 2]));
+  assert.deepStrictEqual(batches[1][0], new Uint8Array([3, 4]));
+}
+
+function testFromSyncNestedIterables() {
+  // Nested arrays and strings should be flattened
+  function* gen() {
+    yield ['hello', ' ', 'world'];
+  }
+  const readable = fromSync(gen());
+  const batches = [];
+  for (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.strictEqual(batches[0].length, 3);
+  assert.deepStrictEqual(batches[0][0], new TextEncoder().encode('hello'));
+  assert.deepStrictEqual(batches[0][1], new TextEncoder().encode(' '));
+  assert.deepStrictEqual(batches[0][2], new TextEncoder().encode('world'));
+}
+
+function testFromSyncToStreamableProtocol() {
+  const sym = Symbol.for('Stream.toStreamable');
+  const obj = {
+    [sym]() {
+      return 'protocol-data';
+    },
+  };
+  function* gen() {
+    yield obj;
+  }
+  const readable = fromSync(gen());
+  const batches = [];
+  for (const batch of readable) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.deepStrictEqual(batches[0][0],
+                         new TextEncoder().encode('protocol-data'));
+}
+
+function testFromSyncGeneratorError() {
+  function* gen() {
+    yield new Uint8Array([1]);
+    throw new Error('generator boom');
+  }
+  const readable = fromSync(gen());
+  assert.throws(() => {
+    // eslint-disable-next-line no-unused-vars
+    for (const _ of readable) { /* consume */ }
+  }, { message: 'generator boom' });
+}
+
+function testFromSyncRejectsNonStreamable() {
+  assert.throws(
+    () => fromSync(12345),
+    { code: 'ERR_INVALID_ARG_TYPE' },
+  );
+  assert.throws(
+    () => fromSync(null),
+    { code: 'ERR_INVALID_ARG_TYPE' },
+  );
+}
+
+function testFromSyncEmptyGenerator() {
+  function* empty() {}
+  let count = 0;
+  // eslint-disable-next-line no-unused-vars
+  for (const _ of fromSync(empty())) { count++; }
+  assert.strictEqual(count, 0);
+}
+
+// Top-level toStreamable protocol on input to fromSync()
+function testFromSyncTopLevelToStreamable() {
+  const obj = {
+    [Symbol.for('Stream.toStreamable')]() {
+      return 'top-level-sync';
+    },
+  };
+  const batches = [];
+  for (const batch of fromSync(obj)) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.deepStrictEqual(batches[0][0],
+                         new TextEncoder().encode('top-level-sync'));
+}
+
+// Top-level: toStreamable takes precedence over Symbol.iterator
+function testFromSyncTopLevelProtocolOverIterator() {
+  const obj = {
+    [Symbol.for('Stream.toStreamable')]() { return 'from-protocol'; },
+    *[Symbol.iterator]() { yield [new TextEncoder().encode('from-iterator')]; },
+  };
+  const batches = [];
+  for (const batch of fromSync(obj)) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.deepStrictEqual(batches[0][0],
+                         new TextEncoder().encode('from-protocol'));
+}
+
+// Top-level: toAsyncStreamable is ignored by fromSync
+function testFromSyncIgnoresAsyncStreamable() {
+  const obj = {
+    [Symbol.for('Stream.toAsyncStreamable')]() { return 'async'; },
+  };
+  // Has no toStreamable and no Symbol.iterator, should throw
+  assert.throws(() => fromSync(obj), { code: 'ERR_INVALID_ARG_TYPE' });
+}
+
+// Explicit async iterable rejected
+function testFromSyncRejectsAsyncIterable() {
+  async function* gen() { yield [new TextEncoder().encode('a')]; }
+  assert.throws(() => fromSync(gen()), { code: 'ERR_INVALID_ARG_TYPE' });
+}
+
+// Promise rejected
+function testFromSyncRejectsPromise() {
+  assert.throws(() => fromSync(Promise.resolve('hello')),
+                { code: 'ERR_INVALID_ARG_TYPE' });
+}
+
+// DataView input should be converted to Uint8Array (zero-copy)
+function testFromSyncDataView() {
+  const buf = new ArrayBuffer(3);
+  const view = new DataView(buf);
+  view.setUint8(0, 0x48); // H
+  view.setUint8(1, 0x49); // I
+  view.setUint8(2, 0x21); // !
+  const batches = [];
+  for (const batch of fromSync(view)) {
+    batches.push(batch);
+  }
+  assert.strictEqual(batches.length, 1);
+  assert.deepStrictEqual(batches[0][0], new Uint8Array([0x48, 0x49, 0x21]));
+}
+
+function testFromSyncNullThrows() {
+  assert.throws(() => fromSync(null), { code: 'ERR_INVALID_ARG_TYPE' });
+}
+
+function testFromSyncUndefinedThrows() {
+  assert.throws(() => fromSync(undefined), { code: 'ERR_INVALID_ARG_TYPE' });
+}
+
+Promise.all([
+  testFromSyncString(),
+  testFromSyncUint8Array(),
+  testFromSyncArrayBuffer(),
+  testFromSyncUint8ArrayArray(),
+  testFromSyncGenerator(),
+  testFromSyncNestedIterables(),
+  testFromSyncToStreamableProtocol(),
+  testFromSyncGeneratorError(),
+  testFromSyncRejectsNonStreamable(),
+  testFromSyncEmptyGenerator(),
+  testFromSyncNullThrows(),
+  testFromSyncUndefinedThrows(),
+  testFromSyncTopLevelToStreamable(),
+  testFromSyncTopLevelProtocolOverIterator(),
+  testFromSyncIgnoresAsyncStreamable(),
+  testFromSyncRejectsAsyncIterable(),
+  testFromSyncRejectsPromise(),
+  testFromSyncDataView(),
+]).then(common.mustCall());
diff --git a/test/parallel/test-stream-iter-namespace.js b/test/parallel/test-stream-iter-namespace.js
new file mode 100644
index 00000000000000..ce197e85846dfe
--- /dev/null
+++ b/test/parallel/test-stream-iter-namespace.js
@@ -0,0 +1,210 @@
+// Flags: --experimental-stream-iter
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+const streamNew = require('stream/iter');
+
+// =============================================================================
+// Stream namespace object
+// =============================================================================
+
+async function testStreamNamespaceExists() {
+  assert.ok(streamNew.Stream);
+  assert.strictEqual(typeof streamNew.Stream, 'object');
+}
+
+async function testStreamNamespaceFrozen() {
+  assert.ok(Object.isFrozen(streamNew.Stream));
+}
+
+async function testStreamNamespaceFactories() {
+  const {
Stream } = streamNew; + + assert.strictEqual(typeof Stream.push, 'function'); + assert.strictEqual(typeof Stream.duplex, 'function'); + assert.strictEqual(typeof Stream.from, 'function'); + assert.strictEqual(typeof Stream.fromSync, 'function'); +} + +async function testStreamNamespacePipelines() { + const { Stream } = streamNew; + + assert.strictEqual(typeof Stream.pull, 'function'); + assert.strictEqual(typeof Stream.pullSync, 'function'); + assert.strictEqual(typeof Stream.pipeTo, 'function'); + assert.strictEqual(typeof Stream.pipeToSync, 'function'); +} + +async function testStreamNamespaceAsyncConsumers() { + const { Stream } = streamNew; + + assert.strictEqual(typeof Stream.bytes, 'function'); + assert.strictEqual(typeof Stream.text, 'function'); + assert.strictEqual(typeof Stream.arrayBuffer, 'function'); + assert.strictEqual(typeof Stream.array, 'function'); +} + +async function testStreamNamespaceSyncConsumers() { + const { Stream } = streamNew; + + assert.strictEqual(typeof Stream.bytesSync, 'function'); + assert.strictEqual(typeof Stream.textSync, 'function'); + assert.strictEqual(typeof Stream.arrayBufferSync, 'function'); + assert.strictEqual(typeof Stream.arraySync, 'function'); +} + +async function testStreamNamespaceCombining() { + const { Stream } = streamNew; + + assert.strictEqual(typeof Stream.merge, 'function'); + assert.strictEqual(typeof Stream.broadcast, 'function'); + assert.strictEqual(typeof Stream.share, 'function'); + assert.strictEqual(typeof Stream.shareSync, 'function'); +} + +async function testStreamNamespaceUtilities() { + const { Stream } = streamNew; + + assert.strictEqual(typeof Stream.tap, 'function'); + assert.strictEqual(typeof Stream.tapSync, 'function'); + assert.strictEqual(typeof Stream.ondrain, 'function'); +} + +async function testStreamNamespaceProtocols() { + const { Stream } = streamNew; + + assert.strictEqual(typeof Stream.toStreamable, 'symbol'); + assert.strictEqual(typeof Stream.toAsyncStreamable, 'symbol'); + 
assert.strictEqual(typeof Stream.broadcastProtocol, 'symbol'); + assert.strictEqual(typeof Stream.shareProtocol, 'symbol'); + assert.strictEqual(typeof Stream.shareSyncProtocol, 'symbol'); + assert.strictEqual(typeof Stream.drainableProtocol, 'symbol'); +} + +// ============================================================================= +// Individual exports (destructured imports) +// ============================================================================= + +async function testIndividualExports() { + // Factories + assert.strictEqual(typeof streamNew.push, 'function'); + assert.strictEqual(typeof streamNew.duplex, 'function'); + assert.strictEqual(typeof streamNew.from, 'function'); + assert.strictEqual(typeof streamNew.fromSync, 'function'); + + // Pipelines + assert.strictEqual(typeof streamNew.pull, 'function'); + assert.strictEqual(typeof streamNew.pullSync, 'function'); + assert.strictEqual(typeof streamNew.pipeTo, 'function'); + assert.strictEqual(typeof streamNew.pipeToSync, 'function'); + + // Consumers + assert.strictEqual(typeof streamNew.bytes, 'function'); + assert.strictEqual(typeof streamNew.bytesSync, 'function'); + assert.strictEqual(typeof streamNew.text, 'function'); + assert.strictEqual(typeof streamNew.textSync, 'function'); + assert.strictEqual(typeof streamNew.arrayBuffer, 'function'); + assert.strictEqual(typeof streamNew.arrayBufferSync, 'function'); + assert.strictEqual(typeof streamNew.array, 'function'); + assert.strictEqual(typeof streamNew.arraySync, 'function'); + + // Combining + assert.strictEqual(typeof streamNew.merge, 'function'); + assert.strictEqual(typeof streamNew.broadcast, 'function'); + assert.strictEqual(typeof streamNew.share, 'function'); + assert.strictEqual(typeof streamNew.shareSync, 'function'); + + // Utilities + assert.strictEqual(typeof streamNew.tap, 'function'); + assert.strictEqual(typeof streamNew.tapSync, 'function'); + assert.strictEqual(typeof streamNew.ondrain, 'function'); + + // Protocol symbols 
+ assert.strictEqual(typeof streamNew.toStreamable, 'symbol'); + assert.strictEqual(typeof streamNew.toAsyncStreamable, 'symbol'); + assert.strictEqual(typeof streamNew.broadcastProtocol, 'symbol'); + assert.strictEqual(typeof streamNew.shareProtocol, 'symbol'); + assert.strictEqual(typeof streamNew.shareSyncProtocol, 'symbol'); + assert.strictEqual(typeof streamNew.drainableProtocol, 'symbol'); +} + +async function testMultiConsumerExports() { + // Broadcast and Share constructors/factories + assert.ok(streamNew.Broadcast); + assert.strictEqual(typeof streamNew.Broadcast.from, 'function'); + assert.ok(streamNew.Share); + assert.strictEqual(typeof streamNew.Share.from, 'function'); + assert.ok(streamNew.SyncShare); + assert.strictEqual(typeof streamNew.SyncShare.fromSync, 'function'); +} + +// ============================================================================= +// Cross-check: namespace matches individual exports +// ============================================================================= + +async function testNamespaceMatchesExports() { + const { Stream } = streamNew; + + // Every function on Stream should also be available as a direct export + assert.strictEqual(Stream.push, streamNew.push); + assert.strictEqual(Stream.duplex, streamNew.duplex); + assert.strictEqual(Stream.from, streamNew.from); + assert.strictEqual(Stream.fromSync, streamNew.fromSync); + assert.strictEqual(Stream.pull, streamNew.pull); + assert.strictEqual(Stream.pullSync, streamNew.pullSync); + assert.strictEqual(Stream.pipeTo, streamNew.pipeTo); + assert.strictEqual(Stream.pipeToSync, streamNew.pipeToSync); + assert.strictEqual(Stream.bytes, streamNew.bytes); + assert.strictEqual(Stream.text, streamNew.text); + assert.strictEqual(Stream.arrayBuffer, streamNew.arrayBuffer); + assert.strictEqual(Stream.array, streamNew.array); + assert.strictEqual(Stream.bytesSync, streamNew.bytesSync); + assert.strictEqual(Stream.textSync, streamNew.textSync); + 
assert.strictEqual(Stream.arrayBufferSync, streamNew.arrayBufferSync); + assert.strictEqual(Stream.arraySync, streamNew.arraySync); + assert.strictEqual(Stream.merge, streamNew.merge); + assert.strictEqual(Stream.broadcast, streamNew.broadcast); + assert.strictEqual(Stream.share, streamNew.share); + assert.strictEqual(Stream.shareSync, streamNew.shareSync); + assert.strictEqual(Stream.tap, streamNew.tap); + assert.strictEqual(Stream.tapSync, streamNew.tapSync); + assert.strictEqual(Stream.ondrain, streamNew.ondrain); + + // Protocol symbols + assert.strictEqual(Stream.toStreamable, streamNew.toStreamable); + assert.strictEqual(Stream.toAsyncStreamable, streamNew.toAsyncStreamable); + assert.strictEqual(Stream.broadcastProtocol, streamNew.broadcastProtocol); + assert.strictEqual(Stream.shareProtocol, streamNew.shareProtocol); + assert.strictEqual(Stream.shareSyncProtocol, streamNew.shareSyncProtocol); + assert.strictEqual(Stream.drainableProtocol, streamNew.drainableProtocol); +} + +// ============================================================================= +// Require paths +// ============================================================================= + +async function testRequirePaths() { + // Both require('stream/iter') and require('node:stream/iter') should work + const fromPlain = require('stream/iter'); + const fromNode = require('node:stream/iter'); + + assert.strictEqual(fromPlain.Stream, fromNode.Stream); + assert.strictEqual(fromPlain.push, fromNode.push); +} + +Promise.all([ + testStreamNamespaceExists(), + testStreamNamespaceFrozen(), + testStreamNamespaceFactories(), + testStreamNamespacePipelines(), + testStreamNamespaceAsyncConsumers(), + testStreamNamespaceSyncConsumers(), + testStreamNamespaceCombining(), + testStreamNamespaceUtilities(), + testStreamNamespaceProtocols(), + testIndividualExports(), + testMultiConsumerExports(), + testNamespaceMatchesExports(), + testRequirePaths(), +]).then(common.mustCall()); diff --git 
a/test/parallel/test-stream-iter-pipeto-edge.js b/test/parallel/test-stream-iter-pipeto-edge.js new file mode 100644 index 00000000000000..3f09c4dfd42d00 --- /dev/null +++ b/test/parallel/test-stream-iter-pipeto-edge.js @@ -0,0 +1,68 @@ +// Flags: --experimental-stream-iter +'use strict'; + +// Edge case tests for pipeToSync: endSync fallback, preventFail. + +const common = require('../common'); +const assert = require('assert'); +const { pipeToSync, fromSync } = require('stream/iter'); + +// pipeToSync endSync returns negative → falls back to end() +async function testPipeToSyncEndSyncFallback() { + let endCalled = false; + const writer = { + writeSync() { return true; }, + endSync() { return -1; }, // Negative → triggers end() fallback + end() { endCalled = true; }, + }; + pipeToSync(fromSync('data'), writer); + assert.strictEqual(endCalled, true); +} + +// pipeToSync endSync missing → falls back to end() +async function testPipeToSyncNoEndSync() { + let endCalled = false; + const writer = { + writeSync() { return true; }, + end() { endCalled = true; }, + }; + pipeToSync(fromSync('data'), writer); + assert.strictEqual(endCalled, true); +} + +// pipeToSync with preventFail: true — source error does NOT call fail() +async function testPipeToSyncPreventFail() { + let failCalled = false; + const writer = { + writeSync() { return true; }, + endSync() { return 0; }, + fail() { failCalled = true; }, + }; + function* badSource() { + yield [new Uint8Array([1])]; + throw new Error('source error'); + } + assert.throws( + () => pipeToSync(badSource(), writer, { preventFail: true }), + { message: 'source error' }, + ); + assert.strictEqual(failCalled, false); +} + +// pipeToSync with preventClose: true — end/endSync not called +async function testPipeToSyncPreventClose() { + let endCalled = false; + const writer = { + writeSync() { return true; }, + endSync() { endCalled = true; return 0; }, + }; + pipeToSync(fromSync('data'), writer, { preventClose: true }); + 
assert.strictEqual(endCalled, false); +} + +Promise.all([ + testPipeToSyncEndSyncFallback(), + testPipeToSyncNoEndSync(), + testPipeToSyncPreventFail(), + testPipeToSyncPreventClose(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-pipeto-signal.js b/test/parallel/test-stream-iter-pipeto-signal.js new file mode 100644 index 00000000000000..153ee1a80e6176 --- /dev/null +++ b/test/parallel/test-stream-iter-pipeto-signal.js @@ -0,0 +1,90 @@ +// Flags: --experimental-stream-iter +'use strict'; + +// Tests for pipeTo with live (non-pre-aborted) AbortSignal, +// both with and without transforms. + +const common = require('../common'); +const assert = require('assert'); +const { pipeTo, from } = require('stream/iter'); + +// pipeTo with live signal, no transforms — abort mid-stream +async function testPipeToLiveSignalNoTransforms() { + const ac = new AbortController(); + const written = []; + const writer = { + async write(chunk) { written.push(chunk); }, + async end() {}, + }; + async function* source() { + yield [new Uint8Array([1])]; + yield [new Uint8Array([2])]; + ac.abort(); + yield [new Uint8Array([3])]; + } + await assert.rejects( + () => pipeTo(source(), writer, { signal: ac.signal }), + { name: 'AbortError' }, + ); + // Should have written at least the first two chunks before abort + assert.ok(written.length >= 1); +} + +// pipeTo with live signal + transforms — abort mid-stream +async function testPipeToLiveSignalWithTransforms() { + const ac = new AbortController(); + const written = []; + const writer = { + async write(chunk) { written.push(chunk); }, + async end() {}, + }; + const identity = (chunks) => chunks; + async function* source() { + yield [new Uint8Array([10])]; + yield [new Uint8Array([20])]; + ac.abort(); + yield [new Uint8Array([30])]; + } + await assert.rejects( + () => pipeTo(source(), identity, writer, { signal: ac.signal }), + { name: 'AbortError' }, + ); + assert.ok(written.length >= 1); +} + +// pipeTo with live 
signal, no abort — runs to completion +async function testPipeToLiveSignalCompletes() { + const ac = new AbortController(); + const written = []; + const writer = { + write(chunk) { written.push(chunk); }, + writeSync(chunk) { written.push(chunk); return true; }, + async end() {}, + endSync() { return written.length; }, + }; + await pipeTo(from('signal-ok'), writer, { signal: ac.signal }); + assert.ok(written.length > 0); +} + +// pipeTo with live signal + transforms, no abort — runs to completion +async function testPipeToLiveSignalWithTransformsCompletes() { + const ac = new AbortController(); + const written = []; + const writer = { + write(chunk) { written.push(chunk); }, + writeSync(chunk) { written.push(chunk); return true; }, + async end() {}, + endSync() { return written.length; }, + }; + const identity = (chunks) => chunks; + await pipeTo(from('signal-tx-ok'), identity, writer, + { signal: ac.signal }); + assert.ok(written.length > 0); +} + +Promise.all([ + testPipeToLiveSignalNoTransforms(), + testPipeToLiveSignalWithTransforms(), + testPipeToLiveSignalCompletes(), + testPipeToLiveSignalWithTransformsCompletes(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-pipeto-writev.js b/test/parallel/test-stream-iter-pipeto-writev.js new file mode 100644 index 00000000000000..505bdd6d2b2ced --- /dev/null +++ b/test/parallel/test-stream-iter-pipeto-writev.js @@ -0,0 +1,147 @@ +// Flags: --experimental-stream-iter +'use strict'; + +// Tests for pipeTo writev/writevSync paths and writeBatchAsyncFallback. 
+ +const common = require('../common'); +const assert = require('assert'); +const { pipeTo, pipeToSync } = require('stream/iter'); + +// Multi-chunk batch with writevSync (sync success path) +async function testWritevSyncSuccess() { + const batches = []; + const writer = { + write(chunk) {}, + writevSync(chunks) { batches.push(chunks); return true; }, + writev(chunks) { batches.push(chunks); }, + writeSync(chunk) { return true; }, + endSync() { return 0; }, + }; + // Source that yields multi-chunk batches + async function* source() { + yield [new Uint8Array([1]), new Uint8Array([2]), new Uint8Array([3])]; + yield [new Uint8Array([4]), new Uint8Array([5])]; + } + const total = await pipeTo(source(), writer); + assert.ok(batches.length > 0); + // writevSync was used for multi-chunk batches + assert.ok(batches.some((b) => b.length > 1)); + assert.strictEqual(total, 5); +} + +// Multi-chunk batch with writev async (no writevSync) +async function testWritevAsyncFallback() { + const batches = []; + const writer = { + async writev(chunks) { batches.push(chunks); }, + async write(chunk) { batches.push([chunk]); }, + async end() {}, + }; + async function* source() { + yield [new Uint8Array([1]), new Uint8Array([2]), new Uint8Array([3])]; + } + await pipeTo(source(), writer); + assert.ok(batches.length > 0); + assert.ok(batches.some((b) => b.length > 1)); +} + +// writevSync returns false — falls through to async writev +async function testWritevSyncFails() { + const asyncCalls = []; + const writer = { + write() {}, + writevSync() { return false; }, + async writev(chunks) { asyncCalls.push(chunks); }, + writeSync() { return true; }, + endSync() { return 0; }, + }; + async function* source() { + yield [new Uint8Array([1]), new Uint8Array([2])]; + } + await pipeTo(source(), writer); + assert.strictEqual(asyncCalls.length, 1); + assert.strictEqual(asyncCalls[0].length, 2); +} + +// writeSync fails mid-batch — triggers writeBatchAsyncFallback +async function 
testWriteSyncFailsMidBatch() { + const asyncWrites = []; + const writer = { + writeSync(chunk) { + // Fail for chunk value 2 — always, including retries + if (chunk[0] === 2) return false; + return true; + }, + async write(chunk) { asyncWrites.push(chunk); }, + async end() {}, + }; + // Single batch with 3 chunks + async function* source() { + yield [new Uint8Array([1]), new Uint8Array([2]), new Uint8Array([3])]; + } + const total = await pipeTo(source(), writer); + // Chunk 1: writeSync succeeds + // Chunk 2: writeSync fails → writeBatchAsyncFallback → write() called + // Chunk 3: writeBatchAsyncFallback retries writeSync → succeeds + assert.ok(asyncWrites.length >= 1); + assert.deepStrictEqual(asyncWrites[0], new Uint8Array([2])); + assert.strictEqual(total, 3); +} + +// writeSync always fails — all chunks go through async +async function testWriteSyncAlwaysFails() { + const asyncWrites = []; + const writer = { + writeSync() { return false; }, + async write(chunk) { asyncWrites.push(chunk); }, + async end() {}, + }; + async function* source() { + yield [new Uint8Array([10]), new Uint8Array([20])]; + } + const total = await pipeTo(source(), writer); + assert.strictEqual(asyncWrites.length, 2); + assert.strictEqual(total, 2); +} + +// pipeToSync with writevSync +async function testPipeToSyncWritev() { + const batches = []; + const writer = { + writevSync(chunks) { batches.push(chunks); }, + writeSync(chunk) { return true; }, + endSync() { return 0; }, + }; + function* source() { + yield [new Uint8Array([1]), new Uint8Array([2]), new Uint8Array([3])]; + yield [new Uint8Array([4])]; + } + pipeToSync(source(), writer); + // Multi-chunk batch should have used writevSync + assert.ok(batches.some((b) => b.length > 1)); +} + +// pipeToSync with writer that has write() and writeSync() — writeSync preferred +async function testPipeToSyncWriteFallback() { + const syncWrites = []; + const writer = { + writeSync(chunk) { syncWrites.push(chunk); return true; }, + write(chunk) { 
/* should not be called */ }, + endSync() { return 0; }, + }; + function* source() { + yield [new Uint8Array([1]), new Uint8Array([2])]; + } + pipeToSync(source(), writer); + assert.strictEqual(syncWrites.length, 2); +} + +Promise.all([ + testWritevSyncSuccess(), + testWritevAsyncFallback(), + testWritevSyncFails(), + testWriteSyncFailsMidBatch(), + testWriteSyncAlwaysFails(), + testPipeToSyncWritev(), + testPipeToSyncWriteFallback(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-pipeto.js b/test/parallel/test-stream-iter-pipeto.js new file mode 100644 index 00000000000000..9845a5ba254efb --- /dev/null +++ b/test/parallel/test-stream-iter-pipeto.js @@ -0,0 +1,237 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { pipeTo, pipeToSync, from, fromSync } = require('stream/iter'); + +async function testPipeToSync() { + const written = []; + const writer = { + writeSync(chunk) { written.push(chunk); return true; }, + endSync() { return written.length; }, + fail() {}, + }; + + const totalBytes = pipeToSync(fromSync('pipe-data'), writer); + assert.strictEqual(totalBytes, 9); // 'pipe-data' = 9 UTF-8 bytes + assert.ok(written.length > 0); + const result = new TextDecoder().decode( + new Uint8Array(written.reduce((acc, c) => [...acc, ...c], []))); + assert.strictEqual(result, 'pipe-data'); +} + +async function testPipeTo() { + const written = []; + const writer = { + async write(chunk) { written.push(chunk); }, + async end() { return written.length; }, + async fail() {}, + }; + + const totalBytes = await pipeTo(from('async-pipe-data'), writer); + assert.strictEqual(totalBytes, 15); // 'async-pipe-data' = 15 UTF-8 bytes + assert.ok(written.length > 0); +} + +async function testPipeToPreventClose() { + let endCalled = false; + const writer = { + async write() {}, + async end() { endCalled = true; }, + async fail() {}, + }; + + await pipeTo(from('data'), writer, { 
preventClose: true }); + assert.strictEqual(endCalled, false); +} + +// PipeTo source error calls writer.fail() +async function testPipeToSourceError() { + let failCalled = false; + let failReason; + const writer = { + write() {}, + fail(reason) { failCalled = true; failReason = reason; }, + }; + async function* failingSource() { + yield [new TextEncoder().encode('a')]; + throw new Error('pipe source boom'); + } + await assert.rejects( + () => pipeTo(failingSource(), writer), + { message: 'pipe source boom' }, + ); + assert.strictEqual(failCalled, true); + assert.strictEqual(failReason.message, 'pipe source boom'); +} + +// PipeToSync source error calls writer.fail() +async function testPipeToSyncSourceError() { + let failCalled = false; + const writer = { + writeSync() { return true; }, + fail(reason) { failCalled = true; }, + }; + function* failingSource() { + yield [new TextEncoder().encode('a')]; + throw new Error('sync pipe boom'); + } + assert.throws( + () => pipeToSync(failingSource(), writer), + { message: 'sync pipe boom' }, + ); + assert.strictEqual(failCalled, true); +} + +// PipeTo with AbortSignal +async function testPipeToWithSignal() { + const ac = new AbortController(); + const chunks = []; + const writer = { + write(chunk) { chunks.push(chunk); }, + }; + async function* slowSource() { + yield [new TextEncoder().encode('a')]; + await new Promise((r) => setTimeout(r, 50)); + yield [new TextEncoder().encode('b')]; + } + ac.abort(); + await assert.rejects( + () => pipeTo(slowSource(), writer, { signal: ac.signal }), + { name: 'AbortError' }, + ); +} + +// PipeTo with transforms +async function testPipeToWithTransforms() { + const chunks = []; + const writer = { + write(chunk) { chunks.push(new TextDecoder().decode(chunk)); }, + }; + const upper = (batch) => { + if (batch === null) return null; + return batch.map((c) => { + const out = new Uint8Array(c); + for (let i = 0; i < out.length; i++) + out[i] -= (out[i] >= 97 && out[i] <= 122) * 32; + return 
out; + }); + }; + await pipeTo(from('hello'), upper, writer); + assert.strictEqual(chunks.join(''), 'HELLO'); +} + +// PipeToSync with transforms +async function testPipeToSyncWithTransforms() { + const chunks = []; + const writer = { + writeSync(chunk) { chunks.push(new TextDecoder().decode(chunk)); return true; }, + }; + const upper = (batch) => { + if (batch === null) return null; + return batch.map((c) => { + const out = new Uint8Array(c); + for (let i = 0; i < out.length; i++) + out[i] -= (out[i] >= 97 && out[i] <= 122) * 32; + return out; + }); + }; + pipeToSync(fromSync('hello'), upper, writer); + assert.strictEqual(chunks.join(''), 'HELLO'); +} + +// PipeTo with writev writer +async function testPipeToWithWritevWriter() { + const allChunks = []; + const writer = { + write(chunk) { allChunks.push(chunk); }, + writev(chunks) { allChunks.push(...chunks); }, + }; + await pipeTo(from('hello world'), writer); + assert.strictEqual(allChunks.length > 0, true); +} + +// PipeTo with writeSync/writevSync fallback +async function testPipeToSyncFallback() { + const chunks = []; + const writer = { + writeSync(chunk) { chunks.push(chunk); return true; }, + write(chunk) { chunks.push(chunk); }, + }; + await pipeTo(from('hello'), writer); + assert.strictEqual(chunks.length > 0, true); +} + +// PipeTo preventFail option +async function testPipeToPreventFail() { + let failCalled = false; + const writer = { + write() {}, + fail() { failCalled = true; }, + }; + // eslint-disable-next-line require-yield + async function* failingSource() { + throw new Error('boom'); + } + await assert.rejects( + () => pipeTo(failingSource(), writer, { preventFail: true }), + { message: 'boom' }, + ); + assert.strictEqual(failCalled, false); +} + +// PipeToSync preventClose option +async function testPipeToSyncPreventClose() { + let endCalled = false; + const writer = { + writeSync() { return true; }, + endSync() { endCalled = true; return 0; }, + }; + pipeToSync(fromSync('hello'), writer, { 
preventClose: true }); + assert.strictEqual(endCalled, false); +} + +// Regression test: pipeTo should work with a minimal writer that only +// implements write(). end(), fail(), and all *Sync methods are optional. +async function testPipeToMinimalWriter() { + const chunks = []; + const minimalWriter = { + write(chunk) { + chunks.push(chunk); + }, + }; + + await pipeTo(from('minimal'), minimalWriter); + assert.strictEqual(chunks.length > 0, true); +} + +async function testPipeToSyncMinimalWriter() { + const chunks = []; + const minimalWriter = { + writeSync(chunk) { + chunks.push(chunk); + return true; + }, + }; + + pipeToSync(fromSync('minimal-sync'), minimalWriter); + assert.strictEqual(chunks.length > 0, true); +} + +Promise.all([ + testPipeToSync(), + testPipeTo(), + testPipeToPreventClose(), + testPipeToSourceError(), + testPipeToSyncSourceError(), + testPipeToWithSignal(), + testPipeToWithTransforms(), + testPipeToSyncWithTransforms(), + testPipeToWithWritevWriter(), + testPipeToSyncFallback(), + testPipeToPreventFail(), + testPipeToSyncPreventClose(), + testPipeToMinimalWriter(), + testPipeToSyncMinimalWriter(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-pull-async.js b/test/parallel/test-stream-iter-pull-async.js new file mode 100644 index 00000000000000..157cc5e265ea34 --- /dev/null +++ b/test/parallel/test-stream-iter-pull-async.js @@ -0,0 +1,371 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { pull, from, text, tap } = require('stream/iter'); + +async function testPullIdentity() { + const data = await text(pull(from('hello-async'))); + assert.strictEqual(data, 'hello-async'); +} + +async function testPullStatelessTransform() { + const upper = (chunks) => { + if (chunks === null) return null; + return chunks.map((c) => { + const str = new TextDecoder().decode(c); + return new TextEncoder().encode(str.toUpperCase()); + }); + }; + const 
data = await text(pull(from('abc'), upper)); + assert.strictEqual(data, 'ABC'); +} + +async function testPullStatefulTransform() { + const stateful = { + transform: async function*(source) { + for await (const chunks of source) { + if (chunks === null) { + yield new TextEncoder().encode('-ASYNC-END'); + continue; + } + for (const chunk of chunks) { + yield chunk; + } + } + }, + }; + const data = await text(pull(from('data'), stateful)); + assert.strictEqual(data, 'data-ASYNC-END'); +} + +async function testPullWithAbortSignal() { + const ac = new AbortController(); + ac.abort(); + + async function* gen() { + yield [new Uint8Array([1])]; + } + + const result = pull(gen(), { signal: ac.signal }); + await assert.rejects( + async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of result) { + assert.fail('Should not reach here'); + } + }, + { name: 'AbortError' }, + ); +} + +async function testPullChainedTransforms() { + const enc = new TextEncoder(); + const transforms = [ + (chunks) => { + if (chunks === null) return null; + return [...chunks, enc.encode('!')]; + }, + (chunks) => { + if (chunks === null) return null; + return [...chunks, enc.encode('?')]; + }, + ]; + const data = await text(pull(from('hello'), ...transforms)); + assert.strictEqual(data, 'hello!?'); +} + +// Source error → controller.abort() → transform listener throws → +// source error propagates to consumer; listener error becomes uncaught +// exception (per EventTarget spec behavior). +async function testTransformSignalListenerErrorOnSourceError() { + // Listener errors from dispatchEvent are rethrown via process.nextTick, + // so we must catch them as uncaught exceptions. 
+ const uncaughtErrors = []; + const handler = (err) => uncaughtErrors.push(err); + process.on('uncaughtException', handler); + + const throwingTransform = { + transform(source, options) { + options.signal.addEventListener('abort', () => { + throw new Error('listener boom'); + }); + return source; + }, + }; + + async function* failingSource() { + yield [new TextEncoder().encode('a')]; + throw new Error('source error'); + } + + await assert.rejects( + async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of pull(failingSource(), throwingTransform)) { + // Consume + } + }, + { message: 'source error' }, + ); + + // Give the nextTick rethrow a chance to fire + await new Promise(setImmediate); + process.removeListener('uncaughtException', handler); + + assert.strictEqual(uncaughtErrors.length, 1); + assert.strictEqual(uncaughtErrors[0].message, 'listener boom'); +} + +// Pull source error propagates to consumer +async function testPullSourceError() { + async function* failingSource() { + yield [new TextEncoder().encode('a')]; + throw new Error('source boom'); + } + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of pull(failingSource())) { /* consume */ } + }, { message: 'source boom' }); +} + +// Tap callback error propagates through pipeline +async function testTapCallbackError() { + const badTap = tap(() => { throw new Error('tap boom'); }); + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of pull(from('hello'), badTap)) { /* consume */ } + }, { message: 'tap boom' }); +} + +// Pull signal aborted mid-iteration (not pre-aborted) +async function testPullSignalAbortMidIteration() { + const ac = new AbortController(); + const enc = new TextEncoder(); + async function* slowSource() { + yield [enc.encode('a')]; + yield [enc.encode('b')]; + yield [enc.encode('c')]; + } + const result = pull(slowSource(), { signal: ac.signal }); + const iter 
= result[Symbol.asyncIterator](); + const first = await iter.next(); // Read first batch + assert.strictEqual(first.done, false); + ac.abort(); + await assert.rejects(() => iter.next(), { name: 'AbortError' }); +} + +// Pull consumer break (return()) cleans up transform signal +async function testPullConsumerBreakCleanup() { + let signalAborted = false; + const trackingTransform = { + transform(source, options) { + options.signal.addEventListener('abort', () => { + signalAborted = true; + }); + return source; + }, + }; + async function* infiniteSource() { + let i = 0; + while (true) { + yield [new TextEncoder().encode(`chunk${i++}`)]; + } + } + // Consumer breaks after first chunk + // eslint-disable-next-line no-unused-vars + for await (const _ of pull(infiniteSource(), trackingTransform)) { + break; + } + // Give the abort handler a tick to fire + await new Promise(setImmediate); + assert.strictEqual(signalAborted, true); +} + +// Pull transform returning a Promise +async function testPullTransformReturnsPromise() { + const asyncTransform = async (chunks) => { + if (chunks === null) return null; + return chunks; + }; + const result = await text(pull(from('hello'), asyncTransform)); + assert.strictEqual(result, 'hello'); +} + +// Stateless transform error propagates +async function testPullStatelessTransformError() { + const badTransform = (chunks) => { + if (chunks === null) return null; + throw new Error('async stateless boom'); + }; + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of pull(from('hello'), badTransform)) { /* consume */ } + }, { message: 'async stateless boom' }); +} + +// Stateful transform error propagates +async function testPullStatefulTransformError() { + const badStateful = { + transform: async function*(source) { // eslint-disable-line require-yield + for await (const chunks of source) { + if (chunks === null) continue; + throw new Error('async stateful boom'); + } + }, + }; + await 
assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of pull(from('hello'), badStateful)) { /* consume */ } + }, { message: 'async stateful boom' }); +} + +// Stateless transform flush emitting data +async function testPullStatelessTransformFlush() { + const withTrailer = (chunks) => { + if (chunks === null) { + return [new TextEncoder().encode('-TRAILER')]; + } + return chunks; + }; + const data = await text(pull(from('data'), withTrailer)); + assert.strictEqual(data, 'data-TRAILER'); +} + +// Stateless transform flush error propagates +async function testPullStatelessTransformFlushError() { + const badFlush = (chunks) => { + if (chunks === null) { + throw new Error('async flush boom'); + } + return chunks; + }; + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of pull(from('hello'), badFlush)) { /* consume */ } + }, { message: 'async flush boom' }); +} + +// Pull with a sync iterable source (not async) +async function testPullWithSyncSource() { + function* gen() { + yield new TextEncoder().encode('sync-source'); + } + const data = await text(pull(gen())); + assert.strictEqual(data, 'sync-source'); +} + +// Pull transform yielding strings +async function testPullTransformYieldsStrings() { + const stringTransform = (chunks) => { + if (chunks === null) return null; + return chunks.map((c) => new TextDecoder().decode(c)); + }; + const result = await text(pull(from('hello'), stringTransform)); + assert.strictEqual(result, 'hello'); +} + +// pull() accepts a string source directly (normalized via from()) +async function testPullStringSource() { + const data = await text(pull('hello-direct')); + assert.strictEqual(data, 'hello-direct'); +} + +// Transform returning a single Uint8Array should be wrapped as a batch, +// not iterated byte-by-byte +async function testTransformReturnsSingleUint8Array() { + const transform = (chunks) => { + if (chunks === null) return null; + // 
Return a single Uint8Array, not an array + const enc = new TextEncoder(); + return enc.encode('transformed'); + }; + const data = await text(pull(from('input'), transform)); + assert.strictEqual(data, 'transformed'); +} + +// Transform returning a single string should be UTF-8 encoded, +// not iterated character-by-character +async function testTransformReturnsSingleString() { + const transform = (chunks) => { + if (chunks === null) return null; + return 'hello-string'; + }; + const data = await text(pull(from('input'), transform)); + assert.strictEqual(data, 'hello-string'); +} + +// Transform returning an ArrayBuffer should be converted to Uint8Array +async function testTransformReturnsArrayBuffer() { + const transform = (chunks) => { + if (chunks === null) return null; + const enc = new TextEncoder(); + return enc.encode('arraybuf').buffer; + }; + const data = await text(pull(from('input'), transform)); + assert.strictEqual(data, 'arraybuf'); +} + +// pipeTo() accepts a string source directly (normalized via from()) +async function testPipeToStringSource() { + const { pipeTo, push: pushFn, text: textFn } = require('stream/iter'); + const { writer, readable } = pushFn({ highWaterMark: 10 }); + const consume = (async () => textFn(readable))(); + await pipeTo('hello-pipe', writer); + const data = await consume; + assert.strictEqual(data, 'hello-pipe'); +} + +// INVARIANT: Each transform invocation receives its own options object. +// A transform that mutates options must not affect subsequent transforms. 
+async function testTransformOptionsNotShared() { + const seen = []; + const transform1 = (chunks, options) => { + // Mutate the options object + options.mutated = true; + seen.push({ id: 1, mutated: options.mutated }); + return chunks; + }; + const transform2 = (chunks, options) => { + // Should NOT see mutation from transform1 + seen.push({ id: 2, mutated: options.mutated }); + return chunks; + }; + await text(pull(from('test'), transform1, transform2)); + // transform1 sees its own mutation + assert.strictEqual(seen[0].mutated, true); + // transform2 gets a fresh options object - no mutation visible + assert.strictEqual(seen[1].mutated, undefined); +} + +// Run the uncaughtException test sequentially (it installs a global handler +// that would interfere with concurrent tests). +(async () => { + await Promise.all([ + testPullIdentity(), + testPullStatelessTransform(), + testPullStatefulTransform(), + testPullWithAbortSignal(), + testPullChainedTransforms(), + testPullSourceError(), + testTapCallbackError(), + testPullSignalAbortMidIteration(), + testPullConsumerBreakCleanup(), + testPullTransformReturnsPromise(), + testPullTransformYieldsStrings(), + testPullStatelessTransformError(), + testPullStatefulTransformError(), + testPullStatelessTransformFlush(), + testPullStatelessTransformFlushError(), + testPullWithSyncSource(), + testPullStringSource(), + testTransformReturnsSingleUint8Array(), + testTransformReturnsSingleString(), + testTransformReturnsArrayBuffer(), + testPipeToStringSource(), + testTransformOptionsNotShared(), + ]); + // Run after all concurrent tests complete to avoid global handler races + await testTransformSignalListenerErrorOnSourceError(); +})().then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-pull-sync.js b/test/parallel/test-stream-iter-pull-sync.js new file mode 100644 index 00000000000000..35679ac102d512 --- /dev/null +++ b/test/parallel/test-stream-iter-pull-sync.js @@ -0,0 +1,178 @@ +// Flags: 
--experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { pullSync, fromSync, bytesSync, tapSync } = require('stream/iter'); + +function testPullSyncIdentity() { + // No transforms - just pass through + const data = bytesSync(pullSync(fromSync('hello'))); + assert.deepStrictEqual(data, new TextEncoder().encode('hello')); +} + +function testPullSyncStatelessTransform() { + const upper = (chunks) => { + if (chunks === null) return null; + return chunks.map((c) => { + const str = new TextDecoder().decode(c); + return new TextEncoder().encode(str.toUpperCase()); + }); + }; + const data = bytesSync(pullSync(fromSync('abc'), upper)); + assert.deepStrictEqual(data, new TextEncoder().encode('ABC')); +} + +function testPullSyncStatefulTransform() { + const source = fromSync('data'); + const stateful = { + transform: function*(source) { + for (const chunks of source) { + if (chunks === null) { + // Flush: emit trailer + yield new TextEncoder().encode('-END'); + continue; + } + for (const chunk of chunks) { + yield chunk; + } + } + }, + }; + const result = pullSync(source, stateful); + const data = new TextDecoder().decode(bytesSync(result)); + assert.strictEqual(data, 'data-END'); +} + +function testPullSyncChainedTransforms() { + const addExcl = (chunks) => { + if (chunks === null) return null; + return [...chunks, new TextEncoder().encode('!')]; + }; + const addQ = (chunks) => { + if (chunks === null) return null; + return [...chunks, new TextEncoder().encode('?')]; + }; + const result = pullSync(fromSync('hello'), addExcl, addQ); + const data = new TextDecoder().decode(bytesSync(result)); + assert.strictEqual(data, 'hello!?'); +} + +// PullSync source error propagates +function testPullSyncSourceError() { + function* failingSource() { + yield [new TextEncoder().encode('a')]; + throw new Error('sync source boom'); + } + assert.throws(() => { + // eslint-disable-next-line no-unused-vars + for (const _ of 
pullSync(failingSource())) { /* consume */ } + }, { message: 'sync source boom' }); +} + +// PullSync with empty source +function testPullSyncEmptySource() { + function* empty() {} + const result = bytesSync(pullSync(empty())); + assert.strictEqual(result.length, 0); +} + +// TapSync callback error propagates +function testTapSyncCallbackError() { + const badTap = tapSync(() => { throw new Error('tapSync boom'); }); + assert.throws(() => { + // eslint-disable-next-line no-unused-vars + for (const _ of pullSync(fromSync('hello'), badTap)) { /* consume */ } + }, { message: 'tapSync boom' }); +} + +// Stateless transform error propagates +function testPullSyncStatelessTransformError() { + const badTransform = (chunks) => { + if (chunks === null) return null; + throw new Error('stateless transform boom'); + }; + assert.throws(() => { + // eslint-disable-next-line no-unused-vars + for (const _ of pullSync(fromSync('hello'), badTransform)) { /* consume */ } + }, { message: 'stateless transform boom' }); +} + +// Stateful transform error propagates +function testPullSyncStatefulTransformError() { + const badStateful = { + transform: function*(source) { // eslint-disable-line require-yield + for (const chunks of source) { + if (chunks === null) continue; + throw new Error('stateful transform boom'); + } + }, + }; + assert.throws(() => { + // eslint-disable-next-line no-unused-vars + for (const _ of pullSync(fromSync('hello'), badStateful)) { /* consume */ } + }, { message: 'stateful transform boom' }); +} + +// Stateless transform flush emitting data +function testPullSyncStatelessTransformFlush() { + const withTrailer = (chunks) => { + if (chunks === null) { + // Flush: emit trailing data + return [new TextEncoder().encode('-TRAILER')]; + } + return chunks; + }; + const data = new TextDecoder().decode(bytesSync(pullSync(fromSync('data'), withTrailer))); + assert.strictEqual(data, 'data-TRAILER'); +} + +// Stateless transform flush error propagates +function 
testPullSyncStatelessTransformFlushError() { + const badFlush = (chunks) => { + if (chunks === null) { + throw new Error('flush boom'); + } + return chunks; + }; + assert.throws(() => { + // eslint-disable-next-line no-unused-vars + for (const _ of pullSync(fromSync('hello'), badFlush)) { /* consume */ } + }, { message: 'flush boom' }); +} + +// Empty source result is a Uint8Array +function testPullSyncEmptySourceType() { + function* empty() {} + const result = bytesSync(pullSync(empty())); + assert.ok(result instanceof Uint8Array); + assert.strictEqual(result.byteLength, 0); +} + +// Invalid transform argument +function testPullSyncInvalidTransform() { + assert.throws( + () => { for (const _ of pullSync(fromSync('x'), 42)) { /* consume */ } }, // eslint-disable-line no-unused-vars + { code: 'ERR_INVALID_ARG_TYPE' }, + ); + assert.throws( + () => { for (const _ of pullSync(fromSync('x'), null)) { /* consume */ } }, // eslint-disable-line no-unused-vars + { code: 'ERR_INVALID_ARG_TYPE' }, + ); +} + +Promise.all([ + testPullSyncIdentity(), + testPullSyncStatelessTransform(), + testPullSyncStatefulTransform(), + testPullSyncChainedTransforms(), + testPullSyncSourceError(), + testPullSyncEmptySource(), + testPullSyncEmptySourceType(), + testTapSyncCallbackError(), + testPullSyncStatelessTransformError(), + testPullSyncStatefulTransformError(), + testPullSyncStatelessTransformFlush(), + testPullSyncStatelessTransformFlushError(), + testPullSyncInvalidTransform(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-push-backpressure.js b/test/parallel/test-stream-iter-push-backpressure.js new file mode 100644 index 00000000000000..a62a24ee6b623c --- /dev/null +++ b/test/parallel/test-stream-iter-push-backpressure.js @@ -0,0 +1,156 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { push, text } = require('stream/iter'); + +async function testStrictBackpressure() { 
+ const { writer, readable } = push({ + highWaterMark: 1, + backpressure: 'strict', + }); + + // First write should succeed synchronously + assert.strictEqual(writer.writeSync('a'), true); + // Second write should fail synchronously (buffer full) + assert.strictEqual(writer.writeSync('b'), false); + + // Consume to free space, then end + const resultPromise = text(readable); + writer.end(); + const data = await resultPromise; + assert.strictEqual(data, 'a'); +} + +async function testDropOldest() { + const { writer, readable } = push({ + highWaterMark: 2, + backpressure: 'drop-oldest', + }); + + assert.strictEqual(writer.writeSync('first'), true); + assert.strictEqual(writer.writeSync('second'), true); + // This should drop 'first' — return value is true (write accepted via drop) + assert.strictEqual(writer.writeSync('third'), true); + writer.end(); + + const batches = []; + for await (const batch of readable) { + batches.push(batch); + } + // Should have 'second' and 'third' + const allBytes = []; + for (const batch of batches) { + for (const chunk of batch) { + allBytes.push(...chunk); + } + } + const result = new TextDecoder().decode(new Uint8Array(allBytes)); + assert.strictEqual(result, 'secondthird'); +} + +async function testDropNewest() { + const { writer, readable } = push({ + highWaterMark: 1, + backpressure: 'drop-newest', + }); + + assert.strictEqual(writer.writeSync('kept'), true); + // This is silently dropped — return value is true (accepted but discarded) + assert.strictEqual(writer.writeSync('dropped'), true); + writer.end(); + + const data = await text(readable); + assert.strictEqual(data, 'kept'); +} + +async function testBlockBackpressure() { + const { writer, readable } = push({ highWaterMark: 1, backpressure: 'block' }); + + // Fill the buffer + writer.writeSync('a'); + + // Next write should block (not throw, not drop) + let writeState = 'pending'; + const writePromise = writer.write('b').then(() => { writeState = 'resolved'; }); + + // The 
write cannot resolve until the buffer is drained, so one setImmediate + // turn is sufficient to confirm it is still blocked.
+ const { writer, readable } = push({ + highWaterMark: 1, + backpressure: 'strict', + }); + + // Fill the buffer + assert.strictEqual(writer.writeSync('a'), true); + + // This async write goes into the pending queue (buffer full, queue has room) + const pendingWrite = writer.write('b'); + + // This write should reject: buffer full AND pending queue at capacity + await assert.rejects( + () => writer.write('c'), + { + code: 'ERR_INVALID_STATE', + name: 'RangeError', + }, + ); + + // Clean up: drain the readable + const iter = readable[Symbol.asyncIterator](); + await iter.next(); + await iter.next(); + await pendingWrite; + writer.endSync(); + await iter.return(); +} + +Promise.all([ + testStrictBackpressure(), + testDropOldest(), + testDropNewest(), + testBlockBackpressure(), + testBlockWriteSyncEnqueues(), + testStrictPendingQueueOverflow(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-push-basic.js b/test/parallel/test-stream-iter-push-basic.js new file mode 100644 index 00000000000000..22d5b26c830a47 --- /dev/null +++ b/test/parallel/test-stream-iter-push-basic.js @@ -0,0 +1,182 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { push, text } = require('stream/iter'); + +async function testBasicWriteRead() { + const { writer, readable } = push(); + + writer.write('hello'); + writer.end(); + + const data = await text(readable); + assert.strictEqual(data, 'hello'); +} + +async function testMultipleWrites() { + const { writer, readable } = push({ highWaterMark: 10 }); + + writer.write('a'); + writer.write('b'); + writer.write('c'); + writer.end(); + + const data = await text(readable); + assert.strictEqual(data, 'abc'); +} + +async function testDesiredSize() { + const { writer } = push({ highWaterMark: 3 }); + + assert.strictEqual(writer.desiredSize, 3); + writer.writeSync('a'); + assert.strictEqual(writer.desiredSize, 2); + writer.writeSync('b'); + 
assert.strictEqual(writer.desiredSize, 1); + writer.writeSync('c'); + assert.strictEqual(writer.desiredSize, 0); + + writer.end(); + assert.strictEqual(writer.desiredSize, null); +} + +async function testWriterEnd() { + const { writer, readable } = push(); + + const totalBytes = writer.endSync(); + assert.strictEqual(totalBytes, 0); + + // Calling endSync again returns byte count (idempotent when closed) + assert.strictEqual(writer.endSync(), 0); + + const batches = []; + for await (const batch of readable) { + batches.push(batch); + } + assert.strictEqual(batches.length, 0); +} + +async function testWriterFail() { + const { writer, readable } = push(); + + writer.fail(new Error('test fail')); + + await assert.rejects( + async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { + assert.fail('Should not reach here'); + } + }, + { message: 'test fail' }, + ); +} + +async function testConsumerBreak() { + const { writer, readable } = push({ highWaterMark: 10 }); + + writer.writeSync('a'); + writer.writeSync('b'); + writer.writeSync('c'); + + // Break after first batch + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { + break; + } + + // Writer should now see null desiredSize + assert.strictEqual(writer.desiredSize, null); +} + +async function testAbortSignal() { + const ac = new AbortController(); + const { readable } = push({ signal: ac.signal }); + + ac.abort(); + + await assert.rejects( + async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { + assert.fail('Should not reach here'); + } + }, + { name: 'AbortError' }, + ); +} + +async function testPreAbortedSignal() { + const ac = new AbortController(); + ac.abort(); + const { readable } = push({ signal: ac.signal }); + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { + assert.fail('Should not reach here'); + } + }, { name: 'AbortError' }); +} + 
+async function testConsumerBreakWriteSyncReturnsFalse() { + const { writer, readable } = push({ highWaterMark: 10 }); + writer.writeSync('a'); + + // Break after first batch + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { + break; + } + + // After consumer break, writeSync should return false + assert.strictEqual(writer.writeSync('b'), false); + assert.strictEqual(writer.desiredSize, null); +} + +async function testPushWithTransforms() { + const upper = (chunks) => { + if (chunks === null) return null; + return chunks.map((c) => { + const str = new TextDecoder().decode(c); + return new TextEncoder().encode(str.toUpperCase()); + }); + }; + + const { writer, readable } = push(upper); + + writer.write('hello'); + writer.end(); + + const data = await text(readable); + assert.strictEqual(data, 'HELLO'); +} + +async function testInvalidBackpressure() { + assert.throws(() => push({ backpressure: 'banana' }), { + code: 'ERR_INVALID_ARG_VALUE', + }); + assert.throws(() => push({ backpressure: '' }), { + code: 'ERR_INVALID_ARG_VALUE', + }); + + // Valid values should not throw + for (const bp of ['strict', 'block', 'drop-oldest', 'drop-newest']) { + push({ backpressure: bp }); + } +} + +Promise.all([ + testBasicWriteRead(), + testMultipleWrites(), + testDesiredSize(), + testWriterEnd(), + testWriterFail(), + testConsumerBreak(), + testAbortSignal(), + testPreAbortedSignal(), + testConsumerBreakWriteSyncReturnsFalse(), + testPushWithTransforms(), + testInvalidBackpressure(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-push-writer.js b/test/parallel/test-stream-iter-push-writer.js new file mode 100644 index 00000000000000..e7e783d7b74a9c --- /dev/null +++ b/test/parallel/test-stream-iter-push-writer.js @@ -0,0 +1,426 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { push, ondrain, text } = require('stream/iter'); + +async 
function testOndrain() { + const { writer } = push({ highWaterMark: 1 }); + + // With space available, ondrain resolves immediately + const drainResult = ondrain(writer); + assert.ok(drainResult instanceof Promise); + const result = await drainResult; + assert.strictEqual(result, true); + + // After close, ondrain returns null + writer.end(); + assert.strictEqual(ondrain(writer), null); +} + +async function testOndrainNonDrainable() { + // Non-drainable objects return null + assert.strictEqual(ondrain(null), null); + assert.strictEqual(ondrain({}), null); + assert.strictEqual(ondrain('string'), null); +} + +async function testOndrainProtocolErrorPropagates() { + const badDrainable = { + [Symbol.for('Stream.drainableProtocol')]() { + throw new Error('protocol error'); + }, + }; + assert.throws( + () => ondrain(badDrainable), + { message: 'protocol error' }, + ); +} + +async function testWriteWithSignalRejects() { + const { writer, readable } = push({ highWaterMark: 1 }); + + // Fill the buffer so write will block + writer.writeSync('a'); + + const ac = new AbortController(); + const writePromise = writer.write('b', { signal: ac.signal }); + + // Signal fires while write is pending + ac.abort(); + + await assert.rejects(writePromise, { name: 'AbortError' }); + + // Clean up + writer.end(); + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { break; } +} + +async function testWriteWithPreAbortedSignal() { + const { writer, readable } = push({ highWaterMark: 1 }); + + const ac = new AbortController(); + ac.abort(); + + // Pre-aborted signal should reject immediately + await assert.rejects( + writer.write('data', { signal: ac.signal }), + { name: 'AbortError' }, + ); + + // Writer should still be usable for other writes + writer.write('ok'); + writer.end(); + const data = await text(readable); + assert.strictEqual(data, 'ok'); +} + +async function testCancelledWriteRemovedFromQueue() { + const { writer, readable } = push({ highWaterMark: 1 
}); + + // Fill the buffer + writer.writeSync('first'); + + const ac = new AbortController(); + // This write should be queued since buffer is full + const cancelledWrite = writer.write('cancelled', { signal: ac.signal }); + + // Cancel it + ac.abort(); + await cancelledWrite.catch(() => {}); + + // Drain 'first' to make room for the replacement write + const iter = readable[Symbol.asyncIterator](); + await iter.next(); + + // The cancelled write should NOT occupy a pending slot. + // A new write should succeed now that the buffer has room. + await writer.write('second'); + writer.end(); + + const result = await iter.next(); + // 'second' should be the next (and only remaining) chunk + const decoder = new TextDecoder(); + let data = ''; + for (const chunk of result.value) { + data += decoder.decode(chunk, { stream: true }); + } + assert.strictEqual(data, 'second'); + await iter.return(); +} + +async function testOndrainResolvesFalseOnConsumerBreak() { + const { writer, readable } = push({ highWaterMark: 1 }); + + // Fill the buffer so desiredSize = 0 + writer.writeSync('a'); + + // Also queue a pending write so that reading one chunk + // doesn't clear backpressure (the pending write refills the slot) + const pendingWrite = writer.write('b'); + + // Start a drain wait - still at capacity + const drainPromise = ondrain(writer); + + // Consumer returns without draining enough to clear backpressure + const iter = readable[Symbol.asyncIterator](); + await iter.return(); + + // Ondrain should resolve false since the consumer terminated + const result = await drainPromise; + assert.strictEqual(result, false); + await pendingWrite.catch(() => {}); // Ignore write rejection +} + +async function testOndrainRejectsOnConsumerThrow() { + const { writer, readable } = push({ highWaterMark: 1 }); + + // Fill the buffer so desiredSize = 0 + writer.writeSync('a'); + + // Also queue a pending write so that reading one chunk + // doesn't clear backpressure (the pending write refills 
the slot) + const pendingWrite = writer.write('b'); + + // Start a drain wait - still at capacity + const drainPromise = ondrain(writer); + + // Consumer throws via iterator.throw() before draining enough + // to clear backpressure. The drain should reject. + const iter = readable[Symbol.asyncIterator](); + await iter.throw(new Error('consumer error')); + + await assert.rejects(drainPromise, /consumer error/); + await pendingWrite.catch(() => {}); // Ignore write rejection +} + +async function testWritev() { + const { writer, readable } = push({ highWaterMark: 10 }); + const enc = new TextEncoder(); + writer.writev([enc.encode('hel'), enc.encode('lo')]); + writer.endSync(); + const result = await text(readable); + assert.strictEqual(result, 'hello'); +} + +async function testWritevSync() { + const { writer, readable } = push({ highWaterMark: 10 }); + const enc = new TextEncoder(); + assert.strictEqual(writer.writevSync([enc.encode('hel'), enc.encode('lo')]), true); + writer.endSync(); + const result = await text(readable); + assert.strictEqual(result, 'hello'); +} + +async function testWritevMixedTypes() { + const { writer, readable } = push({ highWaterMark: 10 }); + // Mix strings and Uint8Arrays + writer.writev(['hel', new TextEncoder().encode('lo')]); + writer.endSync(); + const result = await text(readable); + assert.strictEqual(result, 'hello'); +} + +async function testWriteAfterEnd() { + const { writer } = push(); + writer.endSync(); + // Sync write after end returns false + assert.strictEqual(writer.writeSync('fail'), false); + // Async write after end rejects + await assert.rejects( + () => writer.write('fail'), + { code: 'ERR_INVALID_STATE' }, + ); +} + +async function testWriteAfterFail() { + const { writer } = push(); + writer.fail(new Error('failed')); + // Sync write after fail returns false + assert.strictEqual(writer.writeSync('fail'), false); + // Async write after fail rejects with the stored error + await assert.rejects( + () => 
writer.write('fail'), + { message: 'failed' }, + ); +} + +async function testFail() { + const { writer, readable } = push(); + writer.writeSync('hello'); + writer.fail(new Error('boom')); + // Second fail is a no-op (already errored) + writer.fail(new Error('boom2')); + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { /* consume */ } + }, { message: 'boom' }); +} + +async function testEndAsyncReturnValue() { + const { writer, readable } = push(); + writer.writeSync('hello'); + // Start consuming concurrently (end() waits for drain) + const consume = (async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { /* drain */ } + })(); + const total = await writer.end(); + assert.strictEqual(total, 5); + await consume; +} + +async function testWriteUint8Array() { + const { writer, readable } = push(); + writer.write(new Uint8Array([72, 73])); // 'HI' + writer.endSync(); + const result = await text(readable); + assert.strictEqual(result, 'HI'); +} + +async function testOndrainWaitsForDrain() { + const { writer, readable } = push({ highWaterMark: 1 }); + writer.writeSync('a'); // Fills buffer + + let drainState = 'pending'; + const drainPromise = ondrain(writer).then((v) => { drainState = v; }); + + await new Promise(setImmediate); + assert.strictEqual(drainState, 'pending'); // Still waiting + + // Read to drain + const iter = readable[Symbol.asyncIterator](); + await iter.next(); + + await drainPromise; + assert.strictEqual(drainState, true); + writer.endSync(); +} + +// Consumer throw causes subsequent writes to reject with consumer's error +async function testConsumerThrowRejectsWrites() { + const { writer, readable } = push({ highWaterMark: 1 }); + writer.writeSync('a'); + + const iter = readable[Symbol.asyncIterator](); + await iter.throw(new Error('consumer boom')); + + // Subsequent async writes should reject with the consumer's error + await assert.rejects( + () 
=> writer.write('x'), + { message: 'consumer boom' }, + ); +} + +// end() resolves a pending read as done:true +async function testEndResolvesPendingRead() { + const { writer, readable } = push(); + + // Consumer starts reading — blocks because buffer is empty + const iter = readable[Symbol.asyncIterator](); + const readPromise = iter.next(); + + // Give the read a tick to enter the pending state + await new Promise(setImmediate); + + // End the writer — should resolve the pending read with done:true + writer.endSync(); + const result = await readPromise; + assert.strictEqual(result.done, true); +} + +// fail() rejects a pending read with the error +async function testFailRejectsPendingRead() { + const { writer, readable } = push(); + + const iter = readable[Symbol.asyncIterator](); + const readPromise = iter.next(); + + await new Promise(setImmediate); + + writer.fail(new Error('fail during read')); + await assert.rejects( + () => readPromise, + { message: 'fail during read' }, + ); +} + +// end() while writes are pending rejects those writes +async function testEndRejectsPendingWrites() { + const { writer, readable } = push({ highWaterMark: 1, backpressure: 'block' }); + writer.writeSync('a'); // fill buffer + + // This write blocks on backpressure + const writePromise = writer.write('b'); + + await new Promise(setImmediate); + + // Ending should reject the pending write + writer.endSync(); + + await assert.rejects( + () => writePromise, + { code: 'ERR_INVALID_STATE' }, + ); + + // Clean up: drain the readable + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { break; } +} + +async function testEndIdempotentWhenClosed() { + const { writer, readable } = push({ highWaterMark: 10 }); + await writer.write('hello'); + // Start consuming concurrently (end() waits for drain) + const consume = (async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { /* drain */ } + })(); + const first = await 
writer.end(); + assert.strictEqual(first, 5); + // Second end() should resolve with same byte count (idempotent) + const second = await writer.end(); + assert.strictEqual(second, 5); + await consume; +} + +async function testAsyncDispose() { + const { writer, readable } = push({ highWaterMark: 10 }); + writer.writeSync('hello'); + // Symbol.asyncDispose calls fail() with no argument + await writer[Symbol.asyncDispose](); + // Writer is now errored, writes should fail + assert.strictEqual(writer.writeSync('fail'), false); + // Drain readable + try { + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { /* consume */ } + } catch { + // Expected - reader sees the error + } +} + +async function testSyncDispose() { + const { writer, readable } = push({ highWaterMark: 10 }); + writer.writeSync('hello'); + // Symbol.dispose calls fail() with no argument + writer[Symbol.dispose](); + // Writer is now errored, writes should fail + assert.strictEqual(writer.writeSync('fail'), false); + // Drain readable + try { + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { /* consume */ } + } catch { + // Expected + } +} + +async function testEndRejectsWhenErrored() { + const { writer, readable } = push({ highWaterMark: 10 }); + await writer.write('hello'); + const err = new Error('boom'); + await writer.fail(err); + // end() after fail should reject with the stored error + await assert.rejects( + () => writer.end(), + (e) => e === err, + ); + // Drain readable + try { + // eslint-disable-next-line no-unused-vars + for await (const _ of readable) { break; } + } catch { + // Expected - reader may see the error + } +} + +Promise.all([ + testOndrain(), + testOndrainNonDrainable(), + testWriteWithSignalRejects(), + testWriteWithPreAbortedSignal(), + testCancelledWriteRemovedFromQueue(), + testOndrainResolvesFalseOnConsumerBreak(), + testOndrainRejectsOnConsumerThrow(), + testWritev(), + testWritevSync(), + testWritevMixedTypes(), + 
testWriteAfterEnd(), + testWriteAfterFail(), + testOndrainProtocolErrorPropagates(), + testFail(), + testEndAsyncReturnValue(), + testWriteUint8Array(), + testOndrainWaitsForDrain(), + testConsumerThrowRejectsWrites(), + testEndResolvesPendingRead(), + testFailRejectsPendingRead(), + testEndRejectsPendingWrites(), + testEndIdempotentWhenClosed(), + testEndRejectsWhenErrored(), + testAsyncDispose(), + testSyncDispose(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-share-async.js b/test/parallel/test-stream-iter-share-async.js new file mode 100644 index 00000000000000..86b0eb9b273a34 --- /dev/null +++ b/test/parallel/test-stream-iter-share-async.js @@ -0,0 +1,282 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + from, + share, + text, +} = require('stream/iter'); + +const { setTimeout } = require('timers/promises'); + +// ============================================================================= +// Async share() +// ============================================================================= + +async function testBasicShare() { + const shared = share(from('hello shared')); + + const consumer = shared.pull(); + const data = await text(consumer); + assert.strictEqual(data, 'hello shared'); +} + +async function testShareMultipleConsumers() { + async function* gen() { + yield [new TextEncoder().encode('chunk1')]; + yield [new TextEncoder().encode('chunk2')]; + yield [new TextEncoder().encode('chunk3')]; + } + + const shared = share(gen(), { highWaterMark: 16 }); + + const c1 = shared.pull(); + const c2 = shared.pull(); + + assert.strictEqual(shared.consumerCount, 2); + + const [data1, data2] = await Promise.all([ + text(c1), + text(c2), + ]); + + assert.strictEqual(data1, 'chunk1chunk2chunk3'); + assert.strictEqual(data2, 'chunk1chunk2chunk3'); +} + +async function testShareConsumerCount() { + const shared = share(from('data')); + + 
assert.strictEqual(shared.consumerCount, 0); + + const c1 = shared.pull(); + assert.strictEqual(shared.consumerCount, 1); + + const c2 = shared.pull(); + assert.strictEqual(shared.consumerCount, 2); + + // Cancel detaches all consumers + shared.cancel(); + assert.strictEqual(shared.consumerCount, 0); + + // Both should complete immediately + const [data1, data2] = await Promise.all([ + text(c1), + text(c2), + ]); + assert.strictEqual(data1, ''); + assert.strictEqual(data2, ''); +} + +async function testShareCancel() { + const shared = share(from('data')); + const consumer = shared.pull(); + + shared.cancel(); + assert.strictEqual(shared.consumerCount, 0); + + const batches = []; + for await (const batch of consumer) { + batches.push(batch); + } + assert.strictEqual(batches.length, 0); +} + +async function testShareCancelMidIteration() { + // Verify that cancel during iteration stops data flow + let sourceReturnCalled = false; + const enc = new TextEncoder(); + async function* gen() { + try { + yield [enc.encode('a')]; + yield [enc.encode('b')]; + yield [enc.encode('c')]; + } finally { + sourceReturnCalled = true; + } + } + const shared = share(gen(), { highWaterMark: 16 }); + const consumer = shared.pull(); + + const items = []; + for await (const batch of consumer) { + for (const chunk of batch) { + items.push(new TextDecoder().decode(chunk)); + } + // Cancel after first batch + shared.cancel(); + } + assert.strictEqual(items.length, 1); + assert.strictEqual(items[0], 'a'); + + await new Promise(setImmediate); + assert.strictEqual(sourceReturnCalled, true); +} + +async function testShareCancelWithReason() { + const shared = share(from('data')); + const consumer = shared.pull(); + + shared.cancel(new Error('share cancelled')); + + await assert.rejects( + async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of consumer) { + assert.fail('Should not reach here'); + } + }, + { message: 'share cancelled' }, + ); +} + +async function 
testShareAbortSignal() { + const ac = new AbortController(); + const shared = share(from('data'), { signal: ac.signal }); + const consumer = shared.pull(); + + ac.abort(); + + const batches = []; + for await (const batch of consumer) { + batches.push(batch); + } + assert.strictEqual(batches.length, 0); +} + +async function testShareAlreadyAborted() { + const ac = new AbortController(); + ac.abort(); + + const shared = share(from('data'), { signal: ac.signal }); + const consumer = shared.pull(); + + const batches = []; + for await (const batch of consumer) { + batches.push(batch); + } + assert.strictEqual(batches.length, 0); +} + +// ============================================================================= +// Source error propagation +// ============================================================================= + +async function testShareSourceError() { + async function* failingSource() { + yield [new TextEncoder().encode('a')]; + throw new Error('share source boom'); + } + const shared = share(failingSource()); + const c1 = shared.pull(); + const c2 = shared.pull(); + + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of c1) { /* consume */ } + }, { message: 'share source boom' }); + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of c2) { /* consume */ } + }, { message: 'share source boom' }); +} + +async function testShareLateJoiningConsumer() { + // A consumer that joins after some data has been consumed should only + // see data remaining in the buffer (not items already trimmed). 
+ const enc = new TextEncoder(); + async function* gen() { + yield [enc.encode('a')]; + yield [enc.encode('b')]; + yield [enc.encode('c')]; + } + const shared = share(gen(), { highWaterMark: 16 }); + + // First consumer reads all data + const c1 = shared.pull(); + const data1 = await text(c1); + assert.strictEqual(data1, 'abc'); + + // Late-joining consumer: source is exhausted, buffer has been trimmed + // past all data by c1's reads, so c2 gets nothing. + const c2 = shared.pull(); + const data2 = await text(c2); + assert.strictEqual(data2, ''); +} + +async function testShareConsumerBreak() { + // Verify that a consumer breaking mid-iteration detaches properly + const enc = new TextEncoder(); + async function* gen() { + yield [enc.encode('a')]; + yield [enc.encode('b')]; + yield [enc.encode('c')]; + } + const shared = share(gen(), { highWaterMark: 16 }); + const c1 = shared.pull(); + const c2 = shared.pull(); + + assert.strictEqual(shared.consumerCount, 2); + + // c1 breaks after first batch + // eslint-disable-next-line no-unused-vars + for await (const _ of c1) { + break; + } + // c1 should be detached + assert.strictEqual(shared.consumerCount, 1); + + // c2 should still get all data + const data2 = await text(c2); + assert.strictEqual(data2, 'abc'); +} + +async function testShareMultipleConsumersConcurrentPull() { + // Multiple consumers pulling concurrently should each receive all items + // even when only one item is pulled from source at a time. 
+ async function* slowSource() { + const enc = new TextEncoder(); + for (let i = 0; i < 5; i++) { + await setTimeout(1); + yield [enc.encode(`item-${i}`)]; + } + } + const shared = share(slowSource()); + const c1 = shared.pull(); + const c2 = shared.pull(); + const c3 = shared.pull(); + + const [t1, t2, t3] = await Promise.all([ + text(c1), text(c2), text(c3), + ]); + + const expected = 'item-0item-1item-2item-3item-4'; + assert.strictEqual(t1, expected); + assert.strictEqual(t2, expected); + assert.strictEqual(t3, expected); +} + +// share() accepts string source directly (normalized via from()) +async function testShareStringSource() { + const shared = share('hello-share'); + const result = await text(shared.pull()); + assert.strictEqual(result, 'hello-share'); +} + +Promise.all([ + testBasicShare(), + testShareMultipleConsumers(), + testShareConsumerCount(), + testShareCancel(), + testShareCancelMidIteration(), + testShareCancelWithReason(), + testShareAbortSignal(), + testShareAlreadyAborted(), + testShareSourceError(), + testShareLateJoiningConsumer(), + testShareConsumerBreak(), + testShareMultipleConsumersConcurrentPull(), + testShareStringSource(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-share-coverage.js b/test/parallel/test-stream-iter-share-coverage.js new file mode 100644 index 00000000000000..48866e61deab9b --- /dev/null +++ b/test/parallel/test-stream-iter-share-coverage.js @@ -0,0 +1,128 @@ +// Flags: --experimental-stream-iter +'use strict'; + +// Coverage tests for share.js: protocol happy path, dispose, throw, +// non-Error source throws. 
+ +const common = require('../common'); +const assert = require('assert'); +const { + Share, + SyncShare, + share, + shareSync, + shareProtocol, + shareSyncProtocol, + from, + fromSync, + text, + textSync, +} = require('stream/iter'); + +// Share.from — protocol symbol happy path (returns valid share object) +async function testShareProtocolHappyPath() { + const obj = { + [shareProtocol](options) { + return share(from('protocol-data'), options); + }, + }; + const shared = Share.from(obj); + const result = await text(shared.pull()); + assert.strictEqual(result, 'protocol-data'); +} + +// SyncShare.fromSync — protocol symbol happy path +async function testSyncShareProtocolHappyPath() { + const obj = { + [shareSyncProtocol](options) { + return shareSync(fromSync('sync-protocol'), options); + }, + }; + const shared = SyncShare.fromSync(obj); + const result = textSync(shared.pull()); + assert.strictEqual(result, 'sync-protocol'); +} + +// Async share — Symbol.dispose cancels +async function testShareDispose() { + const shared = share(from('dispose-test')); + const consumer = shared.pull(); + shared[Symbol.dispose](); + assert.strictEqual(shared.consumerCount, 0); + // Consumer should yield nothing (cancelled before read) + const result = await text(consumer); + assert.strictEqual(result, ''); +} + +// Sync share — Symbol.dispose cancels +async function testSyncShareDispose() { + const shared = shareSync(fromSync('sync-dispose')); + const consumer = shared.pull(); + shared[Symbol.dispose](); + assert.strictEqual(shared.consumerCount, 0); + const result = textSync(consumer); + assert.strictEqual(result, ''); +} + +// Async consumer iterator throw() +async function testAsyncIteratorThrow() { + const shared = share(from('throw-test')); + const consumer = shared.pull(); + const iter = consumer[Symbol.asyncIterator](); + const first = await iter.next(); + assert.strictEqual(first.done, false); + const result = await iter.throw(new Error('test-throw')); + 
assert.strictEqual(result.done, true); + assert.strictEqual(shared.consumerCount, 0); +} + +// Sync consumer iterator throw() +async function testSyncIteratorThrow() { + const shared = shareSync(fromSync('throw-sync')); + const consumer = shared.pull(); + const iter = consumer[Symbol.iterator](); + const first = iter.next(); + assert.strictEqual(first.done, false); + const result = iter.throw(new Error('test-throw')); + assert.strictEqual(result.done, true); + assert.strictEqual(shared.consumerCount, 0); +} + +// Async source throws non-Error value → wrapError +async function testShareSourceThrowsNonError() { + async function* source() { + yield [new TextEncoder().encode('ok')]; + throw 'not an error'; // eslint-disable-line no-throw-literal + } + const shared = share(source()); + const consumer = shared.pull(); + await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const batch of consumer) { /* consume */ } + }, { code: 'ERR_OPERATION_FAILED' }); +} + +// Sync source throws non-Error value → wrapError +async function testSyncShareSourceThrowsNonError() { + function* source() { + yield [new TextEncoder().encode('ok')]; + throw 42; // eslint-disable-line no-throw-literal + } + const shared = shareSync(source()); + const consumer = shared.pull(); + assert.throws(() => { + // eslint-disable-next-line no-unused-vars + for (const batch of consumer) { /* consume */ } + }, { code: 'ERR_OPERATION_FAILED' }); +} + +Promise.all([ + testShareProtocolHappyPath(), + testSyncShareProtocolHappyPath(), + testShareDispose(), + testSyncShareDispose(), + testAsyncIteratorThrow(), + testSyncIteratorThrow(), + testShareSourceThrowsNonError(), + testSyncShareSourceThrowsNonError(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-share-from.js b/test/parallel/test-stream-iter-share-from.js new file mode 100644 index 00000000000000..362bfef78f1a2e --- /dev/null +++ b/test/parallel/test-stream-iter-share-from.js @@ -0,0 
+1,242 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + from, + fromSync, + share, + Share, + SyncShare, + text, + textSync, + +} = require('stream/iter'); + +// ============================================================================= +// Share.from +// ============================================================================= + +async function testShareFrom() { + const shared = Share.from(from('share-from')); + const consumer = shared.pull(); + + const data = await text(consumer); + assert.strictEqual(data, 'share-from'); +} + +function testShareFromRejectsNonStreamable() { + assert.throws( + () => Share.from(12345), + { code: 'ERR_INVALID_ARG_TYPE' }, + ); +} + +// ============================================================================= +// SyncShare.fromSync +// ============================================================================= + +async function testSyncShareFromSync() { + const shared = SyncShare.fromSync(fromSync('sync-share-from')); + const consumer = shared.pull(); + + const data = textSync(consumer); + assert.strictEqual(data, 'sync-share-from'); +} + +function testSyncShareFromRejectsNonStreamable() { + assert.throws( + () => SyncShare.fromSync(12345), + { code: 'ERR_INVALID_ARG_TYPE' }, + ); +} + +// ============================================================================= +// Protocol validation +// ============================================================================= + +function testShareProtocolReturnsNull() { + const obj = { + [Symbol.for('Stream.shareProtocol')]() { return null; }, + }; + assert.throws( + () => Share.from(obj), + { code: 'ERR_INVALID_RETURN_VALUE' }, + ); +} + +function testShareProtocolReturnsNonObject() { + const obj = { + [Symbol.for('Stream.shareProtocol')]() { return 42; }, + }; + assert.throws( + () => Share.from(obj), + { code: 'ERR_INVALID_RETURN_VALUE' }, + ); +} + +function 
testSyncShareProtocolReturnsNull() { + const obj = { + [Symbol.for('Stream.shareSyncProtocol')]() { return null; }, + }; + assert.throws( + () => SyncShare.fromSync(obj), + { code: 'ERR_INVALID_RETURN_VALUE' }, + ); +} + +function testSyncShareProtocolReturnsNonObject() { + const obj = { + [Symbol.for('Stream.shareSyncProtocol')]() { return 'bad'; }, + }; + assert.throws( + () => SyncShare.fromSync(obj), + { code: 'ERR_INVALID_RETURN_VALUE' }, + ); +} + +// ============================================================================= +// Block backpressure: two consumers, slow consumer blocks the source +// ============================================================================= + +async function testShareBlockBackpressure() { + // A source that yields 5 items. With two consumers and highWaterMark: 2, + // the fast consumer drives the source forward. The slow consumer holds back + // trimming, causing the buffer to fill. 'block' mode should stall the + // source pull until the slow consumer catches up. + const enc = new TextEncoder(); + async function* source() { + for (let i = 0; i < 5; i++) { + yield [enc.encode(`item${i}`)]; + } + } + const shared = share(source(), { highWaterMark: 2, backpressure: 'block' }); + const fast = shared.pull(); + const slow = shared.pull(); + + // Both consumers should ultimately receive all 5 items + const [fastData, slowData] = await Promise.all([ + text(fast), + text(slow), + ]); + + assert.strictEqual(fastData, 'item0item1item2item3item4'); + assert.strictEqual(slowData, 'item0item1item2item3item4'); +} + +// ============================================================================= +// Drop backpressure modes: use a fast + stalled consumer to trigger drops +// ============================================================================= + +async function testShareDropOldest() { + // Two consumers, fast reads eagerly then slow reads. 
With drop-oldest, + // the slow consumer's cursor is advanced past dropped items, so it + // misses old data and only sees recent items. + async function* source() { + for (let i = 0; i < 4; i++) { + yield [new TextEncoder().encode(`${i}`)]; + } + } + const shared = share(source(), { highWaterMark: 2, backpressure: 'drop-oldest' }); + const fast = shared.pull(); + const slow = shared.pull(); + + // Fast consumer reads all items + const fastItems = []; + for await (const batch of fast) { + for (const chunk of batch) { + fastItems.push(new TextDecoder().decode(chunk)); + } + } + assert.strictEqual(fastItems.length, 4); + + // Slow consumer reads after fast is done — old items were dropped + const slowItems = []; + for await (const batch of slow) { + for (const chunk of batch) { + slowItems.push(new TextDecoder().decode(chunk)); + } + } + // The slow consumer should see fewer items than were produced + assert.ok(slowItems.length < 4, + `Expected < 4 items after drop-oldest, got ${slowItems.length}`); + assert.ok(slowItems.length > 0, + 'Expected at least some items after drop-oldest'); + // The last item should always be present (most recent items kept) + assert.strictEqual(slowItems[slowItems.length - 1], '3'); +} + +async function testShareDropNewest() { + // With drop-newest and a stalled consumer, the async path allows the + // buffer to grow beyond highWaterMark (the "drop" applies to the + // backpressure signal, not the buffer contents). Both consumers + // ultimately see all items. 
+ async function* source() { + for (let i = 0; i < 4; i++) { + yield [new TextEncoder().encode(`${i}`)]; + } + } + const shared = share(source(), { highWaterMark: 2, backpressure: 'drop-newest' }); + const fast = shared.pull(); + const slow = shared.pull(); + + // Fast consumer reads all items + const fastItems = []; + for await (const batch of fast) { + for (const chunk of batch) { + fastItems.push(new TextDecoder().decode(chunk)); + } + } + assert.strictEqual(fastItems.length, 4); + + // Slow consumer also sees all items (buffer grew past hwm) + const slowItems = []; + for await (const batch of slow) { + for (const chunk of batch) { + slowItems.push(new TextDecoder().decode(chunk)); + } + } + assert.strictEqual(slowItems.length, 4); + assert.strictEqual(slowItems[0], '0'); + assert.strictEqual(slowItems[3], '3'); +} + +// ============================================================================= +// Strict backpressure: should throw when buffer overflows +// ============================================================================= + +async function testShareStrictBackpressure() { + async function* source() { + for (let i = 0; i < 10; i++) { + yield [new TextEncoder().encode(`${i}`)]; + } + } + const shared = share(source(), { highWaterMark: 2, backpressure: 'strict' }); + const fast = shared.pull(); + // Create a second consumer that never reads — this prevents buffer trimming + shared.pull(); + + // The fast consumer's pulls will eventually cause the buffer to exceed + // the highWaterMark (since the slow consumer prevents trimming), + // triggering an ERR_OUT_OF_RANGE error. 
+ await assert.rejects(async () => { + // eslint-disable-next-line no-unused-vars + for await (const _ of fast) { /* consume */ } + }, { code: 'ERR_OUT_OF_RANGE' }); +} + +Promise.all([ + testShareFrom(), + testShareFromRejectsNonStreamable(), + testSyncShareFromSync(), + testSyncShareFromRejectsNonStreamable(), + testShareProtocolReturnsNull(), + testShareProtocolReturnsNonObject(), + testSyncShareProtocolReturnsNull(), + testSyncShareProtocolReturnsNonObject(), + testShareBlockBackpressure(), + testShareDropOldest(), + testShareDropNewest(), + testShareStrictBackpressure(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-share-sync.js b/test/parallel/test-stream-iter-share-sync.js new file mode 100644 index 00000000000000..98ad74fe4f1c72 --- /dev/null +++ b/test/parallel/test-stream-iter-share-sync.js @@ -0,0 +1,162 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + shareSync, + fromSync, + textSync, + +} = require('stream/iter'); + +// ============================================================================= +// Sync share +// ============================================================================= + +async function testShareSyncBasic() { + const shared = shareSync(fromSync('sync shared')); + + const consumer = shared.pull(); + const data = textSync(consumer); + assert.strictEqual(data, 'sync shared'); +} + +async function testShareSyncMultipleConsumers() { + const enc = new TextEncoder(); + function* gen() { + yield [enc.encode('a')]; + yield [enc.encode('b')]; + yield [enc.encode('c')]; + } + + const shared = shareSync(gen(), { highWaterMark: 16 }); + + const c1 = shared.pull(); + const c2 = shared.pull(); + + const data1 = textSync(c1); + const data2 = textSync(c2); + + assert.strictEqual(data1, 'abc'); + assert.strictEqual(data2, 'abc'); +} + +function testShareSyncCancel() { + // Verify that cancel() on a pre-iteration share yields 
nothing + const shared = shareSync(fromSync('data')); + const consumer = shared.pull(); + + shared.cancel(); + assert.strictEqual(shared.consumerCount, 0); + + const batches = []; + for (const batch of consumer) { + batches.push(batch); + } + assert.strictEqual(batches.length, 0); +} + +function testShareSyncCancelMidIteration() { + // Verify cancel during iteration stops data flow and cleans up + const enc = new TextEncoder(); + let sourceReturnCalled = false; + function* gen() { + try { + yield [enc.encode('a')]; + yield [enc.encode('b')]; + yield [enc.encode('c')]; + } finally { + sourceReturnCalled = true; + } + } + const shared = shareSync(gen(), { highWaterMark: 16 }); + const consumer = shared.pull(); + + const items = []; + for (const batch of consumer) { + for (const chunk of batch) { + items.push(new TextDecoder().decode(chunk)); + } + // Cancel after first batch + shared.cancel(); + } + assert.strictEqual(items.length, 1); + assert.strictEqual(items[0], 'a'); + assert.strictEqual(sourceReturnCalled, true); +} + +function testShareSyncCancelWithReason() { + // When cancel(reason) is called, a consumer that hasn't started + // iterating is already detached, so it sees done:true (not the error). + // But a consumer that is mid-iteration when another consumer cancels + // with a reason will see the error on the next pull after cancel. 
+ const enc = new TextEncoder(); + function* gen() { + yield [enc.encode('a')]; + yield [enc.encode('b')]; + yield [enc.encode('c')]; + } + const shared = shareSync(gen(), { highWaterMark: 16 }); + const c1 = shared.pull(); + const c2 = shared.pull(); + + // c1 reads one item, then c2 cancels with a reason + const iter1 = c1[Symbol.iterator](); + const first = iter1.next(); + assert.strictEqual(first.done, false); + + shared.cancel(new Error('sync cancel reason')); + + // c1 was already iterating, it's now detached → done + const next = iter1.next(); + assert.strictEqual(next.done, true); + + // c2 never started, also detached → done (not error) + const batches = []; + for (const batch of c2) { + batches.push(batch); + } + assert.strictEqual(batches.length, 0); +} + +// ============================================================================= +// Source error propagation +// ============================================================================= + +function testShareSyncSourceError() { + function* failingSource() { + yield [new TextEncoder().encode('ok')]; + throw new Error('sync share boom'); + } + const shared = shareSync(failingSource()); + const c1 = shared.pull(); + const c2 = shared.pull(); + + // Both consumers should see the error + assert.throws(() => { + // eslint-disable-next-line no-unused-vars + for (const _ of c1) { /* consume */ } + }, { message: 'sync share boom' }); + assert.throws(() => { + // eslint-disable-next-line no-unused-vars + for (const _ of c2) { /* consume */ } + }, { message: 'sync share boom' }); +} + +// shareSync() accepts string source directly (normalized via fromSync()) +function testShareSyncStringSource() { + const shared = shareSync('hello-sync-share'); + const result = textSync(shared.pull()); + assert.strictEqual(result, 'hello-sync-share'); +} + +Promise.all([ + testShareSyncBasic(), + testShareSyncMultipleConsumers(), + testShareSyncCancel(), + testShareSyncCancelMidIteration(), + testShareSyncCancelWithReason(), 
+ testShareSyncSourceError(), + testShareSyncStringSource(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-sharedarraybuffer.js b/test/parallel/test-stream-iter-sharedarraybuffer.js new file mode 100644 index 00000000000000..7aff205bf7664a --- /dev/null +++ b/test/parallel/test-stream-iter-sharedarraybuffer.js @@ -0,0 +1,195 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + from, + fromSync, + bytes, + bytesSync, + text, + textSync, + arrayBuffer, + arrayBufferSync, + pipeTo, + pipeToSync, + pull, + pullSync, +} = require('stream/iter'); + +// ============================================================================= +// from() / fromSync() with SharedArrayBuffer +// ============================================================================= + +function testFromSyncSAB() { + const sab = new SharedArrayBuffer(4); + new Uint8Array(sab).set([10, 20, 30, 40]); + const batches = []; + for (const batch of fromSync(sab)) { + batches.push(batch); + } + assert.strictEqual(batches.length, 1); + assert.deepStrictEqual(batches[0][0], new Uint8Array([10, 20, 30, 40])); +} + +async function testFromAsyncSAB() { + const sab = new SharedArrayBuffer(4); + new Uint8Array(sab).set([10, 20, 30, 40]); + const batches = []; + for await (const batch of from(sab)) { + batches.push(batch); + } + assert.strictEqual(batches.length, 1); + assert.deepStrictEqual(batches[0][0], new Uint8Array([10, 20, 30, 40])); +} + +// ============================================================================= +// Consumers with SAB-backed source +// ============================================================================= + +function testBytesSyncSAB() { + const sab = new SharedArrayBuffer(5); + new Uint8Array(sab).set([104, 101, 108, 108, 111]); // 'hello' + const data = bytesSync(fromSync(sab)); + assert.deepStrictEqual(data, new Uint8Array([104, 101, 108, 108, 111])); +} + 
+async function testBytesAsyncSAB() { + const sab = new SharedArrayBuffer(5); + new Uint8Array(sab).set([104, 101, 108, 108, 111]); // 'hello' + const data = await bytes(from(sab)); + assert.deepStrictEqual(data, new Uint8Array([104, 101, 108, 108, 111])); +} + +function testTextSyncSAB() { + const sab = new SharedArrayBuffer(5); + new Uint8Array(sab).set([104, 101, 108, 108, 111]); // 'hello' + const result = textSync(fromSync(sab)); + assert.strictEqual(result, 'hello'); +} + +async function testTextAsyncSAB() { + const sab = new SharedArrayBuffer(5); + new Uint8Array(sab).set([104, 101, 108, 108, 111]); // 'hello' + const result = await text(from(sab)); + assert.strictEqual(result, 'hello'); +} + +function testArrayBufferSyncSAB() { + const sab = new SharedArrayBuffer(4); + new Uint8Array(sab).set([1, 2, 3, 4]); + const result = arrayBufferSync(fromSync(sab)); + assert.ok(result instanceof SharedArrayBuffer || result instanceof ArrayBuffer); + assert.deepStrictEqual(new Uint8Array(result), new Uint8Array([1, 2, 3, 4])); +} + +async function testArrayBufferAsyncSAB() { + const sab = new SharedArrayBuffer(4); + new Uint8Array(sab).set([1, 2, 3, 4]); + const result = await arrayBuffer(from(sab)); + assert.ok(result instanceof SharedArrayBuffer || result instanceof ArrayBuffer); + assert.deepStrictEqual(new Uint8Array(result), new Uint8Array([1, 2, 3, 4])); +} + +// ============================================================================= +// pipeTo / pipeToSync with SAB source +// ============================================================================= + +function testPipeToSyncSAB() { + const sab = new SharedArrayBuffer(3); + new Uint8Array(sab).set([65, 66, 67]); // 'ABC' + const written = []; + const writer = { + writeSync(chunk) { written.push(chunk); return true; }, + endSync() { return written.length; }, + }; + const totalBytes = pipeToSync(fromSync(sab), writer); + assert.strictEqual(totalBytes, 3); + assert.deepStrictEqual(written[0], new 
Uint8Array([65, 66, 67])); +} + +async function testPipeToAsyncSAB() { + const sab = new SharedArrayBuffer(3); + new Uint8Array(sab).set([65, 66, 67]); // 'ABC' + const written = []; + const writer = { + async write(chunk) { written.push(chunk); }, + async end() { return written.length; }, + }; + await pipeTo(from(sab), writer); + assert.deepStrictEqual(written[0], new Uint8Array([65, 66, 67])); +} + +// ============================================================================= +// pull / pullSync with SAB-yielding generator +// ============================================================================= + +function testPullSyncSABChunks() { + function* gen() { + const sab = new SharedArrayBuffer(2); + new Uint8Array(sab).set([1, 2]); + yield [new Uint8Array(sab)]; + } + const batches = []; + for (const batch of pullSync(gen())) { + batches.push(batch); + } + assert.strictEqual(batches.length, 1); + assert.deepStrictEqual(batches[0][0], new Uint8Array([1, 2])); +} + +async function testPullAsyncSABChunks() { + async function* gen() { + const sab = new SharedArrayBuffer(2); + new Uint8Array(sab).set([3, 4]); + yield [new Uint8Array(sab)]; + } + const batches = []; + for await (const batch of pull(gen())) { + batches.push(batch); + } + assert.strictEqual(batches.length, 1); + assert.deepStrictEqual(batches[0][0], new Uint8Array([3, 4])); +} + +// ============================================================================= +// Transform returning SAB +// ============================================================================= + +async function testTransformReturningSAB() { + function* source() { + yield [new Uint8Array([1, 2, 3])]; + } + const transform = (chunks) => { + if (chunks === null) return null; + // Transform returns a Uint8Array backed by a SharedArrayBuffer + const sab = new SharedArrayBuffer(chunks[0].length); + new Uint8Array(sab).set(chunks[0]); + return new Uint8Array(sab); + }; + const batches = []; + for await (const batch of pull(source(), 
transform)) { + batches.push(batch); + } + assert.strictEqual(batches.length, 1); + assert.deepStrictEqual(batches[0][0], new Uint8Array([1, 2, 3])); +} + +// ============================================================================= + +Promise.all([ + testFromSyncSAB(), + testFromAsyncSAB(), + testBytesSyncSAB(), + testBytesAsyncSAB(), + testTextSyncSAB(), + testTextAsyncSAB(), + testArrayBufferSyncSAB(), + testArrayBufferAsyncSAB(), + testPipeToSyncSAB(), + testPipeToAsyncSAB(), + testPullSyncSABChunks(), + testPullAsyncSABChunks(), + testTransformReturningSAB(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-transform-compat.js b/test/parallel/test-stream-iter-transform-compat.js new file mode 100644 index 00000000000000..b3180f41d98fa1 --- /dev/null +++ b/test/parallel/test-stream-iter-transform-compat.js @@ -0,0 +1,127 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const zlib = require('zlib'); +const { promisify } = require('util'); +const { + from, + pull, + bytes, + text, +} = require('stream/iter'); +const { + compressGzip, + compressDeflate, + compressBrotli, + compressZstd, + decompressGzip, + decompressDeflate, + decompressBrotli, + decompressZstd, +} = require('zlib/iter'); + +// ============================================================================= +// Cross-compatibility: verify gzip/deflate output is compatible with zlib +// ============================================================================= + +async function testGzipCompatWithZlib() { + const gunzip = promisify(zlib.gunzip); + + const input = 'Cross-compat test with node:zlib. 
'.repeat(100); + const compressed = await bytes(pull(from(input), compressGzip())); + + // Decompress with standard zlib + const decompressed = await gunzip(compressed); + assert.strictEqual(decompressed.toString(), input); +} + +async function testDeflateCompatWithZlib() { + const inflate = promisify(zlib.inflate); + + const input = 'Cross-compat deflate test. '.repeat(100); + const compressed = await bytes(pull(from(input), compressDeflate())); + + // Decompress with standard zlib + const decompressed = await inflate(compressed); + assert.strictEqual(decompressed.toString(), input); +} + +async function testBrotliCompatWithZlib() { + const brotliDecompress = promisify(zlib.brotliDecompress); + + const input = 'Cross-compat brotli test. '.repeat(100); + const compressed = await bytes(pull(from(input), compressBrotli())); + + const decompressed = await brotliDecompress(compressed); + assert.strictEqual(decompressed.toString(), input); +} + +async function testZstdCompatWithZlib() { + const zstdDecompress = promisify(zlib.zstdDecompress); + + const input = 'Cross-compat zstd test. '.repeat(100); + const compressed = await bytes(pull(from(input), compressZstd())); + + const decompressed = await zstdDecompress(compressed); + assert.strictEqual(decompressed.toString(), input); +} + +// ============================================================================= +// Reverse compat: compress with zlib, decompress with new streams +// ============================================================================= + +async function testZlibGzipToNewStreams() { + const gzip = promisify(zlib.gzip); + + const input = 'Reverse compat gzip test. '.repeat(100); + const compressed = await gzip(input); + const result = await text(pull(from(compressed), decompressGzip())); + assert.strictEqual(result, input); +} + +async function testZlibDeflateToNewStreams() { + const deflate = promisify(zlib.deflate); + + const input = 'Reverse compat deflate test. 
'.repeat(100); + const compressed = await deflate(input); + const result = await text(pull(from(compressed), decompressDeflate())); + assert.strictEqual(result, input); +} + +async function testZlibBrotliToNewStreams() { + const brotliCompress = promisify(zlib.brotliCompress); + + const input = 'Reverse compat brotli test. '.repeat(100); + const compressed = await brotliCompress(input); + const result = await text(pull(from(compressed), decompressBrotli())); + assert.strictEqual(result, input); +} + +async function testZlibZstdToNewStreams() { + const zstdCompress = promisify(zlib.zstdCompress); + + const input = 'Reverse compat zstd test. '.repeat(100); + const compressed = await zstdCompress(input); + const result = await text(pull(from(compressed), decompressZstd())); + assert.strictEqual(result, input); +} + +// ============================================================================= +// Run all tests +// ============================================================================= + +(async () => { + // Cross-compat: new streams compress → zlib decompress + await testGzipCompatWithZlib(); + await testDeflateCompatWithZlib(); + await testBrotliCompatWithZlib(); + await testZstdCompatWithZlib(); + + // Reverse compat: zlib compress → new streams decompress + await testZlibGzipToNewStreams(); + await testZlibDeflateToNewStreams(); + await testZlibBrotliToNewStreams(); + await testZlibZstdToNewStreams(); +})().then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-transform-coverage.js b/test/parallel/test-stream-iter-transform-coverage.js new file mode 100644 index 00000000000000..d10766badd4dc6 --- /dev/null +++ b/test/parallel/test-stream-iter-transform-coverage.js @@ -0,0 +1,119 @@ +// Flags: --experimental-stream-iter +'use strict'; + +// Coverage tests for transform.js: abort cleanup, strategy/params options, +// early consumer exit triggering finally block. 
+ +const common = require('../common'); +const assert = require('assert'); +const { + from, + pull, + bytes, + text, +} = require('stream/iter'); +const { + compressGzip, + decompressGzip, + compressDeflate, + decompressDeflate, + compressBrotli, + decompressBrotli, + compressZstd, + decompressZstd, +} = require('zlib/iter'); +const { constants } = require('zlib'); + +async function roundTrip(input, compress, decompress) { + return text(pull(from(input), compress, decompress)); +} + +// Abort mid-compression triggers finally cleanup +async function testAbortMidCompression() { + const ac = new AbortController(); + const largeInput = 'x'.repeat(100_000); + const compressed = pull(from(largeInput), compressGzip(), + { signal: ac.signal }); + const iter = compressed[Symbol.asyncIterator](); + + // Read one batch then abort + const first = await iter.next(); + assert.strictEqual(first.done, false); + ac.abort(); + await assert.rejects(iter.next(), { name: 'AbortError' }); +} + +// Early consumer exit (break from for-await) triggers finally +async function testEarlyConsumerExit() { + const largeInput = 'y'.repeat(100_000); + const compressed = pull(from(largeInput), compressGzip()); + + // eslint-disable-next-line no-unused-vars + for await (const batch of compressed) { + break; // Early exit — should trigger finally block cleanup + } + // If we get here without hanging or crashing, cleanup worked +} + +// Gzip with explicit strategy option +async function testGzipWithStrategy() { + const input = 'strategy test data '.repeat(100); + const c = compressGzip({ strategy: constants.Z_DEFAULT_STRATEGY }); + const result = await roundTrip(input, c, decompressGzip()); + assert.strictEqual(result, input); +} + +// Deflate with Z_FIXED strategy +async function testDeflateWithFixedStrategy() { + const input = 'fixed strategy '.repeat(100); + const c = compressDeflate({ strategy: constants.Z_FIXED }); + const result = await roundTrip(input, c, decompressDeflate()); + 
assert.strictEqual(result, input); +} + +// Brotli with custom quality param +async function testBrotliWithParams() { + const input = 'brotli params test '.repeat(100); + const params = { [constants.BROTLI_PARAM_QUALITY]: 5 }; + const result = await roundTrip(input, compressBrotli({ params }), + decompressBrotli()); + assert.strictEqual(result, input); +} + +// Zstd with custom compression level param +async function testZstdWithParams() { + const input = 'zstd params test '.repeat(100); + const params = { [constants.ZSTD_c_compressionLevel]: 10 }; + const result = await roundTrip(input, compressZstd({ params }), + decompressZstd()); + assert.strictEqual(result, input); +} + +// Gzip with custom chunkSize +async function testGzipWithChunkSize() { + const input = 'chunk size test'; + const c = compressGzip({ chunkSize: 256 }); + const d = decompressGzip({ chunkSize: 256 }); + const result = await roundTrip(input, c, d); + assert.strictEqual(result, input); +} + +// Invalid chunkSize throws when transform is invoked +async function testInvalidChunkSize() { + const tx = compressGzip({ chunkSize: 8 }); + await assert.rejects( + async () => await bytes(pull(from('data'), tx)), + { code: 'ERR_OUT_OF_RANGE' }, + ); +} + +Promise.all([ + testAbortMidCompression(), + testEarlyConsumerExit(), + testGzipWithStrategy(), + testDeflateWithFixedStrategy(), + testBrotliWithParams(), + testZstdWithParams(), + testGzipWithChunkSize(), + testInvalidChunkSize(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-transform-errors.js b/test/parallel/test-stream-iter-transform-errors.js new file mode 100644 index 00000000000000..62a3c178ac5f58 --- /dev/null +++ b/test/parallel/test-stream-iter-transform-errors.js @@ -0,0 +1,71 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + from, + pull, + bytes, +} = require('stream/iter'); +const { + decompressGzip, + decompressDeflate, + 
decompressBrotli, + decompressZstd, +} = require('zlib/iter'); + +// ============================================================================= +// Decompression of corrupt data +// ============================================================================= + +async function testCorruptGzipData() { + const corrupt = new Uint8Array([0x1F, 0x8B, 0xFF, 0xFF, 0xFF]); + + await assert.rejects( + async () => await bytes(pull(from(corrupt), decompressGzip())), { + name: 'Error', + code: 'Z_DATA_ERROR', + }); +} + +async function testCorruptDeflateData() { + const corrupt = new Uint8Array([0x78, 0xFF, 0xFF, 0xFF]); + + await assert.rejects( + async () => await bytes(pull(from(corrupt), decompressDeflate())), { + name: 'Error', + code: 'Z_DATA_ERROR', + }); +} + +async function testCorruptBrotliData() { + const corrupt = new Uint8Array([0xFF, 0xFF, 0xFF, 0xFF]); + + await assert.rejects( + async () => await bytes(pull(from(corrupt), decompressBrotli())), { + name: 'Error', + code: 'ERR__ERROR_FORMAT_PADDING_2', + }); +} + +async function testCorruptZstdData() { + // Completely invalid data (not even valid magic bytes) + const corrupt = new Uint8Array([0xFF, 0xFF, 0xFF, 0xFF, 0xFF]); + await assert.rejects( + async () => await bytes(pull(from(corrupt), decompressZstd())), { + name: 'Error', + code: 'ZSTD_error_prefix_unknown', + }); +} + +// ============================================================================= +// Run all tests +// ============================================================================= + +(async () => { + await testCorruptGzipData(); + await testCorruptDeflateData(); + await testCorruptBrotliData(); + await testCorruptZstdData(); +})().then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-transform-output.js b/test/parallel/test-stream-iter-transform-output.js new file mode 100644 index 00000000000000..a37cf08dc9d53f --- /dev/null +++ b/test/parallel/test-stream-iter-transform-output.js @@ -0,0 +1,225 @@ +// Flags: 
--experimental-stream-iter +'use strict'; + +// Tests for transform output normalization edge cases: +// ArrayBuffer, ArrayBufferView, iterables, strings, invalid types. + +const common = require('../common'); +const assert = require('assert'); +const { + pull, + pullSync, + bytes, + bytesSync, + from, + fromSync, +} = require('stream/iter'); + +// Stateless transform returns ArrayBuffer (async) +async function testTransformReturnsArrayBuffer() { + const tx = (chunks) => { + if (chunks === null) return null; + const ab = new ArrayBuffer(chunks[0].length); + new Uint8Array(ab).set(chunks[0]); + return ab; + }; + const data = await bytes(pull(from('AB'), tx)); + assert.deepStrictEqual(data, new TextEncoder().encode('AB')); +} + +// Stateless transform returns ArrayBuffer (sync) +async function testSyncTransformReturnsArrayBuffer() { + const tx = (chunks) => { + if (chunks === null) return null; + const ab = new ArrayBuffer(chunks[0].length); + new Uint8Array(ab).set(chunks[0]); + return ab; + }; + const data = bytesSync(pullSync(fromSync('AB'), tx)); + assert.deepStrictEqual(data, new TextEncoder().encode('AB')); +} + +// Stateless transform returns Float32Array (non-Uint8Array ArrayBufferView) +async function testTransformReturnsFloat32Array() { + const tx = (chunks) => { + if (chunks === null) return null; + return new Float32Array([1.0]); + }; + const data = await bytes(pull(from('x'), tx)); + assert.strictEqual(data.byteLength, 4); // 1 float32 = 4 bytes +} + +// Stateless sync transform returns Float32Array +async function testSyncTransformReturnsFloat32Array() { + const tx = (chunks) => { + if (chunks === null) return null; + return new Float32Array([1.0]); + }; + const data = bytesSync(pullSync(fromSync('x'), tx)); + assert.strictEqual(data.byteLength, 4); +} + +// Stateless transform returns a sync generator (iterable) +async function testTransformReturnsGenerator() { + const tx = (chunks) => { + if (chunks === null) return null; + return (function*() { + 
yield new Uint8Array([65]); + yield new Uint8Array([66]); + })(); + }; + const data = await bytes(pull(from('x'), tx)); + assert.deepStrictEqual(data, new Uint8Array([65, 66])); +} + +// Stateless sync transform returns a generator +async function testSyncTransformReturnsGenerator() { + const tx = (chunks) => { + if (chunks === null) return null; + return (function*() { + yield new Uint8Array([67]); + yield new Uint8Array([68]); + })(); + }; + const data = bytesSync(pullSync(fromSync('x'), tx)); + assert.deepStrictEqual(data, new Uint8Array([67, 68])); +} + +// Stateless async transform returns an async generator +async function testTransformReturnsAsyncGenerator() { + const tx = (chunks) => { + if (chunks === null) return null; + return (async function*() { + yield new Uint8Array([69]); + yield new Uint8Array([70]); + })(); + }; + const data = await bytes(pull(from('x'), tx)); + assert.deepStrictEqual(data, new Uint8Array([69, 70])); +} + +// Stateful async transform yields string +async function testStatefulTransformYieldsString() { + const tx = { + async *transform(source) { + for await (const chunks of source) { + if (chunks === null) return; + yield 'hello'; + } + }, + }; + const data = await bytes(pull(from('x'), tx)); + assert.deepStrictEqual(data, new TextEncoder().encode('hello')); +} + +// Stateful async transform yields ArrayBuffer +async function testStatefulTransformYieldsArrayBuffer() { + const tx = { + async *transform(source) { + for await (const chunks of source) { + if (chunks === null) return; + const ab = new ArrayBuffer(2); + new Uint8Array(ab).set([71, 72]); + yield ab; + } + }, + }; + const data = await bytes(pull(from('x'), tx)); + assert.deepStrictEqual(data, new Uint8Array([71, 72])); +} + +// Stateful sync transform yields string +async function testStatefulSyncTransformYieldsString() { + const tx = { + *transform(source) { + for (const chunks of source) { + if (chunks === null) return; + yield 'world'; + } + }, + }; + const data = 
bytesSync(pullSync(fromSync('x'), tx)); + assert.deepStrictEqual(data, new TextEncoder().encode('world')); +} + +// Flush returns single Uint8Array (not batch) +async function testFlushReturnsSingleUint8Array() { + const tx = (chunks) => { + if (chunks === null) return new Uint8Array([99]); // Flush returns single + return chunks; + }; + const data = await bytes(pull(from('x'), tx)); + // Should contain both the original data and the flush byte + assert.ok(data.includes(99)); +} + +// Flush returns string +async function testFlushReturnsString() { + const tx = (chunks) => { + if (chunks === null) return 'trailer'; + return chunks; + }; + const data = await bytes(pull(from('x'), tx)); + const trailer = new TextEncoder().encode('trailer'); + // Last bytes should be the trailer + const tail = data.slice(data.length - trailer.length); + assert.deepStrictEqual(tail, trailer); +} + +// Sync flush returns single Uint8Array +async function testSyncFlushReturnsSingleUint8Array() { + const tx = (chunks) => { + if (chunks === null) return new Uint8Array([88]); + return chunks; + }; + const data = bytesSync(pullSync(fromSync('x'), tx)); + assert.ok(data.includes(88)); +} + +// Transform returns invalid type → ERR_INVALID_ARG_TYPE +async function testTransformReturnsInvalidType() { + const tx = (chunks) => { + if (chunks === null) return null; + return 42; // Invalid + }; + await assert.rejects( + async () => { + // eslint-disable-next-line no-unused-vars + for await (const batch of pull(from('x'), tx)) { /* consume */ } + }, + { code: 'ERR_INVALID_ARG_TYPE' }, + ); +} + +// Sync transform returns invalid type → ERR_INVALID_ARG_TYPE +async function testSyncTransformReturnsInvalidType() { + const tx = (chunks) => { + if (chunks === null) return null; + return 42; + }; + assert.throws( + () => { + // eslint-disable-next-line no-unused-vars + for (const batch of pullSync(fromSync('x'), tx)) { /* consume */ } + }, + { code: 'ERR_INVALID_ARG_TYPE' }, + ); +} + +Promise.all([ + 
testTransformReturnsArrayBuffer(), + testSyncTransformReturnsArrayBuffer(), + testTransformReturnsFloat32Array(), + testSyncTransformReturnsFloat32Array(), + testTransformReturnsGenerator(), + testSyncTransformReturnsGenerator(), + testTransformReturnsAsyncGenerator(), + testStatefulTransformYieldsString(), + testStatefulTransformYieldsArrayBuffer(), + testStatefulSyncTransformYieldsString(), + testFlushReturnsSingleUint8Array(), + testFlushReturnsString(), + testSyncFlushReturnsSingleUint8Array(), + testTransformReturnsInvalidType(), + testSyncTransformReturnsInvalidType(), +]).then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-transform-roundtrip.js b/test/parallel/test-stream-iter-transform-roundtrip.js new file mode 100644 index 00000000000000..df63483d6c8535 --- /dev/null +++ b/test/parallel/test-stream-iter-transform-roundtrip.js @@ -0,0 +1,291 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + from, + pull, + bytes, + text, +} = require('stream/iter'); +const { + compressGzip, + compressDeflate, + compressBrotli, + compressZstd, + decompressGzip, + decompressDeflate, + decompressBrotli, + decompressZstd, +} = require('zlib/iter'); + +// ============================================================================= +// Helper: compress then decompress, verify round-trip equality +// ============================================================================= + +async function roundTrip(input, compress, decompress) { + return text(pull(pull(from(input), compress), decompress)); +} + +async function roundTripBytes(inputBuf, compress, decompress) { + return bytes(pull(pull(from(inputBuf), compress), decompress)); +} + +// ============================================================================= +// Gzip round-trip tests +// ============================================================================= + +async function testGzipRoundTrip() { + const 
input = 'Hello, gzip compression!'; + const result = await roundTrip(input, compressGzip(), decompressGzip()); + assert.strictEqual(result, input); +} + +async function testGzipLargeData() { + // 100KB of repeated text - exercises multi-chunk path + const input = 'gzip large data test. '.repeat(5000); + const result = await roundTrip(input, compressGzip(), decompressGzip()); + assert.strictEqual(result, input); +} + +async function testGzipActuallyCompresses() { + const input = 'Repeated data compresses well. '.repeat(1000); + const inputBuf = Buffer.from(input); + const source = from(inputBuf); + const compressed = await bytes(pull(source, compressGzip())); + assert.ok(compressed.byteLength < inputBuf.byteLength, + `Compressed ${compressed.byteLength} should be < original ${inputBuf.byteLength}`); +} + +// ============================================================================= +// Deflate round-trip tests +// ============================================================================= + +async function testDeflateRoundTrip() { + const input = 'Hello, deflate compression!'; + const result = await roundTrip(input, compressDeflate(), decompressDeflate()); + assert.strictEqual(result, input); +} + +async function testDeflateLargeData() { + const input = 'deflate large data test. '.repeat(5000); + const result = await roundTrip(input, compressDeflate(), decompressDeflate()); + assert.strictEqual(result, input); +} + +async function testDeflateActuallyCompresses() { + const input = 'Repeated data compresses well. 
'.repeat(1000); + const inputBuf = Buffer.from(input); + const source = from(inputBuf); + const compressed = await bytes(pull(source, compressDeflate())); + assert.ok(compressed.byteLength < inputBuf.byteLength, + `Compressed ${compressed.byteLength} should be < original ${inputBuf.byteLength}`); +} + +// ============================================================================= +// Brotli round-trip tests +// ============================================================================= + +async function testBrotliRoundTrip() { + const input = 'Hello, brotli compression!'; + const result = await roundTrip(input, compressBrotli(), decompressBrotli()); + assert.strictEqual(result, input); +} + +async function testBrotliLargeData() { + const input = 'brotli large data test. '.repeat(5000); + const result = await roundTrip(input, compressBrotli(), decompressBrotli()); + assert.strictEqual(result, input); +} + +async function testBrotliActuallyCompresses() { + const input = 'Repeated data compresses well. '.repeat(1000); + const inputBuf = Buffer.from(input); + const compressed = await bytes(pull(from(inputBuf), compressBrotli())); + assert.ok(compressed.byteLength < inputBuf.byteLength, + `Compressed ${compressed.byteLength} should be < original ${inputBuf.byteLength}`); +} + +// ============================================================================= +// Zstd round-trip tests +// ============================================================================= + +async function testZstdRoundTrip() { + const input = 'Hello, zstd compression!'; + const result = await roundTrip(input, compressZstd(), decompressZstd()); + assert.strictEqual(result, input); +} + +async function testZstdLargeData() { + const input = 'zstd large data test. '.repeat(5000); + const result = await roundTrip(input, compressZstd(), decompressZstd()); + assert.strictEqual(result, input); +} + +async function testZstdActuallyCompresses() { + const input = 'Repeated data compresses well. 
'.repeat(1000); + const inputBuf = Buffer.from(input); + const compressed = await bytes(pull(from(inputBuf), compressZstd())); + assert.ok(compressed.byteLength < inputBuf.byteLength, + `Compressed ${compressed.byteLength} should be < original ${inputBuf.byteLength}`); +} + +// ============================================================================= +// Binary data round-trip - verify no corruption on non-text data +// ============================================================================= + +// Create a buffer with a repeating byte pattern covering all 256 values. +function makeBinaryTestData(size = 1024) { + const buf = Buffer.alloc(size); + for (let i = 0; i < size; i++) buf[i] = i & 0xFF; + return buf; +} + +async function testBinaryRoundTripGzip() { + const input = makeBinaryTestData(); + const result = await roundTripBytes(input, compressGzip(), decompressGzip()); + assert.strictEqual(result.byteLength, input.byteLength); + assert.deepStrictEqual(Buffer.from(result), input); +} + +async function testBinaryRoundTripDeflate() { + const input = makeBinaryTestData(); + const result = await roundTripBytes(input, compressDeflate(), + decompressDeflate()); + assert.strictEqual(result.byteLength, input.byteLength); + assert.deepStrictEqual(Buffer.from(result), input); +} + +async function testBinaryRoundTripBrotli() { + const input = makeBinaryTestData(); + const result = await roundTripBytes(input, compressBrotli(), + decompressBrotli()); + assert.strictEqual(result.byteLength, input.byteLength); + assert.deepStrictEqual(Buffer.from(result), input); +} + +async function testBinaryRoundTripZstd() { + const input = makeBinaryTestData(); + const result = await roundTripBytes(input, compressZstd(), decompressZstd()); + assert.strictEqual(result.byteLength, input.byteLength); + assert.deepStrictEqual(Buffer.from(result), input); +} + +// ============================================================================= +// Empty input +// 
============================================================================= + +async function testEmptyInputGzip() { + const result = await roundTrip('', compressGzip(), decompressGzip()); + assert.strictEqual(result, ''); +} + +async function testEmptyInputDeflate() { + const result = await roundTrip('', compressDeflate(), decompressDeflate()); + assert.strictEqual(result, ''); +} + +async function testEmptyInputBrotli() { + const result = await roundTrip('', compressBrotli(), decompressBrotli()); + assert.strictEqual(result, ''); +} + +async function testEmptyInputZstd() { + const result = await roundTrip('', compressZstd(), decompressZstd()); + assert.strictEqual(result, ''); +} + +// ============================================================================= +// Chained transforms - compress with one, then another, decompress in reverse +// ============================================================================= + +async function testChainedGzipDeflate() { + const input = 'Double compression test data. 
'.repeat(100); + // Compress: gzip then deflate + const compressed = pull(pull(from(input), compressGzip()), compressDeflate()); + // Decompress: deflate then gzip (reverse order) + const decompressed = pull(pull(compressed, decompressDeflate()), + decompressGzip()); + const result = await text(decompressed); + assert.strictEqual(result, input); +} + +// ============================================================================= +// Transform protocol: verify each factory returns a proper transform object +// ============================================================================= + +function testTransformProtocol() { + [ + compressGzip, compressDeflate, compressBrotli, compressZstd, + decompressGzip, decompressDeflate, decompressBrotli, decompressZstd, + ].forEach((factory) => { + const t = factory(); + assert.strictEqual(typeof t.transform, 'function', + `${factory.name}() should have a transform function`); + }); +} + +// ============================================================================= +// Compression with options +// ============================================================================= + +async function testGzipWithLevel() { + const data = 'a'.repeat(10000); + const level1 = await bytes(pull(from(data), compressGzip({ level: 1 }))); + const level9 = await bytes(pull(from(data), compressGzip({ level: 9 }))); + // Higher compression level should produce smaller output + assert.ok(level9.length <= level1.length); + // Both should decompress to original + const dec1 = await text(pull(from(level1), decompressGzip())); + const dec9 = await text(pull(from(level9), decompressGzip())); + assert.strictEqual(dec1, data); + assert.strictEqual(dec9, data); +} + +// ============================================================================= +// Run all tests +// ============================================================================= + +(async () => { + // Gzip + await testGzipRoundTrip(); + await testGzipLargeData(); + await 
testGzipActuallyCompresses(); + + // Deflate + await testDeflateRoundTrip(); + await testDeflateLargeData(); + await testDeflateActuallyCompresses(); + + // Brotli + await testBrotliRoundTrip(); + await testBrotliLargeData(); + await testBrotliActuallyCompresses(); + + // Zstd + await testZstdRoundTrip(); + await testZstdLargeData(); + await testZstdActuallyCompresses(); + + // Binary data + await testBinaryRoundTripGzip(); + await testBinaryRoundTripDeflate(); + await testBinaryRoundTripBrotli(); + await testBinaryRoundTripZstd(); + + // Empty input + await testEmptyInputGzip(); + await testEmptyInputDeflate(); + await testEmptyInputBrotli(); + await testEmptyInputZstd(); + + // Chained + await testChainedGzipDeflate(); + + // Protocol + testTransformProtocol(); + + // Compression with options + await testGzipWithLevel(); +})().then(common.mustCall()); diff --git a/test/parallel/test-stream-iter-transform-sync.js b/test/parallel/test-stream-iter-transform-sync.js new file mode 100644 index 00000000000000..d674c26cca100a --- /dev/null +++ b/test/parallel/test-stream-iter-transform-sync.js @@ -0,0 +1,227 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + fromSync, + pullSync, + bytesSync, + textSync, +} = require('stream/iter'); +const { + compressGzipSync, + compressDeflateSync, + compressBrotliSync, + compressZstdSync, + decompressGzipSync, + decompressDeflateSync, + decompressBrotliSync, + decompressZstdSync, +} = require('zlib/iter'); + +// ============================================================================= +// Helper: sync compress then decompress, verify round-trip equality +// ============================================================================= + +function roundTrip(input, compress, decompress) { + return textSync(pullSync(pullSync(fromSync(input), compress), decompress)); +} + +function roundTripBytes(inputBuf, compress, decompress) { + return 
bytesSync(pullSync(pullSync(fromSync(inputBuf), compress), decompress)); +} + +// ============================================================================= +// Gzip sync round-trip tests +// ============================================================================= + +function testGzipRoundTrip() { + const input = 'Hello, sync gzip compression!'; + const result = roundTrip(input, compressGzipSync(), decompressGzipSync()); + assert.strictEqual(result, input); +} + +function testGzipLargeData() { + const input = 'gzip sync large data test. '.repeat(5000); + const result = roundTrip(input, compressGzipSync(), decompressGzipSync()); + assert.strictEqual(result, input); +} + +function testGzipActuallyCompresses() { + const input = 'Repeated data compresses well. '.repeat(1000); + const inputBuf = Buffer.from(input); + const compressed = bytesSync(pullSync(fromSync(inputBuf), + compressGzipSync())); + assert.ok(compressed.byteLength < inputBuf.byteLength, + `Compressed ${compressed.byteLength} should be < ` + + `original ${inputBuf.byteLength}`); +} + +function testGzipBinaryData() { + const inputBuf = Buffer.alloc(10000); + for (let i = 0; i < inputBuf.length; i++) inputBuf[i] = i & 0xff; + const result = roundTripBytes(inputBuf, compressGzipSync(), + decompressGzipSync()); + assert.deepStrictEqual(result, inputBuf); +} + +// ============================================================================= +// Deflate sync round-trip tests +// ============================================================================= + +function testDeflateRoundTrip() { + const input = 'Hello, sync deflate compression!'; + const result = roundTrip(input, compressDeflateSync(), + decompressDeflateSync()); + assert.strictEqual(result, input); +} + +function testDeflateLargeData() { + const input = 'deflate sync large data test. 
'.repeat(5000); + const result = roundTrip(input, compressDeflateSync(), + decompressDeflateSync()); + assert.strictEqual(result, input); +} + +// ============================================================================= +// Brotli sync round-trip tests +// ============================================================================= + +function testBrotliRoundTrip() { + const input = 'Hello, sync brotli compression!'; + const result = roundTrip(input, compressBrotliSync(), + decompressBrotliSync()); + assert.strictEqual(result, input); +} + +function testBrotliLargeData() { + const input = 'brotli sync large data test. '.repeat(5000); + const result = roundTrip(input, compressBrotliSync(), + decompressBrotliSync()); + assert.strictEqual(result, input); +} + +// ============================================================================= +// Zstd sync round-trip tests +// ============================================================================= + +function testZstdRoundTrip() { + const input = 'Hello, sync zstd compression!'; + const result = roundTrip(input, compressZstdSync(), decompressZstdSync()); + assert.strictEqual(result, input); +} + +function testZstdLargeData() { + const input = 'zstd sync large data test. 
'.repeat(5000); + const result = roundTrip(input, compressZstdSync(), decompressZstdSync()); + assert.strictEqual(result, input); +} + +// ============================================================================= +// Cross-algorithm: compress async-compatible, decompress sync (and vice versa) +// The sync transforms should produce output compatible with the standard format +// ============================================================================= + +function testGzipWithOptions() { + const input = 'options test data '.repeat(100); + const result = roundTrip(input, + compressGzipSync({ level: 1 }), + decompressGzipSync()); + assert.strictEqual(result, input); +} + +function testBrotliWithOptions() { + const zlib = require('zlib'); + const input = 'brotli options test data '.repeat(100); + const result = roundTrip(input, + compressBrotliSync({ + params: { + [zlib.constants.BROTLI_PARAM_QUALITY]: 3, + }, + }), + decompressBrotliSync()); + assert.strictEqual(result, input); +} + +// ============================================================================= +// Stateless + stateful sync transform pipeline +// ============================================================================= + +function testMixedStatelessAndStateful() { + // Uppercase stateless transform + gzip stateful transform + const upper = (chunks) => { + if (chunks === null) return null; + const out = new Array(chunks.length); + for (let j = 0; j < chunks.length; j++) { + const src = chunks[j]; + const buf = Buffer.allocUnsafe(src.length); + for (let i = 0; i < src.length; i++) { + const b = src[i]; + buf[i] = (b >= 0x61 && b <= 0x7a) ? 
b - 0x20 : b; + } + out[j] = buf; + } + return out; + }; + + const input = 'hello world '.repeat(100); + const result = textSync( + pullSync( + pullSync(fromSync(input), upper, compressGzipSync()), + decompressGzipSync(), + ), + ); + assert.strictEqual(result, input.toUpperCase()); +} + +// ============================================================================= +// Early consumer exit (break from for-of) triggers cleanup +// ============================================================================= + +function testEarlyExit() { + const input = 'y'.repeat(100_000); + const compressed = pullSync(fromSync(input), compressGzipSync()); + + // eslint-disable-next-line no-unused-vars + for (const batch of compressed) { + break; // Early exit - should trigger finally block cleanup + } + // If we get here without crashing, cleanup worked +} + +// ============================================================================= +// Empty input +// ============================================================================= + +function testEmptyInput() { + const result = textSync( + pullSync( + pullSync(fromSync(''), compressGzipSync()), + decompressGzipSync(), + ), + ); + assert.strictEqual(result, ''); +} + +// ============================================================================= +// Run all tests +// ============================================================================= + +testGzipRoundTrip(); +testGzipLargeData(); +testGzipActuallyCompresses(); +testGzipBinaryData(); +testDeflateRoundTrip(); +testDeflateLargeData(); +testBrotliRoundTrip(); +testBrotliLargeData(); +testZstdRoundTrip(); +testZstdLargeData(); +testGzipWithOptions(); +testBrotliWithOptions(); +testMixedStatelessAndStateful(); +testEarlyExit(); +testEmptyInput(); + +common.mustCall()(); diff --git a/test/parallel/test-stream-iter-validation.js b/test/parallel/test-stream-iter-validation.js new file mode 100644 index 00000000000000..d58eca4e63ac3b --- /dev/null +++ 
b/test/parallel/test-stream-iter-validation.js @@ -0,0 +1,347 @@ +// Flags: --experimental-stream-iter +'use strict'; + +const common = require('../common'); +const assert = require('assert'); +const { + from, fromSync, pull, pullSync, pipeTo, + push, duplex, broadcast, Broadcast, share, shareSync, + Share, SyncShare, + bytes, bytesSync, text, textSync, + arrayBuffer, arrayBufferSync, array, arraySync, + tap, tapSync, +} = require('stream/iter'); +const { + compressGzip, compressBrotli, compressZstd, + decompressGzip, decompressBrotli, decompressZstd, +} = require('zlib/iter'); + +// ============================================================================= +// push() validation +// ============================================================================= + +// HighWaterMark must be integer >= 1 +assert.throws(() => push({ highWaterMark: 'bad' }), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => push({ highWaterMark: 1.5 }), { code: 'ERR_OUT_OF_RANGE' }); +// Values < 1 are clamped to 1 +assert.strictEqual(push({ highWaterMark: 0 }).writer.desiredSize, 1); +assert.strictEqual(push({ highWaterMark: -1 }).writer.desiredSize, 1); +assert.strictEqual(push({ highWaterMark: -100 }).writer.desiredSize, 1); +// MAX_SAFE_INTEGER is accepted +assert.strictEqual(push({ highWaterMark: Number.MAX_SAFE_INTEGER }).writer.desiredSize, + Number.MAX_SAFE_INTEGER); +// Values above MAX_SAFE_INTEGER are rejected by validateInteger +assert.throws(() => push({ highWaterMark: Number.MAX_SAFE_INTEGER + 1 }), + { code: 'ERR_OUT_OF_RANGE' }); + +// Signal must be AbortSignal +assert.throws(() => push({ signal: 'bad' }), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => push({ signal: {} }), { code: 'ERR_INVALID_ARG_TYPE' }); + +// Transforms must be functions or transform objects +assert.throws(() => push(42, {}), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => push('bad', {}), { code: 'ERR_INVALID_ARG_TYPE' }); + +// Writer.writev requires array +{ + const { 
writer } = push(); + assert.throws(() => writer.writev('bad'), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => writer.writev(42), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => writer.writevSync('bad'), { code: 'ERR_INVALID_ARG_TYPE' }); + writer.endSync(); +} + +// Writer.write rejects non-string/non-Uint8Array +{ + const { writer } = push(); + assert.throws(() => writer.writeSync(42), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => writer.writeSync({}), { code: 'ERR_INVALID_ARG_TYPE' }); + assert.throws(() => writer.writeSync(true), { code: 'ERR_INVALID_ARG_TYPE' }); + writer.endSync(); +} + +// ============================================================================= +// duplex() validation +// ============================================================================= + +assert.throws(() => duplex(42), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => duplex('bad'), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => duplex({ a: 42 }), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => duplex({ b: 'bad' }), { code: 'ERR_INVALID_ARG_TYPE' }); + +// highWaterMark validation (cascades through to push()) +assert.throws(() => duplex({ highWaterMark: 'bad' }), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => duplex({ highWaterMark: 1.5 }), { code: 'ERR_OUT_OF_RANGE' }); +assert.throws(() => duplex({ highWaterMark: Number.MAX_SAFE_INTEGER + 1 }), + { code: 'ERR_OUT_OF_RANGE' }); + +// Values < 1 are clamped to 1 (both directions) +{ + const [a, b] = duplex({ highWaterMark: 0 }); + assert.strictEqual(a.writer.desiredSize, 1); + assert.strictEqual(b.writer.desiredSize, 1); + a.close(); + b.close(); +} +// MAX_SAFE_INTEGER is accepted +{ + const [a, b] = duplex({ highWaterMark: Number.MAX_SAFE_INTEGER }); + assert.strictEqual(a.writer.desiredSize, Number.MAX_SAFE_INTEGER); + assert.strictEqual(b.writer.desiredSize, Number.MAX_SAFE_INTEGER); + a.close(); + b.close(); +} +// Per-direction overrides +{ + const [a, b] = 
duplex({ a: { highWaterMark: 0 }, b: { highWaterMark: 5 } }); + assert.strictEqual(a.writer.desiredSize, 1); // clamped + assert.strictEqual(b.writer.desiredSize, 5); + a.close(); + b.close(); +} + +assert.throws(() => duplex({ signal: {} }), { code: 'ERR_INVALID_ARG_TYPE' }); + +// ============================================================================= +// pull() / pullSync() validation +// ============================================================================= + +// Signal must be AbortSignal +assert.throws(() => pull(from('a'), { signal: 'bad' }), { code: 'ERR_INVALID_ARG_TYPE' }); + +// Transforms must be functions or transform objects +assert.throws(() => pull(from('a'), 42), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => pull(from('a'), 'bad'), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => pullSync(fromSync('a'), 42), { code: 'ERR_INVALID_ARG_TYPE' }); + +// ============================================================================= +// broadcast() validation +// ============================================================================= + +assert.throws(() => broadcast({ highWaterMark: 'bad' }), { code: 'ERR_INVALID_ARG_TYPE' }); +assert.throws(() => broadcast({ highWaterMark: 1.5 }), { code: 'ERR_OUT_OF_RANGE' }); +assert.throws(() => broadcast({ highWaterMark: Number.MAX_SAFE_INTEGER + 1 }), + { code: 'ERR_OUT_OF_RANGE' }); + +// Values < 1 are clamped to 1 (need a consumer for desiredSize to work) +{ + const bc = broadcast({ highWaterMark: 0 }); + bc.broadcast.push(); + assert.strictEqual(bc.writer.desiredSize, 1); + bc.writer.endSync(); +} +{ + const bc = broadcast({ highWaterMark: -1 }); + bc.broadcast.push(); + assert.strictEqual(bc.writer.desiredSize, 1); + bc.writer.endSync(); +} +// MAX_SAFE_INTEGER is accepted +{ + const bc = broadcast({ highWaterMark: Number.MAX_SAFE_INTEGER }); + bc.broadcast.push(); + assert.strictEqual(bc.writer.desiredSize, Number.MAX_SAFE_INTEGER); + bc.writer.endSync(); +} + 
assert.throws(() => broadcast({ signal: {} }),
              { code: 'ERR_INVALID_ARG_TYPE' });
assert.throws(() => broadcast({ backpressure: 'bad' }),
              { code: 'ERR_INVALID_ARG_VALUE' });

// Broadcast.from rejects non-iterable input
assert.throws(() => Broadcast.from(42), { code: 'ERR_INVALID_ARG_TYPE' });
assert.throws(() => Broadcast.from('bad'), { code: 'ERR_INVALID_ARG_TYPE' });

// =============================================================================
// share() / shareSync() validation
// =============================================================================

assert.throws(() => share(42), { code: 'ERR_INVALID_ARG_TYPE' });
assert.throws(() => share(from('a'), { highWaterMark: 'bad' }),
              { code: 'ERR_INVALID_ARG_TYPE' });
assert.throws(() => share(from('a'), { highWaterMark: 1.5 }),
              { code: 'ERR_OUT_OF_RANGE' });
assert.throws(
  () => share(from('a'), { highWaterMark: Number.MAX_SAFE_INTEGER + 1 }),
  { code: 'ERR_OUT_OF_RANGE' });
assert.throws(() => share(from('a'), { signal: {} }),
              { code: 'ERR_INVALID_ARG_TYPE' });
assert.throws(() => share(from('a'), { backpressure: 'bad' }),
              { code: 'ERR_INVALID_ARG_VALUE' });

// share() values < 1 are clamped (no desiredSize, but accepts the value)
for (const hwm of [0, -1, Number.MAX_SAFE_INTEGER])
  share(from('a'), { highWaterMark: hwm }).cancel();

assert.throws(() => shareSync(42), { code: 'ERR_INVALID_ARG_TYPE' });
assert.throws(() => shareSync(fromSync('a'), { highWaterMark: 'bad' }),
              { code: 'ERR_INVALID_ARG_TYPE' });
assert.throws(() => shareSync(fromSync('a'), { highWaterMark: 1.5 }),
              { code: 'ERR_OUT_OF_RANGE' });
assert.throws(
  () => shareSync(fromSync('a'), { highWaterMark: Number.MAX_SAFE_INTEGER + 1 }),
  { code: 'ERR_OUT_OF_RANGE' });

// shareSync() values < 1 are clamped (accepts the value)
shareSync(fromSync('a'), { highWaterMark: 0 }).cancel();
shareSync(fromSync('a'), { highWaterMark: -1 }).cancel();
shareSync(fromSync('a'), { highWaterMark: Number.MAX_SAFE_INTEGER }).cancel();

// Share.from / SyncShare.fromSync reject non-iterable input
assert.throws(() => Share.from(42), { code: 'ERR_INVALID_ARG_TYPE' });
assert.throws(() => SyncShare.fromSync(42), { code: 'ERR_INVALID_ARG_TYPE' });

// Error matchers reused by the consumer/compression checks below.
const typeErr = { code: 'ERR_INVALID_ARG_TYPE' };
const rangeErr = { code: 'ERR_OUT_OF_RANGE' };
const valueErr = { code: 'ERR_INVALID_ARG_VALUE' };

// =============================================================================
// Consumer validation (synchronous)
// =============================================================================

// tap / tapSync require a function
assert.throws(() => tap(42), typeErr);
assert.throws(() => tap('bad'), typeErr);
assert.throws(() => tapSync(42), typeErr);
assert.throws(() => tapSync(null), typeErr);

// Sync consumer options
assert.throws(() => bytesSync(fromSync('a'), { limit: 'bad' }), typeErr);
assert.throws(() => bytesSync(fromSync('a'), { limit: -1 }), rangeErr);
assert.throws(() => textSync(fromSync('a'), { encoding: 42 }), typeErr);
assert.throws(() => textSync(fromSync('a'), { encoding: 'bogus' }), valueErr);
assert.throws(() => arrayBufferSync(fromSync('a'), { limit: 'bad' }), typeErr);
assert.throws(() => arraySync(fromSync('a'), { limit: -1 }), rangeErr);

// Options must be an object if provided
assert.throws(() => bytesSync(fromSync('a'), 42), typeErr);
assert.throws(() => textSync(fromSync('a'), 'bad'), typeErr);

// Compression options must be an object
assert.throws(() => compressGzip(42), typeErr);
assert.throws(() => decompressGzip('bad'), typeErr);
assert.throws(() => compressBrotli(42), typeErr);
assert.throws(() => decompressBrotli('bad'), typeErr);
assert.throws(() => compressZstd(42), typeErr);
assert.throws(() => decompressZstd('bad'), typeErr);

// =============================================================================
// Async consumer and compression validation
// =============================================================================

// Helper: consume a transform through a pipeline to trigger lazy validation.
const consume = (transform) => bytes(pull(from('test'), transform));

async function testAsyncValidation() {
  // pipeTo signal
  await assert.rejects(
    () => pipeTo(from('a'), { write() {} }, { signal: 'bad' }),
    typeErr,
  );

  // Async consumer options
  await assert.rejects(() => bytes(from('a'), 42), typeErr);
  await assert.rejects(() => bytes(from('a'), { signal: 'bad' }), typeErr);
  await assert.rejects(() => bytes(from('a'), { limit: 'bad' }), typeErr);
  await assert.rejects(() => bytes(from('a'), { limit: -1 }), rangeErr);
  await assert.rejects(() => text(from('a'), { encoding: 42 }), typeErr);
  await assert.rejects(
    () => text(from('a'), { encoding: 'not-a-real-encoding' }), valueErr);
  await assert.rejects(
    () => arrayBuffer(from('a'), { limit: 'bad' }), typeErr);
  await assert.rejects(() => array(from('a'), { limit: -1 }), rangeErr);

  const brotliErr = { code: 'ERR_BROTLI_INVALID_PARAM' };
  const zstdErr = { code: 'ERR_ZSTD_INVALID_PARAM' };

  // chunkSize
  await assert.rejects(consume(compressGzip({ chunkSize: 'bad' })), typeErr);
  await assert.rejects(consume(compressGzip({ chunkSize: 0 })), rangeErr);
  await assert.rejects(consume(compressGzip({ chunkSize: 10 })), rangeErr);

  // windowBits
  await assert.rejects(consume(compressGzip({ windowBits: 'bad' })), typeErr);
  await assert.rejects(consume(compressGzip({ windowBits: 100 })), rangeErr);

  // level
  await assert.rejects(consume(compressGzip({ level: 'bad' })), typeErr);
  await assert.rejects(consume(compressGzip({ level: 100 })), rangeErr);

  // memLevel
  await assert.rejects(consume(compressGzip({ memLevel: 'bad' })), typeErr);
  await assert.rejects(consume(compressGzip({ memLevel: 100 })), rangeErr);

  // strategy
  await assert.rejects(consume(compressGzip({ strategy: 'bad' })), typeErr);
  await assert.rejects(consume(compressGzip({ strategy: 100 })), rangeErr);

  // dictionary
  await assert.rejects(consume(compressGzip({ dictionary: 42 })), typeErr);
  await assert.rejects(consume(compressGzip({ dictionary: 'bad' })), typeErr);

  // Brotli params
  await assert.rejects(consume(compressBrotli({ params: 42 })), typeErr);
  await assert.rejects(consume(compressBrotli({ params: { bad: 1 } })),
                       brotliErr);
  await assert.rejects(consume(compressBrotli({ params: { [-1]: 1 } })),
                       brotliErr);
  await assert.rejects(consume(compressBrotli({ params: { 0: 'bad' } })),
                       typeErr);

  // Zstd params
  await assert.rejects(consume(compressZstd({ params: 42 })), typeErr);
  await assert.rejects(consume(compressZstd({ params: { bad: 1 } })), zstdErr);
  await assert.rejects(consume(compressZstd({ params: { 0: 'bad' } })),
                       typeErr);

  // Zstd pledgedSrcSize
  await assert.rejects(consume(compressZstd({ pledgedSrcSize: 'bad' })),
                       typeErr);
  await assert.rejects(consume(compressZstd({ pledgedSrcSize: -1 })),
                       rangeErr);
}

// =============================================================================
// Valid calls still work
// =============================================================================

// push with valid options
{
  const { writer } = push({ highWaterMark: 2 });
  writer.writeSync('hello');
  writer.endSync();
}

// duplex with valid options
{
  const [sideA, sideB] = duplex({ highWaterMark: 2 });
  sideA.close();
  sideB.close();
}

// broadcast with valid options
{
  const { writer } = broadcast({ highWaterMark: 4 });
  writer.endSync();
}

// share with valid options
share(from('hello'), { highWaterMark: 4 }).cancel();

// Compression with valid options
{
  const gzipTransform = compressGzip({ chunkSize: 1024, level: 6 });
  assert.strictEqual(typeof gzipTransform.transform, 'function');
}

// Brotli with valid params
{
  const { constants: { BROTLI_PARAM_QUALITY } } = require('zlib');
  const brotliTransform =
    compressBrotli({ params: { [BROTLI_PARAM_QUALITY]: 5 } });
  assert.strictEqual(typeof brotliTransform.transform, 'function');
}

testAsyncValidation().then(common.mustCall());