import { Readable } from "node:stream";
import { describe, expect, test as it, vi } from "vitest";

import { createBufferedReadable } from "./createBufferedReadableStream";

describe("Buffered ReadableStream", () => {
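  // Emits "a"-filled string chunks of `chunkSize` characters until at least `size` characters have been produced.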
  function stringStream(size: number, chunkSize: number) {
    async function* generate() {
      while (size > 0) {
        yield "a".repeat(chunkSize);
        size -= chunkSize;
      }
    }
    return Readable.toWeb(Readable.from(generate()));
  }
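  // Emits zero-filled Uint8Array chunks of `chunkSize` bytes until at least `size` bytes have been produced.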
  function byteStream(size: number, chunkSize: number) {
    async function* generate() {
      while (size > 0) {
        yield new Uint8Array(chunkSize);
        size -= chunkSize;
      }
    }
    return Readable.toWeb(Readable.from(generate()));
  }

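  // Minimal mock logger; the byteStream test below asserts that warn() is called.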
  const logger = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error() {},
  };

  it("should join upstream chunks if they are too small (stringStream)", async () => {
    let upstreamChunkCount = 0;
    let downstreamChunkCount = 0;

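    // 1024 characters delivered upstream in 8-character chunks.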
    const upstream = stringStream(1024, 8);
    const upstreamReader = upstream.getReader();

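    // Pass-through stream that counts upstream chunks and verifies their 8-character size.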
    const midstream = new ReadableStream({
      async pull(controller) {
        const { value, done } = await upstreamReader.read();
        if (done) {
          controller.close();
        } else {
          expect(value.length).toBe(8);
          upstreamChunkCount += 1;
          controller.enqueue(value);
        }
      },
    });
    const downstream = createBufferedReadable(midstream, 64);
    const reader = downstream.getReader();

    while (true) {
      const { done, value } = await reader.read();
      if (done) {
        break;
      } else {
        downstreamChunkCount += 1;
        expect(value.length).toBe(64);
      }
    }

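    // 1024 / 8 = 128 chunks in; 1024 / 64 = 16 chunks out.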
    expect(upstreamChunkCount).toEqual(128);
    expect(downstreamChunkCount).toEqual(16);
  });

  it("should join upstream chunks if they are too small (byteStream)", async () => {
    let upstreamChunkCount = 0;
    let downstreamChunkCount = 0;

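    // byteStream yields 7-byte chunks while the remaining count is positive,
    // so 1031 requested bytes overshoot to 148 chunks (1036 bytes).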
    const upstream = byteStream(1031, 7);
    const upstreamReader = upstream.getReader();

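    // Pass-through stream that counts upstream chunks and verifies their 7-byte size.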
    const midstream = new ReadableStream({
      async pull(controller) {
        const { value, done } = await upstreamReader.read();
        if (done) {
          controller.close();
        } else {
          expect(value.length).toBe(7);
          upstreamChunkCount += 1;
          controller.enqueue(value);
        }
      },
    });
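    // Pass the mock logger: buffering byte chunks is expected to emit the warning asserted below.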
    const downstream = createBufferedReadable(midstream, 49, logger);
    const downstreamReader = downstream.getReader();

    while (true) {
      const { done, value } = await downstreamReader.read();
      if (done) {
        break;
      } else {
        downstreamChunkCount += 1;
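        // The final chunk is the 7-byte remainder, so only larger chunks are asserted to be full-size.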
        if (value.byteLength > 7) {
          expect(value.byteLength).toBe(49);
        }
      }
    }

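    // 148 chunks of 7 bytes in; 1036 bytes = 21 full 49-byte chunks + one 7-byte remainder = 22 chunks out.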
    expect(upstreamChunkCount).toEqual(148);
    expect(downstreamChunkCount).toEqual(22);
    expect(logger.warn).toHaveBeenCalled();
  });
});