LibWeb: Push a temporary execution context for setTimeout
This fixes a crash seen when running stream tests.
This commit is contained in:
Parent: 4a1e109678
Commit: 1dc1bebd2a

Notes (github-actions[bot], 2024-11-01 17:12:52 +00:00):
Author: https://github.com/shannonbooth
Commit: https://github.com/LadybirdBrowser/ladybird/commit/1dc1bebd2af
Pull-request: https://github.com/LadybirdBrowser/ladybird/pull/2107
8 changed files with 2152 additions and 1 deletion
@ -0,0 +1,49 @@
Summary

Harness status: Error

Rerun

Found 39 tests

39 Pass

Details

Result  Test Name  Message
Pass  ReadableStream teeing with byte source: rs.tee() returns an array of two ReadableStreams
Pass  ReadableStream teeing with byte source: should be able to read one branch to the end without affecting the other
Pass  ReadableStream teeing with byte source: chunks should be cloned for each branch
Pass  ReadableStream teeing with byte source: chunks for BYOB requests from branch 1 should be cloned to branch 2
Pass  ReadableStream teeing with byte source: errors in the source should propagate to both branches
Pass  ReadableStream teeing with byte source: canceling branch1 should not impact branch2
Pass  ReadableStream teeing with byte source: canceling branch2 should not impact branch1
Pass  Running templatedRSTeeCancel with ReadableStream teeing with byte source
Pass  ReadableStream teeing with byte source: canceling both branches should aggregate the cancel reasons into an array
Pass  ReadableStream teeing with byte source: canceling both branches in reverse order should aggregate the cancel reasons into an array
Pass  ReadableStream teeing with byte source: failing to cancel the original stream should cause cancel() to reject on branches
Pass  ReadableStream teeing with byte source: erroring a teed stream should properly handle canceled branches
Pass  ReadableStream teeing with byte source: closing the original should close the branches
Pass  ReadableStream teeing with byte source: erroring the original should immediately error the branches
Pass  ReadableStream teeing with byte source: erroring the original should error pending reads from default reader
Pass  ReadableStream teeing with byte source: erroring the original should error pending reads from BYOB reader
Pass  ReadableStream teeing with byte source: canceling branch1 should finish when branch2 reads until end of stream
Pass  ReadableStream teeing with byte source: canceling branch1 should finish when original stream errors
Pass  ReadableStream teeing with byte source: should not pull any chunks if no branches are reading
Pass  ReadableStream teeing with byte source: should only pull enough to fill the emptiest queue
Pass  ReadableStream teeing with byte source: should not pull when original is already errored
Pass  ReadableStream teeing with byte source: stops pulling when original stream errors while branch 1 is reading
Pass  ReadableStream teeing with byte source: stops pulling when original stream errors while branch 2 is reading
Pass  ReadableStream teeing with byte source: stops pulling when original stream errors while both branches are reading
Pass  ReadableStream teeing with byte source: canceling both branches in sequence with delay
Pass  ReadableStream teeing with byte source: failing to cancel when canceling both branches in sequence with delay
Pass  ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch1, cancel branch2
Pass  ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch2, cancel branch1
Pass  ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch2, enqueue to branch1
Pass  ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch1, respond to branch2
Pass  ReadableStream teeing with byte source: pull with BYOB reader, then pull with default reader
Pass  ReadableStream teeing with byte source: pull with default reader, then pull with BYOB reader
Pass  ReadableStream teeing with byte source: read from branch2, then read from branch1
Pass  ReadableStream teeing with byte source: read from branch1 with default reader, then close while branch2 has pending BYOB read
Pass  ReadableStream teeing with byte source: read from branch2 with default reader, then close while branch1 has pending BYOB read
Pass  ReadableStream teeing with byte source: close when both branches have pending BYOB reads
Pass  ReadableStream teeing with byte source: enqueue() and close() while both branches are pulling
Pass  ReadableStream teeing with byte source: respond() and close() while both branches are pulling
Pass  ReadableStream teeing with byte source: reading an array with a byte offset should clone correctly
@ -0,0 +1,18 @@
<!doctype html>
<meta charset=utf-8>

<script>
self.GLOBAL = {
  isWindow: function() { return true; },
  isWorker: function() { return false; },
  isShadowRealm: function() { return false; },
};
</script>
<script src="../../resources/testharness.js"></script>
<script src="../../resources/testharnessreport.js"></script>
<script src="../resources/rs-utils.js"></script>
<script src="../resources/test-utils.js"></script>
<script src="../resources/recording-streams.js"></script>
<script src="../resources/rs-test-templates.js"></script>
<div id=log></div>
<script src="../../streams/readable-byte-streams/tee.any.js"></script>
@ -0,0 +1,969 @@
// META: global=window,worker,shadowrealm
// META: script=../resources/rs-utils.js
// META: script=../resources/test-utils.js
// META: script=../resources/recording-streams.js
// META: script=../resources/rs-test-templates.js
'use strict';

test(() => {

  const rs = new ReadableStream({ type: 'bytes' });
  const result = rs.tee();

  assert_true(Array.isArray(result), 'return value should be an array');
  assert_equals(result.length, 2, 'array should have length 2');
  assert_equals(result[0].constructor, ReadableStream, '0th element should be a ReadableStream');
  assert_equals(result[1].constructor, ReadableStream, '1st element should be a ReadableStream');

}, 'ReadableStream teeing with byte source: rs.tee() returns an array of two ReadableStreams');

promise_test(async t => {
|
||||
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
start(c) {
|
||||
c.enqueue(new Uint8Array([0x01]));
|
||||
c.enqueue(new Uint8Array([0x02]));
|
||||
c.close();
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader({ mode: 'byob' });
|
||||
const reader2 = branch2.getReader({ mode: 'byob' });
|
||||
|
||||
reader2.closed.then(t.unreached_func('branch2 should not be closed'));
|
||||
|
||||
{
|
||||
const result = await reader1.read(new Uint8Array(1));
|
||||
assert_equals(result.done, false, 'done');
|
||||
assert_typed_array_equals(result.value, new Uint8Array([0x01]), 'value');
|
||||
}
|
||||
|
||||
{
|
||||
const result = await reader1.read(new Uint8Array(1));
|
||||
assert_equals(result.done, false, 'done');
|
||||
assert_typed_array_equals(result.value, new Uint8Array([0x02]), 'value');
|
||||
}
|
||||
|
||||
{
|
||||
const result = await reader1.read(new Uint8Array(1));
|
||||
assert_equals(result.done, true, 'done');
|
||||
assert_typed_array_equals(result.value, new Uint8Array([0]).subarray(0, 0), 'value');
|
||||
}
|
||||
|
||||
{
|
||||
const result = await reader2.read(new Uint8Array(1));
|
||||
assert_equals(result.done, false, 'done');
|
||||
assert_typed_array_equals(result.value, new Uint8Array([0x01]), 'value');
|
||||
}
|
||||
|
||||
await reader1.closed;
|
||||
|
||||
}, 'ReadableStream teeing with byte source: should be able to read one branch to the end without affecting the other');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
let pullCount = 0;
|
||||
const enqueuedChunk = new Uint8Array([0x01]);
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
pull(c) {
|
||||
++pullCount;
|
||||
if (pullCount === 1) {
|
||||
c.enqueue(enqueuedChunk);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader();
|
||||
const reader2 = branch2.getReader();
|
||||
|
||||
const [result1, result2] = await Promise.all([reader1.read(), reader2.read()]);
|
||||
assert_equals(result1.done, false, 'reader1 done');
|
||||
assert_equals(result2.done, false, 'reader2 done');
|
||||
|
||||
const view1 = result1.value;
|
||||
const view2 = result2.value;
|
||||
assert_typed_array_equals(view1, new Uint8Array([0x01]), 'reader1 value');
|
||||
assert_typed_array_equals(view2, new Uint8Array([0x01]), 'reader2 value');
|
||||
|
||||
assert_not_equals(view1.buffer, view2.buffer, 'chunks should have different buffers');
|
||||
assert_not_equals(enqueuedChunk.buffer, view1.buffer, 'enqueued chunk and branch1\'s chunk should have different buffers');
|
||||
assert_not_equals(enqueuedChunk.buffer, view2.buffer, 'enqueued chunk and branch2\'s chunk should have different buffers');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: chunks should be cloned for each branch');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
let pullCount = 0;
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
pull(c) {
|
||||
++pullCount;
|
||||
if (pullCount === 1) {
|
||||
c.byobRequest.view[0] = 0x01;
|
||||
c.byobRequest.respond(1);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader({ mode: 'byob' });
|
||||
const reader2 = branch2.getReader();
|
||||
const buffer = new Uint8Array([42, 42, 42]).buffer;
|
||||
|
||||
{
|
||||
const result = await reader1.read(new Uint8Array(buffer, 0, 1));
|
||||
assert_equals(result.done, false, 'done');
|
||||
assert_typed_array_equals(result.value, new Uint8Array([0x01, 42, 42]).subarray(0, 1), 'value');
|
||||
}
|
||||
|
||||
{
|
||||
const result = await reader2.read();
|
||||
assert_equals(result.done, false, 'done');
|
||||
assert_typed_array_equals(result.value, new Uint8Array([0x01]), 'value');
|
||||
}
|
||||
|
||||
}, 'ReadableStream teeing with byte source: chunks for BYOB requests from branch 1 should be cloned to branch 2');
|
||||
|
||||
promise_test(async t => {
|
||||
|
||||
const theError = { name: 'boo!' };
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
start(c) {
|
||||
c.enqueue(new Uint8Array([0x01]));
|
||||
c.enqueue(new Uint8Array([0x02]));
|
||||
},
|
||||
pull() {
|
||||
throw theError;
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader({ mode: 'byob' });
|
||||
const reader2 = branch2.getReader({ mode: 'byob' });
|
||||
|
||||
{
|
||||
const result = await reader1.read(new Uint8Array(1));
|
||||
assert_equals(result.done, false, 'first read from branch1 should not be done');
|
||||
assert_typed_array_equals(result.value, new Uint8Array([0x01]), 'first read from branch1');
|
||||
}
|
||||
|
||||
{
|
||||
const result = await reader1.read(new Uint8Array(1));
|
||||
assert_equals(result.done, false, 'second read from branch1 should not be done');
|
||||
assert_typed_array_equals(result.value, new Uint8Array([0x02]), 'second read from branch1');
|
||||
}
|
||||
|
||||
await promise_rejects_exactly(t, theError, reader1.read(new Uint8Array(1)));
|
||||
await promise_rejects_exactly(t, theError, reader2.read(new Uint8Array(1)));
|
||||
|
||||
await Promise.all([
|
||||
promise_rejects_exactly(t, theError, reader1.closed),
|
||||
promise_rejects_exactly(t, theError, reader2.closed)
|
||||
]);
|
||||
|
||||
}, 'ReadableStream teeing with byte source: errors in the source should propagate to both branches');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
start(c) {
|
||||
c.enqueue(new Uint8Array([0x01]));
|
||||
c.enqueue(new Uint8Array([0x02]));
|
||||
c.close();
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
branch1.cancel();
|
||||
|
||||
const [chunks1, chunks2] = await Promise.all([readableStreamToArray(branch1), readableStreamToArray(branch2)]);
|
||||
assert_array_equals(chunks1, [], 'branch1 should have no chunks');
|
||||
assert_equals(chunks2.length, 2, 'branch2 should have two chunks');
|
||||
assert_typed_array_equals(chunks2[0], new Uint8Array([0x01]), 'first chunk from branch2');
|
||||
assert_typed_array_equals(chunks2[1], new Uint8Array([0x02]), 'second chunk from branch2');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: canceling branch1 should not impact branch2');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
start(c) {
|
||||
c.enqueue(new Uint8Array([0x01]));
|
||||
c.enqueue(new Uint8Array([0x02]));
|
||||
c.close();
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
branch2.cancel();
|
||||
|
||||
const [chunks1, chunks2] = await Promise.all([readableStreamToArray(branch1), readableStreamToArray(branch2)]);
|
||||
assert_equals(chunks1.length, 2, 'branch1 should have two chunks');
|
||||
assert_typed_array_equals(chunks1[0], new Uint8Array([0x01]), 'first chunk from branch1');
|
||||
assert_typed_array_equals(chunks1[1], new Uint8Array([0x02]), 'second chunk from branch1');
|
||||
assert_array_equals(chunks2, [], 'branch2 should have no chunks');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: canceling branch2 should not impact branch1');
|
||||
|
||||
templatedRSTeeCancel('ReadableStream teeing with byte source', (extras) => {
|
||||
return new ReadableStream({ type: 'bytes', ...extras });
|
||||
});
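// templatedRSTeeCancel() is defined in ../resources/rs-test-templates.js (only
// partially shown in this diff). It registers the shared cancellation subtests
// reported in the results above, such as "canceling both branches should
// aggregate the cancel reasons into an array".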
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
let controller;
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
start(c) {
|
||||
controller = c;
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader({ mode: 'byob' });
|
||||
const reader2 = branch2.getReader({ mode: 'byob' });
|
||||
|
||||
const promise = Promise.all([reader1.closed, reader2.closed]);
|
||||
|
||||
controller.close();
|
||||
|
||||
// The branches are created with HWM 0, so we need to read from at least one of them
|
||||
// to observe the stream becoming closed.
|
||||
const read1 = await reader1.read(new Uint8Array(1));
|
||||
assert_equals(read1.done, true, 'first read from branch1 should be done');
|
||||
|
||||
await promise;
|
||||
|
||||
}, 'ReadableStream teeing with byte source: closing the original should close the branches');
|
||||
|
||||
promise_test(async t => {
|
||||
|
||||
let controller;
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
start(c) {
|
||||
controller = c;
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader({ mode: 'byob' });
|
||||
const reader2 = branch2.getReader({ mode: 'byob' });
|
||||
|
||||
const theError = { name: 'boo!' };
|
||||
const promise = Promise.all([
|
||||
promise_rejects_exactly(t, theError, reader1.closed),
|
||||
promise_rejects_exactly(t, theError, reader2.closed)
|
||||
]);
|
||||
|
||||
controller.error(theError);
|
||||
await promise;
|
||||
|
||||
}, 'ReadableStream teeing with byte source: erroring the original should immediately error the branches');
|
||||
|
||||
promise_test(async t => {
|
||||
|
||||
let controller;
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
start(c) {
|
||||
controller = c;
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader();
|
||||
const reader2 = branch2.getReader();
|
||||
|
||||
const theError = { name: 'boo!' };
|
||||
const promise = Promise.all([
|
||||
promise_rejects_exactly(t, theError, reader1.read()),
|
||||
promise_rejects_exactly(t, theError, reader2.read())
|
||||
]);
|
||||
|
||||
controller.error(theError);
|
||||
await promise;
|
||||
|
||||
}, 'ReadableStream teeing with byte source: erroring the original should error pending reads from default reader');
|
||||
|
||||
promise_test(async t => {
|
||||
|
||||
let controller;
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
start(c) {
|
||||
controller = c;
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader({ mode: 'byob' });
|
||||
const reader2 = branch2.getReader({ mode: 'byob' });
|
||||
|
||||
const theError = { name: 'boo!' };
|
||||
const promise = Promise.all([
|
||||
promise_rejects_exactly(t, theError, reader1.read(new Uint8Array(1))),
|
||||
promise_rejects_exactly(t, theError, reader2.read(new Uint8Array(1)))
|
||||
]);
|
||||
|
||||
controller.error(theError);
|
||||
await promise;
|
||||
|
||||
}, 'ReadableStream teeing with byte source: erroring the original should error pending reads from BYOB reader');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
let controller;
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
start(c) {
|
||||
controller = c;
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader({ mode: 'byob' });
|
||||
const reader2 = branch2.getReader({ mode: 'byob' });
|
||||
const cancelPromise = reader2.cancel();
|
||||
|
||||
controller.enqueue(new Uint8Array([0x01]));
|
||||
|
||||
const read1 = await reader1.read(new Uint8Array(1));
|
||||
assert_equals(read1.done, false, 'first read() from branch1 should not be done');
|
||||
assert_typed_array_equals(read1.value, new Uint8Array([0x01]), 'first read() from branch1');
|
||||
|
||||
controller.close();
|
||||
|
||||
const read2 = await reader1.read(new Uint8Array(1));
|
||||
assert_equals(read2.done, true, 'second read() from branch1 should be done');
|
||||
|
||||
await Promise.all([
|
||||
reader1.closed,
|
||||
cancelPromise
|
||||
]);
|
||||
|
||||
}, 'ReadableStream teeing with byte source: canceling branch1 should finish when branch2 reads until end of stream');
|
||||
|
||||
promise_test(async t => {
|
||||
|
||||
let controller;
|
||||
const theError = { name: 'boo!' };
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
start(c) {
|
||||
controller = c;
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader({ mode: 'byob' });
|
||||
const reader2 = branch2.getReader({ mode: 'byob' });
|
||||
const cancelPromise = reader2.cancel();
|
||||
|
||||
controller.error(theError);
|
||||
|
||||
await Promise.all([
|
||||
promise_rejects_exactly(t, theError, reader1.read(new Uint8Array(1))),
|
||||
cancelPromise
|
||||
]);
|
||||
|
||||
}, 'ReadableStream teeing with byte source: canceling branch1 should finish when original stream errors');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
|
||||
// Create two branches, each with a HWM of 0. This should result in no chunks being pulled.
|
||||
rs.tee();
|
||||
|
||||
await flushAsyncEvents();
|
||||
assert_array_equals(rs.events, [], 'pull should not be called');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: should not pull any chunks if no branches are reading');
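// Not part of the imported test file: flushAsyncEvents() (used throughout
// these tests) and the delay() helper it builds on live in
// ../resources/test-utils.js, which is not included in this diff. The sketch
// below shows the usual WPT implementations (an assumption; the real file may
// differ). Both bottom out in step_timeout(), i.e. setTimeout, which is
// presumably how these stream tests reached the crash fixed by this commit.
self.delay = ms => new Promise(resolve => step_timeout(resolve, ms));

// Go around the event loop several times so pending stream microtasks settle
// before a test makes its assertions.
self.flushAsyncEvents = () =>
  delay(0).then(() => delay(0)).then(() => delay(0)).then(() => delay(0));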
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = recordingReadableStream({
|
||||
type: 'bytes',
|
||||
pull(controller) {
|
||||
controller.enqueue(new Uint8Array([0x01]));
|
||||
}
|
||||
});
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
await Promise.all([
|
||||
reader1.read(new Uint8Array(1)),
|
||||
reader2.read(new Uint8Array(1))
|
||||
]);
|
||||
assert_array_equals(rs.events, ['pull'], 'pull should be called once');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: should only pull enough to fill the emptiest queue');
|
||||
|
||||
promise_test(async t => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
const theError = { name: 'boo!' };
|
||||
|
||||
rs.controller.error(theError);
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
|
||||
await flushAsyncEvents();
|
||||
assert_array_equals(rs.events, [], 'pull should not be called');
|
||||
|
||||
await Promise.all([
|
||||
promise_rejects_exactly(t, theError, reader1.closed),
|
||||
promise_rejects_exactly(t, theError, reader2.closed)
|
||||
]);
|
||||
|
||||
}, 'ReadableStream teeing with byte source: should not pull when original is already errored');
|
||||
|
||||
for (const branch of [1, 2]) {
|
||||
promise_test(async t => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
const theError = { name: 'boo!' };
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
|
||||
await flushAsyncEvents();
|
||||
assert_array_equals(rs.events, [], 'pull should not be called');
|
||||
|
||||
const reader = (branch === 1) ? reader1 : reader2;
|
||||
const read1 = reader.read(new Uint8Array(1));
|
||||
|
||||
await flushAsyncEvents();
|
||||
assert_array_equals(rs.events, ['pull'], 'pull should be called once');
|
||||
|
||||
rs.controller.error(theError);
|
||||
|
||||
await Promise.all([
|
||||
promise_rejects_exactly(t, theError, read1),
|
||||
promise_rejects_exactly(t, theError, reader1.closed),
|
||||
promise_rejects_exactly(t, theError, reader2.closed)
|
||||
]);
|
||||
|
||||
await flushAsyncEvents();
|
||||
assert_array_equals(rs.events, ['pull'], 'pull should be called once');
|
||||
|
||||
}, `ReadableStream teeing with byte source: stops pulling when original stream errors while branch ${branch} is reading`);
|
||||
}
|
||||
|
||||
promise_test(async t => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
const theError = { name: 'boo!' };
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
|
||||
await flushAsyncEvents();
|
||||
assert_array_equals(rs.events, [], 'pull should not be called');
|
||||
|
||||
const read1 = reader1.read(new Uint8Array(1));
|
||||
const read2 = reader2.read(new Uint8Array(1));
|
||||
|
||||
await flushAsyncEvents();
|
||||
assert_array_equals(rs.events, ['pull'], 'pull should be called once');
|
||||
|
||||
rs.controller.error(theError);
|
||||
|
||||
await Promise.all([
|
||||
promise_rejects_exactly(t, theError, read1),
|
||||
promise_rejects_exactly(t, theError, read2),
|
||||
promise_rejects_exactly(t, theError, reader1.closed),
|
||||
promise_rejects_exactly(t, theError, reader2.closed)
|
||||
]);
|
||||
|
||||
await flushAsyncEvents();
|
||||
assert_array_equals(rs.events, ['pull'], 'pull should be called once');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: stops pulling when original stream errors while both branches are reading');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
|
||||
const read1 = reader1.read(new Uint8Array([0x11]));
|
||||
const read2 = reader2.read(new Uint8Array([0x22]));
|
||||
|
||||
const cancel1 = reader1.cancel();
|
||||
await flushAsyncEvents();
|
||||
const cancel2 = reader2.cancel();
|
||||
|
||||
const result1 = await read1;
|
||||
assert_object_equals(result1, { value: undefined, done: true });
|
||||
const result2 = await read2;
|
||||
assert_object_equals(result2, { value: undefined, done: true });
|
||||
|
||||
await Promise.all([cancel1, cancel2]);
|
||||
|
||||
}, 'ReadableStream teeing with byte source: canceling both branches in sequence with delay');
|
||||
|
||||
promise_test(async t => {
|
||||
|
||||
const theError = { name: 'boo!' };
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
cancel() {
|
||||
throw theError;
|
||||
}
|
||||
});
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
|
||||
const read1 = reader1.read(new Uint8Array([0x11]));
|
||||
const read2 = reader2.read(new Uint8Array([0x22]));
|
||||
|
||||
const cancel1 = reader1.cancel();
|
||||
await flushAsyncEvents();
|
||||
const cancel2 = reader2.cancel();
|
||||
|
||||
const result1 = await read1;
|
||||
assert_object_equals(result1, { value: undefined, done: true });
|
||||
const result2 = await read2;
|
||||
assert_object_equals(result2, { value: undefined, done: true });
|
||||
|
||||
await Promise.all([
|
||||
promise_rejects_exactly(t, theError, cancel1),
|
||||
promise_rejects_exactly(t, theError, cancel2)
|
||||
]);
|
||||
|
||||
}, 'ReadableStream teeing with byte source: failing to cancel when canceling both branches in sequence with delay');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
let cancelResolve;
|
||||
const cancelCalled = new Promise((resolve) => {
|
||||
cancelResolve = resolve;
|
||||
});
|
||||
const rs = recordingReadableStream({
|
||||
type: 'bytes',
|
||||
cancel() {
|
||||
cancelResolve();
|
||||
}
|
||||
});
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
|
||||
const read1 = reader1.read(new Uint8Array([0x11]));
|
||||
await flushAsyncEvents();
|
||||
const read2 = reader2.read(new Uint8Array([0x22]));
|
||||
await flushAsyncEvents();
|
||||
|
||||
// We are reading into branch1's buffer.
|
||||
const byobRequest1 = rs.controller.byobRequest;
|
||||
assert_not_equals(byobRequest1, null);
|
||||
assert_typed_array_equals(byobRequest1.view, new Uint8Array([0x11]), 'byobRequest1.view');
|
||||
|
||||
// Cancelling branch1 should not affect the BYOB request.
|
||||
const cancel1 = reader1.cancel();
|
||||
const result1 = await read1;
|
||||
assert_equals(result1.done, true);
|
||||
assert_equals(result1.value, undefined);
|
||||
await flushAsyncEvents();
|
||||
const byobRequest2 = rs.controller.byobRequest;
|
||||
assert_typed_array_equals(byobRequest2.view, new Uint8Array([0x11]), 'byobRequest2.view');
|
||||
|
||||
// Cancelling branch1 should invalidate the BYOB request.
|
||||
const cancel2 = reader2.cancel();
|
||||
await cancelCalled;
|
||||
const byobRequest3 = rs.controller.byobRequest;
|
||||
assert_equals(byobRequest3, null);
|
||||
const result2 = await read2;
|
||||
assert_equals(result2.done, true);
|
||||
assert_equals(result2.value, undefined);
|
||||
|
||||
await Promise.all([cancel1, cancel2]);
|
||||
|
||||
}, 'ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch1, cancel branch2');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
let cancelResolve;
|
||||
const cancelCalled = new Promise((resolve) => {
|
||||
cancelResolve = resolve;
|
||||
});
|
||||
const rs = recordingReadableStream({
|
||||
type: 'bytes',
|
||||
cancel() {
|
||||
cancelResolve();
|
||||
}
|
||||
});
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
|
||||
const read1 = reader1.read(new Uint8Array([0x11]));
|
||||
await flushAsyncEvents();
|
||||
const read2 = reader2.read(new Uint8Array([0x22]));
|
||||
await flushAsyncEvents();
|
||||
|
||||
// We are reading into branch1's buffer.
|
||||
const byobRequest1 = rs.controller.byobRequest;
|
||||
assert_not_equals(byobRequest1, null);
|
||||
assert_typed_array_equals(byobRequest1.view, new Uint8Array([0x11]), 'byobRequest1.view');
|
||||
|
||||
// Cancelling branch2 should not affect the BYOB request.
|
||||
const cancel2 = reader2.cancel();
|
||||
const result2 = await read2;
|
||||
assert_equals(result2.done, true);
|
||||
assert_equals(result2.value, undefined);
|
||||
await flushAsyncEvents();
|
||||
const byobRequest2 = rs.controller.byobRequest;
|
||||
assert_typed_array_equals(byobRequest2.view, new Uint8Array([0x11]), 'byobRequest2.view');
|
||||
|
||||
// Cancelling branch1 should invalidate the BYOB request.
|
||||
const cancel1 = reader1.cancel();
|
||||
await cancelCalled;
|
||||
const byobRequest3 = rs.controller.byobRequest;
|
||||
assert_equals(byobRequest3, null);
|
||||
const result1 = await read1;
|
||||
assert_equals(result1.done, true);
|
||||
assert_equals(result1.value, undefined);
|
||||
|
||||
await Promise.all([cancel1, cancel2]);
|
||||
|
||||
}, 'ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch2, cancel branch1');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
|
||||
const read1 = reader1.read(new Uint8Array([0x11]));
|
||||
await flushAsyncEvents();
|
||||
const read2 = reader2.read(new Uint8Array([0x22]));
|
||||
await flushAsyncEvents();
|
||||
|
||||
// We are reading into branch1's buffer.
|
||||
assert_typed_array_equals(rs.controller.byobRequest.view, new Uint8Array([0x11]), 'first byobRequest.view');
|
||||
|
||||
// Cancelling branch2 should not affect the BYOB request.
|
||||
reader2.cancel();
|
||||
const result2 = await read2;
|
||||
assert_equals(result2.done, true);
|
||||
assert_equals(result2.value, undefined);
|
||||
await flushAsyncEvents();
|
||||
assert_typed_array_equals(rs.controller.byobRequest.view, new Uint8Array([0x11]), 'second byobRequest.view');
|
||||
|
||||
// Respond to the BYOB request.
|
||||
rs.controller.byobRequest.view[0] = 0x33;
|
||||
rs.controller.byobRequest.respond(1);
|
||||
|
||||
// branch1 should receive the read chunk.
|
||||
const result1 = await read1;
|
||||
assert_equals(result1.done, false);
|
||||
assert_typed_array_equals(result1.value, new Uint8Array([0x33]), 'first read() from branch1');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch2, enqueue to branch1');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
|
||||
const read1 = reader1.read(new Uint8Array([0x11]));
|
||||
await flushAsyncEvents();
|
||||
const read2 = reader2.read(new Uint8Array([0x22]));
|
||||
await flushAsyncEvents();
|
||||
|
||||
// We are reading into branch1's buffer.
|
||||
assert_typed_array_equals(rs.controller.byobRequest.view, new Uint8Array([0x11]), 'first byobRequest.view');
|
||||
|
||||
// Cancelling branch1 should not affect the BYOB request.
|
||||
reader1.cancel();
|
||||
const result1 = await read1;
|
||||
assert_equals(result1.done, true);
|
||||
assert_equals(result1.value, undefined);
|
||||
await flushAsyncEvents();
|
||||
assert_typed_array_equals(rs.controller.byobRequest.view, new Uint8Array([0x11]), 'second byobRequest.view');
|
||||
|
||||
// Respond to the BYOB request.
|
||||
rs.controller.byobRequest.view[0] = 0x33;
|
||||
rs.controller.byobRequest.respond(1);
|
||||
|
||||
// branch2 should receive the read chunk.
|
||||
const result2 = await read2;
|
||||
assert_equals(result2.done, false);
|
||||
assert_typed_array_equals(result2.value, new Uint8Array([0x33]), 'first read() from branch2');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: read from branch1 and branch2, cancel branch1, respond to branch2');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
let pullCount = 0;
|
||||
const byobRequestDefined = [];
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
pull(c) {
|
||||
++pullCount;
|
||||
byobRequestDefined.push(c.byobRequest !== null);
|
||||
c.enqueue(new Uint8Array([pullCount]));
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, _] = rs.tee();
|
||||
const reader1 = branch1.getReader({ mode: 'byob' });
|
||||
|
||||
const result1 = await reader1.read(new Uint8Array([0x11]));
|
||||
assert_equals(result1.done, false, 'first read should not be done');
|
||||
assert_typed_array_equals(result1.value, new Uint8Array([0x1]), 'first read');
|
||||
assert_equals(pullCount, 1, 'pull() should be called once');
|
||||
assert_equals(byobRequestDefined[0], true, 'should have created a BYOB request for first read');
|
||||
|
||||
reader1.releaseLock();
|
||||
const reader2 = branch1.getReader();
|
||||
|
||||
const result2 = await reader2.read();
|
||||
assert_equals(result2.done, false, 'second read should not be done');
|
||||
assert_typed_array_equals(result2.value, new Uint8Array([0x2]), 'second read');
|
||||
assert_equals(pullCount, 2, 'pull() should be called twice');
|
||||
assert_equals(byobRequestDefined[1], false, 'should not have created a BYOB request for second read');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: pull with BYOB reader, then pull with default reader');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
let pullCount = 0;
|
||||
const byobRequestDefined = [];
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
pull(c) {
|
||||
++pullCount;
|
||||
byobRequestDefined.push(c.byobRequest !== null);
|
||||
c.enqueue(new Uint8Array([pullCount]));
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, _] = rs.tee();
|
||||
const reader1 = branch1.getReader();
|
||||
|
||||
const result1 = await reader1.read();
|
||||
assert_equals(result1.done, false, 'first read should not be done');
|
||||
assert_typed_array_equals(result1.value, new Uint8Array([0x1]), 'first read');
|
||||
assert_equals(pullCount, 1, 'pull() should be called once');
|
||||
assert_equals(byobRequestDefined[0], false, 'should not have created a BYOB request for first read');
|
||||
|
||||
reader1.releaseLock();
|
||||
const reader2 = branch1.getReader({ mode: 'byob' });
|
||||
|
||||
const result2 = await reader2.read(new Uint8Array([0x22]));
|
||||
assert_equals(result2.done, false, 'second read should not be done');
|
||||
assert_typed_array_equals(result2.value, new Uint8Array([0x2]), 'second read');
|
||||
assert_equals(pullCount, 2, 'pull() should be called twice');
|
||||
assert_equals(byobRequestDefined[1], true, 'should have created a BYOB request for second read');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: pull with default reader, then pull with BYOB reader');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = recordingReadableStream({
|
||||
type: 'bytes'
|
||||
});
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
|
||||
// Wait for each branch's start() promise to resolve.
|
||||
await flushAsyncEvents();
|
||||
|
||||
const read2 = reader2.read(new Uint8Array([0x22]));
|
||||
const read1 = reader1.read(new Uint8Array([0x11]));
|
||||
await flushAsyncEvents();
|
||||
|
||||
// branch2 should provide the BYOB request.
|
||||
const byobRequest = rs.controller.byobRequest;
|
||||
assert_typed_array_equals(byobRequest.view, new Uint8Array([0x22]), 'first BYOB request');
|
||||
byobRequest.view[0] = 0x01;
|
||||
byobRequest.respond(1);
|
||||
|
||||
const result1 = await read1;
|
||||
assert_equals(result1.done, false, 'first read should not be done');
|
||||
assert_typed_array_equals(result1.value, new Uint8Array([0x1]), 'first read');
|
||||
|
||||
const result2 = await read2;
|
||||
assert_equals(result2.done, false, 'second read should not be done');
|
||||
assert_typed_array_equals(result2.value, new Uint8Array([0x1]), 'second read');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: read from branch2, then read from branch1');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader();
|
||||
const reader2 = branch2.getReader({ mode: 'byob' });
|
||||
await flushAsyncEvents();
|
||||
|
||||
const read1 = reader1.read();
|
||||
const read2 = reader2.read(new Uint8Array([0x22]));
|
||||
await flushAsyncEvents();
|
||||
|
||||
// There should be no BYOB request.
|
||||
assert_equals(rs.controller.byobRequest, null, 'first BYOB request');
|
||||
|
||||
// Close the stream.
|
||||
rs.controller.close();
|
||||
|
||||
const result1 = await read1;
|
||||
assert_equals(result1.done, true, 'read from branch1 should be done');
|
||||
assert_equals(result1.value, undefined, 'read from branch1');
|
||||
|
||||
// branch2 should get its buffer back.
|
||||
const result2 = await read2;
|
||||
assert_equals(result2.done, true, 'read from branch2 should be done');
|
||||
assert_typed_array_equals(result2.value, new Uint8Array([0x22]).subarray(0, 0), 'read from branch2');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: read from branch1 with default reader, then close while branch2 has pending BYOB read');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader({ mode: 'byob' });
|
||||
const reader2 = branch2.getReader();
|
||||
await flushAsyncEvents();
|
||||
|
||||
const read2 = reader2.read();
|
||||
const read1 = reader1.read(new Uint8Array([0x11]));
|
||||
await flushAsyncEvents();
|
||||
|
||||
// There should be no BYOB request.
|
||||
assert_equals(rs.controller.byobRequest, null, 'first BYOB request');
|
||||
|
||||
// Close the stream.
|
||||
rs.controller.close();
|
||||
|
||||
const result2 = await read2;
|
||||
assert_equals(result2.done, true, 'read from branch2 should be done');
|
||||
assert_equals(result2.value, undefined, 'read from branch2');
|
||||
|
||||
// branch1 should get its buffer back.
|
||||
const result1 = await read1;
|
||||
assert_equals(result1.done, true, 'read from branch1 should be done');
|
||||
assert_typed_array_equals(result1.value, new Uint8Array([0x11]).subarray(0, 0), 'read from branch1');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: read from branch2 with default reader, then close while branch1 has pending BYOB read');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
await flushAsyncEvents();
|
||||
|
||||
const read1 = reader1.read(new Uint8Array([0x11]));
|
||||
const read2 = reader2.read(new Uint8Array([0x22]));
|
||||
await flushAsyncEvents();
|
||||
|
||||
// branch1 should provide the BYOB request.
|
||||
const byobRequest = rs.controller.byobRequest;
|
||||
assert_typed_array_equals(byobRequest.view, new Uint8Array([0x11]), 'first BYOB request');
|
||||
|
||||
// Close the stream.
|
||||
rs.controller.close();
|
||||
byobRequest.respond(0);
|
||||
|
||||
// Both branches should get their buffers back.
|
||||
const result1 = await read1;
|
||||
assert_equals(result1.done, true, 'first read should be done');
|
||||
assert_typed_array_equals(result1.value, new Uint8Array([0x11]).subarray(0, 0), 'first read');
|
||||
|
||||
const result2 = await read2;
|
||||
assert_equals(result2.done, true, 'second read should be done');
|
||||
assert_typed_array_equals(result2.value, new Uint8Array([0x22]).subarray(0, 0), 'second read');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: close when both branches have pending BYOB reads');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader());
|
||||
const branch1Reads = [reader1.read(), reader1.read()];
|
||||
const branch2Reads = [reader2.read(), reader2.read()];
|
||||
|
||||
await flushAsyncEvents();
|
||||
rs.controller.enqueue(new Uint8Array([0x11]));
|
||||
rs.controller.close();
|
||||
|
||||
const result1 = await branch1Reads[0];
|
||||
assert_equals(result1.done, false, 'first read() from branch1 should be not done');
|
||||
assert_typed_array_equals(result1.value, new Uint8Array([0x11]), 'first chunk from branch1 should be correct');
|
||||
const result2 = await branch2Reads[0];
|
||||
assert_equals(result2.done, false, 'first read() from branch2 should be not done');
|
||||
assert_typed_array_equals(result2.value, new Uint8Array([0x11]), 'first chunk from branch2 should be correct');
|
||||
|
||||
assert_object_equals(await branch1Reads[1], { value: undefined, done: true }, 'second read() from branch1 should be done');
|
||||
assert_object_equals(await branch2Reads[1], { value: undefined, done: true }, 'second read() from branch2 should be done');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: enqueue() and close() while both branches are pulling');
|
||||
|
||||
promise_test(async () => {
|
||||
|
||||
const rs = recordingReadableStream({ type: 'bytes' });
|
||||
|
||||
const [reader1, reader2] = rs.tee().map(branch => branch.getReader({ mode: 'byob' }));
|
||||
const branch1Reads = [reader1.read(new Uint8Array(1)), reader1.read(new Uint8Array(1))];
|
||||
const branch2Reads = [reader2.read(new Uint8Array(1)), reader2.read(new Uint8Array(1))];
|
||||
|
||||
await flushAsyncEvents();
|
||||
rs.controller.byobRequest.view[0] = 0x11;
|
||||
rs.controller.byobRequest.respond(1);
|
||||
rs.controller.close();
|
||||
|
||||
const result1 = await branch1Reads[0];
|
||||
assert_equals(result1.done, false, 'first read() from branch1 should be not done');
|
||||
assert_typed_array_equals(result1.value, new Uint8Array([0x11]), 'first chunk from branch1 should be correct');
|
||||
const result2 = await branch2Reads[0];
|
||||
assert_equals(result2.done, false, 'first read() from branch2 should be not done');
|
||||
assert_typed_array_equals(result2.value, new Uint8Array([0x11]), 'first chunk from branch2 should be correct');
|
||||
|
||||
const result3 = await branch1Reads[1];
|
||||
assert_equals(result3.done, true, 'second read() from branch1 should be done');
|
||||
assert_typed_array_equals(result3.value, new Uint8Array([0]).subarray(0, 0), 'second chunk from branch1 should be correct');
|
||||
const result4 = await branch2Reads[1];
|
||||
assert_equals(result4.done, true, 'second read() from branch2 should be done');
|
||||
assert_typed_array_equals(result4.value, new Uint8Array([0]).subarray(0, 0), 'second chunk from branch2 should be correct');
|
||||
|
||||
}, 'ReadableStream teeing with byte source: respond() and close() while both branches are pulling');
|
||||
|
||||
promise_test(async t => {
|
||||
let pullCount = 0;
|
||||
const arrayBuffer = new Uint8Array([0x01, 0x02, 0x03]).buffer;
|
||||
const enqueuedChunk = new Uint8Array(arrayBuffer, 2);
|
||||
assert_equals(enqueuedChunk.length, 1);
|
||||
assert_equals(enqueuedChunk.byteOffset, 2);
|
||||
const rs = new ReadableStream({
|
||||
type: 'bytes',
|
||||
pull(c) {
|
||||
++pullCount;
|
||||
if (pullCount === 1) {
|
||||
c.enqueue(enqueuedChunk);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const [branch1, branch2] = rs.tee();
|
||||
const reader1 = branch1.getReader();
|
||||
const reader2 = branch2.getReader();
|
||||
|
||||
const [result1, result2] = await Promise.all([reader1.read(), reader2.read()]);
|
||||
assert_equals(result1.done, false, 'reader1 done');
|
||||
assert_equals(result2.done, false, 'reader2 done');
|
||||
|
||||
const view1 = result1.value;
|
||||
const view2 = result2.value;
|
||||
// The first stream has the transferred buffer, but the second stream has the
|
||||
// cloned buffer.
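// (Per the ReadableByteStreamTee steps: enqueue() transfers the chunk's
// ArrayBuffer into the stream, so branch1's view keeps byteOffset 2 into the
// transferred 3-byte buffer, while the clone made for branch2 is a fresh,
// compacted 1-byte Uint8Array.)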
|
||||
const underlying = new Uint8Array([0x01, 0x02, 0x03]).buffer;
|
||||
assert_typed_array_equals(view1, new Uint8Array(underlying, 2), 'reader1 value');
|
||||
assert_typed_array_equals(view2, new Uint8Array([0x03]), 'reader2 value');
|
||||
}, 'ReadableStream teeing with byte source: reading an array with a byte offset should clone correctly');
|
|
@ -0,0 +1,131 @@
|
|||
'use strict';
|
||||
|
||||
self.recordingReadableStream = (extras = {}, strategy) => {
|
||||
let controllerToCopyOver;
|
||||
const stream = new ReadableStream({
|
||||
type: extras.type,
|
||||
start(controller) {
|
||||
controllerToCopyOver = controller;
|
||||
|
||||
if (extras.start) {
|
||||
return extras.start(controller);
|
||||
}
|
||||
|
||||
return undefined;
|
||||
},
|
||||
pull(controller) {
|
||||
stream.events.push('pull');
|
||||
|
||||
if (extras.pull) {
|
||||
return extras.pull(controller);
|
||||
}
|
||||
|
||||
return undefined;
|
||||
},
|
||||
cancel(reason) {
|
||||
stream.events.push('cancel', reason);
|
||||
stream.eventsWithoutPulls.push('cancel', reason);
|
||||
|
||||
if (extras.cancel) {
|
||||
return extras.cancel(reason);
|
||||
}
|
||||
|
||||
return undefined;
|
||||
}
|
||||
}, strategy);
|
||||
|
||||
stream.controller = controllerToCopyOver;
|
||||
stream.events = [];
|
||||
stream.eventsWithoutPulls = [];
|
||||
|
||||
return stream;
|
||||
};
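// Illustrative sketch, not part of the original file: the byte-stream tee
// tests above create a recording stream, drive the underlying source through
// the captured controller, and then assert on the recorded events, e.g.
// assert_array_equals(rs.events, ['pull'], 'pull should be called once').
const exampleRecordingStream = recordingReadableStream({ type: 'bytes' });
exampleRecordingStream.controller.enqueue(new Uint8Array([0x01]));
exampleRecordingStream.controller.close();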
|
||||
|
||||
self.recordingWritableStream = (extras = {}, strategy) => {
|
||||
let controllerToCopyOver;
|
||||
const stream = new WritableStream({
|
||||
start(controller) {
|
||||
controllerToCopyOver = controller;
|
||||
|
||||
if (extras.start) {
|
||||
return extras.start(controller);
|
||||
}
|
||||
|
||||
return undefined;
|
||||
},
|
||||
write(chunk, controller) {
|
||||
stream.events.push('write', chunk);
|
||||
|
||||
if (extras.write) {
|
||||
return extras.write(chunk, controller);
|
||||
}
|
||||
|
||||
return undefined;
|
||||
},
|
||||
close() {
|
||||
stream.events.push('close');
|
||||
|
||||
if (extras.close) {
|
||||
return extras.close();
|
||||
}
|
||||
|
||||
return undefined;
|
||||
},
|
||||
abort(e) {
|
||||
stream.events.push('abort', e);
|
||||
|
||||
if (extras.abort) {
|
||||
return extras.abort(e);
|
||||
}
|
||||
|
||||
return undefined;
|
||||
}
|
||||
}, strategy);
|
||||
|
||||
stream.controller = controllerToCopyOver;
|
||||
stream.events = [];
|
||||
|
||||
return stream;
|
||||
};
|
||||
|
||||
self.recordingTransformStream = (extras = {}, writableStrategy, readableStrategy) => {
|
||||
let controllerToCopyOver;
|
||||
const stream = new TransformStream({
|
||||
start(controller) {
|
||||
controllerToCopyOver = controller;
|
||||
|
||||
if (extras.start) {
|
||||
return extras.start(controller);
|
||||
}
|
||||
|
||||
return undefined;
|
||||
},
|
||||
|
||||
transform(chunk, controller) {
|
||||
stream.events.push('transform', chunk);
|
||||
|
||||
if (extras.transform) {
|
||||
return extras.transform(chunk, controller);
|
||||
}
|
||||
|
||||
controller.enqueue(chunk);
|
||||
|
||||
return undefined;
|
||||
},
|
||||
|
||||
flush(controller) {
|
||||
stream.events.push('flush');
|
||||
|
||||
if (extras.flush) {
|
||||
return extras.flush(controller);
|
||||
}
|
||||
|
||||
return undefined;
|
||||
}
|
||||
}, writableStrategy, readableStrategy);
|
||||
|
||||
stream.controller = controllerToCopyOver;
|
||||
stream.events = [];
|
||||
|
||||
return stream;
|
||||
};
|
|
@ -0,0 +1,721 @@
|
|||
'use strict';
|
||||
|
||||
// These tests can be run against any readable stream produced by the web platform that meets the given descriptions.
|
||||
// For readable stream tests, the factory should return the stream. For reader tests, the factory should return a
|
||||
// { stream, reader } object. (You can use this to vary the time at which you acquire a reader.)
|
||||
|
||||
self.templatedRSEmpty = (label, factory) => {
|
||||
test(() => {}, 'Running templatedRSEmpty with ' + label);
|
||||
|
||||
test(() => {
|
||||
|
||||
const rs = factory();
|
||||
|
||||
assert_equals(typeof rs.locked, 'boolean', 'has a boolean locked getter');
|
||||
assert_equals(typeof rs.cancel, 'function', 'has a cancel method');
|
||||
assert_equals(typeof rs.getReader, 'function', 'has a getReader method');
|
||||
assert_equals(typeof rs.pipeThrough, 'function', 'has a pipeThrough method');
|
||||
assert_equals(typeof rs.pipeTo, 'function', 'has a pipeTo method');
|
||||
assert_equals(typeof rs.tee, 'function', 'has a tee method');
|
||||
|
||||
}, label + ': instances have the correct methods and properties');
|
||||
|
||||
test(() => {
|
||||
const rs = factory();
|
||||
|
||||
assert_throws_js(TypeError, () => rs.getReader({ mode: '' }), 'empty string mode should throw');
|
||||
assert_throws_js(TypeError, () => rs.getReader({ mode: null }), 'null mode should throw');
|
||||
assert_throws_js(TypeError, () => rs.getReader({ mode: 'asdf' }), 'asdf mode should throw');
|
||||
assert_throws_js(TypeError, () => rs.getReader(5), '5 should throw');
|
||||
|
||||
// Should not throw
|
||||
rs.getReader(null);
|
||||
|
||||
}, label + ': calling getReader with invalid arguments should throw appropriate errors');
|
||||
};
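// Illustrative only (an assumed caller, not part of this file): per the
// factory contract described at the top of the file, a test suite passes a
// label plus a factory that returns a fresh stream on every call.
templatedRSEmpty('ReadableStream (illustrative empty stream)', () => new ReadableStream());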
|
||||
|
||||
self.templatedRSClosed = (label, factory) => {
|
||||
test(() => {}, 'Running templatedRSClosed with ' + label);
|
||||
|
||||
promise_test(() => {
|
||||
|
||||
const rs = factory();
|
||||
const cancelPromise1 = rs.cancel();
|
||||
const cancelPromise2 = rs.cancel();
|
||||
|
||||
assert_not_equals(cancelPromise1, cancelPromise2, 'cancel() calls should return distinct promises');
|
||||
|
||||
return Promise.all([
|
||||
cancelPromise1.then(v => assert_equals(v, undefined, 'first cancel() call should fulfill with undefined')),
|
||||
cancelPromise2.then(v => assert_equals(v, undefined, 'second cancel() call should fulfill with undefined'))
|
||||
]);
|
||||
|
||||
}, label + ': cancel() should return a distinct fulfilled promise each time');
|
||||
|
||||
test(() => {
|
||||
|
||||
const rs = factory();
|
||||
assert_false(rs.locked, 'locked getter should return false');
|
||||
|
||||
}, label + ': locked should be false');
|
||||
|
||||
test(() => {
|
||||
|
||||
const rs = factory();
|
||||
rs.getReader(); // getReader() should not throw.
|
||||
|
||||
}, label + ': getReader() should be OK');
|
||||
|
||||
test(() => {
|
||||
|
||||
const rs = factory();
|
||||
|
||||
const reader = rs.getReader();
|
||||
reader.releaseLock();
|
||||
|
||||
const reader2 = rs.getReader(); // Getting a second reader should not throw.
|
||||
reader2.releaseLock();
|
||||
|
||||
rs.getReader(); // Getting a third reader should not throw.
|
||||
|
||||
}, label + ': should be able to acquire multiple readers if they are released in succession');
|
||||
|
||||
test(() => {
|
||||
|
||||
const rs = factory();
|
||||
|
||||
rs.getReader();
|
||||
|
||||
assert_throws_js(TypeError, () => rs.getReader(), 'getting a second reader should throw');
|
||||
assert_throws_js(TypeError, () => rs.getReader(), 'getting a third reader should throw');
|
||||
|
||||
}, label + ': should not be able to acquire a second reader if we don\'t release the first one');
|
||||
};
|
||||
|
||||
self.templatedRSErrored = (label, factory, error) => {
|
||||
test(() => {}, 'Running templatedRSErrored with ' + label);
|
||||
|
||||
promise_test(t => {
|
||||
|
||||
const rs = factory();
|
||||
const reader = rs.getReader();
|
||||
|
||||
return Promise.all([
|
||||
promise_rejects_exactly(t, error, reader.closed),
|
||||
promise_rejects_exactly(t, error, reader.read())
|
||||
]);
|
||||
|
||||
}, label + ': getReader() should return a reader that acts errored');
|
||||
|
||||
promise_test(t => {
|
||||
|
||||
const rs = factory();
|
||||
const reader = rs.getReader();
|
||||
|
||||
return Promise.all([
|
||||
promise_rejects_exactly(t, error, reader.read()),
|
||||
promise_rejects_exactly(t, error, reader.read()),
|
||||
promise_rejects_exactly(t, error, reader.closed)
|
||||
]);
|
||||
|
||||
}, label + ': read() twice should give the error each time');
|
||||
|
||||
test(() => {
|
||||
const rs = factory();
|
||||
|
||||
assert_false(rs.locked, 'locked getter should return false');
|
||||
}, label + ': locked should be false');
|
||||
};
|
||||
|
||||
self.templatedRSErroredSyncOnly = (label, factory, error) => {
|
||||
test(() => {}, 'Running templatedRSErroredSyncOnly with ' + label);
|
||||
|
||||
promise_test(t => {
|
||||
|
||||
const rs = factory();
|
||||
rs.getReader().releaseLock();
|
||||
const reader = rs.getReader(); // Calling getReader() twice does not throw (the stream is not locked).
|
||||
|
||||
return promise_rejects_exactly(t, error, reader.closed);
|
||||
|
||||
}, label + ': should be able to obtain a second reader, with the correct closed promise');
|
||||
|
||||
test(() => {
|
||||
|
||||
const rs = factory();
|
||||
rs.getReader();
|
||||
|
||||
assert_throws_js(TypeError, () => rs.getReader(), 'getting a second reader should throw a TypeError');
|
||||
assert_throws_js(TypeError, () => rs.getReader(), 'getting a third reader should throw a TypeError');
|
||||
|
||||
}, label + ': should not be able to obtain additional readers if we don\'t release the first lock');
|
||||
|
||||
promise_test(t => {
|
||||
|
||||
const rs = factory();
|
||||
const cancelPromise1 = rs.cancel();
|
||||
const cancelPromise2 = rs.cancel();
|
||||
|
||||
assert_not_equals(cancelPromise1, cancelPromise2, 'cancel() calls should return distinct promises');
|
||||
|
||||
return Promise.all([
|
||||
promise_rejects_exactly(t, error, cancelPromise1),
|
||||
promise_rejects_exactly(t, error, cancelPromise2)
|
||||
]);
|
||||
|
||||
}, label + ': cancel() should return a distinct rejected promise each time');
|
||||
|
||||
promise_test(t => {
|
||||
|
||||
const rs = factory();
|
||||
const reader = rs.getReader();
|
||||
const cancelPromise1 = reader.cancel();
|
||||
const cancelPromise2 = reader.cancel();
|
||||
|
||||
assert_not_equals(cancelPromise1, cancelPromise2, 'cancel() calls should return distinct promises');
|
||||
|
||||
return Promise.all([
|
||||
promise_rejects_exactly(t, error, cancelPromise1),
|
||||
promise_rejects_exactly(t, error, cancelPromise2)
|
||||
]);
|
||||
|
||||
}, label + ': reader cancel() should return a distinct rejected promise each time');
|
||||
};
|
||||
|
||||
self.templatedRSEmptyReader = (label, factory) => {
  test(() => {}, 'Running templatedRSEmptyReader with ' + label);

  test(() => {

    const reader = factory().reader;

    assert_true('closed' in reader, 'has a closed property');
    assert_equals(typeof reader.closed.then, 'function', 'closed property is thenable');

    assert_equals(typeof reader.cancel, 'function', 'has a cancel method');
    assert_equals(typeof reader.read, 'function', 'has a read method');
    assert_equals(typeof reader.releaseLock, 'function', 'has a releaseLock method');

  }, label + ': instances have the correct methods and properties');

  test(() => {

    const stream = factory().stream;

    assert_true(stream.locked, 'locked getter should return true');

  }, label + ': locked should be true');

  promise_test(t => {

    const reader = factory().reader;

    reader.read().then(
      t.unreached_func('read() should not fulfill'),
      t.unreached_func('read() should not reject')
    );

    return delay(500);

  }, label + ': read() should never settle');

  promise_test(t => {

    const reader = factory().reader;

    reader.read().then(
      t.unreached_func('read() should not fulfill'),
      t.unreached_func('read() should not reject')
    );

    reader.read().then(
      t.unreached_func('read() should not fulfill'),
      t.unreached_func('read() should not reject')
    );

    return delay(500);

  }, label + ': two read()s should both never settle');

  test(() => {

    const reader = factory().reader;
    assert_not_equals(reader.read(), reader.read(), 'the promises returned should be distinct');

  }, label + ': read() should return distinct promises each time');

  test(() => {

    const stream = factory().stream;
    assert_throws_js(TypeError, () => stream.getReader(), 'stream.getReader() should throw a TypeError');

  }, label + ': getReader() again on the stream should fail');

  promise_test(async t => {

    const streamAndReader = factory();
    const stream = streamAndReader.stream;
    const reader = streamAndReader.reader;

    const read1 = reader.read();
    const read2 = reader.read();
    const closed = reader.closed;

    reader.releaseLock();

    assert_false(stream.locked, 'the stream should be unlocked');

    await Promise.all([
      promise_rejects_js(t, TypeError, read1, 'first read should reject'),
      promise_rejects_js(t, TypeError, read2, 'second read should reject'),
      promise_rejects_js(t, TypeError, closed, 'closed should reject')
    ]);

  }, label + ': releasing the lock should reject all pending read requests');

  promise_test(t => {

    const reader = factory().reader;
    reader.releaseLock();

    return Promise.all([
      promise_rejects_js(t, TypeError, reader.read()),
      promise_rejects_js(t, TypeError, reader.read())
    ]);

  }, label + ': releasing the lock should cause further read() calls to reject with a TypeError');

  promise_test(t => {

    const reader = factory().reader;

    const closedBefore = reader.closed;
    reader.releaseLock();
    const closedAfter = reader.closed;

    assert_equals(closedBefore, closedAfter, 'the closed promise should not change identity');

    return promise_rejects_js(t, TypeError, closedBefore);

  }, label + ': releasing the lock should cause closed calls to reject with a TypeError');

  test(() => {

    const streamAndReader = factory();
    const stream = streamAndReader.stream;
    const reader = streamAndReader.reader;

    reader.releaseLock();
    assert_false(stream.locked, 'locked getter should return false');

  }, label + ': releasing the lock should cause locked to become false');

  promise_test(() => {

    const reader = factory().reader;
    reader.cancel();

    return reader.read().then(r => {
      assert_object_equals(r, { value: undefined, done: true }, 'read()ing from the reader should give a done result');
    });

  }, label + ': canceling via the reader should cause the reader to act closed');

  promise_test(t => {

    const stream = factory().stream;
    return promise_rejects_js(t, TypeError, stream.cancel());

  }, label + ': canceling via the stream should fail');
};

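// Sketch (not part of the upstream WPT file): one plausible way a test file could
// drive this template. The label and the factory body below are illustrative
// assumptions; the template only requires factory() to return a fresh
// { stream, reader } pair for a stream that never produces chunks.
templatedRSEmptyReader('ReadableStream (empty) reader', () => {
  const stream = new ReadableStream();
  return { stream, reader: stream.getReader() };
});
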
self.templatedRSClosedReader = (label, factory) => {
  test(() => {}, 'Running templatedRSClosedReader with ' + label);

  promise_test(() => {

    const reader = factory().reader;

    return reader.read().then(v => {
      assert_object_equals(v, { value: undefined, done: true }, 'read() should fulfill correctly');
    });

  }, label + ': read() should fulfill with { value: undefined, done: true }');

  promise_test(() => {

    const reader = factory().reader;

    return Promise.all([
      reader.read().then(v => {
        assert_object_equals(v, { value: undefined, done: true }, 'read() should fulfill correctly');
      }),
      reader.read().then(v => {
        assert_object_equals(v, { value: undefined, done: true }, 'read() should fulfill correctly');
      })
    ]);

  }, label + ': read() multiple times should fulfill with { value: undefined, done: true }');

  promise_test(() => {

    const reader = factory().reader;

    return reader.read().then(() => reader.read()).then(v => {
      assert_object_equals(v, { value: undefined, done: true }, 'read() should fulfill correctly');
    });

  }, label + ': read() should work when used within another read() fulfill callback');

  promise_test(() => {

    const reader = factory().reader;

    return reader.closed.then(v => assert_equals(v, undefined, 'reader closed should fulfill with undefined'));

  }, label + ': closed should fulfill with undefined');

  promise_test(t => {

    const reader = factory().reader;

    const closedBefore = reader.closed;
    reader.releaseLock();
    const closedAfter = reader.closed;

    assert_not_equals(closedBefore, closedAfter, 'the closed promise should change identity');

    return Promise.all([
      closedBefore.then(v => assert_equals(v, undefined, 'reader.closed acquired before release should fulfill')),
      promise_rejects_js(t, TypeError, closedAfter)
    ]);

  }, label + ': releasing the lock should cause closed to reject and change identity');

  promise_test(() => {

    const reader = factory().reader;
    const cancelPromise1 = reader.cancel();
    const cancelPromise2 = reader.cancel();
    const closedReaderPromise = reader.closed;

    assert_not_equals(cancelPromise1, cancelPromise2, 'cancel() calls should return distinct promises');
    assert_not_equals(cancelPromise1, closedReaderPromise, 'cancel() promise 1 should be distinct from reader.closed');
    assert_not_equals(cancelPromise2, closedReaderPromise, 'cancel() promise 2 should be distinct from reader.closed');

    return Promise.all([
      cancelPromise1.then(v => assert_equals(v, undefined, 'first cancel() should fulfill with undefined')),
      cancelPromise2.then(v => assert_equals(v, undefined, 'second cancel() should fulfill with undefined'))
    ]);

  }, label + ': cancel() should return a distinct fulfilled promise each time');
};

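// Sketch (illustrative only, not upstream code): this template expects factory() to
// return a reader whose stream has already been closed; the label and factory body
// are assumptions.
templatedRSClosedReader('ReadableStream (closed via close()) reader', () => {
  let controller;
  const stream = new ReadableStream({ start(c) { controller = c; } });
  controller.close();
  return { stream, reader: stream.getReader() };
});
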
self.templatedRSErroredReader = (label, factory, error) => {
  test(() => {}, 'Running templatedRSErroredReader with ' + label);

  promise_test(t => {

    const reader = factory().reader;
    return promise_rejects_exactly(t, error, reader.closed);

  }, label + ': closed should reject with the error');

  promise_test(t => {

    const reader = factory().reader;
    const closedBefore = reader.closed;

    return promise_rejects_exactly(t, error, closedBefore).then(() => {
      reader.releaseLock();

      const closedAfter = reader.closed;
      assert_not_equals(closedBefore, closedAfter, 'the closed promise should change identity');

      return promise_rejects_js(t, TypeError, closedAfter);
    });

  }, label + ': releasing the lock should cause closed to reject and change identity');

  promise_test(t => {

    const reader = factory().reader;
    return promise_rejects_exactly(t, error, reader.read());

  }, label + ': read() should reject with the error');
};

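// Sketch (illustrative only, not upstream code): the errored-reader template also
// receives the error the stream was errored with, so closed/read() rejections can be
// compared against it; the error value and label are assumptions.
const rsTemplateError = new Error('boo');
templatedRSErroredReader('ReadableStream (errored) reader', () => {
  const stream = new ReadableStream({ start(c) { c.error(rsTemplateError); } });
  return { stream, reader: stream.getReader() };
}, rsTemplateError);
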
self.templatedRSTwoChunksOpenReader = (label, factory, chunks) => {
  test(() => {}, 'Running templatedRSTwoChunksOpenReader with ' + label);

  promise_test(() => {

    const reader = factory().reader;

    return Promise.all([
      reader.read().then(r => {
        assert_object_equals(r, { value: chunks[0], done: false }, 'first result should be correct');
      }),
      reader.read().then(r => {
        assert_object_equals(r, { value: chunks[1], done: false }, 'second result should be correct');
      })
    ]);

  }, label + ': calling read() twice without waiting will eventually give both chunks (sequential)');

  promise_test(() => {

    const reader = factory().reader;

    return reader.read().then(r => {
      assert_object_equals(r, { value: chunks[0], done: false }, 'first result should be correct');

      return reader.read().then(r2 => {
        assert_object_equals(r2, { value: chunks[1], done: false }, 'second result should be correct');
      });
    });

  }, label + ': calling read() twice without waiting will eventually give both chunks (nested)');

  test(() => {

    const reader = factory().reader;
    assert_not_equals(reader.read(), reader.read(), 'the promises returned should be distinct');

  }, label + ': read() should return distinct promises each time');

  promise_test(() => {

    const reader = factory().reader;

    const promise1 = reader.closed.then(v => {
      assert_equals(v, undefined, 'reader closed should fulfill with undefined');
    });

    const promise2 = reader.read().then(r => {
      assert_object_equals(r, { value: chunks[0], done: false },
        'promise returned before cancellation should fulfill with a chunk');
    });

    reader.cancel();

    const promise3 = reader.read().then(r => {
      assert_object_equals(r, { value: undefined, done: true },
        'promise returned after cancellation should fulfill with an end-of-stream signal');
    });

    return Promise.all([promise1, promise2, promise3]);

  }, label + ': cancel() after a read() should still give that single read result');
};

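// Sketch (illustrative only, not upstream code): this template additionally takes the
// two chunks the factory's stream will deliver, in order; chunk values and label are
// assumptions.
const twoChunks = ['a', 'b'];
templatedRSTwoChunksOpenReader('ReadableStream (two chunks) reader', () => {
  const stream = new ReadableStream({
    start(c) {
      c.enqueue(twoChunks[0]);
      c.enqueue(twoChunks[1]);
    }
  });
  return { stream, reader: stream.getReader() };
}, twoChunks);
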
self.templatedRSTwoChunksClosedReader = function (label, factory, chunks) {
  test(() => {}, 'Running templatedRSTwoChunksClosedReader with ' + label);

  promise_test(() => {

    const reader = factory().reader;

    return Promise.all([
      reader.read().then(r => {
        assert_object_equals(r, { value: chunks[0], done: false }, 'first result should be correct');
      }),
      reader.read().then(r => {
        assert_object_equals(r, { value: chunks[1], done: false }, 'second result should be correct');
      }),
      reader.read().then(r => {
        assert_object_equals(r, { value: undefined, done: true }, 'third result should be correct');
      })
    ]);

  }, label + ': third read(), without waiting, should give { value: undefined, done: true } (sequential)');

  promise_test(() => {

    const reader = factory().reader;

    return reader.read().then(r => {
      assert_object_equals(r, { value: chunks[0], done: false }, 'first result should be correct');

      return reader.read().then(r2 => {
        assert_object_equals(r2, { value: chunks[1], done: false }, 'second result should be correct');

        return reader.read().then(r3 => {
          assert_object_equals(r3, { value: undefined, done: true }, 'third result should be correct');
        });
      });
    });

  }, label + ': third read(), without waiting, should give { value: undefined, done: true } (nested)');

  promise_test(() => {

    const streamAndReader = factory();
    const stream = streamAndReader.stream;
    const reader = streamAndReader.reader;

    assert_true(stream.locked, 'stream should start locked');

    const promise = reader.closed.then(v => {
      assert_equals(v, undefined, 'reader closed should fulfill with undefined');
      assert_true(stream.locked, 'stream should remain locked');
    });

    reader.read();
    reader.read();

    return promise;

  }, label +
    ': draining the stream via read() should cause the reader closed promise to fulfill, but locked stays true');

  promise_test(() => {

    const streamAndReader = factory();
    const stream = streamAndReader.stream;
    const reader = streamAndReader.reader;

    const promise = reader.closed.then(() => {
      assert_true(stream.locked, 'the stream should start locked');
      reader.releaseLock(); // Releasing the lock after reader closed should not throw.
      assert_false(stream.locked, 'the stream should end unlocked');
    });

    reader.read();
    reader.read();

    return promise;

  }, label + ': releasing the lock after the stream is closed should cause locked to become false');

  promise_test(t => {

    const reader = factory().reader;

    reader.releaseLock();

    return Promise.all([
      promise_rejects_js(t, TypeError, reader.read()),
      promise_rejects_js(t, TypeError, reader.read()),
      promise_rejects_js(t, TypeError, reader.read())
    ]);

  }, label + ': releasing the lock should cause further read() calls to reject with a TypeError');

  promise_test(() => {

    const streamAndReader = factory();
    const stream = streamAndReader.stream;
    const reader = streamAndReader.reader;

    const readerClosed = reader.closed;

    assert_equals(reader.closed, readerClosed, 'accessing reader.closed twice in succession gives the same value');

    const promise = reader.read().then(() => {
      assert_equals(reader.closed, readerClosed, 'reader.closed is the same after read() fulfills');

      reader.releaseLock();

      assert_equals(reader.closed, readerClosed, 'reader.closed is the same after releasing the lock');

      const newReader = stream.getReader();
      return newReader.read();
    });

    assert_equals(reader.closed, readerClosed, 'reader.closed is the same after calling read()');

    return promise;

  }, label + ': reader\'s closed property always returns the same promise');
};

self.templatedRSTeeCancel = (label, factory) => {
  test(() => {}, `Running templatedRSTeeCancel with ${label}`);

  promise_test(async () => {

    const reason1 = new Error('We\'re wanted men.');
    const reason2 = new Error('I have the death sentence on twelve systems.');

    let resolve;
    const promise = new Promise(r => resolve = r);
    const rs = factory({
      cancel(reason) {
        assert_array_equals(reason, [reason1, reason2],
          'the cancel reason should be an array containing those from the branches');
        resolve();
      }
    });

    const [branch1, branch2] = rs.tee();
    await Promise.all([
      branch1.cancel(reason1),
      branch2.cancel(reason2),
      promise
    ]);

  }, `${label}: canceling both branches should aggregate the cancel reasons into an array`);

  promise_test(async () => {

    const reason1 = new Error('This little one\'s not worth the effort.');
    const reason2 = new Error('Come, let me get you something.');

    let resolve;
    const promise = new Promise(r => resolve = r);
    const rs = factory({
      cancel(reason) {
        assert_array_equals(reason, [reason1, reason2],
          'the cancel reason should be an array containing those from the branches');
        resolve();
      }
    });

    const [branch1, branch2] = rs.tee();
    await Promise.all([
      branch2.cancel(reason2),
      branch1.cancel(reason1),
      promise
    ]);

  }, `${label}: canceling both branches in reverse order should aggregate the cancel reasons into an array`);

  promise_test(async t => {

    const theError = { name: 'I\'ll be careful.' };
    const rs = factory({
      cancel() {
        throw theError;
      }
    });

    const [branch1, branch2] = rs.tee();
    await Promise.all([
      promise_rejects_exactly(t, theError, branch1.cancel()),
      promise_rejects_exactly(t, theError, branch2.cancel())
    ]);

  }, `${label}: failing to cancel the original stream should cause cancel() to reject on branches`);

  promise_test(async t => {

    const theError = { name: 'You just watch yourself!' };
    let controller;
    const stream = factory({
      start(c) {
        controller = c;
      }
    });

    const [branch1, branch2] = stream.tee();
    controller.error(theError);

    await Promise.all([
      promise_rejects_exactly(t, theError, branch1.cancel()),
      promise_rejects_exactly(t, theError, branch2.cancel())
    ]);

  }, `${label}: erroring a teed stream should properly handle canceled branches`);

};

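// Sketch (illustrative only, not upstream code): unlike the reader templates above,
// the tee-cancel template's factory receives underlying-source overrides (start/cancel)
// and must build its stream from them; the label is an assumption.
templatedRSTeeCancel('ReadableStream teeing', extras => new ReadableStream({ ...extras }));
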
234 Tests/LibWeb/Text/input/wpt-import/streams/resources/rs-utils.js Normal file
@@ -0,0 +1,234 @@
'use strict';
(function () {
  // Fake setInterval-like functionality in environments that don't have it
  class IntervalHandle {
    constructor(callback, delayMs) {
      this.callback = callback;
      this.delayMs = delayMs;
      this.cancelled = false;
      Promise.resolve().then(() => this.check());
    }

    async check() {
      while (true) {
        await new Promise(resolve => step_timeout(resolve, this.delayMs));
        if (this.cancelled) {
          return;
        }
        this.callback();
      }
    }

    cancel() {
      this.cancelled = true;
    }
  }

  let localSetInterval, localClearInterval;
  if (typeof globalThis.setInterval !== "undefined" &&
      typeof globalThis.clearInterval !== "undefined") {
    localSetInterval = globalThis.setInterval;
    localClearInterval = globalThis.clearInterval;
  } else {
    localSetInterval = function setInterval(callback, delayMs) {
      return new IntervalHandle(callback, delayMs);
    };
    localClearInterval = function clearInterval(handle) {
      handle.cancel();
    };
  }

  class RandomPushSource {
    constructor(toPush) {
      this.pushed = 0;
      this.toPush = toPush;
      this.started = false;
      this.paused = false;
      this.closed = false;

      this._intervalHandle = null;
    }

    readStart() {
      if (this.closed) {
        return;
      }

      if (!this.started) {
        this._intervalHandle = localSetInterval(writeChunk, 2);
        this.started = true;
      }

      if (this.paused) {
        this._intervalHandle = localSetInterval(writeChunk, 2);
        this.paused = false;
      }

      const source = this;
      function writeChunk() {
        if (source.paused) {
          return;
        }

        source.pushed++;

        if (source.toPush > 0 && source.pushed > source.toPush) {
          if (source._intervalHandle) {
            localClearInterval(source._intervalHandle);
            source._intervalHandle = undefined;
          }
          source.closed = true;
          source.onend();
        } else {
          source.ondata(randomChunk(128));
        }
      }
    }

    readStop() {
      if (this.paused) {
        return;
      }

      if (this.started) {
        this.paused = true;
        localClearInterval(this._intervalHandle);
        this._intervalHandle = undefined;
      } else {
        throw new Error('Can\'t pause reading an unstarted source.');
      }
    }
  }

  function randomChunk(size) {
    let chunk = '';

    for (let i = 0; i < size; ++i) {
      // Add a random character from the basic printable ASCII set.
      chunk += String.fromCharCode(Math.round(Math.random() * 84) + 32);
    }

    return chunk;
  }

  function readableStreamToArray(readable, reader) {
    if (reader === undefined) {
      reader = readable.getReader();
    }

    const chunks = [];

    return pump();

    function pump() {
      return reader.read().then(result => {
        if (result.done) {
          return chunks;
        }

        chunks.push(result.value);
        return pump();
      });
    }
  }

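  // Example (illustrative, not part of the upstream helper): drain a whole stream
  // inside a test and check every chunk it produced.
  //
  //   const rs = new ReadableStream({ start(c) { c.enqueue('a'); c.enqueue('b'); c.close(); } });
  //   return readableStreamToArray(rs).then(chunks => assert_array_equals(chunks, ['a', 'b']));
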
  class SequentialPullSource {
    constructor(limit, options) {
      const async = options && options.async;

      this.current = 0;
      this.limit = limit;
      this.opened = false;
      this.closed = false;

      this._exec = f => f();
      if (async) {
        this._exec = f => step_timeout(f, 0);
      }
    }

    open(cb) {
      this._exec(() => {
        this.opened = true;
        cb();
      });
    }

    read(cb) {
      this._exec(() => {
        if (++this.current <= this.limit) {
          cb(null, false, this.current);
        } else {
          cb(null, true, null);
        }
      });
    }

    close(cb) {
      this._exec(() => {
        this.closed = true;
        cb();
      });
    }
  }

  function sequentialReadableStream(limit, options) {
    const sequentialSource = new SequentialPullSource(limit, options);

    const stream = new ReadableStream({
      start() {
        return new Promise((resolve, reject) => {
          sequentialSource.open(err => {
            if (err) {
              reject(err);
            }
            resolve();
          });
        });
      },

      pull(c) {
        return new Promise((resolve, reject) => {
          sequentialSource.read((err, done, chunk) => {
            if (err) {
              reject(err);
            } else if (done) {
              sequentialSource.close(err2 => {
                if (err2) {
                  reject(err2);
                }
                c.close();
                resolve();
              });
            } else {
              c.enqueue(chunk);
              resolve();
            }
          });
        });
      }
    });

    stream.source = sequentialSource;

    return stream;
  }

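  // Example (illustrative, not part of the upstream helper): build a stream that
  // enqueues 1..limit and then closes; the async option routes every source
  // callback through step_timeout.
  //
  //   const rs = sequentialReadableStream(3, { async: true });
  //   return readableStreamToArray(rs).then(chunks => assert_array_equals(chunks, [1, 2, 3]));
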
  function transferArrayBufferView(view) {
    const noopByteStream = new ReadableStream({
      type: 'bytes',
      pull(c) {
        c.byobRequest.respond(c.byobRequest.view.byteLength);
        c.close();
      }
    });
    const reader = noopByteStream.getReader({ mode: 'byob' });
    return reader.read(view).then((result) => result.value);
  }

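  // Example (illustrative, not part of the upstream helper): round-trip a view
  // through a BYOB read, which detaches its original buffer and resolves with a
  // transferred copy holding the same bytes.
  //
  //   const view = new Uint8Array([1, 2, 3]);
  //   return transferArrayBufferView(view).then(transferred => {
  //     assert_typed_array_equals(transferred, new Uint8Array([1, 2, 3]), 'bytes survive the transfer');
  //   });
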
  self.RandomPushSource = RandomPushSource;
  self.readableStreamToArray = readableStreamToArray;
  self.sequentialReadableStream = sequentialReadableStream;
  self.transferArrayBufferView = transferArrayBufferView;

}());

@@ -0,0 +1,27 @@
'use strict';

self.delay = ms => new Promise(resolve => step_timeout(resolve, ms));

// For tests which verify that the implementation doesn't do something it shouldn't, it's better not to use a
// timeout. Instead, assume that any reasonable implementation is going to finish work after 2 times around the event
// loop, and use flushAsyncEvents().then(() => assert_array_equals(...));
// Some tests include promise resolutions which may mean the test code takes a couple of event loop visits itself. So go
// around an extra 2 times to avoid complicating those tests.
self.flushAsyncEvents = () => delay(0).then(() => delay(0)).then(() => delay(0)).then(() => delay(0));

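// Example (illustrative, not part of the upstream helper): check that nothing
// happened after giving the implementation a few turns of the event loop.
//
//   promise_test(() => {
//     const pulls = [];
//     new ReadableStream({ pull() { pulls.push('pull'); } }, { highWaterMark: 0 });
//     return flushAsyncEvents().then(() => assert_array_equals(pulls, [], 'no pulls while nobody reads'));
//   }, 'flushAsyncEvents() example');
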
self.assert_typed_array_equals = (actual, expected, message) => {
  const prefix = message === undefined ? '' : `${message} `;
  assert_equals(typeof actual, 'object', `${prefix}type is object`);
  assert_equals(actual.constructor, expected.constructor, `${prefix}constructor`);
  assert_equals(actual.byteOffset, expected.byteOffset, `${prefix}byteOffset`);
  assert_equals(actual.byteLength, expected.byteLength, `${prefix}byteLength`);
  assert_equals(actual.buffer.byteLength, expected.buffer.byteLength, `${prefix}buffer.byteLength`);
  assert_array_equals([...actual], [...expected], `${prefix}contents`);
  assert_array_equals([...new Uint8Array(actual.buffer)], [...new Uint8Array(expected.buffer)], `${prefix}buffer contents`);
};

self.makePromiseAndResolveFunc = () => {
  let resolve;
  const promise = new Promise(r => { resolve = r; });
  return [promise, resolve];
};

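// Example (illustrative, not part of the upstream helper): expose a signal from an
// underlying source so a test can await it.
//
//   const [cancelCalled, resolveCancelCalled] = makePromiseAndResolveFunc();
//   const rs = new ReadableStream({ cancel() { resolveCancelCalled(); } });
//   rs.cancel();
//   return cancelCalled;
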
@@ -415,7 +415,8 @@ i32 WindowOrWorkerGlobalScopeMixin::run_timer_initialization_steps(TimerHandler

     // 11. Let completionStep be an algorithm step which queues a global task on the timer task source given global to run task.
     Function<void()> completion_step = [this, task = move(task)]() mutable {
-        queue_global_task(Task::Source::TimerTask, this_impl(), JS::create_heap_function(this_impl().heap(), [task] {
+        queue_global_task(Task::Source::TimerTask, this_impl(), JS::create_heap_function(this_impl().heap(), [this, task] {
+            HTML::TemporaryExecutionContext execution_context { Bindings::host_defined_environment_settings_object(this_impl().realm()), HTML::TemporaryExecutionContext::CallbacksEnabled::Yes };
             task->function()();
         }));
     };

@@ -586,6 +587,7 @@ void WindowOrWorkerGlobalScopeMixin::queue_the_performance_observer_task()
     // timeline task source.
     queue_global_task(Task::Source::PerformanceTimeline, this_impl(), JS::create_heap_function(this_impl().heap(), [this]() {
         auto& realm = this_impl().realm();
+        HTML::TemporaryExecutionContext execution_context { Bindings::host_defined_environment_settings_object(realm), HTML::TemporaryExecutionContext::CallbacksEnabled::Yes };

         // 1. Unset performance observer task queued flag of relevantGlobal.
         m_performance_observer_task_queued = false;

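The two hunks above push an HTML::TemporaryExecutionContext around the queued timer and performance-observer tasks so that author callbacks run with an execution context on the stack. A minimal sketch of the kind of test this unblocks, assuming (as the commit message suggests) the crash came from timer callbacks touching streams; the names below are illustrative and not taken from the imported tests:

promise_test(() => new Promise(resolve => {
  const rs = new ReadableStream({ start(c) { c.enqueue('chunk'); c.close(); } });
  // The callback runs from the timer task source; with the fix, a temporary
  // execution context is pushed before it is invoked.
  setTimeout(() => {
    rs.getReader().read().then(resolve);
  }, 0);
}), 'setTimeout callback can read from a ReadableStream');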