perf(ext/fetch): consume body using ops (#16038)
This commit adds a fast path to `Request` and `Response` that makes consuming request bodies much faster when using `Body#text`, `Body#arrayBuffer`, and `Body#blob`, if the body is a FastStream. Because the response bodies for `fetch` are FastStream, this speeds up consuming `fetch` response bodies significantly.
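
A quick sketch of the user-visible effect (the URL is a placeholder, not part of this commit): any of these consumers now collects the whole body in a single op call when the body is resource-backed, instead of pulling it chunk by chunk through the stream machinery.

// Placeholder URL; assumes a local server is running.
const resp = await fetch("http://127.0.0.1:4500/");
const bytes = await resp.arrayBuffer(); // fast path: one op reads the whole body
console.log(bytes.byteLength);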

Parent: 0b4a6c4d08
Commit: 569287b15b

16 changed files with 291 additions and 109 deletions

cli/bench/http/deno_http_flash_post_bin.js (new file, 16 additions)

@@ -0,0 +1,16 @@
+// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
+
+const addr = Deno.args[0] || "127.0.0.1:4500";
+const [hostname, port] = addr.split(":");
+const { serve } = Deno;
+
+async function handler(request) {
+  try {
+    const buffer = await request.arrayBuffer();
+    return new Response(buffer.byteLength);
+  } catch (e) {
+    console.log(e);
+  }
+}
+
+serve(handler, { hostname, port });

cli/bench/http/deno_http_flash_post_bin.lua (new file, 5 additions)

@@ -0,0 +1,5 @@
+wrk.method = "POST"
+wrk.headers["Content-Type"] = "application/octet-stream"
+
+file = io.open("./cli/bench/testdata/128k.bin", "rb")
+wrk.body = file:read("*a")

cli/bench/http/deno_post_bin.js (new file, 19 additions)

@@ -0,0 +1,19 @@
+// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
+
+const addr = Deno.args[0] || "127.0.0.1:4500";
+const [hostname, port] = addr.split(":");
+const listener = Deno.listen({ hostname, port: Number(port) });
+console.log("Server listening on", addr);
+
+for await (const conn of listener) {
+  (async () => {
+    const requests = Deno.serveHttp(conn);
+    for await (const { respondWith, request } of requests) {
+      if (request.method == "POST") {
+        const buffer = await request.arrayBuffer();
+        respondWith(new Response(buffer.byteLength))
+          .catch((e) => console.log(e));
+      }
+    }
+  })();
+}

cli/bench/http/deno_post_bin.lua (new file, 5 additions)

@@ -0,0 +1,5 @@
+wrk.method = "POST"
+wrk.headers["Content-Type"] = "application/octet-stream"
+
+file = io.open("./cli/bench/testdata/128k.bin", "rb")
+wrk.body = file:read("*a")

cli/bench/http/node_post_bin.js (new file, 18 additions)

@@ -0,0 +1,18 @@
+// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
+const http = require("http");
+const port = process.argv[2] || "4544";
+console.log("port", port);
+http
+  .Server((req, res) => {
+    if (req.method == "POST") {
+      let chunks = [];
+      req.on("data", function (data) {
+        chunks.push(data);
+      });
+      req.on("end", function () {
+        const buffer = Buffer.concat(chunks);
+        res.end(buffer.byteLength.toString());
+      });
+    }
+  })
+  .listen(port);

cli/bench/http/node_post_bin.lua (new file, 5 additions)

@@ -0,0 +1,5 @@
+wrk.method = "POST"
+wrk.headers["Content-Type"] = "application/octet-stream"
+
+file = io.open("./cli/bench/testdata/128k.bin", "rb")
+wrk.body = file:read("*a")

@@ -1789,3 +1789,19 @@ Deno.test(
     assertEquals(await res.text(), "ok");
   },
 );
+
+Deno.test(
+  { permissions: { net: true } },
+  async function fetchResponseStreamIsLockedWhileReading() {
+    const response = await fetch("http://localhost:4545/echo_server", {
+      body: new Uint8Array(5000),
+      method: "POST",
+    });
+
+    assertEquals(response.body!.locked, false);
+    const promise = response.arrayBuffer();
+    assertEquals(response.body!.locked, true);
+
+    await promise;
+  },
+);

@@ -2292,6 +2292,87 @@ Deno.test("upgradeHttp unix", {
   await Promise.all([server, client()]);
 });
+
+Deno.test(
+  { permissions: { net: true } },
+  async function httpServerReadLargeBodyWithContentLength() {
+    const TLS_PACKET_SIZE = 16 * 1024 + 256;
+    // We want the body to be read in multiple packets
+    const body = "aa\n" + "deno.land large body\n".repeat(TLS_PACKET_SIZE) +
+      "zz";
+
+    let httpConn: Deno.HttpConn;
+    const promise = (async () => {
+      const listener = Deno.listen({ port: 4501 });
+      const conn = await listener.accept();
+      listener.close();
+      httpConn = Deno.serveHttp(conn);
+      const reqEvent = await httpConn.nextRequest();
+      assert(reqEvent);
+      const { request, respondWith } = reqEvent;
+      assertEquals(await request.text(), body);
+      await respondWith(new Response(body));
+    })();
+
+    const resp = await fetch("http://127.0.0.1:4501/", {
+      method: "POST",
+      headers: { "connection": "close" },
+      body,
+    });
+    const text = await resp.text();
+    assertEquals(text, body);
+    await promise;
+
+    httpConn!.close();
+  },
+);
+
+Deno.test(
+  { permissions: { net: true } },
+  async function httpServerReadLargeBodyWithTransferChunked() {
+    const TLS_PACKET_SIZE = 16 * 1024 + 256;
+
+    // We want the body to be read in multiple packets
+    const chunks = [
+      "aa\n",
+      "deno.land large body\n".repeat(TLS_PACKET_SIZE),
+      "zz",
+    ];
+
+    const body = chunks.join("");
+
+    const stream = new TransformStream();
+    const writer = stream.writable.getWriter();
+    for (const chunk of chunks) {
+      writer.write(new TextEncoder().encode(chunk));
+    }
+    writer.close();
+
+    let httpConn: Deno.HttpConn;
+    const promise = (async () => {
+      const listener = Deno.listen({ port: 4501 });
+      const conn = await listener.accept();
+      listener.close();
+      httpConn = Deno.serveHttp(conn);
+      const reqEvent = await httpConn.nextRequest();
+      assert(reqEvent);
+      const { request, respondWith } = reqEvent;
+      assertEquals(await request.text(), body);
+      await respondWith(new Response(body));
+    })();
+
+    const resp = await fetch("http://127.0.0.1:4501/", {
+      method: "POST",
+      headers: { "connection": "close" },
+      body: stream.readable,
+    });
+    const text = await resp.text();
+    assertEquals(text, body);
+    await promise;
+
+    httpConn!.close();
+  },
+);

 function chunkedBodyReader(h: Headers, r: BufReader): Deno.Reader {
   // Based on https://tools.ietf.org/html/rfc2616#section-19.4.6
   const tp = new TextProtoReader(r);

@@ -34,6 +34,7 @@ pub(crate) fn init_builtins() -> Extension {
       op_add::decl(),
       // // TODO(@AaronO): track IO metrics for builtin streams
       op_read::decl(),
+      op_read_all::decl(),
       op_write::decl(),
       op_shutdown::decl(),
       op_metrics::decl(),

@@ -168,6 +169,26 @@ async fn op_read(
   resource.read_return(buf).await.map(|(n, _)| n as u32)
 }
+
+#[op]
+async fn op_read_all(
+  state: Rc<RefCell<OpState>>,
+  rid: ResourceId,
+) -> Result<ZeroCopyBuf, Error> {
+  let resource = state.borrow().resource_table.get_any(rid)?;
+  let (min, maximum) = resource.size_hint();
+  let size = maximum.unwrap_or(min) as usize;
+
+  let mut buffer = Vec::with_capacity(size);
+  loop {
+    let tmp = ZeroCopyBuf::new_temp(vec![0u8; 64 * 1024]);
+    let (nread, tmp) = resource.clone().read_return(tmp).await?;
+    if nread == 0 {
+      return Ok(buffer.into());
+    }
+    buffer.extend_from_slice(&tmp[..nread]);
+  }
+}

 #[op]
 async fn op_write(
   state: Rc<RefCell<OpState>>,
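
For orientation, this op is driven from privileged runtime JS (see the readableStreamCollectIntoUint8Array hunk further down); only as a sketch of the call pattern:

// Internal runtime code only, not reachable from user scripts.
// `rid` is a resource id whose resource implements read_return;
// size_hint() above lets the op pre-allocate the result buffer.
const buf = await core.opAsync("op_read_all", rid);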

@@ -64,6 +64,10 @@ pub trait Resource: Any + 'static {
   fn backing_fd(self: Rc<Self>) -> Option<std::os::unix::prelude::RawFd> {
     None
   }
+
+  fn size_hint(&self) -> (u64, Option<u64>) {
+    (0, None)
+  }
 }

 impl dyn Resource {

@@ -30,19 +30,18 @@
     errorReadableStream,
     readableStreamClose,
     readableStreamDisturb,
+    readableStreamCollectIntoUint8Array,
     createProxy,
     ReadableStreamPrototype,
   } = globalThis.__bootstrap.streams;
   const {
     ArrayBufferPrototype,
     ArrayBufferIsView,
-    ArrayPrototypePush,
     ArrayPrototypeMap,
     JSONParse,
     ObjectDefineProperties,
     ObjectPrototypeIsPrototypeOf,
     PromiseResolve,
-    TypedArrayPrototypeSet,
     TypedArrayPrototypeSlice,
     TypeError,
     Uint8Array,

@@ -66,12 +65,10 @@
   }

   class InnerBody {
-    #knownExactLength = null;
-
     /**
      * @param {ReadableStream<Uint8Array> | { body: Uint8Array | string, consumed: boolean }} stream
      */
-    constructor(stream, knownExactLength) {
+    constructor(stream) {
       /** @type {ReadableStream<Uint8Array> | { body: Uint8Array | string, consumed: boolean }} */
       this.streamOrStatic = stream ??
         { body: new Uint8Array(), consumed: false };

@@ -79,8 +76,6 @@
       this.source = null;
       /** @type {null | number} */
       this.length = null;
-
-      this.#knownExactLength = knownExactLength;
     }

     get stream() {

@@ -144,7 +139,7 @@
      * https://fetch.spec.whatwg.org/#concept-body-consume-body
      * @returns {Promise<Uint8Array>}
      */
-    async consume() {
+    consume() {
       if (this.unusable()) throw new TypeError("Body already consumed.");
       if (
         ObjectPrototypeIsPrototypeOf(

@@ -152,40 +147,7 @@
           this.streamOrStatic,
         )
       ) {
-        const reader = this.stream.getReader();
-        /** @type {Uint8Array[]} */
-        const chunks = [];
-
-        let finalBuffer = this.#knownExactLength
-          ? new Uint8Array(this.#knownExactLength)
-          : null;
-
-        let totalLength = 0;
-        while (true) {
-          const { value: chunk, done } = await reader.read();
-          if (done) break;
-
-          if (finalBuffer) {
-            // fast path, content-length is present
-            TypedArrayPrototypeSet(finalBuffer, chunk, totalLength);
-          } else {
-            // slow path, content-length is not present
-            ArrayPrototypePush(chunks, chunk);
-          }
-          totalLength += chunk.byteLength;
-        }
-
-        if (finalBuffer) {
-          return finalBuffer;
-        }
-
-        finalBuffer = new Uint8Array(totalLength);
-        let i = 0;
-        for (const chunk of chunks) {
-          TypedArrayPrototypeSet(finalBuffer, chunk, i);
-          i += chunk.byteLength;
-        }
-        return finalBuffer;
+        return readableStreamCollectIntoUint8Array(this.stream);
       } else {
         this.streamOrStatic.consumed = true;
         return this.streamOrStatic.body;

@@ -224,7 +186,7 @@
     clone() {
       const [out1, out2] = this.stream.tee();
       this.streamOrStatic = out1;
-      const second = new InnerBody(out2, this.#knownExactLength);
+      const second = new InnerBody(out2);
       second.source = core.deserialize(core.serialize(this.source));
       second.length = this.length;
       return second;

@@ -17,7 +17,7 @@
   const webidl = window.__bootstrap.webidl;
   const { byteLowerCase } = window.__bootstrap.infra;
   const { BlobPrototype } = window.__bootstrap.file;
-  const { errorReadableStream, ReadableStreamPrototype } =
+  const { errorReadableStream, ReadableStreamPrototype, readableStreamForRid } =
     window.__bootstrap.streams;
   const { InnerBody, extractBody } = window.__bootstrap.fetchBody;
   const {

@@ -44,7 +44,6 @@
     String,
     StringPrototypeStartsWith,
     StringPrototypeToLowerCase,
-    TypedArrayPrototypeSubarray,
     TypeError,
     Uint8Array,
     Uint8ArrayPrototype,

@@ -89,65 +88,22 @@
     return core.opAsync("op_fetch_send", rid);
   }

-  // A finalization registry to clean up underlying fetch resources that are GC'ed.
-  const RESOURCE_REGISTRY = new FinalizationRegistry((rid) => {
-    core.tryClose(rid);
-  });
-
   /**
    * @param {number} responseBodyRid
    * @param {AbortSignal} [terminator]
    * @returns {ReadableStream<Uint8Array>}
    */
   function createResponseBodyStream(responseBodyRid, terminator) {
+    const readable = readableStreamForRid(responseBodyRid);
+
     function onAbort() {
-      if (readable) {
-        errorReadableStream(readable, terminator.reason);
-      }
+      errorReadableStream(readable, terminator.reason);
       core.tryClose(responseBodyRid);
     }

     // TODO(lucacasonato): clean up registration
     terminator[abortSignal.add](onAbort);
-    const readable = new ReadableStream({
-      type: "bytes",
-      async pull(controller) {
-        try {
-          // This is the largest possible size for a single packet on a TLS
-          // stream.
-          const chunk = new Uint8Array(16 * 1024 + 256);
-          // TODO(@AaronO): switch to handle nulls if that's moved to core
-          const read = await core.read(
-            responseBodyRid,
-            chunk,
-          );
-          if (read > 0) {
-            // We read some data. Enqueue it onto the stream.
-            controller.enqueue(TypedArrayPrototypeSubarray(chunk, 0, read));
-          } else {
-            RESOURCE_REGISTRY.unregister(readable);
-            // We have reached the end of the body, so we close the stream.
-            controller.close();
-            core.tryClose(responseBodyRid);
-          }
-        } catch (err) {
-          RESOURCE_REGISTRY.unregister(readable);
-          if (terminator.aborted) {
-            controller.error(terminator.reason);
-          } else {
-            // There was an error while reading a chunk of the body, so we
-            // error.
-            controller.error(err);
-          }
-          core.tryClose(responseBodyRid);
-        }
-      },
-      cancel() {
-        if (!terminator.aborted) {
-          terminator[abortSignal.signalAbort]();
-        }
-      },
-    });
-    RESOURCE_REGISTRY.register(readable, responseBodyRid, readable);
     return readable;
   }
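
The abort wiring this function keeps is observable from user code; a hedged sketch (placeholder URL) of what it guarantees:

// Aborting the signal errors the body stream and closes the resource.
const ac = new AbortController();
const resp = await fetch("http://127.0.0.1:4500/", { signal: ac.signal });
const reading = resp.arrayBuffer(); // starts consuming the body
ac.abort();
await reading.catch((err) => console.log("body errored:", err));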

@@ -338,7 +294,6 @@
     } else {
       response.body = new InnerBody(
         createResponseBodyStream(resp.responseRid, terminator),
-        resp.contentLength,
       );
     }
   }

@@ -408,6 +408,7 @@ pub async fn op_fetch_send(
     .add(FetchResponseBodyResource {
       reader: AsyncRefCell::new(stream_reader),
       cancel: CancelHandle::default(),
+      size: content_length,
     });

   Ok(FetchResponse {

@@ -479,6 +480,7 @@ type BytesStream =
 struct FetchResponseBodyResource {
   reader: AsyncRefCell<StreamReader<BytesStream, bytes::Bytes>>,
   cancel: CancelHandle,
+  size: Option<u64>,
 }

 impl Resource for FetchResponseBodyResource {

@@ -498,6 +500,10 @@ impl Resource for FetchResponseBodyResource {
     })
   }

+  fn size_hint(&self) -> (u64, Option<u64>) {
+    (0, self.size)
+  }
+
   fn close(self: Rc<Self>) {
     self.cancel.cancel()
   }
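
Why the resource carries `size`: `op_read_all` pre-allocates from this hint, so a response that declares `Content-Length` is collected into a buffer of exactly the right capacity, while a chunked response starts at zero capacity and grows. A sketch with placeholder URLs:

// "/fixed" sends Content-Length; "/chunked" uses Transfer-Encoding: chunked.
const fixed = await fetch("http://127.0.0.1:4500/fixed");
await fixed.arrayBuffer(); // Vec::with_capacity(content_length), no regrowth
const chunked = await fetch("http://127.0.0.1:4500/chunked");
await chunked.arrayBuffer(); // capacity 0, grows as chunks arrive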

@@ -39,6 +39,8 @@ use flate2::write::GzEncoder;
 use flate2::Compression;
 use fly_accept_encoding::Encoding;
 use hyper::body::Bytes;
+use hyper::body::HttpBody;
+use hyper::body::SizeHint;
 use hyper::header::HeaderName;
 use hyper::header::HeaderValue;
 use hyper::server::conn::Http;

@@ -309,6 +311,7 @@ pub struct HttpStreamResource {
   wr: AsyncRefCell<HttpResponseWriter>,
   accept_encoding: Encoding,
   cancel_handle: CancelHandle,
+  size: SizeHint,
 }

 impl HttpStreamResource {

@@ -318,11 +321,13 @@ impl HttpStreamResource {
     response_tx: oneshot::Sender<Response<Body>>,
     accept_encoding: Encoding,
   ) -> Self {
+    let size = request.body().size_hint();
     Self {
       conn: conn.clone(),
       rd: HttpRequestReader::Headers(request).into(),
       wr: HttpResponseWriter::Headers(response_tx).into(),
       accept_encoding,
+      size,
       cancel_handle: CancelHandle::new(),
     }
   }

@@ -388,6 +393,10 @@ impl Resource for HttpStreamResource {
   fn close(self: Rc<Self>) {
     self.cancel_handle.cancel();
   }
+
+  fn size_hint(&self) -> (u64, Option<u64>) {
+    (self.size.lower(), self.size.upper())
+  }
 }

 /// The read half of an HTTP stream.

@@ -48,6 +48,7 @@
     SymbolAsyncIterator,
     SymbolFor,
     TypeError,
+    TypedArrayPrototypeSet,
     Uint8Array,
     Uint8ArrayPrototype,
     Uint16ArrayPrototype,

@@ -647,6 +648,10 @@

   const DEFAULT_CHUNK_SIZE = 64 * 1024; // 64 KiB

+  // A finalization registry to clean up underlying resources that are GC'ed.
+  const RESOURCE_REGISTRY = new FinalizationRegistry((rid) => {
+    core.tryClose(rid);
+  });
   /**
    * Create a new ReadableStream object that is backed by a Resource that
    * implements `Resource::read_return`. This object contains enough metadata to
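
For readers unfamiliar with the pattern: FinalizationRegistry runs a callback some time after a registered object has been garbage collected, which is what lets a stream that was never closed still release its resource id. A standalone sketch of the standard API:

const registry = new FinalizationRegistry((heldValue) => {
  // Invoked by the GC after the registered target becomes unreachable.
  console.log("cleaning up", heldValue);
});
let target = { name: "stream" };
registry.register(target, /* heldValue */ 42, /* unregisterToken */ target);
registry.unregister(target); // explicit close paths unregister first
target = null; // without unregister, GC would eventually fire the callback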

@@ -660,6 +665,17 @@
   function readableStreamForRid(rid, autoClose = true) {
     const stream = webidl.createBranded(ReadableStream);
     stream[_resourceBacking] = { rid, autoClose };
+
+    const tryClose = () => {
+      if (!autoClose) return;
+      RESOURCE_REGISTRY.unregister(stream);
+      core.tryClose(rid);
+    };
+
+    if (autoClose) {
+      RESOURCE_REGISTRY.register(stream, rid, stream);
+    }
+
     const underlyingSource = {
       type: "bytes",
       async pull(controller) {

@@ -667,7 +683,7 @@
         try {
           const bytesRead = await core.read(rid, v);
           if (bytesRead === 0) {
-            if (autoClose) core.tryClose(rid);
+            tryClose();
             controller.close();
             controller.byobRequest.respond(0);
           } else {

@@ -675,11 +691,11 @@
           }
         } catch (e) {
           controller.error(e);
-          if (autoClose) core.tryClose(rid);
+          tryClose();
         }
       },
       cancel() {
-        if (autoClose) core.tryClose(rid);
+        tryClose();
       },
       autoAllocateChunkSize: DEFAULT_CHUNK_SIZE,
     };

@@ -766,6 +782,59 @@
     return stream[_resourceBacking];
   }

+  async function readableStreamCollectIntoUint8Array(stream) {
+    const resourceBacking = getReadableStreamResourceBacking(stream);
+    const reader = acquireReadableStreamDefaultReader(stream);
+
+    if (resourceBacking) {
+      // fast path, read whole body in a single op call
+      try {
+        readableStreamDisturb(stream);
+        const buf = await core.opAsync("op_read_all", resourceBacking.rid);
+        readableStreamThrowIfErrored(stream);
+        readableStreamClose(stream);
+        return buf;
+      } catch (err) {
+        readableStreamThrowIfErrored(stream);
+        readableStreamError(stream, err);
+        throw err;
+      } finally {
+        if (resourceBacking.autoClose) {
+          core.tryClose(resourceBacking.rid);
+        }
+      }
+    }
+
+    // slow path
+    /** @type {Uint8Array[]} */
+    const chunks = [];
+    let totalLength = 0;
+    while (true) {
+      const { value: chunk, done } = await reader.read();
+      if (done) break;
+
+      ArrayPrototypePush(chunks, chunk);
+      totalLength += chunk.byteLength;
+    }
+
+    const finalBuffer = new Uint8Array(totalLength);
+    let i = 0;
+    for (const chunk of chunks) {
+      TypedArrayPrototypeSet(finalBuffer, chunk, i);
+      i += chunk.byteLength;
+    }
+    return finalBuffer;
+  }
+
+  /*
+   * @param {ReadableStream} stream
+   */
+  function readableStreamThrowIfErrored(stream) {
+    if (stream[_state] === "errored") {
+      throw stream[_storedError];
+    }
+  }
+
   /**
    * @param {unknown} value
    * @returns {value is WritableStream}
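
One consequence worth noting, sketched below (placeholder URL): the fast path applies only while the stream keeps its resource backing, so piping a fetch body through a transform falls back to the reader loop; still correct, just slower.

const resp = await fetch("http://127.0.0.1:4500/");
const wrapped = new Response(resp.body.pipeThrough(new TransformStream()));
await wrapped.arrayBuffer(); // slow path: no resource backing after pipeThrough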

@@ -5982,6 +6051,7 @@
     createProxy,
     writableStreamClose,
     readableStreamClose,
+    readableStreamCollectIntoUint8Array,
     readableStreamDisturb,
     readableStreamForRid,
     readableStreamForRidUnrefable,

@@ -2963,21 +2963,11 @@
     "stream-response.any.worker.html": true,
     "stream-safe-creation.any.html": [
       "throwing Object.prototype.start accessor should not affect stream creation by 'fetch'",
-      "Object.prototype.start accessor returning invalid value should not affect stream creation by 'fetch'",
-      "throwing Object.prototype.type accessor should not affect stream creation by 'fetch'",
-      "throwing Object.prototype.size accessor should not affect stream creation by 'fetch'",
-      "Object.prototype.size accessor returning invalid value should not affect stream creation by 'fetch'",
-      "throwing Object.prototype.highWaterMark accessor should not affect stream creation by 'fetch'",
-      "Object.prototype.highWaterMark accessor returning invalid value should not affect stream creation by 'fetch'"
+      "Object.prototype.start accessor returning invalid value should not affect stream creation by 'fetch'"
     ],
     "stream-safe-creation.any.worker.html": [
       "throwing Object.prototype.start accessor should not affect stream creation by 'fetch'",
-      "Object.prototype.start accessor returning invalid value should not affect stream creation by 'fetch'",
-      "throwing Object.prototype.type accessor should not affect stream creation by 'fetch'",
-      "throwing Object.prototype.size accessor should not affect stream creation by 'fetch'",
-      "Object.prototype.size accessor returning invalid value should not affect stream creation by 'fetch'",
-      "throwing Object.prototype.highWaterMark accessor should not affect stream creation by 'fetch'",
-      "Object.prototype.highWaterMark accessor returning invalid value should not affect stream creation by 'fetch'"
+      "Object.prototype.start accessor returning invalid value should not affect stream creation by 'fetch'"
     ],
     "integrity.sub.any.html": [
       "Invalid integrity",