denoland-deno/ext/fetch/23_request.js


// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
// @ts-check
/// <reference path="../webidl/internal.d.ts" />
/// <reference path="../web/internal.d.ts" />
/// <reference path="../web/lib.deno_web.d.ts" />
/// <reference path="./internal.d.ts" />
/// <reference path="../web/06_streams_types.d.ts" />
/// <reference path="./lib.deno_fetch.d.ts" />
/// <reference lib="esnext" />
import { core, internals, primordials } from "ext:core/mod.js";
const {
ArrayPrototypeMap,
ArrayPrototypeSlice,
ArrayPrototypeSplice,
ObjectFreeze,
ObjectKeys,
ObjectPrototypeIsPrototypeOf,
RegExpPrototypeExec,
StringPrototypeStartsWith,
Symbol,
SymbolFor,
TypeError,
} = primordials;
import * as webidl from "ext:deno_webidl/00_webidl.js";
import { createFilteredInspectProxy } from "ext:deno_console/01_console.js";
import {
byteUpperCase,
HTTP_TOKEN_CODE_POINT_RE,
} from "ext:deno_web/00_infra.js";
import { URL } from "ext:deno_url/00_url.js";
import { extractBody, mixinBody } from "ext:deno_fetch/22_body.js";
import { getLocationHref } from "ext:deno_web/12_location.js";
import { extractMimeType } from "ext:deno_web/01_mimesniff.js";
import { blobFromObjectUrl } from "ext:deno_web/09_file.js";
import {
fillHeaders,
getDecodeSplitHeader,
guardFromHeaders,
headerListFromHeaders,
headersFromHeaderList,
} from "ext:deno_fetch/20_headers.js";
import { HttpClientPrototype } from "ext:deno_fetch/22_http_client.js";
import {
createDependentAbortSignal,
newSignal,
signalAbort,
} from "ext:deno_web/03_abort_signal.js";
import { DOMException } from "ext:deno_web/01_dom_exception.js";
const { internalRidSymbol } = core;
const _request = Symbol("request");
const _headers = Symbol("headers");
const _getHeaders = Symbol("get headers");
const _headersCache = Symbol("headers cache");
const _signal = Symbol("signal");
const _signalCache = Symbol("signalCache");
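// The signal cache uses three states: `undefined` means no AbortSignal has
// been created yet and the request is still in progress; `false` means no
// signal was created but the request has already completed (a later access
// returns an already-aborted signal); otherwise it holds the materialized
// AbortSignal.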
const _mimeType = Symbol("mime type");
const _body = Symbol("body");
const _url = Symbol("url");
const _method = Symbol("method");
const _brand = webidl.brand;
/**
* @param {(() => string)[]} urlList
* @param {string[]} urlListProcessed
*/
function processUrlList(urlList, urlListProcessed) {
for (let i = 0; i < urlList.length; i++) {
if (urlListProcessed[i] === undefined) {
urlListProcessed[i] = urlList[i]();
}
}
return urlListProcessed;
}
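// Illustrative sketch (not part of the original module): URLs are stored as
// thunks so the string is only materialized when first needed.
//   const urls = [() => "https://example.com/a", () => "https://example.com/b"];
//   processUrlList(urls, []); // ["https://example.com/a", "https://example.com/b"]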
/**
* @typedef InnerRequest
* @property {() => string} method
* @property {() => string} url
* @property {() => string} currentUrl
* @property {() => [string, string][]} headerList
* @property {null | typeof __window.bootstrap.fetchBody.InnerBody} body
* @property {"follow" | "error" | "manual"} redirectMode
* @property {number} redirectCount
* @property {(() => string)[]} urlList
* @property {string[]} urlListProcessed
* @property {number | null} clientRid NOTE: non standard extension for `Deno.HttpClient`.
* @property {Blob | null} blobUrlEntry
*/
/**
* @param {string} method
* @param {string | (() => string)} url
* @param {() => [string, string][]} headerList
* @param {typeof __window.bootstrap.fetchBody.InnerBody} body
* @param {boolean} maybeBlob
* @returns {InnerRequest}
*/
function newInnerRequest(method, url, headerList, body, maybeBlob) {
let blobUrlEntry = null;
if (
maybeBlob &&
typeof url === "string" &&
StringPrototypeStartsWith(url, "blob:")
) {
blobUrlEntry = blobFromObjectUrl(url);
}
return {
methodInner: method,
get method() {
return this.methodInner;
},
set method(value) {
this.methodInner = value;
},
headerListInner: null,
get headerList() {
if (this.headerListInner === null) {
try {
this.headerListInner = headerList();
} catch {
throw new TypeError("cannot read headers: request closed");
}
}
return this.headerListInner;
},
set headerList(value) {
this.headerListInner = value;
},
body,
redirectMode: "follow",
redirectCount: 0,
urlList: [typeof url === "string" ? () => url : url],
urlListProcessed: [],
clientRid: null,
blobUrlEntry,
url() {
if (this.urlListProcessed[0] === undefined) {
try {
this.urlListProcessed[0] = this.urlList[0]();
} catch {
throw new TypeError("cannot read url: request closed");
}
}
return this.urlListProcessed[0];
},
currentUrl() {
const currentIndex = this.urlList.length - 1;
if (this.urlListProcessed[currentIndex] === undefined) {
try {
this.urlListProcessed[currentIndex] = this.urlList[currentIndex]();
} catch {
throw new TypeError("cannot read url: request closed");
}
}
return this.urlListProcessed[currentIndex];
},
};
}
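// Illustrative sketch (hypothetical values): the header list is also passed as
// a thunk, so headers are only read when `headerList` is first accessed.
//   const inner = newInnerRequest(
//     "GET",
//     "https://example.com/",
//     () => [["accept", "*/*"]],
//     null,
//     false,
//   );
//   inner.url();      // "https://example.com/"
//   inner.headerList; // [["accept", "*/*"]]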
/**
* https://fetch.spec.whatwg.org/#concept-request-clone
* @param {InnerRequest} request
* @param {boolean} skipBody
* @returns {InnerRequest}
*/
function cloneInnerRequest(request, skipBody = false) {
const headerList = ArrayPrototypeMap(
request.headerList,
(x) => [x[0], x[1]],
);
let body = null;
if (request.body !== null && !skipBody) {
body = request.body.clone();
}
return {
method: request.method,
headerList,
body,
redirectMode: request.redirectMode,
redirectCount: request.redirectCount,
urlList: [() => request.url()],
urlListProcessed: [request.url()],
clientRid: request.clientRid,
blobUrlEntry: request.blobUrlEntry,
url() {
if (this.urlListProcessed[0] === undefined) {
try {
this.urlListProcessed[0] = this.urlList[0]();
} catch {
throw new TypeError("cannot read url: request closed");
}
}
return this.urlListProcessed[0];
},
currentUrl() {
const currentIndex = this.urlList.length - 1;
if (this.urlListProcessed[currentIndex] === undefined) {
try {
this.urlListProcessed[currentIndex] = this.urlList[currentIndex]();
} catch {
throw new TypeError("cannot read url: request closed");
}
}
return this.urlListProcessed[currentIndex];
},
};
}
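// Note: cloning materializes the current URL eagerly (urlListProcessed[0]),
// copies the header list pair-by-pair, and clones the body unless skipBody is
// set; redirectCount is copied as-is and reset by the Request constructor.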
// method => normalized method
const KNOWN_METHODS = {
"DELETE": "DELETE",
"delete": "DELETE",
"GET": "GET",
"get": "GET",
"HEAD": "HEAD",
"head": "HEAD",
"OPTIONS": "OPTIONS",
"options": "OPTIONS",
"PATCH": "PATCH",
"patch": "PATCH",
"POST": "POST",
"post": "POST",
"PUT": "PUT",
"put": "PUT",
};
/**
* @param {string} m
* @returns {string}
*/
function validateAndNormalizeMethod(m) {
if (RegExpPrototypeExec(HTTP_TOKEN_CODE_POINT_RE, m) === null) {
throw new TypeError("Method is not valid.");
}
const upperCase = byteUpperCase(m);
if (
upperCase === "CONNECT" || upperCase === "TRACE" || upperCase === "TRACK"
) {
throw new TypeError("Method is forbidden.");
}
return upperCase;
}
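// Illustrative sketch (not part of the original module): the constructor below
// consults KNOWN_METHODS first and only falls back to the slower validation.
//   KNOWN_METHODS["patch"] ?? validateAndNormalizeMethod("patch"); // "PATCH"
//   validateAndNormalizeMethod("QUERY"); // "QUERY"
//   validateAndNormalizeMethod("TRACE"); // throws TypeError ("Method is forbidden.")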
class Request {
/** @type {InnerRequest} */
[_request];
/** @type {Headers} */
[_headersCache];
[_getHeaders];
/** @type {Headers} */
get [_headers]() {
if (this[_headersCache] === undefined) {
this[_headersCache] = this[_getHeaders]();
}
return this[_headersCache];
}
set [_headers](value) {
this[_headersCache] = value;
}
/** @type {AbortSignal} */
get [_signal]() {
const signal = this[_signalCache];
// This signal has not been created yet, and the request is still in progress
if (signal === undefined) {
const signal = newSignal();
this[_signalCache] = signal;
return signal;
}
// This signal has not been created yet, but the request has already completed
if (signal === false) {
const signal = newSignal();
this[_signalCache] = signal;
signal[signalAbort](signalAbortError);
return signal;
}
return signal;
}
get [_mimeType]() {
const values = getDecodeSplitHeader(
headerListFromHeaders(this[_headers]),
"Content-Type",
);
return extractMimeType(values);
}
get [_body]() {
return this[_request].body;
}
/**
* https://fetch.spec.whatwg.org/#dom-request
* @param {RequestInfo} input
* @param {RequestInit} init
*/
constructor(input, init = { __proto__: null }) {
if (input === _brand) {
this[_brand] = _brand;
return;
}
const prefix = "Failed to construct 'Request'";
webidl.requiredArguments(arguments.length, 1, prefix);
input = webidl.converters["RequestInfo_DOMString"](
input,
prefix,
"Argument 1",
);
init = webidl.converters["RequestInit"](init, prefix, "Argument 2");
this[_brand] = _brand;
/** @type {InnerRequest} */
let request;
const baseURL = getLocationHref();
// 4.
let signal = null;
// 5.
if (typeof input === "string") {
const parsedURL = new URL(input, baseURL);
request = newInnerRequest(
"GET",
parsedURL.href,
() => [],
null,
true,
);
} else { // 6.
if (!ObjectPrototypeIsPrototypeOf(RequestPrototype, input)) {
throw new TypeError("Unreachable");
}
const originalReq = input[_request];
// fold in of step 12 from below
request = cloneInnerRequest(originalReq, true);
request.redirectCount = 0; // reset to 0 - cloneInnerRequest copies the value
signal = input[_signal];
}
// 12. is folded into the else statement of step 6 above.
// 22.
if (init.redirect !== undefined) {
request.redirectMode = init.redirect;
}
// 25.
if (init.method !== undefined) {
const method = init.method;
// fast path: check for known methods
request.method = KNOWN_METHODS[method] ??
validateAndNormalizeMethod(method);
}
// 26.
if (init.signal !== undefined) {
signal = init.signal;
}
// NOTE: non standard extension. This handles Deno.HttpClient parameter
if (init.client !== undefined) {
if (
init.client !== null &&
!ObjectPrototypeIsPrototypeOf(HttpClientPrototype, init.client)
) {
throw webidl.makeException(
TypeError,
"`client` must be a Deno.HttpClient",
prefix,
"Argument 2",
);
}
request.clientRid = init.client?.[internalRidSymbol] ?? null;
}
// 28.
this[_request] = request;
// 29 & 30.
if (signal !== null) {
this[_signalCache] = createDependentAbortSignal([signal], prefix);
}
// 31.
this[_headers] = headersFromHeaderList(request.headerList, "request");
// 33.
if (init.headers || ObjectKeys(init).length > 0) {
const headerList = headerListFromHeaders(this[_headers]);
const headers = init.headers ?? ArrayPrototypeSlice(
headerList,
0,
headerList.length,
);
if (headerList.length !== 0) {
ArrayPrototypeSplice(headerList, 0, headerList.length);
}
fillHeaders(this[_headers], headers);
}
// 34.
let inputBody = null;
if (ObjectPrototypeIsPrototypeOf(RequestPrototype, input)) {
inputBody = input[_body];
}
// 35.
if (
(request.method === "GET" || request.method === "HEAD") &&
((init.body !== undefined && init.body !== null) ||
inputBody !== null)
) {
throw new TypeError("Request with GET/HEAD method cannot have body.");
}
// 36.
let initBody = null;
// 37.
if (init.body !== undefined && init.body !== null) {
const res = extractBody(init.body);
initBody = res.body;
if (res.contentType !== null && !this[_headers].has("content-type")) {
this[_headers].append("Content-Type", res.contentType);
}
}
// 38.
const inputOrInitBody = initBody ?? inputBody;
// 40.
let finalBody = inputOrInitBody;
// 41.
if (initBody === null && inputBody !== null) {
if (input[_body] && input[_body].unusable()) {
throw new TypeError("Input request's body is unusable.");
}
finalBody = inputBody.createProxy();
}
// 42.
request.body = finalBody;
}
get method() {
webidl.assertBranded(this, RequestPrototype);
if (this[_method]) {
return this[_method];
}
this[_method] = this[_request].method;
return this[_method];
}
get url() {
webidl.assertBranded(this, RequestPrototype);
if (this[_url]) {
return this[_url];
}
this[_url] = this[_request].url();
return this[_url];
}
get headers() {
webidl.assertBranded(this, RequestPrototype);
return this[_headers];
}
get redirect() {
webidl.assertBranded(this, RequestPrototype);
return this[_request].redirectMode;
}
get signal() {
webidl.assertBranded(this, RequestPrototype);
return this[_signal];
}
clone() {
const prefix = "Failed to execute 'Request.clone'";
webidl.assertBranded(this, RequestPrototype);
if (this[_body] && this[_body].unusable()) {
throw new TypeError("Body is unusable.");
}
const clonedReq = cloneInnerRequest(this[_request]);
const materializedSignal = this[_signal];
const clonedSignal = createDependentAbortSignal(
[materializedSignal],
prefix,
);
const request = new Request(_brand);
request[_request] = clonedReq;
request[_signalCache] = clonedSignal;
request[_getHeaders] = () =>
headersFromHeaderList(
clonedReq.headerList,
guardFromHeaders(this[_headers]),
);
return request;
}
[SymbolFor("Deno.privateCustomInspect")](inspect, inspectOptions) {
return inspect(
createFilteredInspectProxy({
object: this,
evaluate: ObjectPrototypeIsPrototypeOf(RequestPrototype, this),
keys: [
"bodyUsed",
"headers",
"method",
"redirect",
"url",
],
}),
inspectOptions,
);
}
}
webidl.configureInterface(Request);
const RequestPrototype = Request.prototype;
mixinBody(RequestPrototype, _body, _mimeType);
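// Illustrative usage sketch (hypothetical values) exercising the constructor
// above; `fetch()` init options are handled by the same constructor path and
// RequestInit converter.
//   const req = new Request("https://example.com/items", {
//     method: "post", // normalized to "POST" via KNOWN_METHODS
//     headers: { "content-type": "application/json" },
//     body: '{"ok":true}',
//   });
//   req.method;                      // "POST"
//   req.headers.get("content-type"); // "application/json"
//   const copy = req.clone();        // clones the inner request and its body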
webidl.converters["Request"] = webidl.createInterfaceConverter(
"Request",
RequestPrototype,
);
webidl.converters["RequestInfo_DOMString"] = (V, prefix, context, opts) => {
// Union for (Request or USVString)
if (typeof V == "object") {
if (ObjectPrototypeIsPrototypeOf(RequestPrototype, V)) {
return webidl.converters["Request"](V, prefix, context, opts);
}
}
// Passed to new URL(...) which implicitly converts DOMString -> USVString
return webidl.converters["DOMString"](V, prefix, context, opts);
};
webidl.converters["RequestRedirect"] = webidl.createEnumConverter(
"RequestRedirect",
[
"follow",
"error",
"manual",
],
);
webidl.converters["RequestInit"] = webidl.createDictionaryConverter(
"RequestInit",
[
{ key: "method", converter: webidl.converters["ByteString"] },
{ key: "headers", converter: webidl.converters["HeadersInit"] },
{
key: "body",
converter: webidl.createNullableConverter(
webidl.converters["BodyInit_DOMString"],
),
},
{ key: "redirect", converter: webidl.converters["RequestRedirect"] },
{
key: "signal",
converter: webidl.createNullableConverter(
webidl.converters["AbortSignal"],
),
},
{ key: "client", converter: webidl.converters.any },
],
);
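// NOTE: `client` is converted with `any` here; the Request constructor
// validates it against HttpClientPrototype (non-standard Deno.HttpClient
// extension).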
/**
* @param {Request} request
* @returns {InnerRequest}
*/
function toInnerRequest(request) {
return request[_request];
}
/**
* @param {InnerRequest} inner
* @param {"request" | "immutable" | "request-no-cors" | "response" | "none"} guard
* @returns {Request}
*/
function fromInnerRequest(inner, guard) {
const request = new Request(_brand);
request[_request] = inner;
request[_getHeaders] = () => headersFromHeaderList(inner.headerList, guard);
return request;
}
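// Illustrative sketch (not part of the original module): fromInnerRequest
// wraps an inner request without running the public constructor, and
// toInnerRequest unwraps it again.
//   const inner = newInnerRequest("GET", "https://example.com/", () => [], null, false);
//   const req = fromInnerRequest(inner, "immutable");
//   toInnerRequest(req) === inner; // true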
const signalAbortError = new DOMException(
"The request has been cancelled.",
"AbortError",
);
ObjectFreeze(signalAbortError);
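// Abort the request's signal if one has been materialized; otherwise record
// the aborted state in the cache (`false`) so a later `request.signal` access
// returns an already-aborted signal (see the `[_signal]` getter above).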
function abortRequest(request) {
if (request[_signalCache] !== undefined) {
    request[_signal][signalAbort](signalAbortError);
  } else {
    request[_signalCache] = false;
  }
}
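
// Exposes the raw signal cache entry for a request without creating a signal:
// `undefined` means no signal has been requested yet, and `false` means the
// request was aborted before any signal existed.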
function getCachedAbortSignal(request) {
  return request[_signalCache];
}
// For testing
internals.getCachedAbortSignal = getCachedAbortSignal;
export {
  abortRequest,
  fromInnerRequest,
  newInnerRequest,
  processUrlList,
  Request,
  RequestPrototype,
  toInnerRequest,
};