// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

// @ts-check
/// <reference path="../webidl/internal.d.ts" />
/// <reference path="../web/internal.d.ts" />
/// <reference path="../web/lib.deno_web.d.ts" />
/// <reference path="./internal.d.ts" />
/// <reference path="../web/06_streams_types.d.ts" />
/// <reference path="./lib.deno_fetch.d.ts" />
/// <reference lib="esnext" />

import { core, internals, primordials } from "ext:core/mod.js";
const {
  ArrayPrototypeMap,
  ArrayPrototypeSlice,
  ArrayPrototypeSplice,
  ObjectFreeze,
  ObjectKeys,
  ObjectPrototypeIsPrototypeOf,
  RegExpPrototypeExec,
  StringPrototypeStartsWith,
  Symbol,
  SymbolFor,
  TypeError,
} = primordials;

import * as webidl from "ext:deno_webidl/00_webidl.js";
import { createFilteredInspectProxy } from "ext:deno_console/01_console.js";
import {
  byteUpperCase,
  HTTP_TOKEN_CODE_POINT_RE,
} from "ext:deno_web/00_infra.js";
import { URL } from "ext:deno_url/00_url.js";
import { extractBody, mixinBody } from "ext:deno_fetch/22_body.js";
import { getLocationHref } from "ext:deno_web/12_location.js";
import { extractMimeType } from "ext:deno_web/01_mimesniff.js";
import { blobFromObjectUrl } from "ext:deno_web/09_file.js";
import {
  fillHeaders,
  getDecodeSplitHeader,
  guardFromHeaders,
  headerListFromHeaders,
  headersFromHeaderList,
} from "ext:deno_fetch/20_headers.js";
import { HttpClientPrototype } from "ext:deno_fetch/22_http_client.js";
import {
  createDependentAbortSignal,
  newSignal,
  signalAbort,
} from "ext:deno_web/03_abort_signal.js";
import { DOMException } from "ext:deno_web/01_dom_exception.js";

const { internalRidSymbol } = core;

const _request = Symbol("request");
const _headers = Symbol("headers");
const _getHeaders = Symbol("get headers");
const _headersCache = Symbol("headers cache");
const _signal = Symbol("signal");
const _signalCache = Symbol("signalCache");
const _mimeType = Symbol("mime type");
const _body = Symbol("body");
const _url = Symbol("url");
const _method = Symbol("method");
const _brand = webidl.brand;

/**
 * @param {(() => string)[]} urlList
 * @param {string[]} urlListProcessed
 */
function processUrlList(urlList, urlListProcessed) {
  for (let i = 0; i < urlList.length; i++) {
    if (urlListProcessed[i] === undefined) {
      urlListProcessed[i] = urlList[i]();
    }
  }
  return urlListProcessed;
}
/**
 * @typedef InnerRequest
 * @property {() => string} method
 * @property {() => string} url
 * @property {() => string} currentUrl
 * @property {() => [string, string][]} headerList
 * @property {null | typeof __window.bootstrap.fetchBody.InnerBody} body
 * @property {"follow" | "error" | "manual"} redirectMode
 * @property {number} redirectCount
 * @property {(() => string)[]} urlList
 * @property {string[]} urlListProcessed
 * @property {number | null} clientRid NOTE: non standard extension for `Deno.HttpClient`.
 * @property {Blob | null} blobUrlEntry
 */
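
// `url` may be provided either as a string or as a thunk; a thunk lets the
// caller defer building the URL string until it is first read, while blob:
// URLs are resolved to their backing Blob eagerly when `maybeBlob` is set.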
/**
 * @param {string} method
 * @param {string | () => string} url
 * @param {() => [string, string][]} headerList
 * @param {typeof __window.bootstrap.fetchBody.InnerBody} body
 * @param {boolean} maybeBlob
 * @returns {InnerRequest}
 */
function newInnerRequest(method, url, headerList, body, maybeBlob) {
  let blobUrlEntry = null;
  if (
    maybeBlob &&
    typeof url === "string" &&
    StringPrototypeStartsWith(url, "blob:")
  ) {
    blobUrlEntry = blobFromObjectUrl(url);
  }
  return {
    methodInner: method,
    get method() {
      return this.methodInner;
    },
    set method(value) {
      this.methodInner = value;
    },
    headerListInner: null,
    get headerList() {
      if (this.headerListInner === null) {
        try {
          this.headerListInner = headerList();
        } catch {
          throw new TypeError("Cannot read headers: request closed");
        }
      }
      return this.headerListInner;
    },
    set headerList(value) {
      this.headerListInner = value;
    },
    body,
    redirectMode: "follow",
    redirectCount: 0,
    urlList: [typeof url === "string" ? () => url : url],
    urlListProcessed: [],
    clientRid: null,
    blobUrlEntry,
    url() {
      if (this.urlListProcessed[0] === undefined) {
        try {
          this.urlListProcessed[0] = this.urlList[0]();
        } catch {
          throw new TypeError("Cannot read url: request closed");
        }
      }
      return this.urlListProcessed[0];
    },
    currentUrl() {
      const currentIndex = this.urlList.length - 1;
      if (this.urlListProcessed[currentIndex] === undefined) {
        try {
          this.urlListProcessed[currentIndex] = this.urlList[currentIndex]();
        } catch {
          throw new TypeError("Cannot read url: request closed");
        }
      }
      return this.urlListProcessed[currentIndex];
    },
  };
}
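
// Header pairs are copied entry-by-entry below so the clone never shares
// [name, value] tuples with the source request's header list.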
/**
 * https://fetch.spec.whatwg.org/#concept-request-clone
 * @param {InnerRequest} request
 * @param {boolean} skipBody
 * @returns {InnerRequest}
 */
function cloneInnerRequest(request, skipBody = false) {
  const headerList = ArrayPrototypeMap(
    request.headerList,
    (x) => [x[0], x[1]],
  );

  let body = null;
  if (request.body !== null && !skipBody) {
    body = request.body.clone();
  }

  return {
    method: request.method,
    headerList,
    body,
    redirectMode: request.redirectMode,
    redirectCount: request.redirectCount,
    urlList: [() => request.url()],
    urlListProcessed: [request.url()],
    clientRid: request.clientRid,
    blobUrlEntry: request.blobUrlEntry,
    url() {
      if (this.urlListProcessed[0] === undefined) {
        try {
          this.urlListProcessed[0] = this.urlList[0]();
        } catch {
          throw new TypeError("Cannot read url: request closed");
        }
      }
      return this.urlListProcessed[0];
    },
    currentUrl() {
      const currentIndex = this.urlList.length - 1;
      if (this.urlListProcessed[currentIndex] === undefined) {
        try {
          this.urlListProcessed[currentIndex] = this.urlList[currentIndex]();
        } catch {
          throw new TypeError("Cannot read url: request closed");
        }
      }
      return this.urlListProcessed[currentIndex];
    },
  };
}

// method => normalized method
const KNOWN_METHODS = {
  "DELETE": "DELETE",
  "delete": "DELETE",
  "GET": "GET",
  "get": "GET",
  "HEAD": "HEAD",
  "head": "HEAD",
  "OPTIONS": "OPTIONS",
  "options": "OPTIONS",
  "PATCH": "PATCH",
  "patch": "PATCH",
  "POST": "POST",
  "post": "POST",
  "PUT": "PUT",
  "put": "PUT",
};
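
// Fast path: KNOWN_METHODS covers both the canonical and the lowercased
// spellings of the common methods, so the Request constructor only falls back
// to validateAndNormalizeMethod below for unusual method tokens.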

/**
 * @param {string} m
 * @returns {string}
 */
function validateAndNormalizeMethod(m) {
  if (RegExpPrototypeExec(HTTP_TOKEN_CODE_POINT_RE, m) === null) {
    throw new TypeError("Method is not valid");
  }
  const upperCase = byteUpperCase(m);
  if (
    upperCase === "CONNECT" || upperCase === "TRACE" || upperCase === "TRACK"
  ) {
    throw new TypeError("Method is forbidden");
  }
  return upperCase;
}

class Request {
  /** @type {InnerRequest} */
  [_request];
  /** @type {Headers} */
  [_headersCache];
  [_getHeaders];

  /** @type {Headers} */
  get [_headers]() {
    if (this[_headersCache] === undefined) {
      this[_headersCache] = this[_getHeaders]();
    }
    return this[_headersCache];
  }

  set [_headers](value) {
    this[_headersCache] = value;
  }

  /** @type {AbortSignal} */
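  // [_signalCache] is a lazily-populated tri-state slot: `undefined` means no
  // signal has been requested yet while the request is still in progress,
  // `false` means the request already completed before a signal was requested
  // (the getter then hands back a signal that is aborted immediately), and any
  // other value is the already-materialized AbortSignal.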
  get [_signal]() {
    const signal = this[_signalCache];
    // This signal has not been created yet, and the request is still in progress
    if (signal === undefined) {
      const signal = newSignal();
      this[_signalCache] = signal;
      return signal;
    }
    // This signal has not been created yet, but the request has already completed
    if (signal === false) {
      const signal = newSignal();
      this[_signalCache] = signal;
      signal[signalAbort](signalAbortError);
      return signal;
    }
    return signal;
  }

  get [_mimeType]() {
    const values = getDecodeSplitHeader(
      headerListFromHeaders(this[_headers]),
      "Content-Type",
    );
    return extractMimeType(values);
  }
  get [_body]() {
    return this[_request].body;
  }

  /**
   * https://fetch.spec.whatwg.org/#dom-request
   * @param {RequestInfo} input
   * @param {RequestInit} init
   */
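  // The numbered comments below track the corresponding steps of the
  // `new Request(input, init)` algorithm in the Fetch spec linked above; a few
  // steps are folded together or skipped where they do not apply here.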
  constructor(input, init = { __proto__: null }) {
    if (input === _brand) {
      this[_brand] = _brand;
      return;
    }

    const prefix = "Failed to construct 'Request'";
    webidl.requiredArguments(arguments.length, 1, prefix);
    input = webidl.converters["RequestInfo_DOMString"](
      input,
      prefix,
      "Argument 1",
    );
    init = webidl.converters["RequestInit"](init, prefix, "Argument 2");

    this[_brand] = _brand;

    /** @type {InnerRequest} */
    let request;
    const baseURL = getLocationHref();

    // 4.
    let signal = null;

    // 5.
    if (typeof input === "string") {
      const parsedURL = new URL(input, baseURL);
      request = newInnerRequest(
        "GET",
        parsedURL.href,
        () => [],
        null,
        true,
      );
    } else { // 6.
      if (!ObjectPrototypeIsPrototypeOf(RequestPrototype, input)) {
        throw new TypeError("Unreachable");
      }
      const originalReq = input[_request];
      // fold in of step 12 from below
      request = cloneInnerRequest(originalReq, true);
      request.redirectCount = 0; // reset to 0 - cloneInnerRequest copies the value
      signal = input[_signal];
    }

    // 12. is folded into the else statement of step 6 above.

    // 22.
    if (init.redirect !== undefined) {
      request.redirectMode = init.redirect;
    }

    // 25.
    if (init.method !== undefined) {
      const method = init.method;
      // fast path: check for known methods
      request.method = KNOWN_METHODS[method] ??
        validateAndNormalizeMethod(method);
    }

    // 26.
    if (init.signal !== undefined) {
      signal = init.signal;
    }

    // NOTE: non standard extension. This handles Deno.HttpClient parameter
    if (init.client !== undefined) {
      if (
        init.client !== null &&
        !ObjectPrototypeIsPrototypeOf(HttpClientPrototype, init.client)
      ) {
        throw webidl.makeException(
          TypeError,
          "`client` must be a Deno.HttpClient",
          prefix,
          "Argument 2",
        );
      }
      request.clientRid = init.client?.[internalRidSymbol] ?? null;
    }

    // 28.
    this[_request] = request;
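
    // A dependent signal is only created when the caller actually provided one
    // (via `input` or `init.signal`); otherwise [_signalCache] stays unset and
    // the lazy [_signal] getter above materializes a signal on first access.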
    // 29 & 30.
    if (signal !== null) {
      this[_signalCache] = createDependentAbortSignal([signal], prefix);
    }

    // 31.
    this[_headers] = headersFromHeaderList(request.headerList, "request");

    // 33.
    if (init.headers || ObjectKeys(init).length > 0) {
      const headerList = headerListFromHeaders(this[_headers]);
      const headers = init.headers ?? ArrayPrototypeSlice(
        headerList,
        0,
        headerList.length,
      );
      if (headerList.length !== 0) {
        ArrayPrototypeSplice(headerList, 0, headerList.length);
      }
      fillHeaders(this[_headers], headers);
    }

    // 34.
    let inputBody = null;
    if (ObjectPrototypeIsPrototypeOf(RequestPrototype, input)) {
      inputBody = input[_body];
    }

    // 35.
    if (
      (request.method === "GET" || request.method === "HEAD") &&
      ((init.body !== undefined && init.body !== null) ||
        inputBody !== null)
    ) {
      throw new TypeError("Request with GET/HEAD method cannot have body");
    }

    // 36.
    let initBody = null;

    // 37.
    if (init.body !== undefined && init.body !== null) {
      const res = extractBody(init.body);
      initBody = res.body;
      if (res.contentType !== null && !this[_headers].has("content-type")) {
        this[_headers].append("Content-Type", res.contentType);
      }
    }

    // 38.
    const inputOrInitBody = initBody ?? inputBody;

    // 40.
    let finalBody = inputOrInitBody;

    // 41.
    if (initBody === null && inputBody !== null) {
      if (input[_body] && input[_body].unusable()) {
        throw new TypeError("Input request's body is unusable");
      }
      finalBody = inputBody.createProxy();
    }

    // 42.
    request.body = finalBody;
  }

  get method() {
    webidl.assertBranded(this, RequestPrototype);
    if (this[_method]) {
      return this[_method];
    }
    this[_method] = this[_request].method;
    return this[_method];
  }

  get url() {
    webidl.assertBranded(this, RequestPrototype);
    if (this[_url]) {
      return this[_url];
    }

    this[_url] = this[_request].url();
    return this[_url];
  }

  get headers() {
    webidl.assertBranded(this, RequestPrototype);
    return this[_headers];
  }

  get redirect() {
    webidl.assertBranded(this, RequestPrototype);
    return this[_request].redirectMode;
  }

  get signal() {
    webidl.assertBranded(this, RequestPrototype);
    return this[_signal];
  }

  clone() {
    const prefix = "Failed to execute 'Request.clone'";
    webidl.assertBranded(this, RequestPrototype);
    if (this[_body] && this[_body].unusable()) {
      throw new TypeError("Body is unusable");
    }
    const clonedReq = cloneInnerRequest(this[_request]);

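    // Materialize this request's signal (even if it has never been accessed)
    // and derive the clone's signal from it, so aborting the original request
    // also aborts the clone.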
    const materializedSignal = this[_signal];
    const clonedSignal = createDependentAbortSignal(
      [materializedSignal],
      prefix,
    );

    const request = new Request(_brand);
    request[_request] = clonedReq;
    request[_signalCache] = clonedSignal;
    request[_getHeaders] = () =>
      headersFromHeaderList(
        clonedReq.headerList,
        guardFromHeaders(this[_headers]),
      );
    return request;
  }

  [SymbolFor("Deno.privateCustomInspect")](inspect, inspectOptions) {
    return inspect(
      createFilteredInspectProxy({
        object: this,
        evaluate: ObjectPrototypeIsPrototypeOf(RequestPrototype, this),
        keys: [
          "bodyUsed",
          "headers",
          "method",
          "redirect",
          "url",
        ],
      }),
      inspectOptions,
    );
  }
}

webidl.configureInterface(Request);
const RequestPrototype = Request.prototype;
mixinBody(RequestPrototype, _body, _mimeType);

webidl.converters["Request"] = webidl.createInterfaceConverter(
  "Request",
  RequestPrototype,
);
webidl.converters["RequestInfo_DOMString"] = (V, prefix, context, opts) => {
  // Union for (Request or USVString)
  if (typeof V == "object") {
    if (ObjectPrototypeIsPrototypeOf(RequestPrototype, V)) {
      return webidl.converters["Request"](V, prefix, context, opts);
    }
  }
  // Passed to new URL(...) which implicitly converts DOMString -> USVString
  return webidl.converters["DOMString"](V, prefix, context, opts);
};
|
|
|
|
webidl.converters["RequestRedirect"] = webidl.createEnumConverter(
|
|
|
|
"RequestRedirect",
|
|
|
|
[
|
|
|
|
"follow",
|
|
|
|
"error",
|
|
|
|
"manual",
|
|
|
|
],
|
|
|
|
);
|
|
|
|
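
// RequestInit members; `client` is a Deno extension (an HTTP client handle,
// e.g. from Deno.createHttpClient), not part of the Fetch spec.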
webidl.converters["RequestInit"] = webidl.createDictionaryConverter(
|
|
|
|
"RequestInit",
|
|
|
|
[
|
|
|
|
{ key: "method", converter: webidl.converters["ByteString"] },
|
|
|
|
{ key: "headers", converter: webidl.converters["HeadersInit"] },
|
|
|
|
{
|
|
|
|
key: "body",
|
|
|
|
converter: webidl.createNullableConverter(
|
|
|
|
webidl.converters["BodyInit_DOMString"],
|
|
|
|
),
|
|
|
|
},
|
|
|
|
{ key: "redirect", converter: webidl.converters["RequestRedirect"] },
|
|
|
|
{
|
|
|
|
key: "signal",
|
|
|
|
converter: webidl.createNullableConverter(
|
|
|
|
webidl.converters["AbortSignal"],
|
|
|
|
),
|
|
|
|
},
|
|
|
|
{ key: "client", converter: webidl.converters.any },
|
|
|
|
],
|
|
|
|
);
|
|
|
|
|
|
|
|
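
// Unwraps the InnerRequest backing a Request; inverse of fromInnerRequest.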
/**
 * @param {Request} request
 * @returns {InnerRequest}
 */
function toInnerRequest(request) {
  return request[_request];
}
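
// Wraps an existing InnerRequest in a Request without doing any eager work:
// headers are materialized on first access via _getHeaders, and no
// AbortSignal is allocated until request.signal is actually used.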
/**
 * @param {InnerRequest} inner
 * @param {"request" | "immutable" | "request-no-cors" | "response" | "none"} guard
 * @returns {Request}
 */
function fromInnerRequest(inner, guard) {
  const request = new Request(_brand);
  request[_request] = inner;
  request[_getHeaders] = () => headersFromHeaderList(inner.headerList, guard);
  return request;
}
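
// A single frozen AbortError shared by every request aborted through
// abortRequest(), so no new DOMException is allocated per aborted request.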
const signalAbortError = new DOMException(
  "The request has been cancelled.",
  "AbortError",
);
ObjectFreeze(signalAbortError);
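
// If the signal has been materialized, abort it; otherwise record the abort
// in the cache (`false`) so that a signal created later starts out aborted.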
function abortRequest(request) {
  if (request[_signalCache] !== undefined) {
    request[_signal][signalAbort](signalAbortError);
  } else {
    request[_signalCache] = false;
  }
}
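
// Peek at the raw signal cache slot without creating a signal: `undefined`
// means no signal was ever created, `false` means the request was aborted
// before a signal existed, otherwise it is the materialized AbortSignal.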
function getCachedAbortSignal(request) {
  return request[_signalCache];
}

// For testing
internals.getCachedAbortSignal = getCachedAbortSignal;
export {
  abortRequest,
  fromInnerRequest,
  newInnerRequest,
  processUrlList,
  Request,
  RequestPrototype,
  toInnerRequest,
};