2024-01-01 14:58:21 -05:00
|
|
|
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
2023-09-07 09:09:16 -04:00
|
|
|
|
2023-12-07 08:21:01 -05:00
|
|
|
import { core, internals, primordials } from "ext:core/mod.js";
|
2024-01-10 17:37:25 -05:00
|
|
|
const {
|
|
|
|
BadResourcePrototype,
|
|
|
|
InterruptedPrototype,
|
2024-04-24 14:03:37 -04:00
|
|
|
Interrupted,
|
2024-01-26 14:04:07 -05:00
|
|
|
internalRidSymbol,
|
2024-01-10 17:37:25 -05:00
|
|
|
} = core;
|
2024-01-26 17:46:46 -05:00
|
|
|
import {
|
|
|
|
op_http_cancel,
|
|
|
|
op_http_close,
|
2024-01-10 17:37:25 -05:00
|
|
|
op_http_close_after_finish,
|
|
|
|
op_http_get_request_headers,
|
|
|
|
op_http_get_request_method_and_url,
|
|
|
|
op_http_read_request_body,
|
|
|
|
op_http_serve,
|
|
|
|
op_http_serve_on,
|
|
|
|
op_http_set_promise_complete,
|
|
|
|
op_http_set_response_body_bytes,
|
|
|
|
op_http_set_response_body_resource,
|
|
|
|
op_http_set_response_body_text,
|
|
|
|
op_http_set_response_header,
|
|
|
|
op_http_set_response_headers,
|
|
|
|
op_http_set_response_trailers,
|
2024-01-26 17:46:46 -05:00
|
|
|
op_http_try_wait,
|
2024-01-10 17:37:25 -05:00
|
|
|
op_http_upgrade_raw,
|
|
|
|
op_http_upgrade_websocket_next,
|
|
|
|
op_http_wait,
|
2024-01-26 17:46:46 -05:00
|
|
|
} from "ext:core/ops";
|
2024-01-10 17:37:25 -05:00
|
|
|
const {
|
|
|
|
ArrayPrototypePush,
|
|
|
|
ObjectHasOwn,
|
|
|
|
ObjectPrototypeIsPrototypeOf,
|
|
|
|
PromisePrototypeCatch,
|
|
|
|
PromisePrototypeThen,
|
2024-06-08 20:03:07 -04:00
|
|
|
StringPrototypeIncludes,
|
2024-01-10 17:37:25 -05:00
|
|
|
Symbol,
|
|
|
|
TypeError,
|
|
|
|
TypedArrayPrototypeGetSymbolToStringTag,
|
|
|
|
Uint8Array,
|
2024-04-24 14:03:37 -04:00
|
|
|
Promise,
|
2024-01-10 17:37:25 -05:00
|
|
|
} = primordials;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
import { InnerBody } from "ext:deno_fetch/22_body.js";
|
|
|
|
import { Event } from "ext:deno_web/02_event.js";
|
|
|
|
import {
|
|
|
|
fromInnerResponse,
|
|
|
|
newInnerResponse,
|
2023-11-07 17:52:44 -05:00
|
|
|
ResponsePrototype,
|
2023-04-22 13:48:21 -04:00
|
|
|
toInnerResponse,
|
|
|
|
} from "ext:deno_fetch/23_response.js";
|
perf(ext/http): recover memory for serve and optimize AbortController (#23559)
Max rps without a signal is unchanged, however we can drastically reduce
memory usage by not creating the signal until needed, and we can
optimize the rps in the case where the signal is created.
With a quick memory benchmark, it looks like this helps pretty
drastically with # of GCs when benchmarking w/wrk:
- 1.42.4: 1763
- canary: 1093
- this patch: 874
This branch:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 87.33us 439.95us 20.68ms 99.67%
Req/Sec 66.70k 6.39k 74.11k 83.66%
1340255 requests in 10.10s, 191.73MB read
Requests/sec: 132696.90
Transfer/sec: 18.98MB
cpu: Apple M2 Pro
runtime: deno 1.43.0 (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 986.5 ns/iter 1,013,682.6 (878.2 ns … 1.18 µs) 1.01 µs 1.18 µs 1.18 µs
newAbortController 18 ns/iter 55,541,104.1 (15.6 ns … 42.62 ns) 17.71 ns 25.05 ns 26.27 ns
newAbortControllerSignal 18.66 ns/iter 53,578,966.7 (16.49 ns … 32.16 ns) 18.71 ns 25.67 ns 26.39 ns
newAbortControllerSignalOnAbort 106.49 ns/iter 9,390,164.9 (97.87 ns … 120.61 ns) 108.6 ns 114.24 ns 115.89 ns
newAbortControllerSignalAddEventListener 86.92 ns/iter 11,504,880.2 (81.88 ns … 103.15 ns) 90 ns 98.28 ns 99.55 ns
newAbortControllerSignalOnAbortNoListener 3.01 µs/iter 331,964.4 (2.97 µs … 3.1 µs) 3.06 µs 3.1 µs 3.1 µs
newAbortControllerSignalOnAbortAbort 3.26 µs/iter 306,662.6 (3.22 µs … 3.36 µs) 3.27 µs 3.36 µs 3.36 µs
```
Latest canary:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 72.86us 71.23us 4.47ms 99.05%
Req/Sec 64.66k 5.54k 72.48k 82.18%
1299015 requests in 10.10s, 185.83MB read
Requests/sec: 128616.02
Transfer/sec: 18.40MB
cpu: Apple M2 Pro
runtime: deno 1.43.0+bc4aa5f (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 1.25 µs/iter 800,005.2 (1.01 µs … 4.18 µs) 1.16 µs 4.18 µs 4.18 µs
newAbortController 18.56 ns/iter 53,868,204.3 (16.04 ns … 38.73 ns) 18.38 ns 26.1 ns 26.63 ns
newAbortControllerSignal 18.72 ns/iter 53,430,746.1 (16.13 ns … 36.71 ns) 18.71 ns 26.19 ns 26.98 ns
newAbortControllerSignalOnAbort 193.91 ns/iter 5,156,992.4 (184.25 ns … 211.41 ns) 194.96 ns 207.87 ns 209.4 ns
newAbortControllerSignalAddEventListener 171.45 ns/iter 5,832,569.2 (153 ns … 182.03 ns) 176.17 ns 180.75 ns 181.05 ns
newAbortControllerSignalOnAbortNoListener 3.07 µs/iter 326,263.3 (2.98 µs … 3.17 µs) 3.08 µs 3.17 µs 3.17 µs
newAbortControllerSignalOnAbortAbort 3.32 µs/iter 301,344.6 (3.29 µs … 3.4 µs) 3.33 µs 3.4 µs 3.4 µs
```
2024-04-25 14:52:24 -04:00
|
|
|
import {
|
|
|
|
abortRequest,
|
|
|
|
fromInnerRequest,
|
|
|
|
toInnerRequest,
|
|
|
|
} from "ext:deno_fetch/23_request.js";
|
2023-04-22 13:48:21 -04:00
|
|
|
import { AbortController } from "ext:deno_web/03_abort_signal.js";
|
|
|
|
import {
|
|
|
|
_eventLoop,
|
|
|
|
_idleTimeoutDuration,
|
|
|
|
_idleTimeoutTimeout,
|
|
|
|
_protocol,
|
|
|
|
_readyState,
|
|
|
|
_rid,
|
|
|
|
_role,
|
|
|
|
_server,
|
|
|
|
_serverHandleIdleTimeout,
|
|
|
|
SERVER,
|
|
|
|
WebSocket,
|
|
|
|
} from "ext:deno_websocket/01_websocket.js";
|
|
|
|
import {
|
|
|
|
Deferred,
|
|
|
|
getReadableStreamResourceBacking,
|
|
|
|
readableStreamForRid,
|
|
|
|
ReadableStreamPrototype,
|
feat(ext/web): resourceForReadableStream (#20180)
Extracted from fast streams work.
This is a resource wrapper for `ReadableStream`, allowing us to treat
all `ReadableStream` instances as resources, and remove special paths in
both `fetch` and `serve`.
Performance with a ReadableStream response yields ~18% improvement:
```
return new Response(new ReadableStream({
start(controller) {
controller.enqueue(new Uint8Array([104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100]));
controller.close();
}
})
```
This patch:
```
12:36 $ third_party/prebuilt/mac/wrk http://localhost:8080
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 99.96us 100.03us 6.65ms 98.84%
Req/Sec 47.73k 2.43k 51.02k 89.11%
959308 requests in 10.10s, 117.10MB read
Requests/sec: 94978.71
Transfer/sec: 11.59MB
```
main:
```
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 163.03us 685.51us 19.73ms 99.27%
Req/Sec 39.50k 3.98k 66.11k 95.52%
789582 requests in 10.10s, 82.83MB read
Requests/sec: 78182.65
Transfer/sec: 8.20MB
```
2023-08-17 09:52:37 -04:00
|
|
|
resourceForReadableStream,
|
2023-04-22 13:48:21 -04:00
|
|
|
} from "ext:deno_web/06_streams.js";
|
2023-10-03 22:37:39 -04:00
|
|
|
import { listen, listenOptionApiName, TcpConn } from "ext:deno_net/01_net.js";
|
2024-04-08 17:01:02 -04:00
|
|
|
import { hasTlsKeyPairOptions, listenTls } from "ext:deno_net/02_tls.js";
|
2023-11-01 15:26:12 -04:00
|
|
|
import { SymbolAsyncDispose } from "ext:deno_web/00_infra.js";
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
// Private marker symbol used to expose InnerRequest's upgrade state (see `get [_upgraded]()`).
const _upgraded = Symbol("_upgraded");
|
|
|
|
|
|
|
|
function internalServerError() {
|
|
|
|
// "Internal Server Error"
|
|
|
|
return new Response(
|
|
|
|
new Uint8Array([
|
|
|
|
73,
|
|
|
|
110,
|
|
|
|
116,
|
|
|
|
101,
|
|
|
|
114,
|
|
|
|
110,
|
|
|
|
97,
|
|
|
|
108,
|
|
|
|
32,
|
|
|
|
83,
|
|
|
|
101,
|
|
|
|
114,
|
|
|
|
118,
|
|
|
|
101,
|
|
|
|
114,
|
|
|
|
32,
|
|
|
|
69,
|
|
|
|
114,
|
|
|
|
114,
|
|
|
|
111,
|
|
|
|
114,
|
|
|
|
]),
|
|
|
|
{ status: 500 },
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Used to ensure that user returns a valid response (but not a different response) from handlers that are upgraded.
|
|
|
|
const UPGRADE_RESPONSE_SENTINEL = fromInnerResponse(
|
|
|
|
newInnerResponse(101),
|
|
|
|
"immutable",
|
|
|
|
);
|
|
|
|
|
2023-04-26 18:58:18 -04:00
|
|
|
function upgradeHttpRaw(req, conn) {
|
|
|
|
const inner = toInnerRequest(req);
|
|
|
|
if (inner._wantsUpgrade) {
|
|
|
|
return inner._wantsUpgrade("upgradeHttpRaw", conn);
|
|
|
|
}
|
|
|
|
throw new TypeError("upgradeHttpRaw may only be used with Deno.serve");
|
|
|
|
}
|
|
|
|
|
2023-05-18 22:10:25 -04:00
|
|
|
function addTrailers(resp, headerList) {
|
|
|
|
const inner = toInnerResponse(resp);
|
2023-11-13 09:04:49 -05:00
|
|
|
op_http_set_response_trailers(inner.external, headerList);
|
2023-05-18 22:10:25 -04:00
|
|
|
}
|
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
class InnerRequest {
|
2023-11-13 09:04:49 -05:00
|
|
|
#external;
|
2023-04-22 13:48:21 -04:00
|
|
|
#context;
|
|
|
|
#methodAndUri;
|
|
|
|
#streamRid;
|
|
|
|
#body;
|
|
|
|
#upgraded;
|
2023-07-30 09:13:28 -04:00
|
|
|
#urlValue;
|
2024-04-24 14:03:37 -04:00
|
|
|
#completed;
|
perf(ext/http): recover memory for serve and optimize AbortController (#23559)
Max rps without a signal is unchanged, however we can drastically reduce
memory usage by not creating the signal until needed, and we can
optimize the rps in the case where the signal is created.
With a quick memory benchmark, it looks like this helps pretty
drastically with # of GCs when benchmarking w/wrk:
- 1.42.4: 1763
- canary: 1093
- this patch: 874
This branch:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 87.33us 439.95us 20.68ms 99.67%
Req/Sec 66.70k 6.39k 74.11k 83.66%
1340255 requests in 10.10s, 191.73MB read
Requests/sec: 132696.90
Transfer/sec: 18.98MB
cpu: Apple M2 Pro
runtime: deno 1.43.0 (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 986.5 ns/iter 1,013,682.6 (878.2 ns … 1.18 µs) 1.01 µs 1.18 µs 1.18 µs
newAbortController 18 ns/iter 55,541,104.1 (15.6 ns … 42.62 ns) 17.71 ns 25.05 ns 26.27 ns
newAbortControllerSignal 18.66 ns/iter 53,578,966.7 (16.49 ns … 32.16 ns) 18.71 ns 25.67 ns 26.39 ns
newAbortControllerSignalOnAbort 106.49 ns/iter 9,390,164.9 (97.87 ns … 120.61 ns) 108.6 ns 114.24 ns 115.89 ns
newAbortControllerSignalAddEventListener 86.92 ns/iter 11,504,880.2 (81.88 ns … 103.15 ns) 90 ns 98.28 ns 99.55 ns
newAbortControllerSignalOnAbortNoListener 3.01 µs/iter 331,964.4 (2.97 µs … 3.1 µs) 3.06 µs 3.1 µs 3.1 µs
newAbortControllerSignalOnAbortAbort 3.26 µs/iter 306,662.6 (3.22 µs … 3.36 µs) 3.27 µs 3.36 µs 3.36 µs
```
Latest canary:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 72.86us 71.23us 4.47ms 99.05%
Req/Sec 64.66k 5.54k 72.48k 82.18%
1299015 requests in 10.10s, 185.83MB read
Requests/sec: 128616.02
Transfer/sec: 18.40MB
cpu: Apple M2 Pro
runtime: deno 1.43.0+bc4aa5f (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 1.25 µs/iter 800,005.2 (1.01 µs … 4.18 µs) 1.16 µs 4.18 µs 4.18 µs
newAbortController 18.56 ns/iter 53,868,204.3 (16.04 ns … 38.73 ns) 18.38 ns 26.1 ns 26.63 ns
newAbortControllerSignal 18.72 ns/iter 53,430,746.1 (16.13 ns … 36.71 ns) 18.71 ns 26.19 ns 26.98 ns
newAbortControllerSignalOnAbort 193.91 ns/iter 5,156,992.4 (184.25 ns … 211.41 ns) 194.96 ns 207.87 ns 209.4 ns
newAbortControllerSignalAddEventListener 171.45 ns/iter 5,832,569.2 (153 ns … 182.03 ns) 176.17 ns 180.75 ns 181.05 ns
newAbortControllerSignalOnAbortNoListener 3.07 µs/iter 326,263.3 (2.98 µs … 3.17 µs) 3.08 µs 3.17 µs 3.17 µs
newAbortControllerSignalOnAbortAbort 3.32 µs/iter 301,344.6 (3.29 µs … 3.4 µs) 3.33 µs 3.4 µs 3.4 µs
```
2024-04-25 14:52:24 -04:00
|
|
|
request;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
perf(ext/http): recover memory for serve and optimize AbortController (#23559)
Max rps without a signal is unchanged, however we can drastically reduce
memory usage by not creating the signal until needed, and we can
optimize the rps in the case where the signal is created.
With a quick memory benchmark, it looks like this helps pretty
drastically with # of GCs when benchmarking w/wrk:
- 1.42.4: 1763
- canary: 1093
- this patch: 874
This branch:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 87.33us 439.95us 20.68ms 99.67%
Req/Sec 66.70k 6.39k 74.11k 83.66%
1340255 requests in 10.10s, 191.73MB read
Requests/sec: 132696.90
Transfer/sec: 18.98MB
cpu: Apple M2 Pro
runtime: deno 1.43.0 (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 986.5 ns/iter 1,013,682.6 (878.2 ns … 1.18 µs) 1.01 µs 1.18 µs 1.18 µs
newAbortController 18 ns/iter 55,541,104.1 (15.6 ns … 42.62 ns) 17.71 ns 25.05 ns 26.27 ns
newAbortControllerSignal 18.66 ns/iter 53,578,966.7 (16.49 ns … 32.16 ns) 18.71 ns 25.67 ns 26.39 ns
newAbortControllerSignalOnAbort 106.49 ns/iter 9,390,164.9 (97.87 ns … 120.61 ns) 108.6 ns 114.24 ns 115.89 ns
newAbortControllerSignalAddEventListener 86.92 ns/iter 11,504,880.2 (81.88 ns … 103.15 ns) 90 ns 98.28 ns 99.55 ns
newAbortControllerSignalOnAbortNoListener 3.01 µs/iter 331,964.4 (2.97 µs … 3.1 µs) 3.06 µs 3.1 µs 3.1 µs
newAbortControllerSignalOnAbortAbort 3.26 µs/iter 306,662.6 (3.22 µs … 3.36 µs) 3.27 µs 3.36 µs 3.36 µs
```
Latest canary:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 72.86us 71.23us 4.47ms 99.05%
Req/Sec 64.66k 5.54k 72.48k 82.18%
1299015 requests in 10.10s, 185.83MB read
Requests/sec: 128616.02
Transfer/sec: 18.40MB
cpu: Apple M2 Pro
runtime: deno 1.43.0+bc4aa5f (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 1.25 µs/iter 800,005.2 (1.01 µs … 4.18 µs) 1.16 µs 4.18 µs 4.18 µs
newAbortController 18.56 ns/iter 53,868,204.3 (16.04 ns … 38.73 ns) 18.38 ns 26.1 ns 26.63 ns
newAbortControllerSignal 18.72 ns/iter 53,430,746.1 (16.13 ns … 36.71 ns) 18.71 ns 26.19 ns 26.98 ns
newAbortControllerSignalOnAbort 193.91 ns/iter 5,156,992.4 (184.25 ns … 211.41 ns) 194.96 ns 207.87 ns 209.4 ns
newAbortControllerSignalAddEventListener 171.45 ns/iter 5,832,569.2 (153 ns … 182.03 ns) 176.17 ns 180.75 ns 181.05 ns
newAbortControllerSignalOnAbortNoListener 3.07 µs/iter 326,263.3 (2.98 µs … 3.17 µs) 3.08 µs 3.17 µs 3.17 µs
newAbortControllerSignalOnAbortAbort 3.32 µs/iter 301,344.6 (3.29 µs … 3.4 µs) 3.33 µs 3.4 µs 3.4 µs
```
2024-04-25 14:52:24 -04:00
|
|
|
constructor(external, context) {
|
2023-11-13 09:04:49 -05:00
|
|
|
this.#external = external;
|
2023-04-22 13:48:21 -04:00
|
|
|
this.#context = context;
|
|
|
|
this.#upgraded = false;
|
2024-04-24 14:03:37 -04:00
|
|
|
this.#completed = undefined;
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
2024-04-24 14:03:37 -04:00
|
|
|
close(success = true) {
|
|
|
|
// The completion signal fires only if someone cares
|
|
|
|
if (this.#completed) {
|
|
|
|
if (success) {
|
|
|
|
this.#completed.resolve(undefined);
|
|
|
|
} else {
|
|
|
|
this.#completed.reject(
|
|
|
|
new Interrupted("HTTP response was not sent successfully"),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
perf(ext/http): recover memory for serve and optimize AbortController (#23559)
Max rps without a signal is unchanged, however we can drastically reduce
memory usage by not creating the signal until needed, and we can
optimize the rps in the case where the signal is created.
With a quick memory benchmark, it looks like this helps pretty
drastically with # of GCs when benchmarking w/wrk:
- 1.42.4: 1763
- canary: 1093
- this patch: 874
This branch:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 87.33us 439.95us 20.68ms 99.67%
Req/Sec 66.70k 6.39k 74.11k 83.66%
1340255 requests in 10.10s, 191.73MB read
Requests/sec: 132696.90
Transfer/sec: 18.98MB
cpu: Apple M2 Pro
runtime: deno 1.43.0 (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 986.5 ns/iter 1,013,682.6 (878.2 ns … 1.18 µs) 1.01 µs 1.18 µs 1.18 µs
newAbortController 18 ns/iter 55,541,104.1 (15.6 ns … 42.62 ns) 17.71 ns 25.05 ns 26.27 ns
newAbortControllerSignal 18.66 ns/iter 53,578,966.7 (16.49 ns … 32.16 ns) 18.71 ns 25.67 ns 26.39 ns
newAbortControllerSignalOnAbort 106.49 ns/iter 9,390,164.9 (97.87 ns … 120.61 ns) 108.6 ns 114.24 ns 115.89 ns
newAbortControllerSignalAddEventListener 86.92 ns/iter 11,504,880.2 (81.88 ns … 103.15 ns) 90 ns 98.28 ns 99.55 ns
newAbortControllerSignalOnAbortNoListener 3.01 µs/iter 331,964.4 (2.97 µs … 3.1 µs) 3.06 µs 3.1 µs 3.1 µs
newAbortControllerSignalOnAbortAbort 3.26 µs/iter 306,662.6 (3.22 µs … 3.36 µs) 3.27 µs 3.36 µs 3.36 µs
```
Latest canary:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 72.86us 71.23us 4.47ms 99.05%
Req/Sec 64.66k 5.54k 72.48k 82.18%
1299015 requests in 10.10s, 185.83MB read
Requests/sec: 128616.02
Transfer/sec: 18.40MB
cpu: Apple M2 Pro
runtime: deno 1.43.0+bc4aa5f (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 1.25 µs/iter 800,005.2 (1.01 µs … 4.18 µs) 1.16 µs 4.18 µs 4.18 µs
newAbortController 18.56 ns/iter 53,868,204.3 (16.04 ns … 38.73 ns) 18.38 ns 26.1 ns 26.63 ns
newAbortControllerSignal 18.72 ns/iter 53,430,746.1 (16.13 ns … 36.71 ns) 18.71 ns 26.19 ns 26.98 ns
newAbortControllerSignalOnAbort 193.91 ns/iter 5,156,992.4 (184.25 ns … 211.41 ns) 194.96 ns 207.87 ns 209.4 ns
newAbortControllerSignalAddEventListener 171.45 ns/iter 5,832,569.2 (153 ns … 182.03 ns) 176.17 ns 180.75 ns 181.05 ns
newAbortControllerSignalOnAbortNoListener 3.07 µs/iter 326,263.3 (2.98 µs … 3.17 µs) 3.08 µs 3.17 µs 3.17 µs
newAbortControllerSignalOnAbortAbort 3.32 µs/iter 301,344.6 (3.29 µs … 3.4 µs) 3.33 µs 3.4 µs 3.4 µs
```
2024-04-25 14:52:24 -04:00
|
|
|
abortRequest(this.request);
|
2023-11-13 09:04:49 -05:00
|
|
|
this.#external = null;
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
get [_upgraded]() {
|
|
|
|
return this.#upgraded;
|
|
|
|
}
|
|
|
|
|
|
|
|
_wantsUpgrade(upgradeType, ...originalArgs) {
|
2023-04-23 11:59:46 -04:00
|
|
|
if (this.#upgraded) {
|
|
|
|
throw new Deno.errors.Http("already upgraded");
|
|
|
|
}
|
2023-11-13 09:04:49 -05:00
|
|
|
if (this.#external === null) {
|
2023-04-23 11:59:46 -04:00
|
|
|
throw new Deno.errors.Http("already closed");
|
|
|
|
}
|
|
|
|
|
2023-04-26 18:58:18 -04:00
|
|
|
// upgradeHttpRaw is sync
|
2023-04-22 13:48:21 -04:00
|
|
|
if (upgradeType == "upgradeHttpRaw") {
|
2023-11-13 09:04:49 -05:00
|
|
|
const external = this.#external;
|
2023-04-26 18:58:18 -04:00
|
|
|
const underlyingConn = originalArgs[0];
|
|
|
|
|
|
|
|
this.url();
|
|
|
|
this.headerList;
|
|
|
|
this.close();
|
|
|
|
|
|
|
|
this.#upgraded = () => {};
|
|
|
|
|
2023-11-13 09:04:49 -05:00
|
|
|
const upgradeRid = op_http_upgrade_raw(external);
|
2023-04-26 18:58:18 -04:00
|
|
|
|
|
|
|
const conn = new TcpConn(
|
|
|
|
upgradeRid,
|
|
|
|
underlyingConn?.remoteAddr,
|
|
|
|
underlyingConn?.localAddr,
|
|
|
|
);
|
|
|
|
|
|
|
|
return { response: UPGRADE_RESPONSE_SENTINEL, conn };
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// upgradeWebSocket is sync
|
|
|
|
if (upgradeType == "upgradeWebSocket") {
|
|
|
|
const response = originalArgs[0];
|
|
|
|
const ws = originalArgs[1];
|
|
|
|
|
2023-11-13 09:04:49 -05:00
|
|
|
const external = this.#external;
|
2023-04-23 11:59:46 -04:00
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
this.url();
|
|
|
|
this.headerList;
|
|
|
|
this.close();
|
|
|
|
|
|
|
|
const goAhead = new Deferred();
|
|
|
|
this.#upgraded = () => {
|
|
|
|
goAhead.resolve();
|
|
|
|
};
|
2023-11-13 09:04:49 -05:00
|
|
|
const wsPromise = op_http_upgrade_websocket_next(
|
|
|
|
external,
|
|
|
|
response.headerList,
|
|
|
|
);
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
// Start the upgrade in the background.
|
|
|
|
(async () => {
|
|
|
|
try {
|
2023-05-15 19:24:41 -04:00
|
|
|
// Returns the upgraded websocket connection
|
2023-11-13 09:04:49 -05:00
|
|
|
const wsRid = await wsPromise;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
// We have to wait for the go-ahead signal
|
|
|
|
await goAhead;
|
|
|
|
|
|
|
|
ws[_rid] = wsRid;
|
|
|
|
ws[_readyState] = WebSocket.OPEN;
|
|
|
|
ws[_role] = SERVER;
|
|
|
|
const event = new Event("open");
|
|
|
|
ws.dispatchEvent(event);
|
|
|
|
|
|
|
|
ws[_eventLoop]();
|
|
|
|
if (ws[_idleTimeoutDuration]) {
|
|
|
|
ws.addEventListener(
|
|
|
|
"close",
|
|
|
|
() => clearTimeout(ws[_idleTimeoutTimeout]),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
ws[_serverHandleIdleTimeout]();
|
|
|
|
} catch (error) {
|
|
|
|
const event = new ErrorEvent("error", { error });
|
|
|
|
ws.dispatchEvent(event);
|
|
|
|
}
|
|
|
|
})();
|
|
|
|
return { response: UPGRADE_RESPONSE_SENTINEL, socket: ws };
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
url() {
|
2023-07-30 09:13:28 -04:00
|
|
|
if (this.#urlValue !== undefined) {
|
|
|
|
return this.#urlValue;
|
|
|
|
}
|
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
if (this.#methodAndUri === undefined) {
|
2023-11-13 09:04:49 -05:00
|
|
|
if (this.#external === null) {
|
2023-04-22 13:48:21 -04:00
|
|
|
throw new TypeError("request closed");
|
|
|
|
}
|
|
|
|
// TODO(mmastrac): This is quite slow as we're serializing a large number of values. We may want to consider
|
|
|
|
// splitting this up into multiple ops.
|
2023-11-13 09:04:49 -05:00
|
|
|
this.#methodAndUri = op_http_get_request_method_and_url(this.#external);
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
const path = this.#methodAndUri[2];
|
|
|
|
|
|
|
|
// * is valid for OPTIONS
|
|
|
|
if (path === "*") {
|
2023-07-30 09:13:28 -04:00
|
|
|
return this.#urlValue = "*";
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// If the path is empty, return the authority (valid for CONNECT)
|
|
|
|
if (path == "") {
|
2023-07-30 09:13:28 -04:00
|
|
|
return this.#urlValue = this.#methodAndUri[1];
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// CONNECT requires an authority
|
|
|
|
if (this.#methodAndUri[0] == "CONNECT") {
|
2023-07-30 09:13:28 -04:00
|
|
|
return this.#urlValue = this.#methodAndUri[1];
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
const hostname = this.#methodAndUri[1];
|
|
|
|
if (hostname) {
|
|
|
|
// Construct a URL from the scheme, the hostname, and the path
|
2023-07-30 09:13:28 -04:00
|
|
|
return this.#urlValue = this.#context.scheme + hostname + path;
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// Construct a URL from the scheme, the fallback hostname, and the path
|
2023-07-30 09:13:28 -04:00
|
|
|
return this.#urlValue = this.#context.scheme + this.#context.fallbackHost +
|
|
|
|
path;
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
2024-04-24 14:03:37 -04:00
|
|
|
get completed() {
|
|
|
|
if (!this.#completed) {
|
|
|
|
// NOTE: this is faster than Promise.withResolvers()
|
|
|
|
let resolve, reject;
|
|
|
|
const promise = new Promise((r1, r2) => {
|
|
|
|
resolve = r1;
|
|
|
|
reject = r2;
|
|
|
|
});
|
|
|
|
this.#completed = { promise, resolve, reject };
|
|
|
|
}
|
|
|
|
return this.#completed.promise;
|
|
|
|
}
|
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
get remoteAddr() {
|
2023-10-03 22:37:39 -04:00
|
|
|
const transport = this.#context.listener?.addr.transport;
|
|
|
|
if (transport === "unix" || transport === "unixpacket") {
|
|
|
|
return {
|
|
|
|
transport,
|
|
|
|
path: this.#context.listener.addr.path,
|
|
|
|
};
|
|
|
|
}
|
2023-04-22 13:48:21 -04:00
|
|
|
if (this.#methodAndUri === undefined) {
|
2023-11-13 09:04:49 -05:00
|
|
|
if (this.#external === null) {
|
2023-04-22 13:48:21 -04:00
|
|
|
throw new TypeError("request closed");
|
|
|
|
}
|
2023-11-13 09:04:49 -05:00
|
|
|
this.#methodAndUri = op_http_get_request_method_and_url(this.#external);
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
return {
|
|
|
|
transport: "tcp",
|
|
|
|
hostname: this.#methodAndUri[3],
|
|
|
|
port: this.#methodAndUri[4],
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
get method() {
|
|
|
|
if (this.#methodAndUri === undefined) {
|
2023-11-13 09:04:49 -05:00
|
|
|
if (this.#external === null) {
|
2023-04-22 13:48:21 -04:00
|
|
|
throw new TypeError("request closed");
|
|
|
|
}
|
2023-11-13 09:04:49 -05:00
|
|
|
this.#methodAndUri = op_http_get_request_method_and_url(this.#external);
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
return this.#methodAndUri[0];
|
|
|
|
}
|
|
|
|
|
|
|
|
get body() {
|
2023-11-13 09:04:49 -05:00
|
|
|
if (this.#external === null) {
|
2023-04-22 13:48:21 -04:00
|
|
|
throw new TypeError("request closed");
|
|
|
|
}
|
|
|
|
if (this.#body !== undefined) {
|
|
|
|
return this.#body;
|
|
|
|
}
|
|
|
|
// If the method is GET or HEAD, we do not want to include a body here, even if the Rust
|
|
|
|
// side of the code is willing to provide it to us.
|
|
|
|
if (this.method == "GET" || this.method == "HEAD") {
|
|
|
|
this.#body = null;
|
|
|
|
return null;
|
|
|
|
}
|
2023-11-13 09:04:49 -05:00
|
|
|
this.#streamRid = op_http_read_request_body(this.#external);
|
2023-04-22 13:48:21 -04:00
|
|
|
this.#body = new InnerBody(readableStreamForRid(this.#streamRid, false));
|
|
|
|
return this.#body;
|
|
|
|
}
|
|
|
|
|
|
|
|
get headerList() {
|
2023-11-13 09:04:49 -05:00
|
|
|
if (this.#external === null) {
|
2023-04-22 13:48:21 -04:00
|
|
|
throw new TypeError("request closed");
|
|
|
|
}
|
2023-06-02 11:59:16 -04:00
|
|
|
const headers = [];
|
2023-11-13 09:04:49 -05:00
|
|
|
const reqHeaders = op_http_get_request_headers(this.#external);
|
2023-06-02 11:59:16 -04:00
|
|
|
for (let i = 0; i < reqHeaders.length; i += 2) {
|
2023-06-05 15:57:01 -04:00
|
|
|
ArrayPrototypePush(headers, [reqHeaders[i], reqHeaders[i + 1]]);
|
2023-06-02 11:59:16 -04:00
|
|
|
}
|
|
|
|
return headers;
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
2023-11-13 09:04:49 -05:00
|
|
|
get external() {
|
|
|
|
return this.#external;
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
class CallbackContext {
|
2023-05-31 19:20:39 -04:00
|
|
|
abortController;
|
2023-04-22 13:48:21 -04:00
|
|
|
scheme;
|
|
|
|
fallbackHost;
|
|
|
|
serverRid;
|
|
|
|
closed;
|
2023-11-01 15:26:12 -04:00
|
|
|
/** @type {Promise<void> | undefined} */
|
2023-09-11 20:06:38 -04:00
|
|
|
closing;
|
2023-10-03 22:37:39 -04:00
|
|
|
listener;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2023-10-03 22:37:39 -04:00
|
|
|
constructor(signal, args, listener) {
|
2023-09-11 20:06:38 -04:00
|
|
|
// The abort signal triggers a non-graceful shutdown
|
2023-05-31 19:20:39 -04:00
|
|
|
signal?.addEventListener(
|
|
|
|
"abort",
|
2023-09-11 20:06:38 -04:00
|
|
|
() => {
|
|
|
|
op_http_cancel(this.serverRid, false);
|
|
|
|
},
|
2023-05-31 19:20:39 -04:00
|
|
|
{ once: true },
|
|
|
|
);
|
|
|
|
this.abortController = new AbortController();
|
2023-04-22 13:48:21 -04:00
|
|
|
this.serverRid = args[0];
|
|
|
|
this.scheme = args[1];
|
|
|
|
this.fallbackHost = args[2];
|
|
|
|
this.closed = false;
|
2023-10-03 22:37:39 -04:00
|
|
|
this.listener = listener;
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
close() {
|
|
|
|
try {
|
|
|
|
this.closed = true;
|
|
|
|
core.tryClose(this.serverRid);
|
|
|
|
} catch {
|
|
|
|
// Pass
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-08-10 13:45:55 -04:00
|
|
|
class ServeHandlerInfo {
|
2024-04-24 14:03:37 -04:00
|
|
|
#inner: InnerRequest;
|
|
|
|
constructor(inner: InnerRequest) {
|
2023-08-10 13:45:55 -04:00
|
|
|
this.#inner = inner;
|
|
|
|
}
|
|
|
|
get remoteAddr() {
|
|
|
|
return this.#inner.remoteAddr;
|
|
|
|
}
|
2024-04-24 14:03:37 -04:00
|
|
|
get completed() {
|
|
|
|
return this.#inner.completed;
|
|
|
|
}
|
2023-08-10 13:45:55 -04:00
|
|
|
}
|
|
|
|
|
2024-04-24 14:03:37 -04:00
|
|
|
function fastSyncResponseOrStream(
|
|
|
|
req,
|
|
|
|
respBody,
|
|
|
|
status,
|
|
|
|
innerRequest: InnerRequest,
|
|
|
|
) {
|
2023-04-22 13:48:21 -04:00
|
|
|
if (respBody === null || respBody === undefined) {
|
|
|
|
// Don't set the body
|
2023-11-13 14:17:31 -05:00
|
|
|
innerRequest?.close();
|
feat(ext/web): resourceForReadableStream (#20180)
Extracted from fast streams work.
This is a resource wrapper for `ReadableStream`, allowing us to treat
all `ReadableStream` instances as resources, and remove special paths in
both `fetch` and `serve`.
Performance with a ReadableStream response yields ~18% improvement:
```
return new Response(new ReadableStream({
start(controller) {
controller.enqueue(new Uint8Array([104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100]));
controller.close();
}
})
```
This patch:
```
12:36 $ third_party/prebuilt/mac/wrk http://localhost:8080
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 99.96us 100.03us 6.65ms 98.84%
Req/Sec 47.73k 2.43k 51.02k 89.11%
959308 requests in 10.10s, 117.10MB read
Requests/sec: 94978.71
Transfer/sec: 11.59MB
```
main:
```
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 163.03us 685.51us 19.73ms 99.27%
Req/Sec 39.50k 3.98k 66.11k 95.52%
789582 requests in 10.10s, 82.83MB read
Requests/sec: 78182.65
Transfer/sec: 8.20MB
```
2023-08-17 09:52:37 -04:00
|
|
|
op_http_set_promise_complete(req, status);
|
|
|
|
return;
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
const stream = respBody.streamOrStatic;
|
|
|
|
const body = stream.body;
|
|
|
|
|
2024-01-03 23:12:38 -05:00
|
|
|
if (TypedArrayPrototypeGetSymbolToStringTag(body) === "Uint8Array") {
|
2023-11-13 14:17:31 -05:00
|
|
|
innerRequest?.close();
|
feat(ext/web): resourceForReadableStream (#20180)
Extracted from fast streams work.
This is a resource wrapper for `ReadableStream`, allowing us to treat
all `ReadableStream` instances as resources, and remove special paths in
both `fetch` and `serve`.
Performance with a ReadableStream response yields ~18% improvement:
```
return new Response(new ReadableStream({
start(controller) {
controller.enqueue(new Uint8Array([104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100]));
controller.close();
}
})
```
This patch:
```
12:36 $ third_party/prebuilt/mac/wrk http://localhost:8080
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 99.96us 100.03us 6.65ms 98.84%
Req/Sec 47.73k 2.43k 51.02k 89.11%
959308 requests in 10.10s, 117.10MB read
Requests/sec: 94978.71
Transfer/sec: 11.59MB
```
main:
```
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 163.03us 685.51us 19.73ms 99.27%
Req/Sec 39.50k 3.98k 66.11k 95.52%
789582 requests in 10.10s, 82.83MB read
Requests/sec: 78182.65
Transfer/sec: 8.20MB
```
2023-08-17 09:52:37 -04:00
|
|
|
op_http_set_response_body_bytes(req, body, status);
|
|
|
|
return;
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (typeof body === "string") {
|
2023-11-13 14:17:31 -05:00
|
|
|
innerRequest?.close();
|
feat(ext/web): resourceForReadableStream (#20180)
Extracted from fast streams work.
This is a resource wrapper for `ReadableStream`, allowing us to treat
all `ReadableStream` instances as resources, and remove special paths in
both `fetch` and `serve`.
Performance with a ReadableStream response yields ~18% improvement:
```
return new Response(new ReadableStream({
start(controller) {
controller.enqueue(new Uint8Array([104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100]));
controller.close();
}
})
```
This patch:
```
12:36 $ third_party/prebuilt/mac/wrk http://localhost:8080
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 99.96us 100.03us 6.65ms 98.84%
Req/Sec 47.73k 2.43k 51.02k 89.11%
959308 requests in 10.10s, 117.10MB read
Requests/sec: 94978.71
Transfer/sec: 11.59MB
```
main:
```
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 163.03us 685.51us 19.73ms 99.27%
Req/Sec 39.50k 3.98k 66.11k 95.52%
789582 requests in 10.10s, 82.83MB read
Requests/sec: 78182.65
Transfer/sec: 8.20MB
```
2023-08-17 09:52:37 -04:00
|
|
|
op_http_set_response_body_text(req, body, status);
|
|
|
|
return;
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// At this point in the response it needs to be a stream
|
|
|
|
if (!ObjectPrototypeIsPrototypeOf(ReadableStreamPrototype, stream)) {
|
2023-11-13 14:17:31 -05:00
|
|
|
innerRequest?.close();
|
2023-04-22 13:48:21 -04:00
|
|
|
throw TypeError("invalid response");
|
|
|
|
}
|
|
|
|
const resourceBacking = getReadableStreamResourceBacking(stream);
|
2023-11-13 14:17:31 -05:00
|
|
|
let rid, autoClose;
|
2023-04-22 13:48:21 -04:00
|
|
|
if (resourceBacking) {
|
2023-11-13 14:17:31 -05:00
|
|
|
rid = resourceBacking.rid;
|
|
|
|
autoClose = resourceBacking.autoClose;
|
feat(ext/web): resourceForReadableStream (#20180)
Extracted from fast streams work.
This is a resource wrapper for `ReadableStream`, allowing us to treat
all `ReadableStream` instances as resources, and remove special paths in
both `fetch` and `serve`.
Performance with a ReadableStream response yields ~18% improvement:
```
return new Response(new ReadableStream({
start(controller) {
controller.enqueue(new Uint8Array([104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100]));
controller.close();
}
})
```
This patch:
```
12:36 $ third_party/prebuilt/mac/wrk http://localhost:8080
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 99.96us 100.03us 6.65ms 98.84%
Req/Sec 47.73k 2.43k 51.02k 89.11%
959308 requests in 10.10s, 117.10MB read
Requests/sec: 94978.71
Transfer/sec: 11.59MB
```
main:
```
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 163.03us 685.51us 19.73ms 99.27%
Req/Sec 39.50k 3.98k 66.11k 95.52%
789582 requests in 10.10s, 82.83MB read
Requests/sec: 78182.65
Transfer/sec: 8.20MB
```
2023-08-17 09:52:37 -04:00
|
|
|
} else {
|
2023-11-13 14:17:31 -05:00
|
|
|
rid = resourceForReadableStream(stream);
|
|
|
|
autoClose = true;
|
|
|
|
}
|
|
|
|
PromisePrototypeThen(
|
feat(ext/web): resourceForReadableStream (#20180)
Extracted from fast streams work.
This is a resource wrapper for `ReadableStream`, allowing us to treat
all `ReadableStream` instances as resources, and remove special paths in
both `fetch` and `serve`.
Performance with a ReadableStream response yields ~18% improvement:
```
return new Response(new ReadableStream({
start(controller) {
controller.enqueue(new Uint8Array([104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100]));
controller.close();
}
})
```
This patch:
```
12:36 $ third_party/prebuilt/mac/wrk http://localhost:8080
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 99.96us 100.03us 6.65ms 98.84%
Req/Sec 47.73k 2.43k 51.02k 89.11%
959308 requests in 10.10s, 117.10MB read
Requests/sec: 94978.71
Transfer/sec: 11.59MB
```
main:
```
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 163.03us 685.51us 19.73ms 99.27%
Req/Sec 39.50k 3.98k 66.11k 95.52%
789582 requests in 10.10s, 82.83MB read
Requests/sec: 78182.65
Transfer/sec: 8.20MB
```
2023-08-17 09:52:37 -04:00
|
|
|
op_http_set_response_body_resource(
|
|
|
|
req,
|
|
|
|
rid,
|
2023-11-13 14:17:31 -05:00
|
|
|
autoClose,
|
feat(ext/web): resourceForReadableStream (#20180)
Extracted from fast streams work.
This is a resource wrapper for `ReadableStream`, allowing us to treat
all `ReadableStream` instances as resources, and remove special paths in
both `fetch` and `serve`.
Performance with a ReadableStream response yields ~18% improvement:
```
return new Response(new ReadableStream({
start(controller) {
controller.enqueue(new Uint8Array([104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100]));
controller.close();
}
})
```
This patch:
```
12:36 $ third_party/prebuilt/mac/wrk http://localhost:8080
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 99.96us 100.03us 6.65ms 98.84%
Req/Sec 47.73k 2.43k 51.02k 89.11%
959308 requests in 10.10s, 117.10MB read
Requests/sec: 94978.71
Transfer/sec: 11.59MB
```
main:
```
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 163.03us 685.51us 19.73ms 99.27%
Req/Sec 39.50k 3.98k 66.11k 95.52%
789582 requests in 10.10s, 82.83MB read
Requests/sec: 78182.65
Transfer/sec: 8.20MB
```
2023-08-17 09:52:37 -04:00
|
|
|
status,
|
2023-11-13 14:17:31 -05:00
|
|
|
),
|
2024-04-24 14:03:37 -04:00
|
|
|
(success) => {
|
|
|
|
innerRequest?.close(success);
|
2023-11-13 14:17:31 -05:00
|
|
|
op_http_close_after_finish(req);
|
|
|
|
},
|
|
|
|
);
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Maps the incoming request slab ID to a fully-fledged Request object, passes it to the user-provided
|
|
|
|
* callback, then extracts the response that was returned from that callback. The response is then pulled
|
|
|
|
* apart and handled on the Rust side.
|
|
|
|
*
|
|
|
|
* This function returns a promise that will only reject in the case of abnormal exit.
|
|
|
|
*/
|
2023-05-31 19:20:39 -04:00
|
|
|
function mapToCallback(context, callback, onError) {
|
2023-04-22 13:48:21 -04:00
|
|
|
return async function (req) {
|
|
|
|
// Get the response from the user-provided callback. If that fails, use onError. If that fails, return a fallback
|
|
|
|
// 500 error.
|
2023-04-26 12:41:54 -04:00
|
|
|
let innerRequest;
|
2023-04-22 13:48:21 -04:00
|
|
|
let response;
|
|
|
|
try {
|
perf(ext/http): recover memory for serve and optimize AbortController (#23559)
Max rps without a signal is unchanged, however we can drastically reduce
memory usage by not creating the signal until needed, and we can
optimize the rps in the case where the signal is created.
With a quick memory benchmark, it looks like this helps pretty
drastically with # of GCs when benchmarking w/wrk:
- 1.42.4: 1763
- canary: 1093
- this patch: 874
This branch:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 87.33us 439.95us 20.68ms 99.67%
Req/Sec 66.70k 6.39k 74.11k 83.66%
1340255 requests in 10.10s, 191.73MB read
Requests/sec: 132696.90
Transfer/sec: 18.98MB
cpu: Apple M2 Pro
runtime: deno 1.43.0 (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 986.5 ns/iter 1,013,682.6 (878.2 ns … 1.18 µs) 1.01 µs 1.18 µs 1.18 µs
newAbortController 18 ns/iter 55,541,104.1 (15.6 ns … 42.62 ns) 17.71 ns 25.05 ns 26.27 ns
newAbortControllerSignal 18.66 ns/iter 53,578,966.7 (16.49 ns … 32.16 ns) 18.71 ns 25.67 ns 26.39 ns
newAbortControllerSignalOnAbort 106.49 ns/iter 9,390,164.9 (97.87 ns … 120.61 ns) 108.6 ns 114.24 ns 115.89 ns
newAbortControllerSignalAddEventListener 86.92 ns/iter 11,504,880.2 (81.88 ns … 103.15 ns) 90 ns 98.28 ns 99.55 ns
newAbortControllerSignalOnAbortNoListener 3.01 µs/iter 331,964.4 (2.97 µs … 3.1 µs) 3.06 µs 3.1 µs 3.1 µs
newAbortControllerSignalOnAbortAbort 3.26 µs/iter 306,662.6 (3.22 µs … 3.36 µs) 3.27 µs 3.36 µs 3.36 µs
```
Latest canary:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 72.86us 71.23us 4.47ms 99.05%
Req/Sec 64.66k 5.54k 72.48k 82.18%
1299015 requests in 10.10s, 185.83MB read
Requests/sec: 128616.02
Transfer/sec: 18.40MB
cpu: Apple M2 Pro
runtime: deno 1.43.0+bc4aa5f (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 1.25 µs/iter 800,005.2 (1.01 µs … 4.18 µs) 1.16 µs 4.18 µs 4.18 µs
newAbortController 18.56 ns/iter 53,868,204.3 (16.04 ns … 38.73 ns) 18.38 ns 26.1 ns 26.63 ns
newAbortControllerSignal 18.72 ns/iter 53,430,746.1 (16.13 ns … 36.71 ns) 18.71 ns 26.19 ns 26.98 ns
newAbortControllerSignalOnAbort 193.91 ns/iter 5,156,992.4 (184.25 ns … 211.41 ns) 194.96 ns 207.87 ns 209.4 ns
newAbortControllerSignalAddEventListener 171.45 ns/iter 5,832,569.2 (153 ns … 182.03 ns) 176.17 ns 180.75 ns 181.05 ns
newAbortControllerSignalOnAbortNoListener 3.07 µs/iter 326,263.3 (2.98 µs … 3.17 µs) 3.08 µs 3.17 µs 3.17 µs
newAbortControllerSignalOnAbortAbort 3.32 µs/iter 301,344.6 (3.29 µs … 3.4 µs) 3.33 µs 3.4 µs 3.4 µs
```
2024-04-25 14:52:24 -04:00
|
|
|
innerRequest = new InnerRequest(req, context);
|
|
|
|
const request = fromInnerRequest(innerRequest, "immutable");
|
|
|
|
innerRequest.request = request;
|
2023-10-27 07:34:41 -04:00
|
|
|
response = await callback(
|
perf(ext/http): recover memory for serve and optimize AbortController (#23559)
Max rps without a signal is unchanged, however we can drastically reduce
memory usage by not creating the signal until needed, and we can
optimize the rps in the case where the signal is created.
With a quick memory benchmark, it looks like this helps pretty
drastically with # of GCs when benchmarking w/wrk:
- 1.42.4: 1763
- canary: 1093
- this patch: 874
This branch:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 87.33us 439.95us 20.68ms 99.67%
Req/Sec 66.70k 6.39k 74.11k 83.66%
1340255 requests in 10.10s, 191.73MB read
Requests/sec: 132696.90
Transfer/sec: 18.98MB
cpu: Apple M2 Pro
runtime: deno 1.43.0 (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 986.5 ns/iter 1,013,682.6 (878.2 ns … 1.18 µs) 1.01 µs 1.18 µs 1.18 µs
newAbortController 18 ns/iter 55,541,104.1 (15.6 ns … 42.62 ns) 17.71 ns 25.05 ns 26.27 ns
newAbortControllerSignal 18.66 ns/iter 53,578,966.7 (16.49 ns … 32.16 ns) 18.71 ns 25.67 ns 26.39 ns
newAbortControllerSignalOnAbort 106.49 ns/iter 9,390,164.9 (97.87 ns … 120.61 ns) 108.6 ns 114.24 ns 115.89 ns
newAbortControllerSignalAddEventListener 86.92 ns/iter 11,504,880.2 (81.88 ns … 103.15 ns) 90 ns 98.28 ns 99.55 ns
newAbortControllerSignalOnAbortNoListener 3.01 µs/iter 331,964.4 (2.97 µs … 3.1 µs) 3.06 µs 3.1 µs 3.1 µs
newAbortControllerSignalOnAbortAbort 3.26 µs/iter 306,662.6 (3.22 µs … 3.36 µs) 3.27 µs 3.36 µs 3.36 µs
```
Latest canary:
```
Running 10s test @ http://localhost:8080/
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 72.86us 71.23us 4.47ms 99.05%
Req/Sec 64.66k 5.54k 72.48k 82.18%
1299015 requests in 10.10s, 185.83MB read
Requests/sec: 128616.02
Transfer/sec: 18.40MB
cpu: Apple M2 Pro
runtime: deno 1.43.0+bc4aa5f (aarch64-apple-darwin)
file:///Users/matt/Documents/scripts/bench_request.js
benchmark time (avg) iter/s (min … max) p75 p99 p995
----------------------------------------------------------------------------------------------- -----------------------------
newRequest 1.25 µs/iter 800,005.2 (1.01 µs … 4.18 µs) 1.16 µs 4.18 µs 4.18 µs
newAbortController 18.56 ns/iter 53,868,204.3 (16.04 ns … 38.73 ns) 18.38 ns 26.1 ns 26.63 ns
newAbortControllerSignal 18.72 ns/iter 53,430,746.1 (16.13 ns … 36.71 ns) 18.71 ns 26.19 ns 26.98 ns
newAbortControllerSignalOnAbort 193.91 ns/iter 5,156,992.4 (184.25 ns … 211.41 ns) 194.96 ns 207.87 ns 209.4 ns
newAbortControllerSignalAddEventListener 171.45 ns/iter 5,832,569.2 (153 ns … 182.03 ns) 176.17 ns 180.75 ns 181.05 ns
newAbortControllerSignalOnAbortNoListener 3.07 µs/iter 326,263.3 (2.98 µs … 3.17 µs) 3.08 µs 3.17 µs 3.17 µs
newAbortControllerSignalOnAbortAbort 3.32 µs/iter 301,344.6 (3.29 µs … 3.4 µs) 3.33 µs 3.4 µs 3.4 µs
```
2024-04-25 14:52:24 -04:00
|
|
|
request,
|
2023-10-27 07:34:41 -04:00
|
|
|
new ServeHandlerInfo(innerRequest),
|
|
|
|
);
|
2023-11-07 17:52:44 -05:00
|
|
|
|
|
|
|
// Throwing Error if the handler return value is not a Response class
|
|
|
|
if (!ObjectPrototypeIsPrototypeOf(ResponsePrototype, response)) {
|
|
|
|
throw TypeError(
|
|
|
|
"Return value from serve handler must be a response or a promise resolving to a response",
|
|
|
|
);
|
|
|
|
}
|
2023-04-22 13:48:21 -04:00
|
|
|
} catch (error) {
|
|
|
|
try {
|
|
|
|
response = await onError(error);
|
2023-11-07 17:52:44 -05:00
|
|
|
if (!ObjectPrototypeIsPrototypeOf(ResponsePrototype, response)) {
|
|
|
|
throw TypeError(
|
|
|
|
"Return value from onError handler must be a response or a promise resolving to a response",
|
|
|
|
);
|
|
|
|
}
|
2023-04-22 13:48:21 -04:00
|
|
|
} catch (error) {
|
|
|
|
console.error("Exception in onError while handling exception", error);
|
|
|
|
response = internalServerError();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
const inner = toInnerResponse(response);
|
2023-04-26 12:41:54 -04:00
|
|
|
if (innerRequest?.[_upgraded]) {
|
2023-04-22 13:48:21 -04:00
|
|
|
// We're done here as the connection has been upgraded during the callback and no longer requires servicing.
|
|
|
|
if (response !== UPGRADE_RESPONSE_SENTINEL) {
|
|
|
|
console.error("Upgrade response was not returned from callback");
|
|
|
|
context.close();
|
|
|
|
}
|
2023-04-26 12:41:54 -04:00
|
|
|
innerRequest?.[_upgraded]();
|
2023-04-22 13:48:21 -04:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Did everything shut down while we were waiting?
|
|
|
|
if (context.closed) {
|
2023-05-16 19:00:59 -04:00
|
|
|
// We're shutting down, so this status shouldn't make it back to the client but "Service Unavailable" seems appropriate
|
2023-04-26 12:41:54 -04:00
|
|
|
innerRequest?.close();
|
2023-11-13 09:04:49 -05:00
|
|
|
op_http_set_promise_complete(req, 503);
|
2023-04-22 13:48:21 -04:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
const status = inner.status;
|
|
|
|
const headers = inner.headerList;
|
|
|
|
if (headers && headers.length > 0) {
|
|
|
|
if (headers.length == 1) {
|
2023-05-08 17:07:45 -04:00
|
|
|
op_http_set_response_header(req, headers[0][0], headers[0][1]);
|
2023-04-22 13:48:21 -04:00
|
|
|
} else {
|
2023-06-06 10:55:37 -04:00
|
|
|
op_http_set_response_headers(req, headers);
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-11-13 14:17:31 -05:00
|
|
|
fastSyncResponseOrStream(req, inner.body, status, innerRequest);
|
2023-04-22 13:48:21 -04:00
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2024-04-24 14:03:37 -04:00
|
|
|
type RawHandler = (
|
|
|
|
request: Request,
|
|
|
|
info: ServeHandlerInfo,
|
|
|
|
) => Response | Promise<Response>;
|
|
|
|
|
|
|
|
type RawServeOptions = {
|
|
|
|
port?: number;
|
|
|
|
hostname?: string;
|
|
|
|
signal?: AbortSignal;
|
|
|
|
reusePort?: boolean;
|
|
|
|
key?: string;
|
|
|
|
cert?: string;
|
|
|
|
onError?: (error: unknown) => Response | Promise<Response>;
|
|
|
|
onListen?: (params: { hostname: string; port: number }) => void;
|
|
|
|
handler?: RawHandler;
|
|
|
|
};
|
|
|
|
|
2023-05-18 20:59:23 -04:00
|
|
|
function serve(arg1, arg2) {
|
2024-04-24 14:03:37 -04:00
|
|
|
let options: RawServeOptions | undefined;
|
|
|
|
let handler: RawHandler | undefined;
|
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
if (typeof arg1 === "function") {
|
|
|
|
handler = arg1;
|
|
|
|
} else if (typeof arg2 === "function") {
|
|
|
|
handler = arg2;
|
|
|
|
options = arg1;
|
|
|
|
} else {
|
|
|
|
options = arg1;
|
|
|
|
}
|
|
|
|
if (handler === undefined) {
|
|
|
|
if (options === undefined) {
|
|
|
|
throw new TypeError(
|
|
|
|
"No handler was provided, so an options bag is mandatory.",
|
|
|
|
);
|
|
|
|
}
|
|
|
|
handler = options.handler;
|
|
|
|
}
|
|
|
|
if (typeof handler !== "function") {
|
|
|
|
throw new TypeError("A handler function must be provided.");
|
|
|
|
}
|
|
|
|
if (options === undefined) {
|
2024-05-22 18:03:35 -04:00
|
|
|
options = { __proto__: null };
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
2024-04-08 17:01:02 -04:00
|
|
|
const wantsHttps = hasTlsKeyPairOptions(options);
|
2023-10-03 22:37:39 -04:00
|
|
|
const wantsUnix = ObjectHasOwn(options, "path");
|
2023-04-22 13:48:21 -04:00
|
|
|
const signal = options.signal;
|
|
|
|
const onError = options.onError ?? function (error) {
|
|
|
|
console.error(error);
|
|
|
|
return internalServerError();
|
|
|
|
};
|
2023-10-03 22:37:39 -04:00
|
|
|
|
|
|
|
if (wantsUnix) {
|
|
|
|
const listener = listen({
|
|
|
|
transport: "unix",
|
|
|
|
path: options.path,
|
|
|
|
[listenOptionApiName]: "Deno.serve",
|
|
|
|
});
|
|
|
|
const path = listener.addr.path;
|
|
|
|
return serveHttpOnListener(listener, signal, handler, onError, () => {
|
|
|
|
if (options.onListen) {
|
2024-04-19 20:09:50 -04:00
|
|
|
options.onListen(listener.addr);
|
2023-10-03 22:37:39 -04:00
|
|
|
} else {
|
|
|
|
console.log(`Listening on ${path}`);
|
|
|
|
}
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
const listenOpts = {
|
|
|
|
hostname: options.hostname ?? "0.0.0.0",
|
2023-07-03 19:46:32 -04:00
|
|
|
port: options.port ?? 8000,
|
2023-04-22 13:48:21 -04:00
|
|
|
reusePort: options.reusePort ?? false,
|
|
|
|
};
|
|
|
|
|
2023-07-19 14:43:49 -04:00
|
|
|
if (options.certFile || options.keyFile) {
|
|
|
|
throw new TypeError(
|
|
|
|
"Unsupported 'certFile' / 'keyFile' options provided: use 'cert' / 'key' instead.",
|
|
|
|
);
|
|
|
|
}
|
|
|
|
if (options.alpnProtocols) {
|
|
|
|
throw new TypeError(
|
|
|
|
"Unsupported 'alpnProtocols' option provided. 'h2' and 'http/1.1' are automatically supported.",
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2023-05-31 19:20:39 -04:00
|
|
|
let listener;
|
2023-04-22 13:48:21 -04:00
|
|
|
if (wantsHttps) {
|
|
|
|
if (!options.cert || !options.key) {
|
|
|
|
throw new TypeError(
|
|
|
|
"Both cert and key must be provided to enable HTTPS.",
|
|
|
|
);
|
|
|
|
}
|
|
|
|
listenOpts.cert = options.cert;
|
|
|
|
listenOpts.key = options.key;
|
|
|
|
listenOpts.alpnProtocols = ["h2", "http/1.1"];
|
2023-05-31 19:20:39 -04:00
|
|
|
listener = listenTls(listenOpts);
|
2023-04-22 13:48:21 -04:00
|
|
|
listenOpts.port = listener.addr.port;
|
|
|
|
} else {
|
2023-05-31 19:20:39 -04:00
|
|
|
listener = listen(listenOpts);
|
2023-04-22 13:48:21 -04:00
|
|
|
listenOpts.port = listener.addr.port;
|
|
|
|
}
|
|
|
|
|
2024-04-19 20:09:50 -04:00
|
|
|
const addr = listener.addr;
|
|
|
|
// If the hostname is "0.0.0.0", we display "localhost" in console
|
|
|
|
// because browsers in Windows don't resolve "0.0.0.0".
|
|
|
|
// See the discussion in https://github.com/denoland/deno_std/issues/1165
|
2024-06-08 20:03:07 -04:00
|
|
|
const hostname = addr.hostname == "0.0.0.0" || addr.hostname == "::"
|
|
|
|
? "localhost"
|
|
|
|
: addr.hostname;
|
2024-04-19 20:09:50 -04:00
|
|
|
addr.hostname = hostname;
|
2023-06-14 08:58:41 -04:00
|
|
|
|
2024-04-19 20:09:50 -04:00
|
|
|
const onListen = (scheme) => {
|
2023-05-31 19:20:39 -04:00
|
|
|
if (options.onListen) {
|
2024-04-19 20:09:50 -04:00
|
|
|
options.onListen(addr);
|
2023-05-31 19:20:39 -04:00
|
|
|
} else {
|
2024-06-08 20:03:07 -04:00
|
|
|
const host = StringPrototypeIncludes(addr.hostname, ":")
|
|
|
|
? `[${addr.hostname}]`
|
|
|
|
: addr.hostname;
|
|
|
|
console.log(`Listening on ${scheme}${host}:${addr.port}/`);
|
2023-05-31 19:20:39 -04:00
|
|
|
}
|
2023-04-22 13:48:21 -04:00
|
|
|
};
|
|
|
|
|
2023-05-31 19:20:39 -04:00
|
|
|
return serveHttpOnListener(listener, signal, handler, onError, onListen);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Serve HTTP/1.1 and/or HTTP/2 on an arbitrary listener.
|
|
|
|
*/
|
|
|
|
function serveHttpOnListener(listener, signal, handler, onError, onListen) {
|
2023-10-03 22:37:39 -04:00
|
|
|
const context = new CallbackContext(
|
|
|
|
signal,
|
2024-01-26 14:04:07 -05:00
|
|
|
op_http_serve(listener[internalRidSymbol]),
|
2023-10-03 22:37:39 -04:00
|
|
|
listener,
|
|
|
|
);
|
2023-05-31 19:20:39 -04:00
|
|
|
const callback = mapToCallback(context, handler, onError);
|
|
|
|
|
|
|
|
onListen(context.scheme);
|
|
|
|
|
2024-04-19 20:09:50 -04:00
|
|
|
return serveHttpOn(context, listener.addr, callback);
|
2023-05-31 19:20:39 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Serve HTTP/1.1 and/or HTTP/2 on an arbitrary connection.
|
|
|
|
*/
|
|
|
|
function serveHttpOnConnection(connection, signal, handler, onError, onListen) {
|
2023-10-03 22:37:39 -04:00
|
|
|
const context = new CallbackContext(
|
|
|
|
signal,
|
2024-01-26 14:04:07 -05:00
|
|
|
op_http_serve_on(connection[internalRidSymbol]),
|
2023-10-03 22:37:39 -04:00
|
|
|
null,
|
|
|
|
);
|
2023-05-31 19:20:39 -04:00
|
|
|
const callback = mapToCallback(context, handler, onError);
|
|
|
|
|
|
|
|
onListen(context.scheme);
|
|
|
|
|
2024-04-19 20:09:50 -04:00
|
|
|
return serveHttpOn(context, connection.localAddr, callback);
|
2023-05-31 19:20:39 -04:00
|
|
|
}
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2024-04-19 20:09:50 -04:00
|
|
|
function serveHttpOn(context, addr, callback) {
|
2023-05-19 17:14:40 -04:00
|
|
|
let ref = true;
|
|
|
|
let currentPromise = null;
|
|
|
|
|
2023-06-10 06:17:56 -04:00
|
|
|
const promiseErrorHandler = (error) => {
|
|
|
|
// Abnormal exit
|
|
|
|
console.error(
|
|
|
|
"Terminating Deno.serve loop due to unexpected error",
|
|
|
|
error,
|
|
|
|
);
|
|
|
|
context.close();
|
|
|
|
};
|
|
|
|
|
2023-05-18 20:59:23 -04:00
|
|
|
// Run the server
|
|
|
|
const finished = (async () => {
|
2023-06-09 18:45:56 -04:00
|
|
|
const rid = context.serverRid;
|
2023-05-18 20:59:23 -04:00
|
|
|
while (true) {
|
|
|
|
let req;
|
|
|
|
try {
|
2023-05-30 20:02:52 -04:00
|
|
|
// Attempt to pull as many requests out of the queue as possible before awaiting. This API is
|
|
|
|
// a synchronous, non-blocking API that returns u32::MAX if anything goes wrong.
|
2023-11-13 09:04:49 -05:00
|
|
|
while ((req = op_http_try_wait(rid)) !== null) {
|
2023-06-10 06:17:56 -04:00
|
|
|
PromisePrototypeCatch(callback(req), promiseErrorHandler);
|
2023-05-30 20:02:52 -04:00
|
|
|
}
|
2023-05-19 17:14:40 -04:00
|
|
|
currentPromise = op_http_wait(rid);
|
|
|
|
if (!ref) {
|
2023-11-09 15:57:26 -05:00
|
|
|
core.unrefOpPromise(currentPromise);
|
2023-05-19 17:14:40 -04:00
|
|
|
}
|
|
|
|
req = await currentPromise;
|
|
|
|
currentPromise = null;
|
2023-05-18 20:59:23 -04:00
|
|
|
} catch (error) {
|
|
|
|
if (ObjectPrototypeIsPrototypeOf(BadResourcePrototype, error)) {
|
|
|
|
break;
|
|
|
|
}
|
2023-09-11 20:06:38 -04:00
|
|
|
if (ObjectPrototypeIsPrototypeOf(InterruptedPrototype, error)) {
|
|
|
|
break;
|
|
|
|
}
|
2023-05-18 20:59:23 -04:00
|
|
|
throw new Deno.errors.Http(error);
|
|
|
|
}
|
2023-11-13 09:04:49 -05:00
|
|
|
if (req === null) {
|
2023-04-22 13:48:21 -04:00
|
|
|
break;
|
|
|
|
}
|
2023-06-10 06:17:56 -04:00
|
|
|
PromisePrototypeCatch(callback(req), promiseErrorHandler);
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
2023-09-11 20:06:38 -04:00
|
|
|
|
2023-11-01 15:26:12 -04:00
|
|
|
if (!context.closing && !context.closed) {
|
|
|
|
context.closing = op_http_close(rid, false);
|
2023-09-11 20:06:38 -04:00
|
|
|
context.close();
|
|
|
|
}
|
2023-11-01 15:26:12 -04:00
|
|
|
|
|
|
|
await context.closing;
|
|
|
|
context.close();
|
|
|
|
context.closed = true;
|
2023-05-18 20:59:23 -04:00
|
|
|
})();
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2023-05-19 17:14:40 -04:00
|
|
|
return {
|
2024-04-19 20:09:50 -04:00
|
|
|
addr,
|
2023-05-19 17:14:40 -04:00
|
|
|
finished,
|
2023-09-11 20:06:38 -04:00
|
|
|
async shutdown() {
|
2023-11-01 15:26:12 -04:00
|
|
|
if (!context.closing && !context.closed) {
|
2023-09-11 20:06:38 -04:00
|
|
|
// Shut this HTTP server down gracefully
|
2023-11-01 15:26:12 -04:00
|
|
|
context.closing = op_http_close(context.serverRid, true);
|
2023-09-11 20:06:38 -04:00
|
|
|
}
|
2023-11-01 15:26:12 -04:00
|
|
|
await context.closing;
|
|
|
|
context.closed = true;
|
2023-09-11 20:06:38 -04:00
|
|
|
},
|
2023-05-19 17:14:40 -04:00
|
|
|
ref() {
|
|
|
|
ref = true;
|
|
|
|
if (currentPromise) {
|
2023-11-09 15:57:26 -05:00
|
|
|
core.refOpPromise(currentPromise);
|
2023-05-19 17:14:40 -04:00
|
|
|
}
|
|
|
|
},
|
|
|
|
unref() {
|
|
|
|
ref = false;
|
|
|
|
if (currentPromise) {
|
2023-11-09 15:57:26 -05:00
|
|
|
core.unrefOpPromise(currentPromise);
|
2023-05-19 17:14:40 -04:00
|
|
|
}
|
|
|
|
},
|
2023-11-01 15:26:12 -04:00
|
|
|
[SymbolAsyncDispose]() {
|
|
|
|
return this.shutdown();
|
|
|
|
},
|
2023-05-19 17:14:40 -04:00
|
|
|
};
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
2023-05-18 22:10:25 -04:00
|
|
|
internals.addTrailers = addTrailers;
|
2023-04-26 18:58:18 -04:00
|
|
|
internals.upgradeHttpRaw = upgradeHttpRaw;
|
2023-05-31 19:20:39 -04:00
|
|
|
internals.serveHttpOnListener = serveHttpOnListener;
|
|
|
|
internals.serveHttpOnConnection = serveHttpOnConnection;
|
2023-04-26 18:58:18 -04:00
|
|
|
|
2024-04-24 15:45:49 -04:00
|
|
|
function registerDeclarativeServer(exports) {
|
|
|
|
if (ObjectHasOwn(exports, "fetch")) {
|
2024-05-17 08:35:19 -04:00
|
|
|
if (typeof exports.fetch !== "function") {
|
2024-04-24 15:45:49 -04:00
|
|
|
throw new TypeError(
|
2024-05-17 08:35:19 -04:00
|
|
|
"Invalid type for fetch: must be a function with a single or no parameter",
|
2024-04-24 15:45:49 -04:00
|
|
|
);
|
|
|
|
}
|
|
|
|
return ({ servePort, serveHost }) => {
|
|
|
|
Deno.serve({
|
|
|
|
port: servePort,
|
|
|
|
hostname: serveHost,
|
|
|
|
onListen: ({ port, hostname }) => {
|
|
|
|
console.debug(
|
|
|
|
`%cdeno serve%c: Listening on %chttp://${hostname}:${port}/%c`,
|
|
|
|
"color: green",
|
|
|
|
"color: inherit",
|
|
|
|
"color: yellow",
|
|
|
|
"color: inherit",
|
|
|
|
);
|
|
|
|
},
|
|
|
|
handler: (req) => {
|
|
|
|
return exports.fetch(req);
|
|
|
|
},
|
|
|
|
});
|
|
|
|
};
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-06-06 06:29:55 -04:00
|
|
|
export {
|
|
|
|
addTrailers,
|
2024-04-24 15:45:49 -04:00
|
|
|
registerDeclarativeServer,
|
2023-06-06 06:29:55 -04:00
|
|
|
serve,
|
|
|
|
serveHttpOnConnection,
|
|
|
|
serveHttpOnListener,
|
|
|
|
upgradeHttpRaw,
|
|
|
|
};
|