2024-01-01 14:58:21 -05:00
|
|
|
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
2023-05-10 10:23:26 -04:00
|
|
|
use crate::compressible::is_content_compressible;
|
2023-04-22 13:48:21 -04:00
|
|
|
use crate::extract_network_stream;
|
2023-04-24 17:24:40 -04:00
|
|
|
use crate::network_buffered_stream::NetworkStreamPrefixCheck;
|
2023-04-22 13:48:21 -04:00
|
|
|
use crate::request_body::HttpRequestBody;
|
|
|
|
use crate::request_properties::HttpConnectionProperties;
|
|
|
|
use crate::request_properties::HttpListenProperties;
|
|
|
|
use crate::request_properties::HttpPropertyExtractor;
|
2023-05-10 07:23:14 -04:00
|
|
|
use crate::response_body::Compression;
|
2023-04-22 13:48:21 -04:00
|
|
|
use crate::response_body::ResponseBytesInner;
|
2023-11-13 09:04:49 -05:00
|
|
|
use crate::service::handle_request;
|
2023-11-23 11:39:17 -05:00
|
|
|
use crate::service::http_general_trace;
|
2023-11-13 09:04:49 -05:00
|
|
|
use crate::service::http_trace;
|
|
|
|
use crate::service::HttpRecord;
|
2023-11-13 14:17:31 -05:00
|
|
|
use crate::service::HttpRecordResponse;
|
2023-11-13 09:04:49 -05:00
|
|
|
use crate::service::HttpRequestBodyAutocloser;
|
2023-11-13 12:32:34 -05:00
|
|
|
use crate::service::HttpServerState;
|
2023-11-23 11:39:17 -05:00
|
|
|
use crate::service::SignallingRc;
|
2023-04-26 18:58:18 -04:00
|
|
|
use crate::websocket_upgrade::WebSocketUpgrade;
|
2023-04-22 13:48:21 -04:00
|
|
|
use crate::LocalExecutor;
|
2023-05-10 07:23:14 -04:00
|
|
|
use cache_control::CacheControl;
|
2023-04-22 13:48:21 -04:00
|
|
|
use deno_core::error::AnyError;
|
2023-11-23 11:39:17 -05:00
|
|
|
use deno_core::futures::future::poll_fn;
|
2023-04-22 13:48:21 -04:00
|
|
|
use deno_core::futures::TryFutureExt;
|
2023-06-24 07:54:10 -04:00
|
|
|
use deno_core::op2;
|
2023-06-06 10:55:37 -04:00
|
|
|
use deno_core::serde_v8::from_v8;
|
2023-08-23 19:03:05 -04:00
|
|
|
use deno_core::unsync::spawn;
|
|
|
|
use deno_core::unsync::JoinHandle;
|
2023-06-02 18:31:27 -04:00
|
|
|
use deno_core::v8;
|
2023-04-22 13:48:21 -04:00
|
|
|
use deno_core::AsyncRefCell;
|
2023-04-26 18:58:18 -04:00
|
|
|
use deno_core::AsyncResult;
|
feat(ext/web): resourceForReadableStream (#20180)
Extracted from fast streams work.
This is a resource wrapper for `ReadableStream`, allowing us to treat
all `ReadableStream` instances as resources, and remove special paths in
both `fetch` and `serve`.
Performance with a ReadableStream response yields ~18% improvement:
```
return new Response(new ReadableStream({
start(controller) {
controller.enqueue(new Uint8Array([104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100]));
controller.close();
}
})
```
This patch:
```
12:36 $ third_party/prebuilt/mac/wrk http://localhost:8080
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 99.96us 100.03us 6.65ms 98.84%
Req/Sec 47.73k 2.43k 51.02k 89.11%
959308 requests in 10.10s, 117.10MB read
Requests/sec: 94978.71
Transfer/sec: 11.59MB
```
main:
```
Running 10s test @ http://localhost:8080
2 threads and 10 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 163.03us 685.51us 19.73ms 99.27%
Req/Sec 39.50k 3.98k 66.11k 95.52%
789582 requests in 10.10s, 82.83MB read
Requests/sec: 78182.65
Transfer/sec: 8.20MB
```
2023-08-17 09:52:37 -04:00
|
|
|
use deno_core::BufView;
|
2023-04-22 13:48:21 -04:00
|
|
|
use deno_core::ByteString;
|
|
|
|
use deno_core::CancelFuture;
|
|
|
|
use deno_core::CancelHandle;
|
|
|
|
use deno_core::CancelTryFuture;
|
2023-11-13 09:04:49 -05:00
|
|
|
use deno_core::ExternalPointer;
|
2023-07-07 12:47:08 -04:00
|
|
|
use deno_core::JsBuffer;
|
2023-04-22 13:48:21 -04:00
|
|
|
use deno_core::OpState;
|
|
|
|
use deno_core::RcRef;
|
|
|
|
use deno_core::Resource;
|
|
|
|
use deno_core::ResourceId;
|
|
|
|
use deno_net::ops_tls::TlsStream;
|
|
|
|
use deno_net::raw::NetworkStream;
|
2023-05-15 19:24:41 -04:00
|
|
|
use deno_websocket::ws_create_server_stream;
|
2023-12-27 11:59:57 -05:00
|
|
|
use hyper::body::Incoming;
|
|
|
|
use hyper::header::HeaderMap;
|
|
|
|
use hyper::header::ACCEPT_ENCODING;
|
|
|
|
use hyper::header::CACHE_CONTROL;
|
|
|
|
use hyper::header::CONTENT_ENCODING;
|
|
|
|
use hyper::header::CONTENT_LENGTH;
|
|
|
|
use hyper::header::CONTENT_RANGE;
|
|
|
|
use hyper::header::CONTENT_TYPE;
|
|
|
|
use hyper::header::COOKIE;
|
|
|
|
use hyper::http::HeaderName;
|
|
|
|
use hyper::http::HeaderValue;
|
|
|
|
use hyper::server::conn::http1;
|
|
|
|
use hyper::server::conn::http2;
|
|
|
|
use hyper::service::service_fn;
|
|
|
|
use hyper::service::HttpService;
|
|
|
|
use hyper::StatusCode;
|
2023-12-21 19:54:28 -05:00
|
|
|
use hyper_util::rt::TokioIo;
|
2023-06-01 10:07:26 -04:00
|
|
|
use once_cell::sync::Lazy;
|
2023-06-02 18:31:27 -04:00
|
|
|
use smallvec::SmallVec;
|
2023-04-22 13:48:21 -04:00
|
|
|
use std::borrow::Cow;
|
|
|
|
use std::cell::RefCell;
|
2023-11-13 09:04:49 -05:00
|
|
|
use std::ffi::c_void;
|
2023-04-22 13:48:21 -04:00
|
|
|
use std::future::Future;
|
|
|
|
use std::io;
|
|
|
|
use std::pin::Pin;
|
2023-11-13 09:04:49 -05:00
|
|
|
use std::ptr::null;
|
2023-04-22 13:48:21 -04:00
|
|
|
use std::rc::Rc;
|
2023-04-26 18:58:18 -04:00
|
|
|
|
2023-12-14 13:43:33 -05:00
|
|
|
use super::fly_accept_encoding;
|
|
|
|
use fly_accept_encoding::Encoding;
|
|
|
|
|
2023-04-26 18:58:18 -04:00
|
|
|
use tokio::io::AsyncReadExt;
|
|
|
|
use tokio::io::AsyncWriteExt;
|
|
|
|
|
/// Shorthand for an incoming hyper request with a streaming `Incoming` body.
type Request = hyper::Request<Incoming>;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2023-06-01 10:07:26 -04:00
|
|
|
/// Opt-in switch for vectored writes: enabled when the `DENO_USE_WRITEV`
/// environment variable is set to any non-empty value; disabled otherwise.
static USE_WRITEV: Lazy<bool> = Lazy::new(|| {
  std::env::var("DENO_USE_WRITEV")
    .map(|val| !val.is_empty())
    .unwrap_or(false)
});
|
|
|
|
|
2023-12-06 18:43:01 -05:00
|
|
|
// NOTE(bartlomieju): currently we don't have any unstable HTTP features,
// but let's keep this const here, because:
// a) we still need to support `--unstable-http` flag to not break user's CLI;
// b) we might add more unstable features in the future.
/// Feature name tied to the `--unstable-http` CLI flag.
#[allow(dead_code)]
pub const UNSTABLE_FEATURE_NAME: &str = "http";
|
|
|
|
|
2023-04-24 17:24:40 -04:00
|
|
|
/// All HTTP/2 connections start with this byte string.
///
/// In HTTP/2, each endpoint is required to send a connection preface as a final confirmation
/// of the protocol in use and to establish the initial settings for the HTTP/2 connection. The
/// client and server each send a different connection preface.
///
/// The client connection preface starts with a sequence of 24 octets, which in hex notation is:
///
/// 0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
///
/// That is, the connection preface starts with the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence
/// MUST be followed by a SETTINGS frame (Section 6.5), which MAY be empty.
const HTTP2_PREFIX: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";

/// ALPN negotiation for "h2"
const TLS_ALPN_HTTP_2: &[u8] = b"h2";

/// ALPN negotiation for "http/1.1"
const TLS_ALPN_HTTP_11: &[u8] = b"http/1.1";
|
|
|
|
|
|
|
|
/// Name a trait for streams we can serve HTTP over.
trait HttpServeStream:
  tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static
{
}
// Blanket impl: any stream satisfying the bounds is automatically an
// `HttpServeStream`; the trait exists only as a named alias for the bounds.
impl<
    S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static,
  > HttpServeStream for S
{
}
|
|
|
|
|
2023-11-13 09:04:49 -05:00
|
|
|
/// Newtype over `Rc<HttpRecord>` so it can be registered as a deno_core
/// external (see the `external!` invocation below).
// `repr(transparent)` keeps the layout identical to the inner `Rc`.
#[repr(transparent)]
struct RcHttpRecord(Rc<HttpRecord>);
|
|
|
|
|
|
|
|
// Temp copy
/// Define an external type.
///
/// Implements `deno_core::Externalizable` for `$type`, giving it a unique
/// process-wide marker value and a human-readable name.
macro_rules! external {
  ($type:ident, $name:literal) => {
    impl deno_core::Externalizable for $type {
      fn external_marker() -> usize {
        // Use the address of a static mut as a way to get around lack of usize-sized TypeId. Because it is mutable, the
        // compiler cannot collapse multiple definitions into one.
        static mut DEFINITION: deno_core::ExternalDefinition =
          deno_core::ExternalDefinition::new($name);
        // Wash the pointer through black_box so the compiler cannot see what we're going to do with it and needs
        // to assume it will be used for valid purposes.
        // SAFETY: temporary while waiting on deno core bump
        let ptr = std::hint::black_box(unsafe { &mut DEFINITION } as *mut _);
        ptr as usize
      }

      fn external_name() -> &'static str {
        $name
      }
    }
  };
}

// Register the [`HttpRecord`] as an external.
external!(RcHttpRecord, "http record");
|
|
|
|
|
|
|
|
/// Construct Rc<HttpRecord> from raw external pointer, consuming
/// refcount. You must make sure the external is deleted on the JS side.
macro_rules! take_external {
  ($external:expr, $args:tt) => {{
    let ptr = ExternalPointer::<RcHttpRecord>::from_raw($external);
    // Moves the Rc out of the external without bumping the refcount;
    // the external must never be dereferenced again after this.
    let record = ptr.unsafely_take().0;
    http_trace!(record, $args);
    record
  }};
}
|
|
|
|
|
|
|
|
/// Clone Rc<HttpRecord> from raw external pointer.
macro_rules! clone_external {
  ($external:expr, $args:tt) => {{
    let ptr = ExternalPointer::<RcHttpRecord>::from_raw($external);
    // Bumps the refcount; the external itself remains valid and owned by JS.
    ptr.unsafely_deref().0.clone()
  }};
}
|
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
/// Upgrade an HTTP connection to a raw duplex stream (used for WebSocket
/// upgrades driven from JS), consuming the request's external pointer.
///
/// Returns the resource id of an [`UpgradeStream`] wrapping one half of an
/// in-memory duplex pipe; background tasks pump bytes between that pipe and
/// the real upgraded connection.
#[op2(fast)]
#[smi]
pub fn op_http_upgrade_raw(
  state: &mut OpState,
  external: *const c_void,
) -> Result<ResourceId, AnyError> {
  // SAFETY: external is deleted before calling this op.
  let http = unsafe { take_external!(external, "op_http_upgrade_raw") };

  // Stage 1: extract the upgrade future
  let upgrade = http.upgrade()?;
  let (read, write) = tokio::io::duplex(1024);
  let (read_rx, write_tx) = tokio::io::split(read);
  let (mut write_rx, mut read_tx) = tokio::io::split(write);
  spawn(async move {
    let mut upgrade_stream = WebSocketUpgrade::<()>::default();

    // Stage 2: Extract the Upgraded connection
    let mut buf = [0; 1024];
    let upgraded = loop {
      // Feed bytes written by JS into the upgrade parser until it yields a
      // complete response (the 101) plus any leftover bytes.
      let read = Pin::new(&mut write_rx).read(&mut buf).await?;
      match upgrade_stream.write(&buf[..read]) {
        Ok(None) => continue,
        Ok(Some((response, bytes))) => {
          let (response_parts, _) = response.into_parts();
          *http.response_parts() = response_parts;
          http.complete();
          let mut upgraded = TokioIo::new(upgrade.await?);
          // Forward any bytes that trailed the upgrade response.
          upgraded.write_all(&bytes).await?;
          break upgraded;
        }
        Err(err) => return Err(err),
      }
    };

    // Stage 3: Pump the data
    let (mut upgraded_rx, mut upgraded_tx) = tokio::io::split(upgraded);

    // Upgraded connection -> duplex pipe (read by JS via the resource).
    spawn(async move {
      let mut buf = [0; 1024];
      loop {
        let read = upgraded_rx.read(&mut buf).await?;
        if read == 0 {
          break;
        }
        read_tx.write_all(&buf[..read]).await?;
      }
      Ok::<_, AnyError>(())
    });
    // Duplex pipe (written by JS via the resource) -> upgraded connection.
    spawn(async move {
      let mut buf = [0; 1024];
      loop {
        let read = write_rx.read(&mut buf).await?;
        if read == 0 {
          break;
        }
        upgraded_tx.write_all(&buf[..read]).await?;
      }
      Ok::<_, AnyError>(())
    });

    Ok(())
  });

  Ok(
    state
      .resource_table
      .add(UpgradeStream::new(read_rx, write_tx)),
  )
}
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
/// Upgrade the request to a server-side WebSocket, consuming the external
/// pointer. Returns the resource id of the created WebSocket stream.
///
/// `headers` are extra response headers to append to the 101 response.
/// NOTE(review): header names/values are `unwrap`ed here — presumably JS
/// validates them beforehand; confirm against the caller.
#[op2(async)]
#[smi]
pub async fn op_http_upgrade_websocket_next(
  state: Rc<RefCell<OpState>>,
  external: *const c_void,
  #[serde] headers: Vec<(ByteString, ByteString)>,
) -> Result<ResourceId, AnyError> {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_upgrade_websocket_next") };
  // Stage 1: set the response to 101 Switching Protocols and send it
  let upgrade = http.upgrade()?;
  {
    // Scoped so the response-parts borrow is released before `complete()`.
    let mut response_parts = http.response_parts();
    response_parts.status = StatusCode::SWITCHING_PROTOCOLS;
    for (name, value) in headers {
      response_parts.headers.append(
        HeaderName::from_bytes(&name).unwrap(),
        HeaderValue::from_bytes(&value).unwrap(),
      );
    }
  }
  http.complete();

  // Stage 2: wait for the request to finish upgrading
  let upgraded = upgrade.await?;

  // Stage 3: take the extracted raw network stream and upgrade it to a websocket, then return it
  let (stream, bytes) = extract_network_stream(upgraded);
  ws_create_server_stream(&mut state.borrow_mut(), stream, bytes)
}
|
|
|
|
|
2023-06-24 07:54:10 -04:00
|
|
|
/// Finish the response with the given status code, consuming the external
/// pointer. Thin op wrapper around [`set_promise_complete`].
#[op2(fast)]
pub fn op_http_set_promise_complete(external: *const c_void, status: u16) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_set_promise_complete") };
  set_promise_complete(http, status);
}
|
|
|
|
|
|
|
|
fn set_promise_complete(http: Rc<HttpRecord>, status: u16) {
|
2023-07-25 18:12:19 -04:00
|
|
|
// The Javascript code should never provide a status that is invalid here (see 23_response.js), so we
|
2024-03-04 09:28:57 -05:00
|
|
|
// will quietly ignore invalid values.
|
2023-07-25 18:12:19 -04:00
|
|
|
if let Ok(code) = StatusCode::from_u16(status) {
|
2023-11-13 14:17:31 -05:00
|
|
|
http.response_parts().status = code;
|
2023-07-25 18:12:19 -04:00
|
|
|
}
|
2023-05-16 19:00:59 -04:00
|
|
|
http.complete();
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
2023-09-23 15:33:31 -04:00
|
|
|
/// Return the request's method, authority, path, peer address and peer port
/// as a V8 array: `[method, authority, path, peer_address, port]`.
/// `authority` and `port` are `undefined` when absent.
#[op2]
pub fn op_http_get_request_method_and_url<'scope, HTTP>(
  scope: &mut v8::HandleScope<'scope>,
  external: *const c_void,
) -> v8::Local<'scope, v8::Array>
where
  HTTP: HttpPropertyExtractor,
{
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_get_request_method_and_url") };
  let request_info = http.request_info();
  let request_parts = http.request_parts();
  let request_properties = HTTP::request_properties(
    &request_info,
    &request_parts.uri,
    &request_parts.headers,
  );

  let method: v8::Local<v8::Value> = v8::String::new_from_utf8(
    scope,
    request_parts.method.as_str().as_bytes(),
    v8::NewStringType::Normal,
  )
  .unwrap()
  .into();

  let authority: v8::Local<v8::Value> = match request_properties.authority {
    Some(authority) => v8::String::new_from_utf8(
      scope,
      authority.as_ref(),
      v8::NewStringType::Normal,
    )
    .unwrap()
    .into(),
    None => v8::undefined(scope).into(),
  };

  // Only extract the path part - we handle authority elsewhere
  let path = match &request_parts.uri.path_and_query() {
    Some(path_and_query) => path_and_query.to_string(),
    None => "".to_owned(),
  };

  let path: v8::Local<v8::Value> =
    v8::String::new_from_utf8(scope, path.as_ref(), v8::NewStringType::Normal)
      .unwrap()
      .into();

  let peer_address: v8::Local<v8::Value> = v8::String::new_from_utf8(
    scope,
    request_info.peer_address.as_bytes(),
    v8::NewStringType::Normal,
  )
  .unwrap()
  .into();

  let port: v8::Local<v8::Value> = match request_info.peer_port {
    Some(port) => v8::Integer::new(scope, port.into()).into(),
    None => v8::undefined(scope).into(),
  };

  let vec = [method, authority, path, peer_address, port];
  v8::Array::new_with_elements(scope, vec.as_slice())
}
|
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
#[op2]
|
|
|
|
#[serde]
|
2023-05-08 17:07:45 -04:00
|
|
|
pub fn op_http_get_request_header(
|
2023-11-13 09:04:49 -05:00
|
|
|
external: *const c_void,
|
2023-08-03 16:36:32 -04:00
|
|
|
#[string] name: String,
|
2023-05-08 17:07:45 -04:00
|
|
|
) -> Option<ByteString> {
|
2023-11-13 09:04:49 -05:00
|
|
|
let http =
|
|
|
|
// SAFETY: op is called with external.
|
|
|
|
unsafe { clone_external!(external, "op_http_get_request_header") };
|
|
|
|
let request_parts = http.request_parts();
|
|
|
|
let value = request_parts.headers.get(name);
|
2023-05-16 19:00:59 -04:00
|
|
|
value.map(|value| value.as_bytes().into())
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
2023-09-23 15:33:31 -04:00
|
|
|
/// Collect all request headers into a flat V8 array of alternating
/// name/value strings. All `cookie` headers are merged into a single
/// entry (values joined with "; ").
#[op2]
pub fn op_http_get_request_headers<'scope>(
  scope: &mut v8::HandleScope<'scope>,
  external: *const c_void,
) -> v8::Local<'scope, v8::Array> {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_get_request_headers") };
  let headers = &http.request_parts().headers;
  // Two slots for each header key/value pair
  let mut vec: SmallVec<[v8::Local<v8::Value>; 32]> =
    SmallVec::with_capacity(headers.len() * 2);

  let mut cookies: Option<Vec<&[u8]>> = None;
  for (name, value) in headers {
    if name == COOKIE {
      // Defer cookie values; they are joined into one entry below.
      if let Some(ref mut cookies) = cookies {
        cookies.push(value.as_bytes());
      } else {
        cookies = Some(vec![value.as_bytes()]);
      }
    } else {
      vec.push(
        v8::String::new_from_one_byte(
          scope,
          name.as_ref(),
          v8::NewStringType::Normal,
        )
        .unwrap()
        .into(),
      );
      vec.push(
        v8::String::new_from_one_byte(
          scope,
          value.as_bytes(),
          v8::NewStringType::Normal,
        )
        .unwrap()
        .into(),
      );
    }
  }

  // We treat cookies specially, because we don't want them to get them
  // mangled by the `Headers` object in JS. What we do is take all cookie
  // headers and concat them into a single cookie header, separated by
  // semicolons.
  // TODO(mmastrac): This should probably happen on the JS side on-demand
  if let Some(cookies) = cookies {
    let cookie_sep = "; ".as_bytes();

    vec.push(
      v8::String::new_external_onebyte_static(scope, COOKIE.as_ref())
        .unwrap()
        .into(),
    );
    vec.push(
      v8::String::new_from_one_byte(
        scope,
        cookies.join(cookie_sep).as_ref(),
        v8::NewStringType::Normal,
      )
      .unwrap()
      .into(),
    );
  }

  v8::Array::new_with_elements(scope, vec.as_slice())
}
|
|
|
|
|
2023-09-23 15:33:31 -04:00
|
|
|
/// Take the request body out of the record and expose it as a readable
/// resource; returns the resource id.
///
/// The rid is also registered with the record via `HttpRequestBodyAutocloser`
/// so the body resource stays valid for the lifetime of the response
/// (see #20206).
#[op2(fast)]
#[smi]
pub fn op_http_read_request_body(
  state: Rc<RefCell<OpState>>,
  external: *const c_void,
) -> ResourceId {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_read_request_body") };
  let rid = if let Some(incoming) = http.take_request_body() {
    let body_resource = Rc::new(HttpRequestBody::new(incoming));
    state.borrow_mut().resource_table.add_rc(body_resource)
  } else {
    // This should not be possible, but rather than panicking we'll return an invalid
    // resource value to JavaScript.
    ResourceId::MAX
  };
  http.put_resource(HttpRequestBodyAutocloser::new(rid, state.clone()));
  rid
}
|
|
|
|
|
2023-08-10 17:35:01 -04:00
|
|
|
/// Append one header (name/value passed as one-byte/latin-1 strings) to the
/// pending response.
#[op2(fast)]
pub fn op_http_set_response_header(
  external: *const c_void,
  #[string(onebyte)] name: Cow<[u8]>,
  #[string(onebyte)] value: Cow<[u8]>,
) {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_header") };
  let mut response_parts = http.response_parts();
  // These are valid latin-1 strings
  let name = HeaderName::from_bytes(&name).unwrap();
  let value = match value {
    // Borrowed values must be copied through the checked constructor.
    Cow::Borrowed(bytes) => HeaderValue::from_bytes(bytes).unwrap(),
    // SAFETY: These are valid latin-1 strings
    Cow::Owned(bytes_vec) => unsafe {
      HeaderValue::from_maybe_shared_unchecked(bytes::Bytes::from(bytes_vec))
    },
  };
  response_parts.headers.append(name, value);
}
|
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
/// Bulk-append response headers from a V8 array of `[name, value]` pairs.
#[op2]
pub fn op_http_set_response_headers(
  scope: &mut v8::HandleScope,
  external: *const c_void,
  headers: v8::Local<v8::Array>,
) {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_headers") };
  // TODO(mmastrac): Invalid headers should be handled?
  let mut response_parts = http.response_parts();

  let len = headers.length();
  // NOTE(review): `len * 2` looks like a leftover from a flat-array layout;
  // reserving `len` entries would suffice for `len` pairs. Harmless
  // over-reservation either way.
  let header_len = len * 2;
  response_parts
    .headers
    .reserve(header_len.try_into().unwrap());

  for i in 0..len {
    let item = headers.get_index(scope, i).unwrap();
    let pair = v8::Local::<v8::Array>::try_from(item).unwrap();
    let name = pair.get_index(scope, 0).unwrap();
    let value = pair.get_index(scope, 1).unwrap();

    let v8_name: ByteString = from_v8(scope, name).unwrap();
    let v8_value: ByteString = from_v8(scope, value).unwrap();
    let header_name = HeaderName::from_bytes(&v8_name).unwrap();
    let header_value =
      // SAFETY: These are valid latin-1 strings
      unsafe { HeaderValue::from_maybe_shared_unchecked(v8_value) };
    response_parts.headers.append(header_name, header_value);
  }
}
|
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
/// Store HTTP trailers on the record, to be sent after the response body.
#[op2]
pub fn op_http_set_response_trailers(
  external: *const c_void,
  #[serde] trailers: Vec<(ByteString, ByteString)>,
) {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_trailers") };
  let mut trailer_map: HeaderMap = HeaderMap::with_capacity(trailers.len());
  for (name, value) in trailers {
    // These are valid latin-1 strings
    let name = HeaderName::from_bytes(&name).unwrap();
    // SAFETY: These are valid latin-1 strings
    let value = unsafe { HeaderValue::from_maybe_shared_unchecked(value) };
    trailer_map.append(name, value);
  }
  *http.trailers() = Some(trailer_map);
}
|
|
|
|
|
2023-09-16 17:15:15 -04:00
|
|
|
fn is_request_compressible(
|
|
|
|
length: Option<usize>,
|
|
|
|
headers: &HeaderMap,
|
|
|
|
) -> Compression {
|
|
|
|
if let Some(length) = length {
|
|
|
|
// By the time we add compression headers and Accept-Encoding, it probably doesn't make sense
|
|
|
|
// to compress stuff that's smaller than this.
|
|
|
|
if length < 64 {
|
|
|
|
return Compression::None;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-05-10 07:23:14 -04:00
|
|
|
let Some(accept_encoding) = headers.get(ACCEPT_ENCODING) else {
|
|
|
|
return Compression::None;
|
|
|
|
};
|
2023-05-24 13:54:47 -04:00
|
|
|
|
2024-01-26 10:33:55 -05:00
|
|
|
match accept_encoding.to_str() {
|
2023-05-24 13:54:47 -04:00
|
|
|
// Firefox and Chrome send this -- no need to parse
|
2024-01-26 10:33:55 -05:00
|
|
|
Ok("gzip, deflate, br") => return Compression::Brotli,
|
|
|
|
Ok("gzip") => return Compression::GZip,
|
|
|
|
Ok("br") => return Compression::Brotli,
|
2023-05-24 13:54:47 -04:00
|
|
|
_ => (),
|
2023-05-10 07:23:14 -04:00
|
|
|
}
|
2023-05-24 13:54:47 -04:00
|
|
|
|
2023-05-10 07:23:14 -04:00
|
|
|
// Fall back to the expensive parser
|
2023-12-21 19:54:28 -05:00
|
|
|
let accepted =
|
|
|
|
fly_accept_encoding::encodings_iter_http_1(headers).filter(|r| {
|
|
|
|
matches!(
|
|
|
|
r,
|
|
|
|
Ok((
|
|
|
|
Some(Encoding::Identity | Encoding::Gzip | Encoding::Brotli),
|
|
|
|
_
|
|
|
|
))
|
|
|
|
)
|
|
|
|
});
|
2023-05-10 07:23:14 -04:00
|
|
|
match fly_accept_encoding::preferred(accepted) {
|
2023-05-24 13:54:47 -04:00
|
|
|
Ok(Some(fly_accept_encoding::Encoding::Gzip)) => Compression::GZip,
|
|
|
|
Ok(Some(fly_accept_encoding::Encoding::Brotli)) => Compression::Brotli,
|
|
|
|
_ => Compression::None,
|
2023-05-10 07:23:14 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn is_response_compressible(headers: &HeaderMap) -> bool {
|
|
|
|
if let Some(content_type) = headers.get(CONTENT_TYPE) {
|
|
|
|
if !is_content_compressible(content_type) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if headers.contains_key(CONTENT_ENCODING) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if headers.contains_key(CONTENT_RANGE) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if let Some(cache_control) = headers.get(CACHE_CONTROL) {
|
|
|
|
if let Ok(s) = std::str::from_utf8(cache_control.as_bytes()) {
|
|
|
|
if let Some(cache_control) = CacheControl::from_value(s) {
|
|
|
|
if cache_control.no_transform {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
true
|
|
|
|
}
|
|
|
|
|
|
|
|
fn modify_compressibility_from_response(
|
|
|
|
compression: Compression,
|
|
|
|
headers: &mut HeaderMap,
|
|
|
|
) -> Compression {
|
|
|
|
ensure_vary_accept_encoding(headers);
|
|
|
|
if compression == Compression::None {
|
|
|
|
return Compression::None;
|
|
|
|
}
|
|
|
|
if !is_response_compressible(headers) {
|
|
|
|
return Compression::None;
|
|
|
|
}
|
2023-05-24 13:54:47 -04:00
|
|
|
let encoding = match compression {
|
|
|
|
Compression::Brotli => "br",
|
|
|
|
Compression::GZip => "gzip",
|
|
|
|
_ => unreachable!(),
|
|
|
|
};
|
2023-05-10 07:23:14 -04:00
|
|
|
weaken_etag(headers);
|
|
|
|
headers.remove(CONTENT_LENGTH);
|
2023-05-24 13:54:47 -04:00
|
|
|
headers.insert(CONTENT_ENCODING, HeaderValue::from_static(encoding));
|
2023-05-10 07:23:14 -04:00
|
|
|
compression
|
|
|
|
}
|
|
|
|
|
|
|
|
/// If the user provided a ETag header for uncompressed data, we need to ensure it is a
|
|
|
|
/// weak Etag header ("W/").
|
|
|
|
fn weaken_etag(hmap: &mut HeaderMap) {
|
2023-12-27 11:59:57 -05:00
|
|
|
if let Some(etag) = hmap.get_mut(hyper::header::ETAG) {
|
2023-05-10 07:23:14 -04:00
|
|
|
if !etag.as_bytes().starts_with(b"W/") {
|
|
|
|
let mut v = Vec::with_capacity(etag.as_bytes().len() + 2);
|
|
|
|
v.extend(b"W/");
|
|
|
|
v.extend(etag.as_bytes());
|
|
|
|
*etag = v.try_into().unwrap();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set Vary: Accept-Encoding header for direct body response.
|
|
|
|
// Note: we set the header irrespective of whether or not we compress the data
|
|
|
|
// to make sure cache services do not serve uncompressed data to clients that
|
|
|
|
// support compression.
|
|
|
|
fn ensure_vary_accept_encoding(hmap: &mut HeaderMap) {
|
2023-12-27 11:59:57 -05:00
|
|
|
if let Some(v) = hmap.get_mut(hyper::header::VARY) {
|
2023-05-10 07:23:14 -04:00
|
|
|
if let Ok(s) = v.to_str() {
|
|
|
|
if !s.to_lowercase().contains("accept-encoding") {
|
|
|
|
*v = format!("Accept-Encoding, {s}").try_into().unwrap()
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
hmap.insert(
|
2023-12-27 11:59:57 -05:00
|
|
|
hyper::header::VARY,
|
2023-05-10 07:23:14 -04:00
|
|
|
HeaderValue::from_static("Accept-Encoding"),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2023-09-25 11:23:55 -04:00
|
|
|
/// Sets the appropriate response body. Use `force_instantiate_body` if you need
/// to ensure that the response is cleaned up correctly (eg: for resources).
///
/// `length` is the uncompressed body length (when known) and feeds the
/// compression decision; `response_fn` builds the body once the final
/// compression is known.
fn set_response(
  http: Rc<HttpRecord>,
  length: Option<usize>,
  status: u16,
  force_instantiate_body: bool,
  response_fn: impl FnOnce(Compression) -> ResponseBytesInner,
) {
  // The request may have been cancelled by this point and if so, there's no need for us to
  // do all of this work to send the response.
  if !http.cancelled() {
    // Provisional compression choice from the request side (Accept-Encoding
    // plus body length).
    let compression =
      is_request_compressible(length, &http.request_parts().headers);
    let mut response_headers =
      std::cell::RefMut::map(http.response_parts(), |this| &mut this.headers);
    // Final decision also consults (and mutates) the response headers.
    let compression =
      modify_compressibility_from_response(compression, &mut response_headers);
    // Release the RefMut borrow of the response parts before the body is
    // installed below.
    drop(response_headers);
    http.set_response_body(response_fn(compression));

    // The Javascript code should never provide a status that is invalid here (see 23_response.js), so we
    // will quietly ignore invalid values.
    if let Ok(code) = StatusCode::from_u16(status) {
      http.response_parts().status = code;
    }
  } else if force_instantiate_body {
    // Cancelled: instantiate and immediately abort the body so any backing
    // resource is still cleaned up.
    response_fn(Compression::None).abort();
  }

  http.complete();
}
|
|
|
|
|
2023-11-13 14:17:31 -05:00
|
|
|
/// Returned promise resolves when body streaming finishes.
/// Call [`op_http_close_after_finish`] when done with the external.
#[op2(async)]
pub async fn op_http_set_response_body_resource(
  state: Rc<RefCell<OpState>>,
  external: *const c_void,
  #[smi] stream_rid: ResourceId,
  auto_close: bool,
  status: u16,
) -> Result<(), AnyError> {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_body_resource") };

  // IMPORTANT: We might end up requiring the OpState lock in set_response if we need to drop the request
  // body resource so we _cannot_ hold the OpState lock longer than necessary.

  // If the stream is auto_close, we will hold the last ref to it until the response is complete.
  // TODO(mmastrac): We should be using the same auto-close functionality rather than removing autoclose resources.
  // It's possible things could fail elsewhere if code expects the rid to continue existing after the response has been
  // returned.
  let resource = {
    // Scoped borrow: the OpState lock is released as soon as the resource
    // is fetched (see the IMPORTANT note above).
    let mut state = state.borrow_mut();
    if auto_close {
      state.resource_table.take_any(stream_rid)?
    } else {
      state.resource_table.get_any(stream_rid)?
    }
  };

  *http.needs_close_after_finish() = true;

  set_response(
    http.clone(),
    // Upper bound of the resource's size hint, when available, drives the
    // compression decision.
    resource.size_hint().1.map(|s| s as usize),
    status,
    // force_instantiate_body: ensure the resource is cleaned up even if the
    // request was already cancelled.
    true,
    move |compression| {
      ResponseBytesInner::from_resource(compression, resource, auto_close)
    },
  );

  // Resolve only once the body has finished streaming.
  http.response_body_finished().await;
  Ok(())
}
|
|
|
|
|
2023-11-13 14:17:31 -05:00
|
|
|
/// Consumes the external pointer and signals the record that the caller is
/// done with it (the companion to `op_http_set_response_body_resource`,
/// which sets `needs_close_after_finish`).
#[op2(fast)]
pub fn op_http_close_after_finish(external: *const c_void) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_close_after_finish") };
  http.close_after_finish();
}
|
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
/// Sets the response body to a text string; an empty string completes the
/// response immediately without instantiating a body.
#[op2(fast)]
pub fn op_http_set_response_body_text(
  external: *const c_void,
  #[string] text: String,
  status: u16,
) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_set_response_body_text") };
  if !text.is_empty() {
    // The uncompressed byte length feeds the compression decision.
    set_response(http, Some(text.len()), status, false, |compression| {
      ResponseBytesInner::from_vec(compression, text.into_bytes())
    });
  } else {
    set_promise_complete(http, status);
  }
}
|
|
|
|
|
2024-01-09 11:25:10 -05:00
|
|
|
/// Sets the response body to a byte buffer; an empty buffer completes the
/// response immediately without instantiating a body.
#[op2]
pub fn op_http_set_response_body_bytes(
  external: *const c_void,
  #[buffer] buffer: JsBuffer,
  status: u16,
) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_set_response_body_bytes") };
  if !buffer.is_empty() {
    // The uncompressed byte length feeds the compression decision.
    set_response(http, Some(buffer.len()), status, false, |compression| {
      ResponseBytesInner::from_bufview(compression, BufView::from(buffer))
    });
  } else {
    set_promise_complete(http, status);
  }
}
|
|
|
|
|
2023-04-24 17:24:40 -04:00
|
|
|
fn serve_http11_unconditional(
|
|
|
|
io: impl HttpServeStream,
|
2023-11-13 14:17:31 -05:00
|
|
|
svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
|
2023-09-11 20:06:38 -04:00
|
|
|
cancel: Rc<CancelHandle>,
|
2023-12-27 11:59:57 -05:00
|
|
|
) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
|
2023-04-24 17:24:40 -04:00
|
|
|
let conn = http1::Builder::new()
|
|
|
|
.keep_alive(true)
|
2023-06-01 10:07:26 -04:00
|
|
|
.writev(*USE_WRITEV)
|
2023-09-11 20:06:38 -04:00
|
|
|
.serve_connection(TokioIo::new(io), svc)
|
|
|
|
.with_upgrades();
|
|
|
|
|
|
|
|
async {
|
|
|
|
match conn.or_abort(cancel).await {
|
|
|
|
Err(mut conn) => {
|
|
|
|
Pin::new(&mut conn).graceful_shutdown();
|
|
|
|
conn.await
|
|
|
|
}
|
|
|
|
Ok(res) => res,
|
|
|
|
}
|
|
|
|
}
|
2023-04-24 17:24:40 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
fn serve_http2_unconditional(
|
|
|
|
io: impl HttpServeStream,
|
2023-11-13 14:17:31 -05:00
|
|
|
svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
|
2023-09-11 20:06:38 -04:00
|
|
|
cancel: Rc<CancelHandle>,
|
2023-12-27 11:59:57 -05:00
|
|
|
) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
|
2023-07-31 09:34:53 -04:00
|
|
|
let conn =
|
|
|
|
http2::Builder::new(LocalExecutor).serve_connection(TokioIo::new(io), svc);
|
2023-09-11 20:06:38 -04:00
|
|
|
async {
|
|
|
|
match conn.or_abort(cancel).await {
|
|
|
|
Err(mut conn) => {
|
|
|
|
Pin::new(&mut conn).graceful_shutdown();
|
|
|
|
conn.await
|
|
|
|
}
|
|
|
|
Ok(res) => res,
|
|
|
|
}
|
|
|
|
}
|
2023-04-24 17:24:40 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
async fn serve_http2_autodetect(
|
|
|
|
io: impl HttpServeStream,
|
2023-11-13 14:17:31 -05:00
|
|
|
svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
|
2023-09-11 20:06:38 -04:00
|
|
|
cancel: Rc<CancelHandle>,
|
2023-04-24 17:24:40 -04:00
|
|
|
) -> Result<(), AnyError> {
|
|
|
|
let prefix = NetworkStreamPrefixCheck::new(io, HTTP2_PREFIX);
|
|
|
|
let (matches, io) = prefix.match_prefix().await?;
|
|
|
|
if matches {
|
2023-09-11 20:06:38 -04:00
|
|
|
serve_http2_unconditional(io, svc, cancel)
|
|
|
|
.await
|
|
|
|
.map_err(|e| e.into())
|
2023-04-24 17:24:40 -04:00
|
|
|
} else {
|
2023-09-11 20:06:38 -04:00
|
|
|
serve_http11_unconditional(io, svc, cancel)
|
|
|
|
.await
|
|
|
|
.map_err(|e| e.into())
|
2023-04-24 17:24:40 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
/// Spawn a task serving one TLS connection: perform the handshake, then pick
/// HTTP/2 or HTTP/1.1 from the negotiated ALPN protocol (falling back to
/// prefix sniffing when nothing was negotiated).
fn serve_https(
  mut io: TlsStream,
  request_info: HttpConnectionProperties,
  lifetime: HttpLifetime,
  tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
) -> JoinHandle<Result<(), AnyError>> {
  let HttpLifetime {
    server_state,
    connection_cancel_handle,
    listen_cancel_handle,
  } = lifetime;

  // Every request on this connection is forwarded to the shared handler.
  let svc = service_fn(move |req: Request| {
    handle_request(req, request_info.clone(), server_state.clone(), tx.clone())
  });
  spawn(
    async {
      let handshake = io.handshake().await?;
      // If the client specifically negotiates a protocol, we will use it. If not, we'll auto-detect
      // based on the prefix bytes
      let handshake = handshake.alpn;
      if Some(TLS_ALPN_HTTP_2) == handshake.as_deref() {
        serve_http2_unconditional(io, svc, listen_cancel_handle)
          .await
          .map_err(|e| e.into())
      } else if Some(TLS_ALPN_HTTP_11) == handshake.as_deref() {
        serve_http11_unconditional(io, svc, listen_cancel_handle)
          .await
          .map_err(|e| e.into())
      } else {
        serve_http2_autodetect(io, svc, listen_cancel_handle).await
      }
    }
    // The whole connection (including the handshake) is torn down when the
    // connection cancel handle fires.
    .try_or_cancel(connection_cancel_handle),
  )
}
|
|
|
|
|
|
|
|
fn serve_http(
|
2023-04-24 17:24:40 -04:00
|
|
|
io: impl HttpServeStream,
|
2023-04-22 13:48:21 -04:00
|
|
|
request_info: HttpConnectionProperties,
|
2023-09-11 20:06:38 -04:00
|
|
|
lifetime: HttpLifetime,
|
2023-11-13 09:04:49 -05:00
|
|
|
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
|
2023-04-22 13:48:21 -04:00
|
|
|
) -> JoinHandle<Result<(), AnyError>> {
|
2023-09-11 20:06:38 -04:00
|
|
|
let HttpLifetime {
|
2023-11-13 12:32:34 -05:00
|
|
|
server_state,
|
2023-09-11 20:06:38 -04:00
|
|
|
connection_cancel_handle,
|
|
|
|
listen_cancel_handle,
|
|
|
|
} = lifetime;
|
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
let svc = service_fn(move |req: Request| {
|
2023-11-13 12:32:34 -05:00
|
|
|
handle_request(req, request_info.clone(), server_state.clone(), tx.clone())
|
2023-04-22 13:48:21 -04:00
|
|
|
});
|
2023-09-11 20:06:38 -04:00
|
|
|
spawn(
|
|
|
|
serve_http2_autodetect(io, svc, listen_cancel_handle)
|
|
|
|
.try_or_cancel(connection_cancel_handle),
|
|
|
|
)
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
2023-05-10 10:23:26 -04:00
|
|
|
fn serve_http_on<HTTP>(
|
2023-05-15 10:55:47 -04:00
|
|
|
connection: HTTP::Connection,
|
2023-04-22 13:48:21 -04:00
|
|
|
listen_properties: &HttpListenProperties,
|
2023-09-11 20:06:38 -04:00
|
|
|
lifetime: HttpLifetime,
|
2023-11-13 09:04:49 -05:00
|
|
|
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
|
2023-05-10 10:23:26 -04:00
|
|
|
) -> JoinHandle<Result<(), AnyError>>
|
|
|
|
where
|
|
|
|
HTTP: HttpPropertyExtractor,
|
|
|
|
{
|
2023-04-22 13:48:21 -04:00
|
|
|
let connection_properties: HttpConnectionProperties =
|
2023-05-15 10:55:47 -04:00
|
|
|
HTTP::connection_properties(listen_properties, &connection);
|
|
|
|
|
|
|
|
let network_stream = HTTP::to_network_stream_from_connection(connection);
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
match network_stream {
|
|
|
|
NetworkStream::Tcp(conn) => {
|
2023-09-11 20:06:38 -04:00
|
|
|
serve_http(conn, connection_properties, lifetime, tx)
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
NetworkStream::Tls(conn) => {
|
2023-09-11 20:06:38 -04:00
|
|
|
serve_https(conn, connection_properties, lifetime, tx)
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
#[cfg(unix)]
|
|
|
|
NetworkStream::Unix(conn) => {
|
2023-09-11 20:06:38 -04:00
|
|
|
serve_http(conn, connection_properties, lifetime, tx)
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-09-11 20:06:38 -04:00
|
|
|
/// Handles shared with every connection spawned by one HTTP server:
/// cancellation for the connections themselves, cancellation for the accept
/// loop, and the shared server state.
#[derive(Clone)]
struct HttpLifetime {
  // Cancels in-flight connections (see HttpJoinHandle::close/drop).
  connection_cancel_handle: Rc<CancelHandle>,
  // Cancels the listener's accept loop.
  listen_cancel_handle: Rc<CancelHandle>,
  server_state: SignallingRc<HttpServerState>,
}
|
|
|
|
|
|
|
|
/// Resource representing a running HTTP server: the spawned accept-loop
/// task, its cancellation handles, the channel on which new requests
/// arrive, and the shared server state.
struct HttpJoinHandle {
  // Set after the accept-loop task is spawned (see op_http_serve).
  join_handle: AsyncRefCell<Option<JoinHandle<Result<(), AnyError>>>>,
  connection_cancel_handle: Rc<CancelHandle>,
  listen_cancel_handle: Rc<CancelHandle>,
  // Receiving side of the per-request channel fed by handle_request.
  rx: AsyncRefCell<tokio::sync::mpsc::Receiver<Rc<HttpRecord>>>,
  server_state: SignallingRc<HttpServerState>,
}
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
impl HttpJoinHandle {
  /// Create a handle around the request-receiving channel; both cancel
  /// handles and the server state start out fresh, and the join handle is
  /// filled in later by the caller.
  fn new(rx: tokio::sync::mpsc::Receiver<Rc<HttpRecord>>) -> Self {
    Self {
      join_handle: AsyncRefCell::new(None),
      connection_cancel_handle: CancelHandle::new_rc(),
      listen_cancel_handle: CancelHandle::new_rc(),
      rx: AsyncRefCell::new(rx),
      server_state: HttpServerState::new(),
    }
  }

  /// Snapshot the handles shared with each spawned connection (all cheap
  /// reference-counted clones).
  fn lifetime(self: &Rc<Self>) -> HttpLifetime {
    HttpLifetime {
      connection_cancel_handle: self.connection_cancel_handle.clone(),
      listen_cancel_handle: self.listen_cancel_handle.clone(),
      server_state: self.server_state.clone(),
    }
  }

  /// Handle that cancels in-flight connections.
  fn connection_cancel_handle(self: &Rc<Self>) -> Rc<CancelHandle> {
    self.connection_cancel_handle.clone()
  }

  /// Handle that cancels the accept loop.
  fn listen_cancel_handle(self: &Rc<Self>) -> Rc<CancelHandle> {
    self.listen_cancel_handle.clone()
  }
}
|
|
|
|
|
|
|
|
impl Resource for HttpJoinHandle {
  fn name(&self) -> Cow<str> {
    "http".into()
  }

  fn close(self: Rc<Self>) {
    // During a close operation, we cancel everything: both the accept loop
    // and any in-flight connections.
    self.connection_cancel_handle.cancel();
    self.listen_cancel_handle.cancel();
  }
}
|
|
|
|
|
2023-05-08 03:52:56 -04:00
|
|
|
impl Drop for HttpJoinHandle {
  fn drop(&mut self) {
    // In some cases we may be dropped without closing, so let's cancel everything on the way out
    // (mirrors Resource::close).
    self.connection_cancel_handle.cancel();
    self.listen_cancel_handle.cancel();
  }
}
|
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
/// Start serving HTTP on the listener identified by `listener_rid`.
/// Returns the resource id of the server handle plus the listener's scheme
/// and fallback host.
#[op2]
#[serde]
pub fn op_http_serve<HTTP>(
  state: Rc<RefCell<OpState>>,
  #[smi] listener_rid: ResourceId,
) -> Result<(ResourceId, &'static str, String), AnyError>
where
  HTTP: HttpPropertyExtractor,
{
  let listener =
    HTTP::get_listener_for_rid(&mut state.borrow_mut(), listener_rid)?;

  let listen_properties = HTTP::listen_properties_from_listener(&listener)?;

  // Bounded channel carrying accepted requests to the JS side.
  let (tx, rx) = tokio::sync::mpsc::channel(10);
  let resource: Rc<HttpJoinHandle> = Rc::new(HttpJoinHandle::new(rx));
  let listen_cancel_clone = resource.listen_cancel_handle();

  let lifetime = resource.lifetime();

  let listen_properties_clone: HttpListenProperties = listen_properties.clone();
  // Accept loop: each accepted connection gets its own serving task.
  let handle = spawn(async move {
    loop {
      let conn = HTTP::accept_connection_from_listener(&listener)
        .try_or_cancel(listen_cancel_clone.clone())
        .await?;
      serve_http_on::<HTTP>(
        conn,
        &listen_properties_clone,
        lifetime.clone(),
        tx.clone(),
      );
    }
    // The loop only exits via the `?` above, so this tail is unreachable;
    // it exists to pin the task's Ok type.
    #[allow(unreachable_code)]
    Ok::<_, AnyError>(())
  });

  // Set the handle after we start the future
  *RcRef::map(&resource, |this| &this.join_handle)
    .try_borrow_mut()
    .unwrap() = Some(handle);

  Ok((
    state.borrow_mut().resource_table.add_rc(resource),
    listen_properties.scheme,
    listen_properties.fallback_host,
  ))
}
|
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
#[op2]
|
|
|
|
#[serde]
|
2023-05-10 10:23:26 -04:00
|
|
|
pub fn op_http_serve_on<HTTP>(
|
2023-04-22 13:48:21 -04:00
|
|
|
state: Rc<RefCell<OpState>>,
|
2023-08-03 16:36:32 -04:00
|
|
|
#[smi] connection_rid: ResourceId,
|
2023-05-10 10:23:26 -04:00
|
|
|
) -> Result<(ResourceId, &'static str, String), AnyError>
|
|
|
|
where
|
|
|
|
HTTP: HttpPropertyExtractor,
|
|
|
|
{
|
2023-05-15 10:55:47 -04:00
|
|
|
let connection =
|
|
|
|
HTTP::get_connection_for_rid(&mut state.borrow_mut(), connection_rid)?;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2023-05-15 10:55:47 -04:00
|
|
|
let listen_properties = HTTP::listen_properties_from_connection(&connection)?;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
let (tx, rx) = tokio::sync::mpsc::channel(10);
|
2023-09-11 20:06:38 -04:00
|
|
|
let resource: Rc<HttpJoinHandle> = Rc::new(HttpJoinHandle::new(rx));
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2023-05-10 10:23:26 -04:00
|
|
|
let handle: JoinHandle<Result<(), deno_core::anyhow::Error>> =
|
|
|
|
serve_http_on::<HTTP>(
|
2023-05-15 10:55:47 -04:00
|
|
|
connection,
|
2023-05-10 10:23:26 -04:00
|
|
|
&listen_properties,
|
2023-09-11 20:06:38 -04:00
|
|
|
resource.lifetime(),
|
2023-05-10 10:23:26 -04:00
|
|
|
tx,
|
|
|
|
);
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
// Set the handle after we start the future
|
2023-09-11 20:06:38 -04:00
|
|
|
*RcRef::map(&resource, |this| &this.join_handle)
|
2023-04-22 13:48:21 -04:00
|
|
|
.try_borrow_mut()
|
|
|
|
.unwrap() = Some(handle);
|
|
|
|
|
|
|
|
Ok((
|
|
|
|
state.borrow_mut().resource_table.add_rc(resource),
|
|
|
|
listen_properties.scheme,
|
|
|
|
listen_properties.fallback_host,
|
|
|
|
))
|
|
|
|
}
|
|
|
|
|
2023-05-30 20:02:52 -04:00
|
|
|
/// Synchronous, non-blocking call to see if there are any further HTTP requests. If anything
|
2023-11-13 09:04:49 -05:00
|
|
|
/// goes wrong in this method we return null and let the async handler pick up the real error.
|
2023-08-03 16:36:32 -04:00
|
|
|
#[op2(fast)]
|
2023-11-13 09:04:49 -05:00
|
|
|
pub fn op_http_try_wait(
|
|
|
|
state: &mut OpState,
|
|
|
|
#[smi] rid: ResourceId,
|
|
|
|
) -> *const c_void {
|
2023-05-30 20:02:52 -04:00
|
|
|
// The resource needs to exist.
|
2023-08-27 00:04:12 -04:00
|
|
|
let Ok(join_handle) = state.resource_table.get::<HttpJoinHandle>(rid) else {
|
2023-11-13 09:04:49 -05:00
|
|
|
return null();
|
2023-05-30 20:02:52 -04:00
|
|
|
};
|
|
|
|
|
|
|
|
// If join handle is somehow locked, just abort.
|
2023-08-27 00:04:12 -04:00
|
|
|
let Some(mut handle) =
|
2023-09-11 20:06:38 -04:00
|
|
|
RcRef::map(&join_handle, |this| &this.rx).try_borrow_mut()
|
2023-08-27 00:04:12 -04:00
|
|
|
else {
|
2023-11-13 09:04:49 -05:00
|
|
|
return null();
|
2023-05-30 20:02:52 -04:00
|
|
|
};
|
|
|
|
|
|
|
|
// See if there are any requests waiting on this channel. If not, return.
|
2023-11-13 09:04:49 -05:00
|
|
|
let Ok(record) = handle.try_recv() else {
|
|
|
|
return null();
|
2023-05-30 20:02:52 -04:00
|
|
|
};
|
|
|
|
|
2023-11-13 09:04:49 -05:00
|
|
|
let ptr = ExternalPointer::new(RcHttpRecord(record));
|
|
|
|
ptr.into_raw()
|
2023-05-30 20:02:52 -04:00
|
|
|
}
|
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
#[op2(async)]
|
2023-04-22 13:48:21 -04:00
|
|
|
pub async fn op_http_wait(
|
|
|
|
state: Rc<RefCell<OpState>>,
|
2023-08-03 16:36:32 -04:00
|
|
|
#[smi] rid: ResourceId,
|
2023-11-13 09:04:49 -05:00
|
|
|
) -> Result<*const c_void, AnyError> {
|
2023-04-22 13:48:21 -04:00
|
|
|
// We will get the join handle initially, as we might be consuming requests still
|
|
|
|
let join_handle = state
|
|
|
|
.borrow_mut()
|
|
|
|
.resource_table
|
|
|
|
.get::<HttpJoinHandle>(rid)?;
|
|
|
|
|
2023-09-11 20:06:38 -04:00
|
|
|
let cancel = join_handle.listen_cancel_handle();
|
2023-04-22 13:48:21 -04:00
|
|
|
let next = async {
|
2023-09-11 20:06:38 -04:00
|
|
|
let mut recv = RcRef::map(&join_handle, |this| &this.rx).borrow_mut().await;
|
2023-04-22 13:48:21 -04:00
|
|
|
recv.recv().await
|
|
|
|
}
|
|
|
|
.or_cancel(cancel)
|
|
|
|
.unwrap_or_else(|_| None)
|
|
|
|
.await;
|
|
|
|
|
|
|
|
// Do we have a request?
|
2023-11-13 09:04:49 -05:00
|
|
|
if let Some(record) = next {
|
|
|
|
let ptr = ExternalPointer::new(RcHttpRecord(record));
|
|
|
|
return Ok(ptr.into_raw());
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// No - we're shutting down
|
2023-09-11 20:06:38 -04:00
|
|
|
let res = RcRef::map(join_handle, |this| &this.join_handle)
|
2023-04-22 13:48:21 -04:00
|
|
|
.borrow_mut()
|
|
|
|
.await
|
|
|
|
.take()
|
|
|
|
.unwrap()
|
|
|
|
.await?;
|
|
|
|
|
|
|
|
// Filter out shutdown (ENOTCONN) errors
|
|
|
|
if let Err(err) = res {
|
|
|
|
if let Some(err) = err.source() {
|
|
|
|
if let Some(err) = err.downcast_ref::<io::Error>() {
|
|
|
|
if err.kind() == io::ErrorKind::NotConnected {
|
2023-11-13 09:04:49 -05:00
|
|
|
return Ok(null());
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return Err(err);
|
|
|
|
}
|
|
|
|
|
2023-11-13 09:04:49 -05:00
|
|
|
Ok(null())
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
2023-04-26 18:58:18 -04:00
|
|
|
|
2023-09-11 20:06:38 -04:00
|
|
|
/// Cancels the HTTP handle.
|
|
|
|
#[op2(fast)]
|
|
|
|
pub fn op_http_cancel(
|
|
|
|
state: &mut OpState,
|
|
|
|
#[smi] rid: ResourceId,
|
|
|
|
graceful: bool,
|
|
|
|
) -> Result<(), AnyError> {
|
|
|
|
let join_handle = state.resource_table.get::<HttpJoinHandle>(rid)?;
|
|
|
|
|
|
|
|
if graceful {
|
|
|
|
// In a graceful shutdown, we close the listener and allow all the remaining connections to drain
|
|
|
|
join_handle.listen_cancel_handle().cancel();
|
|
|
|
} else {
|
|
|
|
// In a forceful shutdown, we close everything
|
|
|
|
join_handle.listen_cancel_handle().cancel();
|
|
|
|
join_handle.connection_cancel_handle().cancel();
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
#[op2(async)]
|
|
|
|
pub async fn op_http_close(
|
|
|
|
state: Rc<RefCell<OpState>>,
|
|
|
|
#[smi] rid: ResourceId,
|
|
|
|
graceful: bool,
|
|
|
|
) -> Result<(), AnyError> {
|
|
|
|
let join_handle = state
|
|
|
|
.borrow_mut()
|
|
|
|
.resource_table
|
|
|
|
.take::<HttpJoinHandle>(rid)?;
|
|
|
|
|
|
|
|
if graceful {
|
2023-11-23 11:39:17 -05:00
|
|
|
http_general_trace!("graceful shutdown");
|
2023-09-11 20:06:38 -04:00
|
|
|
// In a graceful shutdown, we close the listener and allow all the remaining connections to drain
|
|
|
|
join_handle.listen_cancel_handle().cancel();
|
2023-11-23 11:39:17 -05:00
|
|
|
poll_fn(|cx| join_handle.server_state.poll_complete(cx)).await;
|
2023-09-11 20:06:38 -04:00
|
|
|
} else {
|
2023-11-23 11:39:17 -05:00
|
|
|
http_general_trace!("forceful shutdown");
|
2023-09-11 20:06:38 -04:00
|
|
|
// In a forceful shutdown, we close everything
|
|
|
|
join_handle.listen_cancel_handle().cancel();
|
|
|
|
join_handle.connection_cancel_handle().cancel();
|
2023-11-13 14:17:31 -05:00
|
|
|
// Give streaming responses a tick to close
|
|
|
|
tokio::task::yield_now().await;
|
2023-09-11 20:06:38 -04:00
|
|
|
}
|
|
|
|
|
2023-11-23 11:39:17 -05:00
|
|
|
http_general_trace!("awaiting shutdown");
|
|
|
|
|
2023-09-11 20:06:38 -04:00
|
|
|
let mut join_handle = RcRef::map(&join_handle, |this| &this.join_handle)
|
|
|
|
.borrow_mut()
|
|
|
|
.await;
|
|
|
|
if let Some(join_handle) = join_handle.take() {
|
|
|
|
join_handle.await??;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2023-04-26 18:58:18 -04:00
|
|
|
// Raw bidirectional stream handed to JS after an HTTP upgrade (created from a
// `tokio::io::duplex` pair; presumably used for WebSocket and similar
// upgrades -- see the `websocket_upgrade` import above). Each half lives in
// its own `AsyncRefCell` so reads and writes can proceed independently.
struct UpgradeStream {
  // Read half of the duplex pipe.
  read: AsyncRefCell<tokio::io::ReadHalf<tokio::io::DuplexStream>>,
  // Write half of the duplex pipe.
  write: AsyncRefCell<tokio::io::WriteHalf<tokio::io::DuplexStream>>,
  // Cancels in-flight reads/writes when the resource is closed.
  cancel_handle: CancelHandle,
}
|
|
|
|
|
|
|
|
impl UpgradeStream {
|
|
|
|
pub fn new(
|
|
|
|
read: tokio::io::ReadHalf<tokio::io::DuplexStream>,
|
|
|
|
write: tokio::io::WriteHalf<tokio::io::DuplexStream>,
|
|
|
|
) -> Self {
|
|
|
|
Self {
|
|
|
|
read: AsyncRefCell::new(read),
|
|
|
|
write: AsyncRefCell::new(write),
|
|
|
|
cancel_handle: CancelHandle::new(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
async fn read(self: Rc<Self>, buf: &mut [u8]) -> Result<usize, AnyError> {
|
|
|
|
let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
|
|
|
|
async {
|
|
|
|
let read = RcRef::map(self, |this| &this.read);
|
|
|
|
let mut read = read.borrow_mut().await;
|
|
|
|
Ok(Pin::new(&mut *read).read(buf).await?)
|
|
|
|
}
|
|
|
|
.try_or_cancel(cancel_handle)
|
|
|
|
.await
|
|
|
|
}
|
|
|
|
|
|
|
|
async fn write(self: Rc<Self>, buf: &[u8]) -> Result<usize, AnyError> {
|
|
|
|
let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
|
|
|
|
async {
|
|
|
|
let write = RcRef::map(self, |this| &this.write);
|
|
|
|
let mut write = write.borrow_mut().await;
|
|
|
|
Ok(Pin::new(&mut *write).write(buf).await?)
|
|
|
|
}
|
|
|
|
.try_or_cancel(cancel_handle)
|
|
|
|
.await
|
|
|
|
}
|
2023-07-07 12:47:08 -04:00
|
|
|
|
|
|
|
async fn write_vectored(
|
|
|
|
self: Rc<Self>,
|
|
|
|
buf1: &[u8],
|
|
|
|
buf2: &[u8],
|
|
|
|
) -> Result<usize, AnyError> {
|
|
|
|
let mut wr = RcRef::map(self, |r| &r.write).borrow_mut().await;
|
|
|
|
|
|
|
|
let total = buf1.len() + buf2.len();
|
|
|
|
let mut bufs = [std::io::IoSlice::new(buf1), std::io::IoSlice::new(buf2)];
|
|
|
|
let mut nwritten = wr.write_vectored(&bufs).await?;
|
|
|
|
if nwritten == total {
|
|
|
|
return Ok(nwritten);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Slightly more optimized than (unstable) write_all_vectored for 2 iovecs.
|
|
|
|
while nwritten <= buf1.len() {
|
|
|
|
bufs[0] = std::io::IoSlice::new(&buf1[nwritten..]);
|
|
|
|
nwritten += wr.write_vectored(&bufs).await?;
|
|
|
|
}
|
|
|
|
|
|
|
|
// First buffer out of the way.
|
|
|
|
if nwritten < total && nwritten > buf1.len() {
|
|
|
|
wr.write_all(&buf2[nwritten - buf1.len()..]).await?;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(total)
|
|
|
|
}
|
2023-04-26 18:58:18 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
impl Resource for UpgradeStream {
  fn name(&self) -> Cow<str> {
    "httpRawUpgradeStream".into()
  }

  // deno_core helper macros generating the Resource read/write plumbing --
  // presumably delegating to the inherent `read`/`write` methods above;
  // see the macro definitions in deno_core to confirm.
  deno_core::impl_readable_byob!();
  deno_core::impl_writable!();

  fn close(self: Rc<Self>) {
    // Abort any in-flight read/write so the stream halves can be dropped.
    self.cancel_handle.cancel();
  }
}
|
2023-07-07 12:47:08 -04:00
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
#[op2(fast)]
|
|
|
|
pub fn op_can_write_vectored(
|
|
|
|
state: &mut OpState,
|
|
|
|
#[smi] rid: ResourceId,
|
|
|
|
) -> bool {
|
2023-07-18 17:34:26 -04:00
|
|
|
state.resource_table.get::<UpgradeStream>(rid).is_ok()
|
|
|
|
}
|
|
|
|
|
2023-09-24 18:07:22 -04:00
|
|
|
#[op2(async)]
|
|
|
|
#[number]
|
2023-07-07 12:47:08 -04:00
|
|
|
pub async fn op_raw_write_vectored(
|
|
|
|
state: Rc<RefCell<OpState>>,
|
2023-09-24 18:07:22 -04:00
|
|
|
#[smi] rid: ResourceId,
|
|
|
|
#[buffer] buf1: JsBuffer,
|
|
|
|
#[buffer] buf2: JsBuffer,
|
2023-07-07 12:47:08 -04:00
|
|
|
) -> Result<usize, AnyError> {
|
|
|
|
let resource: Rc<UpgradeStream> =
|
|
|
|
state.borrow().resource_table.get::<UpgradeStream>(rid)?;
|
|
|
|
let nwritten = resource.write_vectored(&buf1, &buf2).await?;
|
|
|
|
Ok(nwritten)
|
|
|
|
}
|