// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use crate::compressible::is_content_compressible;
use crate::extract_network_stream;
use crate::network_buffered_stream::NetworkStreamPrefixCheck;
use crate::request_body::HttpRequestBody;
use crate::request_properties::HttpConnectionProperties;
use crate::request_properties::HttpListenProperties;
use crate::request_properties::HttpPropertyExtractor;
use crate::response_body::Compression;
use crate::response_body::ResponseBytesInner;
use crate::service::handle_request;
use crate::service::http_general_trace;
use crate::service::http_trace;
use crate::service::HttpRecord;
use crate::service::HttpRecordResponse;
use crate::service::HttpRequestBodyAutocloser;
use crate::service::HttpServerState;
use crate::service::SignallingRc;
use crate::websocket_upgrade::WebSocketUpgrade;
use crate::LocalExecutor;
use crate::Options;
use cache_control::CacheControl;
use deno_core::external;
use deno_core::futures::future::poll_fn;
use deno_core::futures::TryFutureExt;
use deno_core::op2;
use deno_core::serde_v8::from_v8;
use deno_core::unsync::spawn;
use deno_core::unsync::JoinHandle;
use deno_core::v8;
use deno_core::AsyncRefCell;
use deno_core::AsyncResult;
use deno_core::BufView;
use deno_core::ByteString;
use deno_core::CancelFuture;
use deno_core::CancelHandle;
use deno_core::CancelTryFuture;
use deno_core::ExternalPointer;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_net::ops_tls::TlsStream;
use deno_net::raw::NetworkStream;
use deno_websocket::ws_create_server_stream;
use hyper::body::Incoming;
use hyper::header::HeaderMap;
use hyper::header::ACCEPT_ENCODING;
use hyper::header::CACHE_CONTROL;
use hyper::header::CONTENT_ENCODING;
use hyper::header::CONTENT_LENGTH;
use hyper::header::CONTENT_RANGE;
use hyper::header::CONTENT_TYPE;
use hyper::header::COOKIE;
use hyper::http::HeaderName;
use hyper::http::HeaderValue;
use hyper::server::conn::http1;
use hyper::server::conn::http2;
use hyper::service::service_fn;
use hyper::service::HttpService;
use hyper::StatusCode;
use hyper_util::rt::TokioIo;
use once_cell::sync::Lazy;
use smallvec::SmallVec;
use std::borrow::Cow;
use std::cell::RefCell;
use std::ffi::c_void;
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::ptr::null;
use std::rc::Rc;

use super::fly_accept_encoding;
use fly_accept_encoding::Encoding;

use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;

type Request = hyper::Request<Incoming>;

static USE_WRITEV: Lazy<bool> = Lazy::new(|| {
  let enable = std::env::var("DENO_USE_WRITEV").ok();

  if let Some(val) = enable {
    return !val.is_empty();
  }

  false
});

/// All HTTP/2 connections start with this byte string.
///
/// In HTTP/2, each endpoint is required to send a connection preface as a final confirmation
/// of the protocol in use and to establish the initial settings for the HTTP/2 connection. The
/// client and server each send a different connection preface.
///
/// The client connection preface starts with a sequence of 24 octets, which in hex notation is:
///
/// 0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
///
/// That is, the connection preface starts with the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n. This sequence
/// MUST be followed by a SETTINGS frame (Section 6.5), which MAY be empty.
const HTTP2_PREFIX: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";

/// ALPN negotiation for "h2"
const TLS_ALPN_HTTP_2: &[u8] = b"h2";

/// ALPN negotiation for "http/1.1"
const TLS_ALPN_HTTP_11: &[u8] = b"http/1.1";

/// Name a trait for streams we can serve HTTP over.
trait HttpServeStream:
  tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static
{
}
impl<
    S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static,
  > HttpServeStream for S
{
}

#[repr(transparent)]
struct RcHttpRecord(Rc<HttpRecord>);

// Register the [`HttpRecord`] as an external.
external!(RcHttpRecord, "http record");

/// Construct Rc<HttpRecord> from raw external pointer, consuming
/// refcount. You must make sure the external is deleted on the JS side.
macro_rules! take_external {
  ($external:expr, $args:tt) => {{
    let ptr = ExternalPointer::<RcHttpRecord>::from_raw($external);
    let record = ptr.unsafely_take().0;
    http_trace!(record, $args);
    record
  }};
}

/// Clone Rc<HttpRecord> from raw external pointer.
macro_rules! clone_external {
  ($external:expr, $args:tt) => {{
    let ptr = ExternalPointer::<RcHttpRecord>::from_raw($external);
    ptr.unsafely_deref().0.clone()
  }};
}
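
// Ownership note: ops whose SAFETY comment reads "external is deleted before
// calling this op" consume the external's refcount with `take_external!`
// (e.g. `op_http_set_promise_complete`), while ops that run while JS still
// holds the external clone the `Rc` with `clone_external!`
// (e.g. `op_http_get_request_header`).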

#[derive(Debug, thiserror::Error)]
pub enum HttpNextError {
  #[error(transparent)]
  Resource(deno_core::error::AnyError),
  #[error("{0}")]
  Io(#[from] io::Error),
  #[error(transparent)]
  WebSocketUpgrade(crate::websocket_upgrade::WebSocketUpgradeError),
  #[error("{0}")]
  Hyper(#[from] hyper::Error),
  #[error(transparent)]
  JoinError(#[from] tokio::task::JoinError),
  #[error(transparent)]
  Canceled(#[from] deno_core::Canceled),
  #[error(transparent)]
  HttpPropertyExtractor(deno_core::error::AnyError),
  #[error(transparent)]
  UpgradeUnavailable(#[from] crate::service::UpgradeUnavailableError),
}

#[op2(fast)]
#[smi]
pub fn op_http_upgrade_raw(
  state: &mut OpState,
  external: *const c_void,
) -> Result<ResourceId, HttpNextError> {
  // SAFETY: external is deleted before calling this op.
  let http = unsafe { take_external!(external, "op_http_upgrade_raw") };

  // Stage 1: extract the upgrade future
  let upgrade = http.upgrade()?;
  let (read, write) = tokio::io::duplex(1024);
  let (read_rx, write_tx) = tokio::io::split(read);
  let (mut write_rx, mut read_tx) = tokio::io::split(write);
  spawn(async move {
    let mut upgrade_stream = WebSocketUpgrade::<()>::default();

    // Stage 2: Extract the Upgraded connection
    let mut buf = [0; 1024];
    let upgraded = loop {
      let read = Pin::new(&mut write_rx).read(&mut buf).await?;
      match upgrade_stream.write(&buf[..read]) {
        Ok(None) => continue,
        Ok(Some((response, bytes))) => {
          let (response_parts, _) = response.into_parts();
          *http.response_parts() = response_parts;
          http.complete();
          let mut upgraded = TokioIo::new(upgrade.await?);
          upgraded.write_all(&bytes).await?;
          break upgraded;
        }
        Err(err) => return Err(HttpNextError::WebSocketUpgrade(err)),
      }
    };

    // Stage 3: Pump the data
    let (mut upgraded_rx, mut upgraded_tx) = tokio::io::split(upgraded);

    spawn(async move {
      let mut buf = [0; 1024];
      loop {
        let read = upgraded_rx.read(&mut buf).await?;
        if read == 0 {
          break;
        }
        read_tx.write_all(&buf[..read]).await?;
      }
      Ok::<_, HttpNextError>(())
    });
    spawn(async move {
      let mut buf = [0; 1024];
      loop {
        let read = write_rx.read(&mut buf).await?;
        if read == 0 {
          break;
        }
        upgraded_tx.write_all(&buf[..read]).await?;
      }
      Ok::<_, HttpNextError>(())
    });

    Ok(())
  });

  Ok(
    state
      .resource_table
      .add(UpgradeStream::new(read_rx, write_tx)),
  )
}

#[op2(async)]
#[smi]
pub async fn op_http_upgrade_websocket_next(
  state: Rc<RefCell<OpState>>,
  external: *const c_void,
  #[serde] headers: Vec<(ByteString, ByteString)>,
) -> Result<ResourceId, HttpNextError> {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_upgrade_websocket_next") };
  // Stage 1: set the response to 101 Switching Protocols and send it
  let upgrade = http.upgrade()?;
  {
    let mut response_parts = http.response_parts();
    response_parts.status = StatusCode::SWITCHING_PROTOCOLS;
    for (name, value) in headers {
      response_parts.headers.append(
        HeaderName::from_bytes(&name).unwrap(),
        HeaderValue::from_bytes(&value).unwrap(),
      );
    }
  }
  http.complete();

  // Stage 2: wait for the request to finish upgrading
  let upgraded = upgrade.await?;

  // Stage 3: take the extracted raw network stream and upgrade it to a websocket, then return it
  let (stream, bytes) = extract_network_stream(upgraded);
  Ok(ws_create_server_stream(
    &mut state.borrow_mut(),
    stream,
    bytes,
  ))
}

#[op2(fast)]
pub fn op_http_set_promise_complete(external: *const c_void, status: u16) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_set_promise_complete") };
  set_promise_complete(http, status);
}

fn set_promise_complete(http: Rc<HttpRecord>, status: u16) {
  // The Javascript code should never provide a status that is invalid here (see 23_response.js), so we
  // will quietly ignore invalid values.
  if let Ok(code) = StatusCode::from_u16(status) {
    http.response_parts().status = code;
  }
  http.complete();
}

#[op2]
pub fn op_http_get_request_method_and_url<'scope, HTTP>(
  scope: &mut v8::HandleScope<'scope>,
  external: *const c_void,
) -> v8::Local<'scope, v8::Array>
where
  HTTP: HttpPropertyExtractor,
{
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_get_request_method_and_url") };
  let request_info = http.request_info();
  let request_parts = http.request_parts();
  let request_properties = HTTP::request_properties(
    &request_info,
    &request_parts.uri,
    &request_parts.headers,
  );

  let method: v8::Local<v8::Value> = v8::String::new_from_utf8(
    scope,
    request_parts.method.as_str().as_bytes(),
    v8::NewStringType::Normal,
  )
  .unwrap()
  .into();

  let authority: v8::Local<v8::Value> = match request_properties.authority {
    Some(authority) => v8::String::new_from_utf8(
      scope,
      authority.as_bytes(),
      v8::NewStringType::Normal,
    )
    .unwrap()
    .into(),
    None => v8::undefined(scope).into(),
  };

  // Only extract the path part - we handle authority elsewhere
  let path = match request_parts.uri.path_and_query() {
    Some(path_and_query) => {
      let path = path_and_query.as_str();
      if matches!(path.as_bytes().first(), Some(b'/' | b'*')) {
        Cow::Borrowed(path)
      } else {
        Cow::Owned(format!("/{}", path))
      }
    }
    None => Cow::Borrowed(""),
  };

  let path: v8::Local<v8::Value> = v8::String::new_from_utf8(
    scope,
    path.as_bytes(),
    v8::NewStringType::Normal,
  )
  .unwrap()
  .into();

  let peer_address: v8::Local<v8::Value> = v8::String::new_from_utf8(
    scope,
    request_info.peer_address.as_bytes(),
    v8::NewStringType::Normal,
  )
  .unwrap()
  .into();

  let port: v8::Local<v8::Value> = match request_info.peer_port {
    Some(port) => v8::Integer::new(scope, port.into()).into(),
    None => v8::undefined(scope).into(),
  };

  let vec = [method, authority, path, peer_address, port];
  v8::Array::new_with_elements(scope, vec.as_slice())
}

#[op2]
#[serde]
pub fn op_http_get_request_header(
  external: *const c_void,
  #[string] name: String,
) -> Option<ByteString> {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_get_request_header") };
  let request_parts = http.request_parts();
  let value = request_parts.headers.get(name);
  value.map(|value| value.as_bytes().into())
}

#[op2]
pub fn op_http_get_request_headers<'scope>(
  scope: &mut v8::HandleScope<'scope>,
  external: *const c_void,
) -> v8::Local<'scope, v8::Array> {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_get_request_headers") };
  let headers = &http.request_parts().headers;
  // Two slots for each header key/value pair
  let mut vec: SmallVec<[v8::Local<v8::Value>; 32]> =
    SmallVec::with_capacity(headers.len() * 2);

  let mut cookies: Option<Vec<&[u8]>> = None;
  for (name, value) in headers {
    if name == COOKIE {
      if let Some(ref mut cookies) = cookies {
        cookies.push(value.as_bytes());
      } else {
        cookies = Some(vec![value.as_bytes()]);
      }
    } else {
      vec.push(
        v8::String::new_from_one_byte(
          scope,
          name.as_ref(),
          v8::NewStringType::Normal,
        )
        .unwrap()
        .into(),
      );
      vec.push(
        v8::String::new_from_one_byte(
          scope,
          value.as_bytes(),
          v8::NewStringType::Normal,
        )
        .unwrap()
        .into(),
      );
    }
  }

  // We treat cookies specially, because we don't want them to get
  // mangled by the `Headers` object in JS. What we do is take all cookie
  // headers and concat them into a single cookie header, separated by
  // semicolons.
  // TODO(mmastrac): This should probably happen on the JS side on-demand
  if let Some(cookies) = cookies {
    let cookie_sep = "; ".as_bytes();

    vec.push(
      v8::String::new_external_onebyte_static(scope, COOKIE.as_ref())
        .unwrap()
        .into(),
    );
    vec.push(
      v8::String::new_from_one_byte(
        scope,
        cookies.join(cookie_sep).as_ref(),
        v8::NewStringType::Normal,
      )
      .unwrap()
      .into(),
    );
  }

  v8::Array::new_with_elements(scope, vec.as_slice())
}

#[op2(fast)]
#[smi]
pub fn op_http_read_request_body(
  state: Rc<RefCell<OpState>>,
  external: *const c_void,
) -> ResourceId {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_read_request_body") };
  let rid = if let Some(incoming) = http.take_request_body() {
    let body_resource = Rc::new(HttpRequestBody::new(incoming));
    state.borrow_mut().resource_table.add_rc(body_resource)
  } else {
    // This should not be possible, but rather than panicking we'll return an invalid
    // resource value to JavaScript.
    ResourceId::MAX
  };
  http.put_resource(HttpRequestBodyAutocloser::new(rid, state.clone()));
  rid
}

#[op2(fast)]
pub fn op_http_set_response_header(
  external: *const c_void,
  #[string(onebyte)] name: Cow<[u8]>,
  #[string(onebyte)] value: Cow<[u8]>,
) {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_header") };
  let mut response_parts = http.response_parts();
  // These are valid latin-1 strings
  let name = HeaderName::from_bytes(&name).unwrap();
  let value = match value {
    Cow::Borrowed(bytes) => HeaderValue::from_bytes(bytes).unwrap(),
    // SAFETY: These are valid latin-1 strings
    Cow::Owned(bytes_vec) => unsafe {
      HeaderValue::from_maybe_shared_unchecked(bytes::Bytes::from(bytes_vec))
    },
  };
  response_parts.headers.append(name, value);
}

#[op2(fast)]
pub fn op_http_set_response_headers(
  scope: &mut v8::HandleScope,
  external: *const c_void,
  headers: v8::Local<v8::Array>,
) {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_headers") };
  // TODO(mmastrac): Invalid headers should be handled?
  let mut response_parts = http.response_parts();

  let len = headers.length();
  let header_len = len * 2;
  response_parts
    .headers
    .reserve(header_len.try_into().unwrap());

  for i in 0..len {
    let item = headers.get_index(scope, i).unwrap();
    let pair = v8::Local::<v8::Array>::try_from(item).unwrap();
    let name = pair.get_index(scope, 0).unwrap();
    let value = pair.get_index(scope, 1).unwrap();

    let v8_name: ByteString = from_v8(scope, name).unwrap();
    let v8_value: ByteString = from_v8(scope, value).unwrap();
    let header_name = HeaderName::from_bytes(&v8_name).unwrap();
    let header_value =
      // SAFETY: These are valid latin-1 strings
      unsafe { HeaderValue::from_maybe_shared_unchecked(v8_value) };
    response_parts.headers.append(header_name, header_value);
  }
}

#[op2]
pub fn op_http_set_response_trailers(
  external: *const c_void,
  #[serde] trailers: Vec<(ByteString, ByteString)>,
) {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_trailers") };
  let mut trailer_map: HeaderMap = HeaderMap::with_capacity(trailers.len());
  for (name, value) in trailers {
    // These are valid latin-1 strings
    let name = HeaderName::from_bytes(&name).unwrap();
    // SAFETY: These are valid latin-1 strings
    let value = unsafe { HeaderValue::from_maybe_shared_unchecked(value) };
    trailer_map.append(name, value);
  }
  *http.trailers() = Some(trailer_map);
}

fn is_request_compressible(
  length: Option<usize>,
  headers: &HeaderMap,
) -> Compression {
  if let Some(length) = length {
    // By the time we add compression headers and Accept-Encoding, it probably doesn't make sense
    // to compress stuff that's smaller than this.
    if length < 64 {
      return Compression::None;
    }
  }

  let Some(accept_encoding) = headers.get(ACCEPT_ENCODING) else {
    return Compression::None;
  };

  match accept_encoding.to_str() {
    // Firefox and Chrome send this -- no need to parse
    Ok("gzip, deflate, br") => return Compression::Brotli,
    Ok("gzip, deflate, br, zstd") => return Compression::Brotli,
    Ok("gzip") => return Compression::GZip,
    Ok("br") => return Compression::Brotli,
    _ => (),
  }

  // Fall back to the expensive parser
  let accepted =
    fly_accept_encoding::encodings_iter_http_1(headers).filter(|r| {
      matches!(
        r,
        Ok((
          Some(Encoding::Identity | Encoding::Gzip | Encoding::Brotli),
          _
        ))
      )
    });
  match fly_accept_encoding::preferred(accepted) {
    Ok(Some(fly_accept_encoding::Encoding::Gzip)) => Compression::GZip,
    Ok(Some(fly_accept_encoding::Encoding::Brotli)) => Compression::Brotli,
    _ => Compression::None,
  }
}
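
// A minimal illustrative test sketch (not from the upstream file) of the
// negotiation above; it only relies on items already imported in this module.
#[cfg(test)]
mod is_request_compressible_sketch {
  use super::*;

  #[test]
  fn browser_fast_path_prefers_brotli() {
    let mut headers = HeaderMap::new();
    headers.insert(
      ACCEPT_ENCODING,
      HeaderValue::from_static("gzip, deflate, br"),
    );
    // The fast path matches this exact string without parsing.
    assert!(matches!(
      is_request_compressible(Some(1024), &headers),
      Compression::Brotli
    ));
  }

  #[test]
  fn tiny_bodies_are_never_compressed() {
    let mut headers = HeaderMap::new();
    headers.insert(ACCEPT_ENCODING, HeaderValue::from_static("gzip"));
    // Bodies under 64 bytes skip compression regardless of Accept-Encoding.
    assert!(matches!(
      is_request_compressible(Some(10), &headers),
      Compression::None
    ));
  }
}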

fn is_response_compressible(headers: &HeaderMap) -> bool {
  if let Some(content_type) = headers.get(CONTENT_TYPE) {
    if !is_content_compressible(content_type) {
      return false;
    }
  } else {
    return false;
  }
  if headers.contains_key(CONTENT_ENCODING) {
    return false;
  }
  if headers.contains_key(CONTENT_RANGE) {
    return false;
  }
  if let Some(cache_control) = headers.get(CACHE_CONTROL) {
    if let Ok(s) = std::str::from_utf8(cache_control.as_bytes()) {
      if let Some(cache_control) = CacheControl::from_value(s) {
        if cache_control.no_transform {
          return false;
        }
      }
    }
  }
  true
}

fn modify_compressibility_from_response(
  compression: Compression,
  headers: &mut HeaderMap,
) -> Compression {
  ensure_vary_accept_encoding(headers);
  if compression == Compression::None {
    return Compression::None;
  }
  if !is_response_compressible(headers) {
    return Compression::None;
  }
  let encoding = match compression {
    Compression::Brotli => "br",
    Compression::GZip => "gzip",
    _ => unreachable!(),
  };
  weaken_etag(headers);
  headers.remove(CONTENT_LENGTH);
  headers.insert(CONTENT_ENCODING, HeaderValue::from_static(encoding));
  compression
}

/// If the user provided an ETag header for uncompressed data, we need to ensure it is a
/// weak Etag header ("W/").
fn weaken_etag(hmap: &mut HeaderMap) {
  if let Some(etag) = hmap.get_mut(hyper::header::ETAG) {
    if !etag.as_bytes().starts_with(b"W/") {
      let mut v = Vec::with_capacity(etag.as_bytes().len() + 2);
      v.extend(b"W/");
      v.extend(etag.as_bytes());
      *etag = v.try_into().unwrap();
    }
  }
}

// Set Vary: Accept-Encoding header for direct body response.
// Note: we set the header irrespective of whether or not we compress the data
// to make sure cache services do not serve uncompressed data to clients that
// support compression.
fn ensure_vary_accept_encoding(hmap: &mut HeaderMap) {
  if let Some(v) = hmap.get_mut(hyper::header::VARY) {
    if let Ok(s) = v.to_str() {
      if !s.to_lowercase().contains("accept-encoding") {
        *v = format!("Accept-Encoding, {s}").try_into().unwrap()
      }
      return;
    }
  }
  hmap.insert(
    hyper::header::VARY,
    HeaderValue::from_static("Accept-Encoding"),
  );
}
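
// A minimal illustrative test sketch (not from the upstream file) of the two
// header rewrites above.
#[cfg(test)]
mod header_rewrite_sketch {
  use super::*;

  #[test]
  fn strong_etag_becomes_weak() {
    let mut headers = HeaderMap::new();
    headers.insert(hyper::header::ETAG, HeaderValue::from_static("\"abc\""));
    weaken_etag(&mut headers);
    // The strong validator is downgraded because the compressed bytes differ.
    assert_eq!(headers.get(hyper::header::ETAG).unwrap(), "W/\"abc\"");
  }

  #[test]
  fn existing_vary_header_is_extended() {
    let mut headers = HeaderMap::new();
    headers.insert(
      hyper::header::VARY,
      HeaderValue::from_static("Accept-Language"),
    );
    ensure_vary_accept_encoding(&mut headers);
    assert_eq!(
      headers.get(hyper::header::VARY).unwrap(),
      "Accept-Encoding, Accept-Language"
    );
  }
}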

/// Sets the appropriate response body. Use `force_instantiate_body` if you need
/// to ensure that the response is cleaned up correctly (eg: for resources).
fn set_response(
  http: Rc<HttpRecord>,
  length: Option<usize>,
  status: u16,
  force_instantiate_body: bool,
  response_fn: impl FnOnce(Compression) -> ResponseBytesInner,
) {
  // The request may have been cancelled by this point and if so, there's no need for us to
  // do all of this work to send the response.
  if !http.cancelled() {
    let compression =
      is_request_compressible(length, &http.request_parts().headers);
    let mut response_headers =
      std::cell::RefMut::map(http.response_parts(), |this| &mut this.headers);
    let compression =
      modify_compressibility_from_response(compression, &mut response_headers);
    drop(response_headers);
    http.set_response_body(response_fn(compression));

    // The Javascript code should never provide a status that is invalid here (see 23_response.js), so we
    // will quietly ignore invalid values.
    if let Ok(code) = StatusCode::from_u16(status) {
      http.response_parts().status = code;
    }
  } else if force_instantiate_body {
    response_fn(Compression::None).abort();
  }

  http.complete();
}

#[op2(fast)]
pub fn op_http_get_request_cancelled(external: *const c_void) -> bool {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_get_request_cancelled") };
  http.cancelled()
}

#[op2(async)]
pub async fn op_http_request_on_cancel(external: *const c_void) {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_request_on_cancel") };
  let (tx, rx) = tokio::sync::oneshot::channel();

  http.on_cancel(tx);
  drop(http);

  rx.await.ok();
}

/// Returned promise resolves when body streaming finishes.
/// Call [`op_http_close_after_finish`] when done with the external.
#[op2(async)]
pub async fn op_http_set_response_body_resource(
  state: Rc<RefCell<OpState>>,
  external: *const c_void,
  #[smi] stream_rid: ResourceId,
  auto_close: bool,
  status: u16,
) -> Result<bool, HttpNextError> {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_body_resource") };

  // IMPORTANT: We might end up requiring the OpState lock in set_response if we need to drop the request
  // body resource so we _cannot_ hold the OpState lock longer than necessary.

  // If the stream is auto_close, we will hold the last ref to it until the response is complete.
  // TODO(mmastrac): We should be using the same auto-close functionality rather than removing autoclose resources.
  // It's possible things could fail elsewhere if code expects the rid to continue existing after the response has been
  // returned.
  let resource = {
    let mut state = state.borrow_mut();
    if auto_close {
      state
        .resource_table
        .take_any(stream_rid)
        .map_err(HttpNextError::Resource)?
    } else {
      state
        .resource_table
        .get_any(stream_rid)
        .map_err(HttpNextError::Resource)?
    }
  };

  *http.needs_close_after_finish() = true;

  set_response(
    http.clone(),
    resource.size_hint().1.map(|s| s as usize),
    status,
    true,
    move |compression| {
      ResponseBytesInner::from_resource(compression, resource, auto_close)
    },
  );

  Ok(http.response_body_finished().await)
}

#[op2(fast)]
pub fn op_http_close_after_finish(external: *const c_void) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_close_after_finish") };
  http.close_after_finish();
}

#[op2(fast)]
pub fn op_http_set_response_body_text(
  external: *const c_void,
  #[string] text: String,
  status: u16,
) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_set_response_body_text") };
  if !text.is_empty() {
    set_response(http, Some(text.len()), status, false, |compression| {
      ResponseBytesInner::from_vec(compression, text.into_bytes())
    });
  } else {
    set_promise_complete(http, status);
  }
}

#[op2]
pub fn op_http_set_response_body_bytes(
  external: *const c_void,
  #[buffer] buffer: JsBuffer,
  status: u16,
) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_set_response_body_bytes") };
  if !buffer.is_empty() {
    set_response(http, Some(buffer.len()), status, false, |compression| {
      ResponseBytesInner::from_bufview(compression, BufView::from(buffer))
    });
  } else {
    set_promise_complete(http, status);
  }
}

fn serve_http11_unconditional(
  io: impl HttpServeStream,
  svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
  cancel: Rc<CancelHandle>,
  http1_builder_hook: Option<fn(http1::Builder) -> http1::Builder>,
) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
  let mut builder = http1::Builder::new();
  builder.keep_alive(true).writev(*USE_WRITEV);

  if let Some(http1_builder_hook) = http1_builder_hook {
    builder = http1_builder_hook(builder);
  }

  let conn = builder
    .serve_connection(TokioIo::new(io), svc)
    .with_upgrades();

  async {
    match conn.or_abort(cancel).await {
      Err(mut conn) => {
        Pin::new(&mut conn).graceful_shutdown();
        conn.await
      }
      Ok(res) => res,
    }
  }
}

fn serve_http2_unconditional(
  io: impl HttpServeStream,
  svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
  cancel: Rc<CancelHandle>,
  http2_builder_hook: Option<
    fn(http2::Builder<LocalExecutor>) -> http2::Builder<LocalExecutor>,
  >,
) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
  let mut builder = http2::Builder::new(LocalExecutor);

  if let Some(http2_builder_hook) = http2_builder_hook {
    builder = http2_builder_hook(builder);
  }

  let conn = builder.serve_connection(TokioIo::new(io), svc);
  async {
    match conn.or_abort(cancel).await {
      Err(mut conn) => {
        Pin::new(&mut conn).graceful_shutdown();
        conn.await
      }
      Ok(res) => res,
    }
  }
}

async fn serve_http2_autodetect(
  io: impl HttpServeStream,
  svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
  cancel: Rc<CancelHandle>,
  options: Options,
) -> Result<(), HttpNextError> {
  let prefix = NetworkStreamPrefixCheck::new(io, HTTP2_PREFIX);
  let (matches, io) = prefix.match_prefix().await?;
  if matches {
    serve_http2_unconditional(io, svc, cancel, options.http2_builder_hook)
      .await
      .map_err(HttpNextError::Hyper)
  } else {
    serve_http11_unconditional(io, svc, cancel, options.http1_builder_hook)
      .await
      .map_err(HttpNextError::Hyper)
  }
}
|
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
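/// Spawn a task serving a TLS connection: complete the handshake, then choose the
/// protocol from the negotiated ALPN value (`TLS_ALPN_HTTP_2` or `TLS_ALPN_HTTP_11`),
/// falling back to prefix-based auto-detection when nothing was negotiated. Requests
/// are handed to `handle_request` along with `tx`, and the whole task is aborted if
/// the connection cancel handle fires.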
fn serve_https(
|
|
|
|
mut io: TlsStream,
|
|
|
|
request_info: HttpConnectionProperties,
|
2023-09-11 20:06:38 -04:00
|
|
|
lifetime: HttpLifetime,
|
2023-11-13 09:04:49 -05:00
|
|
|
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
|
2024-11-18 20:46:24 -05:00
|
|
|
options: Options,
|
2024-10-18 18:57:12 -04:00
|
|
|
) -> JoinHandle<Result<(), HttpNextError>> {
|
2023-09-11 20:06:38 -04:00
|
|
|
let HttpLifetime {
|
2023-11-13 12:32:34 -05:00
|
|
|
server_state,
|
2023-09-11 20:06:38 -04:00
|
|
|
connection_cancel_handle,
|
|
|
|
listen_cancel_handle,
|
|
|
|
} = lifetime;
|
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
let svc = service_fn(move |req: Request| {
|
2023-11-13 12:32:34 -05:00
|
|
|
handle_request(req, request_info.clone(), server_state.clone(), tx.clone())
|
2023-04-22 13:48:21 -04:00
|
|
|
});
|
2023-05-14 17:40:01 -04:00
|
|
|
spawn(
|
2024-11-18 20:46:24 -05:00
|
|
|
async move {
|
2023-11-15 18:12:46 -05:00
|
|
|
let handshake = io.handshake().await?;
|
2023-05-08 03:52:56 -04:00
|
|
|
// If the client specifically negotiates a protocol, we will use it. If not, we'll auto-detect
|
|
|
|
// based on the prefix bytes
|
2023-11-15 18:12:46 -05:00
|
|
|
let handshake = handshake.alpn;
|
|
|
|
if Some(TLS_ALPN_HTTP_2) == handshake.as_deref() {
|
2024-11-18 20:46:24 -05:00
|
|
|
serve_http2_unconditional(
|
|
|
|
io,
|
|
|
|
svc,
|
|
|
|
listen_cancel_handle,
|
|
|
|
options.http2_builder_hook,
|
|
|
|
)
|
|
|
|
.await
|
|
|
|
.map_err(HttpNextError::Hyper)
|
2023-11-15 18:12:46 -05:00
|
|
|
} else if Some(TLS_ALPN_HTTP_11) == handshake.as_deref() {
|
2024-11-18 20:46:24 -05:00
|
|
|
serve_http11_unconditional(
|
|
|
|
io,
|
|
|
|
svc,
|
|
|
|
listen_cancel_handle,
|
|
|
|
options.http1_builder_hook,
|
|
|
|
)
|
|
|
|
.await
|
|
|
|
.map_err(HttpNextError::Hyper)
|
2023-05-08 03:52:56 -04:00
|
|
|
} else {
|
2024-11-18 20:46:24 -05:00
|
|
|
serve_http2_autodetect(io, svc, listen_cancel_handle, options).await
|
2023-05-08 03:52:56 -04:00
|
|
|
}
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
2023-09-11 20:06:38 -04:00
|
|
|
.try_or_cancel(connection_cancel_handle),
|
2023-05-08 03:52:56 -04:00
|
|
|
)
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
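/// Spawn a task serving a plaintext (TCP or Unix socket) connection, auto-detecting
/// HTTP/1.1 vs HTTP/2 from the first bytes. As with `serve_https`, the listen cancel
/// handle drives a graceful shutdown while the connection cancel handle aborts the
/// task outright.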
fn serve_http(
|
2023-04-24 17:24:40 -04:00
|
|
|
io: impl HttpServeStream,
|
2023-04-22 13:48:21 -04:00
|
|
|
request_info: HttpConnectionProperties,
|
2023-09-11 20:06:38 -04:00
|
|
|
lifetime: HttpLifetime,
|
2023-11-13 09:04:49 -05:00
|
|
|
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
|
2024-11-18 20:46:24 -05:00
|
|
|
options: Options,
|
2024-10-18 18:57:12 -04:00
|
|
|
) -> JoinHandle<Result<(), HttpNextError>> {
|
2023-09-11 20:06:38 -04:00
|
|
|
let HttpLifetime {
|
2023-11-13 12:32:34 -05:00
|
|
|
server_state,
|
2023-09-11 20:06:38 -04:00
|
|
|
connection_cancel_handle,
|
|
|
|
listen_cancel_handle,
|
|
|
|
} = lifetime;
|
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
let svc = service_fn(move |req: Request| {
|
2023-11-13 12:32:34 -05:00
|
|
|
handle_request(req, request_info.clone(), server_state.clone(), tx.clone())
|
2023-04-22 13:48:21 -04:00
|
|
|
});
|
2023-09-11 20:06:38 -04:00
|
|
|
spawn(
|
2024-11-18 20:46:24 -05:00
|
|
|
serve_http2_autodetect(io, svc, listen_cancel_handle, options)
|
2023-09-11 20:06:38 -04:00
|
|
|
.try_or_cancel(connection_cancel_handle),
|
|
|
|
)
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
2023-05-10 10:23:26 -04:00
|
|
|
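/// Serve one accepted connection: compute its connection properties via the property
/// extractor, unwrap it into the underlying network stream, and dispatch to
/// `serve_http` (TCP/Unix) or `serve_https` (TLS).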
fn serve_http_on<HTTP>(
|
2023-05-15 10:55:47 -04:00
|
|
|
connection: HTTP::Connection,
|
2023-04-22 13:48:21 -04:00
|
|
|
listen_properties: &HttpListenProperties,
|
2023-09-11 20:06:38 -04:00
|
|
|
lifetime: HttpLifetime,
|
2023-11-13 09:04:49 -05:00
|
|
|
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
|
2024-11-18 20:46:24 -05:00
|
|
|
options: Options,
|
2024-10-18 18:57:12 -04:00
|
|
|
) -> JoinHandle<Result<(), HttpNextError>>
|
2023-05-10 10:23:26 -04:00
|
|
|
where
|
|
|
|
HTTP: HttpPropertyExtractor,
|
|
|
|
{
|
2023-04-22 13:48:21 -04:00
|
|
|
let connection_properties: HttpConnectionProperties =
|
2023-05-15 10:55:47 -04:00
|
|
|
HTTP::connection_properties(listen_properties, &connection);
|
|
|
|
|
|
|
|
let network_stream = HTTP::to_network_stream_from_connection(connection);
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
match network_stream {
|
|
|
|
NetworkStream::Tcp(conn) => {
|
2024-11-18 20:46:24 -05:00
|
|
|
serve_http(conn, connection_properties, lifetime, tx, options)
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
NetworkStream::Tls(conn) => {
|
2024-11-18 20:46:24 -05:00
|
|
|
serve_https(conn, connection_properties, lifetime, tx, options)
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
#[cfg(unix)]
|
|
|
|
NetworkStream::Unix(conn) => {
|
2024-11-18 20:46:24 -05:00
|
|
|
serve_http(conn, connection_properties, lifetime, tx, options)
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-09-11 20:06:38 -04:00
|
|
|
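/// Cancellation handles and shared server state passed to every connection spawned
/// for a listener: the listen handle stops accepting and gracefully drains
/// connections, while the connection handle aborts them outright.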
#[derive(Clone)]
|
|
|
|
struct HttpLifetime {
|
|
|
|
connection_cancel_handle: Rc<CancelHandle>,
|
|
|
|
listen_cancel_handle: Rc<CancelHandle>,
|
2023-11-23 11:39:17 -05:00
|
|
|
server_state: SignallingRc<HttpServerState>,
|
2023-09-11 20:06:38 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
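/// Resource returned by `op_http_serve`/`op_http_serve_on`: the join handle of the
/// serving task, the two cancel handles, the receiving end of the request channel,
/// and the shared server state awaited during graceful shutdown.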
struct HttpJoinHandle {
|
2024-10-18 18:57:12 -04:00
|
|
|
join_handle: AsyncRefCell<Option<JoinHandle<Result<(), HttpNextError>>>>,
|
2023-09-11 20:06:38 -04:00
|
|
|
connection_cancel_handle: Rc<CancelHandle>,
|
|
|
|
listen_cancel_handle: Rc<CancelHandle>,
|
2023-11-13 09:04:49 -05:00
|
|
|
rx: AsyncRefCell<tokio::sync::mpsc::Receiver<Rc<HttpRecord>>>,
|
2023-11-23 11:39:17 -05:00
|
|
|
server_state: SignallingRc<HttpServerState>,
|
2023-09-11 20:06:38 -04:00
|
|
|
}
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
impl HttpJoinHandle {
|
2023-11-13 09:04:49 -05:00
|
|
|
fn new(rx: tokio::sync::mpsc::Receiver<Rc<HttpRecord>>) -> Self {
|
2023-09-11 20:06:38 -04:00
|
|
|
Self {
|
|
|
|
join_handle: AsyncRefCell::new(None),
|
|
|
|
connection_cancel_handle: CancelHandle::new_rc(),
|
|
|
|
listen_cancel_handle: CancelHandle::new_rc(),
|
|
|
|
rx: AsyncRefCell::new(rx),
|
2023-11-13 12:32:34 -05:00
|
|
|
server_state: HttpServerState::new(),
|
2023-09-11 20:06:38 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn lifetime(self: &Rc<Self>) -> HttpLifetime {
|
|
|
|
HttpLifetime {
|
|
|
|
connection_cancel_handle: self.connection_cancel_handle.clone(),
|
|
|
|
listen_cancel_handle: self.listen_cancel_handle.clone(),
|
2023-11-13 12:32:34 -05:00
|
|
|
server_state: self.server_state.clone(),
|
2023-09-11 20:06:38 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn connection_cancel_handle(self: &Rc<Self>) -> Rc<CancelHandle> {
|
|
|
|
self.connection_cancel_handle.clone()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn listen_cancel_handle(self: &Rc<Self>) -> Rc<CancelHandle> {
|
|
|
|
self.listen_cancel_handle.clone()
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Resource for HttpJoinHandle {
|
|
|
|
fn name(&self) -> Cow<str> {
|
|
|
|
"http".into()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn close(self: Rc<Self>) {
|
2023-09-11 20:06:38 -04:00
|
|
|
// During a close operation, we cancel everything
|
|
|
|
self.connection_cancel_handle.cancel();
|
|
|
|
self.listen_cancel_handle.cancel();
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-05-08 03:52:56 -04:00
|
|
|
impl Drop for HttpJoinHandle {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
// In some cases we may be dropped without closing, so let's cancel everything on the way out
|
2023-09-11 20:06:38 -04:00
|
|
|
self.connection_cancel_handle.cancel();
|
|
|
|
self.listen_cancel_handle.cancel();
|
2023-05-08 03:52:56 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
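/// Start serving HTTP on a listener resource: spawns an accept loop that serves each
/// incoming connection via `serve_http_on`, and returns the `HttpJoinHandle` resource
/// id together with the listener's scheme and fallback host.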
#[op2]
|
|
|
|
#[serde]
|
2023-05-10 10:23:26 -04:00
|
|
|
pub fn op_http_serve<HTTP>(
|
2023-04-22 13:48:21 -04:00
|
|
|
state: Rc<RefCell<OpState>>,
|
2023-08-03 16:36:32 -04:00
|
|
|
#[smi] listener_rid: ResourceId,
|
2024-10-18 18:57:12 -04:00
|
|
|
) -> Result<(ResourceId, &'static str, String), HttpNextError>
|
2023-05-10 10:23:26 -04:00
|
|
|
where
|
|
|
|
HTTP: HttpPropertyExtractor,
|
|
|
|
{
|
2023-05-15 10:55:47 -04:00
|
|
|
let listener =
|
2024-10-18 18:57:12 -04:00
|
|
|
HTTP::get_listener_for_rid(&mut state.borrow_mut(), listener_rid)
|
|
|
|
.map_err(HttpNextError::Resource)?;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2023-05-15 10:55:47 -04:00
|
|
|
let listen_properties = HTTP::listen_properties_from_listener(&listener)?;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
let (tx, rx) = tokio::sync::mpsc::channel(10);
|
2023-09-11 20:06:38 -04:00
|
|
|
let resource: Rc<HttpJoinHandle> = Rc::new(HttpJoinHandle::new(rx));
|
|
|
|
let listen_cancel_clone = resource.listen_cancel_handle();
|
|
|
|
|
|
|
|
let lifetime = resource.lifetime();
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2024-11-18 20:46:24 -05:00
|
|
|
let options = {
|
|
|
|
let state = state.borrow();
|
|
|
|
*state.borrow::<Options>()
|
|
|
|
};
|
|
|
|
|
2023-05-08 03:52:56 -04:00
|
|
|
let listen_properties_clone: HttpListenProperties = listen_properties.clone();
|
2023-05-14 17:40:01 -04:00
|
|
|
let handle = spawn(async move {
|
2023-04-22 13:48:21 -04:00
|
|
|
loop {
|
2023-05-15 10:55:47 -04:00
|
|
|
let conn = HTTP::accept_connection_from_listener(&listener)
|
2023-09-11 20:06:38 -04:00
|
|
|
.try_or_cancel(listen_cancel_clone.clone())
|
2024-10-18 18:57:12 -04:00
|
|
|
.await
|
|
|
|
.map_err(HttpNextError::HttpPropertyExtractor)?;
|
2023-05-10 10:23:26 -04:00
|
|
|
serve_http_on::<HTTP>(
|
2023-04-22 13:48:21 -04:00
|
|
|
conn,
|
|
|
|
&listen_properties_clone,
|
2023-09-11 20:06:38 -04:00
|
|
|
lifetime.clone(),
|
2023-04-22 13:48:21 -04:00
|
|
|
tx.clone(),
|
2024-11-18 20:46:24 -05:00
|
|
|
options,
|
2023-04-22 13:48:21 -04:00
|
|
|
);
|
|
|
|
}
|
|
|
|
#[allow(unreachable_code)]
|
2024-10-18 18:57:12 -04:00
|
|
|
Ok::<_, HttpNextError>(())
|
2023-04-22 13:48:21 -04:00
|
|
|
});
|
|
|
|
|
|
|
|
// Set the handle after we start the future
|
2023-09-11 20:06:38 -04:00
|
|
|
*RcRef::map(&resource, |this| &this.join_handle)
|
2023-04-22 13:48:21 -04:00
|
|
|
.try_borrow_mut()
|
|
|
|
.unwrap() = Some(handle);
|
|
|
|
|
|
|
|
Ok((
|
|
|
|
state.borrow_mut().resource_table.add_rc(resource),
|
|
|
|
listen_properties.scheme,
|
|
|
|
listen_properties.fallback_host,
|
|
|
|
))
|
|
|
|
}
|
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
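/// Like `op_http_serve`, but for a single already-established connection resource
/// rather than a listener: the connection is served immediately and the same
/// `(rid, scheme, fallback_host)` triple is returned.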
#[op2]
|
|
|
|
#[serde]
|
2023-05-10 10:23:26 -04:00
|
|
|
pub fn op_http_serve_on<HTTP>(
|
2023-04-22 13:48:21 -04:00
|
|
|
state: Rc<RefCell<OpState>>,
|
2023-08-03 16:36:32 -04:00
|
|
|
#[smi] connection_rid: ResourceId,
|
2024-10-18 18:57:12 -04:00
|
|
|
) -> Result<(ResourceId, &'static str, String), HttpNextError>
|
2023-05-10 10:23:26 -04:00
|
|
|
where
|
|
|
|
HTTP: HttpPropertyExtractor,
|
|
|
|
{
|
2023-05-15 10:55:47 -04:00
|
|
|
let connection =
|
2024-10-18 18:57:12 -04:00
|
|
|
HTTP::get_connection_for_rid(&mut state.borrow_mut(), connection_rid)
|
|
|
|
.map_err(HttpNextError::Resource)?;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2023-05-15 10:55:47 -04:00
|
|
|
let listen_properties = HTTP::listen_properties_from_connection(&connection)?;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
let (tx, rx) = tokio::sync::mpsc::channel(10);
|
2023-09-11 20:06:38 -04:00
|
|
|
let resource: Rc<HttpJoinHandle> = Rc::new(HttpJoinHandle::new(rx));
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2024-11-18 20:46:24 -05:00
|
|
|
let options = {
|
|
|
|
let state = state.borrow();
|
|
|
|
*state.borrow::<Options>()
|
|
|
|
};
|
|
|
|
|
2024-10-18 18:57:12 -04:00
|
|
|
let handle = serve_http_on::<HTTP>(
|
|
|
|
connection,
|
|
|
|
&listen_properties,
|
|
|
|
resource.lifetime(),
|
|
|
|
tx,
|
2024-11-18 20:46:24 -05:00
|
|
|
options,
|
2024-10-18 18:57:12 -04:00
|
|
|
);
|
2023-04-22 13:48:21 -04:00
|
|
|
|
|
|
|
// Set the handle after we start the future
|
2023-09-11 20:06:38 -04:00
|
|
|
*RcRef::map(&resource, |this| &this.join_handle)
|
2023-04-22 13:48:21 -04:00
|
|
|
.try_borrow_mut()
|
|
|
|
.unwrap() = Some(handle);
|
|
|
|
|
|
|
|
Ok((
|
|
|
|
state.borrow_mut().resource_table.add_rc(resource),
|
|
|
|
listen_properties.scheme,
|
|
|
|
listen_properties.fallback_host,
|
|
|
|
))
|
|
|
|
}
|
|
|
|
|
2023-05-30 20:02:52 -04:00
|
|
|
/// Synchronous, non-blocking call to see if there are any further HTTP requests. If anything
|
2023-11-13 09:04:49 -05:00
|
|
|
/// goes wrong in this method, we return null and let the async handler pick up the real error.
|
2023-08-03 16:36:32 -04:00
|
|
|
#[op2(fast)]
|
2023-11-13 09:04:49 -05:00
|
|
|
pub fn op_http_try_wait(
|
|
|
|
state: &mut OpState,
|
|
|
|
#[smi] rid: ResourceId,
|
|
|
|
) -> *const c_void {
|
2023-05-30 20:02:52 -04:00
|
|
|
// The resource needs to exist.
|
2023-08-27 00:04:12 -04:00
|
|
|
let Ok(join_handle) = state.resource_table.get::<HttpJoinHandle>(rid) else {
|
2023-11-13 09:04:49 -05:00
|
|
|
return null();
|
2023-05-30 20:02:52 -04:00
|
|
|
};
|
|
|
|
|
|
|
|
// If the join handle is somehow locked, just abort.
|
2023-08-27 00:04:12 -04:00
|
|
|
let Some(mut handle) =
|
2023-09-11 20:06:38 -04:00
|
|
|
RcRef::map(&join_handle, |this| &this.rx).try_borrow_mut()
|
2023-08-27 00:04:12 -04:00
|
|
|
else {
|
2023-11-13 09:04:49 -05:00
|
|
|
return null();
|
2023-05-30 20:02:52 -04:00
|
|
|
};
|
|
|
|
|
|
|
|
// See if there are any requests waiting on this channel. If not, return.
|
2023-11-13 09:04:49 -05:00
|
|
|
let Ok(record) = handle.try_recv() else {
|
|
|
|
return null();
|
2023-05-30 20:02:52 -04:00
|
|
|
};
|
|
|
|
|
2023-11-13 09:04:49 -05:00
|
|
|
let ptr = ExternalPointer::new(RcHttpRecord(record));
|
|
|
|
ptr.into_raw()
|
2023-05-30 20:02:52 -04:00
|
|
|
}
|
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
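/// Async counterpart of `op_http_try_wait`: waits until either the next request
/// arrives (returning an external pointer to its `HttpRecord`) or the listener is
/// cancelled, in which case the serving task is joined, ENOTCONN errors are filtered
/// out, and null is returned.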
#[op2(async)]
|
2023-04-22 13:48:21 -04:00
|
|
|
pub async fn op_http_wait(
|
|
|
|
state: Rc<RefCell<OpState>>,
|
2023-08-03 16:36:32 -04:00
|
|
|
#[smi] rid: ResourceId,
|
2024-10-18 18:57:12 -04:00
|
|
|
) -> Result<*const c_void, HttpNextError> {
|
2023-04-22 13:48:21 -04:00
|
|
|
// Get the join handle first, as we might still be consuming requests
|
|
|
|
let join_handle = state
|
|
|
|
.borrow_mut()
|
|
|
|
.resource_table
|
2024-10-18 18:57:12 -04:00
|
|
|
.get::<HttpJoinHandle>(rid)
|
|
|
|
.map_err(HttpNextError::Resource)?;
|
2023-04-22 13:48:21 -04:00
|
|
|
|
2023-09-11 20:06:38 -04:00
|
|
|
let cancel = join_handle.listen_cancel_handle();
|
2023-04-22 13:48:21 -04:00
|
|
|
let next = async {
|
2023-09-11 20:06:38 -04:00
|
|
|
let mut recv = RcRef::map(&join_handle, |this| &this.rx).borrow_mut().await;
|
2023-04-22 13:48:21 -04:00
|
|
|
recv.recv().await
|
|
|
|
}
|
|
|
|
.or_cancel(cancel)
|
|
|
|
.unwrap_or_else(|_| None)
|
|
|
|
.await;
|
|
|
|
|
|
|
|
// Do we have a request?
|
2023-11-13 09:04:49 -05:00
|
|
|
if let Some(record) = next {
|
|
|
|
let ptr = ExternalPointer::new(RcHttpRecord(record));
|
|
|
|
return Ok(ptr.into_raw());
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
// No - we're shutting down
|
2023-09-11 20:06:38 -04:00
|
|
|
let res = RcRef::map(join_handle, |this| &this.join_handle)
|
2023-04-22 13:48:21 -04:00
|
|
|
.borrow_mut()
|
|
|
|
.await
|
|
|
|
.take()
|
|
|
|
.unwrap()
|
|
|
|
.await?;
|
|
|
|
|
|
|
|
// Filter out shutdown (ENOTCONN) errors
|
|
|
|
if let Err(err) = res {
|
2024-10-18 18:57:12 -04:00
|
|
|
if let HttpNextError::Io(err) = &err {
|
|
|
|
if err.kind() == io::ErrorKind::NotConnected {
|
|
|
|
return Ok(null());
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
|
|
|
}
|
2024-10-18 18:57:12 -04:00
|
|
|
|
2023-04-22 13:48:21 -04:00
|
|
|
return Err(err);
|
|
|
|
}
|
|
|
|
|
2023-11-13 09:04:49 -05:00
|
|
|
Ok(null())
|
2023-04-22 13:48:21 -04:00
|
|
|
}
|
2023-04-26 18:58:18 -04:00
|
|
|
|
2023-09-11 20:06:38 -04:00
|
|
|
/// Cancels the HTTP handle.
|
|
|
|
#[op2(fast)]
|
|
|
|
pub fn op_http_cancel(
|
|
|
|
state: &mut OpState,
|
|
|
|
#[smi] rid: ResourceId,
|
|
|
|
graceful: bool,
|
2024-10-18 18:57:12 -04:00
|
|
|
) -> Result<(), deno_core::error::AnyError> {
|
2023-09-11 20:06:38 -04:00
|
|
|
let join_handle = state.resource_table.get::<HttpJoinHandle>(rid)?;
|
|
|
|
|
|
|
|
if graceful {
|
|
|
|
// In a graceful shutdown, we close the listener and allow all the remaining connections to drain
|
|
|
|
join_handle.listen_cancel_handle().cancel();
|
|
|
|
} else {
|
|
|
|
// In a forceful shutdown, we close everything
|
|
|
|
join_handle.listen_cancel_handle().cancel();
|
|
|
|
join_handle.connection_cancel_handle().cancel();
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
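/// Closes the HTTP handle resource. A graceful close stops the listener and waits for
/// the server state to report all outstanding work complete; a forceful close also
/// cancels live connections. In both cases the serving task is awaited before
/// returning.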
#[op2(async)]
|
|
|
|
pub async fn op_http_close(
|
|
|
|
state: Rc<RefCell<OpState>>,
|
|
|
|
#[smi] rid: ResourceId,
|
|
|
|
graceful: bool,
|
2024-10-18 18:57:12 -04:00
|
|
|
) -> Result<(), HttpNextError> {
|
2023-09-11 20:06:38 -04:00
|
|
|
let join_handle = state
|
|
|
|
.borrow_mut()
|
|
|
|
.resource_table
|
2024-10-18 18:57:12 -04:00
|
|
|
.take::<HttpJoinHandle>(rid)
|
|
|
|
.map_err(HttpNextError::Resource)?;
|
2023-09-11 20:06:38 -04:00
|
|
|
|
|
|
|
if graceful {
|
2023-11-23 11:39:17 -05:00
|
|
|
http_general_trace!("graceful shutdown");
|
2023-09-11 20:06:38 -04:00
|
|
|
// In a graceful shutdown, we close the listener and allow all the remaining connections to drain
|
|
|
|
join_handle.listen_cancel_handle().cancel();
|
2023-11-23 11:39:17 -05:00
|
|
|
poll_fn(|cx| join_handle.server_state.poll_complete(cx)).await;
|
2023-09-11 20:06:38 -04:00
|
|
|
} else {
|
2023-11-23 11:39:17 -05:00
|
|
|
http_general_trace!("forceful shutdown");
|
2023-09-11 20:06:38 -04:00
|
|
|
// In a forceful shutdown, we close everything
|
|
|
|
join_handle.listen_cancel_handle().cancel();
|
|
|
|
join_handle.connection_cancel_handle().cancel();
|
2023-11-13 14:17:31 -05:00
|
|
|
// Give streaming responses a tick to close
|
|
|
|
tokio::task::yield_now().await;
|
2023-09-11 20:06:38 -04:00
|
|
|
}
|
|
|
|
|
2023-11-23 11:39:17 -05:00
|
|
|
http_general_trace!("awaiting shutdown");
|
|
|
|
|
2023-09-11 20:06:38 -04:00
|
|
|
let mut join_handle = RcRef::map(&join_handle, |this| &this.join_handle)
|
|
|
|
.borrow_mut()
|
|
|
|
.await;
|
|
|
|
if let Some(join_handle) = join_handle.take() {
|
|
|
|
join_handle.await??;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2023-04-26 18:58:18 -04:00
|
|
|
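/// Resource wrapping the read and write halves of the in-memory duplex stream used
/// for raw HTTP upgrades (exposed as "httpRawUpgradeStream"). Reads and writes are
/// cancellable via `cancel_handle`, which `close()` triggers.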
struct UpgradeStream {
|
|
|
|
read: AsyncRefCell<tokio::io::ReadHalf<tokio::io::DuplexStream>>,
|
|
|
|
write: AsyncRefCell<tokio::io::WriteHalf<tokio::io::DuplexStream>>,
|
|
|
|
cancel_handle: CancelHandle,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl UpgradeStream {
|
|
|
|
pub fn new(
|
|
|
|
read: tokio::io::ReadHalf<tokio::io::DuplexStream>,
|
|
|
|
write: tokio::io::WriteHalf<tokio::io::DuplexStream>,
|
|
|
|
) -> Self {
|
|
|
|
Self {
|
|
|
|
read: AsyncRefCell::new(read),
|
|
|
|
write: AsyncRefCell::new(write),
|
|
|
|
cancel_handle: CancelHandle::new(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-10-18 18:57:12 -04:00
|
|
|
async fn read(
|
|
|
|
self: Rc<Self>,
|
|
|
|
buf: &mut [u8],
|
|
|
|
) -> Result<usize, std::io::Error> {
|
2023-04-26 18:58:18 -04:00
|
|
|
let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
|
|
|
|
async {
|
|
|
|
let read = RcRef::map(self, |this| &this.read);
|
|
|
|
let mut read = read.borrow_mut().await;
|
2024-10-18 18:57:12 -04:00
|
|
|
Pin::new(&mut *read).read(buf).await
|
2023-04-26 18:58:18 -04:00
|
|
|
}
|
|
|
|
.try_or_cancel(cancel_handle)
|
|
|
|
.await
|
|
|
|
}
|
|
|
|
|
2024-10-18 18:57:12 -04:00
|
|
|
async fn write(self: Rc<Self>, buf: &[u8]) -> Result<usize, std::io::Error> {
|
2023-04-26 18:58:18 -04:00
|
|
|
let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
|
|
|
|
async {
|
|
|
|
let write = RcRef::map(self, |this| &this.write);
|
|
|
|
let mut write = write.borrow_mut().await;
|
2024-10-18 18:57:12 -04:00
|
|
|
Pin::new(&mut *write).write(buf).await
|
2023-04-26 18:58:18 -04:00
|
|
|
}
|
|
|
|
.try_or_cancel(cancel_handle)
|
|
|
|
.await
|
|
|
|
}
|
2023-07-07 12:47:08 -04:00
|
|
|
|
|
|
|
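/// Write `buf1` followed by `buf2` using vectored I/O, looping until both buffers are
/// fully written. Unlike `read` and `write` above, this path does not use the cancel
/// handle.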
async fn write_vectored(
|
|
|
|
self: Rc<Self>,
|
|
|
|
buf1: &[u8],
|
|
|
|
buf2: &[u8],
|
2024-10-18 18:57:12 -04:00
|
|
|
) -> Result<usize, std::io::Error> {
|
2023-07-07 12:47:08 -04:00
|
|
|
let mut wr = RcRef::map(self, |r| &r.write).borrow_mut().await;
|
|
|
|
|
|
|
|
let total = buf1.len() + buf2.len();
|
|
|
|
let mut bufs = [std::io::IoSlice::new(buf1), std::io::IoSlice::new(buf2)];
|
|
|
|
let mut nwritten = wr.write_vectored(&bufs).await?;
|
|
|
|
if nwritten == total {
|
|
|
|
return Ok(nwritten);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Slightly more optimized than (unstable) write_all_vectored for 2 iovecs.
|
|
|
|
while nwritten <= buf1.len() {
|
|
|
|
bufs[0] = std::io::IoSlice::new(&buf1[nwritten..]);
|
|
|
|
nwritten += wr.write_vectored(&bufs).await?;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The first buffer is now fully written; write out whatever remains of the second.
|
|
|
|
if nwritten < total && nwritten > buf1.len() {
|
|
|
|
wr.write_all(&buf2[nwritten - buf1.len()..]).await?;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(total)
|
|
|
|
}
|
2023-04-26 18:58:18 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
impl Resource for UpgradeStream {
|
|
|
|
fn name(&self) -> Cow<str> {
|
|
|
|
"httpRawUpgradeStream".into()
|
|
|
|
}
|
|
|
|
|
|
|
|
deno_core::impl_readable_byob!();
|
|
|
|
deno_core::impl_writable!();
|
|
|
|
|
|
|
|
fn close(self: Rc<Self>) {
|
|
|
|
self.cancel_handle.cancel();
|
|
|
|
}
|
|
|
|
}
|
2023-07-07 12:47:08 -04:00
|
|
|
|
2023-08-03 16:36:32 -04:00
|
|
|
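/// Fast op reporting whether a resource id refers to an `UpgradeStream`, i.e. whether
/// `op_raw_write_vectored` can be used on it.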
#[op2(fast)]
|
|
|
|
pub fn op_can_write_vectored(
|
|
|
|
state: &mut OpState,
|
|
|
|
#[smi] rid: ResourceId,
|
|
|
|
) -> bool {
|
2023-07-18 17:34:26 -04:00
|
|
|
state.resource_table.get::<UpgradeStream>(rid).is_ok()
|
|
|
|
}
|
|
|
|
|
2023-09-24 18:07:22 -04:00
|
|
|
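/// Writes the two buffers to an `UpgradeStream` with a single vectored write (see
/// `UpgradeStream::write_vectored`), returning the total number of bytes written.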
#[op2(async)]
|
|
|
|
#[number]
|
2023-07-07 12:47:08 -04:00
|
|
|
pub async fn op_raw_write_vectored(
|
|
|
|
state: Rc<RefCell<OpState>>,
|
2023-09-24 18:07:22 -04:00
|
|
|
#[smi] rid: ResourceId,
|
|
|
|
#[buffer] buf1: JsBuffer,
|
|
|
|
#[buffer] buf2: JsBuffer,
|
2024-10-18 18:57:12 -04:00
|
|
|
) -> Result<usize, HttpNextError> {
|
|
|
|
let resource: Rc<UpgradeStream> = state
|
|
|
|
.borrow()
|
|
|
|
.resource_table
|
|
|
|
.get::<UpgradeStream>(rid)
|
|
|
|
.map_err(HttpNextError::Resource)?;
|
2023-07-07 12:47:08 -04:00
|
|
|
let nwritten = resource.write_vectored(&buf1, &buf2).await?;
|
|
|
|
Ok(nwritten)
|
|
|
|
}
|