mirror of https://github.com/denoland/deno.git synced 2024-11-21 15:04:11 -05:00

WIP fix(ext/http): #12193

Bert Belder 2021-10-04 18:50:40 -07:00
parent 2bdc2926c3
commit 38f50331eb
No known key found for this signature in database
GPG key ID: 7A77887B2E2ED461
7 changed files with 605 additions and 639 deletions

View file

@@ -45,9 +45,10 @@ jobs:
# e.g. a flaky test.
# Don't fast-fail on tag build because publishing binaries shouldn't be
# prevented if any of the stages fail (which can be a false negative).
fail-fast: ${{ github.event_name == 'pull_request' ||
(github.ref != 'refs/heads/main' &&
!startsWith(github.ref, 'refs/tags/')) }}
# fail-fast: ${{ github.event_name == 'pull_request' ||
# (github.ref != 'refs/heads/main' &&
# !startsWith(github.ref, 'refs/tags/')) }}
fail-fast: false
env:
CARGO_TERM_COLOR: always
@@ -615,7 +616,7 @@ jobs:
publish-canary:
name: publish canary
runs-on: ubuntu-20.04
needs: ['build']
needs: ["build"]
if: github.repository == 'denoland/deno' &&
(github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/'))
steps:

View file

@@ -865,6 +865,7 @@ unitTest(
const writer = writable.getWriter();
async function writeResponse() {
await delay(50);
await writer.write(
new TextEncoder().encode(
"written to the writable side of a TransformStream",

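The test hunk above exercises a streaming response whose body is the readable side of a TransformStream: the handler responds first, then writes to the writable side, and the write is now awaited. A minimal sketch of that pattern, where `event` is a hypothetical { request, respondWith } pair obtained from an HttpConn and everything else is the standard web-streams API:

// Sketch only: `event` stands in for the request event from nextRequest().
const { readable, writable } = new TransformStream();
const responsePromise = event.respondWith(new Response(readable));
const writer = writable.getWriter();
// Awaiting write() propagates backpressure and surfaces write errors
// instead of firing and forgetting.
await writer.write(new TextEncoder().encode("written to the writable side"));
await writer.close();
// respondWith() resolves once the whole body has been streamed.
await responsePromise;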
View file

@@ -27,6 +27,7 @@
Set,
SetPrototypeAdd,
SetPrototypeDelete,
SetPrototypeHas,
SetPrototypeValues,
StringPrototypeIncludes,
StringPrototypeToLowerCase,
@@ -42,6 +43,8 @@
class HttpConn {
#rid = 0;
#closed = false;
// This set holds resource ids of resources
// that were created during lifecycle of this request.
// When the connection is closed these resources should be closed
@@ -66,6 +69,7 @@
this.#rid,
);
} catch (error) {
this.close();
// A connection error seen here would cause disrupted responses to throw
// a generic `BadResource` error. Instead store this error and replace
// those with it.
@@ -79,26 +83,26 @@
}
throw error;
}
if (nextRequest === null) return null;
if (nextRequest == null) {
this.close();
return null;
}
const [
requestRid,
responseSenderRid,
streamRid,
method,
headersList,
url,
] = nextRequest;
SetPrototypeAdd(this.managedResources, streamRid);
/** @type {ReadableStream<Uint8Array> | undefined} */
let body = null;
if (typeof requestRid === "number") {
SetPrototypeAdd(this.managedResources, requestRid);
// There might be a body, but we don't expose it for GET/HEAD requests.
// It will be closed automatically once the request has been handled and
// the response has been sent.
if (method !== "GET" && method !== "HEAD") {
body = createRequestBodyStream(this, requestRid);
}
body = createRequestBodyStream(streamRid);
}
const innerRequest = newInnerRequest(
@@ -111,22 +115,21 @@
const signal = abortSignal.newSignal();
const request = fromInnerRequest(innerRequest, signal, "immutable");
SetPrototypeAdd(this.managedResources, responseSenderRid);
const respondWith = createRespondWith(
this,
responseSenderRid,
requestRid,
);
const respondWith = createRespondWith(this, streamRid);
return { request, respondWith };
}
/** @returns {void} */
close() {
for (const rid of SetPrototypeValues(this.managedResources)) {
core.tryClose(rid);
}
if (!this.#closed) {
this.#closed = true;
core.close(this.#rid);
for (const rid of SetPrototypeValues(this.managedResources)) {
SetPrototypeDelete(this.managedResources, rid);
core.close(rid);
}
}
}
[SymbolAsyncIterator]() {
@@ -136,22 +139,19 @@
async next() {
const reqEvt = await httpConn.nextRequest();
// Change with caution, current form avoids a v8 deopt
return { value: reqEvt, done: reqEvt === null };
return { value: reqEvt ?? undefined, done: reqEvt === null };
},
};
}
}
function readRequest(requestRid, zeroCopyBuf) {
return core.opAsync(
"op_http_request_read",
requestRid,
zeroCopyBuf,
);
function readRequest(streamRid, buf) {
return core.opAsync("op_http_request_read", streamRid, buf);
}
function createRespondWith(httpConn, responseSenderRid, requestRid) {
function createRespondWith(httpConn, streamRid) {
return async function respondWith(resp) {
try {
if (resp instanceof Promise) {
resp = await resp;
}
@@ -170,7 +170,9 @@
/** @type {ReadableStream<Uint8Array> | Uint8Array | null} */
let respBody = null;
if (innerResp.body !== null) {
if (innerResp.body.unusable()) throw new TypeError("Body is unusable.");
if (innerResp.body.unusable()) {
throw new TypeError("Body is unusable.");
}
if (innerResp.body.streamOrStatic instanceof ReadableStream) {
if (
innerResp.body.length === null ||
@@ -195,15 +197,14 @@
} else {
respBody = new Uint8Array(0);
}
const isStreamingResponseBody = !(respBody instanceof Uint8Array);
SetPrototypeDelete(httpConn.managedResources, responseSenderRid);
let responseBodyRid;
try {
responseBodyRid = await core.opAsync("op_http_response", [
responseSenderRid,
await core.opAsync("op_http_response", [
streamRid,
innerResp.status ?? 200,
innerResp.headerList,
], respBody instanceof Uint8Array ? respBody : null);
], isStreamingResponseBody ? null : respBody);
} catch (error) {
const connError = httpConn[connErrorSymbol];
if (error instanceof BadResource && connError != null) {
@@ -216,11 +217,7 @@
throw error;
}
// If `respond` returns a responseBodyRid, we should stream the body
// to that resource.
if (responseBodyRid !== null) {
SetPrototypeAdd(httpConn.managedResources, responseBodyRid);
try {
if (isStreamingResponseBody) {
if (respBody === null || !(respBody instanceof ReadableStream)) {
throw new TypeError("Unreachable");
}
@@ -233,11 +230,7 @@
break;
}
try {
await core.opAsync(
"op_http_response_write",
responseBodyRid,
value,
);
await core.opAsync("op_http_response_write", streamRid, value);
} catch (error) {
const connError = httpConn[connErrorSymbol];
if (error instanceof BadResource && connError != null) {
@@ -248,31 +241,25 @@
throw error;
}
}
} finally {
// Once all chunks are sent, and the request body is closed, we can
// close the response body.
SetPrototypeDelete(httpConn.managedResources, responseBodyRid);
try {
await core.opAsync("op_http_response_close", responseBodyRid);
} catch { /* pass */ }
await core.opAsync("op_http_response_close", streamRid);
} catch (error) {
await reader.cancel(error);
throw error;
}
}
const ws = resp[_ws];
if (ws) {
if (typeof requestRid !== "number") {
throw new TypeError(
"This request can not be upgraded to a websocket connection.",
);
}
const wsRid = await core.opAsync(
"op_http_upgrade_websocket",
requestRid,
streamRid,
);
ws[_rid] = wsRid;
ws[_protocol] = resp.headers.get("sec-websocket-protocol");
httpConn.close();
if (ws[_readyState] === WebSocket.CLOSING) {
await core.opAsync("op_ws_close", { rid: wsRid });
@@ -292,17 +279,17 @@
ws[_eventLoop]();
}
} else if (typeof requestRid === "number") {
// Try to close "request" resource. It might have been already consumed,
// but if it hasn't been we need to close it here to avoid resource
// leak.
SetPrototypeDelete(httpConn.managedResources, requestRid);
core.tryClose(requestRid);
}
} finally {
if (SetPrototypeHas(httpConn.managedResources, streamRid)) {
SetPrototypeDelete(httpConn.managedResources, streamRid);
core.close(streamRid);
}
}
};
}
function createRequestBodyStream(httpConn, requestRid) {
function createRequestBodyStream(streamRid) {
return new ReadableStream({
type: "bytes",
async pull(controller) {
@@ -310,32 +297,21 @@
// This is the largest possible size for a single packet on a TLS
// stream.
const chunk = new Uint8Array(16 * 1024 + 256);
const read = await readRequest(
requestRid,
chunk,
);
const read = await readRequest(streamRid, chunk);
if (read > 0) {
// We read some data. Enqueue it onto the stream.
controller.enqueue(TypedArrayPrototypeSubarray(chunk, 0, read));
} else {
// We have reached the end of the body, so we close the stream.
controller.close();
SetPrototypeDelete(httpConn.managedResources, requestRid);
core.close(requestRid);
}
} catch (err) {
// There was an error while reading a chunk of the body, so we
// error.
controller.error(err);
controller.close();
SetPrototypeDelete(httpConn.managedResources, requestRid);
core.close(requestRid);
}
},
cancel() {
SetPrototypeDelete(httpConn.managedResources, requestRid);
core.close(requestRid);
},
});
}
@@ -369,7 +345,6 @@
}
const accept = core.opSync("op_http_websocket_accept_header", websocketKey);
const r = newInnerResponse(101);
r.headerList = [
["upgrade", "websocket"],

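The JavaScript HTTP bindings above collapse the separate request, response-sender and response-body resource ids into a single per-request streamRid, which is tracked in managedResources and released in the new finally block. A rough sketch of the user-facing flow these bindings sit underneath, assuming the Deno.serveHttp API of this Deno version (handleConn is only an illustrative name):

// Illustrative only: nextRequest()/respondWith() above do the streamRid
// bookkeeping; user code never sees the resource ids.
async function handleConn(conn) {
  const httpConn = Deno.serveHttp(conn);
  for await (const { request, respondWith } of httpConn) {
    // A ReadableStream body is sent chunk by chunk through
    // op_http_response_write; a Uint8Array body goes out in the single
    // op_http_response call.
    const body = new ReadableStream({
      start(controller) {
        controller.enqueue(new TextEncoder().encode("streamed: " + request.url));
        controller.close();
      },
    });
    await respondWith(new Response(body, { status: 200 }));
  }
}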
File diff suppressed because it is too large

View file

@@ -34,12 +34,13 @@ use std::sync::Arc;
use tokio::net::TcpStream;
use tokio_rustls::rustls::RootCertStore;
use tokio_rustls::TlsConnector;
use tokio_tungstenite::client_async;
use tokio_tungstenite::tungstenite::{
handshake::client::Response, protocol::frame::coding::CloseCode,
protocol::CloseFrame, Message,
protocol::CloseFrame, protocol::Role, Message,
};
use tokio_tungstenite::MaybeTlsStream;
use tokio_tungstenite::{client_async, WebSocketStream};
use tokio_tungstenite::WebSocketStream;
pub use tokio_tungstenite; // Re-export tokio_tungstenite
@@ -72,6 +73,27 @@ pub enum WebSocketStreamType {
},
}
pub async fn ws_create_server_stream(
state: &Rc<RefCell<OpState>>,
transport: hyper::upgrade::Upgraded,
) -> Result<ResourceId, AnyError> {
let ws_stream =
WebSocketStream::from_raw_socket(transport, Role::Server, None).await;
let (ws_tx, ws_rx) = ws_stream.split();
let ws_resource = WsStreamResource {
stream: WebSocketStreamType::Server {
tx: AsyncRefCell::new(ws_tx),
rx: AsyncRefCell::new(ws_rx),
},
cancel: Default::default(),
};
let resource_table = &mut state.borrow_mut().resource_table;
let rid = resource_table.add(ws_resource);
Ok(rid)
}
pub struct WsStreamResource {
pub stream: WebSocketStreamType,
// When a `WsStreamResource` resource is closed, all pending 'read' ops are

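The new ws_create_server_stream helper wraps an already-upgraded hyper connection in a WsStreamResource, so ext/http can hand a server-side socket to ext/websocket via op_http_upgrade_websocket. A hedged sketch of how this surfaces in user code, assuming the Deno.upgradeWebSocket API that drives that op (httpConn as in the earlier sketch):

// Sketch: respondWith() with an upgrade response triggers
// op_http_upgrade_websocket, which ends up in ws_create_server_stream.
for await (const { request, respondWith } of httpConn) {
  const { socket, response } = Deno.upgradeWebSocket(request);
  socket.onmessage = (ev) => socket.send(ev.data); // simple echo
  await respondWith(response);
}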
View file

@@ -17,6 +17,7 @@ use deno_fetch::reqwest;
use std::env;
use std::error::Error;
use std::io;
use std::sync::Arc;
fn get_dlopen_error_class(error: &dlopen::Error) -> &'static str {
use dlopen::Error::*;
@@ -163,6 +164,10 @@ pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> {
.map(get_dlopen_error_class)
})
.or_else(|| e.downcast_ref::<hyper::Error>().map(get_hyper_error_class))
.or_else(|| {
e.downcast_ref::<Arc<hyper::Error>>()
.map(|e| get_hyper_error_class(&**e))
})
.or_else(|| {
e.downcast_ref::<deno_core::Canceled>().map(|e| {
let io_err: io::Error = e.to_owned().into();

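The added downcast lets an Arc-wrapped hyper::Error map to the same error class as a bare hyper::Error, so a connection failure rethrown from the shared stream resource should still surface under the "Http" class in JavaScript (presumably Deno.errors.Http). A brief, hedged sketch of handling it at the call site, inside the request loop from the earlier sketches:

// Sketch: a client that disconnects mid-response can make respondWith
// reject; with the mapping above it rejects with a classified error
// rather than an opaque one.
try {
  await respondWith(new Response("hello"));
} catch (err) {
  if (err instanceof Deno.errors.Http || err instanceof Deno.errors.BadResource) {
    console.warn("connection closed while responding:", err.message);
  } else {
    throw err;
  }
}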
View file

@@ -6,6 +6,7 @@ use deno_core::op_sync;
use deno_core::Extension;
use deno_core::OpState;
use deno_core::ResourceId;
use deno_http::http_create_conn_resource;
use deno_net::io::TcpStreamResource;
use deno_net::ops_tls::TlsStreamResource;
@@ -29,7 +30,7 @@ fn op_http_start(
let (read_half, write_half) = resource.into_inner();
let tcp_stream = read_half.reunite(write_half)?;
let addr = tcp_stream.local_addr()?;
return deno_http::start_http(state, tcp_stream, addr, "http");
return http_create_conn_resource(state, tcp_stream, addr, "http");
}
if let Ok(resource_rc) = state
@@ -41,7 +42,7 @@ fn op_http_start(
let (read_half, write_half) = resource.into_inner();
let tls_stream = read_half.reunite(write_half);
let addr = tls_stream.get_ref().0.local_addr()?;
return deno_http::start_http(state, tls_stream, addr, "https");
return http_create_conn_resource(state, tls_stream, addr, "https");
}
Err(bad_resource_id())
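op_http_start receives the resource id of an existing TCP or TLS connection, reunites its read and write halves, and (with this change) registers the HTTP connection through the renamed http_create_conn_resource. For reference, a short sketch of the calling side, assuming Deno.serveHttp forwards the connection rid to this op and reusing the hypothetical handleConn from the earlier sketch:

// Sketch: serveHttp(conn) is expected to pass conn.rid to op_http_start.
const listener = Deno.listen({ port: 8080 });
for await (const conn of listener) {
  handleConn(conn); // wraps the connection in an HTTP conn resource under the hood
}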