1
0
Fork 0
mirror of https://github.com/denoland/deno.git synced 2024-11-29 16:30:56 -05:00

Reland "refactor(fetch): reimplement fetch with hyper instead of reqwest" (#24593)

Originally landed in
f6fd6619e7.
Reverted in https://github.com/denoland/deno/pull/24574.

This reland contains a fix that sends an "Accept: */*" header for calls made
from "FileFetcher". Absence of this header made downloading source code
from JSR broken. This is tested by ensuring this header is present in the
test server that serves JSR packages.

---------

Co-authored-by: Sean McArthur <sean@seanmonstar.com>
This commit is contained in:
Bartek Iwańczuk 2024-07-18 00:37:31 +01:00
parent a84ca673da
commit b2bacd969c
No known key found for this signature in database
GPG key ID: 0C6BCDDC3B3AD750
34 changed files with 1549 additions and 346 deletions

47
Cargo.lock generated
View file

@ -743,6 +743,7 @@ dependencies = [
"os_pipe", "os_pipe",
"pretty_assertions", "pretty_assertions",
"regex", "regex",
"reqwest",
"serde", "serde",
"test_server", "test_server",
"tokio", "tokio",
@ -1148,6 +1149,10 @@ dependencies = [
"fs3", "fs3",
"glibc_version", "glibc_version",
"glob", "glob",
"http 1.1.0",
"http-body 1.0.0",
"http-body-util",
"hyper-util",
"import_map", "import_map",
"indexmap", "indexmap",
"jsonc-parser", "jsonc-parser",
@ -1172,7 +1177,6 @@ dependencies = [
"quick-junit", "quick-junit",
"rand", "rand",
"regex", "regex",
"reqwest",
"ring", "ring",
"runtimelib", "runtimelib",
"rustyline", "rustyline",
@ -1462,6 +1466,7 @@ dependencies = [
name = "deno_fetch" name = "deno_fetch"
version = "0.185.0" version = "0.185.0"
dependencies = [ dependencies = [
"base64 0.21.7",
"bytes", "bytes",
"data-url", "data-url",
"deno_core", "deno_core",
@ -1469,11 +1474,20 @@ dependencies = [
"deno_tls", "deno_tls",
"dyn-clone", "dyn-clone",
"http 1.1.0", "http 1.1.0",
"reqwest", "http-body-util",
"hyper 1.4.1",
"hyper-rustls",
"hyper-util",
"ipnet",
"serde", "serde",
"serde_json", "serde_json",
"tokio", "tokio",
"tokio-rustls",
"tokio-socks",
"tokio-util", "tokio-util",
"tower",
"tower-http",
"tower-service",
] ]
[[package]] [[package]]
@ -1617,6 +1631,7 @@ dependencies = [
"denokv_sqlite", "denokv_sqlite",
"faster-hex", "faster-hex",
"http 1.1.0", "http 1.1.0",
"http-body-util",
"log", "log",
"num-bigint", "num-bigint",
"prost", "prost",
@ -1736,6 +1751,7 @@ dependencies = [
"hkdf", "hkdf",
"home", "home",
"http 1.1.0", "http 1.1.0",
"http-body-util",
"idna 0.3.0", "idna 0.3.0",
"indexmap", "indexmap",
"ipnetwork", "ipnetwork",
@ -1758,7 +1774,6 @@ dependencies = [
"pin-project-lite", "pin-project-lite",
"rand", "rand",
"regex", "regex",
"reqwest",
"ring", "ring",
"ripemd", "ripemd",
"rsa", "rsa",
@ -3358,12 +3373,12 @@ dependencies = [
[[package]] [[package]]
name = "http-body-util" name = "http-body-util"
version = "0.1.1" version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
dependencies = [ dependencies = [
"bytes", "bytes",
"futures-core", "futures-util",
"http 1.1.0", "http 1.1.0",
"http-body 1.0.0", "http-body 1.0.0",
"pin-project-lite", "pin-project-lite",
@ -7104,6 +7119,26 @@ dependencies = [
"tower-service", "tower-service",
] ]
[[package]]
name = "tower-http"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5"
dependencies = [
"async-compression",
"bitflags 2.5.0",
"bytes",
"futures-core",
"http 1.1.0",
"http-body 1.0.0",
"http-body-util",
"pin-project-lite",
"tokio",
"tokio-util",
"tower-layer",
"tower-service",
]
[[package]] [[package]]
name = "tower-layer" name = "tower-layer"
version = "0.3.2" version = "0.3.2"

View file

@ -115,13 +115,16 @@ futures = "0.3.21"
glob = "0.3.1" glob = "0.3.1"
h2 = "0.4.4" h2 = "0.4.4"
http = "1.0" http = "1.0"
http-body-util = "0.1" http-body = "1.0"
http-body-util = "0.1.2"
http_v02 = { package = "http", version = "0.2.9" } http_v02 = { package = "http", version = "0.2.9" }
httparse = "1.8.0" httparse = "1.8.0"
hyper = { version = "=1.4.1", features = ["full"] } hyper = { version = "1.4.1", features = ["full"] }
hyper-util = { version = "=0.1.6", features = ["tokio", "server", "server-auto"] } hyper-rustls = { version = "0.27.2", default-features = false, features = ["http1", "http2", "tls12", "ring"] }
hyper-util = { version = "=0.1.6", features = ["tokio", "client", "client-legacy", "server", "server-auto"] }
hyper_v014 = { package = "hyper", version = "0.14.26", features = ["runtime", "http1"] } hyper_v014 = { package = "hyper", version = "0.14.26", features = ["runtime", "http1"] }
indexmap = { version = "2", features = ["serde"] } indexmap = { version = "2", features = ["serde"] }
ipnet = "2.3"
jsonc-parser = { version = "=0.23.0", features = ["serde"] } jsonc-parser = { version = "=0.23.0", features = ["serde"] }
lazy-regex = "3" lazy-regex = "3"
libc = "0.2.126" libc = "0.2.126"
@ -173,8 +176,13 @@ termcolor = "1.1.3"
thiserror = "1.0.61" thiserror = "1.0.61"
tokio = { version = "1.36.0", features = ["full"] } tokio = { version = "1.36.0", features = ["full"] }
tokio-metrics = { version = "0.3.0", features = ["rt"] } tokio-metrics = { version = "0.3.0", features = ["rt"] }
tokio-rustls = { version = "0.26.0", default-features = false, features = ["ring", "tls12"] }
tokio-socks = "0.5.1"
tokio-util = "0.7.4" tokio-util = "0.7.4"
tower = { version = "0.4.13", default-features = false, features = ["util"] }
tower-http = { version = "0.5.2", features = ["decompression-br", "decompression-gzip"] }
tower-lsp = { version = "=0.20.0", features = ["proposed"] } tower-lsp = { version = "=0.20.0", features = ["proposed"] }
tower-service = "0.3.2"
twox-hash = "=1.6.3" twox-hash = "=1.6.3"
# Upgrading past 2.4.1 may cause WPT failures # Upgrading past 2.4.1 may cause WPT failures
url = { version = "< 2.5.0", features = ["serde", "expose_internals"] } url = { version = "< 2.5.0", features = ["serde", "expose_internals"] }

View file

@ -107,6 +107,10 @@ faster-hex.workspace = true
flate2.workspace = true flate2.workspace = true
fs3.workspace = true fs3.workspace = true
glob = "0.3.1" glob = "0.3.1"
http.workspace = true
http-body.workspace = true
http-body-util.workspace = true
hyper-util.workspace = true
import_map = { version = "=0.20.0", features = ["ext"] } import_map = { version = "=0.20.0", features = ["ext"] }
indexmap.workspace = true indexmap.workspace = true
jsonc-parser.workspace = true jsonc-parser.workspace = true
@ -128,7 +132,6 @@ phf.workspace = true
quick-junit = "^0.3.5" quick-junit = "^0.3.5"
rand = { workspace = true, features = ["small_rng"] } rand = { workspace = true, features = ["small_rng"] }
regex.workspace = true regex.workspace = true
reqwest.workspace = true
ring.workspace = true ring.workspace = true
rustyline.workspace = true rustyline.workspace = true
rustyline-derive = "=0.7.0" rustyline-derive = "=0.7.0"

View file

@ -12,18 +12,22 @@ use deno_core::error::generic_error;
use deno_core::error::AnyError; use deno_core::error::AnyError;
use deno_core::futures::StreamExt; use deno_core::futures::StreamExt;
use deno_core::parking_lot::Mutex; use deno_core::parking_lot::Mutex;
use deno_core::serde;
use deno_core::serde_json;
use deno_core::url::Url; use deno_core::url::Url;
use deno_runtime::deno_fetch;
use deno_runtime::deno_fetch::create_http_client; use deno_runtime::deno_fetch::create_http_client;
use deno_runtime::deno_fetch::reqwest;
use deno_runtime::deno_fetch::reqwest::header::HeaderName;
use deno_runtime::deno_fetch::reqwest::header::HeaderValue;
use deno_runtime::deno_fetch::reqwest::header::ACCEPT;
use deno_runtime::deno_fetch::reqwest::header::AUTHORIZATION;
use deno_runtime::deno_fetch::reqwest::header::IF_NONE_MATCH;
use deno_runtime::deno_fetch::reqwest::header::LOCATION;
use deno_runtime::deno_fetch::reqwest::StatusCode;
use deno_runtime::deno_fetch::CreateHttpClientOptions; use deno_runtime::deno_fetch::CreateHttpClientOptions;
use deno_runtime::deno_tls::RootCertStoreProvider; use deno_runtime::deno_tls::RootCertStoreProvider;
use http::header::HeaderName;
use http::header::HeaderValue;
use http::header::ACCEPT;
use http::header::AUTHORIZATION;
use http::header::IF_NONE_MATCH;
use http::header::LOCATION;
use http::StatusCode;
use http_body_util::BodyExt;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use std::thread::ThreadId; use std::thread::ThreadId;
@ -208,8 +212,7 @@ pub struct HttpClientProvider {
// it's not safe to share a reqwest::Client across tokio runtimes, // it's not safe to share a reqwest::Client across tokio runtimes,
// so we store these Clients keyed by thread id // so we store these Clients keyed by thread id
// https://github.com/seanmonstar/reqwest/issues/1148#issuecomment-910868788 // https://github.com/seanmonstar/reqwest/issues/1148#issuecomment-910868788
#[allow(clippy::disallowed_types)] // reqwest::Client allowed here clients_by_thread_id: Mutex<HashMap<ThreadId, deno_fetch::Client>>,
clients_by_thread_id: Mutex<HashMap<ThreadId, reqwest::Client>>,
} }
impl std::fmt::Debug for HttpClientProvider { impl std::fmt::Debug for HttpClientProvider {
@ -270,9 +273,15 @@ pub struct BadResponseError {
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum DownloadError { pub enum DownloadError {
#[error(transparent)] #[error(transparent)]
Reqwest(#[from] reqwest::Error), Fetch(AnyError),
#[error(transparent)] #[error(transparent)]
ToStr(#[from] reqwest::header::ToStrError), UrlParse(#[from] deno_core::url::ParseError),
#[error(transparent)]
HttpParse(#[from] http::Error),
#[error(transparent)]
Json(#[from] serde_json::Error),
#[error(transparent)]
ToStr(#[from] http::header::ToStrError),
#[error("Redirection from '{}' did not provide location header", .request_url)] #[error("Redirection from '{}' did not provide location header", .request_url)]
NoRedirectHeader { request_url: Url }, NoRedirectHeader { request_url: Url },
#[error("Too many redirects.")] #[error("Too many redirects.")]
@ -283,8 +292,7 @@ pub enum DownloadError {
#[derive(Debug)] #[derive(Debug)]
pub struct HttpClient { pub struct HttpClient {
#[allow(clippy::disallowed_types)] // reqwest::Client allowed here client: deno_fetch::Client,
client: reqwest::Client,
// don't allow sending this across threads because then // don't allow sending this across threads because then
// it might be shared accidentally across tokio runtimes // it might be shared accidentally across tokio runtimes
// which will cause issues // which will cause issues
@ -295,22 +303,56 @@ pub struct HttpClient {
impl HttpClient { impl HttpClient {
// DO NOT make this public. You should always be creating one of these from // DO NOT make this public. You should always be creating one of these from
// the HttpClientProvider // the HttpClientProvider
#[allow(clippy::disallowed_types)] // reqwest::Client allowed here fn new(client: deno_fetch::Client) -> Self {
fn new(client: reqwest::Client) -> Self {
Self { Self {
client, client,
_unsend_marker: deno_core::unsync::UnsendMarker::default(), _unsend_marker: deno_core::unsync::UnsendMarker::default(),
} }
} }
// todo(dsherret): don't expose `reqwest::RequestBuilder` because it pub fn get(&self, url: Url) -> Result<RequestBuilder, http::Error> {
// is `Sync` and could accidentally be shared with multiple tokio runtimes let body = http_body_util::Empty::new()
pub fn get(&self, url: impl reqwest::IntoUrl) -> reqwest::RequestBuilder { .map_err(|never| match never {})
self.client.get(url) .boxed();
let mut req = http::Request::new(body);
*req.uri_mut() = url.as_str().parse()?;
Ok(RequestBuilder {
client: self.client.clone(),
req,
})
} }
pub fn post(&self, url: impl reqwest::IntoUrl) -> reqwest::RequestBuilder { pub fn post(
self.client.post(url) &self,
url: Url,
body: deno_fetch::ReqBody,
) -> Result<RequestBuilder, http::Error> {
let mut req = http::Request::new(body);
*req.method_mut() = http::Method::POST;
*req.uri_mut() = url.as_str().parse()?;
Ok(RequestBuilder {
client: self.client.clone(),
req,
})
}
pub fn post_json<S>(
&self,
url: Url,
ser: &S,
) -> Result<RequestBuilder, DownloadError>
where
S: serde::Serialize,
{
let json = deno_core::serde_json::to_vec(ser)?;
let body = http_body_util::Full::new(json.into())
.map_err(|never| match never {})
.boxed();
let builder = self.post(url, body)?;
Ok(builder.header(
http::header::CONTENT_TYPE,
"application/json".parse().map_err(http::Error::from)?,
))
} }
/// Asynchronously fetches the given HTTP URL one pass only. /// Asynchronously fetches the given HTTP URL one pass only.
@ -322,27 +364,35 @@ impl HttpClient {
&self, &self,
args: FetchOnceArgs<'a>, args: FetchOnceArgs<'a>,
) -> Result<FetchOnceResult, AnyError> { ) -> Result<FetchOnceResult, AnyError> {
let mut request = self.client.get(args.url.clone()); let body = http_body_util::Empty::new()
.map_err(|never| match never {})
.boxed();
let mut request = http::Request::new(body);
*request.uri_mut() = args.url.as_str().parse()?;
if let Some(etag) = args.maybe_etag { if let Some(etag) = args.maybe_etag {
let if_none_match_val = HeaderValue::from_str(&etag)?; let if_none_match_val = HeaderValue::from_str(&etag)?;
request = request.header(IF_NONE_MATCH, if_none_match_val); request
.headers_mut()
.insert(IF_NONE_MATCH, if_none_match_val);
} }
if let Some(auth_token) = args.maybe_auth_token { if let Some(auth_token) = args.maybe_auth_token {
let authorization_val = HeaderValue::from_str(&auth_token.to_string())?; let authorization_val = HeaderValue::from_str(&auth_token.to_string())?;
request = request.header(AUTHORIZATION, authorization_val); request
.headers_mut()
.insert(AUTHORIZATION, authorization_val);
} }
if let Some(accept) = args.maybe_accept { if let Some(accept) = args.maybe_accept {
let accepts_val = HeaderValue::from_str(&accept)?; let accepts_val = HeaderValue::from_str(&accept)?;
request = request.header(ACCEPT, accepts_val); request.headers_mut().insert(ACCEPT, accepts_val);
} }
let response = match request.send().await { let response = match self.client.clone().send(request).await {
Ok(resp) => resp, Ok(resp) => resp,
Err(err) => { Err(err) => {
if err.is_connect() || err.is_timeout() { if is_error_connect(&err) {
return Ok(FetchOnceResult::RequestError(err.to_string())); return Ok(FetchOnceResult::RequestError(err.to_string()));
} }
return Err(err.into()); return Err(err);
} }
}; };
@ -406,18 +456,12 @@ impl HttpClient {
Ok(FetchOnceResult::Code(body, result_headers)) Ok(FetchOnceResult::Code(body, result_headers))
} }
pub async fn download_text( pub async fn download_text(&self, url: Url) -> Result<String, AnyError> {
&self,
url: impl reqwest::IntoUrl,
) -> Result<String, AnyError> {
let bytes = self.download(url).await?; let bytes = self.download(url).await?;
Ok(String::from_utf8(bytes)?) Ok(String::from_utf8(bytes)?)
} }
pub async fn download( pub async fn download(&self, url: Url) -> Result<Vec<u8>, AnyError> {
&self,
url: impl reqwest::IntoUrl,
) -> Result<Vec<u8>, AnyError> {
let maybe_bytes = self.download_inner(url, None, None).await?; let maybe_bytes = self.download_inner(url, None, None).await?;
match maybe_bytes { match maybe_bytes {
Some(bytes) => Ok(bytes), Some(bytes) => Ok(bytes),
@ -427,7 +471,7 @@ impl HttpClient {
pub async fn download_with_progress( pub async fn download_with_progress(
&self, &self,
url: impl reqwest::IntoUrl, url: Url,
maybe_header: Option<(HeaderName, HeaderValue)>, maybe_header: Option<(HeaderName, HeaderValue)>,
progress_guard: &UpdateGuard, progress_guard: &UpdateGuard,
) -> Result<Option<Vec<u8>>, DownloadError> { ) -> Result<Option<Vec<u8>>, DownloadError> {
@ -438,26 +482,26 @@ impl HttpClient {
pub async fn get_redirected_url( pub async fn get_redirected_url(
&self, &self,
url: impl reqwest::IntoUrl, url: Url,
maybe_header: Option<(HeaderName, HeaderValue)>, maybe_header: Option<(HeaderName, HeaderValue)>,
) -> Result<Url, AnyError> { ) -> Result<Url, AnyError> {
let response = self.get_redirected_response(url, maybe_header).await?; let (_, url) = self.get_redirected_response(url, maybe_header).await?;
Ok(response.url().clone()) Ok(url)
} }
async fn download_inner( async fn download_inner(
&self, &self,
url: impl reqwest::IntoUrl, url: Url,
maybe_header: Option<(HeaderName, HeaderValue)>, maybe_header: Option<(HeaderName, HeaderValue)>,
progress_guard: Option<&UpdateGuard>, progress_guard: Option<&UpdateGuard>,
) -> Result<Option<Vec<u8>>, DownloadError> { ) -> Result<Option<Vec<u8>>, DownloadError> {
let response = self.get_redirected_response(url, maybe_header).await?; let (response, _) = self.get_redirected_response(url, maybe_header).await?;
if response.status() == 404 { if response.status() == 404 {
return Ok(None); return Ok(None);
} else if !response.status().is_success() { } else if !response.status().is_success() {
let status = response.status(); let status = response.status();
let maybe_response_text = response.text().await.ok(); let maybe_response_text = body_to_string(response).await.ok();
return Err(DownloadError::BadResponse(BadResponseError { return Err(DownloadError::BadResponse(BadResponseError {
status_code: status, status_code: status,
response_text: maybe_response_text response_text: maybe_response_text
@ -469,60 +513,77 @@ impl HttpClient {
get_response_body_with_progress(response, progress_guard) get_response_body_with_progress(response, progress_guard)
.await .await
.map(Some) .map(Some)
.map_err(Into::into) .map_err(DownloadError::Fetch)
} }
async fn get_redirected_response( async fn get_redirected_response(
&self, &self,
url: impl reqwest::IntoUrl, mut url: Url,
mut maybe_header: Option<(HeaderName, HeaderValue)>, mut maybe_header: Option<(HeaderName, HeaderValue)>,
) -> Result<reqwest::Response, DownloadError> { ) -> Result<(http::Response<deno_fetch::ResBody>, Url), DownloadError> {
let mut url = url.into_url()?; let mut req = self.get(url.clone())?.build();
let mut builder = self.get(url.clone());
if let Some((header_name, header_value)) = maybe_header.as_ref() { if let Some((header_name, header_value)) = maybe_header.as_ref() {
builder = builder.header(header_name, header_value); req.headers_mut().append(header_name, header_value.clone());
} }
let mut response = builder.send().await?; let mut response = self
.client
.clone()
.send(req)
.await
.map_err(DownloadError::Fetch)?;
let status = response.status(); let status = response.status();
if status.is_redirection() { if status.is_redirection() {
for _ in 0..5 { for _ in 0..5 {
let new_url = resolve_redirect_from_response(&url, &response)?; let new_url = resolve_redirect_from_response(&url, &response)?;
let mut builder = self.get(new_url.clone()); let mut req = self.get(new_url.clone())?.build();
if new_url.origin() == url.origin() { if new_url.origin() == url.origin() {
if let Some((header_name, header_value)) = maybe_header.as_ref() { if let Some((header_name, header_value)) = maybe_header.as_ref() {
builder = builder.header(header_name, header_value); req.headers_mut().append(header_name, header_value.clone());
} }
} else { } else {
maybe_header = None; maybe_header = None;
} }
let new_response = builder.send().await?; let new_response = self
.client
.clone()
.send(req)
.await
.map_err(DownloadError::Fetch)?;
let status = new_response.status(); let status = new_response.status();
if status.is_redirection() { if status.is_redirection() {
response = new_response; response = new_response;
url = new_url; url = new_url;
} else { } else {
return Ok(new_response); return Ok((new_response, new_url));
} }
} }
Err(DownloadError::TooManyRedirects) Err(DownloadError::TooManyRedirects)
} else { } else {
Ok(response) Ok((response, url))
} }
} }
} }
fn is_error_connect(err: &AnyError) -> bool {
err
.downcast_ref::<hyper_util::client::legacy::Error>()
.map(|err| err.is_connect())
.unwrap_or(false)
}
async fn get_response_body_with_progress( async fn get_response_body_with_progress(
response: reqwest::Response, response: http::Response<deno_fetch::ResBody>,
progress_guard: Option<&UpdateGuard>, progress_guard: Option<&UpdateGuard>,
) -> Result<Vec<u8>, reqwest::Error> { ) -> Result<Vec<u8>, AnyError> {
use http_body::Body as _;
if let Some(progress_guard) = progress_guard { if let Some(progress_guard) = progress_guard {
if let Some(total_size) = response.content_length() { if let Some(total_size) = response.body().size_hint().exact() {
progress_guard.set_total_size(total_size); progress_guard.set_total_size(total_size);
let mut current_size = 0; let mut current_size = 0;
let mut data = Vec::with_capacity(total_size as usize); let mut data = Vec::with_capacity(total_size as usize);
let mut stream = response.bytes_stream(); let mut stream = response.into_body().into_data_stream();
while let Some(item) = stream.next().await { while let Some(item) = stream.next().await {
let bytes = item?; let bytes = item?;
current_size += bytes.len() as u64; current_size += bytes.len() as u64;
@ -532,7 +593,7 @@ async fn get_response_body_with_progress(
return Ok(data); return Ok(data);
} }
} }
let bytes = response.bytes().await?; let bytes = response.collect().await?.to_bytes();
Ok(bytes.into()) Ok(bytes.into())
} }
@ -563,9 +624,9 @@ fn resolve_url_from_location(base_url: &Url, location: &str) -> Url {
} }
} }
fn resolve_redirect_from_response( fn resolve_redirect_from_response<B>(
request_url: &Url, request_url: &Url,
response: &reqwest::Response, response: &http::Response<B>,
) -> Result<Url, DownloadError> { ) -> Result<Url, DownloadError> {
debug_assert!(response.status().is_redirection()); debug_assert!(response.status().is_redirection());
if let Some(location) = response.headers().get(LOCATION) { if let Some(location) = response.headers().get(LOCATION) {
@ -580,6 +641,49 @@ fn resolve_redirect_from_response(
} }
} }
pub async fn body_to_string<B>(body: B) -> Result<String, AnyError>
where
B: http_body::Body,
AnyError: From<B::Error>,
{
let bytes = body.collect().await?.to_bytes();
let s = std::str::from_utf8(&bytes)?;
Ok(s.into())
}
pub async fn body_to_json<B, D>(body: B) -> Result<D, AnyError>
where
B: http_body::Body,
AnyError: From<B::Error>,
D: serde::de::DeserializeOwned,
{
let bytes = body.collect().await?.to_bytes();
let val = deno_core::serde_json::from_slice(&bytes)?;
Ok(val)
}
pub struct RequestBuilder {
client: deno_fetch::Client,
req: http::Request<deno_fetch::ReqBody>,
}
impl RequestBuilder {
pub fn header(mut self, name: HeaderName, value: HeaderValue) -> Self {
self.req.headers_mut().append(name, value);
self
}
pub async fn send(
self,
) -> Result<http::Response<deno_fetch::ResBody>, AnyError> {
self.client.send(self.req).await
}
pub fn build(self) -> http::Request<deno_fetch::ReqBody> {
self.req
}
}
#[allow(clippy::print_stdout)] #[allow(clippy::print_stdout)]
#[allow(clippy::print_stderr)] #[allow(clippy::print_stderr)]
#[cfg(test)] #[cfg(test)]
@ -600,14 +704,20 @@ mod test {
// make a request to the redirect server // make a request to the redirect server
let text = client let text = client
.download_text("http://localhost:4546/subdir/redirects/redirect1.js") .download_text(
Url::parse("http://localhost:4546/subdir/redirects/redirect1.js")
.unwrap(),
)
.await .await
.unwrap(); .unwrap();
assert_eq!(text, "export const redirect = 1;\n"); assert_eq!(text, "export const redirect = 1;\n");
// now make one to the infinite redirects server // now make one to the infinite redirects server
let err = client let err = client
.download_text("http://localhost:4549/subdir/redirects/redirect1.js") .download_text(
Url::parse("http://localhost:4549/subdir/redirects/redirect1.js")
.unwrap(),
)
.await .await
.err() .err()
.unwrap(); .unwrap();

View file

@ -1,7 +1,7 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use deno_npm::npm_rc::RegistryConfig; use deno_npm::npm_rc::RegistryConfig;
use reqwest::header; use http::header;
// TODO(bartlomieju): support more auth methods besides token and basic auth // TODO(bartlomieju): support more auth methods besides token and basic auth
pub fn maybe_auth_header_for_npm_registry( pub fn maybe_auth_header_for_npm_registry(

View file

@ -11,12 +11,12 @@ use deno_core::error::AnyError;
use deno_core::futures::future::LocalBoxFuture; use deno_core::futures::future::LocalBoxFuture;
use deno_core::futures::FutureExt; use deno_core::futures::FutureExt;
use deno_core::parking_lot::Mutex; use deno_core::parking_lot::Mutex;
use deno_core::url::Url;
use deno_npm::npm_rc::ResolvedNpmRc; use deno_npm::npm_rc::ResolvedNpmRc;
use deno_npm::registry::NpmPackageVersionDistInfo; use deno_npm::registry::NpmPackageVersionDistInfo;
use deno_runtime::deno_fs::FileSystem; use deno_runtime::deno_fs::FileSystem;
use deno_semver::package::PackageNv; use deno_semver::package::PackageNv;
use reqwest::StatusCode; use http::StatusCode;
use reqwest::Url;
use crate::args::CacheSetting; use crate::args::CacheSetting;
use crate::http_util::DownloadError; use crate::http_util::DownloadError;

View file

@ -495,7 +495,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
self self
.http_client_provider .http_client_provider
.get_or_create()? .get_or_create()?
.download_with_progress(download_url, None, &progress) .download_with_progress(download_url.parse()?, None, &progress)
.await? .await?
}; };
let bytes = match maybe_bytes { let bytes = match maybe_bytes {

View file

@ -1,8 +1,9 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use crate::http_util;
use deno_core::error::AnyError; use deno_core::error::AnyError;
use deno_core::serde_json; use deno_core::serde_json;
use deno_runtime::deno_fetch::reqwest; use deno_runtime::deno_fetch;
use lsp_types::Url; use lsp_types::Url;
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;
@ -82,7 +83,7 @@ impl std::fmt::Debug for ApiError {
impl std::error::Error for ApiError {} impl std::error::Error for ApiError {}
pub async fn parse_response<T: DeserializeOwned>( pub async fn parse_response<T: DeserializeOwned>(
response: reqwest::Response, response: http::Response<deno_fetch::ResBody>,
) -> Result<T, ApiError> { ) -> Result<T, ApiError> {
let status = response.status(); let status = response.status();
let x_deno_ray = response let x_deno_ray = response
@ -90,7 +91,7 @@ pub async fn parse_response<T: DeserializeOwned>(
.get("x-deno-ray") .get("x-deno-ray")
.and_then(|value| value.to_str().ok()) .and_then(|value| value.to_str().ok())
.map(|s| s.to_string()); .map(|s| s.to_string());
let text = response.text().await.unwrap(); let text = http_util::body_to_string(response).await.unwrap();
if !status.is_success() { if !status.is_success() {
match serde_json::from_str::<ApiError>(&text) { match serde_json::from_str::<ApiError>(&text) {
@ -122,9 +123,9 @@ pub async fn get_scope(
client: &HttpClient, client: &HttpClient,
registry_api_url: &Url, registry_api_url: &Url,
scope: &str, scope: &str,
) -> Result<reqwest::Response, AnyError> { ) -> Result<http::Response<deno_fetch::ResBody>, AnyError> {
let scope_url = format!("{}scopes/{}", registry_api_url, scope); let scope_url = format!("{}scopes/{}", registry_api_url, scope);
let response = client.get(&scope_url).send().await?; let response = client.get(scope_url.parse()?)?.send().await?;
Ok(response) Ok(response)
} }
@ -141,9 +142,9 @@ pub async fn get_package(
registry_api_url: &Url, registry_api_url: &Url,
scope: &str, scope: &str,
package: &str, package: &str,
) -> Result<reqwest::Response, AnyError> { ) -> Result<http::Response<deno_fetch::ResBody>, AnyError> {
let package_url = get_package_api_url(registry_api_url, scope, package); let package_url = get_package_api_url(registry_api_url, scope, package);
let response = client.get(&package_url).send().await?; let response = client.get(package_url.parse()?)?.send().await?;
Ok(response) Ok(response)
} }

View file

@ -23,8 +23,8 @@ use deno_core::futures::StreamExt;
use deno_core::serde_json; use deno_core::serde_json;
use deno_core::serde_json::json; use deno_core::serde_json::json;
use deno_core::serde_json::Value; use deno_core::serde_json::Value;
use deno_runtime::deno_fetch::reqwest;
use deno_terminal::colors; use deno_terminal::colors;
use http_body_util::BodyExt;
use lsp_types::Url; use lsp_types::Url;
use serde::Deserialize; use serde::Deserialize;
use serde::Serialize; use serde::Serialize;
@ -539,11 +539,13 @@ async fn get_auth_headers(
let challenge = BASE64_STANDARD.encode(sha2::Sha256::digest(&verifier)); let challenge = BASE64_STANDARD.encode(sha2::Sha256::digest(&verifier));
let response = client let response = client
.post(format!("{}authorizations", registry_url)) .post_json(
.json(&serde_json::json!({ format!("{}authorizations", registry_url).parse()?,
"challenge": challenge, &serde_json::json!({
"permissions": permissions, "challenge": challenge,
})) "permissions": permissions,
}),
)?
.send() .send()
.await .await
.context("Failed to create interactive authorization")?; .context("Failed to create interactive authorization")?;
@ -573,11 +575,13 @@ async fn get_auth_headers(
loop { loop {
tokio::time::sleep(interval).await; tokio::time::sleep(interval).await;
let response = client let response = client
.post(format!("{}authorizations/exchange", registry_url)) .post_json(
.json(&serde_json::json!({ format!("{}authorizations/exchange", registry_url).parse()?,
"exchangeToken": auth.exchange_token, &serde_json::json!({
"verifier": verifier, "exchangeToken": auth.exchange_token,
})) "verifier": verifier,
}),
)?
.send() .send()
.await .await
.context("Failed to exchange authorization")?; .context("Failed to exchange authorization")?;
@ -634,15 +638,20 @@ async fn get_auth_headers(
); );
let response = client let response = client
.get(url) .get(url.parse()?)?
.bearer_auth(&oidc_config.token) .header(
http::header::AUTHORIZATION,
format!("Bearer {}", oidc_config.token).parse()?,
)
.send() .send()
.await .await
.context("Failed to get OIDC token")?; .context("Failed to get OIDC token")?;
let status = response.status(); let status = response.status();
let text = response.text().await.with_context(|| { let text = crate::http_util::body_to_string(response)
format!("Failed to get OIDC token: status {}", status) .await
})?; .with_context(|| {
format!("Failed to get OIDC token: status {}", status)
})?;
if !status.is_success() { if !status.is_success() {
bail!( bail!(
"Failed to get OIDC token: status {}, response: '{}'", "Failed to get OIDC token: status {}, response: '{}'",
@ -770,7 +779,7 @@ async fn ensure_scopes_and_packages_exist(
loop { loop {
tokio::time::sleep(std::time::Duration::from_secs(3)).await; tokio::time::sleep(std::time::Duration::from_secs(3)).await;
let response = client.get(&package_api_url).send().await?; let response = client.get(package_api_url.parse()?)?.send().await?;
if response.status() == 200 { if response.status() == 200 {
let name = format!("@{}/{}", package.scope, package.package); let name = format!("@{}/{}", package.scope, package.package);
log::info!("Package {} created", colors::green(name)); log::info!("Package {} created", colors::green(name));
@ -894,11 +903,19 @@ async fn publish_package(
package.config package.config
); );
let body = http_body_util::Full::new(package.tarball.bytes.clone())
.map_err(|never| match never {})
.boxed();
let response = http_client let response = http_client
.post(url) .post(url.parse()?, body)?
.header(reqwest::header::AUTHORIZATION, authorization) .header(
.header(reqwest::header::CONTENT_ENCODING, "gzip") http::header::AUTHORIZATION,
.body(package.tarball.bytes.clone()) authorization.parse().map_err(http::Error::from)?,
)
.header(
http::header::CONTENT_ENCODING,
"gzip".parse().map_err(http::Error::from)?,
)
.send() .send()
.await?; .await?;
@ -943,7 +960,7 @@ async fn publish_package(
while task.status != "success" && task.status != "failure" { while task.status != "success" && task.status != "failure" {
tokio::time::sleep(interval).await; tokio::time::sleep(interval).await;
let resp = http_client let resp = http_client
.get(format!("{}publish_status/{}", registry_api_url, task.id)) .get(format!("{}publish_status/{}", registry_api_url, task.id).parse()?)?
.send() .send()
.await .await
.with_context(|| { .with_context(|| {
@ -992,7 +1009,8 @@ async fn publish_package(
package.scope, package.package, package.version package.scope, package.package, package.version
))?; ))?;
let meta_bytes = http_client.get(meta_url).send().await?.bytes().await?; let resp = http_client.get(meta_url)?.send().await?;
let meta_bytes = resp.collect().await?.to_bytes();
if std::env::var("DISABLE_JSR_MANIFEST_VERIFICATION_FOR_TESTING").is_err() { if std::env::var("DISABLE_JSR_MANIFEST_VERIFICATION_FOR_TESTING").is_err() {
verify_version_manifest(&meta_bytes, &package)?; verify_version_manifest(&meta_bytes, &package)?;
@ -1023,9 +1041,8 @@ async fn publish_package(
registry_api_url, package.scope, package.package, package.version registry_api_url, package.scope, package.package, package.version
); );
http_client http_client
.post(provenance_url) .post_json(provenance_url.parse()?, &json!({ "bundle": bundle }))?
.header(reqwest::header::AUTHORIZATION, authorization) .header(http::header::AUTHORIZATION, authorization.parse()?)
.json(&json!({ "bundle": bundle }))
.send() .send()
.await?; .await?;
} }

View file

@ -1,5 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use crate::http_util;
use crate::http_util::HttpClient; use crate::http_util::HttpClient;
use super::api::OidcTokenResponse; use super::api::OidcTokenResponse;
@ -12,6 +13,8 @@ use deno_core::anyhow;
use deno_core::anyhow::bail; use deno_core::anyhow::bail;
use deno_core::error::AnyError; use deno_core::error::AnyError;
use deno_core::serde_json; use deno_core::serde_json;
use deno_core::url::Url;
use http_body_util::BodyExt;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use p256::elliptic_curve; use p256::elliptic_curve;
use p256::pkcs8::AssociatedOid; use p256::pkcs8::AssociatedOid;
@ -504,12 +507,12 @@ impl<'a> FulcioSigner<'a> {
let response = self let response = self
.http_client .http_client
.post(url) .post_json(url.parse()?, &request_body)?
.json(&request_body)
.send() .send()
.await?; .await?;
let body: SigningCertificateResponse = response.json().await?; let body: SigningCertificateResponse =
http_util::body_to_json(response).await?;
let key = body let key = body
.signed_certificate_embedded_sct .signed_certificate_embedded_sct
@ -527,15 +530,23 @@ impl<'a> FulcioSigner<'a> {
bail!("No OIDC token available"); bail!("No OIDC token available");
}; };
let res = self let mut url = req_url.parse::<Url>()?;
url.query_pairs_mut().append_pair("audience", aud);
let res_bytes = self
.http_client .http_client
.get(&req_url) .get(url)?
.bearer_auth(token) .header(
.query(&[("audience", aud)]) http::header::AUTHORIZATION,
format!("Bearer {}", token)
.parse()
.map_err(http::Error::from)?,
)
.send() .send()
.await? .await?
.json::<OidcTokenResponse>() .collect()
.await?; .await?
.to_bytes();
let res: OidcTokenResponse = serde_json::from_slice(&res_bytes)?;
Ok(res.value) Ok(res.value)
} }
} }
@ -685,11 +696,10 @@ async fn testify(
let url = format!("{}/api/v1/log/entries", *DEFAULT_REKOR_URL); let url = format!("{}/api/v1/log/entries", *DEFAULT_REKOR_URL);
let res = http_client let res = http_client
.post(&url) .post_json(url.parse()?, &proposed_intoto_entry)?
.json(&proposed_intoto_entry)
.send() .send()
.await?; .await?;
let body: RekorEntry = res.json().await?; let body: RekorEntry = http_util::body_to_json(res).await?;
Ok(body) Ok(body)
} }

View file

@ -881,12 +881,11 @@ async fn run_tests_for_worker_inner(
// failing. If we don't do this, a connection to a test server we just tore down might be re-used in // failing. If we don't do this, a connection to a test server we just tore down might be re-used in
// the next test. // the next test.
// TODO(mmastrac): this should be some sort of callback that we can implement for any subsystem // TODO(mmastrac): this should be some sort of callback that we can implement for any subsystem
#[allow(clippy::disallowed_types)] // allow using reqwest::Client here
worker worker
.js_runtime .js_runtime
.op_state() .op_state()
.borrow_mut() .borrow_mut()
.try_take::<deno_runtime::deno_fetch::reqwest::Client>(); .try_take::<deno_runtime::deno_fetch::Client>();
if desc.ignore { if desc.ignore {
send_test_event( send_test_event(

View file

@ -571,7 +571,7 @@ async fn get_latest_version(
check_kind: UpgradeCheckKind, check_kind: UpgradeCheckKind,
) -> Result<String, AnyError> { ) -> Result<String, AnyError> {
let url = get_url(release_kind, env!("TARGET"), check_kind); let url = get_url(release_kind, env!("TARGET"), check_kind);
let text = client.download_text(url).await?; let text = client.download_text(url.parse()?).await?;
Ok(normalize_version_from_server(release_kind, &text)) Ok(normalize_version_from_server(release_kind, &text))
} }
@ -624,7 +624,7 @@ async fn download_package(
// text above which will stay alive after the progress bars are complete // text above which will stay alive after the progress bars are complete
let progress = progress_bar.update(""); let progress = progress_bar.update("");
client client
.download_with_progress(download_url, None, &progress) .download_with_progress(download_url.parse()?, None, &progress)
.await? .await?
}; };
match maybe_bytes { match maybe_bytes {

View file

@ -14,6 +14,7 @@ description = "Fetch API implementation for Deno"
path = "lib.rs" path = "lib.rs"
[dependencies] [dependencies]
base64.workspace = true
bytes.workspace = true bytes.workspace = true
data-url.workspace = true data-url.workspace = true
deno_core.workspace = true deno_core.workspace = true
@ -21,8 +22,17 @@ deno_permissions.workspace = true
deno_tls.workspace = true deno_tls.workspace = true
dyn-clone = "1" dyn-clone = "1"
http.workspace = true http.workspace = true
reqwest.workspace = true http-body-util.workspace = true
hyper.workspace = true
hyper-rustls.workspace = true
hyper-util.workspace = true
ipnet.workspace = true
serde.workspace = true serde.workspace = true
serde_json.workspace = true serde_json.workspace = true
tokio.workspace = true tokio.workspace = true
tokio-rustls.workspace = true
tokio-socks.workspace = true
tokio-util = { workspace = true, features = ["io"] } tokio-util = { workspace = true, features = ["io"] }
tower.workspace = true
tower-http.workspace = true
tower-service.workspace = true

View file

@ -7,10 +7,12 @@ use crate::FetchHandler;
use deno_core::error::type_error; use deno_core::error::type_error;
use deno_core::futures::FutureExt; use deno_core::futures::FutureExt;
use deno_core::futures::TryFutureExt; use deno_core::futures::TryFutureExt;
use deno_core::futures::TryStreamExt;
use deno_core::url::Url; use deno_core::url::Url;
use deno_core::CancelFuture; use deno_core::CancelFuture;
use deno_core::OpState; use deno_core::OpState;
use reqwest::StatusCode; use http::StatusCode;
use http_body_util::BodyExt;
use std::rc::Rc; use std::rc::Rc;
use tokio_util::io::ReaderStream; use tokio_util::io::ReaderStream;
@ -23,19 +25,21 @@ impl FetchHandler for FsFetchHandler {
fn fetch_file( fn fetch_file(
&self, &self,
_state: &mut OpState, _state: &mut OpState,
url: Url, url: &Url,
) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>) { ) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>) {
let cancel_handle = CancelHandle::new_rc(); let cancel_handle = CancelHandle::new_rc();
let path_result = url.to_file_path();
let response_fut = async move { let response_fut = async move {
let path = url.to_file_path()?; let path = path_result?;
let file = tokio::fs::File::open(path).map_err(|_| ()).await?; let file = tokio::fs::File::open(path).map_err(|_| ()).await?;
let stream = ReaderStream::new(file); let stream = ReaderStream::new(file)
let body = reqwest::Body::wrap_stream(stream); .map_ok(hyper::body::Frame::data)
.map_err(Into::into);
let body = http_body_util::StreamBody::new(stream).boxed();
let response = http::Response::builder() let response = http::Response::builder()
.status(StatusCode::OK) .status(StatusCode::OK)
.body(body) .body(body)
.map_err(|_| ())? .map_err(|_| ())?;
.into();
Ok::<_, ()>(response) Ok::<_, ()>(response)
} }
.map_err(move |_| { .map_err(move |_| {

View file

@ -1,6 +1,7 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
mod fs_fetch_handler; mod fs_fetch_handler;
mod proxy;
use std::borrow::Cow; use std::borrow::Cow;
use std::cell::RefCell; use std::cell::RefCell;
@ -14,7 +15,7 @@ use std::sync::Arc;
use std::task::Context; use std::task::Context;
use std::task::Poll; use std::task::Poll;
use bytes::Bytes; use deno_core::anyhow::anyhow;
use deno_core::anyhow::Error; use deno_core::anyhow::Error;
use deno_core::error::type_error; use deno_core::error::type_error;
use deno_core::error::AnyError; use deno_core::error::AnyError;
@ -42,34 +43,39 @@ use deno_core::ResourceId;
use deno_tls::rustls::RootCertStore; use deno_tls::rustls::RootCertStore;
use deno_tls::Proxy; use deno_tls::Proxy;
use deno_tls::RootCertStoreProvider; use deno_tls::RootCertStoreProvider;
use data_url::DataUrl;
use deno_tls::TlsKey; use deno_tls::TlsKey;
use deno_tls::TlsKeys; use deno_tls::TlsKeys;
use deno_tls::TlsKeysHolder; use deno_tls::TlsKeysHolder;
use bytes::Bytes;
use data_url::DataUrl;
use http::header::HeaderName;
use http::header::HeaderValue;
use http::header::ACCEPT;
use http::header::ACCEPT_ENCODING;
use http::header::CONTENT_LENGTH; use http::header::CONTENT_LENGTH;
use http::header::HOST;
use http::header::PROXY_AUTHORIZATION;
use http::header::RANGE;
use http::header::USER_AGENT;
use http::Method;
use http::Uri; use http::Uri;
use reqwest::header::HeaderMap; use http_body_util::BodyExt;
use reqwest::header::HeaderName; use hyper::body::Frame;
use reqwest::header::HeaderValue; use hyper_rustls::HttpsConnector;
use reqwest::header::ACCEPT_ENCODING; use hyper_util::client::legacy::connect::HttpConnector;
use reqwest::header::HOST; use hyper_util::rt::TokioExecutor;
use reqwest::header::RANGE; use hyper_util::rt::TokioIo;
use reqwest::header::USER_AGENT; use hyper_util::rt::TokioTimer;
use reqwest::redirect::Policy;
use reqwest::Body;
use reqwest::Client;
use reqwest::Method;
use reqwest::RequestBuilder;
use reqwest::Response;
use serde::Deserialize; use serde::Deserialize;
use serde::Serialize; use serde::Serialize;
use tokio::io::AsyncReadExt; use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt; use tokio::io::AsyncWriteExt;
use tower::ServiceExt;
use tower_http::decompression::Decompression;
// Re-export reqwest and data_url // Re-export data_url
pub use data_url; pub use data_url;
pub use reqwest;
pub use fs_fetch_handler::FsFetchHandler; pub use fs_fetch_handler::FsFetchHandler;
@ -78,8 +84,9 @@ pub struct Options {
pub user_agent: String, pub user_agent: String,
pub root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>, pub root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>,
pub proxy: Option<Proxy>, pub proxy: Option<Proxy>,
#[allow(clippy::type_complexity)]
pub request_builder_hook: pub request_builder_hook:
Option<fn(RequestBuilder) -> Result<RequestBuilder, AnyError>>, Option<fn(&mut http::Request<ReqBody>) -> Result<(), AnyError>>,
pub unsafely_ignore_certificate_errors: Option<Vec<String>>, pub unsafely_ignore_certificate_errors: Option<Vec<String>>,
pub client_cert_chain_and_key: TlsKeys, pub client_cert_chain_and_key: TlsKeys,
pub file_fetch_handler: Rc<dyn FetchHandler>, pub file_fetch_handler: Rc<dyn FetchHandler>,
@ -146,7 +153,7 @@ pub trait FetchHandler: dyn_clone::DynClone {
fn fetch_file( fn fetch_file(
&self, &self,
state: &mut OpState, state: &mut OpState,
url: Url, url: &Url,
) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>); ) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>);
} }
@ -160,7 +167,7 @@ impl FetchHandler for DefaultFileFetchHandler {
fn fetch_file( fn fetch_file(
&self, &self,
_state: &mut OpState, _state: &mut OpState,
_url: Url, _url: &Url,
) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>) { ) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>) {
let fut = async move { let fut = async move {
Ok(Err(type_error( Ok(Err(type_error(
@ -183,20 +190,20 @@ pub struct FetchReturn {
pub fn get_or_create_client_from_state( pub fn get_or_create_client_from_state(
state: &mut OpState, state: &mut OpState,
) -> Result<reqwest::Client, AnyError> { ) -> Result<Client, AnyError> {
if let Some(client) = state.try_borrow::<reqwest::Client>() { if let Some(client) = state.try_borrow::<Client>() {
Ok(client.clone()) Ok(client.clone())
} else { } else {
let options = state.borrow::<Options>(); let options = state.borrow::<Options>();
let client = create_client_from_options(options)?; let client = create_client_from_options(options)?;
state.put::<reqwest::Client>(client.clone()); state.put::<Client>(client.clone());
Ok(client) Ok(client)
} }
} }
pub fn create_client_from_options( pub fn create_client_from_options(
options: &Options, options: &Options,
) -> Result<reqwest::Client, AnyError> { ) -> Result<Client, AnyError> {
create_http_client( create_http_client(
&options.user_agent, &options.user_agent,
CreateHttpClientOptions { CreateHttpClientOptions {
@ -253,11 +260,11 @@ impl Stream for ResourceToBodyAdapter {
} }
Poll::Ready(res) => match res { Poll::Ready(res) => match res {
Ok(buf) if buf.is_empty() => Poll::Ready(None), Ok(buf) if buf.is_empty() => Poll::Ready(None),
Ok(_) => { Ok(buf) => {
this.1 = Some(this.0.clone().read(64 * 1024)); this.1 = Some(this.0.clone().read(64 * 1024));
Poll::Ready(Some(res.map(|b| b.to_vec().into()))) Poll::Ready(Some(Ok(buf.to_vec().into())))
} }
_ => Poll::Ready(Some(res.map(|b| b.to_vec().into()))), Err(err) => Poll::Ready(Some(Err(err))),
}, },
} }
} else { } else {
@ -266,6 +273,22 @@ impl Stream for ResourceToBodyAdapter {
} }
} }
impl hyper::body::Body for ResourceToBodyAdapter {
type Data = Bytes;
type Error = Error;
fn poll_frame(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
match self.poll_next(cx) {
Poll::Ready(Some(res)) => Poll::Ready(Some(res.map(Frame::data))),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
}
}
}
impl Drop for ResourceToBodyAdapter { impl Drop for ResourceToBodyAdapter {
fn drop(&mut self) { fn drop(&mut self) {
self.0.clone().close() self.0.clone().close()
@ -347,9 +370,11 @@ where
file_fetch_handler, .. file_fetch_handler, ..
} = state.borrow_mut::<Options>(); } = state.borrow_mut::<Options>();
let file_fetch_handler = file_fetch_handler.clone(); let file_fetch_handler = file_fetch_handler.clone();
let (request, maybe_cancel_handle) = let (future, maybe_cancel_handle) =
file_fetch_handler.fetch_file(state, url); file_fetch_handler.fetch_file(state, &url);
let request_rid = state.resource_table.add(FetchRequestResource(request)); let request_rid = state
.resource_table
.add(FetchRequestResource { future, url });
let maybe_cancel_handle_rid = maybe_cancel_handle let maybe_cancel_handle_rid = maybe_cancel_handle
.map(|ch| state.resource_table.add(FetchCancelHandle(ch))); .map(|ch| state.resource_table.add(FetchCancelHandle(ch)));
@ -359,31 +384,31 @@ where
let permissions = state.borrow_mut::<FP>(); let permissions = state.borrow_mut::<FP>();
permissions.check_net_url(&url, "fetch()")?; permissions.check_net_url(&url, "fetch()")?;
// Make sure that we have a valid URI early, as reqwest's `RequestBuilder::send` let uri = url
// internally uses `expect_uri`, which panics instead of returning a usable `Result`. .as_str()
if url.as_str().parse::<Uri>().is_err() { .parse::<Uri>()
return Err(type_error("Invalid URL")); .map_err(|_| type_error("Invalid URL"))?;
}
let mut request = client.request(method.clone(), url); let mut con_len = None;
let body = if has_body {
if has_body {
match (data, resource) { match (data, resource) {
(Some(data), _) => { (Some(data), _) => {
// If a body is passed, we use it, and don't return a body for streaming. // If a body is passed, we use it, and don't return a body for streaming.
request = request.body(data.to_vec()); con_len = Some(data.len() as u64);
http_body_util::Full::new(data.to_vec().into())
.map_err(|never| match never {})
.boxed()
} }
(_, Some(resource)) => { (_, Some(resource)) => {
let resource = state.resource_table.take_any(resource)?; let resource = state.resource_table.take_any(resource)?;
match resource.size_hint() { match resource.size_hint() {
(body_size, Some(n)) if body_size == n && body_size > 0 => { (body_size, Some(n)) if body_size == n && body_size > 0 => {
request = con_len = Some(body_size);
request.header(CONTENT_LENGTH, HeaderValue::from(body_size));
} }
_ => {} _ => {}
} }
request = request ReqBody::new(ResourceToBodyAdapter::new(resource))
.body(Body::wrap_stream(ResourceToBodyAdapter::new(resource)))
} }
(None, None) => unreachable!(), (None, None) => unreachable!(),
} }
@ -391,11 +416,21 @@ where
// POST and PUT requests should always have a 0 length content-length, // POST and PUT requests should always have a 0 length content-length,
// if there is no body. https://fetch.spec.whatwg.org/#http-network-or-cache-fetch // if there is no body. https://fetch.spec.whatwg.org/#http-network-or-cache-fetch
if matches!(method, Method::POST | Method::PUT) { if matches!(method, Method::POST | Method::PUT) {
request = request.header(CONTENT_LENGTH, HeaderValue::from(0)); con_len = Some(0);
} }
http_body_util::Empty::new()
.map_err(|never| match never {})
.boxed()
}; };
let mut header_map = HeaderMap::new(); let mut request = http::Request::new(body);
*request.method_mut() = method.clone();
*request.uri_mut() = uri;
if let Some(len) = con_len {
request.headers_mut().insert(CONTENT_LENGTH, len.into());
}
for (key, value) in headers { for (key, value) in headers {
let name = HeaderName::from_bytes(&key) let name = HeaderName::from_bytes(&key)
.map_err(|err| type_error(err.to_string()))?; .map_err(|err| type_error(err.to_string()))?;
@ -403,38 +438,34 @@ where
.map_err(|err| type_error(err.to_string()))?; .map_err(|err| type_error(err.to_string()))?;
if (name != HOST || allow_host) && name != CONTENT_LENGTH { if (name != HOST || allow_host) && name != CONTENT_LENGTH {
header_map.append(name, v); request.headers_mut().append(name, v);
} }
} }
if header_map.contains_key(RANGE) { if request.headers().contains_key(RANGE) {
// https://fetch.spec.whatwg.org/#http-network-or-cache-fetch step 18 // https://fetch.spec.whatwg.org/#http-network-or-cache-fetch step 18
// If httpRequests header list contains `Range`, then append (`Accept-Encoding`, `identity`) // If httpRequests header list contains `Range`, then append (`Accept-Encoding`, `identity`)
header_map request
.headers_mut()
.insert(ACCEPT_ENCODING, HeaderValue::from_static("identity")); .insert(ACCEPT_ENCODING, HeaderValue::from_static("identity"));
} }
request = request.headers(header_map);
let options = state.borrow::<Options>(); let options = state.borrow::<Options>();
if let Some(request_builder_hook) = options.request_builder_hook { if let Some(request_builder_hook) = options.request_builder_hook {
request = request_builder_hook(request) request_builder_hook(&mut request)
.map_err(|err| type_error(err.to_string()))?; .map_err(|err| type_error(err.to_string()))?;
} }
let cancel_handle = CancelHandle::new_rc(); let cancel_handle = CancelHandle::new_rc();
let cancel_handle_ = cancel_handle.clone(); let cancel_handle_ = cancel_handle.clone();
let fut = async move { let fut =
request async move { client.send(request).or_cancel(cancel_handle_).await };
.send()
.or_cancel(cancel_handle_)
.await
.map(|res| res.map_err(|err| err.into()))
};
let request_rid = state let request_rid = state.resource_table.add(FetchRequestResource {
.resource_table future: Box::pin(fut),
.add(FetchRequestResource(Box::pin(fut))); url,
});
let cancel_handle_rid = let cancel_handle_rid =
state.resource_table.add(FetchCancelHandle(cancel_handle)); state.resource_table.add(FetchCancelHandle(cancel_handle));
@ -448,17 +479,21 @@ where
let (body, _) = data_url let (body, _) = data_url
.decode_to_vec() .decode_to_vec()
.map_err(|e| type_error(format!("{e:?}")))?; .map_err(|e| type_error(format!("{e:?}")))?;
let body = http_body_util::Full::new(body.into())
.map_err(|never| match never {})
.boxed();
let response = http::Response::builder() let response = http::Response::builder()
.status(http::StatusCode::OK) .status(http::StatusCode::OK)
.header(http::header::CONTENT_TYPE, data_url.mime_type().to_string()) .header(http::header::CONTENT_TYPE, data_url.mime_type().to_string())
.body(reqwest::Body::from(body))?; .body(body)?;
let fut = async move { Ok(Ok(Response::from(response))) }; let fut = async move { Ok(Ok(response)) };
let request_rid = state let request_rid = state.resource_table.add(FetchRequestResource {
.resource_table future: Box::pin(fut),
.add(FetchRequestResource(Box::pin(fut))); url,
});
(request_rid, None) (request_rid, None)
} }
@ -505,24 +540,21 @@ pub async fn op_fetch_send(
.ok() .ok()
.expect("multiple op_fetch_send ongoing"); .expect("multiple op_fetch_send ongoing");
let res = match request.0.await { let res = match request.future.await {
Ok(Ok(res)) => res, Ok(Ok(res)) => res,
Ok(Err(err)) => { Ok(Err(err)) => {
// We're going to try and rescue the error cause from a stream and return it from this fetch. // We're going to try and rescue the error cause from a stream and return it from this fetch.
// If any error in the chain is a reqwest body error, return that as a special result we can use to // If any error in the chain is a hyper body error, return that as a special result we can use to
// reconstruct an error chain (eg: `new TypeError(..., { cause: new Error(...) })`). // reconstruct an error chain (eg: `new TypeError(..., { cause: new Error(...) })`).
// TODO(mmastrac): it would be a lot easier if we just passed a v8::Global through here instead // TODO(mmastrac): it would be a lot easier if we just passed a v8::Global through here instead
let mut err_ref: &dyn std::error::Error = err.as_ref(); let mut err_ref: &dyn std::error::Error = err.as_ref();
while let Some(err) = std::error::Error::source(err_ref) { while let Some(err) = std::error::Error::source(err_ref) {
if let Some(err) = err.downcast_ref::<reqwest::Error>() { if let Some(err) = err.downcast_ref::<hyper::Error>() {
if err.is_body() { if let Some(err) = std::error::Error::source(err) {
// Extracts the next error cause and uses that for the message return Ok(FetchResponse {
if let Some(err) = std::error::Error::source(err) { error: Some(err.to_string()),
return Ok(FetchResponse { ..Default::default()
error: Some(err.to_string()), });
..Default::default()
});
}
} }
} }
err_ref = err; err_ref = err;
@ -534,14 +566,17 @@ pub async fn op_fetch_send(
}; };
let status = res.status(); let status = res.status();
let url = res.url().to_string(); let url = request.url.into();
let mut res_headers = Vec::new(); let mut res_headers = Vec::new();
for (key, val) in res.headers().iter() { for (key, val) in res.headers().iter() {
res_headers.push((key.as_str().into(), val.as_bytes().into())); res_headers.push((key.as_str().into(), val.as_bytes().into()));
} }
let content_length = res.content_length(); let content_length = hyper::body::Body::size_hint(res.body()).exact();
let remote_addr = res.remote_addr(); let remote_addr = res
.extensions()
.get::<hyper_util::client::legacy::connect::HttpInfo>()
.map(|info| info.remote_addr());
let (remote_addr_ip, remote_addr_port) = if let Some(addr) = remote_addr { let (remote_addr_ip, remote_addr_port) = if let Some(addr) = remote_addr {
(Some(addr.ip().to_string()), Some(addr.port())) (Some(addr.ip().to_string()), Some(addr.port()))
} else { } else {
@ -585,7 +620,8 @@ pub async fn op_fetch_response_upgrade(
let upgraded = raw_response.upgrade().await?; let upgraded = raw_response.upgrade().await?;
{ {
// Stage 3: Pump the data // Stage 3: Pump the data
let (mut upgraded_rx, mut upgraded_tx) = tokio::io::split(upgraded); let (mut upgraded_rx, mut upgraded_tx) =
tokio::io::split(TokioIo::new(upgraded));
spawn(async move { spawn(async move {
let mut buf = [0; 1024]; let mut buf = [0; 1024];
@ -673,11 +709,13 @@ impl Resource for UpgradeStream {
} }
} }
type CancelableResponseResult = Result<Result<Response, AnyError>, Canceled>; type CancelableResponseResult =
Result<Result<http::Response<ResBody>, AnyError>, Canceled>;
pub struct FetchRequestResource( pub struct FetchRequestResource {
pub Pin<Box<dyn Future<Output = CancelableResponseResult>>>, pub future: Pin<Box<dyn Future<Output = CancelableResponseResult>>>,
); pub url: Url,
}
impl Resource for FetchRequestResource { impl Resource for FetchRequestResource {
fn name(&self) -> Cow<str> { fn name(&self) -> Cow<str> {
@ -701,7 +739,7 @@ type BytesStream =
Pin<Box<dyn Stream<Item = Result<bytes::Bytes, std::io::Error>> + Unpin>>; Pin<Box<dyn Stream<Item = Result<bytes::Bytes, std::io::Error>> + Unpin>>;
pub enum FetchResponseReader { pub enum FetchResponseReader {
Start(Response), Start(http::Response<ResBody>),
BodyReader(Peekable<BytesStream>), BodyReader(Peekable<BytesStream>),
} }
@ -719,7 +757,7 @@ pub struct FetchResponseResource {
} }
impl FetchResponseResource { impl FetchResponseResource {
pub fn new(response: Response, size: Option<u64>) -> Self { pub fn new(response: http::Response<ResBody>, size: Option<u64>) -> Self {
Self { Self {
response_reader: AsyncRefCell::new(FetchResponseReader::Start(response)), response_reader: AsyncRefCell::new(FetchResponseReader::Start(response)),
cancel: CancelHandle::default(), cancel: CancelHandle::default(),
@ -727,10 +765,10 @@ impl FetchResponseResource {
} }
} }
pub async fn upgrade(self) -> Result<reqwest::Upgraded, AnyError> { pub async fn upgrade(self) -> Result<hyper::upgrade::Upgraded, AnyError> {
let reader = self.response_reader.into_inner(); let reader = self.response_reader.into_inner();
match reader { match reader {
FetchResponseReader::Start(resp) => Ok(resp.upgrade().await?), FetchResponseReader::Start(resp) => Ok(hyper::upgrade::on(resp).await?),
_ => unreachable!(), _ => unreachable!(),
} }
} }
@ -754,11 +792,12 @@ impl Resource for FetchResponseResource {
match std::mem::take(&mut *reader) { match std::mem::take(&mut *reader) {
FetchResponseReader::Start(resp) => { FetchResponseReader::Start(resp) => {
let stream: BytesStream = Box::pin(resp.bytes_stream().map(|r| { let stream: BytesStream =
r.map_err(|err| { Box::pin(resp.into_body().into_data_stream().map(|r| {
std::io::Error::new(std::io::ErrorKind::Other, err) r.map_err(|err| {
}) std::io::Error::new(std::io::ErrorKind::Other, err)
})); })
}));
*reader = FetchResponseReader::BodyReader(stream.peekable()); *reader = FetchResponseReader::BodyReader(stream.peekable());
} }
FetchResponseReader::BodyReader(_) => unreachable!(), FetchResponseReader::BodyReader(_) => unreachable!(),
@ -922,7 +961,7 @@ impl Default for CreateHttpClientOptions {
} }
} }
/// Create new instance of async reqwest::Client. This client supports /// Create new instance of async Client. This client supports
/// proxies and doesn't follow redirects. /// proxies and doesn't follow redirects.
pub fn create_http_client( pub fn create_http_client(
user_agent: &str, user_agent: &str,
@ -944,43 +983,64 @@ pub fn create_http_client(
alpn_protocols.push("http/1.1".into()); alpn_protocols.push("http/1.1".into());
} }
tls_config.alpn_protocols = alpn_protocols; tls_config.alpn_protocols = alpn_protocols;
let tls_config = Arc::from(tls_config);
let mut headers = HeaderMap::new(); let mut http_connector = HttpConnector::new();
headers.insert(USER_AGENT, user_agent.parse().unwrap()); http_connector.enforce_http(false);
let mut builder = Client::builder() let connector = HttpsConnector::from((http_connector, tls_config.clone()));
.redirect(Policy::none())
.default_headers(headers)
.use_preconfigured_tls(tls_config);
let user_agent = user_agent
.parse::<HeaderValue>()
.map_err(|_| type_error("illegal characters in User-Agent"))?;
let mut builder =
hyper_util::client::legacy::Builder::new(TokioExecutor::new());
builder.timer(TokioTimer::new());
builder.pool_timer(TokioTimer::new());
let mut proxies = proxy::from_env();
if let Some(proxy) = options.proxy { if let Some(proxy) = options.proxy {
let mut reqwest_proxy = reqwest::Proxy::all(&proxy.url)?; let mut intercept = proxy::Intercept::all(&proxy.url)
.ok_or_else(|| type_error("invalid proxy url"))?;
if let Some(basic_auth) = &proxy.basic_auth { if let Some(basic_auth) = &proxy.basic_auth {
reqwest_proxy = intercept.set_auth(&basic_auth.username, &basic_auth.password);
reqwest_proxy.basic_auth(&basic_auth.username, &basic_auth.password);
} }
builder = builder.proxy(reqwest_proxy); proxies.prepend(intercept);
} }
let proxies = Arc::new(proxies);
let mut connector =
proxy::ProxyConnector::new(proxies.clone(), connector, tls_config);
connector.user_agent(user_agent.clone());
if let Some(pool_max_idle_per_host) = options.pool_max_idle_per_host { if let Some(pool_max_idle_per_host) = options.pool_max_idle_per_host {
builder = builder.pool_max_idle_per_host(pool_max_idle_per_host); builder.pool_max_idle_per_host(pool_max_idle_per_host);
} }
if let Some(pool_idle_timeout) = options.pool_idle_timeout { if let Some(pool_idle_timeout) = options.pool_idle_timeout {
builder = builder.pool_idle_timeout( builder.pool_idle_timeout(
pool_idle_timeout.map(std::time::Duration::from_millis), pool_idle_timeout.map(std::time::Duration::from_millis),
); );
} }
match (options.http1, options.http2) { match (options.http1, options.http2) {
(true, false) => builder = builder.http1_only(), (true, false) => {} // noop, handled by ALPN above
(false, true) => builder = builder.http2_prior_knowledge(), (false, true) => {
builder.http2_only(true);
}
(true, true) => {} (true, true) => {}
(false, false) => { (false, false) => {
return Err(type_error("Either `http1` or `http2` needs to be true")) return Err(type_error("Either `http1` or `http2` needs to be true"))
} }
} }
builder.build().map_err(|e| e.into()) let pooled_client = builder.build(connector);
let decompress = Decompression::new(pooled_client).gzip(true).br(true);
Ok(Client {
inner: decompress,
proxies,
user_agent,
})
} }
#[op2] #[op2]
@ -990,3 +1050,41 @@ pub fn op_utf8_to_byte_string(
) -> Result<ByteString, AnyError> { ) -> Result<ByteString, AnyError> {
Ok(input.into()) Ok(input.into())
} }
#[derive(Clone, Debug)]
pub struct Client {
inner: Decompression<hyper_util::client::legacy::Client<Connector, ReqBody>>,
// Used to check whether to include a proxy-authorization header
proxies: Arc<proxy::Proxies>,
user_agent: HeaderValue,
}
type Connector = proxy::ProxyConnector<HttpsConnector<HttpConnector>>;
// clippy is wrong here
#[allow(clippy::declare_interior_mutable_const)]
const STAR_STAR: HeaderValue = HeaderValue::from_static("*/*");
impl Client {
pub async fn send(
self,
mut req: http::Request<ReqBody>,
) -> Result<http::Response<ResBody>, AnyError> {
req
.headers_mut()
.entry(USER_AGENT)
.or_insert_with(|| self.user_agent.clone());
req.headers_mut().entry(ACCEPT).or_insert(STAR_STAR);
if let Some(auth) = self.proxies.http_forward_auth(req.uri()) {
req.headers_mut().insert(PROXY_AUTHORIZATION, auth.clone());
}
let resp = self.inner.oneshot(req).await?;
Ok(resp.map(|b| b.map_err(|e| anyhow!(e)).boxed()))
}
}
pub type ReqBody = http_body_util::combinators::BoxBody<Bytes, Error>;
pub type ResBody = http_body_util::combinators::BoxBody<Bytes, Error>;

860
ext/fetch/proxy.rs Normal file
View file

@ -0,0 +1,860 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
//! Parts of this module should be able to be replaced with other crates
//! eventually, once generic versions appear in hyper-util, et al.
use std::env;
use std::future::Future;
use std::net::IpAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
use deno_core::futures::TryFutureExt;
use deno_tls::rustls::ClientConfig as TlsConfig;
use http::header::HeaderValue;
use http::uri::Scheme;
use http::Uri;
use hyper_util::client::legacy::connect::Connected;
use hyper_util::client::legacy::connect::Connection;
use hyper_util::rt::TokioIo;
use ipnet::IpNet;
use tokio::net::TcpStream;
use tokio_rustls::client::TlsStream;
use tokio_rustls::TlsConnector;
use tokio_socks::tcp::Socks5Stream;
use tower_service::Service;
/// Connector that layers proxy handling (HTTP forward, HTTP CONNECT tunnel,
/// or SOCKS5) on top of an inner connector `C`.
#[derive(Debug, Clone)]
pub(crate) struct ProxyConnector<C> {
  // Inner connector used both for direct connections and to reach proxies.
  connector: C,
  // Proxy configuration (from env vars and/or explicit settings).
  proxies: Arc<Proxies>,
  // TLS config used inside CONNECT and SOCKS tunnels to https:// targets.
  tls: Arc<TlsConfig>,
  // Optional `User-Agent` to send on CONNECT requests.
  user_agent: Option<HeaderValue>,
}
/// The full proxy configuration: an ordered list of intercept rules plus an
/// optional NO_PROXY exclusion list. Earlier intercepts take priority.
#[derive(Debug)]
pub(crate) struct Proxies {
  no: Option<NoProxy>,
  intercepts: Vec<Intercept>,
}
/// One proxy rule: which request schemes it applies to and where to send them.
#[derive(Clone)]
pub(crate) struct Intercept {
  filter: Filter,
  target: Target,
}
/// The proxy endpoint itself, with scheme-appropriate credentials.
#[derive(Clone)]
enum Target {
  Http {
    dst: Uri,
    auth: Option<HeaderValue>,
  },
  Https {
    dst: Uri,
    auth: Option<HeaderValue>,
  },
  Socks {
    dst: Uri,
    auth: Option<(String, String)>,
  },
}
/// Which request schemes an intercept applies to.
#[derive(Debug, Clone, Copy)]
enum Filter {
  Http,
  Https,
  All,
}
/// Builds the proxy configuration from the conventional environment
/// variables: `ALL_PROXY`, `HTTPS_PROXY`, `HTTP_PROXY` (uppercase variants
/// take precedence over lowercase), plus `NO_PROXY` exclusions.
///
/// Earlier entries in the returned list take priority in `Proxies::intercept`,
/// so an `ALL_PROXY` rule wins over the scheme-specific ones.
pub(crate) fn from_env() -> Proxies {
  let mut intercepts = Vec::new();
  if let Some(proxy) = parse_env_var("ALL_PROXY", Filter::All) {
    intercepts.push(proxy);
  } else if let Some(proxy) = parse_env_var("all_proxy", Filter::All) {
    intercepts.push(proxy);
  }
  if let Some(proxy) = parse_env_var("HTTPS_PROXY", Filter::Https) {
    intercepts.push(proxy);
  } else if let Some(proxy) = parse_env_var("https_proxy", Filter::Https) {
    intercepts.push(proxy);
  }
  // In a CGI context, headers become environment variables. So, "Proxy:" becomes HTTP_PROXY.
  // To prevent an attacker from injecting a proxy, check if we are in CGI.
  if env::var_os("REQUEST_METHOD").is_none() {
    if let Some(proxy) = parse_env_var("HTTP_PROXY", Filter::Http) {
      intercepts.push(proxy);
    // Bug fix: the lowercase variant was registered with `Filter::Https`,
    // which made `http_proxy` intercept https:// requests and never http://
    // ones. It must use `Filter::Http`, mirroring the uppercase branch.
    } else if let Some(proxy) = parse_env_var("http_proxy", Filter::Http) {
      intercepts.push(proxy);
    }
  }
  let no = NoProxy::from_env();
  Proxies { intercepts, no }
}
/// Encodes `user`/`pass` as a `Basic <base64(user:pass)>` header value.
/// The result is marked sensitive so it is redacted from `Debug` output.
pub(crate) fn basic_auth(user: &str, pass: &str) -> HeaderValue {
  use base64::prelude::BASE64_STANDARD;
  use base64::write::EncoderWriter;
  use std::io::Write;
  let mut buf = Vec::from(&b"Basic "[..]);
  let mut encoder = EncoderWriter::new(&mut buf, &BASE64_STANDARD);
  let _ = write!(encoder, "{user}:{pass}");
  // Finish the base64 stream before reading `buf` back.
  drop(encoder);
  let mut header =
    HeaderValue::from_bytes(&buf).expect("base64 is always valid HeaderValue");
  header.set_sensitive(true);
  header
}
/// Reads the environment variable `name` and, when present and parseable as a
/// proxy target, wraps it in an `Intercept` with the given `filter`.
fn parse_env_var(name: &str, filter: Filter) -> Option<Intercept> {
  env::var(name)
    .ok()
    .and_then(|raw| Target::parse(&raw))
    .map(|target| Intercept { filter, target })
}
impl Intercept {
  /// Builds an intercept that matches every request, pointed at the proxy
  /// described by `s` (same format as the proxy environment variables).
  pub(crate) fn all(s: &str) -> Option<Self> {
    Target::parse(s).map(|target| Intercept {
      filter: Filter::All,
      target,
    })
  }
  /// Attaches credentials to the proxy target. HTTP(S) targets store a
  /// ready-made `Basic` header value; SOCKS targets keep the raw pair.
  pub(crate) fn set_auth(&mut self, user: &str, pass: &str) {
    match &mut self.target {
      Target::Http { auth, .. } | Target::Https { auth, .. } => {
        *auth = Some(basic_auth(user, pass));
      }
      Target::Socks { auth, .. } => {
        *auth = Some((user.into(), pass.into()));
      }
    }
  }
}
// Manual `Debug` impl: deliberately omits `target`, which may embed
// credentials (a `Proxy-Authorization` value or a SOCKS user/password pair).
impl std::fmt::Debug for Intercept {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    f.debug_struct("Intercept")
      .field("filter", &self.filter)
      .finish()
  }
}
impl Target {
  /// Parses a proxy URL (as found in e.g. `HTTP_PROXY`) into a `Target`.
  ///
  /// Accepted schemes are `http`, `https`, `socks5` and `socks5h`; a missing
  /// scheme defaults to `http`. Userinfo (`user:pass@host`) is stripped from
  /// the resulting URI and converted into the appropriate auth form. Any path
  /// is discarded. Returns `None` for anything unusable as a proxy.
  fn parse(val: &str) -> Option<Self> {
    let uri = val.parse::<Uri>().ok()?;
    let mut builder = Uri::builder();
    let mut is_socks = false;
    let mut http_auth = None;
    let mut socks_auth = None;
    builder = builder.scheme(match uri.scheme() {
      Some(s) => {
        if s == &Scheme::HTTP || s == &Scheme::HTTPS {
          s.clone()
        } else if s.as_str() == "socks5" || s.as_str() == "socks5h" {
          is_socks = true;
          s.clone()
        } else {
          // can't use this proxy scheme
          return None;
        }
      }
      // if no scheme provided, assume they meant 'http'
      None => Scheme::HTTP,
    });
    let authority = uri.authority()?;
    if let Some((userinfo, host_port)) = authority.as_str().split_once('@') {
      // Userinfo must be `user:pass`; a lone user name is rejected.
      let (user, pass) = userinfo.split_once(':')?;
      if is_socks {
        socks_auth = Some((user.into(), pass.into()));
      } else {
        http_auth = Some(basic_auth(user, pass));
      }
      builder = builder.authority(host_port);
    } else {
      builder = builder.authority(authority.clone());
    }
    // removing any path, but we MUST specify one or the builder errors
    builder = builder.path_and_query("/");
    let dst = builder.build().ok()?;
    // Scheme is guaranteed set above, hence the `unwrap`.
    let target = match dst.scheme().unwrap().as_str() {
      "https" => Target::Https {
        dst,
        auth: http_auth,
      },
      "http" => Target::Http {
        dst,
        auth: http_auth,
      },
      "socks5" | "socks5h" => Target::Socks {
        dst,
        auth: socks_auth,
      },
      // shouldn't happen
      _ => return None,
    };
    Some(target)
  }
}
/// Parsed `NO_PROXY` configuration: hosts matching either list bypass
/// any configured proxy.
#[derive(Debug)]
struct NoProxy {
  domains: DomainMatcher,
  ips: IpMatcher,
}
/// Represents a possible matching entry for an IP address
#[derive(Clone, Debug)]
enum Ip {
  // A single exact address.
  Address(IpAddr),
  // A CIDR block, e.g. `10.0.0.0/8`.
  Network(IpNet),
}
/// A wrapper around a list of IP cidr blocks or addresses with a [IpMatcher::contains] method for
/// checking if an IP address is contained within the matcher
#[derive(Clone, Debug, Default)]
struct IpMatcher(Vec<Ip>);
/// A wrapper around a list of domains with a [DomainMatcher::contains] method for checking if a
/// domain is contained within the matcher
#[derive(Clone, Debug, Default)]
struct DomainMatcher(Vec<String>);
impl NoProxy {
  /// Returns a new no-proxy configuration based on environment variables (or `None` if no variables are set)
  /// see [self::NoProxy::from_string()] for the string format
  fn from_env() -> Option<NoProxy> {
    // Uppercase `NO_PROXY` wins over lowercase `no_proxy`.
    let raw = env::var("NO_PROXY")
      .or_else(|_| env::var("no_proxy"))
      .unwrap_or_default();
    Self::from_string(&raw)
  }
  /// Returns a new no-proxy configuration based on a `no_proxy` string (or `None` if no variables
  /// are set)
  /// The rules are as follows:
  /// * The environment variable `NO_PROXY` is checked, if it is not set, `no_proxy` is checked
  /// * If neither environment variable is set, `None` is returned
  /// * Entries are expected to be comma-separated (whitespace between entries is ignored)
  /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding /size,
  ///   for example "`192.168.1.0/24`").
  /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed)
  /// * Any other entry is considered a domain name (and may contain a leading dot, for example `google.com`
  ///   and `.google.com` are equivalent) and would match both that domain AND all subdomains.
  ///
  /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would match
  /// (and therefore would bypass the proxy):
  /// * `http://google.com/`
  /// * `http://www.google.com/`
  /// * `http://192.168.1.42/`
  ///
  /// The URL `http://notgoogle.com/` would not match.
  fn from_string(no_proxy_list: &str) -> Option<Self> {
    if no_proxy_list.is_empty() {
      return None;
    }
    let mut ips = Vec::new();
    let mut domains = Vec::new();
    let parts = no_proxy_list.split(',').map(str::trim);
    for part in parts {
      match part.parse::<IpNet>() {
        // If we can parse an IP net or address, then use it, otherwise, assume it is a domain
        Ok(ip) => ips.push(Ip::Network(ip)),
        Err(_) => match part.parse::<IpAddr>() {
          Ok(addr) => ips.push(Ip::Address(addr)),
          Err(_) => domains.push(part.to_owned()),
        },
      }
    }
    Some(NoProxy {
      ips: IpMatcher(ips),
      domains: DomainMatcher(domains),
    })
  }
  /// Returns true if `host` (an IP literal or a domain name) should bypass
  /// the proxy according to this configuration.
  fn contains(&self, host: &str) -> bool {
    // According to RFC3986, raw IPv6 hosts will be wrapped in []. So we need to strip those off
    // the end in order to parse correctly
    let host = if host.starts_with('[') {
      let x: &[_] = &['[', ']'];
      host.trim_matches(x)
    } else {
      host
    };
    match host.parse::<IpAddr>() {
      // If we can parse an IP addr, then use it, otherwise, assume it is a domain
      Ok(ip) => self.ips.contains(ip),
      Err(_) => self.domains.contains(host),
    }
  }
}
impl IpMatcher {
  /// Returns true if `addr` equals any listed address or falls inside any
  /// listed CIDR block.
  fn contains(&self, addr: IpAddr) -> bool {
    self.0.iter().any(|entry| match entry {
      Ip::Address(address) => addr == *address,
      Ip::Network(net) => net.contains(&addr),
    })
  }
}
impl DomainMatcher {
  // The following links may be useful to understand the origin of these rules:
  // * https://curl.se/libcurl/c/CURLOPT_NOPROXY.html
  // * https://github.com/curl/curl/issues/1208
  /// Returns true if `domain` matches any entry: exact match, leading-dot
  /// equivalence (`.foo.bar` matches `foo.bar`), any subdomain of an entry,
  /// or the `*` wildcard.
  fn contains(&self, domain: &str) -> bool {
    let domain_len = domain.len();
    for d in &self.0 {
      if d == domain || d.strip_prefix('.') == Some(domain) {
        return true;
      } else if domain.ends_with(d) {
        if d.starts_with('.') {
          // If the first character of d is a dot, that means the first character of domain
          // must also be a dot, so we are looking at a subdomain of d and that matches
          return true;
        } else if domain.as_bytes().get(domain_len - d.len() - 1) == Some(&b'.')
        {
          // Given that d is a prefix of domain, if the prior character in domain is a dot
          // then that means we must be matching a subdomain of d, and that matches
          return true;
        }
      } else if d == "*" {
        return true;
      }
    }
    false
  }
}
impl<C> ProxyConnector<C> {
  /// Wraps `connector` with the proxy rules in `proxies`; `tls` is used to
  /// establish TLS inside CONNECT and SOCKS tunnels to https:// targets.
  pub(crate) fn new(
    proxies: Arc<Proxies>,
    connector: C,
    tls: Arc<TlsConfig>,
  ) -> Self {
    ProxyConnector {
      connector,
      proxies,
      tls,
      user_agent: None,
    }
  }
  /// Sets the `User-Agent` header sent on HTTP CONNECT requests.
  pub(crate) fn user_agent(&mut self, val: HeaderValue) {
    self.user_agent = Some(val);
  }
  /// Returns the first proxy rule that should handle `dst`, if any.
  fn intercept(&self, dst: &Uri) -> Option<&Intercept> {
    self.proxies.intercept(dst)
  }
}
impl Proxies {
  /// Inserts `intercept` at the front of the list so it takes priority over
  /// any environment-derived rules.
  pub(crate) fn prepend(&mut self, intercept: Intercept) {
    self.intercepts.insert(0, intercept);
  }
  /// Returns the `Proxy-Authorization` value to forward for `dst`, but only
  /// when the matching proxy is a plain HTTP forwarding proxy (tunneled
  /// proxies send their credentials during CONNECT instead).
  pub(crate) fn http_forward_auth(&self, dst: &Uri) -> Option<&HeaderValue> {
    match &self.intercept(dst)?.target {
      // Only if the proxy target is http
      Target::Http { auth, .. } => auth.as_ref(),
      _ => None,
    }
  }
  /// Returns the first intercept whose filter matches `dst`'s scheme, unless
  /// the host is excluded by the NO_PROXY configuration.
  fn intercept(&self, dst: &Uri) -> Option<&Intercept> {
    if let Some(no_proxy) = &self.no {
      if no_proxy.contains(dst.host()?) {
        return None;
      }
    }
    let scheme = dst.scheme().map(Scheme::as_str).unwrap_or("");
    self.intercepts.iter().find(|intercept| {
      matches!(
        (intercept.filter, scheme),
        (Filter::All, _) | (Filter::Https, "https") | (Filter::Http, "http")
      )
    })
  }
}
// Boxed future/error aliases used by the `Service` impl below.
type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;
type BoxError = Box<dyn std::error::Error + Send + Sync>;
// These variants are not to be inspected.
/// The connection produced by `ProxyConnector`: one variant per way a request
/// can reach its destination.
pub enum Proxied<T> {
  /// Not proxied
  PassThrough(T),
  /// An HTTP forwarding proxy needed absolute-form
  HttpForward(T),
  /// Tunneled through HTTP CONNECT
  HttpTunneled(Box<TokioIo<TlsStream<TokioIo<T>>>>),
  /// Tunneled through SOCKS
  Socks(TokioIo<TcpStream>),
  /// Tunneled through SOCKS and TLS
  SocksTls(TokioIo<TlsStream<TokioIo<TokioIo<TcpStream>>>>),
}
impl<C> Service<Uri> for ProxyConnector<C>
where
  C: Service<Uri>,
  C::Response: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static,
  C::Future: Send + 'static,
  C::Error: Into<BoxError> + 'static,
{
  type Response = Proxied<C::Response>;
  type Error = BoxError;
  type Future = BoxFuture<Result<Self::Response, Self::Error>>;
  // Readiness is simply delegated to the inner connector.
  fn poll_ready(
    &mut self,
    cx: &mut Context<'_>,
  ) -> Poll<Result<(), Self::Error>> {
    self.connector.poll_ready(cx).map_err(Into::into)
  }
  /// Connects to `orig_dst`, possibly via a proxy.
  ///
  /// When a proxy rule matches:
  /// - HTTP(S) proxy + https destination: CONNECT tunnel, then TLS to the
  ///   origin inside the tunnel.
  /// - HTTP(S) proxy + http destination: plain forwarding connection.
  /// - SOCKS5 proxy: SOCKS handshake (with optional user/pass), then TLS
  ///   inside the tunnel for https destinations.
  /// Otherwise the inner connector is used directly.
  fn call(&mut self, orig_dst: Uri) -> Self::Future {
    if let Some(intercept) = self.intercept(&orig_dst).cloned() {
      let is_https = orig_dst.scheme() == Some(&Scheme::HTTPS);
      let user_agent = self.user_agent.clone();
      return match intercept.target {
        // Http and Https proxies are handled identically here: the scheme of
        // the *destination* decides whether to tunnel.
        Target::Http {
          dst: proxy_dst,
          auth,
        }
        | Target::Https {
          dst: proxy_dst,
          auth,
        } => {
          let connecting = self.connector.call(proxy_dst);
          let tls = TlsConnector::from(self.tls.clone());
          Box::pin(async move {
            let mut io = connecting.await.map_err(Into::into)?;
            if is_https {
              tunnel(&mut io, &orig_dst, user_agent, auth).await?;
              let tokio_io = TokioIo::new(io);
              let io = tls
                .connect(
                  TryFrom::try_from(orig_dst.host().unwrap().to_owned())?,
                  tokio_io,
                )
                .await?;
              Ok(Proxied::HttpTunneled(Box::new(TokioIo::new(io))))
            } else {
              Ok(Proxied::HttpForward(io))
            }
          })
        }
        Target::Socks {
          dst: proxy_dst,
          auth,
        } => {
          let tls = TlsConnector::from(self.tls.clone());
          Box::pin(async move {
            // Default SOCKS port is 1080 when none is given.
            let socks_addr = (
              proxy_dst.host().unwrap(),
              proxy_dst.port().map(|p| p.as_u16()).unwrap_or(1080),
            );
            let host = orig_dst.host().ok_or("no host in url")?;
            let port = match orig_dst.port() {
              Some(p) => p.as_u16(),
              None if is_https => 443,
              _ => 80,
            };
            let io = if let Some((user, pass)) = auth {
              Socks5Stream::connect_with_password(
                socks_addr,
                (host, port),
                &user,
                &pass,
              )
              .await?
            } else {
              Socks5Stream::connect(socks_addr, (host, port)).await?
            };
            let io = TokioIo::new(io.into_inner());
            if is_https {
              let tokio_io = TokioIo::new(io);
              let io = tls
                .connect(TryFrom::try_from(host.to_owned())?, tokio_io)
                .await?;
              Ok(Proxied::SocksTls(TokioIo::new(io)))
            } else {
              Ok(Proxied::Socks(io))
            }
          })
        }
      };
    }
    // No proxy rule matched: connect directly.
    Box::pin(
      self
        .connector
        .call(orig_dst)
        .map_ok(Proxied::PassThrough)
        .map_err(Into::into),
    )
  }
}
/// Performs an HTTP/1.1 CONNECT handshake over `io`, asking the proxy to open
/// a tunnel to `dst`'s host:port.
///
/// Optional `User-Agent` and `Proxy-Authorization` headers are included in
/// the CONNECT request. Succeeds only on a complete `HTTP/1.x 200` response;
/// a 407 is reported as "proxy authentication required", anything else as an
/// unsuccessful tunnel. Response headers larger than the 8 KiB buffer are
/// rejected.
async fn tunnel<T>(
  io: &mut T,
  dst: &Uri,
  user_agent: Option<HeaderValue>,
  auth: Option<HeaderValue>,
) -> Result<(), BoxError>
where
  T: hyper::rt::Read + hyper::rt::Write + Unpin,
{
  use tokio::io::AsyncReadExt;
  use tokio::io::AsyncWriteExt;
  let host = dst.host().expect("proxy dst has host");
  let port = match dst.port() {
    Some(p) => p.as_u16(),
    None => match dst.scheme().map(Scheme::as_str).unwrap_or("") {
      "https" => 443,
      "http" => 80,
      _ => return Err("proxy dst unexpected scheme".into()),
    },
  };
  let mut buf = format!(
    "\
     CONNECT {host}:{port} HTTP/1.1\r\n\
     Host: {host}:{port}\r\n\
     "
  )
  .into_bytes();
  // user-agent
  if let Some(user_agent) = user_agent {
    buf.extend_from_slice(b"User-Agent: ");
    buf.extend_from_slice(user_agent.as_bytes());
    buf.extend_from_slice(b"\r\n");
  }
  // proxy-authorization
  if let Some(value) = auth {
    buf.extend_from_slice(b"Proxy-Authorization: ");
    buf.extend_from_slice(value.as_bytes());
    buf.extend_from_slice(b"\r\n");
  }
  // headers end
  buf.extend_from_slice(b"\r\n");
  // Adapt the hyper I/O traits to tokio's so we can use read/write helpers.
  let mut tokio_conn = TokioIo::new(io);
  tokio_conn.write_all(&buf).await?;
  let mut buf = [0; 8192];
  let mut pos = 0;
  // Read until the full status line + headers (terminated by CRLFCRLF) arrive.
  loop {
    let n = tokio_conn.read(&mut buf[pos..]).await?;
    if n == 0 {
      return Err("unexpected eof while tunneling".into());
    }
    pos += n;
    let recvd = &buf[..pos];
    if recvd.starts_with(b"HTTP/1.1 200") || recvd.starts_with(b"HTTP/1.0 200")
    {
      if recvd.ends_with(b"\r\n\r\n") {
        return Ok(());
      }
      if pos == buf.len() {
        return Err("proxy headers too long for tunnel".into());
      }
    // else read more
    } else if recvd.starts_with(b"HTTP/1.1 407") {
      return Err("proxy authentication required".into());
    } else {
      return Err("unsuccessful tunnel".into());
    }
  }
}
// Reads are delegated verbatim to whichever underlying transport this
// connection variant wraps.
impl<T> hyper::rt::Read for Proxied<T>
where
  T: hyper::rt::Read + hyper::rt::Write + Unpin,
{
  fn poll_read(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
    buf: hyper::rt::ReadBufCursor<'_>,
  ) -> Poll<Result<(), std::io::Error>> {
    match *self {
      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_read(cx, buf),
      Proxied::HttpForward(ref mut p) => Pin::new(p).poll_read(cx, buf),
      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_read(cx, buf),
      Proxied::Socks(ref mut p) => Pin::new(p).poll_read(cx, buf),
      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_read(cx, buf),
    }
  }
}
// Writes (including vectored writes, flush and shutdown) are delegated
// verbatim to whichever underlying transport this connection variant wraps.
impl<T> hyper::rt::Write for Proxied<T>
where
  T: hyper::rt::Read + hyper::rt::Write + Unpin,
{
  fn poll_write(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
    buf: &[u8],
  ) -> Poll<Result<usize, std::io::Error>> {
    match *self {
      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_write(cx, buf),
      Proxied::HttpForward(ref mut p) => Pin::new(p).poll_write(cx, buf),
      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_write(cx, buf),
      Proxied::Socks(ref mut p) => Pin::new(p).poll_write(cx, buf),
      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_write(cx, buf),
    }
  }
  fn poll_flush(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
  ) -> Poll<Result<(), std::io::Error>> {
    match *self {
      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_flush(cx),
      Proxied::HttpForward(ref mut p) => Pin::new(p).poll_flush(cx),
      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_flush(cx),
      Proxied::Socks(ref mut p) => Pin::new(p).poll_flush(cx),
      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_flush(cx),
    }
  }
  fn poll_shutdown(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
  ) -> Poll<Result<(), std::io::Error>> {
    match *self {
      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_shutdown(cx),
      Proxied::HttpForward(ref mut p) => Pin::new(p).poll_shutdown(cx),
      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_shutdown(cx),
      Proxied::Socks(ref mut p) => Pin::new(p).poll_shutdown(cx),
      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_shutdown(cx),
    }
  }
  fn is_write_vectored(&self) -> bool {
    match *self {
      Proxied::PassThrough(ref p) => p.is_write_vectored(),
      Proxied::HttpForward(ref p) => p.is_write_vectored(),
      Proxied::HttpTunneled(ref p) => p.is_write_vectored(),
      Proxied::Socks(ref p) => p.is_write_vectored(),
      Proxied::SocksTls(ref p) => p.is_write_vectored(),
    }
  }
  fn poll_write_vectored(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
    bufs: &[std::io::IoSlice<'_>],
  ) -> Poll<Result<usize, std::io::Error>> {
    match *self {
      Proxied::PassThrough(ref mut p) => {
        Pin::new(p).poll_write_vectored(cx, bufs)
      }
      Proxied::HttpForward(ref mut p) => {
        Pin::new(p).poll_write_vectored(cx, bufs)
      }
      Proxied::HttpTunneled(ref mut p) => {
        Pin::new(p).poll_write_vectored(cx, bufs)
      }
      Proxied::Socks(ref mut p) => Pin::new(p).poll_write_vectored(cx, bufs),
      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_write_vectored(cx, bufs),
    }
  }
}
impl<T> Connection for Proxied<T>
where
  T: Connection,
{
  fn connected(&self) -> Connected {
    match self {
      Proxied::PassThrough(ref p) => p.connected(),
      // `proxy(true)` tells the hyper client this is a forwarding proxy, so
      // requests are written in absolute-form.
      Proxied::HttpForward(ref p) => p.connected().proxy(true),
      // For TLS-wrapped variants, report the innermost transport's info.
      Proxied::HttpTunneled(ref p) => p.inner().get_ref().0.connected(),
      Proxied::Socks(ref p) => p.connected(),
      Proxied::SocksTls(ref p) => p.inner().get_ref().0.connected(),
    }
  }
}
// Exercises `Target::parse` on each supported proxy-URL shape: with/without
// scheme, with userinfo, and the two SOCKS variants.
#[test]
fn test_proxy_parse_from_env() {
  fn parse(s: &str) -> Target {
    Target::parse(s).unwrap()
  }
  // normal
  match parse("http://127.0.0.1:6666") {
    Target::Http { dst, auth } => {
      assert_eq!(dst, "http://127.0.0.1:6666");
      assert!(auth.is_none());
    }
    _ => panic!("bad target"),
  }
  // without scheme
  match parse("127.0.0.1:6666") {
    Target::Http { dst, auth } => {
      assert_eq!(dst, "http://127.0.0.1:6666");
      assert!(auth.is_none());
    }
    _ => panic!("bad target"),
  }
  // with userinfo
  match parse("user:pass@127.0.0.1:6666") {
    Target::Http { dst, auth } => {
      assert_eq!(dst, "http://127.0.0.1:6666");
      assert!(auth.is_some());
      // credentials must be marked sensitive so Debug output redacts them
      assert!(auth.unwrap().is_sensitive());
    }
    _ => panic!("bad target"),
  }
  // socks
  match parse("socks5://user:pass@127.0.0.1:6666") {
    Target::Socks { dst, auth } => {
      assert_eq!(dst, "socks5://127.0.0.1:6666");
      assert!(auth.is_some());
    }
    _ => panic!("bad target"),
  }
  // socks5h
  match parse("socks5h://localhost:6666") {
    Target::Socks { dst, auth } => {
      assert_eq!(dst, "socks5h://localhost:6666");
      assert!(auth.is_none());
    }
    _ => panic!("bad target"),
  }
}
// Covers the DomainMatcher rules: leading-dot entries, exact matches,
// subdomain matches, and rejection of same-suffix non-subdomains.
#[test]
fn test_domain_matcher() {
  let domains = vec![".foo.bar".into(), "bar.foo".into()];
  let matcher = DomainMatcher(domains);
  // domains match with leading `.`
  assert!(matcher.contains("foo.bar"));
  // subdomains match with leading `.`
  assert!(matcher.contains("www.foo.bar"));
  // domains match with no leading `.`
  assert!(matcher.contains("bar.foo"));
  // subdomains match with no leading `.`
  assert!(matcher.contains("www.bar.foo"));
  // non-subdomain string prefixes don't match
  assert!(!matcher.contains("notfoo.bar"));
  assert!(!matcher.contains("notbar.foo"));
}
// The `*` entry must bypass the proxy for every host.
#[test]
fn test_no_proxy_wildcard() {
  let no_proxy = NoProxy::from_string("*").unwrap();
  assert!(no_proxy.contains("any.where"));
}
// Mixed NO_PROXY list: domains (with and without leading dot), IPv4/IPv6
// exact addresses, and IPv4/IPv6 CIDR ranges.
#[test]
fn test_no_proxy_ip_ranges() {
  let no_proxy = NoProxy::from_string(
    ".foo.bar, bar.baz,10.42.1.1/24,::1,10.124.7.8,2001::/17",
  )
  .unwrap();
  let should_not_match = [
    // random url, not in no_proxy
    "deno.com",
    // make sure that random non-subdomain string prefixes don't match
    "notfoo.bar",
    // make sure that random non-subdomain string prefixes don't match
    "notbar.baz",
    // ipv4 address out of range
    "10.43.1.1",
    // ipv4 address out of range
    "10.124.7.7",
    // ipv6 address out of range
    "[ffff:db8:a0b:12f0::1]",
    // ipv6 address out of range
    "[2005:db8:a0b:12f0::1]",
  ];
  for host in &should_not_match {
    assert!(!no_proxy.contains(host), "should not contain {:?}", host);
  }
  let should_match = [
    // make sure subdomains (with leading .) match
    "hello.foo.bar",
    // make sure exact matches (without leading .) match (also makes sure spaces between entries work)
    "bar.baz",
    // make sure subdomains (without leading . in no_proxy) match
    "foo.bar.baz",
    // make sure subdomains (without leading . in no_proxy) match - this differs from cURL
    "foo.bar",
    // ipv4 address match within range
    "10.42.1.100",
    // ipv6 address exact match
    "[::1]",
    // ipv6 address match within range
    "[2001:db8:a0b:12f0::1]",
    // ipv4 address exact match
    "10.124.7.8",
  ];
  for host in &should_match {
    assert!(no_proxy.contains(host), "should contain {:?}", host);
  }
}

View file

@ -29,6 +29,7 @@ denokv_remote.workspace = true
denokv_sqlite.workspace = true denokv_sqlite.workspace = true
faster-hex.workspace = true faster-hex.workspace = true
http.workspace = true http.workspace = true
http-body-util.workspace = true
log.workspace = true log.workspace = true
num-bigint.workspace = true num-bigint.workspace = true
prost.workspace = true prost.workspace = true

View file

@ -12,10 +12,8 @@ use bytes::Bytes;
use deno_core::error::type_error; use deno_core::error::type_error;
use deno_core::error::AnyError; use deno_core::error::AnyError;
use deno_core::futures::Stream; use deno_core::futures::Stream;
use deno_core::futures::TryStreamExt as _;
use deno_core::OpState; use deno_core::OpState;
use deno_fetch::create_http_client; use deno_fetch::create_http_client;
use deno_fetch::reqwest;
use deno_fetch::CreateHttpClientOptions; use deno_fetch::CreateHttpClientOptions;
use deno_tls::rustls::RootCertStore; use deno_tls::rustls::RootCertStore;
use deno_tls::Proxy; use deno_tls::Proxy;
@ -25,6 +23,7 @@ use denokv_remote::MetadataEndpoint;
use denokv_remote::Remote; use denokv_remote::Remote;
use denokv_remote::RemoteResponse; use denokv_remote::RemoteResponse;
use denokv_remote::RemoteTransport; use denokv_remote::RemoteTransport;
use http_body_util::BodyExt;
use url::Url; use url::Url;
#[derive(Clone)] #[derive(Clone)]
@ -109,35 +108,43 @@ impl<P: RemoteDbHandlerPermissions + 'static> denokv_remote::RemotePermissions
} }
#[derive(Clone)] #[derive(Clone)]
pub struct ReqwestClient(reqwest::Client); pub struct FetchClient(deno_fetch::Client);
pub struct ReqwestResponse(reqwest::Response); pub struct FetchResponse(http::Response<deno_fetch::ResBody>);
impl RemoteTransport for ReqwestClient { impl RemoteTransport for FetchClient {
type Response = ReqwestResponse; type Response = FetchResponse;
async fn post( async fn post(
&self, &self,
url: Url, url: Url,
headers: http::HeaderMap, headers: http::HeaderMap,
body: Bytes, body: Bytes,
) -> Result<(Url, http::StatusCode, Self::Response), anyhow::Error> { ) -> Result<(Url, http::StatusCode, Self::Response), anyhow::Error> {
let res = self.0.post(url).headers(headers).body(body).send().await?; let body = http_body_util::Full::new(body)
let url = res.url().clone(); .map_err(|never| match never {})
.boxed();
let mut req = http::Request::new(body);
*req.method_mut() = http::Method::POST;
*req.uri_mut() = url.as_str().parse()?;
*req.headers_mut() = headers;
let res = self.0.clone().send(req).await?;
let status = res.status(); let status = res.status();
Ok((url, status, ReqwestResponse(res))) Ok((url, status, FetchResponse(res)))
} }
} }
impl RemoteResponse for ReqwestResponse { impl RemoteResponse for FetchResponse {
async fn bytes(self) -> Result<Bytes, anyhow::Error> { async fn bytes(self) -> Result<Bytes, anyhow::Error> {
Ok(self.0.bytes().await?) Ok(self.0.collect().await?.to_bytes())
} }
fn stream( fn stream(
self, self,
) -> impl Stream<Item = Result<Bytes, anyhow::Error>> + Send + Sync { ) -> impl Stream<Item = Result<Bytes, anyhow::Error>> + Send + Sync {
self.0.bytes_stream().map_err(|e| e.into()) self.0.into_body().into_data_stream()
} }
async fn text(self) -> Result<String, anyhow::Error> { async fn text(self) -> Result<String, anyhow::Error> {
Ok(self.0.text().await?) let bytes = self.bytes().await?;
Ok(std::str::from_utf8(&bytes)?.into())
} }
} }
@ -145,7 +152,7 @@ impl RemoteResponse for ReqwestResponse {
impl<P: RemoteDbHandlerPermissions + 'static> DatabaseHandler impl<P: RemoteDbHandlerPermissions + 'static> DatabaseHandler
for RemoteDbHandler<P> for RemoteDbHandler<P>
{ {
type DB = Remote<PermissionChecker<P>, ReqwestClient>; type DB = Remote<PermissionChecker<P>, FetchClient>;
async fn open( async fn open(
&self, &self,
@ -201,14 +208,14 @@ impl<P: RemoteDbHandlerPermissions + 'static> DatabaseHandler
http2: true, http2: true,
}, },
)?; )?;
let reqwest_client = ReqwestClient(client); let fetch_client = FetchClient(client);
let permissions = PermissionChecker { let permissions = PermissionChecker {
state: state.clone(), state: state.clone(),
_permissions: PhantomData, _permissions: PhantomData,
}; };
let remote = Remote::new(reqwest_client, permissions, metadata_endpoint); let remote = Remote::new(fetch_client, permissions, metadata_endpoint);
Ok(remote) Ok(remote)
} }

View file

@ -42,6 +42,7 @@ h2.workspace = true
hkdf.workspace = true hkdf.workspace = true
home = "0.5.9" home = "0.5.9"
http.workspace = true http.workspace = true
http-body-util.workspace = true
idna = "0.3.0" idna = "0.3.0"
indexmap.workspace = true indexmap.workspace = true
ipnetwork = "0.20.0" ipnetwork = "0.20.0"
@ -64,7 +65,6 @@ pbkdf2 = "0.12.1"
pin-project-lite = "0.2.13" pin-project-lite = "0.2.13"
rand.workspace = true rand.workspace = true
regex.workspace = true regex.workspace = true
reqwest.workspace = true
ring.workspace = true ring.workspace = true
ripemd = { version = "0.1.3", features = ["oid"] } ripemd = { version = "0.1.3", features = ["oid"] }
rsa.workspace = true rsa.workspace = true

View file

@ -15,12 +15,12 @@ use deno_fetch::FetchRequestResource;
use deno_fetch::FetchReturn; use deno_fetch::FetchReturn;
use deno_fetch::HttpClientResource; use deno_fetch::HttpClientResource;
use deno_fetch::ResourceToBodyAdapter; use deno_fetch::ResourceToBodyAdapter;
use reqwest::header::HeaderMap; use http::header::HeaderMap;
use reqwest::header::HeaderName; use http::header::HeaderName;
use reqwest::header::HeaderValue; use http::header::HeaderValue;
use reqwest::header::CONTENT_LENGTH; use http::header::CONTENT_LENGTH;
use reqwest::Body; use http::Method;
use reqwest::Method; use http_body_util::BodyExt;
#[op2] #[op2]
#[serde] #[serde]
@ -60,34 +60,54 @@ where
header_map.append(name, v); header_map.append(name, v);
} }
let mut request = client.request(method.clone(), url).headers(header_map); let (body, con_len) = if let Some(body) = body {
(
if let Some(body) = body { ResourceToBodyAdapter::new(state.resource_table.take_any(body)?).boxed(),
request = request.body(Body::wrap_stream(ResourceToBodyAdapter::new( None,
state.resource_table.take_any(body)?, )
)));
} else { } else {
// POST and PUT requests should always have a 0 length content-length, // POST and PUT requests should always have a 0 length content-length,
// if there is no body. https://fetch.spec.whatwg.org/#http-network-or-cache-fetch // if there is no body. https://fetch.spec.whatwg.org/#http-network-or-cache-fetch
if matches!(method, Method::POST | Method::PUT) { let len = if matches!(method, Method::POST | Method::PUT) {
request = request.header(CONTENT_LENGTH, HeaderValue::from(0)); Some(0)
} } else {
None
};
(
http_body_util::Empty::new()
.map_err(|never| match never {})
.boxed(),
len,
)
}; };
let mut request = http::Request::new(body);
*request.method_mut() = method.clone();
*request.uri_mut() = url
.as_str()
.parse()
.map_err(|_| type_error("Invalid URL"))?;
*request.headers_mut() = header_map;
if let Some(len) = con_len {
request.headers_mut().insert(CONTENT_LENGTH, len.into());
}
let cancel_handle = CancelHandle::new_rc(); let cancel_handle = CancelHandle::new_rc();
let cancel_handle_ = cancel_handle.clone(); let cancel_handle_ = cancel_handle.clone();
let fut = async move { let fut = async move {
request client
.send() .send(request)
.or_cancel(cancel_handle_) .or_cancel(cancel_handle_)
.await .await
.map(|res| res.map_err(|err| type_error(err.to_string()))) .map(|res| res.map_err(|err| type_error(err.to_string())))
}; };
let request_rid = state let request_rid = state.resource_table.add(FetchRequestResource {
.resource_table future: Box::pin(fut),
.add(FetchRequestResource(Box::pin(fut))); url,
});
let cancel_handle_rid = let cancel_handle_rid =
state.resource_table.add(FetchCancelHandle(cancel_handle)); state.resource_table.add(FetchCancelHandle(cancel_handle));

View file

@ -27,12 +27,12 @@ use h2;
use h2::Reason; use h2::Reason;
use h2::RecvStream; use h2::RecvStream;
use http; use http;
use http::header::HeaderName;
use http::header::HeaderValue;
use http::request::Parts; use http::request::Parts;
use http::HeaderMap; use http::HeaderMap;
use http::Response; use http::Response;
use http::StatusCode; use http::StatusCode;
use reqwest::header::HeaderName;
use reqwest::header::HeaderValue;
use url::Url; use url::Url;
pub struct Http2Client { pub struct Http2Client {

View file

@ -13,7 +13,6 @@ use deno_core::error::AnyError;
use deno_core::serde_json; use deno_core::serde_json;
use deno_core::url; use deno_core::url;
use deno_core::ModuleResolutionError; use deno_core::ModuleResolutionError;
use deno_fetch::reqwest;
use std::env; use std::env;
use std::error::Error; use std::error::Error;
use std::io; use std::io;
@ -101,27 +100,6 @@ fn get_regex_error_class(error: &regex::Error) -> &'static str {
} }
} }
fn get_request_error_class(error: &reqwest::Error) -> &'static str {
error
.source()
.and_then(|inner_err| {
(inner_err
.downcast_ref::<io::Error>()
.map(get_io_error_class))
.or_else(|| {
inner_err
.downcast_ref::<serde_json::error::Error>()
.map(get_serde_json_error_class)
})
.or_else(|| {
inner_err
.downcast_ref::<url::ParseError>()
.map(get_url_parse_error_class)
})
})
.unwrap_or("Http")
}
fn get_serde_json_error_class( fn get_serde_json_error_class(
error: &serde_json::error::Error, error: &serde_json::error::Error,
) -> &'static str { ) -> &'static str {
@ -142,7 +120,17 @@ fn get_url_parse_error_class(_error: &url::ParseError) -> &'static str {
"URIError" "URIError"
} }
fn get_hyper_error_class(_error: &hyper_v014::Error) -> &'static str { fn get_hyper_error_class(_error: &hyper::Error) -> &'static str {
"Http"
}
fn get_hyper_util_error_class(
_error: &hyper_util::client::legacy::Error,
) -> &'static str {
"Http"
}
fn get_hyper_v014_error_class(_error: &hyper_v014::Error) -> &'static str {
"Http" "Http"
} }
@ -175,13 +163,18 @@ pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> {
e.downcast_ref::<dlopen2::Error>() e.downcast_ref::<dlopen2::Error>()
.map(get_dlopen_error_class) .map(get_dlopen_error_class)
}) })
.or_else(|| e.downcast_ref::<hyper::Error>().map(get_hyper_error_class))
.or_else(|| {
e.downcast_ref::<hyper_util::client::legacy::Error>()
.map(get_hyper_util_error_class)
})
.or_else(|| { .or_else(|| {
e.downcast_ref::<hyper_v014::Error>() e.downcast_ref::<hyper_v014::Error>()
.map(get_hyper_error_class) .map(get_hyper_v014_error_class)
}) })
.or_else(|| { .or_else(|| {
e.downcast_ref::<Arc<hyper_v014::Error>>() e.downcast_ref::<Arc<hyper_v014::Error>>()
.map(|e| get_hyper_error_class(e)) .map(|e| get_hyper_v014_error_class(e))
}) })
.or_else(|| { .or_else(|| {
e.downcast_ref::<deno_core::Canceled>().map(|e| { e.downcast_ref::<deno_core::Canceled>().map(|e| {
@ -202,10 +195,6 @@ pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> {
e.downcast_ref::<notify::Error>() e.downcast_ref::<notify::Error>()
.map(get_notify_error_class) .map(get_notify_error_class)
}) })
.or_else(|| {
e.downcast_ref::<reqwest::Error>()
.map(get_request_error_class)
})
.or_else(|| e.downcast_ref::<regex::Error>().map(get_regex_error_class)) .or_else(|| e.downcast_ref::<regex::Error>().map(get_regex_error_class))
.or_else(|| { .or_else(|| {
e.downcast_ref::<serde_json::error::Error>() e.downcast_ref::<serde_json::error::Error>()

View file

@ -13,6 +13,7 @@ use deno_core::OpState;
use deno_fetch::data_url::DataUrl; use deno_fetch::data_url::DataUrl;
use deno_web::BlobStore; use deno_web::BlobStore;
use deno_websocket::DomExceptionNetworkError; use deno_websocket::DomExceptionNetworkError;
use http_body_util::BodyExt;
use hyper::body::Bytes; use hyper::body::Bytes;
use serde::Deserialize; use serde::Deserialize;
use serde::Serialize; use serde::Serialize;
@ -78,10 +79,23 @@ pub fn op_worker_sync_fetch(
let (body, mime_type, res_url) = match script_url.scheme() { let (body, mime_type, res_url) = match script_url.scheme() {
"http" | "https" => { "http" | "https" => {
let resp = let mut req = http::Request::new(
client.get(script_url).send().await?.error_for_status()?; http_body_util::Empty::new()
.map_err(|never| match never {})
.boxed(),
);
*req.uri_mut() = script_url.as_str().parse()?;
let res_url = resp.url().to_string(); let resp = client.send(req).await?;
if resp.status().is_client_error()
|| resp.status().is_server_error()
{
return Err(type_error(format!(
"http status error: {}",
resp.status()
)));
}
// TODO(andreubotella) Properly run fetch's "extract a MIME type". // TODO(andreubotella) Properly run fetch's "extract a MIME type".
let mime_type = resp let mime_type = resp
@ -93,9 +107,9 @@ pub fn op_worker_sync_fetch(
// Always check the MIME type with HTTP(S). // Always check the MIME type with HTTP(S).
loose_mime_checks = false; loose_mime_checks = false;
let body = resp.bytes().await?; let body = resp.collect().await?.to_bytes();
(body, mime_type, res_url) (body, mime_type, script)
} }
"data" => { "data" => {
let data_url = DataUrl::process(&script) let data_url = DataUrl::process(&script)

View file

@ -53,6 +53,7 @@ once_cell.workspace = true
os_pipe.workspace = true os_pipe.workspace = true
pretty_assertions.workspace = true pretty_assertions.workspace = true
regex.workspace = true regex.workspace = true
reqwest.workspace = true
serde.workspace = true serde.workspace = true
test_util.workspace = true test_util.workspace = true
tokio.workspace = true tokio.workspace = true

View file

@ -6,7 +6,7 @@ use deno_core::error::AnyError;
use deno_core::serde_json; use deno_core::serde_json;
use deno_core::serde_json::json; use deno_core::serde_json::json;
use deno_core::url; use deno_core::url;
use deno_fetch::reqwest;
use fastwebsockets::FragmentCollector; use fastwebsockets::FragmentCollector;
use fastwebsockets::Frame; use fastwebsockets::Frame;
use fastwebsockets::WebSocket; use fastwebsockets::WebSocket;

View file

@ -3,7 +3,7 @@
use deno_core::serde_json; use deno_core::serde_json;
use deno_core::serde_json::json; use deno_core::serde_json::json;
use deno_core::serde_json::Value; use deno_core::serde_json::Value;
use deno_fetch::reqwest;
use pretty_assertions::assert_eq; use pretty_assertions::assert_eq;
use test_util as util; use test_util as util;
use test_util::itest; use test_util::itest;

View file

@ -11,7 +11,7 @@ use std::sync::Arc;
use bytes::Bytes; use bytes::Bytes;
use deno_core::serde_json::json; use deno_core::serde_json::json;
use deno_core::url; use deno_core::url;
use deno_fetch::reqwest;
use deno_tls::rustls; use deno_tls::rustls;
use deno_tls::rustls::ClientConnection; use deno_tls::rustls::ClientConnection;
use deno_tls::rustls_pemfile; use deno_tls::rustls_pemfile;

View file

@ -2,7 +2,6 @@
use std::io::Read; use std::io::Read;
use deno_fetch::reqwest;
use pretty_assertions::assert_eq; use pretty_assertions::assert_eq;
use regex::Regex; use regex::Regex;
use test_util as util; use test_util as util;

View file

@ -1,3 +1,3 @@
DANGER: TLS certificate validation is disabled for: deno.land DANGER: TLS certificate validation is disabled for: deno.land
error: Import 'https://localhost:5545/subdir/mod2.ts' failed: error sending request for url (https://localhost:5545/subdir/mod2.ts) error: Import 'https://localhost:5545/subdir/mod2.ts' failed: client error[WILDCARD]
at file:///[WILDCARD]/cafile_url_imports.ts:[WILDCARD] at file:///[WILDCARD]/cafile_url_imports.ts:[WILDCARD]

View file

@ -1,4 +1,4 @@
error: Uncaught (in promise) TypeError: error sending request for url[WILDCARD] error: Uncaught (in promise) TypeError: client error[WILDCARD]
await fetch("https://nonexistent.deno.land/"); await fetch("https://nonexistent.deno.land/");
^[WILDCARD] ^[WILDCARD]
at async fetch (ext:[WILDCARD]) at async fetch (ext:[WILDCARD])

View file

@ -67,7 +67,7 @@ Deno.test(
await fetch(`http://localhost:${port}`); await fetch(`http://localhost:${port}`);
}, },
TypeError, TypeError,
"error sending request for url", "client error (Connect)",
); );
}, },
); );
@ -80,7 +80,7 @@ Deno.test(
await fetch("http://nil/"); await fetch("http://nil/");
}, },
TypeError, TypeError,
"error sending request for url", "client error (Connect)",
); );
}, },
); );
@ -688,7 +688,7 @@ Deno.test(
"accept: */*\r\n", "accept: */*\r\n",
"accept-language: *\r\n", "accept-language: *\r\n",
`user-agent: Deno/${Deno.version.deno}\r\n`, `user-agent: Deno/${Deno.version.deno}\r\n`,
"accept-encoding: gzip, br\r\n", "accept-encoding: gzip,br\r\n",
`host: ${addr}\r\n\r\n`, `host: ${addr}\r\n\r\n`,
].join(""); ].join("");
assertEquals(actual, expected); assertEquals(actual, expected);
@ -720,7 +720,7 @@ Deno.test(
"accept: text/html\r\n", "accept: text/html\r\n",
"accept-language: en-US\r\n", "accept-language: en-US\r\n",
`user-agent: Deno/${Deno.version.deno}\r\n`, `user-agent: Deno/${Deno.version.deno}\r\n`,
"accept-encoding: gzip, br\r\n", "accept-encoding: gzip,br\r\n",
`host: ${addr}\r\n\r\n`, `host: ${addr}\r\n\r\n`,
].join(""); ].join("");
assertEquals(actual, expected); assertEquals(actual, expected);
@ -750,15 +750,16 @@ Deno.test(
const actual = new TextDecoder().decode((await bufPromise).bytes()); const actual = new TextDecoder().decode((await bufPromise).bytes());
const expected = [ const expected = [
"POST /blah HTTP/1.1\r\n", "POST /blah HTTP/1.1\r\n",
`content-length: ${body.length}\r\n`,
"hello: World\r\n", "hello: World\r\n",
"foo: Bar\r\n", "foo: Bar\r\n",
"content-type: text/plain;charset=UTF-8\r\n", "content-type: text/plain;charset=UTF-8\r\n",
"accept: */*\r\n", "accept: */*\r\n",
"accept-language: *\r\n", "accept-language: *\r\n",
`user-agent: Deno/${Deno.version.deno}\r\n`, `user-agent: Deno/${Deno.version.deno}\r\n`,
"accept-encoding: gzip, br\r\n", "accept-encoding: gzip,br\r\n",
`host: ${addr}\r\n`, `host: ${addr}\r\n`,
`content-length: ${body.length}\r\n\r\n`, `\r\n`,
body, body,
].join(""); ].join("");
assertEquals(actual, expected); assertEquals(actual, expected);
@ -789,14 +790,15 @@ Deno.test(
const actual = new TextDecoder().decode((await bufPromise).bytes()); const actual = new TextDecoder().decode((await bufPromise).bytes());
const expected = [ const expected = [
"POST /blah HTTP/1.1\r\n", "POST /blah HTTP/1.1\r\n",
`content-length: ${body.byteLength}\r\n`,
"hello: World\r\n", "hello: World\r\n",
"foo: Bar\r\n", "foo: Bar\r\n",
"accept: */*\r\n", "accept: */*\r\n",
"accept-language: *\r\n", "accept-language: *\r\n",
`user-agent: Deno/${Deno.version.deno}\r\n`, `user-agent: Deno/${Deno.version.deno}\r\n`,
"accept-encoding: gzip, br\r\n", "accept-encoding: gzip,br\r\n",
`host: ${addr}\r\n`, `host: ${addr}\r\n`,
`content-length: ${body.byteLength}\r\n\r\n`, `\r\n`,
bodyStr, bodyStr,
].join(""); ].join("");
assertEquals(actual, expected); assertEquals(actual, expected);
@ -827,7 +829,7 @@ Deno.test(
"accept: */*\r\n", "accept: */*\r\n",
"accept-language: *\r\n", "accept-language: *\r\n",
`user-agent: Deno/${Deno.version.deno}\r\n`, `user-agent: Deno/${Deno.version.deno}\r\n`,
"accept-encoding: gzip, br\r\n", "accept-encoding: gzip,br\r\n",
`host: ${addr}\r\n\r\n`, `host: ${addr}\r\n\r\n`,
].join(""); ].join("");
assertEquals(actual, expected); assertEquals(actual, expected);
@ -859,7 +861,7 @@ Deno.test(
"accept: */*\r\n", "accept: */*\r\n",
"accept-language: *\r\n", "accept-language: *\r\n",
`user-agent: Deno/${Deno.version.deno}\r\n`, `user-agent: Deno/${Deno.version.deno}\r\n`,
"accept-encoding: gzip, br\r\n\r\n", "accept-encoding: gzip,br\r\n\r\n",
].join(""); ].join("");
assertEquals(actual, expected); assertEquals(actual, expected);
}, },
@ -1226,7 +1228,7 @@ Deno.test(
"accept: */*\r\n", "accept: */*\r\n",
"accept-language: *\r\n", "accept-language: *\r\n",
`user-agent: Deno/${Deno.version.deno}\r\n`, `user-agent: Deno/${Deno.version.deno}\r\n`,
"accept-encoding: gzip, br\r\n", "accept-encoding: gzip,br\r\n",
`host: ${addr}\r\n`, `host: ${addr}\r\n`,
`transfer-encoding: chunked\r\n\r\n`, `transfer-encoding: chunked\r\n\r\n`,
"B\r\n", "B\r\n",
@ -1824,7 +1826,7 @@ Deno.test(
await fetch(`http://${addr}/`); await fetch(`http://${addr}/`);
}, },
TypeError, TypeError,
"error sending request", "client error",
); );
listener.close(); listener.close();
@ -1880,7 +1882,7 @@ Deno.test(
await response.arrayBuffer(); await response.arrayBuffer();
}, },
Error, Error,
"error decoding response body", "body",
); );
listener.close(); listener.close();

View file

@ -2572,9 +2572,11 @@ for (const compression of [true, false]) {
const result = await reader.read(); const result = await reader.read();
assert(!result.done); assert(!result.done);
assertEquals(result.value, new Uint8Array([65])); assertEquals(result.value, new Uint8Array([65]));
const err = await assertRejects(() => reader.read()); await assertRejects(
assert(err instanceof TypeError); () => reader.read(),
assert(err.message.includes("error decoding response body")); TypeError,
"body",
);
const httpConn = await server; const httpConn = await server;
httpConn.close(); httpConn.close();
@ -2608,9 +2610,11 @@ for (const compression of [true, false]) {
const result = await reader.read(); const result = await reader.read();
assert(!result.done); assert(!result.done);
assertEquals(result.value, new Uint8Array([65])); assertEquals(result.value, new Uint8Array([65]));
const err = await assertRejects(() => reader.read()); await assertRejects(
assert(err instanceof TypeError); () => reader.read(),
assert(err.message.includes("error decoding response body")); TypeError,
"body",
);
const httpConn = await server; const httpConn = await server;
httpConn.close(); httpConn.close();

View file

@ -3522,7 +3522,7 @@ Deno.test(
fail(); fail();
} catch (clientError) { } catch (clientError) {
assert(clientError instanceof TypeError); assert(clientError instanceof TypeError);
assert(clientError.message.includes("error sending request for url")); assert(clientError.message.includes("client error"));
} finally { } finally {
ac.abort(); ac.abort();
await server.finished; await server.finished;
@ -3570,7 +3570,7 @@ Deno.test({
fail(); fail();
} catch (clientError) { } catch (clientError) {
assert(clientError instanceof TypeError); assert(clientError instanceof TypeError);
assert(clientError.message.includes("error sending request for url")); assert(clientError.message.includes("client error"));
} finally { } finally {
ac.abort(); ac.abort();
await server.finished; await server.finished;

View file

@ -139,6 +139,17 @@ async fn registry_server_handler(
return Ok(res); return Ok(res);
} }
let accept_header = req
.headers()
.get("accept")
.and_then(|s| s.to_str().ok())
.unwrap_or_default();
if accept_header != "*/*" {
let res = Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(UnsyncBoxBody::new(Empty::new()))?;
return Ok(res);
}
// serve the registry package files // serve the registry package files
let mut file_path = tests_path().join("registry").join("jsr").to_path_buf(); let mut file_path = tests_path().join("registry").join("jsr").to_path_buf();
file_path.push( file_path.push(