// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.

use std::cell::RefCell;
use std::collections::HashMap;
use std::fmt::Debug;
use std::rc::Rc;
use std::sync::Arc;

use async_trait::async_trait;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::parking_lot::Mutex;
use deno_core::url::Url;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::ToJsBuffer;
use serde::Deserialize;
use serde::Serialize;
use uuid::Uuid;

use crate::Location;

pub type PartMap = HashMap<Uuid, Arc<dyn BlobPart + Send + Sync>>;

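/// Registry for blob parts and `blob:` object URLs, shared behind an `Arc`
/// in the extension's `OpState` and used by the ops below.
///
/// A minimal usage sketch (illustrative only, outside of any op):
///
/// ```ignore
/// let store = BlobStore::default();
/// let id = store.insert_part(Arc::new(InMemoryBlobPart::from(vec![1, 2, 3])));
/// assert!(store.get_part(&id).is_some());
/// ```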
#[derive(Default, Debug)]
pub struct BlobStore {
  parts: Mutex<PartMap>,
  object_urls: Mutex<HashMap<Url, Arc<Blob>>>,
}

impl BlobStore {
  pub fn insert_part(&self, part: Arc<dyn BlobPart + Send + Sync>) -> Uuid {
    let id = Uuid::new_v4();
    let mut parts = self.parts.lock();
    parts.insert(id, part);
    id
  }

  pub fn get_part(&self, id: &Uuid) -> Option<Arc<dyn BlobPart + Send + Sync>> {
    let parts = self.parts.lock();
    let part = parts.get(id);
    part.cloned()
  }

  pub fn remove_part(
    &self,
    id: &Uuid,
  ) -> Option<Arc<dyn BlobPart + Send + Sync>> {
    let mut parts = self.parts.lock();
    parts.remove(id)
  }

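  /// Resolves a `blob:` object URL to its registered `Blob`. The URL's
  /// fragment is cleared before the lookup, so a URL with a fragment maps to
  /// the same entry as the fragment-less URL.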
  pub fn get_object_url(&self, mut url: Url) -> Option<Arc<Blob>> {
    let blob_store = self.object_urls.lock();
    url.set_fragment(None);
    blob_store.get(&url).cloned()
  }

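  /// Registers `blob` under a freshly minted object URL of the form
  /// `blob:{origin}/{uuid}` and returns that URL. The origin is taken from
  /// `maybe_location`, falling back to the serialization `"null"` when no
  /// location is available.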
  pub fn insert_object_url(
    &self,
    blob: Blob,
    maybe_location: Option<Url>,
  ) -> Url {
    let origin = if let Some(location) = maybe_location {
      location.origin().ascii_serialization()
    } else {
      "null".to_string()
    };
    let id = Uuid::new_v4();
    let url = Url::parse(&format!("blob:{origin}/{id}")).unwrap();

    let mut blob_store = self.object_urls.lock();
    blob_store.insert(url.clone(), Arc::new(blob));

    url
  }

  pub fn remove_object_url(&self, url: &Url) {
    let mut blob_store = self.object_urls.lock();
    blob_store.remove(url);
  }

  pub fn clear(&self) {
    self.parts.lock().clear();
    self.object_urls.lock().clear();
  }
}

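/// An immutable blob: a media type plus an ordered list of parts whose
/// concatenation forms the blob's contents.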
#[derive(Debug)]
pub struct Blob {
  pub media_type: String,

  pub parts: Vec<Arc<dyn BlobPart + Send + Sync>>,
}

impl Blob {
  // TODO(lucacsonato): this should be a stream!
  pub async fn read_all(&self) -> Result<Vec<u8>, AnyError> {
    let size = self.size();
    let mut bytes = Vec::with_capacity(size);

    for part in &self.parts {
      let chunk = part.read().await?;
      bytes.extend_from_slice(chunk);
    }

    assert_eq!(bytes.len(), size);

    Ok(bytes)
  }

  fn size(&self) -> usize {
    let mut total = 0;
    for part in &self.parts {
      total += part.size()
    }
    total
  }
}

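/// A single chunk of blob data. `read` currently hands back the whole chunk
/// at once (see the TODO about streaming); `size` reports its length in
/// bytes.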
#[async_trait]
pub trait BlobPart: Debug {
  // TODO(lucacsonato): this should be a stream!
  async fn read(&self) -> Result<&[u8], AnyError>;
  fn size(&self) -> usize;
}

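/// A blob part whose bytes are owned directly in memory.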
#[derive(Debug)]
pub struct InMemoryBlobPart(Vec<u8>);

impl From<Vec<u8>> for InMemoryBlobPart {
  fn from(vec: Vec<u8>) -> Self {
    Self(vec)
  }
}

#[async_trait]
impl BlobPart for InMemoryBlobPart {
  async fn read(&self) -> Result<&[u8], AnyError> {
    Ok(&self.0)
  }

  fn size(&self) -> usize {
    self.0.len()
  }
}

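/// A view of `len` bytes starting at `start` within another part. The
/// underlying part is shared via `Arc`, so slicing does not copy the bytes.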
#[derive(Debug)]
pub struct SlicedBlobPart {
  part: Arc<dyn BlobPart + Send + Sync>,
  start: usize,
  len: usize,
}

#[async_trait]
impl BlobPart for SlicedBlobPart {
  async fn read(&self) -> Result<&[u8], AnyError> {
    let original = self.part.read().await?;
    Ok(&original[self.start..self.start + self.len])
  }

  fn size(&self) -> usize {
    self.len
  }
}

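/// Copies the given buffer into a new `InMemoryBlobPart`, registers it in the
/// `BlobStore` held in `OpState`, and returns the new part's UUID.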
#[op2]
#[serde]
pub fn op_blob_create_part(
  state: &mut OpState,
  #[buffer] data: JsBuffer,
) -> Uuid {
  let blob_store = state.borrow::<Arc<BlobStore>>();
  let part = InMemoryBlobPart(data.to_vec());
  blob_store.insert_part(Arc::new(part))
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SliceOptions {
  start: usize,
  len: usize,
}

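/// Registers a new `SlicedBlobPart` covering `options.len` bytes starting at
/// `options.start` of the part identified by `id`, and returns the new part's
/// UUID. Fails if the requested range extends past the end of the part.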
#[op2]
#[serde]
pub fn op_blob_slice_part(
  state: &mut OpState,
  #[serde] id: Uuid,
  #[serde] options: SliceOptions,
) -> Result<Uuid, AnyError> {
  let blob_store = state.borrow::<Arc<BlobStore>>();
  let part = blob_store
    .get_part(&id)
    .ok_or_else(|| type_error("Blob part not found"))?;

  let SliceOptions { start, len } = options;

  let size = part.size();
  if start + len > size {
    return Err(type_error(
      "start + len can not be larger than blob part size",
    ));
  }

  let sliced_part = SlicedBlobPart { part, start, len };
  let id = blob_store.insert_part(Arc::new(sliced_part));

  Ok(id)
}

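/// Reads the full contents of the part identified by `id` and returns them to
/// JavaScript as a buffer.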
#[op2(async)]
#[serde]
pub async fn op_blob_read_part(
  state: Rc<RefCell<OpState>>,
  #[serde] id: Uuid,
) -> Result<ToJsBuffer, AnyError> {
  let part = {
    let state = state.borrow();
    let blob_store = state.borrow::<Arc<BlobStore>>();
    blob_store.get_part(&id)
  }
  .ok_or_else(|| type_error("Blob part not found"))?;
  let buf = part.read().await?;
  Ok(ToJsBuffer::from(buf.to_vec()))
}

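/// Drops the store's reference to the part identified by `id`. The bytes are
/// freed once no other `Arc` (e.g. a sliced part or a registered blob) still
/// points at them.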
#[op2]
pub fn op_blob_remove_part(state: &mut OpState, #[serde] id: Uuid) {
  let blob_store = state.borrow::<Arc<BlobStore>>();
  blob_store.remove_part(&id);
}

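/// Assembles a `Blob` from the given part IDs and registers it under a new
/// object URL derived from the current `Location` (if any), returning that
/// URL as a string.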
#[op2]
#[string]
pub fn op_blob_create_object_url(
  state: &mut OpState,
  #[string] media_type: String,
  #[serde] part_ids: Vec<Uuid>,
) -> Result<String, AnyError> {
  let mut parts = Vec::with_capacity(part_ids.len());
  let blob_store = state.borrow::<Arc<BlobStore>>();
  for part_id in part_ids {
    let part = blob_store
      .get_part(&part_id)
      .ok_or_else(|| type_error("Blob part not found"))?;
    parts.push(part);
  }

  let blob = Blob { media_type, parts };

  let maybe_location = state.try_borrow::<Location>();
  let blob_store = state.borrow::<Arc<BlobStore>>();

  let url = blob_store
    .insert_object_url(blob, maybe_location.map(|location| location.0.clone()));

  Ok(url.to_string())
}

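/// Removes the object URL registration for `url`. Parts that are still
/// referenced elsewhere (e.g. by live `Blob` objects) remain readable.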
#[op2(fast)]
pub fn op_blob_revoke_object_url(
  state: &mut OpState,
  #[string] url: &str,
) -> Result<(), AnyError> {
  let url = Url::parse(url)?;
  let blob_store = state.borrow::<Arc<BlobStore>>();
  blob_store.remove_object_url(&url);
  Ok(())
}

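/// Serializable description of a blob resolved from an object URL: its media
/// type and, for each part, the UUID under which that part was re-registered
/// in the store together with its size in bytes.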
#[derive(Serialize)]
pub struct ReturnBlob {
  pub media_type: String,
  pub parts: Vec<ReturnBlobPart>,
}

#[derive(Serialize)]
pub struct ReturnBlobPart {
  pub uuid: Uuid,
  pub size: usize,
}

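/// If `url` is a currently registered `blob:` object URL, returns a
/// `ReturnBlob` whose parts are fresh references to the same underlying
/// `BlobPart`s; otherwise returns `None`. This backs the JS-side
/// `blobFromObjectUrl` helper used to populate a request's "blob URL entry",
/// so that a `Request` constructed from an object URL can still be fetched
/// after that URL has been revoked.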
#[op2]
#[serde]
pub fn op_blob_from_object_url(
  state: &mut OpState,
  #[string] url: String,
) -> Result<Option<ReturnBlob>, AnyError> {
  let url = Url::parse(&url)?;
  if url.scheme() != "blob" {
    return Ok(None);
  }

  let blob_store = state.try_borrow::<Arc<BlobStore>>().ok_or_else(|| {
    type_error("Blob URLs are not supported in this context.")
  })?;
  if let Some(blob) = blob_store.get_object_url(url) {
    let parts = blob
      .parts
      .iter()
      .map(|part| ReturnBlobPart {
        uuid: blob_store.insert_part(part.clone()),
        size: part.size(),
      })
      .collect();
    Ok(Some(ReturnBlob {
      media_type: blob.media_type.clone(),
      parts,
    }))
  } else {
    Ok(None)
  }
}