1
0
Fork 0
mirror of https://github.com/denoland/deno.git synced 2024-12-25 00:29:09 -05:00

feat(serde_v8): bytes::Bytes support (#14412)

This commit is contained in:
Aaron O'Mullan 2022-05-13 12:53:13 +02:00 committed by GitHub
parent 3e7afb8918
commit 67e5a850cc
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 191 additions and 5 deletions

1
Cargo.lock generated
View file

@ -3615,6 +3615,7 @@ name = "serde_v8"
version = "0.45.0" version = "0.45.0"
dependencies = [ dependencies = [
"bencher", "bencher",
"bytes",
"derive_more", "derive_more",
"serde", "serde",
"serde_json", "serde_json",

View file

@ -589,10 +589,7 @@ fn http_response(
Some(data) => { Some(data) => {
// If a buffer was passed, but isn't compressible, we use it to // If a buffer was passed, but isn't compressible, we use it to
// construct a response body. // construct a response body.
Ok(( Ok((HttpResponseWriter::Closed, Bytes::from(data).into()))
HttpResponseWriter::Closed,
Bytes::copy_from_slice(&data).into(),
))
} }
None if compressing => { None if compressing => {
// Create a one way pipe that implements tokio's async io traits. To do // Create a one way pipe that implements tokio's async io traits. To do
@ -766,7 +763,7 @@ async fn op_http_write(
} }
} }
HttpResponseWriter::BodyUncompressed(body) => { HttpResponseWriter::BodyUncompressed(body) => {
let bytes = Bytes::copy_from_slice(&buf[..]); let bytes = Bytes::from(buf);
match body.send_data(bytes).await { match body.send_data(bytes).await {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(err) => { Err(err) => {

View file

@ -13,6 +13,7 @@ description = "Rust to V8 serialization and deserialization"
path = "lib.rs" path = "lib.rs"
[dependencies] [dependencies]
bytes = "1"
derive_more = "0.99.17" derive_more = "0.99.17"
serde = { version = "1.0.136", features = ["derive"] } serde = { version = "1.0.136", features = ["derive"] }
v8 = "0.42.0" v8 = "0.42.0"

View file

@ -144,3 +144,19 @@ impl FromV8 for ZeroCopyBuf {
Ok(Self::FromV8(V8Slice::from_v8(scope, value)?)) Ok(Self::FromV8(V8Slice::from_v8(scope, value)?))
} }
} }
impl From<ZeroCopyBuf> for bytes::Bytes {
  /// Converts a `ZeroCopyBuf` into `bytes::Bytes`, consuming it.
  fn from(zbuf: ZeroCopyBuf) -> bytes::Bytes {
    match zbuf {
      ZeroCopyBuf::Temp(temp) => temp.into(),
      ZeroCopyBuf::FromV8(slice) => slice.into(),
      ZeroCopyBuf::ToV8(cell) => {
        // WARNING(AaronO): potential footgun, but will disappear in future ZeroCopyBuf refactor
        let inner = cell.lock().unwrap().take();
        inner.expect("ZeroCopyBuf was empty").into()
      }
    }
  }
}

View file

@ -2,6 +2,7 @@
pub mod buffer; pub mod buffer;
pub mod bytestring; pub mod bytestring;
pub mod detached_buffer; pub mod detached_buffer;
pub(super) mod rawbytes;
pub mod string_or_buffer; pub mod string_or_buffer;
pub mod transl8; pub mod transl8;
pub mod u16string; pub mod u16string;

View file

@ -0,0 +1,96 @@
// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.

// Alias mirroring the `AtomicPtr<()>` data slot inside `bytes::Bytes`.
// NOTE(review): a plain raw pointer is used instead of
// `std::sync::atomic::AtomicPtr`; both are one pointer-sized word, and only
// the layout matters here — confirm against the `bytes` crate internals.
pub(crate) type AtomicPtr<T> = *mut T;

/// Private mirror of the internal representation of `bytes::Bytes`.
///
/// A `RawBytes` is only ever turned into a real `bytes::Bytes` through an
/// `unsafe` transmute (see `From<RawBytes> for bytes::Bytes` below), so the
/// field order, types and sizes here must stay exactly in sync with the
/// `bytes` crate's `Bytes` struct. Do not reorder or resize fields.
#[allow(unused)]
pub(crate) struct RawBytes {
  ptr: *const u8,
  len: usize,
  // inlined "trait object"
  data: AtomicPtr<()>,
  vtable: &'static Vtable,
}
impl RawBytes {
  /// Assembles a `bytes::Bytes` from its raw parts: a data pointer, a
  /// length, the opaque "trait object" data word and a static vtable.
  pub fn new_raw(
    ptr: *const u8,
    len: usize,
    data: AtomicPtr<()>,
    vtable: &'static Vtable,
  ) -> bytes::Bytes {
    let raw = RawBytes { ptr, len, data, vtable };
    bytes::Bytes::from(raw)
  }
}
/// Mirror of the `bytes` crate's internal `Vtable`: the pair of function
/// pointers `bytes::Bytes` dispatches through when cloned or dropped.
/// Must stay layout-compatible with the upstream definition, since
/// `RawBytes` (which stores a `&'static Vtable`) is transmuted into
/// `bytes::Bytes`.
#[allow(unused)]
pub(crate) struct Vtable {
  /// fn(data, ptr, len)
  pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> bytes::Bytes,
  /// fn(data, ptr, len)
  pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}
impl From<RawBytes> for bytes::Bytes {
  /// Reinterprets a `RawBytes` as a real `bytes::Bytes` without copying.
  fn from(b: RawBytes) -> Self {
    // SAFETY: RawBytes has the same layout as bytes::Bytes
    // this is tested below, both are composed of usize-d ptrs/values
    // thus aren't currently subject to rust's field re-ordering to minimize padding
    unsafe { std::mem::transmute(b) }
  }
}
#[cfg(test)]
mod tests {
  use super::*;
  use std::mem;

  const HELLO: &str = "hello";

  // ===== impl StaticVtable =====
  // Minimal vtable for `&'static [u8]` data: cloning re-wraps the same
  // static slice and dropping is a no-op.

  const STATIC_VTABLE: Vtable = Vtable {
    clone: static_clone,
    drop: static_drop,
  };

  // Clone hook: rebuild a Bytes over the same (ptr, len); the data word is
  // unused for static slices.
  unsafe fn static_clone(
    _: &AtomicPtr<()>,
    ptr: *const u8,
    len: usize,
  ) -> bytes::Bytes {
    from_static(std::slice::from_raw_parts(ptr, len)).into()
  }

  unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
    // nothing to drop for &'static [u8]
  }

  // Builds a RawBytes over a static slice; data is null since the static
  // vtable never reads it.
  fn from_static(bytes: &'static [u8]) -> RawBytes {
    RawBytes {
      ptr: bytes.as_ptr(),
      len: bytes.len(),
      data: std::ptr::null_mut(),
      vtable: &STATIC_VTABLE,
    }
  }

  // Transmuting a RawBytes must yield a Bytes equal to one built via the
  // crate's public `from_static` constructor.
  #[test]
  fn bytes_identity() {
    let b1: bytes::Bytes = from_static(HELLO.as_bytes()).into();
    let b2 = bytes::Bytes::from_static(HELLO.as_bytes());
    assert_eq!(b1, b2); // Values are equal
  }

  // Guards the layout assumption behind the transmute: both structs are four
  // usize-sized words and the first three (ptr, len, data) match. The fourth
  // word (the vtable pointer) legitimately differs, hence `..3`.
  #[test]
  fn bytes_layout() {
    let u1: [usize; 4] =
      unsafe { mem::transmute(from_static(HELLO.as_bytes())) };
    let u2: [usize; 4] =
      unsafe { mem::transmute(bytes::Bytes::from_static(HELLO.as_bytes())) };
    assert_eq!(u1[..3], u2[..3]); // Struct bytes are equal besides Vtables
  }
}

View file

@ -48,3 +48,12 @@ impl FromV8 for StringOrBuffer {
Err(Error::ExpectedBuffer) Err(Error::ExpectedBuffer)
} }
} }
impl From<StringOrBuffer> for bytes::Bytes {
  /// Converts a `StringOrBuffer` into `bytes::Bytes`, consuming it.
  fn from(sob: StringOrBuffer) -> Self {
    match sob {
      StringOrBuffer::String(text) => bytes::Bytes::from(text.into_bytes()),
      StringOrBuffer::Buffer(buffer) => buffer.into(),
    }
  }
}

View file

@ -3,7 +3,9 @@
use std::ops::Deref; use std::ops::Deref;
use std::ops::DerefMut; use std::ops::DerefMut;
use std::ops::Range; use std::ops::Range;
use std::rc::Rc;
use super::rawbytes;
use super::transl8::FromV8; use super::transl8::FromV8;
/// A V8Slice encapsulates a slice that's been borrowed from a JavaScript /// A V8Slice encapsulates a slice that's been borrowed from a JavaScript
@ -117,3 +119,52 @@ impl AsMut<[u8]> for V8Slice {
self.as_slice_mut() self.as_slice_mut()
} }
} }
// Implement V8Slice -> bytes::Bytes
impl V8Slice {
  /// Consumes an `Rc<V8Slice>` and decomposes it into the raw parts
  /// (`ptr`, `len`, leaked-Rc data pointer) that `bytes::Bytes` stores.
  ///
  /// The returned `*mut V8Slice` comes from `Rc::into_raw`, so it carries
  /// one strong reference that must eventually be released with
  /// `Rc::from_raw` (see `v8slice_drop`), otherwise the slice leaks.
  fn rc_into_byte_parts(self: Rc<Self>) -> (*const u8, usize, *mut V8Slice) {
    let (ptr, len) = {
      let slice = self.as_ref();
      (slice.as_ptr(), slice.len())
    };
    let rc_raw = Rc::into_raw(self);
    let data = rc_raw as *mut V8Slice;
    (ptr, len, data)
  }
}
impl From<V8Slice> for bytes::Bytes {
  /// Wraps a `V8Slice` in a `bytes::Bytes` without copying the underlying
  /// data: the slice is moved into an `Rc` whose leaked pointer becomes the
  /// Bytes' data word, dispatched through `V8SLICE_VTABLE`.
  fn from(v8slice: V8Slice) -> Self {
    let (ptr, len, data) = Rc::new(v8slice).rc_into_byte_parts();
    rawbytes::RawBytes::new_raw(ptr, len, data.cast(), &V8SLICE_VTABLE)
  }
}
// NOTE: in the limit we could avoid extra-indirection and use the C++ shared_ptr
// but we can't store both the underlying data ptr & ctrl ptr ... so instead we
// use a shared rust ptr (Rc/Arc) that itself controls the C++ shared_ptr
//
// Vtable dispatched by `bytes::Bytes` for clone/drop of Bytes backed by a
// `V8Slice`; the Bytes' data word is the leaked `Rc<V8Slice>` pointer.
const V8SLICE_VTABLE: rawbytes::Vtable = rawbytes::Vtable {
  clone: v8slice_clone,
  drop: v8slice_drop,
};
/// Clone hook for Bytes backed by a `V8Slice`: bumps the strong count of the
/// leaked `Rc<V8Slice>` stored in `data` and builds a new Bytes over the
/// caller-supplied `ptr`/`len` window (which may be a sub-slice).
unsafe fn v8slice_clone(
  data: &rawbytes::AtomicPtr<()>,
  ptr: *const u8,
  len: usize,
) -> bytes::Bytes {
  // Temporarily resurrect the leaked Rc, clone it (strong count +1, the
  // clone is re-leaked by rc_into_byte_parts) ...
  let rc = Rc::from_raw(*data as *const V8Slice);
  let (_, _, data) = rc.clone().rc_into_byte_parts();
  // ... then forget the resurrected Rc so the original reference survives.
  std::mem::forget(rc);
  // NOTE: `bytes::Bytes` does bounds checking so we trust its ptr, len inputs
  // and must use them to allow cloning Bytes it has sliced
  rawbytes::RawBytes::new_raw(ptr, len, data.cast(), &V8SLICE_VTABLE)
}
/// Drop hook for Bytes backed by a `V8Slice`: releases the strong reference
/// held in `data` by reconstructing and dropping the leaked `Rc<V8Slice>`.
unsafe fn v8slice_drop(
  data: &mut rawbytes::AtomicPtr<()>,
  _: *const u8,
  _: usize,
) {
  drop(Rc::from_raw(*data as *const V8Slice))
}

View file

@ -129,6 +129,20 @@ fn magic_buffer() {
assert!(eq.is_true()); assert!(eq.is_true());
let eq = js_exec(scope, "t3.b[4] === 11"); let eq = js_exec(scope, "t3.b[4] === 11");
assert!(eq.is_true()); assert!(eq.is_true());
// ZeroCopyBuf as bytes::Bytes
let v8_array = js_exec(scope, "new Uint8Array([1,2,3,4,5])");
let zbuf: serde_v8::ZeroCopyBuf =
serde_v8::from_v8(scope, v8_array).unwrap();
let buf: bytes::Bytes = zbuf.into();
assert_eq!(buf, bytes::Bytes::from_static(&[1, 2, 3, 4, 5]));
assert_eq!(buf, bytes::Bytes::from_static(&[1, 2, 3, 4, 5]));
assert_eq!(buf.slice(0..2), bytes::Bytes::from_static(&[1, 2]));
assert_eq!(buf.slice(2..), bytes::Bytes::from_static(&[3, 4, 5]));
// We're specifically testing that slices are preserved post-clone
#[allow(clippy::redundant_clone)]
let buf2 = buf.slice(2..).clone();
assert_eq!(buf2, bytes::Bytes::from_static(&[3, 4, 5]));
}) })
} }