// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
|
2019-04-21 12:16:55 -04:00
|
|
|
/*
SharedQueue Binary Layout
+-------------------------------+-------------------------------+
|                        NUM_RECORDS (32)                       |
+---------------------------------------------------------------+
|                      NUM_SHIFTED_OFF (32)                     |
+---------------------------------------------------------------+
|                           HEAD (32)                           |
+---------------------------------------------------------------+
|                         OFFSETS (32)                          |
+---------------------------------------------------------------+
|                   RECORD_ENDS (*MAX_RECORDS)               ...
+---------------------------------------------------------------+
|                     RECORDS (*MAX_RECORDS)                 ...
+---------------------------------------------------------------+
*/

use crate::bindings;
use crate::ops::OpId;
use log::debug;
use rusty_v8 as v8;
use std::convert::TryInto;

const MAX_RECORDS: usize = 100;
/// Total number of records added.
const INDEX_NUM_RECORDS: usize = 0;
/// Number of records that have been shifted off.
const INDEX_NUM_SHIFTED_OFF: usize = 1;
/// The head is the number of initialized bytes in SharedQueue.
/// It grows monotonically.
const INDEX_HEAD: usize = 2;
const INDEX_OFFSETS: usize = 3;
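/// First u32 index of the record data area. Each record takes two u32 slots
/// in the offsets table (its end byte offset and its op id), hence the
/// 2 * MAX_RECORDS stride.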
const INDEX_RECORDS: usize = INDEX_OFFSETS + 2 * MAX_RECORDS;
/// Byte offset of where the records begin. Also where the head starts.
const HEAD_INIT: usize = 4 * INDEX_RECORDS;
/// A rough guess at how big we should make the shared buffer in bytes.
pub const RECOMMENDED_SIZE: usize = 128 * MAX_RECORDS;
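
/// Wraps the SharedArrayBuffer backing store that is shared with the
/// JavaScript side.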
pub struct SharedQueue {
  buf: v8::SharedRef<v8::BackingStore>,
}

impl SharedQueue {
  pub fn new(len: usize) -> Self {
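    // Reserve space for the header and offsets table (HEAD_INIT bytes) plus
    // `len` bytes for record data.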
    let buf = vec![0; HEAD_INIT + len].into_boxed_slice();
    let buf = v8::SharedArrayBuffer::new_backing_store_from_boxed_slice(buf);
    let mut q = Self {
      buf: buf.make_shared(),
    };
    q.reset();
    q
  }

  pub fn get_backing_store(&mut self) -> &mut v8::SharedRef<v8::BackingStore> {
    &mut self.buf
  }

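  /// Returns the entire shared buffer as a byte slice.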
  pub fn bytes(&self) -> &[u8] {
    unsafe {
      bindings::get_backing_store_slice(&self.buf, 0, self.buf.byte_length())
    }
  }

  pub fn bytes_mut(&mut self) -> &mut [u8] {
    unsafe {
      bindings::get_backing_store_slice_mut(
        &self.buf,
        0,
        self.buf.byte_length(),
      )
    }
  }

  fn reset(&mut self) {
    debug!("rust:shared_queue:reset");
    let s: &mut [u32] = self.as_u32_slice_mut();
    s[INDEX_NUM_RECORDS] = 0;
    s[INDEX_NUM_SHIFTED_OFF] = 0;
    s[INDEX_HEAD] = HEAD_INIT as u32;
  }

  fn as_u32_slice(&self) -> &[u32] {
    let p = self.bytes().as_ptr();
    // Assert pointer is 32 bit aligned before casting.
    assert_eq!((p as usize) % std::mem::align_of::<u32>(), 0);
    #[allow(clippy::cast_ptr_alignment)]
    let p32 = p as *const u32;
    unsafe { std::slice::from_raw_parts(p32, self.bytes().len() / 4) }
  }

  fn as_u32_slice_mut(&mut self) -> &mut [u32] {
    let p = self.bytes_mut().as_mut_ptr();
    // Assert pointer is 32 bit aligned before casting.
    assert_eq!((p as usize) % std::mem::align_of::<u32>(), 0);
    #[allow(clippy::cast_ptr_alignment)]
    let p32 = p as *mut u32;
    unsafe { std::slice::from_raw_parts_mut(p32, self.bytes().len() / 4) }
  }

  pub fn size(&self) -> usize {
    let s = self.as_u32_slice();
    (s[INDEX_NUM_RECORDS] - s[INDEX_NUM_SHIFTED_OFF]) as usize
  }

  fn num_records(&self) -> usize {
    let s = self.as_u32_slice();
    s[INDEX_NUM_RECORDS] as usize
  }

  fn head(&self) -> usize {
    let s = self.as_u32_slice();
    s[INDEX_HEAD] as usize
  }

  fn num_shifted_off(&self) -> usize {
    let s = self.as_u32_slice();
    s[INDEX_NUM_SHIFTED_OFF] as usize
  }

  fn set_meta(&mut self, index: usize, end: usize, op_id: OpId) {
    let s = self.as_u32_slice_mut();
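    // Each record gets two u32 slots: its end byte offset followed by its
    // op id.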
    s[INDEX_OFFSETS + 2 * index] = end as u32;
    s[INDEX_OFFSETS + 2 * index + 1] = op_id.try_into().unwrap();
  }

  #[cfg(test)]
  fn get_meta(&self, index: usize) -> Option<(OpId, usize)> {
    if index < self.num_records() {
      let s = self.as_u32_slice();
      let end = s[INDEX_OFFSETS + 2 * index] as usize;
      let op_id = s[INDEX_OFFSETS + 2 * index + 1] as OpId;
      Some((op_id, end))
    } else {
      None
    }
  }

  #[cfg(test)]
  fn get_offset(&self, index: usize) -> Option<usize> {
    if index < self.num_records() {
      Some(if index == 0 {
        HEAD_INIT
      } else {
        let s = self.as_u32_slice();
        let prev_end = s[INDEX_OFFSETS + 2 * (index - 1)] as usize;
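        // Records are padded to 4 bytes, so this record starts at the
        // previous record's end rounded up to a multiple of 4.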
        (prev_end + 3) & !3
      })
    } else {
      None
    }
  }

  /// Returns None if empty.
  #[cfg(test)]
  pub fn shift(&mut self) -> Option<(OpId, &[u8])> {
    let u32_slice = self.as_u32_slice();
    let i = u32_slice[INDEX_NUM_SHIFTED_OFF] as usize;
    if self.size() == 0 {
      assert_eq!(i, 0);
      return None;
    }

    let off = self.get_offset(i).unwrap();
    let (op_id, end) = self.get_meta(i).unwrap();
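    // If this is the last pending record, reset the queue so its buffer
    // space can be reused; otherwise just advance NUM_SHIFTED_OFF.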
    if self.size() > 1 {
      let u32_slice = self.as_u32_slice_mut();
      u32_slice[INDEX_NUM_SHIFTED_OFF] += 1;
    } else {
      self.reset();
    }
    debug!(
      "rust:shared_queue:shift: num_records={}, num_shifted_off={}, head={}",
      self.num_records(),
      self.num_shifted_off(),
      self.head()
    );
    Some((op_id, &self.bytes()[off..end]))
  }

  /// Because the JS side may cast a popped message to an Int32Array, every
  /// message must be aligned to 4 bytes.
  pub fn push(&mut self, op_id: OpId, record: &[u8]) -> bool {
    let off = self.head();
    assert_eq!(off % 4, 0);
    let end = off + record.len();
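    // The head will advance to `end` rounded up to a 4-byte boundary so the
    // next record stays aligned.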
    let aligned_end = (end + 3) & !3;
    debug!(
      "rust:shared_queue:pre-push: op={}, off={}, end={}, len={}, aligned_end={}",
      op_id,
      off,
      end,
      record.len(),
      aligned_end,
    );
    let index = self.num_records();
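    // Reject the push if the record would overrun the buffer or the offsets
    // table already holds MAX_RECORDS entries.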
    if aligned_end > self.bytes().len() || index >= MAX_RECORDS {
      debug!("WARNING the sharedQueue overflowed");
      return false;
    }
    assert_eq!(aligned_end % 4, 0);
    self.set_meta(index, end, op_id);
    assert_eq!(end - off, record.len());
    self.bytes_mut()[off..end].copy_from_slice(record);
    let u32_slice = self.as_u32_slice_mut();
    u32_slice[INDEX_NUM_RECORDS] += 1;
    u32_slice[INDEX_HEAD] = aligned_end as u32;
    debug!(
      "rust:shared_queue:push: num_records={}, num_shifted_off={}, head={}",
      self.num_records(),
      self.num_shifted_off(),
      self.head()
    );
    true
  }
}

#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn basic() {
    let mut q = SharedQueue::new(RECOMMENDED_SIZE);

    let h = q.head();
    assert!(h > 0);

    let r = vec![1u8, 2, 3, 4].into_boxed_slice();
    let len = r.len() + h;
    assert!(q.push(0, &r));
    assert_eq!(q.head(), len);

    let r = vec![5, 6, 7, 8].into_boxed_slice();
    assert!(q.push(0, &r));

    let r = vec![9, 10, 11, 12].into_boxed_slice();
    assert!(q.push(0, &r));
    assert_eq!(q.num_records(), 3);
    assert_eq!(q.size(), 3);

    let (_op_id, r) = q.shift().unwrap();
    assert_eq!(r, vec![1, 2, 3, 4].as_slice());
    assert_eq!(q.num_records(), 3);
    assert_eq!(q.size(), 2);

    let (_op_id, r) = q.shift().unwrap();
    assert_eq!(r, vec![5, 6, 7, 8].as_slice());
    assert_eq!(q.num_records(), 3);
    assert_eq!(q.size(), 1);

    let (_op_id, r) = q.shift().unwrap();
    assert_eq!(r, vec![9, 10, 11, 12].as_slice());
    assert_eq!(q.num_records(), 0);
    assert_eq!(q.size(), 0);

    assert!(q.shift().is_none());
    assert!(q.shift().is_none());

    assert_eq!(q.num_records(), 0);
    assert_eq!(q.size(), 0);
  }

  fn alloc_buf(byte_length: usize) -> Box<[u8]> {
    vec![0; byte_length].into_boxed_slice()
  }

  #[test]
  fn overflow() {
    let mut q = SharedQueue::new(RECOMMENDED_SIZE);
    assert!(q.push(0, &alloc_buf(RECOMMENDED_SIZE - 5)));
    assert_eq!(q.size(), 1);
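    // Only 4 bytes remain after the first record is aligned up, so a 6-byte
    // record overflows while a 1-byte record still fits.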
    assert!(!q.push(0, &alloc_buf(6)));
    assert_eq!(q.size(), 1);
    assert!(q.push(0, &alloc_buf(1)));
    assert_eq!(q.size(), 2);

    let (_op_id, buf) = q.shift().unwrap();
    assert_eq!(buf.len(), RECOMMENDED_SIZE - 5);
    assert_eq!(q.size(), 1);

    assert!(!q.push(0, &alloc_buf(1)));

    let (_op_id, buf) = q.shift().unwrap();
    assert_eq!(buf.len(), 1);
    assert_eq!(q.size(), 0);
  }

  #[test]
  fn full_records() {
    let mut q = SharedQueue::new(RECOMMENDED_SIZE);
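    // MAX_RECORDS one-byte records fill the offsets table long before the
    // record buffer itself runs out of space.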
    for _ in 0..MAX_RECORDS {
      assert!(q.push(0, &alloc_buf(1)))
    }
    assert_eq!(q.push(0, &alloc_buf(1)), false);
    // Even if we shift one off, we still cannot push a new record.
    let _ignored = q.shift().unwrap();
    assert_eq!(q.push(0, &alloc_buf(1)), false);
  }

  #[test]
  fn allow_any_buf_length() {
    let mut q = SharedQueue::new(RECOMMENDED_SIZE);
    // Check that a `record` whose length is not a multiple of 4 does not
    // cause a panic, and that records always stay aligned to 4 bytes.
    for i in 1..9 {
      q.push(0, &alloc_buf(i));
      assert_eq!(q.num_records(), i);
      assert_eq!(q.head() % 4, 0);
    }
  }
}