2021-01-10 21:59:07 -05:00
|
|
|
// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
|
2021-04-13 07:25:21 -04:00
|
|
|
|
2021-08-16 08:29:54 -04:00
|
|
|
use crate::ops::TestingFeaturesEnabled;
|
2021-03-17 17:45:12 -04:00
|
|
|
use crate::permissions::resolve_read_allowlist;
|
|
|
|
use crate::permissions::resolve_write_allowlist;
|
2021-04-13 07:25:21 -04:00
|
|
|
use crate::permissions::EnvDescriptor;
|
2021-08-06 17:28:10 -04:00
|
|
|
use crate::permissions::FfiDescriptor;
|
2021-03-21 08:49:58 -04:00
|
|
|
use crate::permissions::NetDescriptor;
|
2021-01-06 15:31:16 -05:00
|
|
|
use crate::permissions::PermissionState;
|
2020-05-04 14:10:59 -04:00
|
|
|
use crate::permissions::Permissions;
|
2021-03-21 08:49:58 -04:00
|
|
|
use crate::permissions::ReadDescriptor;
|
2021-04-09 18:12:00 -04:00
|
|
|
use crate::permissions::RunDescriptor;
|
2021-01-06 15:31:16 -05:00
|
|
|
use crate::permissions::UnaryPermission;
|
2021-03-21 08:49:58 -04:00
|
|
|
use crate::permissions::UnitPermission;
|
|
|
|
use crate::permissions::WriteDescriptor;
|
2020-12-06 22:30:40 -05:00
|
|
|
use crate::web_worker::run_web_worker;
|
2021-06-22 10:30:16 -04:00
|
|
|
use crate::web_worker::SendableWebWorkerHandle;
|
2020-11-26 09:17:45 -05:00
|
|
|
use crate::web_worker::WebWorker;
|
|
|
|
use crate::web_worker::WebWorkerHandle;
|
2021-08-16 08:29:54 -04:00
|
|
|
use crate::web_worker::WebWorkerType;
|
2021-06-22 10:30:16 -04:00
|
|
|
use crate::web_worker::WorkerControlEvent;
|
2021-05-11 15:09:09 -04:00
|
|
|
use crate::web_worker::WorkerId;
|
2021-01-06 15:31:16 -05:00
|
|
|
use deno_core::error::custom_error;
|
2020-09-14 12:48:57 -04:00
|
|
|
use deno_core::error::AnyError;
|
2021-05-02 19:22:57 -04:00
|
|
|
use deno_core::op_async;
|
|
|
|
use deno_core::op_sync;
|
2021-01-06 15:31:16 -05:00
|
|
|
use deno_core::serde::de;
|
|
|
|
use deno_core::serde::de::SeqAccess;
|
|
|
|
use deno_core::serde::Deserialize;
|
|
|
|
use deno_core::serde::Deserializer;
|
2021-05-02 19:22:57 -04:00
|
|
|
use deno_core::Extension;
|
2020-04-23 05:51:07 -04:00
|
|
|
use deno_core::ModuleSpecifier;
|
2020-09-10 09:57:45 -04:00
|
|
|
use deno_core::OpState;
|
2021-06-22 10:30:16 -04:00
|
|
|
use deno_web::JsMessageData;
|
2021-03-26 12:34:25 -04:00
|
|
|
use log::debug;
|
2020-09-10 09:57:45 -04:00
|
|
|
use std::cell::RefCell;
|
2020-09-19 19:17:35 -04:00
|
|
|
use std::collections::HashMap;
|
2021-01-06 15:31:16 -05:00
|
|
|
use std::collections::HashSet;
|
2019-08-14 11:03:02 -04:00
|
|
|
use std::convert::From;
|
2021-01-06 15:31:16 -05:00
|
|
|
use std::fmt;
|
|
|
|
use std::path::PathBuf;
|
2020-08-18 12:30:13 -04:00
|
|
|
use std::rc::Rc;
|
2020-12-11 12:49:26 -05:00
|
|
|
use std::sync::Arc;
|
2020-02-18 14:47:11 -05:00
|
|
|
use std::thread::JoinHandle;
|
2019-10-11 14:41:54 -04:00
|
|
|
|
2020-12-11 12:49:26 -05:00
|
|
|
/// Everything a `CreateWebWorkerCb` needs in order to construct a new
/// `WebWorker` on its spawned thread (see `op_create_worker`).
pub struct CreateWebWorkerArgs {
  pub name: String,
  pub worker_id: WorkerId,
  // Permissions of the thread that spawned this worker, captured alongside
  // the worker's own `permissions`.
  // NOTE(review): presumably consumed by the callback when wiring up nested
  // worker permission checks — confirm in the callback implementation.
  pub parent_permissions: Permissions,
  pub permissions: Permissions,
  pub main_module: ModuleSpecifier,
  pub use_deno_namespace: bool,
  pub worker_type: WebWorkerType,
}
|
|
|
|
|
2021-06-22 10:30:16 -04:00
|
|
|
/// Callback that builds a `WebWorker` plus a handle that can be shipped back
/// to the host thread. It must be `Sync + Send` because `op_create_worker`
/// invokes it from inside the newly spawned worker thread.
pub type CreateWebWorkerCb = dyn Fn(CreateWebWorkerArgs) -> (WebWorker, SendableWebWorkerHandle)
  + Sync
  + Send;
|
2020-12-11 12:49:26 -05:00
|
|
|
|
|
|
|
/// A holder for callback that is used to create a new
/// WebWorker. It's a struct instead of a type alias
/// because `GothamState` used in `OpState` overrides
/// value if type aliases have the same underlying type
#[derive(Clone)]
pub struct CreateWebWorkerCbHolder(Arc<CreateWebWorkerCb>);
|
|
|
|
|
2021-01-06 15:31:16 -05:00
|
|
|
/// Host-side bookkeeping for one spawned web worker thread: the OS thread's
/// join handle, the thread-safe worker handle, and close-tracking for the two
/// channels the host shares with the worker.
pub struct WorkerThread {
  // It's an Option so we can take the value before dropping the WorkerThread.
  join_handle: Option<JoinHandle<Result<(), AnyError>>>,
  worker_handle: WebWorkerHandle,

  // A WorkerThread that hasn't been explicitly terminated can only be removed
  // from the WorkersTable once close messages have been received for both the
  // control and message channels. See `close_channel`.
  ctrl_closed: bool,
  message_closed: bool,
}
|
|
|
|
|
2021-09-22 12:02:15 -04:00
|
|
|
impl WorkerThread {
  // Terminates the worker and blocks until its thread has fully exited.
  // Consumes `self`, so a terminated `WorkerThread` can never be reused.
  fn terminate(mut self) {
    // Signal the worker's event loop to stop, then wait for the thread to
    // finish. The outer `expect` covers a panicked thread; the inner one
    // covers an `Err` returned by the worker's event loop
    // (`JoinHandle<Result<(), AnyError>>`).
    self.worker_handle.clone().terminate();
    self
      .join_handle
      .take()
      .unwrap()
      .join()
      .expect("Worker thread panicked")
      .expect("Panic in worker event loop");

    // Optimization so the Drop impl doesn't try to terminate the worker handle
    // again.
    self.ctrl_closed = true;
    self.message_closed = true;
  }
}
|
|
|
|
|
|
|
|
impl Drop for WorkerThread {
|
|
|
|
fn drop(&mut self) {
|
|
|
|
// If either of the channels is closed, the worker thread has at least
|
|
|
|
// started closing, and its event loop won't start another run.
|
|
|
|
if !(self.ctrl_closed || self.message_closed) {
|
|
|
|
self.worker_handle.clone().terminate();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-05-11 15:09:09 -04:00
|
|
|
/// Maps live worker ids to their host-side `WorkerThread` entries.
pub type WorkersTable = HashMap<WorkerId, WorkerThread>;
|
2021-01-06 15:31:16 -05:00
|
|
|
|
2021-05-11 15:09:09 -04:00
|
|
|
/// Builds the worker-host extension: registers the worker ops and seeds
/// `OpState` with an empty `WorkersTable`, an initial `WorkerId`, and the
/// callback used to construct new `WebWorker`s.
pub fn init(create_web_worker_cb: Arc<CreateWebWorkerCb>) -> Extension {
  Extension::builder()
    .state(move |state| {
      state.put::<WorkersTable>(WorkersTable::default());
      state.put::<WorkerId>(WorkerId::default());

      // Wrapped in `CreateWebWorkerCbHolder` because `OpState` keys values by
      // type; see the holder's doc comment.
      let create_module_loader =
        CreateWebWorkerCbHolder(create_web_worker_cb.clone());
      state.put::<CreateWebWorkerCbHolder>(create_module_loader);

      Ok(())
    })
    .ops(vec![
      ("op_create_worker", op_sync(op_create_worker)),
      (
        "op_host_terminate_worker",
        op_sync(op_host_terminate_worker),
      ),
      ("op_host_post_message", op_sync(op_host_post_message)),
      ("op_host_recv_ctrl", op_async(op_host_recv_ctrl)),
      ("op_host_recv_message", op_async(op_host_recv_message)),
    ])
    .build()
}
|
|
|
|
|
2021-03-17 17:45:12 -04:00
|
|
|
fn merge_boolean_permission(
|
2021-03-21 08:49:58 -04:00
|
|
|
mut main: UnitPermission,
|
2021-03-19 13:27:41 -04:00
|
|
|
worker: Option<PermissionState>,
|
2021-03-21 08:49:58 -04:00
|
|
|
) -> Result<UnitPermission, AnyError> {
|
2021-03-19 13:27:41 -04:00
|
|
|
if let Some(worker) = worker {
|
|
|
|
if worker < main.state {
|
|
|
|
return Err(custom_error(
|
|
|
|
"PermissionDenied",
|
|
|
|
"Can't escalate parent thread permissions",
|
|
|
|
));
|
|
|
|
} else {
|
|
|
|
main.state = worker;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(main)
|
2021-01-06 15:31:16 -05:00
|
|
|
}
|
|
|
|
|
2021-03-19 13:27:41 -04:00
|
|
|
fn merge_net_permission(
|
2021-03-21 08:49:58 -04:00
|
|
|
mut main: UnaryPermission<NetDescriptor>,
|
|
|
|
worker: Option<UnaryPermission<NetDescriptor>>,
|
|
|
|
) -> Result<UnaryPermission<NetDescriptor>, AnyError> {
|
2021-03-19 13:27:41 -04:00
|
|
|
if let Some(worker) = worker {
|
|
|
|
if (worker.global_state < main.global_state)
|
|
|
|
|| !worker
|
|
|
|
.granted_list
|
|
|
|
.iter()
|
|
|
|
.all(|x| main.check(&(&x.0, x.1)).is_ok())
|
|
|
|
{
|
|
|
|
return Err(custom_error(
|
2021-01-06 15:31:16 -05:00
|
|
|
"PermissionDenied",
|
|
|
|
"Can't escalate parent thread permissions",
|
2021-03-19 13:27:41 -04:00
|
|
|
));
|
|
|
|
} else {
|
|
|
|
main.global_state = worker.global_state;
|
|
|
|
main.granted_list = worker.granted_list;
|
|
|
|
}
|
2021-01-06 15:31:16 -05:00
|
|
|
}
|
2021-03-19 13:27:41 -04:00
|
|
|
Ok(main)
|
2021-01-06 15:31:16 -05:00
|
|
|
}
|
|
|
|
|
2021-03-19 13:27:41 -04:00
|
|
|
fn merge_read_permission(
|
2021-03-21 08:49:58 -04:00
|
|
|
mut main: UnaryPermission<ReadDescriptor>,
|
|
|
|
worker: Option<UnaryPermission<ReadDescriptor>>,
|
|
|
|
) -> Result<UnaryPermission<ReadDescriptor>, AnyError> {
|
2021-03-19 13:27:41 -04:00
|
|
|
if let Some(worker) = worker {
|
|
|
|
if (worker.global_state < main.global_state)
|
|
|
|
|| !worker
|
|
|
|
.granted_list
|
|
|
|
.iter()
|
|
|
|
.all(|x| main.check(x.0.as_path()).is_ok())
|
|
|
|
{
|
|
|
|
return Err(custom_error(
|
2021-03-17 17:45:12 -04:00
|
|
|
"PermissionDenied",
|
|
|
|
"Can't escalate parent thread permissions",
|
2021-03-19 13:27:41 -04:00
|
|
|
));
|
|
|
|
} else {
|
|
|
|
main.global_state = worker.global_state;
|
|
|
|
main.granted_list = worker.granted_list;
|
|
|
|
}
|
2021-03-17 17:45:12 -04:00
|
|
|
}
|
2021-03-19 13:27:41 -04:00
|
|
|
Ok(main)
|
2021-01-06 15:31:16 -05:00
|
|
|
}
|
|
|
|
|
2021-03-19 13:27:41 -04:00
|
|
|
fn merge_write_permission(
|
2021-03-21 08:49:58 -04:00
|
|
|
mut main: UnaryPermission<WriteDescriptor>,
|
|
|
|
worker: Option<UnaryPermission<WriteDescriptor>>,
|
|
|
|
) -> Result<UnaryPermission<WriteDescriptor>, AnyError> {
|
2021-03-19 13:27:41 -04:00
|
|
|
if let Some(worker) = worker {
|
|
|
|
if (worker.global_state < main.global_state)
|
|
|
|
|| !worker
|
|
|
|
.granted_list
|
|
|
|
.iter()
|
|
|
|
.all(|x| main.check(x.0.as_path()).is_ok())
|
|
|
|
{
|
|
|
|
return Err(custom_error(
|
2021-01-06 15:31:16 -05:00
|
|
|
"PermissionDenied",
|
|
|
|
"Can't escalate parent thread permissions",
|
2021-03-19 13:27:41 -04:00
|
|
|
));
|
|
|
|
} else {
|
|
|
|
main.global_state = worker.global_state;
|
|
|
|
main.granted_list = worker.granted_list;
|
|
|
|
}
|
2021-01-06 15:31:16 -05:00
|
|
|
}
|
2021-03-19 13:27:41 -04:00
|
|
|
Ok(main)
|
2021-01-06 15:31:16 -05:00
|
|
|
}
|
|
|
|
|
2021-04-13 07:25:21 -04:00
|
|
|
fn merge_env_permission(
|
|
|
|
mut main: UnaryPermission<EnvDescriptor>,
|
|
|
|
worker: Option<UnaryPermission<EnvDescriptor>>,
|
|
|
|
) -> Result<UnaryPermission<EnvDescriptor>, AnyError> {
|
|
|
|
if let Some(worker) = worker {
|
|
|
|
if (worker.global_state < main.global_state)
|
|
|
|
|| !worker.granted_list.iter().all(|x| main.check(&x.0).is_ok())
|
|
|
|
{
|
|
|
|
return Err(custom_error(
|
|
|
|
"PermissionDenied",
|
|
|
|
"Can't escalate parent thread permissions",
|
|
|
|
));
|
|
|
|
} else {
|
|
|
|
main.global_state = worker.global_state;
|
|
|
|
main.granted_list = worker.granted_list;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(main)
|
|
|
|
}
|
|
|
|
|
2021-04-09 18:12:00 -04:00
|
|
|
fn merge_run_permission(
|
|
|
|
mut main: UnaryPermission<RunDescriptor>,
|
|
|
|
worker: Option<UnaryPermission<RunDescriptor>>,
|
|
|
|
) -> Result<UnaryPermission<RunDescriptor>, AnyError> {
|
|
|
|
if let Some(worker) = worker {
|
|
|
|
if (worker.global_state < main.global_state)
|
|
|
|
|| !worker.granted_list.iter().all(|x| main.check(&x.0).is_ok())
|
|
|
|
{
|
|
|
|
return Err(custom_error(
|
|
|
|
"PermissionDenied",
|
|
|
|
"Can't escalate parent thread permissions",
|
|
|
|
));
|
|
|
|
} else {
|
|
|
|
main.global_state = worker.global_state;
|
|
|
|
main.granted_list = worker.granted_list;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(main)
|
|
|
|
}
|
|
|
|
|
2021-08-06 17:28:10 -04:00
|
|
|
fn merge_ffi_permission(
|
|
|
|
mut main: UnaryPermission<FfiDescriptor>,
|
|
|
|
worker: Option<UnaryPermission<FfiDescriptor>>,
|
|
|
|
) -> Result<UnaryPermission<FfiDescriptor>, AnyError> {
|
|
|
|
if let Some(worker) = worker {
|
|
|
|
if (worker.global_state < main.global_state)
|
|
|
|
|| !worker.granted_list.iter().all(|x| main.check(&x.0).is_ok())
|
|
|
|
{
|
|
|
|
return Err(custom_error(
|
|
|
|
"PermissionDenied",
|
|
|
|
"Can't escalate parent thread permissions",
|
|
|
|
));
|
|
|
|
} else {
|
|
|
|
main.global_state = worker.global_state;
|
|
|
|
main.granted_list = worker.granted_list;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Ok(main)
|
|
|
|
}
|
|
|
|
|
2021-04-25 17:38:59 -04:00
|
|
|
/// Builds a worker's `Permissions` by merging each field of the parent's
/// permissions with what the worker requested, rejecting any request that
/// would escalate past the parent (each `merge_*` helper errors on
/// escalation, propagated via `?`).
pub fn create_worker_permissions(
  main_perms: Permissions,
  worker_perms: PermissionsArg,
) -> Result<Permissions, AnyError> {
  Ok(Permissions {
    env: merge_env_permission(main_perms.env, worker_perms.env)?,
    hrtime: merge_boolean_permission(main_perms.hrtime, worker_perms.hrtime)?,
    net: merge_net_permission(main_perms.net, worker_perms.net)?,
    ffi: merge_ffi_permission(main_perms.ffi, worker_perms.ffi)?,
    read: merge_read_permission(main_perms.read, worker_perms.read)?,
    run: merge_run_permission(main_perms.run, worker_perms.run)?,
    write: merge_write_permission(main_perms.write, worker_perms.write)?,
  })
}
|
|
|
|
|
|
|
|
/// Permissions payload received by `op_create_worker`
/// (`CreateWorkerArgs::permissions`). Each field is either absent, a
/// boolean, or an allowlist, normalized by the `as_unary_*` /
/// `as_permission_state` deserializers below.
#[derive(Debug, Deserialize)]
pub struct PermissionsArg {
  #[serde(default, deserialize_with = "as_unary_env_permission")]
  env: Option<UnaryPermission<EnvDescriptor>>,
  #[serde(default, deserialize_with = "as_permission_state")]
  hrtime: Option<PermissionState>,
  #[serde(default, deserialize_with = "as_unary_net_permission")]
  net: Option<UnaryPermission<NetDescriptor>>,
  #[serde(default, deserialize_with = "as_unary_ffi_permission")]
  ffi: Option<UnaryPermission<FfiDescriptor>>,
  #[serde(default, deserialize_with = "as_unary_read_permission")]
  read: Option<UnaryPermission<ReadDescriptor>>,
  #[serde(default, deserialize_with = "as_unary_run_permission")]
  run: Option<UnaryPermission<RunDescriptor>>,
  #[serde(default, deserialize_with = "as_unary_write_permission")]
  write: Option<UnaryPermission<WriteDescriptor>>,
}
|
|
|
|
|
|
|
|
fn as_permission_state<'de, D>(
|
|
|
|
deserializer: D,
|
|
|
|
) -> Result<Option<PermissionState>, D::Error>
|
|
|
|
where
|
|
|
|
D: Deserializer<'de>,
|
|
|
|
{
|
|
|
|
let value: bool = Deserialize::deserialize(deserializer)?;
|
|
|
|
|
|
|
|
match value {
|
|
|
|
true => Ok(Some(PermissionState::Granted)),
|
|
|
|
false => Ok(Some(PermissionState::Denied)),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Intermediate form produced by `ParseBooleanOrStringVec`: a blanket state
/// (`global_state`) plus an explicit allowlist (`paths`, empty for boolean
/// inputs).
struct UnaryPermissionBase {
  global_state: PermissionState,
  paths: Vec<String>,
}
|
|
|
|
|
|
|
|
/// Serde visitor accepting a boolean, a unit/missing value, or a sequence of
/// strings; see the `de::Visitor` impl below.
struct ParseBooleanOrStringVec;
|
|
|
|
|
|
|
|
impl<'de> de::Visitor<'de> for ParseBooleanOrStringVec {
|
|
|
|
type Value = UnaryPermissionBase;
|
|
|
|
|
|
|
|
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
|
|
|
formatter.write_str("a vector of strings or a boolean")
|
|
|
|
}
|
|
|
|
|
2021-03-31 10:37:38 -04:00
|
|
|
// visit_unit maps undefined/missing values to false
|
|
|
|
fn visit_unit<E>(self) -> Result<UnaryPermissionBase, E>
|
|
|
|
where
|
|
|
|
E: de::Error,
|
|
|
|
{
|
|
|
|
self.visit_bool(false)
|
|
|
|
}
|
|
|
|
|
2021-01-06 15:31:16 -05:00
|
|
|
fn visit_bool<E>(self, v: bool) -> Result<UnaryPermissionBase, E>
|
|
|
|
where
|
|
|
|
E: de::Error,
|
|
|
|
{
|
|
|
|
Ok(UnaryPermissionBase {
|
|
|
|
global_state: match v {
|
|
|
|
true => PermissionState::Granted,
|
|
|
|
false => PermissionState::Denied,
|
|
|
|
},
|
|
|
|
paths: Vec::new(),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
fn visit_seq<V>(self, mut visitor: V) -> Result<UnaryPermissionBase, V::Error>
|
|
|
|
where
|
|
|
|
V: SeqAccess<'de>,
|
|
|
|
{
|
|
|
|
let mut vec: Vec<String> = Vec::new();
|
|
|
|
|
|
|
|
let mut value = visitor.next_element::<String>()?;
|
|
|
|
while value.is_some() {
|
|
|
|
vec.push(value.unwrap());
|
|
|
|
value = visitor.next_element()?;
|
|
|
|
}
|
|
|
|
Ok(UnaryPermissionBase {
|
|
|
|
global_state: PermissionState::Prompt,
|
|
|
|
paths: vec,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-17 17:45:12 -04:00
|
|
|
fn as_unary_net_permission<'de, D>(
|
2021-01-06 15:31:16 -05:00
|
|
|
deserializer: D,
|
2021-03-21 08:49:58 -04:00
|
|
|
) -> Result<Option<UnaryPermission<NetDescriptor>>, D::Error>
|
2021-01-06 15:31:16 -05:00
|
|
|
where
|
|
|
|
D: Deserializer<'de>,
|
|
|
|
{
|
|
|
|
let value: UnaryPermissionBase =
|
|
|
|
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
|
|
|
|
|
2021-03-21 08:49:58 -04:00
|
|
|
let allowed: HashSet<NetDescriptor> = value
|
2021-03-17 17:45:12 -04:00
|
|
|
.paths
|
|
|
|
.into_iter()
|
2021-03-21 08:49:58 -04:00
|
|
|
.map(NetDescriptor::from_string)
|
2021-03-17 17:45:12 -04:00
|
|
|
.collect();
|
2021-01-06 15:31:16 -05:00
|
|
|
|
2021-03-21 08:49:58 -04:00
|
|
|
Ok(Some(UnaryPermission::<NetDescriptor> {
|
2021-01-06 15:31:16 -05:00
|
|
|
global_state: value.global_state,
|
|
|
|
granted_list: allowed,
|
|
|
|
..Default::default()
|
|
|
|
}))
|
|
|
|
}
|
|
|
|
|
2021-03-17 17:45:12 -04:00
|
|
|
fn as_unary_read_permission<'de, D>(
|
|
|
|
deserializer: D,
|
2021-03-21 08:49:58 -04:00
|
|
|
) -> Result<Option<UnaryPermission<ReadDescriptor>>, D::Error>
|
2021-03-17 17:45:12 -04:00
|
|
|
where
|
|
|
|
D: Deserializer<'de>,
|
|
|
|
{
|
|
|
|
let value: UnaryPermissionBase =
|
|
|
|
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
|
|
|
|
|
|
|
|
let paths: Vec<PathBuf> =
|
|
|
|
value.paths.into_iter().map(PathBuf::from).collect();
|
|
|
|
|
2021-03-21 08:49:58 -04:00
|
|
|
Ok(Some(UnaryPermission::<ReadDescriptor> {
|
2021-03-17 17:45:12 -04:00
|
|
|
global_state: value.global_state,
|
|
|
|
granted_list: resolve_read_allowlist(&Some(paths)),
|
|
|
|
..Default::default()
|
|
|
|
}))
|
|
|
|
}
|
|
|
|
|
|
|
|
fn as_unary_write_permission<'de, D>(
|
2021-01-06 15:31:16 -05:00
|
|
|
deserializer: D,
|
2021-03-21 08:49:58 -04:00
|
|
|
) -> Result<Option<UnaryPermission<WriteDescriptor>>, D::Error>
|
2021-01-06 15:31:16 -05:00
|
|
|
where
|
|
|
|
D: Deserializer<'de>,
|
|
|
|
{
|
|
|
|
let value: UnaryPermissionBase =
|
|
|
|
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
|
|
|
|
|
|
|
|
let paths: Vec<PathBuf> =
|
|
|
|
value.paths.into_iter().map(PathBuf::from).collect();
|
|
|
|
|
2021-03-21 08:49:58 -04:00
|
|
|
Ok(Some(UnaryPermission::<WriteDescriptor> {
|
2021-01-06 15:31:16 -05:00
|
|
|
global_state: value.global_state,
|
2021-03-17 17:45:12 -04:00
|
|
|
granted_list: resolve_write_allowlist(&Some(paths)),
|
2021-01-06 15:31:16 -05:00
|
|
|
..Default::default()
|
|
|
|
}))
|
|
|
|
}
|
2019-08-14 11:03:02 -04:00
|
|
|
|
2021-04-13 07:25:21 -04:00
|
|
|
fn as_unary_env_permission<'de, D>(
|
|
|
|
deserializer: D,
|
|
|
|
) -> Result<Option<UnaryPermission<EnvDescriptor>>, D::Error>
|
|
|
|
where
|
|
|
|
D: Deserializer<'de>,
|
|
|
|
{
|
|
|
|
let value: UnaryPermissionBase =
|
|
|
|
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
|
|
|
|
|
|
|
|
Ok(Some(UnaryPermission::<EnvDescriptor> {
|
|
|
|
global_state: value.global_state,
|
|
|
|
granted_list: value
|
|
|
|
.paths
|
|
|
|
.into_iter()
|
|
|
|
.map(|env| EnvDescriptor(env.to_uppercase()))
|
|
|
|
.collect(),
|
|
|
|
..Default::default()
|
|
|
|
}))
|
|
|
|
}
|
|
|
|
|
2021-04-09 18:12:00 -04:00
|
|
|
fn as_unary_run_permission<'de, D>(
|
|
|
|
deserializer: D,
|
|
|
|
) -> Result<Option<UnaryPermission<RunDescriptor>>, D::Error>
|
|
|
|
where
|
|
|
|
D: Deserializer<'de>,
|
|
|
|
{
|
|
|
|
let value: UnaryPermissionBase =
|
|
|
|
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
|
|
|
|
|
|
|
|
Ok(Some(UnaryPermission::<RunDescriptor> {
|
|
|
|
global_state: value.global_state,
|
|
|
|
granted_list: value.paths.into_iter().map(RunDescriptor).collect(),
|
|
|
|
..Default::default()
|
|
|
|
}))
|
|
|
|
}
|
|
|
|
|
2021-08-06 17:28:10 -04:00
|
|
|
fn as_unary_ffi_permission<'de, D>(
|
|
|
|
deserializer: D,
|
|
|
|
) -> Result<Option<UnaryPermission<FfiDescriptor>>, D::Error>
|
|
|
|
where
|
|
|
|
D: Deserializer<'de>,
|
|
|
|
{
|
|
|
|
let value: UnaryPermissionBase =
|
|
|
|
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
|
|
|
|
|
|
|
|
Ok(Some(UnaryPermission::<FfiDescriptor> {
|
|
|
|
global_state: value.global_state,
|
|
|
|
granted_list: value.paths.into_iter().map(FfiDescriptor).collect(),
|
|
|
|
..Default::default()
|
|
|
|
}))
|
|
|
|
}
|
|
|
|
|
2019-08-26 08:50:21 -04:00
|
|
|
/// Arguments accepted by `op_create_worker`.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CreateWorkerArgs {
  // When true, `source_code` is used instead of fetching `specifier`.
  has_source_code: bool,
  name: Option<String>,
  // Optional permission overrides; absence means "inherit the parent's".
  permissions: Option<PermissionsArg>,
  source_code: String,
  specifier: String,
  use_deno_namespace: bool,
  worker_type: WebWorkerType,
}
|
|
|
|
|
|
|
|
/// Create worker as the host: resolves the worker's permissions, spawns an OS
/// thread running the worker's event loop, waits for the worker to hand back
/// a thread-safe handle, and registers the result in the `WorkersTable`.
fn op_create_worker(
  state: &mut OpState,
  args: CreateWorkerArgs,
  _: (),
) -> Result<WorkerId, AnyError> {
  let specifier = args.specifier.clone();
  let maybe_source_code = if args.has_source_code {
    Some(args.source_code.clone())
  } else {
    None
  };
  let args_name = args.name;
  let use_deno_namespace = args.use_deno_namespace;
  if use_deno_namespace {
    super::check_unstable(state, "Worker.deno.namespace");
  }
  let worker_type = args.worker_type;
  // Classic workers are gated behind the testing-features flag.
  if let WebWorkerType::Classic = worker_type {
    if let TestingFeaturesEnabled(false) = state.borrow() {
      return Err(
        deno_webstorage::DomExceptionNotSupportedError::new(
          "Classic workers are not supported.",
        )
        .into(),
      );
    }
  }
  let parent_permissions = state.borrow::<Permissions>().clone();
  // Explicit permission overrides require the unstable flag and may never
  // escalate beyond the parent's permissions (checked in
  // `create_worker_permissions`).
  let worker_permissions = if let Some(permissions) = args.permissions {
    super::check_unstable(state, "Worker.deno.permissions");
    create_worker_permissions(parent_permissions.clone(), permissions)?
  } else {
    parent_permissions.clone()
  };

  // Take-then-put keeps the callback holder and the id counter resident in
  // `OpState` while borrowing owned copies for the new thread.
  let worker_id = state.take::<WorkerId>();
  let create_module_loader = state.take::<CreateWebWorkerCbHolder>();
  state.put::<CreateWebWorkerCbHolder>(create_module_loader.clone());
  state.put::<WorkerId>(worker_id.next().unwrap());

  let module_specifier = deno_core::resolve_url(&specifier)?;
  let worker_name = args_name.unwrap_or_else(|| "".to_string());

  // Bounded (capacity 1) channel used once to pass the worker's handle back
  // to this host thread.
  let (handle_sender, handle_receiver) = std::sync::mpsc::sync_channel::<
    Result<SendableWebWorkerHandle, AnyError>,
  >(1);

  // Setup new thread
  let thread_builder =
    std::thread::Builder::new().name(format!("{}", worker_id));

  // Spawn it
  let join_handle = thread_builder.spawn(move || {
    // Any error inside this block is terminal:
    // - JS worker is useless - meaning it throws an exception and can't do anything else,
    // all action done upon it should be noops
    // - newly spawned thread exits

    let (worker, external_handle) =
      (create_module_loader.0)(CreateWebWorkerArgs {
        name: worker_name,
        worker_id,
        parent_permissions,
        permissions: worker_permissions,
        main_module: module_specifier.clone(),
        use_deno_namespace,
        worker_type,
      });

    // Send thread safe handle from newly created worker to host thread
    handle_sender.send(Ok(external_handle)).unwrap();
    drop(handle_sender);

    // At this point the only method of communication with host
    // is using `worker.internal_channels`.
    //
    // Host can already push messages and interact with worker.
    run_web_worker(worker, module_specifier, maybe_source_code)
  })?;

  // Receive WebWorkerHandle from newly created worker
  let worker_handle = handle_receiver.recv().unwrap()?;

  let worker_thread = WorkerThread {
    join_handle: Some(join_handle),
    worker_handle: worker_handle.into(),
    ctrl_closed: false,
    message_closed: false,
  };

  // At this point all interactions with worker happen using thread
  // safe handler returned from previous function calls
  state
    .borrow_mut::<WorkersTable>()
    .insert(worker_id, worker_thread);

  Ok(worker_id)
}
|
|
|
|
|
2020-02-11 04:04:59 -05:00
|
|
|
fn op_host_terminate_worker(
|
2020-09-10 09:57:45 -04:00
|
|
|
state: &mut OpState,
|
2021-04-05 12:40:24 -04:00
|
|
|
id: WorkerId,
|
2021-05-08 08:37:42 -04:00
|
|
|
_: (),
|
2021-04-05 12:40:24 -04:00
|
|
|
) -> Result<(), AnyError> {
|
2021-07-16 18:51:06 -04:00
|
|
|
if let Some(worker_thread) = state.borrow_mut::<WorkersTable>().remove(&id) {
|
2021-09-22 12:02:15 -04:00
|
|
|
worker_thread.terminate();
|
2021-07-16 18:51:06 -04:00
|
|
|
} else {
|
|
|
|
debug!("tried to terminate non-existent worker {}", id);
|
|
|
|
}
|
2021-04-05 12:40:24 -04:00
|
|
|
Ok(())
|
2020-01-17 18:43:53 -05:00
|
|
|
}
|
|
|
|
|
Don't drop messages from workers that have already been closed (#11913)
When `worker.terminate()` is called, the spec requires that the
corresponding port message queue is emptied, so no messages can be
received after the call, even if they were sent from the worker before
it was terminated.
The spec doesn't require this of `self.close()`, and since Deno uses
different channels to send messages and to notify that the worker was
closed, messages might still arrive after the worker is known to be
closed, which are currently being dropped. This change fixes that.
The fix involves two parts: one on the JS side and one on the Rust side.
The JS side was using the `#terminated` flag to keep track of whether
the worker is known to be closed, without distinguishing whether further
messages should be dropped or not. This PR changes that flag to an
enum `#state`, which can be one of `"RUNNING"`, `"CLOSED"` or
`"TERMINATED"`.
The Rust side was removing the `WorkerThread` struct from the workers
table when a close control was received, regardless of whether there
were any messages left to read, which made any subsequent calls to
`op_host_recv_message` to return `Ok(None)`, as if there were no more
mesasges. This change instead waits for both a close control and for
the message channel's sender to be closed before the worker thread is
removed from the table.
2021-09-06 05:05:02 -04:00
|
|
|
/// Identifies which of a worker's two host-facing channels is being closed
/// (see `close_channel`): the control channel or the message channel.
enum WorkerChannel {
  Ctrl,
  Messages,
}
|
|
|
|
|
|
|
|
/// Close a worker's channel. If this results in both of a worker's channels
|
|
|
|
/// being closed, the worker will be removed from the workers table.
|
|
|
|
fn close_channel(
|
|
|
|
state: Rc<RefCell<OpState>>,
|
|
|
|
id: WorkerId,
|
|
|
|
channel: WorkerChannel,
|
|
|
|
) {
|
|
|
|
use std::collections::hash_map::Entry;
|
|
|
|
|
2020-12-06 22:30:40 -05:00
|
|
|
let mut s = state.borrow_mut();
|
|
|
|
let workers = s.borrow_mut::<WorkersTable>();
|
Don't drop messages from workers that have already been closed (#11913)
When `worker.terminate()` is called, the spec requires that the
corresponding port message queue is emptied, so no messages can be
received after the call, even if they were sent from the worker before
it was terminated.
The spec doesn't require this of `self.close()`, and since Deno uses
different channels to send messages and to notify that the worker was
closed, messages might still arrive after the worker is known to be
closed, which are currently being dropped. This change fixes that.
The fix involves two parts: one on the JS side and one on the Rust side.
The JS side was using the `#terminated` flag to keep track of whether
the worker is known to be closed, without distinguishing whether further
messages should be dropped or not. This PR changes that flag to an
enum `#state`, which can be one of `"RUNNING"`, `"CLOSED"` or
`"TERMINATED"`.
The Rust side was removing the `WorkerThread` struct from the workers
table when a close control was received, regardless of whether there
were any messages left to read, which made any subsequent calls to
`op_host_recv_message` to return `Ok(None)`, as if there were no more
mesasges. This change instead waits for both a close control and for
the message channel's sender to be closed before the worker thread is
removed from the table.
2021-09-06 05:05:02 -04:00
|
|
|
|
|
|
|
// `Worker.terminate()` might have been called already, meaning that we won't
|
|
|
|
// find the worker in the table - in that case ignore.
|
|
|
|
if let Entry::Occupied(mut entry) = workers.entry(id) {
|
|
|
|
let terminate = {
|
|
|
|
let worker_thread = entry.get_mut();
|
|
|
|
match channel {
|
|
|
|
WorkerChannel::Ctrl => {
|
|
|
|
worker_thread.ctrl_closed = true;
|
|
|
|
worker_thread.message_closed
|
|
|
|
}
|
|
|
|
WorkerChannel::Messages => {
|
|
|
|
worker_thread.message_closed = true;
|
|
|
|
worker_thread.ctrl_closed
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
if terminate {
|
2021-09-22 12:02:15 -04:00
|
|
|
entry.remove().terminate();
|
Don't drop messages from workers that have already been closed (#11913)
When `worker.terminate()` is called, the spec requires that the
corresponding port message queue is emptied, so no messages can be
received after the call, even if they were sent from the worker before
it was terminated.
The spec doesn't require this of `self.close()`, and since Deno uses
different channels to send messages and to notify that the worker was
closed, messages might still arrive after the worker is known to be
closed, which are currently being dropped. This change fixes that.
The fix involves two parts: one on the JS side and one on the Rust side.
The JS side was using the `#terminated` flag to keep track of whether
the worker is known to be closed, without distinguishing whether further
messages should be dropped or not. This PR changes that flag to an
enum `#state`, which can be one of `"RUNNING"`, `"CLOSED"` or
`"TERMINATED"`.
The Rust side was removing the `WorkerThread` struct from the workers
table when a close control was received, regardless of whether there
were any messages left to read, which made any subsequent calls to
`op_host_recv_message` to return `Ok(None)`, as if there were no more
mesasges. This change instead waits for both a close control and for
the message channel's sender to be closed before the worker thread is
removed from the table.
2021-09-06 05:05:02 -04:00
|
|
|
}
|
2020-12-06 22:30:40 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-22 10:30:16 -04:00
|
|
|
/// Get control event from guest worker as host
|
|
|
|
async fn op_host_recv_ctrl(
|
2020-09-10 09:57:45 -04:00
|
|
|
state: Rc<RefCell<OpState>>,
|
2021-04-05 12:40:24 -04:00
|
|
|
id: WorkerId,
|
2021-05-08 08:37:42 -04:00
|
|
|
_: (),
|
2021-06-22 10:30:16 -04:00
|
|
|
) -> Result<WorkerControlEvent, AnyError> {
|
2020-09-10 09:57:45 -04:00
|
|
|
let worker_handle = {
|
2021-06-22 10:30:16 -04:00
|
|
|
let state = state.borrow();
|
|
|
|
let workers_table = state.borrow::<WorkersTable>();
|
2020-09-10 09:57:45 -04:00
|
|
|
let maybe_handle = workers_table.get(&id);
|
|
|
|
if let Some(handle) = maybe_handle {
|
2020-12-06 22:30:40 -05:00
|
|
|
handle.worker_handle.clone()
|
2020-09-10 09:57:45 -04:00
|
|
|
} else {
|
|
|
|
// If handle was not found it means worker has already shutdown
|
2021-06-22 10:30:16 -04:00
|
|
|
return Ok(WorkerControlEvent::Close);
|
2020-09-10 09:57:45 -04:00
|
|
|
}
|
2020-08-28 11:08:24 -04:00
|
|
|
};
|
|
|
|
|
2021-06-22 10:30:16 -04:00
|
|
|
let maybe_event = worker_handle.get_control_event().await?;
|
2020-12-06 22:30:40 -05:00
|
|
|
if let Some(event) = maybe_event {
|
|
|
|
// Terminal error means that worker should be removed from worker table.
|
2021-06-22 10:30:16 -04:00
|
|
|
if let WorkerControlEvent::TerminalError(_) = &event {
|
Don't drop messages from workers that have already been closed (#11913)
When `worker.terminate()` is called, the spec requires that the
corresponding port message queue is emptied, so no messages can be
received after the call, even if they were sent from the worker before
it was terminated.
The spec doesn't require this of `self.close()`, and since Deno uses
different channels to send messages and to notify that the worker was
closed, messages might still arrive after the worker is known to be
closed, which are currently being dropped. This change fixes that.
The fix involves two parts: one on the JS side and one on the Rust side.
The JS side was using the `#terminated` flag to keep track of whether
the worker is known to be closed, without distinguishing whether further
messages should be dropped or not. This PR changes that flag to an
enum `#state`, which can be one of `"RUNNING"`, `"CLOSED"` or
`"TERMINATED"`.
The Rust side was removing the `WorkerThread` struct from the workers
table when a close control was received, regardless of whether there
were any messages left to read, which made any subsequent calls to
`op_host_recv_message` to return `Ok(None)`, as if there were no more
mesasges. This change instead waits for both a close control and for
the message channel's sender to be closed before the worker thread is
removed from the table.
2021-09-06 05:05:02 -04:00
|
|
|
close_channel(state, id, WorkerChannel::Ctrl);
|
2020-08-28 11:08:24 -04:00
|
|
|
}
|
2021-05-11 15:09:09 -04:00
|
|
|
return Ok(event);
|
2020-12-06 22:30:40 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
// If there was no event from worker it means it has already been closed.
|
Don't drop messages from workers that have already been closed (#11913)
When `worker.terminate()` is called, the spec requires that the
corresponding port message queue is emptied, so no messages can be
received after the call, even if they were sent from the worker before
it was terminated.
The spec doesn't require this of `self.close()`, and since Deno uses
different channels to send messages and to notify that the worker was
closed, messages might still arrive after the worker is known to be
closed, which are currently being dropped. This change fixes that.
The fix involves two parts: one on the JS side and one on the Rust side.
The JS side was using the `#terminated` flag to keep track of whether
the worker is known to be closed, without distinguishing whether further
messages should be dropped or not. This PR changes that flag to an
enum `#state`, which can be one of `"RUNNING"`, `"CLOSED"` or
`"TERMINATED"`.
The Rust side was removing the `WorkerThread` struct from the workers
table when a close control was received, regardless of whether there
were any messages left to read, which made any subsequent calls to
`op_host_recv_message` to return `Ok(None)`, as if there were no more
mesasges. This change instead waits for both a close control and for
the message channel's sender to be closed before the worker thread is
removed from the table.
2021-09-06 05:05:02 -04:00
|
|
|
close_channel(state, id, WorkerChannel::Ctrl);
|
2021-06-22 10:30:16 -04:00
|
|
|
Ok(WorkerControlEvent::Close)
|
|
|
|
}
|
|
|
|
|
|
|
|
async fn op_host_recv_message(
|
|
|
|
state: Rc<RefCell<OpState>>,
|
|
|
|
id: WorkerId,
|
|
|
|
_: (),
|
|
|
|
) -> Result<Option<JsMessageData>, AnyError> {
|
|
|
|
let worker_handle = {
|
|
|
|
let s = state.borrow();
|
|
|
|
let workers_table = s.borrow::<WorkersTable>();
|
|
|
|
let maybe_handle = workers_table.get(&id);
|
|
|
|
if let Some(handle) = maybe_handle {
|
|
|
|
handle.worker_handle.clone()
|
|
|
|
} else {
|
|
|
|
// If handle was not found it means worker has already shutdown
|
|
|
|
return Ok(None);
|
|
|
|
}
|
|
|
|
};
|
Don't drop messages from workers that have already been closed (#11913)
When `worker.terminate()` is called, the spec requires that the
corresponding port message queue is emptied, so no messages can be
received after the call, even if they were sent from the worker before
it was terminated.
The spec doesn't require this of `self.close()`, and since Deno uses
different channels to send messages and to notify that the worker was
closed, messages might still arrive after the worker is known to be
closed, which are currently being dropped. This change fixes that.
The fix involves two parts: one on the JS side and one on the Rust side.
The JS side was using the `#terminated` flag to keep track of whether
the worker is known to be closed, without distinguishing whether further
messages should be dropped or not. This PR changes that flag to an
enum `#state`, which can be one of `"RUNNING"`, `"CLOSED"` or
`"TERMINATED"`.
The Rust side was removing the `WorkerThread` struct from the workers
table when a close control was received, regardless of whether there
were any messages left to read, which made any subsequent calls to
`op_host_recv_message` to return `Ok(None)`, as if there were no more
mesasges. This change instead waits for both a close control and for
the message channel's sender to be closed before the worker thread is
removed from the table.
2021-09-06 05:05:02 -04:00
|
|
|
|
|
|
|
let ret = worker_handle.port.recv(state.clone()).await?;
|
|
|
|
if ret.is_none() {
|
|
|
|
close_channel(state, id, WorkerChannel::Messages);
|
|
|
|
}
|
|
|
|
Ok(ret)
|
2019-08-26 08:50:21 -04:00
|
|
|
}
|
|
|
|
|
2019-08-14 11:03:02 -04:00
|
|
|
/// Post message to guest worker as host
|
2019-10-11 14:41:54 -04:00
|
|
|
fn op_host_post_message(
|
2020-09-10 09:57:45 -04:00
|
|
|
state: &mut OpState,
|
2021-04-05 12:40:24 -04:00
|
|
|
id: WorkerId,
|
2021-06-22 10:30:16 -04:00
|
|
|
data: JsMessageData,
|
2021-04-05 12:40:24 -04:00
|
|
|
) -> Result<(), AnyError> {
|
2021-07-16 18:51:06 -04:00
|
|
|
if let Some(worker_thread) = state.borrow::<WorkersTable>().get(&id) {
|
|
|
|
debug!("post message to worker {}", id);
|
|
|
|
let worker_handle = worker_thread.worker_handle.clone();
|
|
|
|
worker_handle.port.send(state, data)?;
|
|
|
|
} else {
|
|
|
|
debug!("tried to post message to non-existent worker {}", id);
|
|
|
|
}
|
2021-04-05 12:40:24 -04:00
|
|
|
Ok(())
|
2019-08-14 11:03:02 -04:00
|
|
|
}
|