Don't drop messages from workers that have already been closed (#11913)

When `worker.terminate()` is called, the spec requires that the
corresponding port message queue is emptied, so no messages can be
received after the call, even if they were sent from the worker before
it was terminated.

The spec doesn't require this of `self.close()`, and since Deno uses
different channels to send messages and to notify the host that the worker
has closed, messages might still arrive after the worker is known to be
closed. Those messages are currently dropped; this change fixes that.
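
In other words, a worker that posts a reply and then immediately calls
`self.close()` could have that reply silently dropped by the host. A minimal
sketch of the scenario (file names are illustrative; the new
`worker_message_before_close` test added below exercises the same pattern
with four workers):

// worker.js (illustrative name): reply, then close right away.
self.onmessage = (e) => {
  self.postMessage("reply to " + e.data);
  self.close(); // the reply above must still reach the parent
};

// main.js (illustrative name): before this change, the "message" event
// below could be lost if the host noticed the close before draining the
// message channel.
const worker = new Worker(new URL("./worker.js", import.meta.url).href, {
  type: "module",
});
worker.addEventListener("message", (e) => console.log(e.data)); // "reply to hi"
worker.postMessage("hi");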

The fix involves two parts: one on the JS side and one on the Rust side.
The JS side was using the `#terminated` flag to keep track of whether
the worker was known to be closed, without distinguishing whether further
messages should still be delivered or dropped. This PR changes that flag
into a `#status` property, which can be one of `"RUNNING"`, `"CLOSED"` or
`"TERMINATED"`.

The Rust side was removing the `WorkerThread` struct from the workers
table as soon as a close control was received, regardless of whether there
were any messages left to read, which made any subsequent call to
`op_host_recv_message` return `Ok(None)`, as if there were no more
messages. This change instead waits until both a close control has been
received and the message channel's sender has been closed before removing
the worker thread from the table.
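
The new bookkeeping can be pictured with a small self-contained JavaScript
model (names are illustrative; the actual logic is the `close_channel`
function added in the Rust hunks below):

const workers = new Map(); // id -> { ctrlClosed, messageClosed }

function createWorker(id) {
  workers.set(id, { ctrlClosed: false, messageClosed: false });
}

function closeChannel(id, channel) { // channel is "ctrl" or "messages"
  const worker = workers.get(id);
  if (worker === undefined) return; // terminate() may have removed it already
  if (channel === "ctrl") worker.ctrlClosed = true;
  else worker.messageClosed = true;
  // The entry is only dropped once BOTH channels have reported "closed";
  // until then op_host_recv_message can still find the worker and hand back
  // any messages that are left. (The real Rust code also terminates the
  // worker handle and joins its thread at this point.)
  if (worker.ctrlClosed && worker.messageClosed) workers.delete(id);
}

createWorker(1);
closeChannel(1, "ctrl"); // close control received...
console.log(workers.has(1)); // true: messages may still be pending
closeChannel(1, "messages"); // ...message channel sender closed too
console.log(workers.has(1)); // false: now it is safe to forget the worker

Only once both signals have arrived is the worker removed from the table,
which is what keeps `op_host_recv_message` from returning `Ok(None)` while
messages are still queued.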
Andreu Botella 2021-09-06 11:05:02 +02:00 committed by GitHub
parent 930cb0afd8
commit b7c2902c97
6 changed files with 112 additions and 25 deletions

@@ -1177,6 +1177,11 @@ itest!(worker_close_race {
output: "worker_close_race.js.out",
});
itest!(worker_message_before_close {
args: "run --quiet --reload --allow-read worker_message_before_close.js",
output: "worker_message_before_close.js.out",
});
#[test]
fn no_validate_asm() {
let output = util::deno_cmd()

@@ -0,0 +1,16 @@
for (let i = 0; i < 4; i++) {
const worker = new Worker(
new URL("./workers/message_before_close.js", import.meta.url).href,
{ type: "module", name: String(i) },
);
worker.addEventListener("message", (message) => {
// Only print responses after all reception logs.
setTimeout(() => {
console.log("response from worker %d received", message.data);
}, 500);
});
worker.postMessage(i);
}
export {};

@@ -0,0 +1,8 @@
message received in worker 0
message received in worker 1
message received in worker 2
message received in worker 3
response from worker 0 received
response from worker 1 received
response from worker 2 received
response from worker 3 received

@@ -0,0 +1,6 @@
self.onmessage = (params) => {
const workerId = params.data;
console.log("message received in worker %d", workerId);
self.postMessage(workerId);
self.close();
};

@@ -139,7 +139,13 @@
class Worker extends EventTarget {
#id = 0;
#name = "";
#terminated = false;
// "RUNNING" | "CLOSED" | "TERMINATED"
// "TERMINATED" means that any controls or messages received will be
// discarded. "CLOSED" means that we have received a control
// indicating that the worker is no longer running, but there might
// still be messages left to receive.
#status = "RUNNING";
constructor(specifier, options = {}) {
super();
@@ -243,17 +249,17 @@
}
#pollControl = async () => {
while (!this.#terminated) {
while (this.#status === "RUNNING") {
const [type, data] = await hostRecvCtrl(this.#id);
// If terminate was called then we ignore all messages
if (this.#terminated) {
if (this.#status === "TERMINATED") {
return;
}
switch (type) {
case 1: { // TerminalError
this.#terminated = true;
this.#status = "CLOSED";
} /* falls through */
case 2: { // Error
if (!this.#handleError(data)) {
@@ -270,7 +276,7 @@
}
case 3: { // Close
log(`Host got "close" message from worker: ${this.#name}`);
this.#terminated = true;
this.#status = "CLOSED";
return;
}
default: {
@@ -281,9 +287,11 @@
};
#pollMessages = async () => {
while (!this.terminated) {
while (this.#status !== "TERMINATED") {
const data = await hostRecvMessage(this.#id);
if (data === null) break;
if (this.#status === "TERMINATED" || data === null) {
return;
}
let message, transferables;
try {
const v = deserializeJsMessageData(data);
@@ -332,13 +340,14 @@
}
const { transfer } = options;
const data = serializeJsMessageData(message, transfer);
if (this.#terminated) return;
if (this.#status === "RUNNING") {
hostPostMessage(this.#id, data);
}
}
terminate() {
if (!this.#terminated) {
this.#terminated = true;
if (this.#status !== "TERMINATED") {
this.#status = "TERMINATED";
hostTerminateWorker(this.#id);
}
}

@@ -67,6 +67,12 @@ pub struct CreateWebWorkerCbHolder(Arc<CreateWebWorkerCb>);
pub struct WorkerThread {
join_handle: JoinHandle<Result<(), AnyError>>,
worker_handle: WebWorkerHandle,
// A WorkerThread that hasn't been explicitly terminated can only be removed
// from the WorkersTable once close messages have been received for both the
// control and message channels. See `close_channel`.
ctrl_closed: bool,
message_closed: bool,
}
pub type WorkersTable = HashMap<WorkerId, WorkerThread>;
@@ -553,6 +559,8 @@ fn op_create_worker(
let worker_thread = WorkerThread {
join_handle,
worker_handle: worker_handle.into(),
ctrl_closed: false,
message_closed: false,
};
// At this point all interactions with worker happen using thread
@@ -582,13 +590,42 @@ fn op_host_terminate_worker(
Ok(())
}
/// Try to remove worker from workers table - NOTE: `Worker.terminate()`
/// might have been called already meaning that we won't find worker in
/// table - in that case ignore.
fn try_remove_and_close(state: Rc<RefCell<OpState>>, id: WorkerId) {
enum WorkerChannel {
Ctrl,
Messages,
}
/// Close a worker's channel. If this results in both of a worker's channels
/// being closed, the worker will be removed from the workers table.
fn close_channel(
state: Rc<RefCell<OpState>>,
id: WorkerId,
channel: WorkerChannel,
) {
use std::collections::hash_map::Entry;
let mut s = state.borrow_mut();
let workers = s.borrow_mut::<WorkersTable>();
if let Some(worker_thread) = workers.remove(&id) {
// `Worker.terminate()` might have been called already, meaning that we won't
// find the worker in the table - in that case ignore.
if let Entry::Occupied(mut entry) = workers.entry(id) {
let terminate = {
let worker_thread = entry.get_mut();
match channel {
WorkerChannel::Ctrl => {
worker_thread.ctrl_closed = true;
worker_thread.message_closed
}
WorkerChannel::Messages => {
worker_thread.message_closed = true;
worker_thread.ctrl_closed
}
}
};
if terminate {
let worker_thread = entry.remove();
worker_thread.worker_handle.terminate();
worker_thread
.join_handle
@@ -597,6 +634,7 @@ fn try_remove_and_close(state: Rc<RefCell<OpState>>, id: WorkerId) {
.expect("Panic in worker event loop");
}
}
}
/// Get control event from guest worker as host
async fn op_host_recv_ctrl(
@@ -620,13 +658,13 @@ async fn op_host_recv_ctrl(
if let Some(event) = maybe_event {
// Terminal error means that worker should be removed from worker table.
if let WorkerControlEvent::TerminalError(_) = &event {
try_remove_and_close(state, id);
close_channel(state, id, WorkerChannel::Ctrl);
}
return Ok(event);
}
// If there was no event from worker it means it has already been closed.
try_remove_and_close(state, id);
close_channel(state, id, WorkerChannel::Ctrl);
Ok(WorkerControlEvent::Close)
}
@@ -646,7 +684,12 @@ async fn op_host_recv_message(
return Ok(None);
}
};
worker_handle.port.recv(state).await
let ret = worker_handle.port.recv(state.clone()).await?;
if ret.is_none() {
close_channel(state, id, WorkerChannel::Messages);
}
Ok(ret)
}
/// Post message to guest worker as host