
perf(cli): use new deno_core timers (#22569)

Improves #19100 

Fixes #20356

Replaces #20428



Changes made in deno_core to support this (a condensed sketch of the new timer wiring follows the list):

 - [x] Errors must be handled in setTimeout callbacks
 - [x] Microtask ordering is not quite right
 - [x] Timer cancellation must be checked right before dispatch
 - [x] Timer sanitizer
 - [x] Move high-res timer to deno_core
 - [x] Timers need opcall tracing
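
For orientation before the diff: the web timer layer no longer drives `op_sleep`/`op_sleep_interval` resources through its own macrotask queue; it delegates to the timer primitives deno_core exposes on `Deno.core` (`queueUserTimer`, `queueImmediate`, `getTimerDepth`, `cancelTimer`, `refTimer`, `unrefTimer`) plus a new `op_defer` op. A condensed sketch of that wiring, simplified from the `02_timers.js` changes below (webidl conversions, string callbacks and argument binding omitted; not the full implementation):

```js
// Condensed from the new ext:deno_web/02_timers.js further down in this diff.
import { core } from "ext:core/mod.js";

function setTimeout(callback, timeout = 0) {
  // The nesting depth is passed down so clamping can happen on the deno_core side.
  return core.queueUserTimer(core.getTimerDepth() + 1, /* repeat */ false, timeout, callback);
}

function setInterval(callback, timeout = 0) {
  return core.queueUserTimer(core.getTimerDepth() + 1, /* repeat */ true, timeout, callback);
}

function clearTimeout(id = 0) {
  core.cancelTimer(id); // clearInterval goes through the same path
}

function setImmediate(callback) {
  return core.queueImmediate(callback);
}
```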
Matt Mastracci 2024-03-01 11:15:18 -07:00 committed by GitHub
parent 7ac0408330
commit 736b91edd0
20 changed files with 190 additions and 569 deletions

Cargo.lock generated
View file

@ -1257,9 +1257,9 @@ dependencies = [
[[package]]
name = "deno_core"
version = "0.265.0"
version = "0.266.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f40a3dc5c31b35feedda9304ceff8b541dd5c8d7deeb69eb6036f8fa65bfdf08"
checksum = "cfbfe65d133be3abefb973f87b601f686666bb05d5521f0fab16d9dd28a5d5cf"
dependencies = [
"anyhow",
"bincode",
@ -1714,9 +1714,9 @@ dependencies = [
[[package]]
name = "deno_ops"
version = "0.141.0"
version = "0.142.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86efc44027a9d370fa677988cb463fb8c9a48c5d8b53e91431a69a44540a6c11"
checksum = "f96d7d6806eb726b4feb3092cbdaf887ae412b6e8be7c67617c61dbe8b3f1693"
dependencies = [
"proc-macro-rules",
"proc-macro2",
@ -5600,9 +5600,9 @@ dependencies = [
[[package]]
name = "serde_v8"
version = "0.174.0"
version = "0.175.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f15fc8c65ebdf37ec94b72dacad9622ad8e04a7cf7504060a709eb21ed02b88"
checksum = "dfb0193c9e969f1d7a660a2c5ad7c94bb00146c5f5367607eabb2af78094160f"
dependencies = [
"bytes",
"derive_more",

View file

@ -43,7 +43,7 @@ repository = "https://github.com/denoland/deno"
[workspace.dependencies]
deno_ast = { version = "0.34.1", features = ["transpiling"] }
deno_core = { version = "0.265.0", features = ["snapshot_data_bincode"] }
deno_core = { version = "0.266.0", features = ["snapshot_data_bincode"] }
deno_bench_util = { version = "0.134.0", path = "./bench_util" }
deno_lockfile = "0.19.0"

View file

@ -147,14 +147,46 @@ fn format_sanitizer_accum(
String::new()
};
output.push(value);
} else if item_type == RuntimeActivityType::Timer {
let (count_str, plural, tense) = if count == 1 {
(Cow::Borrowed("A"), "", "was")
} else {
// TODO(mmastrac): this will be done in a later PR
unimplemented!(
"Unhandled diff: {appeared} {} {:?} {}",
count,
item_type,
item_name
);
(Cow::Owned(count.to_string()), "s", "were")
};
let phrase = if appeared {
"started in this test, but never completed"
} else {
"started before the test, but completed during the test. Intervals and timers should not complete in a test if they were not started in that test"
};
let mut value = format!("{count_str} timer{plural} {tense} {phrase}. This is often caused by not calling `clearTimeout`.");
value += &if let Some(trace) = trace {
format!(" The operation {tense} started here:\n{trace}")
} else {
needs_trace_leaks = true;
String::new()
};
output.push(value);
} else if item_type == RuntimeActivityType::Interval {
let (count_str, plural, tense) = if count == 1 {
(Cow::Borrowed("An"), "", "was")
} else {
(Cow::Owned(count.to_string()), "s", "were")
};
let phrase = if appeared {
"started in this test, but never completed"
} else {
"started before the test, but completed during the test. Intervals and timers should not complete in a test if they were not started in that test"
};
let mut value = format!("{count_str} interval{plural} {tense} {phrase}. This is often caused by not calling `clearInterval`.");
value += &if let Some(trace) = trace {
format!(" The operation {tense} started here:\n{trace}")
} else {
needs_trace_leaks = true;
String::new()
};
output.push(value);
} else {
unreachable!()
}
}
if needs_trace_leaks {
@ -176,9 +208,9 @@ fn format_sanitizer_accum_item(
RuntimeActivity::AsyncOp(_, trace, name) => {
(activity_type, name.into(), trace)
}
RuntimeActivity::Interval(..) => (activity_type, "".into(), None),
RuntimeActivity::Resource(.., name) => (activity_type, name.into(), None),
RuntimeActivity::Timer(..) => (activity_type, "".into(), None),
RuntimeActivity::Resource(_, _, name) => (activity_type, name.into(), None),
RuntimeActivity::Interval(_, trace) => (activity_type, "".into(), trace),
RuntimeActivity::Timer(_, trace) => (activity_type, "".into(), trace),
}
}
@ -310,8 +342,6 @@ pub const OP_DETAILS: phf::Map<&'static str, [&'static str; 2]> = phf_map! {
"op_run_status" => ["get the status of a subprocess", "awaiting the result of a `Deno.Process#status` call"],
"op_seek_async" => ["seek in a file", "awaiting the result of a `Deno.File#seek` call"],
"op_signal_poll" => ["get the next signal", "un-registering a OS signal handler"],
"op_sleep_interval" => ["sleep for a duration", "cancelling a `setTimeout` or `setInterval` call"],
"op_sleep" => ["sleep for a duration", "cancelling a `setTimeout` or `setInterval` call"],
"op_stat_async" => ["get file metadata", "awaiting the result of a `Deno.stat` call"],
"op_symlink_async" => ["create a symlink", "awaiting the result of a `Deno.symlink` call"],
"op_net_accept_tls" => ["accept a TLS stream", "closing a `Deno.TlsListener`"],

View file

@ -728,6 +728,7 @@ async fn run_tests_for_worker_inner(
let mut filter = RuntimeActivityStatsFilter::default();
filter = filter.with_resources();
filter = filter.with_ops();
filter = filter.with_timers();
filter = filter.omit_op(op_id_host_recv_ctrl as _);
filter = filter.omit_op(op_id_host_recv_message as _);
@ -825,37 +826,6 @@ async fn run_tests_for_worker_inner(
Ok(())
}
/// Removes timer resources and op_sleep_interval calls. When an interval is started before a test
/// and resolves during a test, there's a false alarm.
fn preprocess_timer_activity(activities: &mut Vec<RuntimeActivity>) {
// TODO(mmastrac): Once we get to the new timer implementation, all of this
// code can go away and be replaced by a proper timer sanitizer.
let mut timer_resource_leaked = false;
// First, search for any timer resources which will indicate that we have an interval leak
activities.retain(|activity| {
if let RuntimeActivity::Resource(.., name) = activity {
if name == "timer" {
timer_resource_leaked = true;
return false;
}
}
true
});
// If we've leaked a timer resource, we un-mute op_sleep_interval calls. Otherwise, we remove
// them.
if !timer_resource_leaked {
activities.retain(|activity| {
if let RuntimeActivity::AsyncOp(.., op) = activity {
*op != "op_sleep_interval"
} else {
true
}
})
}
}
async fn wait_for_activity_to_stabilize(
worker: &mut MainWorker,
stats: &RuntimeActivityStatsFactory,
@ -867,8 +837,6 @@ async fn wait_for_activity_to_stabilize(
// First, check to see if there's any diff at all. If not, just continue.
let after = stats.clone().capture(filter);
let mut diff = RuntimeActivityStats::diff(&before, &after);
preprocess_timer_activity(&mut diff.appeared);
preprocess_timer_activity(&mut diff.disappeared);
if diff.is_empty() {
// No activity, so we return early
return Ok(None);
@ -884,8 +852,6 @@ async fn wait_for_activity_to_stabilize(
let after = stats.clone().capture(filter);
diff = RuntimeActivityStats::diff(&before, &after);
preprocess_timer_activity(&mut diff.appeared);
preprocess_timer_activity(&mut diff.disappeared);
if diff.is_empty() {
return Ok(None);
}
@ -908,6 +874,23 @@ async fn wait_for_activity_to_stabilize(
.retain(|activity| !matches!(activity, RuntimeActivity::Resource(..)));
}
// Since we don't have an option to disable timer sanitization, we use sanitize_ops == false &&
// sanitize_resources == false to disable those.
if !sanitize_ops && !sanitize_resources {
diff.appeared.retain(|activity| {
!matches!(
activity,
RuntimeActivity::Timer(..) | RuntimeActivity::Interval(..)
)
});
diff.disappeared.retain(|activity| {
!matches!(
activity,
RuntimeActivity::Timer(..) | RuntimeActivity::Interval(..)
)
});
}
Ok(if diff.is_empty() { None } else { Some(diff) })
}
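
Because there is no dedicated option for the timer sanitizer yet, the block above piggybacks on the existing switches: timer and interval activity is only ignored when both `sanitizeOps` and `sanitizeResources` are disabled. A hypothetical test using that opt-out:

```ts
// Hypothetical example, not from this commit.
Deno.test({
  name: "background interval tolerated",
  sanitizeOps: false,
  sanitizeResources: false,
  fn() {
    // Started here and never completed during the test; with both sanitizers
    // disabled, the diff filtering above drops it instead of reporting a leak.
    const id = setInterval(() => {}, 1_000);
    Deno.unrefTimer(id); // keep the event loop free to exit afterwards
  },
});
```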

View file

@ -21,7 +21,7 @@ import * as timers from "ext:deno_web/02_timers.js";
const clearTimeout_ = timers.clearTimeout;
const clearInterval_ = timers.clearInterval;
const setTimeoutUnclamped = timers.setTimeoutUnclamped;
const setImmediate_ = timers.setImmediate;
export function setTimeout(
callback: (...args: unknown[]) => void,
@ -76,7 +76,7 @@ export function setImmediate(
cb: (...args: unknown[]) => void,
...args: unknown[]
): Timeout {
return setTimeoutUnclamped(cb, 0, ...args);
return setImmediate_(cb, ...args);
}
export const clearImmediate = clearTimeout;
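
With this change, Node's `setImmediate` forwards to the dedicated `setImmediate` in `02_timers.js` instead of an unclamped zero-delay timeout. Usage through the standard `node:timers` API is unchanged; for example:

```ts
import { clearImmediate, setImmediate } from "node:timers";

const handle = setImmediate((msg) => console.log(msg), "runs on the next event-loop turn");
// clearImmediate(handle); // would cancel it before it runs
```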

View file

@ -1,33 +1,18 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
import { core, primordials } from "ext:core/mod.js";
import {
op_now,
op_sleep,
op_sleep_interval,
op_timer_handle,
} from "ext:core/ops";
import { op_defer, op_now } from "ext:core/ops";
const {
ArrayPrototypePush,
ArrayPrototypeShift,
FunctionPrototypeCall,
MapPrototypeDelete,
MapPrototypeGet,
MapPrototypeHas,
MapPrototypeSet,
Uint8Array,
Uint32Array,
PromisePrototypeThen,
SafeArrayIterator,
SafeMap,
TypedArrayPrototypeGetBuffer,
TypeError,
indirectEval,
ReflectApply,
} = primordials;
import * as webidl from "ext:deno_webidl/00_webidl.js";
import { reportException } from "ext:deno_web/02_event.js";
import { assert } from "ext:deno_web/00_infra.js";
const hrU8 = new Uint8Array(8);
const hr = new Uint32Array(TypedArrayPrototypeGetBuffer(hrU8));
@ -38,371 +23,118 @@ function opNow() {
// ---------------------------------------------------------------------------
/**
* The task queue corresponding to the timer task source.
*
* @type { {action: () => void, nestingLevel: number}[] }
*/
const timerTasks = [];
/**
* The current task's timer nesting level, or zero if we're not currently
* running a timer task (since the minimum nesting level is 1).
*
* @type {number}
*/
let timerNestingLevel = 0;
function handleTimerMacrotask() {
// We have no work to do, tell the runtime that we don't
// need to perform microtask checkpoint.
if (timerTasks.length === 0) {
return undefined;
}
const task = ArrayPrototypeShift(timerTasks);
timerNestingLevel = task.nestingLevel;
try {
task.action();
} finally {
timerNestingLevel = 0;
}
return timerTasks.length === 0;
}
// ---------------------------------------------------------------------------
/**
* The keys in this map correspond to the key ID's in the spec's map of active
* timers. The values are the timeout's cancel rid.
*
* @type {Map<number, { cancelRid: number, isRef: boolean, promise: Promise<void> }>}
*/
const activeTimers = new SafeMap();
let nextId = 1;
/**
* @param {Function | string} callback
* @param {number} timeout
* @param {Array<any>} args
* @param {boolean} repeat
* @param {number | undefined} prevId
* @returns {number} The timer ID
*/
function initializeTimer(
callback,
timeout,
args,
repeat,
prevId,
// TODO(bartlomieju): remove this option, once `nextTick` and `setImmediate`
// in Node compat are cleaned up
respectNesting = true,
) {
// 2. If previousId was given, let id be previousId; otherwise, let
// previousId be an implementation-defined integer that is greater than zero
// and does not already exist in global's map of active timers.
let id;
let timerInfo;
if (prevId !== undefined) {
// `prevId` is only passed for follow-up calls on intervals
assert(repeat);
id = prevId;
timerInfo = MapPrototypeGet(activeTimers, id);
} else {
// TODO(@andreubotella): Deal with overflow.
// https://github.com/whatwg/html/issues/7358
id = nextId++;
const cancelRid = op_timer_handle();
timerInfo = { cancelRid, isRef: true, promise: null };
// Step 4 in "run steps after a timeout".
MapPrototypeSet(activeTimers, id, timerInfo);
}
// 3. If the surrounding agent's event loop's currently running task is a
// task that was created by this algorithm, then let nesting level be the
// task's timer nesting level. Otherwise, let nesting level be zero.
// 4. If timeout is less than 0, then set timeout to 0.
// 5. If nesting level is greater than 5, and timeout is less than 4, then
// set timeout to 4.
//
// The nesting level of 5 and minimum of 4 ms are spec-mandated magic
// constants.
if (timeout < 0) timeout = 0;
if (timerNestingLevel > 5 && timeout < 4 && respectNesting) timeout = 4;
// 9. Let task be a task that runs the following steps:
const task = {
action: () => {
// 1. If id does not exist in global's map of active timers, then abort
// these steps.
//
// This is relevant if the timer has been canceled after the sleep op
// resolves but before this task runs.
if (!MapPrototypeHas(activeTimers, id)) {
return;
}
// 2.
// 3.
if (typeof callback === "function") {
try {
FunctionPrototypeCall(
callback,
globalThis,
...new SafeArrayIterator(args),
);
} catch (error) {
reportException(error);
}
} else {
indirectEval(callback);
}
if (repeat) {
if (MapPrototypeHas(activeTimers, id)) {
// 4. If id does not exist in global's map of active timers, then
// abort these steps.
// NOTE: It might have been removed via the author code in the handler
// calling clearTimeout() or clearInterval().
// 5. If repeat is true, then perform the timer initialization steps
// again, given global, handler, timeout, arguments, true, and id.
initializeTimer(callback, timeout, args, true, id);
}
} else {
// 6. Otherwise, remove global's map of active timers[id].
core.tryClose(timerInfo.cancelRid);
MapPrototypeDelete(activeTimers, id);
}
},
// 10. Increment nesting level by one.
// 11. Set task's timer nesting level to nesting level.
nestingLevel: timerNestingLevel + 1,
};
// 12. Let completionStep be an algorithm step which queues a global task on
// the timer task source given global to run task.
// 13. Run steps after a timeout given global, "setTimeout/setInterval",
// timeout, completionStep, and id.
runAfterTimeout(
task,
timeout,
timerInfo,
repeat,
);
return id;
}
// ---------------------------------------------------------------------------
/**
* @typedef ScheduledTimer
* @property {number} millis
* @property { {action: () => void, nestingLevel: number}[] } task
* @property {boolean} resolved
* @property {ScheduledTimer | null} prev
* @property {ScheduledTimer | null} next
*/
/**
* A doubly linked list of timers.
* @type { { head: ScheduledTimer | null, tail: ScheduledTimer | null } }
*/
const scheduledTimers = { head: null, tail: null };
/**
* @param { {action: () => void, nestingLevel: number}[] } task Will be run
* after the timeout, if it hasn't been cancelled.
* @param {number} millis
* @param {{ cancelRid: number, isRef: boolean, promise: Promise<void> }} timerInfo
* @param {boolean} repeat
*/
function runAfterTimeout(task, millis, timerInfo, repeat) {
const cancelRid = timerInfo.cancelRid;
const sleepPromise = repeat
? op_sleep_interval(millis, cancelRid)
: op_sleep(millis, cancelRid);
timerInfo.promise = sleepPromise;
if (!timerInfo.isRef) {
core.unrefOpPromise(timerInfo.promise);
}
/** @type {ScheduledTimer} */
const timerObject = {
millis,
resolved: false,
prev: scheduledTimers.tail,
next: null,
task,
};
// Add timerObject to the end of the list.
if (scheduledTimers.tail === null) {
assert(scheduledTimers.head === null);
scheduledTimers.head = scheduledTimers.tail = timerObject;
} else {
scheduledTimers.tail.next = timerObject;
scheduledTimers.tail = timerObject;
}
// 1.
PromisePrototypeThen(
sleepPromise,
(cancelled) => {
if (timerObject.resolved) {
return;
}
// "op_void_async_deferred" returns null
if (cancelled !== null && !cancelled) {
// The timer was cancelled.
removeFromScheduledTimers(timerObject);
return;
}
// 2. Wait until any invocations of this algorithm that had the same
// global and orderingIdentifier, that started before this one, and
// whose milliseconds is equal to or less than this one's, have
// completed.
// 4. Perform completionSteps.
// IMPORTANT: Since the sleep ops aren't guaranteed to resolve in the
// right order, whenever one resolves, we run through the scheduled
// timers list (which is in the order in which they were scheduled), and
// we call the callback for every timer which both:
// a) has resolved, and
// b) its timeout is lower than the lowest unresolved timeout found so
// far in the list.
let currentEntry = scheduledTimers.head;
while (currentEntry !== null) {
if (currentEntry.millis <= timerObject.millis) {
currentEntry.resolved = true;
ArrayPrototypePush(timerTasks, currentEntry.task);
removeFromScheduledTimers(currentEntry);
if (currentEntry === timerObject) {
break;
}
}
currentEntry = currentEntry.next;
}
},
);
}
/** @param {ScheduledTimer} timerObj */
function removeFromScheduledTimers(timerObj) {
if (timerObj.prev !== null) {
timerObj.prev.next = timerObj.next;
} else {
assert(scheduledTimers.head === timerObj);
scheduledTimers.head = timerObj.next;
}
if (timerObj.next !== null) {
timerObj.next.prev = timerObj.prev;
} else {
assert(scheduledTimers.tail === timerObj);
scheduledTimers.tail = timerObj.prev;
}
}
// ---------------------------------------------------------------------------
function checkThis(thisArg) {
if (thisArg !== null && thisArg !== undefined && thisArg !== globalThis) {
throw new TypeError("Illegal invocation");
}
}
function setTimeout(callback, timeout = 0, ...args) {
checkThis(this);
if (typeof callback !== "function") {
callback = webidl.converters.DOMString(callback);
/**
* Call a callback function immediately.
*/
function setImmediate(callback, ...args) {
if (args.length > 0) {
const unboundCallback = callback;
callback = () => ReflectApply(unboundCallback, window, args);
}
timeout = webidl.converters.long(timeout);
return initializeTimer(callback, timeout, args, false);
return core.queueImmediate(
callback,
);
}
/**
* Call a callback function after a delay.
*/
function setTimeout(callback, timeout = 0, ...args) {
checkThis(this);
// If callback is a string, replace it with a function that evals the string on every timeout
if (typeof callback !== "function") {
const unboundCallback = webidl.converters.DOMString(callback);
callback = () => indirectEval(unboundCallback);
}
if (args.length > 0) {
const unboundCallback = callback;
callback = () => ReflectApply(unboundCallback, window, args);
}
timeout = webidl.converters.long(timeout);
return core.queueUserTimer(
core.getTimerDepth() + 1,
false,
timeout,
callback,
);
}
/**
* Call a callback function after a delay.
*/
function setInterval(callback, timeout = 0, ...args) {
checkThis(this);
if (typeof callback !== "function") {
callback = webidl.converters.DOMString(callback);
const unboundCallback = webidl.converters.DOMString(callback);
callback = () => indirectEval(unboundCallback);
}
if (args.length > 0) {
const unboundCallback = callback;
callback = () => ReflectApply(unboundCallback, window, args);
}
timeout = webidl.converters.long(timeout);
return initializeTimer(callback, timeout, args, true);
}
// TODO(bartlomieju): remove this option, once `nextTick` and `setImmediate`
// in Node compat are cleaned up
function setTimeoutUnclamped(callback, timeout = 0, ...args) {
checkThis(this);
if (typeof callback !== "function") {
callback = webidl.converters.DOMString(callback);
}
timeout = webidl.converters.long(timeout);
return initializeTimer(callback, timeout, args, false, undefined, false);
return core.queueUserTimer(
core.getTimerDepth() + 1,
true,
timeout,
callback,
);
}
/**
* Clear a timeout or interval.
*/
function clearTimeout(id = 0) {
checkThis(this);
id = webidl.converters.long(id);
const timerInfo = MapPrototypeGet(activeTimers, id);
if (timerInfo !== undefined) {
core.tryClose(timerInfo.cancelRid);
MapPrototypeDelete(activeTimers, id);
}
core.cancelTimer(id);
}
/**
* Clear a timeout or interval.
*/
function clearInterval(id = 0) {
checkThis(this);
clearTimeout(id);
}
function refTimer(id) {
const timerInfo = MapPrototypeGet(activeTimers, id);
if (timerInfo === undefined || timerInfo.isRef) {
return;
}
timerInfo.isRef = true;
core.refOpPromise(timerInfo.promise);
id = webidl.converters.long(id);
core.cancelTimer(id);
}
/**
* Mark a timer as not blocking event loop exit.
*/
function unrefTimer(id) {
const timerInfo = MapPrototypeGet(activeTimers, id);
if (timerInfo === undefined || !timerInfo.isRef) {
return;
}
timerInfo.isRef = false;
core.unrefOpPromise(timerInfo.promise);
core.unrefTimer(id);
}
/**
* Mark a timer as blocking event loop exit.
*/
function refTimer(id) {
core.refTimer(id);
}
// Defer to avoid starving the event loop. Not using queueMicrotask()
// for that reason: it lets promises make forward progress but can
// still starve other parts of the event loop.
function defer(go) {
// If we pass a delay of zero to op_sleep, it returns at the next event spin
PromisePrototypeThen(op_sleep(0, 0), () => go());
PromisePrototypeThen(op_defer(), () => go());
}
export {
clearInterval,
clearTimeout,
defer,
handleTimerMacrotask,
opNow,
refTimer,
setImmediate,
setInterval,
setTimeout,
setTimeoutUnclamped,
unrefTimer,
};
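
`refTimer`/`unrefTimer` are now thin wrappers over `core.refTimer`/`core.unrefTimer`; as far as the runtime wiring goes, these back `Deno.refTimer` and `Deno.unrefTimer`. A small user-level illustration, hypothetical and not part of this commit:

```ts
// An unref'd timer no longer keeps the event loop, and hence the process, alive.
const id = setTimeout(() => console.log("only fires if something else keeps the loop busy"), 60_000);
Deno.unrefTimer(id);
// Deno.refTimer(id); // would make it block event-loop exit again
```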

View file

@ -21,9 +21,8 @@ fn setup() -> Vec<Extension> {
esm_entry_point = "ext:bench_setup/setup",
esm = ["ext:bench_setup/setup" = {
source = r#"
import { setTimeout, handleTimerMacrotask } from "ext:deno_web/02_timers.js";
import { setTimeout } from "ext:deno_web/02_timers.js";
globalThis.setTimeout = setTimeout;
Deno.core.setMacrotaskCallback(handleTimerMacrotask);
"#
}],
state = |state| {

View file

@ -1,67 +0,0 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
#[cfg(target_os = "windows")]
mod windows {
use std::marker::PhantomData;
use std::sync::atomic::AtomicU32;
pub(crate) struct HrTimerLock {
pub(super) _unconstructable: PhantomData<()>,
}
/// Decrease the reference count of the HR timer on drop.
impl Drop for HrTimerLock {
fn drop(&mut self) {
dec_ref();
}
}
/// Maintains the HR timer refcount. This should be more than sufficient as 2^32 timers would be
/// an impossible situation, and if it does somehow happen, the worst case is that we'll disable
/// the high-res timer when we shouldn't (and things would eventually return to proper operation).
static TIMER_REFCOUNT: AtomicU32 = AtomicU32::new(0);
pub(super) fn inc_ref() {
let old = TIMER_REFCOUNT.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
// Overflow/underflow sanity check in debug mode
debug_assert!(old != u32::MAX);
if old == 0 {
lock_hr();
}
}
fn dec_ref() {
let old = TIMER_REFCOUNT.fetch_sub(1, std::sync::atomic::Ordering::SeqCst);
// Overflow/underflow sanity check in debug mode
debug_assert!(old != 0);
if old == 1 {
unlock_hr();
}
}
/// If the refcount is > 0, we ask Windows for a lower timer period once. While the underlying
/// Windows timeBeginPeriod/timeEndPeriod API can manage its own reference counts, we choose to
/// use it once per process and avoid nesting these calls.
fn lock_hr() {
// SAFETY: We just want to set the timer period here
unsafe { windows_sys::Win32::Media::timeBeginPeriod(1) };
}
fn unlock_hr() {
// SAFETY: We just want to set the timer period here
unsafe { windows_sys::Win32::Media::timeEndPeriod(1) };
}
}
#[cfg(target_os = "windows")]
pub(crate) fn hr_timer_lock() -> windows::HrTimerLock {
windows::inc_ref();
windows::HrTimerLock {
_unconstructable: Default::default(),
}
}
/// No-op on other platforms.
#[cfg(not(target_os = "windows"))]
pub(crate) fn hr_timer_lock() -> (std::marker::PhantomData<()>,) {
Default::default()
}

View file

@ -2,7 +2,6 @@
mod blob;
mod compression;
mod hr_timer_lock;
mod message_port;
mod stream_resource;
mod timers;
@ -50,10 +49,8 @@ use crate::message_port::op_message_port_recv_message;
pub use crate::message_port::JsMessageData;
pub use crate::message_port::MessagePort;
use crate::timers::op_defer;
use crate::timers::op_now;
use crate::timers::op_sleep;
use crate::timers::op_sleep_interval;
use crate::timers::op_timer_handle;
use crate::timers::StartTime;
pub use crate::timers::TimersPermission;
@ -85,9 +82,7 @@ deno_core::extension!(deno_web,
compression::op_compression_write,
compression::op_compression_finish,
op_now<P>,
op_timer_handle,
op_sleep,
op_sleep_interval,
op_defer,
op_transfer_arraybuffer,
stream_resource::op_readable_stream_resource_allocate,
stream_resource::op_readable_stream_resource_allocate_sized,

View file

@ -2,18 +2,8 @@
//! This module helps deno implement timers and performance APIs.
use crate::hr_timer_lock::hr_timer_lock;
use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::CancelFuture;
use deno_core::CancelHandle;
use deno_core::OpState;
use deno_core::Resource;
use deno_core::ResourceId;
use std::borrow::Cow;
use std::cell::RefCell;
use std::rc::Rc;
use std::time::Duration;
use std::time::Instant;
pub trait TimersPermission {
@ -53,78 +43,5 @@ where
buf[1] = subsec_nanos;
}
pub struct TimerHandle(Rc<CancelHandle>);
impl Resource for TimerHandle {
fn name(&self) -> Cow<str> {
"timer".into()
}
fn close(self: Rc<Self>) {
self.0.cancel();
}
}
/// Creates a [`TimerHandle`] resource that can be used to cancel invocations of
/// [`op_sleep`].
#[op2(fast)]
#[smi]
pub fn op_timer_handle(state: &mut OpState) -> ResourceId {
state
.resource_table
.add(TimerHandle(CancelHandle::new_rc()))
}
/// Bifurcate the op_sleep op into an interval one we can use for sanitization purposes.
#[op2(async(lazy), fast)]
pub async fn op_sleep_interval(
state: Rc<RefCell<OpState>>,
#[smi] millis: u64,
#[smi] rid: ResourceId,
) -> Result<bool, AnyError> {
op_sleep::call(state, millis, rid).await
}
/// Waits asynchronously until either `millis` milliseconds have passed or the
/// [`TimerHandle`] resource given by `rid` has been canceled.
///
/// If the timer is canceled, this returns `false`. Otherwise, it returns `true`.
#[op2(async(lazy), fast)]
pub async fn op_sleep(
state: Rc<RefCell<OpState>>,
#[smi] millis: u64,
#[smi] rid: ResourceId,
) -> Result<bool, AnyError> {
// If this timeout is scheduled for 0ms it means we want it to run at the
// end of the event loop turn. Since this is a lazy op, we can just return
// having already spun the event loop.
if millis == 0 {
return Ok(true);
}
// If the timer is not present in the resource table it was cancelled before
// this op was polled.
let Ok(handle) = state.borrow().resource_table.get::<TimerHandle>(rid) else {
return Ok(false);
};
// If a timer is requested with <=100ms resolution, request the high-res timer. Since the default
// Windows timer period is 15ms, this means a 100ms timer could fire at 115ms (15% late). We assume that
// timers longer than 100ms are a reasonable cutoff here.
// The high-res timers on Windows are still limited. Unfortunately this means that our shortest duration 4ms timers
// can still be 25% late, but without a more complex timer system or spinning on the clock itself, we're somewhat
// bounded by the OS' scheduler itself.
let _hr_timer_lock = if millis <= 100 {
Some(hr_timer_lock())
} else {
None
};
let res = tokio::time::sleep(Duration::from_millis(millis))
.or_cancel(handle.0.clone())
.await;
// We release the high-res timer lock here, either by being cancelled or resolving.
Ok(res.is_ok())
}
pub async fn op_defer() {}

View file

@ -457,7 +457,6 @@ function runtimeStart(
tsVersion,
target,
) {
core.setMacrotaskCallback(timers.handleTimerMacrotask);
core.setWasmStreamingCallback(fetch.handleWasmStreaming);
core.setReportExceptionCallback(event.reportException);
op_set_format_exception_callback(formatException);

View file

@ -265,6 +265,12 @@ itest!(resource_sanitizer {
output: "test/sanitizer/resource_sanitizer.out",
});
itest!(ops_sanitizer_tcp {
args: "test --allow-net --trace-leaks test/sanitizer/ops_sanitizer_tcp.ts",
exit_code: 1,
output: "test/sanitizer/ops_sanitizer_tcp.out",
});
itest!(exit_sanitizer {
args: "test test/sanitizer/exit_sanitizer.ts",
output: "test/sanitizer/exit_sanitizer.out",

View file

@ -2,7 +2,6 @@ error: Uncaught (in worker "") Error
throw new Error();
^
at [WILDCARD]/workers/drop_handle_race.js:2:9
at Object.action (ext:deno_web/02_timers.js:[WILDCARD])
at handleTimerMacrotask (ext:deno_web/02_timers.js:[WILDCARD])
at [WILDCARD]
error: Uncaught (in promise) Error: Unhandled error in child worker.
at Worker.#pollControl [WILDCARD]

View file

@ -6,7 +6,7 @@ test 1 ... FAILED [WILDCARD]
test 1 => [WILDCARD]/ops_sanitizer_closed_inside_started_before.ts:[WILDCARD]
error: Leaks detected:
- An async operation to sleep for a duration was started before the test, but completed during the test. Async operations should not complete in a test if they were not started in that test. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operation was started here:
- A timer was started before the test, but completed during the test. Intervals and timers should not complete in a test if they were not started in that test. This is often caused by not calling `clearTimeout`. The operation was started here:
at [WILDCARD]
at [WILDCARD]/ops_sanitizer_closed_inside_started_before.ts:[WILDCARD]

View file

@ -7,13 +7,13 @@ test 2 ... FAILED ([WILDCARD])
test 1 => [WILDCARD]/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
error: Leaks detected:
- An async operation to sleep for a duration was started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operation was started here:
- A timer was started in this test, but never completed. This is often caused by not calling `clearTimeout`. The operation was started here:
at [WILDCARD]
at setTimeout ([WILDCARD])
at test ([WILDCARD]/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD])
at [WILDCARD]/ops_sanitizer_multiple_timeout_tests.ts:8:27
at [WILDCARD]
- An async operation to sleep for a duration was started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operation was started here:
- A timer was started in this test, but never completed. This is often caused by not calling `clearTimeout`. The operation was started here:
at [WILDCARD]
at setTimeout ([WILDCARD])
at test ([WILDCARD]/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD])
@ -22,13 +22,13 @@ error: Leaks detected:
test 2 => [WILDCARD]/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
error: Leaks detected:
- An async operation to sleep for a duration was started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operation was started here:
- A timer was started in this test, but never completed. This is often caused by not calling `clearTimeout`. The operation was started here:
at [WILDCARD]
at setTimeout ([WILDCARD])
at test ([WILDCARD]/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD])
at [WILDCARD]/ops_sanitizer_multiple_timeout_tests.ts:10:27
at [WILDCARD]
- An async operation to sleep for a duration was started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operation was started here:
- A timer was started in this test, but never completed. This is often caused by not calling `clearTimeout`. The operation was started here:
at [WILDCARD]
at setTimeout ([WILDCARD])
at test ([WILDCARD]/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD])

View file

@ -7,12 +7,12 @@ test 2 ... FAILED ([WILDCARD])
test 1 => [WILDCARD]/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
error: Leaks detected:
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call.
- 2 timers were started in this test, but never completed. This is often caused by not calling `clearTimeout`.
To get more details where leaks occurred, run again with the --trace-leaks flag.
test 2 => [WILDCARD]/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
error: Leaks detected:
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call.
- 2 timers were started in this test, but never completed. This is often caused by not calling `clearTimeout`.
To get more details where leaks occurred, run again with the --trace-leaks flag.
FAILURES

View file

@ -0,0 +1,24 @@
Check [WILDCARD]/ops_sanitizer_tcp.ts
running 1 test from [WILDCARD]/ops_sanitizer_tcp.ts
testLeakTcpOps ... FAILED ([WILDCARD])
ERRORS
testLeakTcpOps => [WILDCARD]/ops_sanitizer_tcp.ts:[WILDCARD]
error: Leaks detected:
- A TCP listener was opened during the test, but not closed during the test. Close the TCP listener by calling `tcpListener.close()`.
- An async operation to accept a TCP stream was started in this test, but never completed. This is often caused by not closing a `Deno.Listener`. The operation was started here:
at op_net_accept_tcp ([WILDCARD])
at Listener.accept ([WILDCARD])
at testLeakTcpOps ([WILDCARD])
at innerWrapped ([WILDCARD])
at exitSanitizer ([WILDCARD])
at outerWrapped ([WILDCARD])
FAILURES
testLeakTcpOps => [WILDCARD]/ops_sanitizer_tcp.ts:[WILDCARD]
FAILED | 0 passed | 1 failed ([WILDCARD])
error: Test failed

View file

@ -0,0 +1,4 @@
Deno.test(function testLeakTcpOps() {
const listener1 = Deno.listen({ port: 0 });
listener1.accept();
});

View file

@ -9,7 +9,7 @@ leak interval ... FAILED ([WILDCARD])
leak interval => [WILDCARD]/ops_sanitizer_unstable.ts:[WILDCARD]
error: Leaks detected:
- An async operation to sleep for a duration was started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operation was started here:
- An interval was started in this test, but never completed. This is often caused by not calling `clearInterval`. The operation was started here:
at [WILDCARD]
at setInterval ([WILDCARD])
at fn ([WILDCARD]/ops_sanitizer_unstable.ts:[WILDCARD])

View file

@ -238,8 +238,8 @@ Deno.test(async function callbackTakesLongerThanInterval() {
Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, 300);
timeEndOfFirstCallback = Date.now();
} else {
// Second callback
assert(Date.now() - 100 >= timeEndOfFirstCallback);
// Second callback should be nearly instantaneous
assert(Date.now() - timeEndOfFirstCallback < 10);
clearInterval(interval);
resolve();
}
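
The updated assertion reflects how the new deno_core timers schedule intervals: when a callback blocks past the interval period, the next invocation is dispatched almost immediately after the callback returns instead of a full period later. A rough illustration, assuming the 100 ms period this test appears to use (timings indicative only):

```ts
// Rough illustration of the behavior asserted above; not part of the test suite.
let firstCallbackEnd = 0;
const id = setInterval(() => {
  if (firstCallbackEnd === 0) {
    const start = Date.now();
    while (Date.now() - start < 300) {
      // Busy-wait well past the interval period, like the Atomics.wait above.
    }
    firstCallbackEnd = Date.now();
  } else {
    // With the new timers this gap is expected to be small (the test allows < 10 ms).
    console.log("gap after blocking callback:", Date.now() - firstCallbackEnd, "ms");
    clearInterval(id);
  }
}, 100);
```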