
feat: "deno bench" subcommand (#13713)

This commit adds the "deno bench" subcommand and the "Deno.bench()"
API, which allows registering bench cases.

The API is modelled after "Deno.test()" and "deno test" subcommand.

Currently the output is rudimentary, and bench cases are not
subject to the "ops" and "resource" sanitizers.
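
For illustration, registering a bench with the new API looks like this (a
minimal sketch based on the declarations and help text in this diff; the file
name is hypothetical):

```ts
// url_bench.ts: matches the {*_,*.,}bench.{js,mjs,ts,jsx,tsx} glob
Deno.bench("parse URL", () => {
  new URL("https://deno.land/std/http/server.ts");
});
```

which is then executed with `deno bench --unstable url_bench.ts`.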

Co-authored-by: evan <github@evan.lol>
Bartek Iwańczuk 2022-03-11 23:07:02 +01:00 committed by GitHub
parent 32c059544b
commit 09ae512ccb
63 changed files with 2240 additions and 19 deletions

Cargo.lock (generated)

@@ -119,6 +119,15 @@ version = "1.0.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "159bb86af3a200e19a068f4224eae4c8bb2d0fa054c7e5d1cacd5cef95e684cd"
[[package]]
name = "arrayvec"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9"
dependencies = [
"nodrop",
]
[[package]]
name = "arrayvec"
version = "0.5.2"
@@ -775,6 +784,7 @@ dependencies = [
"nix",
"node_resolver",
"notify",
"num-format",
"once_cell",
"os_pipe",
"percent-encoding",
@@ -2506,6 +2516,12 @@ dependencies = [
"serde_json",
]
[[package]]
name = "nodrop"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb"
[[package]]
name = "notify"
version = "5.0.0-pre.12"
@@ -2563,6 +2579,16 @@ dependencies = [
"zeroize",
]
[[package]]
name = "num-format"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465"
dependencies = [
"arrayvec 0.4.12",
"itoa 0.4.8",
]
[[package]]
name = "num-integer"
version = "0.1.44"

@@ -77,6 +77,7 @@ log = { version = "=0.4.14", features = ["serde"] }
lspower = "=1.4.0"
node_resolver = "0.1.0"
notify = "=5.0.0-pre.12"
num-format = "=0.4.0"
once_cell = "=1.9.0"
percent-encoding = "=2.1.0"
pin-project = "=1.0.8"

@@ -457,6 +457,7 @@ declare namespace Deno {
options: Omit<TestDefinition, "fn" | "name">,
fn: (t: TestContext) => void | Promise<void>,
): void;
/** Exit the Deno process with optional exit code. If no exit code is supplied
 * then Deno will exit with return code of 0.
 *

@@ -4,6 +4,180 @@
/// <reference lib="deno.ns" />
declare namespace Deno {
export interface BenchDefinition {
fn: () => void | Promise<void>;
name: string;
ignore?: boolean;
/** Specify the number of iterations the benchmark should perform. Defaults to 1000. */
n?: number;
/** Specify the number of warmup iterations the benchmark should perform.
 * Defaults to 1000.
 *
 * These iterations are not measured; they allow the code to be optimized by
 * the JIT compiler before its performance is measured. */
warmup?: number;
/** If at least one bench has `only` set to true, only run benches that have
 * `only` set to true, and fail the bench suite afterwards. */
only?: boolean;
/** Ensure the bench case does not prematurely cause the process to exit,
* for example via a call to `Deno.exit`. Defaults to true. */
sanitizeExit?: boolean;
/** Specifies the permissions that should be used to run the bench.
* Set this to "inherit" to keep the calling thread's permissions.
* Set this to "none" to revoke all permissions.
*
* Defaults to "inherit".
*/
permissions?: Deno.PermissionOptions;
}
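// A hedged illustration (not part of this diff): combining the options above
// that the examples below don't exercise:
//
//   Deno.bench({
//     name: "focused benchmark",
//     n: 500,      // measured iterations (defaults to 1000)
//     warmup: 100, // unmeasured warmup iterations (defaults to 1000)
//     only: true,  // run only this bench, then fail the suite
//     fn() {
//       crypto.getRandomValues(new Uint8Array(32));
//     },
//   });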
/** Register a bench which will be run when `deno bench` is used on the command
* line and the containing module looks like a bench module.
* `fn` can be async if required.
* ```ts
* import { assertEquals } from "https://deno.land/std/testing/asserts.ts";
*
* Deno.bench({
* name: "example test",
* fn(): void {
* assertEquals("world", "world");
* },
* });
*
* Deno.bench({
* name: "example ignored test",
* ignore: Deno.build.os === "windows",
* fn(): void {
* // This bench is ignored only on Windows machines
* },
* });
*
* Deno.bench({
* name: "example async test",
* async fn() {
* const decoder = new TextDecoder("utf-8");
* const data = await Deno.readFile("hello_world.txt");
* assertEquals(decoder.decode(data), "Hello world");
* }
* });
* ```
*/
export function bench(t: BenchDefinition): void;
/** Register a bench which will be run when `deno bench` is used on the command
* line and the containing module looks like a bench module.
* `fn` can be async if required.
*
* ```ts
* import { assertEquals } from "https://deno.land/std/testing/asserts.ts";
*
* Deno.bench("My test description", (): void => {
* assertEquals("hello", "hello");
* });
*
* Deno.bench("My async test description", async (): Promise<void> => {
* const decoder = new TextDecoder("utf-8");
* const data = await Deno.readFile("hello_world.txt");
* assertEquals(decoder.decode(data), "Hello world");
* });
* ```
*/
export function bench(
name: string,
fn: () => void | Promise<void>,
): void;
/** Register a bench which will be run when `deno bench` is used on the command
* line and the containing module looks like a bench module.
* `fn` can be async if required. Declared function must have a name.
*
* ```ts
* import { assertEquals } from "https://deno.land/std/testing/asserts.ts";
*
* Deno.bench(function myBenchName(): void {
* assertEquals("hello", "hello");
* });
*
* Deno.bench(async function myOtherBenchName(): Promise<void> {
* const decoder = new TextDecoder("utf-8");
* const data = await Deno.readFile("hello_world.txt");
* assertEquals(decoder.decode(data), "Hello world");
* });
* ```
*/
export function bench(fn: () => void | Promise<void>): void;
/** Register a bench which will be run when `deno bench` is used on the command
* line and the containing module looks like a bench module.
* `fn` can be async if required.
*
* ```ts
* import { assertEquals } from "https://deno.land/std/testing/asserts.ts";
*
* Deno.bench("My test description", { permissions: { read: true } }, (): void => {
* assertEquals("hello", "hello");
* });
*
* Deno.bench("My async test description", { permissions: { read: false } }, async (): Promise<void> => {
* const decoder = new TextDecoder("utf-8");
* const data = await Deno.readFile("hello_world.txt");
* assertEquals(decoder.decode(data), "Hello world");
* });
* ```
*/
export function bench(
name: string,
options: Omit<BenchDefinition, "fn" | "name">,
fn: () => void | Promise<void>,
): void;
/** Register a bench which will be run when `deno bench` is used on the command
* line and the containing module looks like a bench module.
* `fn` can be async if required.
*
* ```ts
* import { assertEquals } from "https://deno.land/std/testing/asserts.ts";
*
* Deno.bench({ name: "My test description", permissions: { read: true } }, (): void => {
* assertEquals("hello", "hello");
* });
*
* Deno.bench({ name: "My async test description", permissions: { read: false } }, async (): Promise<void> => {
* const decoder = new TextDecoder("utf-8");
* const data = await Deno.readFile("hello_world.txt");
* assertEquals(decoder.decode(data), "Hello world");
* });
* ```
*/
export function bench(
options: Omit<BenchDefinition, "fn">,
fn: () => void | Promise<void>,
): void;
/** Register a bench which will be run when `deno bench` is used on the command
* line and the containing module looks like a bench module.
* `fn` can be async if required. Declared function must have a name.
*
* ```ts
* import { assertEquals } from "https://deno.land/std/testing/asserts.ts";
*
* Deno.bench({ permissions: { read: true } }, function myBenchName(): void {
* assertEquals("hello", "hello");
* });
*
* Deno.bench({ permissions: { read: false } }, async function myOtherBenchName(): Promise<void> {
* const decoder = new TextDecoder("utf-8");
* const data = await Deno.readFile("hello_world.txt");
* assertEquals(decoder.decode(data), "Hello world");
* });
* ```
*/
export function bench(
options: Omit<BenchDefinition, "fn" | "name">,
fn: () => void | Promise<void>,
): void;
/**
 * **UNSTABLE**: New API, yet to be vetted. This API is under consideration to
 * determine if permissions are required to call it.

@@ -35,6 +35,13 @@ static LONG_VERSION: Lazy<String> = Lazy::new(|| {
)
});
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct BenchFlags {
pub ignore: Vec<PathBuf>,
pub include: Option<Vec<String>>,
pub filter: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct BundleFlags {
pub source_file: String,
@@ -177,6 +184,7 @@ pub struct VendorFlags {
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub enum DenoSubcommand {
Bench(BenchFlags),
Bundle(BundleFlags),
Cache(CacheFlags),
Compile(CompileFlags),
@@ -487,26 +495,27 @@ pub fn flags_from_vec(args: Vec<String>) -> clap::Result<Flags> {
}
match matches.subcommand() {
Some(("run", m)) => run_parse(&mut flags, m), Some(("bench", m)) => bench_parse(&mut flags, m),
Some(("fmt", m)) => fmt_parse(&mut flags, m),
Some(("types", m)) => types_parse(&mut flags, m),
Some(("cache", m)) => cache_parse(&mut flags, m),
Some(("coverage", m)) => coverage_parse(&mut flags, m),
Some(("info", m)) => info_parse(&mut flags, m),
Some(("eval", m)) => eval_parse(&mut flags, m),
Some(("repl", m)) => repl_parse(&mut flags, m),
Some(("bundle", m)) => bundle_parse(&mut flags, m), Some(("bundle", m)) => bundle_parse(&mut flags, m),
Some(("install", m)) => install_parse(&mut flags, m), Some(("cache", m)) => cache_parse(&mut flags, m),
Some(("uninstall", m)) => uninstall_parse(&mut flags, m),
Some(("completions", m)) => completions_parse(&mut flags, m, app),
Some(("test", m)) => test_parse(&mut flags, m),
Some(("upgrade", m)) => upgrade_parse(&mut flags, m),
Some(("doc", m)) => doc_parse(&mut flags, m),
Some(("lint", m)) => lint_parse(&mut flags, m),
Some(("compile", m)) => compile_parse(&mut flags, m), Some(("compile", m)) => compile_parse(&mut flags, m),
Some(("completions", m)) => completions_parse(&mut flags, m, app),
Some(("coverage", m)) => coverage_parse(&mut flags, m),
Some(("doc", m)) => doc_parse(&mut flags, m),
Some(("eval", m)) => eval_parse(&mut flags, m),
Some(("fmt", m)) => fmt_parse(&mut flags, m),
Some(("info", m)) => info_parse(&mut flags, m),
Some(("install", m)) => install_parse(&mut flags, m),
Some(("lint", m)) => lint_parse(&mut flags, m),
Some(("lsp", m)) => lsp_parse(&mut flags, m), Some(("lsp", m)) => lsp_parse(&mut flags, m),
Some(("vendor", m)) => vendor_parse(&mut flags, m), Some(("repl", m)) => repl_parse(&mut flags, m),
Some(("run", m)) => run_parse(&mut flags, m),
Some(("task", m)) => task_parse(&mut flags, m), Some(("task", m)) => task_parse(&mut flags, m),
Some(("test", m)) => test_parse(&mut flags, m),
Some(("types", m)) => types_parse(&mut flags, m),
Some(("uninstall", m)) => uninstall_parse(&mut flags, m),
Some(("upgrade", m)) => upgrade_parse(&mut flags, m),
Some(("vendor", m)) => vendor_parse(&mut flags, m),
_ => handle_repl_flags(&mut flags, ReplFlags { eval: None }), _ => handle_repl_flags(&mut flags, ReplFlags { eval: None }),
} }
@@ -560,6 +569,7 @@ If the flag is set, restrict these messages to errors.",
)
.global(true),
)
.subcommand(bench_subcommand())
.subcommand(bundle_subcommand())
.subcommand(cache_subcommand())
.subcommand(compile_subcommand())
@@ -584,6 +594,50 @@ If the flag is set, restrict these messages to errors.",
.after_help(ENV_VARIABLES_HELP)
}
fn bench_subcommand<'a>() -> App<'a> {
runtime_args(App::new("bench"), true, false)
.setting(AppSettings::TrailingVarArg)
.arg(
Arg::new("ignore")
.long("ignore")
.takes_value(true)
.use_delimiter(true)
.require_equals(true)
.help("Ignore files"),
)
.arg(
Arg::new("filter")
.setting(ArgSettings::AllowHyphenValues)
.long("filter")
.takes_value(true)
.help("Run benchmarks with this string or pattern in the bench name"),
)
.arg(
Arg::new("files")
.help("List of file names to run")
.takes_value(true)
.multiple_values(true)
.multiple_occurrences(true),
)
.arg(watch_arg(false))
.arg(no_clear_screen_arg())
.arg(script_arg().last(true))
.about("Run benchmarks")
.long_about(
"Run benchmarks using Deno's built-in bench tool.
Evaluate the given modules, run all benches declared with 'Deno.bench()' and
report results to standard output:
deno bench src/fetch_bench.ts src/signal_bench.ts
Directory arguments are expanded to all contained files matching the glob
{*_,*.,}bench.{js,mjs,ts,jsx,tsx}:
deno bench src/",
)
}
fn bundle_subcommand<'a>() -> App<'a> {
compile_args(App::new("bundle"))
.arg(Arg::new("source_file").takes_value(true).required(true))
@@ -1880,6 +1934,51 @@ fn unsafely_ignore_certificate_errors_arg<'a>() -> Arg<'a> {
.validator(crate::flags_allow_net::validator)
}
fn bench_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
runtime_args_parse(flags, matches, true, false);
// NOTE: `deno bench` always uses `--no-prompt`; benches shouldn't ever show
// interactive prompts, unless initiated by user code
flags.no_prompt = true;
let ignore = match matches.values_of("ignore") {
Some(f) => f.map(PathBuf::from).collect(),
None => vec![],
};
let filter = matches.value_of("filter").map(String::from);
if matches.is_present("script_arg") {
let script_arg: Vec<String> = matches
.values_of("script_arg")
.unwrap()
.map(String::from)
.collect();
for v in script_arg {
flags.argv.push(v);
}
}
let include = if matches.is_present("files") {
let files: Vec<String> = matches
.values_of("files")
.unwrap()
.map(String::from)
.collect();
Some(files)
} else {
None
};
watch_arg_parse(flags, matches, false);
flags.subcommand = DenoSubcommand::Bench(BenchFlags {
include,
ignore,
filter,
});
}
fn bundle_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
compile_args_parse(flags, matches);
@@ -5166,4 +5265,39 @@ mod tests {
}
);
}
#[test]
fn bench_with_flags() {
let r = flags_from_vec(svec![
"deno",
"bench",
"--unstable",
"--filter",
"- foo",
"--location",
"https:foo",
"--allow-net",
"dir1/",
"dir2/",
"--",
"arg1",
"arg2"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Bench(BenchFlags {
filter: Some("- foo".to_string()),
include: Some(svec!["dir1/", "dir2/"]),
ignore: vec![],
}),
unstable: true,
location: Some(Url::parse("https://foo/").unwrap()),
allow_net: Some(vec![]),
no_prompt: true,
argv: svec!["arg1", "arg2"],
..Flags::default()
}
);
}
}

@@ -152,6 +152,19 @@ pub fn is_supported_test_path(path: &Path) -> bool {
}
}
/// Checks if the path has a basename and extension Deno supports for benches.
pub fn is_supported_bench_path(path: &Path) -> bool {
if let Some(name) = path.file_stem() {
let basename = name.to_string_lossy();
(basename.ends_with("_bench")
|| basename.ends_with(".bench")
|| basename == "bench")
&& is_supported_ext(path)
} else {
false
}
}
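// Illustration (not part of this diff): per the {*_,*.,}bench.{js,mjs,ts,jsx,tsx}
// glob in the subcommand help, this accepts e.g. "fetch_bench.ts", "mod.bench.js"
// and "bench.tsx", but rejects "benchmark.ts" and "fetch_test.ts".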
/// Checks if the path has an extension Deno supports for tests.
pub fn is_supported_test_ext(path: &Path) -> bool {
if let Some(ext) = get_extension(path) {

@@ -40,6 +40,7 @@ mod windows_util;
use crate::file_fetcher::File;
use crate::file_watcher::ResolutionResult;
use crate::flags::BenchFlags;
use crate::flags::BundleFlags;
use crate::flags::CacheFlags;
use crate::flags::CheckFlag;
@@ -1248,6 +1249,19 @@ async fn coverage_command(
Ok(0)
}
async fn bench_command(
flags: Flags,
bench_flags: BenchFlags,
) -> Result<i32, AnyError> {
if flags.watch.is_some() {
tools::bench::run_benchmarks_with_watch(flags, bench_flags).await?;
} else {
tools::bench::run_benchmarks(flags, bench_flags).await?;
}
Ok(0)
}
async fn test_command(
flags: Flags,
test_flags: TestFlags,
@@ -1328,6 +1342,9 @@ fn get_subcommand(
flags: Flags,
) -> Pin<Box<dyn Future<Output = Result<i32, AnyError>>>> {
match flags.subcommand.clone() {
DenoSubcommand::Bench(bench_flags) => {
bench_command(flags, bench_flags).boxed_local()
}
DenoSubcommand::Bundle(bundle_flags) => {
bundle_command(flags, bundle_flags).boxed_local()
}

cli/ops/bench.rs (new file)

@@ -0,0 +1,99 @@
use crate::tools::bench::BenchEvent;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::op_sync;
use deno_core::Extension;
use deno_core::ModuleSpecifier;
use deno_core::OpState;
use deno_runtime::permissions::create_child_permissions;
use deno_runtime::permissions::ChildPermissionsArg;
use deno_runtime::permissions::Permissions;
use std::time;
use tokio::sync::mpsc::UnboundedSender;
use uuid::Uuid;
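/// Builds the extension that registers the bench-related ops and stores the
/// bench event channel in the op state so the ops below can reach it.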
pub fn init(sender: UnboundedSender<BenchEvent>) -> Extension {
Extension::builder()
.ops(vec![
(
"op_pledge_test_permissions",
op_sync(op_pledge_test_permissions),
),
(
"op_restore_test_permissions",
op_sync(op_restore_test_permissions),
),
("op_get_bench_origin", op_sync(op_get_bench_origin)),
("op_dispatch_bench_event", op_sync(op_dispatch_bench_event)),
("op_bench_now", op_sync(op_bench_now)),
])
.state(move |state| {
state.put(sender.clone());
Ok(())
})
.build()
}
#[derive(Clone)]
struct PermissionsHolder(Uuid, Permissions);
pub fn op_pledge_test_permissions(
state: &mut OpState,
args: ChildPermissionsArg,
_: (),
) -> Result<Uuid, AnyError> {
let token = Uuid::new_v4();
let parent_permissions = state.borrow_mut::<Permissions>();
let worker_permissions = create_child_permissions(parent_permissions, args)?;
let parent_permissions = parent_permissions.clone();
state.put::<PermissionsHolder>(PermissionsHolder(token, parent_permissions));
// NOTE: This call overrides current permission set for the worker
state.put::<Permissions>(worker_permissions);
Ok(token)
}
pub fn op_restore_test_permissions(
state: &mut OpState,
token: Uuid,
_: (),
) -> Result<(), AnyError> {
if let Some(permissions_holder) = state.try_take::<PermissionsHolder>() {
if token != permissions_holder.0 {
panic!("restore test permissions token does not match the stored token");
}
let permissions = permissions_holder.1;
state.put::<Permissions>(permissions);
Ok(())
} else {
Err(generic_error("no permissions to restore"))
}
}
fn op_get_bench_origin(
state: &mut OpState,
_: (),
_: (),
) -> Result<String, AnyError> {
Ok(state.borrow::<ModuleSpecifier>().to_string())
}
fn op_dispatch_bench_event(
state: &mut OpState,
event: BenchEvent,
_: (),
) -> Result<(), AnyError> {
let sender = state.borrow::<UnboundedSender<BenchEvent>>().clone();
sender.send(event).ok();
Ok(())
}
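/// Returns nanoseconds elapsed since the `Instant` stored in the op state,
/// giving benches a monotonic, high-resolution timer.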
fn op_bench_now(state: &mut OpState, _: (), _: ()) -> Result<u64, AnyError> {
let ns = state.borrow::<time::Instant>().elapsed().as_nanos();
let ns_u64 = u64::try_from(ns)?;
Ok(ns_u64)
}

@@ -3,6 +3,7 @@
use crate::proc_state::ProcState;
use deno_core::Extension;
pub mod bench;
mod errors;
mod runtime_compiler;
pub mod testing;

@@ -0,0 +1,135 @@
// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
use crate::itest;
itest!(overloads {
args: "bench --unstable bench/overloads.ts",
exit_code: 0,
output: "bench/overloads.out",
});
itest!(meta {
args: "bench --unstable bench/meta.ts",
exit_code: 0,
output: "bench/meta.out",
});
itest!(pass {
args: "bench --unstable bench/pass.ts",
exit_code: 0,
output: "bench/pass.out",
});
itest!(ignore {
args: "bench --unstable bench/ignore.ts",
exit_code: 0,
output: "bench/ignore.out",
});
itest!(ignore_permissions {
args: "bench --unstable bench/ignore_permissions.ts",
exit_code: 0,
output: "bench/ignore_permissions.out",
});
itest!(fail {
args: "bench --unstable bench/fail.ts",
exit_code: 1,
output: "bench/fail.out",
});
itest!(collect {
args: "bench --unstable --ignore=bench/collect/ignore bench/collect",
exit_code: 0,
output: "bench/collect.out",
});
itest!(load_unload {
args: "bench --unstable bench/load_unload.ts",
exit_code: 0,
output: "bench/load_unload.out",
});
itest!(interval {
args: "bench --unstable bench/interval.ts",
exit_code: 0,
output: "bench/interval.out",
});
itest!(quiet {
args: "bench --unstable --quiet bench/quiet.ts",
exit_code: 0,
output: "bench/quiet.out",
});
itest!(only {
args: "bench --unstable bench/only.ts",
exit_code: 1,
output: "bench/only.out",
});
itest!(no_check {
args: "bench --unstable --no-check bench/no_check.ts",
exit_code: 1,
output: "bench/no_check.out",
});
itest!(allow_all {
args: "bench --unstable --allow-all bench/allow_all.ts",
exit_code: 0,
output: "bench/allow_all.out",
});
itest!(allow_none {
args: "bench --unstable bench/allow_none.ts",
exit_code: 1,
output: "bench/allow_none.out",
});
itest!(exit_sanitizer {
args: "bench --unstable bench/exit_sanitizer.ts",
output: "bench/exit_sanitizer.out",
exit_code: 1,
});
itest!(clear_timeout {
args: "bench --unstable bench/clear_timeout.ts",
exit_code: 0,
output: "bench/clear_timeout.out",
});
itest!(finally_timeout {
args: "bench --unstable bench/finally_timeout.ts",
exit_code: 1,
output: "bench/finally_timeout.out",
});
itest!(unresolved_promise {
args: "bench --unstable bench/unresolved_promise.ts",
exit_code: 1,
output: "bench/unresolved_promise.out",
});
itest!(unhandled_rejection {
args: "bench --unstable bench/unhandled_rejection.ts",
exit_code: 1,
output: "bench/unhandled_rejection.out",
});
itest!(filter {
args: "bench --unstable --filter=foo bench/filter",
exit_code: 0,
output: "bench/filter.out",
});
itest!(no_prompt_by_default {
args: "bench --unstable bench/no_prompt_by_default.ts",
exit_code: 1,
output: "bench/no_prompt_by_default.out",
});
itest!(no_prompt_with_denied_perms {
args: "bench --unstable --allow-read bench/no_prompt_with_denied_perms.ts",
exit_code: 1,
output: "bench/no_prompt_with_denied_perms.out",
});

@@ -50,6 +50,8 @@ macro_rules! itest_flaky(
// the test (ex. `lint_tests.rs`) and which is the implementation (ex. `lint.rs`)
// when both are open, especially for two tabs in VS Code
#[path = "bench_tests.rs"]
mod bench;
#[path = "bundle_tests.rs"]
mod bundle;
#[path = "cache_tests.rs"]

cli/tests/testdata/bench/allow_all.out (new file)

@@ -0,0 +1,18 @@
[WILDCARD]
running 14 benches from [WILDCARD]
bench read false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench read true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench write false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench write true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench net false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench net true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench env false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench env true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench run false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench run true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench ffi false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench ffi true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench hrtime false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench hrtime true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench result: ok. 14 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]

cli/tests/testdata/bench/allow_all.ts (new file)

@@ -0,0 +1,35 @@
import { assertEquals } from "../../../../test_util/std/testing/asserts.ts";
const permissions: Deno.PermissionName[] = [
"read",
"write",
"net",
"env",
"run",
"ffi",
"hrtime",
];
for (const name of permissions) {
Deno.bench({
name: `${name} false`,
permissions: {
[name]: false,
},
async fn() {
const status = await Deno.permissions.query({ name });
assertEquals(status.state, "prompt");
},
});
Deno.bench({
name: `${name} true`,
permissions: {
[name]: true,
},
async fn() {
const status = await Deno.permissions.query({ name });
assertEquals(status.state, "granted");
},
});
}

cli/tests/testdata/bench/allow_none.out (new file)

@@ -0,0 +1,51 @@
[WILDCARD]
running 7 benches from [WILDCARD]
bench read ... 1000 iterations FAILED [WILDCARD]
bench write ... 1000 iterations FAILED [WILDCARD]
bench net ... 1000 iterations FAILED [WILDCARD]
bench env ... 1000 iterations FAILED [WILDCARD]
bench run ... 1000 iterations FAILED [WILDCARD]
bench ffi ... 1000 iterations FAILED [WILDCARD]
bench hrtime ... 1000 iterations FAILED [WILDCARD]
failures:
read
PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
write
PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
net
PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
env
PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
run
PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
ffi
PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
hrtime
PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
failures:
read
write
net
env
run
ffi
hrtime
bench result: FAILED. 0 passed; 7 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]

cli/tests/testdata/bench/allow_none.ts (new file)

@@ -0,0 +1,23 @@
import { unreachable } from "../../../../test_util/std/testing/asserts.ts";
const permissions: Deno.PermissionName[] = [
"read",
"write",
"net",
"env",
"run",
"ffi",
"hrtime",
];
for (const name of permissions) {
Deno.bench({
name,
permissions: {
[name]: true,
},
fn() {
unreachable();
},
});
}

@@ -0,0 +1,8 @@
Check [WILDCARD]/bench/clear_timeout.ts
running 3 benches from [WILDCARD]/bench/clear_timeout.ts
bench bench1 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench2 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench3 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])

@@ -0,0 +1,5 @@
clearTimeout(setTimeout(() => {}, 1000));
Deno.bench("bench1", () => {});
Deno.bench("bench2", () => {});
Deno.bench("bench3", () => {});

cli/tests/testdata/bench/collect.out (new file)

@@ -0,0 +1,5 @@
Check [WILDCARD]/bench/collect/bench.ts
running 0 benches from [WILDCARD]/bench/collect/bench.ts
bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])

@@ -0,0 +1 @@
throw new Error("this module should be ignored");

@@ -0,0 +1,35 @@
Check [WILDCARD]/bench/exit_sanitizer.ts
running 3 benches from [WILDCARD]/bench/exit_sanitizer.ts
bench exit(0) ... 1000 iterations FAILED ([WILDCARD])
bench exit(1) ... 1000 iterations FAILED ([WILDCARD])
bench exit(2) ... 1000 iterations FAILED ([WILDCARD])
failures:
exit(0)
AssertionError: Bench attempted to exit with exit code: 0
at [WILDCARD]
at [WILDCARD]/bench/exit_sanitizer.ts:2:8
at [WILDCARD]
exit(1)
AssertionError: Bench attempted to exit with exit code: 1
at [WILDCARD]
at [WILDCARD]/bench/exit_sanitizer.ts:6:8
at [WILDCARD]
exit(2)
AssertionError: Bench attempted to exit with exit code: 2
at [WILDCARD]
at [WILDCARD]/bench/exit_sanitizer.ts:10:8
at [WILDCARD]
failures:
exit(0)
exit(1)
exit(2)
bench result: FAILED. 0 passed; 3 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
error: Bench failed

@@ -0,0 +1,11 @@
Deno.bench("exit(0)", function () {
Deno.exit(0);
});
Deno.bench("exit(1)", function () {
Deno.exit(1);
});
Deno.bench("exit(2)", function () {
Deno.exit(2);
});

cli/tests/testdata/bench/fail.out (new file)

@@ -0,0 +1,81 @@
Check [WILDCARD]/bench/fail.ts
running 10 benches from [WILDCARD]/bench/fail.ts
bench bench0 ... 1000 iterations FAILED ([WILDCARD])
bench bench1 ... 1000 iterations FAILED ([WILDCARD])
bench bench2 ... 1000 iterations FAILED ([WILDCARD])
bench bench3 ... 1000 iterations FAILED ([WILDCARD])
bench bench4 ... 1000 iterations FAILED ([WILDCARD])
bench bench5 ... 1000 iterations FAILED ([WILDCARD])
bench bench6 ... 1000 iterations FAILED ([WILDCARD])
bench bench7 ... 1000 iterations FAILED ([WILDCARD])
bench bench8 ... 1000 iterations FAILED ([WILDCARD])
bench bench9 ... 1000 iterations FAILED ([WILDCARD])
failures:
bench0
Error
at [WILDCARD]/bench/fail.ts:2:9
at [WILDCARD]
bench1
Error
at [WILDCARD]/bench/fail.ts:5:9
at [WILDCARD]
bench2
Error
at [WILDCARD]/bench/fail.ts:8:9
at [WILDCARD]
bench3
Error
at [WILDCARD]/bench/fail.ts:11:9
at [WILDCARD]
bench4
Error
at [WILDCARD]/bench/fail.ts:14:9
at [WILDCARD]
bench5
Error
at [WILDCARD]/bench/fail.ts:17:9
at [WILDCARD]
bench6
Error
at [WILDCARD]/bench/fail.ts:20:9
at [WILDCARD]
bench7
Error
at [WILDCARD]/bench/fail.ts:23:9
at [WILDCARD]
bench8
Error
at [WILDCARD]/bench/fail.ts:26:9
at [WILDCARD]
bench9
Error
at [WILDCARD]/bench/fail.ts:29:9
at [WILDCARD]
failures:
bench0
bench1
bench2
bench3
bench4
bench5
bench6
bench7
bench8
bench9
bench result: FAILED. 0 passed; 10 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
error: Bench failed

cli/tests/testdata/bench/fail.ts (new file)

@@ -0,0 +1,30 @@
Deno.bench("bench0", () => {
throw new Error();
});
Deno.bench("bench1", () => {
throw new Error();
});
Deno.bench("bench2", () => {
throw new Error();
});
Deno.bench("bench3", () => {
throw new Error();
});
Deno.bench("bench4", () => {
throw new Error();
});
Deno.bench("bench5", () => {
throw new Error();
});
Deno.bench("bench6", () => {
throw new Error();
});
Deno.bench("bench7", () => {
throw new Error();
});
Deno.bench("bench8", () => {
throw new Error();
});
Deno.bench("bench9", () => {
throw new Error();
});

cli/tests/testdata/bench/filter.out (new file)

@@ -0,0 +1,12 @@
Check [WILDCARD]/bench/filter/a_bench.ts
Check [WILDCARD]/bench/filter/b_bench.ts
Check [WILDCARD]/bench/filter/c_bench.ts
running 1 bench from [WILDCARD]/bench/filter/a_bench.ts
bench foo ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
running 1 bench from [WILDCARD]/bench/filter/b_bench.ts
bench foo ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
running 1 bench from [WILDCARD]/bench/filter/c_bench.ts
bench foo ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 6 filtered out ([WILDCARD])

@@ -0,0 +1,3 @@
Deno.bench("foo", function () {});
Deno.bench("bar", function () {});
Deno.bench("baz", function () {});

@@ -0,0 +1,3 @@
Deno.bench("foo", function () {});
Deno.bench("bar", function () {});
Deno.bench("baz", function () {});

@@ -0,0 +1,3 @@
Deno.bench("foo", function () {});
Deno.bench("bar", function () {});
Deno.bench("baz", function () {});

@@ -0,0 +1,19 @@
Check [WILDCARD]/bench/finally_timeout.ts
running 2 benches from [WILDCARD]/bench/finally_timeout.ts
bench error ... 1000 iterations FAILED ([WILDCARD])
bench success ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
failures:
error
Error: fail
at [WILDCARD]/bench/finally_timeout.ts:4:11
at [WILDCARD]
failures:
error
bench result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
error: Bench failed

@@ -0,0 +1,11 @@
Deno.bench("error", function () {
const timer = setTimeout(() => null, 10000);
try {
throw new Error("fail");
} finally {
clearTimeout(timer);
}
});
Deno.bench("success", function () {
});

cli/tests/testdata/bench/ignore.out (new file)

@@ -0,0 +1,15 @@
Check [WILDCARD]/bench/ignore.ts
running 10 benches from [WILDCARD]/bench/ignore.ts
bench bench0 ... 1000 iterations ignored ([WILDCARD])
bench bench1 ... 1000 iterations ignored ([WILDCARD])
bench bench2 ... 1000 iterations ignored ([WILDCARD])
bench bench3 ... 1000 iterations ignored ([WILDCARD])
bench bench4 ... 1000 iterations ignored ([WILDCARD])
bench bench5 ... 1000 iterations ignored ([WILDCARD])
bench bench6 ... 1000 iterations ignored ([WILDCARD])
bench bench7 ... 1000 iterations ignored ([WILDCARD])
bench bench8 ... 1000 iterations ignored ([WILDCARD])
bench bench9 ... 1000 iterations ignored ([WILDCARD])
bench result: ok. 0 passed; 0 failed; 10 ignored; 0 measured; 0 filtered out ([WILDCARD])

cli/tests/testdata/bench/ignore.ts (new file)

@@ -0,0 +1,9 @@
for (let i = 0; i < 10; i++) {
Deno.bench({
name: `bench${i}`,
ignore: true,
fn() {
throw new Error("unreachable");
},
});
}

@@ -0,0 +1,6 @@
Check [WILDCARD]/bench/ignore_permissions.ts
running 1 bench from [WILDCARD]/bench/ignore_permissions.ts
bench ignore ... 1000 iterations ignored ([WILDCARD])
bench result: ok. 0 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out ([WILDCARD])

@@ -0,0 +1,16 @@
Deno.bench({
name: "ignore",
permissions: {
read: true,
write: true,
net: true,
env: true,
run: true,
ffi: true,
hrtime: true,
},
ignore: true,
fn() {
throw new Error("unreachable");
},
});

cli/tests/testdata/bench/interval.out (new file)

@@ -0,0 +1,5 @@
Check [WILDCARD]/bench/interval.ts
running 0 benches from [WILDCARD]/bench/interval.ts
bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])

cli/tests/testdata/bench/interval.ts (new file)

@@ -0,0 +1 @@
setInterval(function () {}, 0);

@@ -0,0 +1,6 @@
Check [WILDCARD]/bench/load_unload.ts
running 1 bench from [WILDCARD]/bench/load_unload.ts
bench bench ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])

cli/tests/testdata/bench/load_unload.ts (new file)

@@ -0,0 +1,22 @@
let interval: number | null = null;
addEventListener("load", () => {
if (interval) {
throw new Error("Interval is already set");
}
interval = setInterval(() => {}, 0);
});
addEventListener("unload", () => {
if (!interval) {
throw new Error("Interval was not set");
}
clearInterval(interval);
});
Deno.bench("bench", () => {
if (!interval) {
throw new Error("Interval was not set");
}
});

cli/tests/testdata/bench/meta.out (new file)

@@ -0,0 +1,7 @@
Check [WILDCARD]/bench/meta.ts
import.meta.main: false
import.meta.url: [WILDCARD]/bench/meta.ts
running 0 benches from [WILDCARD]/bench/meta.ts
bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])

cli/tests/testdata/bench/meta.ts (new file)

@@ -0,0 +1,2 @@
console.log("import.meta.main: %s", import.meta.main);
console.log("import.meta.url: %s", import.meta.url);

cli/tests/testdata/bench/no_check.out (new file)

@@ -0,0 +1,8 @@
bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
error: Uncaught TypeError: Cannot read properties of undefined (reading 'fn')
Deno.bench();
^
at [WILDCARD]
at [WILDCARD]/bench/no_check.ts:1:6

cli/tests/testdata/bench/no_check.ts (new file)

@@ -0,0 +1 @@
Deno.bench();

cli/tests/testdata/bench/no_color.ts (new file)

@@ -0,0 +1,17 @@
Deno.bench({
name: "success",
fn() {},
});
Deno.bench({
name: "fail",
fn() {
throw new Error("fail");
},
});
Deno.bench({
name: "ignored",
ignore: true,
fn() {},
});

@@ -0,0 +1,17 @@
Check [WILDCARD]no_prompt_by_default.ts
running 1 bench from [WILDCARD]no_prompt_by_default.ts
bench no prompt ... 1000 iterations FAILED ([WILDCARD]ms)
failures:
no prompt
PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
[WILDCARD]
failures:
no prompt
bench result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD]ms)
error: Bench failed

@@ -0,0 +1,3 @@
Deno.bench("no prompt", async () => {
await Deno.readTextFile("./some_file.txt");
});

@@ -0,0 +1,17 @@
Check [WILDCARD]/no_prompt_with_denied_perms.ts
running 1 bench from [WILDCARD]/no_prompt_with_denied_perms.ts
bench no prompt ... 1000 iterations FAILED ([WILDCARD]ms)
failures:
no prompt
PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
[WILDCARD]
failures:
no prompt
bench result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD]ms)
error: Bench failed

@@ -0,0 +1,3 @@
Deno.bench("no prompt", { permissions: { read: false } }, async () => {
await Deno.readTextFile("./some_file.txt");
});

cli/tests/testdata/bench/only.out (new file)

@@ -0,0 +1,7 @@
Check [WILDCARD]/bench/only.ts
running 1 bench from [WILDCARD]/bench/only.ts
bench only ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out ([WILDCARD])
error: Bench failed because the "only" option was used

cli/tests/testdata/bench/only.ts (new file)

@@ -0,0 +1,15 @@
Deno.bench({
name: "before",
fn() {},
});
Deno.bench({
only: true,
name: "only",
fn() {},
});
Deno.bench({
name: "after",
fn() {},
});

cli/tests/testdata/bench/overloads.out (new file)

@@ -0,0 +1,11 @@
Check [WILDCARD]/bench/overloads.ts
running 6 benches from [WILDCARD]/bench/overloads.ts
bench bench0 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench1 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench2 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench3 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench4 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench5 ... 1000 iterations ignored ([WILDCARD])
bench result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out ([WILDCARD])

cli/tests/testdata/bench/overloads.ts (new file)

@@ -0,0 +1,6 @@
Deno.bench("bench0", () => {});
Deno.bench(function bench1() {});
Deno.bench({ name: "bench2", fn: () => {} });
Deno.bench("bench3", { permissions: "none" }, () => {});
Deno.bench({ name: "bench4" }, () => {});
Deno.bench({ ignore: true }, function bench5() {});

cli/tests/testdata/bench/pass.out (new file)

@@ -0,0 +1,15 @@
Check [WILDCARD]/bench/pass.ts
running 10 benches from [WILDCARD]/bench/pass.ts
bench bench0 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench1 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench2 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench3 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench4 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench5 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench6 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench7 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench8 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench bench9 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
bench result: ok. 10 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])

cli/tests/testdata/bench/pass.ts (new file)

@@ -0,0 +1,10 @@
Deno.bench("bench0", () => {});
Deno.bench("bench1", () => {});
Deno.bench("bench2", () => {});
Deno.bench("bench3", () => {});
Deno.bench("bench4", () => {});
Deno.bench("bench5", () => {});
Deno.bench("bench6", () => {});
Deno.bench("bench7", () => {});
Deno.bench("bench8", () => {});
Deno.bench("bench9", () => {});

cli/tests/testdata/bench/quiet.out (new file)

@@ -0,0 +1,8 @@
running 4 benches from [WILDCARD]/bench/quiet.ts
bench console.log ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench console.error ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench console.info ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench console.warn ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
bench result: ok. 4 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])

cli/tests/testdata/bench/quiet.ts (new file)

@@ -0,0 +1,15 @@
Deno.bench("console.log", function () {
console.log("log");
});
Deno.bench("console.error", function () {
console.error("error");
});
Deno.bench("console.info", function () {
console.info("info");
});
Deno.bench("console.warn", function () {
console.info("warn");
});

@@ -0,0 +1,10 @@
Check [WILDCARD]/bench/unhandled_rejection.ts
bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
error: Uncaught (in promise) Error: rejection
reject(new Error("rejection"));
^
at [WILDCARD]/bench/unhandled_rejection.ts:2:10
at new Promise (<anonymous>)
at [WILDCARD]/bench/unhandled_rejection.ts:1:1

@@ -0,0 +1,3 @@
new Promise((_resolve, reject) => {
reject(new Error("rejection"));
});

@@ -0,0 +1,5 @@
Check [WILDCARD]/bench/unresolved_promise.ts
bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
error: Module evaluation is still pending but there are no pending ops or dynamic imports. This situation is often caused by unresolved promises.

@@ -0,0 +1 @@
await new Promise((_resolve, _reject) => {});

cli/tools/bench.rs (new file)

@@ -0,0 +1,727 @@
// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
use crate::cache;
use crate::cache::CacherLoader;
use crate::colors;
use crate::compat;
use crate::create_main_worker;
use crate::display;
use crate::emit;
use crate::file_watcher;
use crate::file_watcher::ResolutionResult;
use crate::flags::BenchFlags;
use crate::flags::CheckFlag;
use crate::flags::Flags;
use crate::fs_util::collect_specifiers;
use crate::fs_util::is_supported_bench_path;
use crate::graph_util::contains_specifier;
use crate::graph_util::graph_valid;
use crate::located_script_name;
use crate::lockfile;
use crate::ops;
use crate::proc_state::ProcState;
use crate::resolver::ImportMapResolver;
use crate::resolver::JsxResolver;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::futures::future;
use deno_core::futures::stream;
use deno_core::futures::FutureExt;
use deno_core::futures::StreamExt;
use deno_core::serde_json::json;
use deno_core::ModuleSpecifier;
use deno_graph::ModuleKind;
use deno_runtime::permissions::Permissions;
use deno_runtime::tokio_util::run_basic;
use log::Level;
use num_format::Locale;
use num_format::ToFormattedString;
use serde::Deserialize;
use std::collections::HashSet;
use std::io::Write;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use tokio::sync::mpsc::unbounded_channel;
use tokio::sync::mpsc::UnboundedSender;
#[derive(Debug, Clone, Deserialize)]
struct BenchSpecifierOptions {
compat_mode: bool,
filter: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
#[serde(rename_all = "camelCase")]
pub struct BenchDescription {
pub origin: String,
pub name: String,
pub iterations: u64,
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum BenchOutput {
Console(String),
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum BenchResult {
Ok,
Ignored,
Failed(String),
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct BenchPlan {
pub origin: String,
pub total: usize,
pub filtered_out: usize,
pub used_only: bool,
}
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum BenchEvent {
Plan(BenchPlan),
Wait(BenchDescription),
Output(BenchOutput),
IterationTime(u64),
Result(BenchDescription, BenchResult, u64),
}
#[derive(Debug, Clone)]
pub struct BenchMeasures {
pub iterations: u64,
pub current_start: Instant,
pub measures: Vec<u128>,
}
#[derive(Debug, Clone)]
pub struct BenchSummary {
pub total: usize,
pub passed: usize,
pub failed: usize,
pub ignored: usize,
pub filtered_out: usize,
pub measured: usize,
pub measures: Vec<BenchMeasures>,
pub current_bench: BenchMeasures,
pub failures: Vec<(BenchDescription, String)>,
}
impl BenchSummary {
pub fn new() -> Self {
Self {
total: 0,
passed: 0,
failed: 0,
ignored: 0,
filtered_out: 0,
measured: 0,
measures: Vec::new(),
current_bench: BenchMeasures {
iterations: 0,
current_start: Instant::now(),
measures: vec![],
},
failures: Vec::new(),
}
}
fn has_failed(&self) -> bool {
self.failed > 0 || !self.failures.is_empty()
}
fn has_pending(&self) -> bool {
self.total - self.passed - self.failed - self.ignored > 0
}
}
pub trait BenchReporter {
fn report_plan(&mut self, plan: &BenchPlan);
fn report_wait(&mut self, description: &BenchDescription);
fn report_output(&mut self, output: &BenchOutput);
fn report_result(
&mut self,
description: &BenchDescription,
result: &BenchResult,
elapsed: u64,
current_bench: &BenchMeasures,
);
fn report_summary(&mut self, summary: &BenchSummary, elapsed: &Duration);
}
struct PrettyBenchReporter {
echo_output: bool,
}
impl PrettyBenchReporter {
fn new(echo_output: bool) -> Self {
Self { echo_output }
}
fn force_report_wait(&mut self, description: &BenchDescription) {
print!(
"bench {} ... {} iterations ",
description.name, description.iterations
);
// flush for faster feedback when line buffered
std::io::stdout().flush().unwrap();
}
}
impl BenchReporter for PrettyBenchReporter {
fn report_plan(&mut self, plan: &BenchPlan) {
let inflection = if plan.total == 1 { "bench" } else { "benches" };
println!("running {} {} from {}", plan.total, inflection, plan.origin);
}
fn report_wait(&mut self, description: &BenchDescription) {
self.force_report_wait(description);
}
fn report_output(&mut self, output: &BenchOutput) {
if self.echo_output {
match output {
BenchOutput::Console(line) => print!("{}", line),
}
}
}
fn report_result(
&mut self,
_description: &BenchDescription,
result: &BenchResult,
elapsed: u64,
current_bench: &BenchMeasures,
) {
let status = match result {
BenchResult::Ok => {
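// Mean iteration time: sum of all sampled iteration times (ns) divided by
// the iteration count, using integer division.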
let ns_op = current_bench.measures.iter().sum::<u128>()
/ current_bench.iterations as u128;
let min_op = current_bench.measures.iter().min().unwrap_or(&0);
let max_op = current_bench.measures.iter().max().unwrap_or(&0);
format!(
"{} ns/iter ({}..{} ns/iter) {}",
ns_op.to_formatted_string(&Locale::en),
min_op.to_formatted_string(&Locale::en),
max_op.to_formatted_string(&Locale::en),
colors::green("ok")
)
}
BenchResult::Ignored => colors::yellow("ignored").to_string(),
BenchResult::Failed(_) => colors::red("FAILED").to_string(),
};
println!(
"{} {}",
status,
colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
);
}
fn report_summary(&mut self, summary: &BenchSummary, elapsed: &Duration) {
if !summary.failures.is_empty() {
println!("\nfailures:\n");
for (description, error) in &summary.failures {
println!("{}", description.name);
println!("{}", error);
println!();
}
println!("failures:\n");
for (description, _) in &summary.failures {
println!("\t{}", description.name);
}
}
let status = if summary.has_failed() || summary.has_pending() {
colors::red("FAILED").to_string()
} else {
colors::green("ok").to_string()
};
println!(
"\nbench result: {}. {} passed; {} failed; {} ignored; {} measured; {} filtered out {}\n",
status,
summary.passed,
summary.failed,
summary.ignored,
summary.measured,
summary.filtered_out,
colors::gray(format!("({})", display::human_elapsed(elapsed.as_millis()))),
);
}
}
fn create_reporter(echo_output: bool) -> Box<dyn BenchReporter + Send> {
Box::new(PrettyBenchReporter::new(echo_output))
}
/// Type check a collection of module and document specifiers.
async fn check_specifiers(
ps: &ProcState,
permissions: Permissions,
specifiers: Vec<ModuleSpecifier>,
lib: emit::TypeLib,
) -> Result<(), AnyError> {
ps.prepare_module_load(
specifiers,
false,
lib,
Permissions::allow_all(),
permissions,
true,
)
.await?;
Ok(())
}
/// Run a single specifier as an executable bench module.
async fn bench_specifier(
ps: ProcState,
permissions: Permissions,
specifier: ModuleSpecifier,
channel: UnboundedSender<BenchEvent>,
options: BenchSpecifierOptions,
) -> Result<(), AnyError> {
let mut worker = create_main_worker(
&ps,
specifier.clone(),
permissions,
vec![ops::bench::init(channel.clone())],
);
if options.compat_mode {
worker.execute_side_module(&compat::GLOBAL_URL).await?;
worker.execute_side_module(&compat::MODULE_URL).await?;
let use_esm_loader = compat::check_if_should_use_esm_loader(&specifier)?;
if use_esm_loader {
worker.execute_side_module(&specifier).await?;
} else {
compat::load_cjs_module(
&mut worker.js_runtime,
&specifier.to_file_path().unwrap().display().to_string(),
false,
)?;
worker.run_event_loop(false).await?;
}
} else {
// We execute the module as a side module so that import.meta.main is not set.
worker.execute_side_module(&specifier).await?;
}
worker.dispatch_load_event(&located_script_name!())?;
let bench_result = worker.js_runtime.execute_script(
&located_script_name!(),
&format!(
r#"Deno[Deno.internal].runBenchmarks({})"#,
json!({
"filter": options.filter,
}),
),
)?;
worker.js_runtime.resolve_value(bench_result).await?;
worker.dispatch_unload_event(&located_script_name!())?;
Ok(())
}
/// Bench a collection of specifiers, one module at a time.
async fn bench_specifiers(
ps: ProcState,
permissions: Permissions,
specifiers: Vec<ModuleSpecifier>,
options: BenchSpecifierOptions,
) -> Result<(), AnyError> {
let log_level = ps.flags.log_level;
let (sender, mut receiver) = unbounded_channel::<BenchEvent>();
let join_handles = specifiers.iter().map(move |specifier| {
let ps = ps.clone();
let permissions = permissions.clone();
let specifier = specifier.clone();
let sender = sender.clone();
let options = options.clone();
tokio::task::spawn_blocking(move || {
let future = bench_specifier(ps, permissions, specifier, sender, options);
run_basic(future)
})
});
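// buffer_unordered(1) drains the spawned bench modules one at a time, so
// benches never run concurrently and timings stay uncontended.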
let join_stream = stream::iter(join_handles)
.buffer_unordered(1)
.collect::<Vec<Result<Result<(), AnyError>, tokio::task::JoinError>>>();
let mut reporter = create_reporter(log_level != Some(Level::Error));
let handler = {
tokio::task::spawn(async move {
let earlier = Instant::now();
let mut summary = BenchSummary::new();
let mut used_only = false;
while let Some(event) = receiver.recv().await {
match event {
BenchEvent::Plan(plan) => {
summary.total += plan.total;
summary.filtered_out += plan.filtered_out;
if plan.used_only {
used_only = true;
}
reporter.report_plan(&plan);
}
BenchEvent::Wait(description) => {
reporter.report_wait(&description);
summary.current_bench = BenchMeasures {
iterations: description.iterations,
current_start: Instant::now(),
measures: Vec::with_capacity(
description.iterations.try_into().unwrap(),
),
};
}
BenchEvent::Output(output) => {
reporter.report_output(&output);
}
BenchEvent::IterationTime(iter_time) => {
summary.current_bench.measures.push(iter_time.into())
}
BenchEvent::Result(description, result, elapsed) => {
match &result {
BenchResult::Ok => {
summary.passed += 1;
}
BenchResult::Ignored => {
summary.ignored += 1;
}
BenchResult::Failed(error) => {
summary.failed += 1;
summary.failures.push((description.clone(), error.clone()));
}
}
reporter.report_result(
&description,
&result,
elapsed,
&summary.current_bench,
);
}
}
}
let elapsed = Instant::now().duration_since(earlier);
reporter.report_summary(&summary, &elapsed);
if used_only {
return Err(generic_error(
"Bench failed because the \"only\" option was used",
));
}
if summary.failed > 0 {
return Err(generic_error("Bench failed"));
}
Ok(())
})
};
let (join_results, result) = future::join(join_stream, handler).await;
// propagate any errors
for join_result in join_results {
join_result??;
}
result??;
Ok(())
}
pub async fn run_benchmarks(
flags: Flags,
bench_flags: BenchFlags,
) -> Result<(), AnyError> {
let ps = ProcState::build(Arc::new(flags)).await?;
let permissions = Permissions::from_options(&ps.flags.permissions_options());
let specifiers = collect_specifiers(
bench_flags.include.unwrap_or_else(|| vec![".".to_string()]),
&bench_flags.ignore.clone(),
is_supported_bench_path,
)?;
if specifiers.is_empty() {
return Err(generic_error("No bench modules found"));
}
let lib = if ps.flags.unstable {
emit::TypeLib::UnstableDenoWindow
} else {
emit::TypeLib::DenoWindow
};
check_specifiers(&ps, permissions.clone(), specifiers.clone(), lib).await?;
let compat = ps.flags.compat;
bench_specifiers(
ps,
permissions,
specifiers,
BenchSpecifierOptions {
compat_mode: compat,
filter: bench_flags.filter,
},
)
.await?;
Ok(())
}
// TODO(bartlomieju): heavy duplication of code with `cli/tools/test.rs`
pub async fn run_benchmarks_with_watch(
flags: Flags,
bench_flags: BenchFlags,
) -> Result<(), AnyError> {
let flags = Arc::new(flags);
let ps = ProcState::build(flags.clone()).await?;
let permissions = Permissions::from_options(&flags.permissions_options());
let lib = if flags.unstable {
emit::TypeLib::UnstableDenoWindow
} else {
emit::TypeLib::DenoWindow
};
let include = bench_flags.include.unwrap_or_else(|| vec![".".to_string()]);
let ignore = bench_flags.ignore.clone();
let paths_to_watch: Vec<_> = include.iter().map(PathBuf::from).collect();
let no_check = ps.flags.check == CheckFlag::None;
let resolver = |changed: Option<Vec<PathBuf>>| {
let mut cache = cache::FetchCacher::new(
ps.dir.gen_cache.clone(),
ps.file_fetcher.clone(),
Permissions::allow_all(),
Permissions::allow_all(),
);
let paths_to_watch = paths_to_watch.clone();
let paths_to_watch_clone = paths_to_watch.clone();
let maybe_import_map_resolver =
ps.maybe_import_map.clone().map(ImportMapResolver::new);
let maybe_jsx_resolver = ps.maybe_config_file.as_ref().and_then(|cf| {
cf.to_maybe_jsx_import_source_module()
.map(|im| JsxResolver::new(im, maybe_import_map_resolver.clone()))
});
let maybe_locker = lockfile::as_maybe_locker(ps.lockfile.clone());
let maybe_imports = ps
.maybe_config_file
.as_ref()
.map(|cf| cf.to_maybe_imports());
let files_changed = changed.is_some();
let include = include.clone();
let ignore = ignore.clone();
let check_js = ps
.maybe_config_file
.as_ref()
.map(|cf| cf.get_check_js())
.unwrap_or(false);
async move {
let bench_modules =
collect_specifiers(include.clone(), &ignore, is_supported_bench_path)?;
let mut paths_to_watch = paths_to_watch_clone;
let mut modules_to_reload = if files_changed {
Vec::new()
} else {
bench_modules
.iter()
.map(|url| (url.clone(), ModuleKind::Esm))
.collect()
};
let maybe_imports = if let Some(result) = maybe_imports {
result?
} else {
None
};
let maybe_resolver = if maybe_jsx_resolver.is_some() {
maybe_jsx_resolver.as_ref().map(|jr| jr.as_resolver())
} else {
maybe_import_map_resolver
.as_ref()
.map(|im| im.as_resolver())
};
let graph = deno_graph::create_graph(
bench_modules
.iter()
.map(|s| (s.clone(), ModuleKind::Esm))
.collect(),
false,
maybe_imports,
cache.as_mut_loader(),
maybe_resolver,
maybe_locker,
None,
None,
)
.await;
graph_valid(&graph, !no_check, check_js)?;
// TODO(@kitsonk) - This should be totally derivable from the graph.
for specifier in bench_modules {
fn get_dependencies<'a>(
graph: &'a deno_graph::ModuleGraph,
maybe_module: Option<&'a deno_graph::Module>,
// The `output` set is threaded through recursive calls so dependencies
// that were already collected are skipped; otherwise circular
// dependencies would cause a stack overflow
output: &mut HashSet<&'a ModuleSpecifier>,
no_check: bool,
) {
if let Some(module) = maybe_module {
for dep in module.dependencies.values() {
if let Some(specifier) = &dep.get_code() {
if !output.contains(specifier) {
output.insert(specifier);
get_dependencies(
graph,
graph.get(specifier),
output,
no_check,
);
}
}
if !no_check {
if let Some(specifier) = &dep.get_type() {
if !output.contains(specifier) {
output.insert(specifier);
get_dependencies(
graph,
graph.get(specifier),
output,
no_check,
);
}
}
}
}
}
}
// This bench module and all of its dependencies
let mut modules = HashSet::new();
modules.insert(&specifier);
get_dependencies(&graph, graph.get(&specifier), &mut modules, no_check);
paths_to_watch.extend(
modules
.iter()
.filter_map(|specifier| specifier.to_file_path().ok()),
);
if let Some(changed) = &changed {
for path in changed.iter().filter_map(|path| {
deno_core::resolve_url_or_path(&path.to_string_lossy()).ok()
}) {
if modules.contains(&&path) {
modules_to_reload.push((specifier, ModuleKind::Esm));
break;
}
}
}
}
Ok((paths_to_watch, modules_to_reload))
}
.map(move |result| {
if files_changed
&& matches!(result, Ok((_, ref modules)) if modules.is_empty())
{
ResolutionResult::Ignore
} else {
match result {
Ok((paths_to_watch, modules_to_reload)) => {
ResolutionResult::Restart {
paths_to_watch,
result: Ok(modules_to_reload),
}
}
Err(e) => ResolutionResult::Restart {
paths_to_watch,
result: Err(e),
},
}
}
})
};
let operation = |modules_to_reload: Vec<(ModuleSpecifier, ModuleKind)>| {
let flags = flags.clone();
let filter = bench_flags.filter.clone();
let include = include.clone();
let ignore = ignore.clone();
let lib = lib.clone();
let permissions = permissions.clone();
let ps = ps.clone();
async move {
let specifiers =
collect_specifiers(include.clone(), &ignore, is_supported_bench_path)?
.iter()
.filter(|specifier| contains_specifier(&modules_to_reload, specifier))
.cloned()
.collect::<Vec<ModuleSpecifier>>();
check_specifiers(&ps, permissions.clone(), specifiers.clone(), lib)
.await?;
bench_specifiers(
ps,
permissions.clone(),
specifiers,
BenchSpecifierOptions {
compat_mode: flags.compat,
filter: filter.clone(),
},
)
.await?;
Ok(())
}
};
file_watcher::watch_func(
resolver,
operation,
file_watcher::PrintConfig {
job_name: "Bench".to_string(),
clear_screen: !flags.no_clear_screen,
},
)
.await?;
Ok(())
}
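
For orientation, here is how this plumbing is likely exercised end to end. A sketch, assuming the bench API is gated behind `--unstable` (suggested by the `TypeLib::UnstableDenoWindow` branch above) and that `run_benchmarks_with_watch` is wired to a `--watch` flag; the file name is made up:

// url_bench.ts (hypothetical)
Deno.bench("url parsing", () => {
  new URL("https://deno.land/");
});

// Run once:        deno bench --unstable url_bench.ts
// Re-run on edit:  deno bench --unstable --watch url_bench.ts
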

View file

@@ -1,5 +1,6 @@
// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
pub mod bench;
pub mod coverage;
pub mod doc;
pub mod fmt;

View file

@@ -409,12 +409,14 @@
// Wrap test function in additional assertion that makes sure
// that the test case does not accidentally exit prematurely.
-function assertExit(fn) {
+function assertExit(fn, isTest) {
  return async function exitSanitizer(...params) {
    setExitHandler((exitCode) => {
      assert(
        false,
-        `Test case attempted to exit with exit code: ${exitCode}`,
+        `${
+          isTest ? "Test case" : "Bench"
+        } attempted to exit with exit code: ${exitCode}`,
      );
    });
@@ -528,6 +530,7 @@
}

const tests = [];
const benches = [];

// Main test function provided by Deno.
function test( function test(
@@ -627,6 +630,107 @@
  ArrayPrototypePush(tests, testDef);
}
// Main bench function provided by Deno.
function bench(
nameOrFnOrOptions,
optionsOrFn,
maybeFn,
) {
let benchDef;
const defaults = {
ignore: false,
only: false,
sanitizeOps: true,
sanitizeResources: true,
sanitizeExit: true,
permissions: null,
};
if (typeof nameOrFnOrOptions === "string") {
if (!nameOrFnOrOptions) {
throw new TypeError("The bench name can't be empty");
}
if (typeof optionsOrFn === "function") {
benchDef = { fn: optionsOrFn, name: nameOrFnOrOptions, ...defaults };
} else {
if (!maybeFn || typeof maybeFn !== "function") {
throw new TypeError("Missing bench function");
}
if (optionsOrFn.fn != undefined) {
throw new TypeError(
"Unexpected 'fn' field in options, bench function is already provided as the third argument.",
);
}
if (optionsOrFn.name != undefined) {
throw new TypeError(
"Unexpected 'name' field in options, bench name is already provided as the first argument.",
);
}
benchDef = {
...defaults,
...optionsOrFn,
fn: maybeFn,
name: nameOrFnOrOptions,
};
}
} else if (typeof nameOrFnOrOptions === "function") {
if (!nameOrFnOrOptions.name) {
throw new TypeError("The bench function must have a name");
}
if (optionsOrFn != undefined) {
throw new TypeError("Unexpected second argument to Deno.bench()");
}
if (maybeFn != undefined) {
throw new TypeError("Unexpected third argument to Deno.bench()");
}
benchDef = {
...defaults,
fn: nameOrFnOrOptions,
name: nameOrFnOrOptions.name,
};
} else {
let fn;
let name;
if (typeof optionsOrFn === "function") {
fn = optionsOrFn;
if (nameOrFnOrOptions.fn != undefined) {
throw new TypeError(
"Unexpected 'fn' field in options, bench function is already provided as the second argument.",
);
}
name = nameOrFnOrOptions.name ?? fn.name;
} else {
if (
!nameOrFnOrOptions.fn || typeof nameOrFnOrOptions.fn !== "function"
) {
throw new TypeError(
"Expected 'fn' field in the first argument to be a bench function.",
);
}
fn = nameOrFnOrOptions.fn;
name = nameOrFnOrOptions.name ?? fn.name;
}
if (!name) {
throw new TypeError("The bench name can't be empty");
}
benchDef = { ...defaults, ...nameOrFnOrOptions, fn, name };
}
benchDef.fn = wrapBenchFnWithSanitizers(
reportBenchIteration(benchDef.fn),
benchDef,
);
if (benchDef.permissions) {
benchDef.fn = withPermissions(
benchDef.fn,
benchDef.permissions,
);
}
ArrayPrototypePush(benches, benchDef);
}
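
The overload juggling above mirrors `Deno.test()`. Every accepted call shape, per the branches just shown (bodies are trivial placeholders):

// name + fn
Deno.bench("add", () => { 1 + 1; });

// name + options + fn (options must not repeat `name` or `fn`)
Deno.bench("add", { ignore: false }, () => { 1 + 1; });

// a named function; its `name` property becomes the bench name
Deno.bench(function add() { 1 + 1; });

// one options bag carrying both `name` and `fn`
Deno.bench({ name: "add", fn: () => { 1 + 1; } });

// options + fn, with the name inferred from the function when omitted
Deno.bench({ ignore: false }, function add() { 1 + 1; });
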
function formatError(error) {
  if (ObjectPrototypeIsPrototypeOf(AggregateErrorPrototype, error)) {
    const message = error
@@ -699,10 +803,48 @@
  }
}
async function runBench(bench) {
if (bench.ignore) {
return "ignored";
}
const step = new BenchStep({
name: bench.name,
sanitizeExit: bench.sanitizeExit,
warmup: false,
});
try {
const warmupIterations = bench.warmupIterations;
step.warmup = true;
for (let i = 0; i < warmupIterations; i++) {
await bench.fn(step);
}
const iterations = bench.n;
step.warmup = false;
for (let i = 0; i < iterations; i++) {
await bench.fn(step);
}
return "ok";
} catch (error) {
return {
"failed": formatError(error),
};
}
}
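
The early "ignored" return is driven by the same `ignore` flag that `Deno.test()` accepts, so conditional skips carry over unchanged; for instance:

Deno.bench({
  name: "windows-only path handling",
  ignore: Deno.build.os !== "windows", // reported as ignored elsewhere
  fn: () => { /* ... */ },
});
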
function getTestOrigin() {
  return core.opSync("op_get_test_origin");
}
function getBenchOrigin() {
return core.opSync("op_get_bench_origin");
}
function reportTestPlan(plan) {
  core.opSync("op_dispatch_test_event", {
    plan,
@@ -739,6 +881,53 @@
  });
}
function reportBenchPlan(plan) {
core.opSync("op_dispatch_bench_event", {
plan,
});
}
function reportBenchConsoleOutput(console) {
core.opSync("op_dispatch_bench_event", {
output: { console },
});
}
function reportBenchWait(description) {
core.opSync("op_dispatch_bench_event", {
wait: description,
});
}
function reportBenchResult(description, result, elapsed) {
core.opSync("op_dispatch_bench_event", {
result: [description, result, elapsed],
});
}
function reportBenchIteration(fn) {
return async function benchIteration(step) {
let now;
if (!step.warmup) {
now = benchNow();
}
await fn(step);
if (!step.warmup) {
reportIterationTime(benchNow() - now);
}
};
}
function benchNow() {
return core.opSync("op_bench_now");
}
function reportIterationTime(time) {
core.opSync("op_dispatch_bench_event", {
iterationTime: time,
});
}
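
These five dispatchers define the wire format consumed by the `BenchEvent` match arms on the Rust side (`Plan`, `Wait`, `Output`, `IterationTime`, `Result`). A rough TypeScript sketch of the payloads, with field names read off the calls above:

type BenchEvent =
  | { plan: { origin: string; total: number; filteredOut: number; usedOnly: boolean } }
  | { wait: { origin: string; name: string; iterations: number } }
  | { output: { console: string } }
  | { iterationTime: number }
  | { result: [description: object, result: "ok" | "ignored" | { failed: string }, elapsed: number] };
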
async function runTests({
  filter = null,
  shuffle = null,
@@ -799,6 +988,53 @@
  globalThis.console = originalConsole;
}
async function runBenchmarks({
filter = null,
} = {}) {
core.setMacrotaskCallback(handleOpSanitizerDelayMacrotask);
const origin = getBenchOrigin();
const originalConsole = globalThis.console;
globalThis.console = new Console(reportBenchConsoleOutput);
const only = ArrayPrototypeFilter(benches, (bench) => bench.only);
const filtered = ArrayPrototypeFilter(
only.length > 0 ? only : benches,
createTestFilter(filter),
);
reportBenchPlan({
origin,
total: filtered.length,
filteredOut: benches.length - filtered.length,
usedOnly: only.length > 0,
});
for (const bench of filtered) {
// TODO(bartlomieju): probably needs some validation?
const iterations = bench.n ?? 1000;
const warmupIterations = bench.warmup ?? 1000;
const description = {
origin,
name: bench.name,
iterations,
};
bench.n = iterations;
bench.warmupIterations = warmupIterations;
const earlier = DateNow();
reportBenchWait(description);
const result = await runBench(bench);
const elapsed = DateNow() - earlier;
reportBenchResult(description, result, elapsed);
}
globalThis.console = originalConsole;
}
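
Worth noting: with no options at all, a bench performs 1000 warmup iterations followed by 1000 measured ones (and, per the TODO above, the values are not yet validated). Overriding both might look like:

Deno.bench({
  name: "expensive operation",
  n: 100,      // measured iterations, defaults to 1000
  warmup: 10,  // unmeasured warmup iterations, defaults to 1000
  fn: () => { /* ... */ },
});
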
/**
 * @typedef {{
 * fn: (t: TestContext) => void | Promise<void>,
@@ -989,6 +1225,27 @@
  }
}
/**
* @typedef {{
* name: string;
* sanitizeExit: boolean,
* warmup: boolean,
* }} BenchStepParams
*/
class BenchStep {
/** @type {BenchStepParams} */
#params;
/** @param params {BenchStepParams} */
constructor(params) {
this.#params = params;
}
get name() {
return this.#params.name;
}
}
/** @param parentStep {TestStep} */
function createTestContext(parentStep) {
  return {
@@ -1121,11 +1378,26 @@
    testFn = assertResources(testFn);
  }
  if (opts.sanitizeExit) {
-    testFn = assertExit(testFn);
+    testFn = assertExit(testFn, true);
  }
  return testFn;
}
/**
* @template T {Function}
* @param fn {T}
* @param opts {{
* sanitizeExit: boolean,
* }}
* @returns {T}
*/
function wrapBenchFnWithSanitizers(fn, opts) {
if (opts.sanitizeExit) {
fn = assertExit(fn, false);
}
return fn;
}
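
With `isTest` set to false, the shared exit sanitizer reports "Bench attempted to exit" instead of "Test case attempted to exit". A bench that would trip it:

Deno.bench("premature exit", () => {
  Deno.exit(1); // fails with: Bench attempted to exit with exit code: 1
});
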
/**
 * @template T
 * @param value {T | undefined}
@@ -1139,9 +1411,11 @@
window.__bootstrap.internals = {
  ...window.__bootstrap.internals ?? {},
  runTests,
  runBenchmarks,
};

window.__bootstrap.testing = {
  test,
  bench,
};
})(this);

View file

@@ -7,6 +7,7 @@
__bootstrap.denoNs = {
  metrics: core.metrics,
  test: __bootstrap.testing.test,
  bench: __bootstrap.testing.bench,
  Process: __bootstrap.process.Process,
  run: __bootstrap.process.run,
  isatty: __bootstrap.tty.isatty,