mirror of
https://github.com/denoland/deno.git
synced 2024-11-22 15:06:54 -05:00
5dbf5c8293
Disables `BenchContext::start()` and `BenchContext::end()` for low-precision benchmarks (those averaging less than 0.01s per iteration) and prints a warning when they are used in such benchmarks, suggesting that they be removed.

```ts
Deno.bench("noop", { group: "noops" }, () => {});

Deno.bench("noop with start/end", { group: "noops" }, (b) => {
  b.start();
  b.end();
});
```

Before:

```
cpu: 12th Gen Intel(R) Core(TM) i9-12900K
runtime: deno 1.36.2 (x86_64-unknown-linux-gnu)

file:///home/nayeem/projects/deno/temp3.ts
benchmark                      time (avg)        iter/s             (min … max)       p75       p99      p995
----------------------------------------------------------------------------- -----------------------------
noop                            2.63 ns/iter 380,674,131.4   (2.45 ns … 27.78 ns)   2.55 ns   4.03 ns   5.33 ns
noop with start and end       302.47 ns/iter   3,306,146.0    (200 ns … 151.2 µs)    300 ns    400 ns    400 ns

summary
  noop
   115.14x faster than noop with start and end
```

After:

```
cpu: 12th Gen Intel(R) Core(TM) i9-12900K
runtime: deno 1.36.1 (x86_64-unknown-linux-gnu)

file:///home/nayeem/projects/deno/temp3.ts
benchmark                      time (avg)        iter/s             (min … max)       p75       p99      p995
----------------------------------------------------------------------------- -----------------------------
noop                            3.01 ns/iter 332,565,561.7   (2.73 ns … 29.54 ns)   2.93 ns   5.29 ns   7.45 ns
noop with start and end         7.73 ns/iter 129,291,091.5   (6.61 ns … 46.76 ns)   7.87 ns  13.12 ns  15.32 ns
Warning: start() and end() calls in "noop with start and end" are ignored because it averages less
than 0.01s per iteration. Remove them for better results.

summary
  noop
   2.57x faster than noop with start and end
```
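For contrast, `start()` and `end()` remain useful when each iteration is slow enough (at or above the 0.01s threshold) that excluding setup from the timed region is meaningful. A minimal sketch, assuming the public `Deno.BenchContext` API; the workload is illustrative:

```ts
Deno.bench("sort 1M floats", (b) => {
  // Untimed setup: the array is rebuilt on every iteration.
  const data = Array.from({ length: 1_000_000 }, () => Math.random());
  b.start();
  data.sort((x, y) => x - y); // Only the sort is measured; typically well above 0.01s.
  b.end();
});
```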
304 lines
7.4 KiB
Rust
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.

use serde::Serialize;

use super::*;

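/// Hooks the bench runner calls over the course of a run; implemented by the
/// machine-readable `JsonReporter` and the human-readable `ConsoleReporter`
/// below.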
pub trait BenchReporter {
  fn report_group_summary(&mut self);
  fn report_plan(&mut self, plan: &BenchPlan);
  fn report_end(&mut self, report: &BenchReport);
  fn report_register(&mut self, desc: &BenchDescription);
  fn report_wait(&mut self, desc: &BenchDescription);
  fn report_output(&mut self, output: &str);
  fn report_result(&mut self, desc: &BenchDescription, result: &BenchResult);
}

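/// Everything the JSON reporter emits: runtime and cpu strings matching the
/// console banner, plus the collected benches. Serialized to stdout as a
/// single object when the run ends.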
#[derive(Debug, Serialize)]
struct JsonReporterOutput {
  runtime: String,
  cpu: String,
  benches: Vec<JsonReporterBench>,
}

impl Default for JsonReporterOutput {
  fn default() -> Self {
    Self {
      runtime: format!("{} {}", get_user_agent(), env!("TARGET")),
      cpu: mitata::cpu::name(),
      benches: vec![],
    }
  }
}

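/// One entry per bench, keyed by (origin, group, name, baseline); repeated
/// results for the same bench are appended to `results`.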
#[derive(Debug, Serialize)]
struct JsonReporterBench {
  origin: String,
  group: Option<String>,
  name: String,
  baseline: bool,
  results: Vec<BenchResult>,
}

#[derive(Debug, Serialize)]
pub struct JsonReporter(JsonReporterOutput);

impl JsonReporter {
  pub fn new() -> Self {
    Self(Default::default())
  }
}

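// The JSON reporter is silent while benches run; it only accumulates results
// and dumps the whole output object to stdout in `report_end`.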
impl BenchReporter for JsonReporter {
  fn report_group_summary(&mut self) {}
  #[cold]
  fn report_plan(&mut self, _plan: &BenchPlan) {}

  fn report_end(&mut self, _report: &BenchReport) {
    match write_json_to_stdout(self) {
      Ok(_) => (),
      Err(e) => println!("{e}"),
    }
  }

  fn report_register(&mut self, _desc: &BenchDescription) {}

  fn report_wait(&mut self, _desc: &BenchDescription) {}

  fn report_output(&mut self, _output: &str) {}

  fn report_result(&mut self, desc: &BenchDescription, result: &BenchResult) {
    if desc.warmup {
      return;
    }

    let maybe_bench = self.0.benches.iter_mut().find(|bench| {
      bench.origin == desc.origin
        && bench.group == desc.group
        && bench.name == desc.name
        && bench.baseline == desc.baseline
    });

    if let Some(bench) = maybe_bench {
      bench.results.push(result.clone());
    } else {
      self.0.benches.push(JsonReporterBench {
        origin: desc.origin.clone(),
        group: desc.group.clone(),
        name: desc.name.clone(),
        baseline: desc.baseline,
        results: vec![result.clone()],
      });
    }
  }
}

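/// Buffers measurements per group so a relative-speed summary can be printed
/// when the group (or the whole run) ends.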
pub struct ConsoleReporter {
  name: String,
  show_output: bool,
  has_ungrouped: bool,
  group: Option<String>,
  baseline: bool,
  group_measurements: Vec<(BenchDescription, BenchStats)>,
  options: Option<mitata::reporter::Options>,
}

impl ConsoleReporter {
  pub fn new(show_output: bool) -> Self {
    Self {
      show_output,
      group: None,
      options: None,
      baseline: false,
      name: String::new(),
      has_ungrouped: false,
      group_measurements: Vec::new(),
    }
  }
}

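// Mirrors mitata's console format: a one-time cpu/runtime banner, a header
// per bench file, one row per result, and per-group summaries.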
impl BenchReporter for ConsoleReporter {
  #[cold]
  fn report_plan(&mut self, plan: &BenchPlan) {
    use std::sync::atomic::AtomicBool;
    use std::sync::atomic::Ordering;
    static FIRST_PLAN: AtomicBool = AtomicBool::new(true);

    self.report_group_summary();

    self.group = None;
    self.baseline = false;
    self.name = String::new();
    self.group_measurements.clear();
    self.options = Some(mitata::reporter::Options::new(
      &plan.names.iter().map(|x| x.as_str()).collect::<Vec<&str>>(),
    ));

    let options = self.options.as_mut().unwrap();

    options.percentiles = true;
    options.colors = colors::use_color();

    if FIRST_PLAN
      .compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst)
      .is_ok()
    {
      println!("{}", colors::gray(format!("cpu: {}", mitata::cpu::name())));
      println!(
        "{}\n",
        colors::gray(format!(
          "runtime: deno {} ({})",
          crate::version::deno(),
          env!("TARGET")
        ))
      );
    } else {
      println!();
    }

    println!(
      "{}\n{}\n{}",
      colors::gray(&plan.origin),
      mitata::reporter::header(options),
      mitata::reporter::br(options)
    );
  }

  fn report_register(&mut self, _desc: &BenchDescription) {}

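  // Tracks the current group and inserts blank lines between ungrouped
  // results and group sections so the table stays readable.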
  fn report_wait(&mut self, desc: &BenchDescription) {
    self.name = desc.name.clone();

    match &desc.group {
      None => {
        self.has_ungrouped = true;
      }

      Some(group) => {
        if self.group.is_none()
          && self.has_ungrouped
          && self.group_measurements.is_empty()
        {
          println!();
        }

        if self.group.is_none() || group != self.group.as_ref().unwrap() {
          self.report_group_summary();
        }

        if (self.group.is_none() && self.has_ungrouped)
          || (self.group.is_some() && self.group_measurements.is_empty())
        {
          println!();
        }

        self.group = Some(group.clone());
      }
    }
  }

  fn report_output(&mut self, output: &str) {
    if self.show_output {
      print!("{} {}", colors::gray(format!("{}:", self.name)), output)
    }
  }

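  // One table row per finished bench. Only the first baseline seen is kept
  // as the group baseline; a low-precision bench that called start()/end()
  // gets a warning because those explicit timers were ignored.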
  fn report_result(&mut self, desc: &BenchDescription, result: &BenchResult) {
    if desc.warmup {
      return;
    }

    let options = self.options.as_ref().unwrap();

    match result {
      BenchResult::Ok(stats) => {
        let mut desc = desc.clone();

        if desc.baseline && !self.baseline {
          self.baseline = true;
        } else {
          desc.baseline = false;
        }

        println!(
          "{}",
          mitata::reporter::benchmark(
            &desc.name,
            &mitata::reporter::BenchmarkStats {
              avg: stats.avg,
              min: stats.min,
              max: stats.max,
              p75: stats.p75,
              p99: stats.p99,
              p995: stats.p995,
            },
            options
          )
        );

        if !stats.high_precision && stats.used_explicit_timers {
          println!("{}", colors::yellow(format!("Warning: start() and end() calls in \"{}\" are ignored because it averages less\nthan 0.01s per iteration. Remove them for better results.", &desc.name)));
        }

        self.group_measurements.push((desc, stats.clone()));
      }

      BenchResult::Failed(js_error) => {
        println!(
          "{}",
          mitata::reporter::benchmark_error(
            &desc.name,
            &mitata::reporter::Error {
              stack: None,
              message: format_test_error(js_error),
            },
            options
          )
        )
      }
    };
  }

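  // Prints the relative-speed summary once at least two measurements are
  // buffered and there is either a named group or a baseline, then resets.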
  fn report_group_summary(&mut self) {
    let options = match self.options.as_ref() {
      None => return,
      Some(options) => options,
    };

    if 2 <= self.group_measurements.len()
      && (self.group.is_some() || (self.group.is_none() && self.baseline))
    {
      println!(
        "\n{}",
        mitata::reporter::summary(
          &self
            .group_measurements
            .iter()
            .map(|(d, s)| mitata::reporter::GroupBenchmark {
              name: d.name.clone(),
              baseline: d.baseline,
              group: d.group.as_deref().unwrap_or("").to_owned(),

              stats: mitata::reporter::BenchmarkStats {
                avg: s.avg,
                min: s.min,
                max: s.max,
                p75: s.p75,
                p99: s.p99,
                p995: s.p995,
              },
            })
            .collect::<Vec<mitata::reporter::GroupBenchmark>>(),
          options
        )
      );
    }

    self.baseline = false;
    self.group_measurements.clear();
  }

  fn report_end(&mut self, _: &BenchReport) {
    self.report_group_summary();
  }
}