// cli/tools/bench/reporters.rs
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use serde::Serialize;

use super::*;
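
/// Receives benchmark lifecycle events from the bench runner and renders
/// them, either as machine-readable JSON or as human-readable console output.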
pub trait BenchReporter {
  fn report_group_summary(&mut self);
  fn report_plan(&mut self, plan: &BenchPlan);
  fn report_end(&mut self, report: &BenchReport);
  fn report_register(&mut self, desc: &BenchDescription);
  fn report_wait(&mut self, desc: &BenchDescription);
  fn report_output(&mut self, output: &str);
  fn report_result(&mut self, desc: &BenchDescription, result: &BenchResult);
  fn report_uncaught_error(&mut self, origin: &str, error: Box<JsError>);
}
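
/// Top-level shape of the `--json` report: runtime and CPU identification
/// plus every bench with its accumulated results.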
#[derive(Debug, Serialize)]
struct JsonReporterOutput {
  runtime: String,
  cpu: String,
  benches: Vec<JsonReporterBench>,
}

impl Default for JsonReporterOutput {
  fn default() -> Self {
    Self {
      runtime: format!("{} {}", get_user_agent(), env!("TARGET")),
      cpu: mitata::cpu::name(),
      benches: vec![],
    }
  }
}
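
/// A single benchmark in the JSON report, identified by origin, group, name,
/// and baseline flag; repeated runs append to `results`.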
#[derive(Debug, Serialize)]
struct JsonReporterBench {
  origin: String,
  group: Option<String>,
  name: String,
  baseline: bool,
  results: Vec<BenchResult>,
}
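
/// Reporter for `deno bench --json`: collects results quietly and serializes
/// the whole report to stdout when the run ends.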
#[derive(Debug, Serialize)]
pub struct JsonReporter(JsonReporterOutput);

impl JsonReporter {
  pub fn new() -> Self {
    Self(Default::default())
  }
}
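
// Every hook except report_result and report_end is a no-op: nothing is
// printed while benches run.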
impl BenchReporter for JsonReporter {
  fn report_group_summary(&mut self) {}

  #[cold]
  fn report_plan(&mut self, _plan: &BenchPlan) {}

  fn report_end(&mut self, _report: &BenchReport) {
    match write_json_to_stdout(self) {
      Ok(_) => (),
      Err(e) => println!("{e}"),
    }
  }

  fn report_register(&mut self, _desc: &BenchDescription) {}

  fn report_wait(&mut self, _desc: &BenchDescription) {}

  fn report_output(&mut self, _output: &str) {}

  fn report_result(&mut self, desc: &BenchDescription, result: &BenchResult) {
    if desc.warmup {
      return;
    }
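
    // Merge repeated results for the same bench (matched by origin, group,
    // name, and baseline flag) instead of creating a duplicate entry.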
    let maybe_bench = self.0.benches.iter_mut().find(|bench| {
      bench.origin == desc.origin
        && bench.group == desc.group
        && bench.name == desc.name
        && bench.baseline == desc.baseline
    });

    if let Some(bench) = maybe_bench {
      bench.results.push(result.clone());
    } else {
      self.0.benches.push(JsonReporterBench {
        origin: desc.origin.clone(),
        group: desc.group.clone(),
        name: desc.name.clone(),
        baseline: desc.baseline,
        results: vec![result.clone()],
      });
    }
  }

  fn report_uncaught_error(&mut self, _origin: &str, _error: Box<JsError>) {}
}
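
/// Reporter for the default human-readable console output, built on mitata.
/// Tracks the current group so per-group summaries can be printed.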
pub struct ConsoleReporter {
  name: String,
  show_output: bool,
  has_ungrouped: bool,
  group: Option<String>,
  baseline: bool,
  group_measurements: Vec<(BenchDescription, BenchStats)>,
  options: Option<mitata::reporter::Options>,
}

impl ConsoleReporter {
  pub fn new(show_output: bool) -> Self {
    Self {
      show_output,
      group: None,
      options: None,
      baseline: false,
      name: String::new(),
      has_ungrouped: false,
      group_measurements: Vec::new(),
    }
  }
}

impl BenchReporter for ConsoleReporter {
  #[cold]
  fn report_plan(&mut self, plan: &BenchPlan) {
    use std::sync::atomic::AtomicBool;
    use std::sync::atomic::Ordering;
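
    // Print the cpu/runtime banner only once per process, before the first
    // plan.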
    static FIRST_PLAN: AtomicBool = AtomicBool::new(true);

    self.report_group_summary();

    self.group = None;
    self.baseline = false;
    self.name = String::new();
    self.group_measurements.clear();
    self.options = Some(mitata::reporter::Options::new(
      &plan.names.iter().map(|x| x.as_str()).collect::<Vec<&str>>(),
    ));

    let options = self.options.as_mut().unwrap();
    options.percentiles = true;
    options.colors = colors::use_color();

    if FIRST_PLAN
      .compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst)
      .is_ok()
    {
      println!("{}", colors::gray(format!("cpu: {}", mitata::cpu::name())));
      println!(
        "{}\n",
        colors::gray(format!(
          "runtime: deno {} ({})",
          crate::version::deno(),
          env!("TARGET")
        ))
      );
    } else {
      println!();
    }

    println!(
      "{}\n{}\n{}",
      colors::gray(&plan.origin),
      mitata::reporter::header(options),
      mitata::reporter::br(options)
    );
  }

  fn report_register(&mut self, _desc: &BenchDescription) {}
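
  // Called just before a bench runs: remembers its name for output prefixes
  // and prints a "group <name>" header when a new group begins.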
  fn report_wait(&mut self, desc: &BenchDescription) {
    self.name = desc.name.clone();

    match &desc.group {
      None => {
        self.has_ungrouped = true;
      }

      Some(group) => {
        if self.group.is_none() || group != self.group.as_ref().unwrap() {
          self.report_group_summary();
        }

        if (self.group.is_none() && self.has_ungrouped)
          || (self.group.is_some() && self.group_measurements.is_empty())
        {
          println!("{} {}", colors::gray("group"), colors::green(group));
        }

        self.group = Some(group.clone());
      }
    }
  }

  fn report_output(&mut self, output: &str) {
    if self.show_output {
      print!("{} {}", colors::gray(format!("{}:", self.name)), output)
    }
  }
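
  // Prints one result row. Only the first bench marked `baseline: true` in a
  // group is treated as the baseline; later ones have the flag cleared.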
  fn report_result(&mut self, desc: &BenchDescription, result: &BenchResult) {
    if desc.warmup {
      return;
    }

    let options = self.options.as_ref().unwrap();

    match result {
      BenchResult::Ok(stats) => {
        let mut desc = desc.clone();

        if desc.baseline && !self.baseline {
          self.baseline = true;
        } else {
          desc.baseline = false;
        }

        println!(
          "{}",
          mitata::reporter::benchmark(
            &desc.name,
            &mitata::reporter::BenchmarkStats {
              avg: stats.avg,
              min: stats.min,
              max: stats.max,
              p75: stats.p75,
              p99: stats.p99,
              p995: stats.p995,
            },
            options
          )
        );

        // Explicit start()/end() calls don't force high-precision
        // measurement; for low-precision benches they are ignored and a
        // warning is printed instead (see #20272).
        if !stats.high_precision && stats.used_explicit_timers {
          println!("{}", colors::yellow(format!("Warning: start() and end() calls in \"{}\" are ignored because it averages less\nthan 10µs per iteration. Remove them for better results.", &desc.name)));
        }

        self.group_measurements.push((desc, stats.clone()));
      }

      BenchResult::Failed(js_error) => {
        println!(
          "{}",
          mitata::reporter::benchmark_error(
            &desc.name,
            &mitata::reporter::Error {
              stack: None,
              message: format_test_error(js_error),
            },
            options
          )
        )
      }
    };
  }
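
  // Emits a mitata summary of the measurements collected since the last
  // summary. Needs at least two results and either a named group or an
  // ungrouped baseline to compare against.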
  fn report_group_summary(&mut self) {
    let options = match self.options.as_ref() {
      None => return,
      Some(options) => options,
    };

    if 2 <= self.group_measurements.len()
      && (self.group.is_some() || (self.group.is_none() && self.baseline))
    {
      println!(
        "\n{}",
        mitata::reporter::summary(
          &self
            .group_measurements
            .iter()
            .map(|(d, s)| mitata::reporter::GroupBenchmark {
              name: d.name.clone(),
              baseline: d.baseline,
              group: d.group.as_deref().unwrap_or("").to_owned(),
              stats: mitata::reporter::BenchmarkStats {
                avg: s.avg,
                min: s.min,
                max: s.max,
                p75: s.p75,
                p99: s.p99,
                p995: s.p995,
              },
            })
            .collect::<Vec<mitata::reporter::GroupBenchmark>>(),
          options
        )
      );
    }

    println!();
    self.baseline = false;
    self.group_measurements.clear();
  }

  fn report_end(&mut self, _: &BenchReport) {
    self.report_group_summary();
  }

  fn report_uncaught_error(&mut self, _origin: &str, error: Box<JsError>) {
    println!(
      "{}: {}",
      colors::red_bold("error"),
      format_test_error(&error)
    );
    println!("This error was not caught from a benchmark and caused the bench runner to fail on the referenced module.");
    println!("It most likely originated from a dangling promise, event/timeout handler or top-level code.");
    println!();
  }
}