mirror of https://github.com/denoland/deno.git (synced 2024-12-24 08:09:08 -05:00)
fix(bench): explicit timers don't force high precision measurements (#20272)
Disables `BenchContext::start()` and `BenchContext::end()` for low precision benchmarks (less than 0.01s per iteration). Prints a warning when they are used in such benchmarks, suggesting to remove them.

```ts
Deno.bench("noop", { group: "noops" }, () => {});

Deno.bench("noop with start and end", { group: "noops" }, (b) => {
  b.start();
  b.end();
});
```

Before:

```
cpu: 12th Gen Intel(R) Core(TM) i9-12900K
runtime: deno 1.36.2 (x86_64-unknown-linux-gnu)

file:///home/nayeem/projects/deno/temp3.ts
benchmark                      time (avg)        iter/s             (min … max)       p75       p99      p995
----------------------------------------------------------------------------- -----------------------------
noop                             2.63 ns/iter 380,674,131.4    (2.45 ns … 27.78 ns)   2.55 ns   4.03 ns   5.33 ns
noop with start and end        302.47 ns/iter   3,306,146.0     (200 ns … 151.2 µs)    300 ns    400 ns    400 ns

summary
  noop
   115.14x faster than noop with start and end
```

After:

```
cpu: 12th Gen Intel(R) Core(TM) i9-12900K
runtime: deno 1.36.1 (x86_64-unknown-linux-gnu)

file:///home/nayeem/projects/deno/temp3.ts
benchmark                      time (avg)        iter/s             (min … max)       p75       p99      p995
----------------------------------------------------------------------------- -----------------------------
noop                             3.01 ns/iter 332,565,561.7    (2.73 ns … 29.54 ns)   2.93 ns   5.29 ns   7.45 ns
noop with start and end          7.73 ns/iter 129,291,091.5    (6.61 ns … 46.76 ns)   7.87 ns  13.12 ns  15.32 ns

Warning: start() and end() calls in "noop with start and end" are ignored because it averages less
than 0.01s per iteration. Remove them for better results.

summary
  noop
   2.57x faster than noop with start and end
```
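For context on the behavior change: explicit timers remain meaningful for benchmarks that average more than 0.01s per iteration; only faster benchmarks have them disabled. A minimal sketch of both cases (the setTimeout durations are placeholder stand-ins, not code from this PR):

```ts
// Averages far below 0.01s per iteration: start()/end() are now ignored
// (replaced with no-ops) and the runtime prints the warning instead.
Deno.bench("fast", (b) => {
  b.start();
  b.end();
});

// Averages above 0.01s per iteration: start()/end() still scope the
// measurement, excluding the setup sleep. Durations are illustrative.
Deno.bench("slow", async (b) => {
  await new Promise((r) => setTimeout(r, 5)); // setup, not measured
  b.start();
  await new Promise((r) => setTimeout(r, 20)); // measured region
  b.end();
});
```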
parent 1cb547d885
commit 5dbf5c8293
7 changed files with 48 additions and 5 deletions
```diff
@@ -849,7 +849,7 @@ function compareMeasurements(a, b) {
   return 0;
 }
 
-function benchStats(n, highPrecision, avg, min, max, all) {
+function benchStats(n, highPrecision, usedExplicitTimers, avg, min, max, all) {
   return {
     n,
     min,
@@ -859,6 +859,8 @@ function benchStats(n, highPrecision, avg, min, max, all) {
     p995: all[MathCeil(n * (99.5 / 100)) - 1],
     p999: all[MathCeil(n * (99.9 / 100)) - 1],
     avg: !highPrecision ? (avg / n) : MathCeil(avg / n),
+    highPrecision,
+    usedExplicitTimers,
   };
 }
 
```
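The two added fields flow straight into the stats object the bench runner reports. A sketch of the resulting shape, reconstructed from the hunks above and the Rust `BenchStats` struct later in this diff (illustrative, not the complete runtime object):

```ts
// Shape of the stats record benchStats() now returns (illustrative).
interface BenchStatsLike {
  n: number; // measured iterations
  min: number;
  max: number;
  p75: number; // all[MathCeil(n * (75 / 100)) - 1], and likewise below
  p99: number;
  p995: number;
  p999: number;
  avg: number; // avg / n; rounded up with MathCeil in the high-precision case
  highPrecision: boolean; // wavg > lowPrecisionThresholdInNs
  usedExplicitTimers: boolean; // whether b.start()/b.end() were called
}
```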
```diff
@@ -926,7 +928,7 @@ async function benchMeasure(timeBudget, fn, async, context) {
   wavg /= c;
 
   // measure step
-  if (wavg > lowPrecisionThresholdInNs || usedExplicitTimers) {
+  if (wavg > lowPrecisionThresholdInNs) {
     let iterations = 10;
     let budget = timeBudget * 1e6;
 
@@ -978,6 +980,8 @@ async function benchMeasure(timeBudget, fn, async, context) {
       }
     }
   } else {
+    context.start = function start() {};
+    context.end = function end() {};
     let iterations = 10;
     let budget = timeBudget * 1e6;
 
@@ -986,8 +990,6 @@ async function benchMeasure(timeBudget, fn, async, context) {
       const t1 = benchNow();
       for (let c = 0; c < lowPrecisionThresholdInNs; c++) {
         fn(context);
-        currentBenchUserExplicitStart = null;
-        currentBenchUserExplicitEnd = null;
       }
       const iterationTime = (benchNow() - t1) / lowPrecisionThresholdInNs;
 
@@ -1019,7 +1021,15 @@ async function benchMeasure(timeBudget, fn, async, context) {
   }
 
   all.sort(compareMeasurements);
-  return benchStats(n, wavg > lowPrecisionThresholdInNs, avg, min, max, all);
+  return benchStats(
+    n,
+    wavg > lowPrecisionThresholdInNs,
+    usedExplicitTimers,
+    avg,
+    min,
+    max,
+    all,
+  );
 }
 
 /** @param desc {BenchDescription} */
```
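Taken together, these hunks mean `usedExplicitTimers` no longer forces the per-iteration (high-precision) measuring path; when the warmup average is at or below `lowPrecisionThresholdInNs` (0.01s, i.e. 1e7 ns), the batched low-precision path runs and the context's `start()`/`end()` are swapped for no-ops. A standalone sketch of that dispatch, with simplified bodies standing in for the real measurement loops:

```ts
// Illustrative sketch of the patched dispatch, not Deno's actual internals.
const lowPrecisionThresholdInNs = 1e7; // 0.01s per iteration

interface BenchContextLike {
  start(): void;
  end(): void;
}

function measureStep(
  wavg: number, // warmup average in ns/iteration
  fn: (b: BenchContextLike) => void,
  context: BenchContextLike,
): void {
  if (wavg > lowPrecisionThresholdInNs) {
    // High-precision path: each iteration is timed individually and
    // start()/end() stay live, carving out the measured region.
    fn(context);
  } else {
    // Low-precision path: iterations are timed in batches, so live
    // start()/end() calls would only add overhead. They become no-ops
    // and the reporter warns that they were ignored.
    context.start = function start() {};
    context.end = function end() {};
    fn(context);
  }
}
```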
```diff
@@ -187,6 +187,11 @@ itest!(bench_explicit_start_end {
   exit_code: 1,
 });
 
+itest!(bench_explicit_start_end_low_precision {
+  args: "bench --quiet -A bench/explicit_start_and_end_low_precision.ts",
+  output: "bench/explicit_start_and_end_low_precision.out",
+});
+
 itest!(bench_with_config {
   args: "bench --config bench/collect/deno.jsonc bench/collect",
   exit_code: 0,
```
cli/tests/testdata/bench/explicit_start_and_end_low_precision.out (new file, vendored, 9 lines)

```diff
@@ -0,0 +1,9 @@
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/explicit_start_and_end_low_precision.ts
+benchmark                      time (avg)        iter/s             (min … max)       p75       p99      p995
+----------------------------------------------------------------------------- -----------------------------
+noop with start and end [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+Warning: start() and end() calls in "noop with start and end" are ignored because it averages less
+than 0.01s per iteration. Remove them for better results.
```
cli/tests/testdata/bench/explicit_start_and_end_low_precision.ts (new file, vendored, 4 lines)

```diff
@@ -0,0 +1,4 @@
+Deno.bench("noop with start and end", (b) => {
+  b.start();
+  b.end();
+});
```
```diff
@@ -106,6 +106,7 @@ pub struct BenchDescription {
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
 pub struct BenchStats {
   pub n: u64,
   pub min: f64,
@@ -115,6 +116,8 @@ pub struct BenchStats {
   pub p99: f64,
   pub p995: f64,
   pub p999: f64,
+  pub high_precision: bool,
+  pub used_explicit_timers: bool,
 }
 
 impl BenchReport {
```
```diff
@@ -236,6 +236,10 @@ impl BenchReporter for ConsoleReporter {
       )
     );
 
+    if !stats.high_precision && stats.used_explicit_timers {
+      println!("{}", colors::yellow(format!("Warning: start() and end() calls in \"{}\" are ignored because it averages less\nthan 0.01s per iteration. Remove them for better results.", &desc.name)));
+    }
+
     self.group_measurements.push((desc, stats.clone()));
   }
 
```
cli/tsc/dts/lib.deno.ns.d.ts (vendored, 8 additions)

````diff
@@ -1011,6 +1011,10 @@ declare namespace Deno {
     /** Restarts the timer for the bench measurement. This should be called
      * after doing setup work which should not be measured.
      *
+     * Warning: This method should not be used for benchmarks averaging less
+     * than 0.01s per iteration. In such cases it will be disabled but the call
+     * will still have noticeable overhead, resulting in a warning.
+     *
      * ```ts
      * Deno.bench("foo", async (t) => {
      *   const data = await Deno.readFile("data.txt");
@@ -1024,6 +1028,10 @@ declare namespace Deno {
     /** End the timer early for the bench measurement. This should be called
      * before doing teardown work which should not be measured.
      *
+     * Warning: This method should not be used for benchmarks averaging less
+     * than 0.01s per iteration. In such cases it will be disabled but the call
+     * will still have noticeable overhead, resulting in a warning.
+     *
      * ```ts
      * Deno.bench("foo", async (t) => {
      *   const file = await Deno.open("data.txt");
````
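Both doc snippets are truncated by the hunk boundaries above. A completed example in the spirit of those docs (the decoding loop is a placeholder workload, not text from this diff); per the new warning, the pattern is only worthwhile when each iteration averages more than 0.01s:

```ts
Deno.bench("foo", async (t) => {
  // Setup work, excluded from the measurement.
  const data = await Deno.readFile("data.txt");

  t.start();
  // Only this region is measured. Keep it above ~0.01s per iteration on
  // average, or start()/end() are ignored and a warning is printed.
  for (let i = 0; i < 1_000; i++) new TextDecoder().decode(data);
  t.end();
});
```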