diff --git a/cli/bench/main.rs b/cli/bench/main.rs
index 3e7cc61fc0..e4cbf3eda9 100644
--- a/cli/bench/main.rs
+++ b/cli/bench/main.rs
@@ -162,7 +162,7 @@ const RESULT_KEYS: &[&str] =
 fn run_exec_time(
   deno_exe: &Path,
   target_dir: &Path,
-) -> Result<HashMap<String, HashMap<String, i64>>> {
+) -> Result<HashMap<String, HashMap<String, f64>>> {
   let hyperfine_exe = test_util::prebuilt_tool_path("hyperfine");
   let benchmark_file = target_dir.join("hyperfine_results.json");
 
@@ -203,7 +203,7 @@ fn run_exec_time(
     true,
   );
 
-  let mut results = HashMap::<String, HashMap<String, i64>>::new();
+  let mut results = HashMap::<String, HashMap<String, f64>>::new();
   let hyperfine_results = read_json(benchmark_file)?;
   for ((name, _, _), data) in EXEC_TIME_BENCHMARKS.iter().zip(
     hyperfine_results
@@ -220,7 +220,7 @@ fn run_exec_time(
       data
         .into_iter()
         .filter(|(key, _)| RESULT_KEYS.contains(&key.as_str()))
-        .map(|(key, val)| (key, val.as_f64().unwrap() as i64))
+        .map(|(key, val)| (key, val.as_f64().unwrap()))
         .collect(),
     );
   }
@@ -382,11 +382,11 @@ struct BenchResult {
   // TODO(ry) The "benchmark" benchmark should actually be called "exec_time".
   // When this is changed, the historical data in gh-pages branch needs to be
   // changed too.
-  benchmark: HashMap<String, HashMap<String, i64>>,
+  benchmark: HashMap<String, HashMap<String, f64>>,
   binary_size: HashMap<String, i64>,
   bundle_size: HashMap<String, i64>,
   cargo_deps: usize,
-  max_latency: HashMap<String, i64>,
+  max_latency: HashMap<String, f64>,
   max_memory: HashMap<String, i64>,
   lsp_exec_time: HashMap<String, i64>,
   req_per_sec: HashMap<String, i64>,
@@ -489,7 +489,7 @@ async fn main() -> Result<()> {
   new_data.req_per_sec = req_per_sec;
   let max_latency = stats
     .iter()
-    .map(|(name, result)| (name.clone(), result.latency as i64))
+    .map(|(name, result)| (name.clone(), result.latency))
     .collect();
 
   reporter.write("max_latency", &max_latency);
diff --git a/cli/bench/metrics.rs b/cli/bench/metrics.rs
index c49ca90208..964fdde4b9 100644
--- a/cli/bench/metrics.rs
+++ b/cli/bench/metrics.rs
@@ -17,9 +17,9 @@ static GIT_HASH: Lazy<String> = Lazy::new(|| {
 });
 
 #[derive(serde::Serialize)]
-struct Metric {
+struct Metric<T: serde::Serialize> {
   name: String,
-  value: i64,
+  value: T,
   sha1: String,
   #[serde(rename = "type")]
   type_: String,
@@ -62,7 +62,12 @@ impl Reporter {
     }
   }
 
-  pub fn write_one(&mut self, type_: &str, name: &str, value: i64) {
+  pub fn write_one<T: serde::Serialize>(
+    &mut self,
+    type_: &str,
+    name: &str,
+    value: T,
+  ) {
     self
       .wtr
       .serialize(Metric {
@@ -75,7 +80,11 @@ impl Reporter {
       .unwrap();
   }
 
-  pub fn write(&mut self, type_: &str, hashmap: &HashMap<String, i64>) {
+  pub fn write<T: serde::Serialize + Copy>(
+    &mut self,
+    type_: &str,
+    hashmap: &HashMap<String, T>,
+  ) {
     for (name, value) in hashmap {
       self.write_one(type_, name, *value);
     }