
Move benchmarks to Rust (#7134)

All benchmarks are now written in Rust and can be invoked with
`cargo bench`.

Currently this has its own "harness" that behaves like
`./tools/benchmark.py` did.
Because of this, tests inside `cli/bench` are currently not run.
This should be switched to the language-provided harness
once the `#[bench]` attribute has been stabilized.
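
For context on the mechanism: setting `harness = false` on a `[[bench]]`
target (as done in `cli/Cargo.toml` below) tells Cargo not to link the
default libtest harness, so `cargo bench` builds the target as an ordinary
binary and runs its `main`, passing `--bench` on the command line. A minimal
sketch of such a runner, with illustrative names only (the real runner is
`cli/bench/main.rs` in this commit):

# Cargo.toml (sketch; "my_bench" is a hypothetical target name)
[[bench]]
name = "my_bench"
harness = false
path = "./bench/main.rs"

// bench/main.rs (sketch)
fn main() {
  // `cargo test` also builds and runs bench targets, just without `--bench`,
  // so bail out unless benchmarking was actually requested.
  if !std::env::args().any(|arg| arg == "--bench") {
    return;
  }
  // ... run benchmarks and report results here ...
}

Because the default harness is replaced, `#[test]` functions inside the
bench target are not run, which is the caveat noted above.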
Valentin Anger 2020-08-28 15:03:50 +02:00 committed by GitHub
parent 3d23208019
commit 31f32ed8c4
31 changed files with 1196 additions and 701 deletions


@@ -5,3 +5,4 @@ std/**/testdata/
 std/**/node_modules/
 cli/tsc/*typescript.js
 cli/dts/*
+cli/bench/node*.js


@@ -154,7 +154,7 @@ jobs:
       - name: Run Benchmarks
         if: matrix.config.kind == 'bench'
-        run: python ./tools/benchmark.py --release
+        run: cargo bench
       - name: Post Benchmarks
         if: |

Cargo.lock generated

@@ -230,6 +230,17 @@ version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
 
+[[package]]
+name = "chrono"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6"
+dependencies = [
+ "num-integer",
+ "num-traits",
+ "time",
+]
+
 [[package]]
 name = "clap"
 version = "2.33.2"
@@ -327,6 +338,7 @@ dependencies = [
  "base64 0.12.3",
  "byteorder",
  "bytes",
+ "chrono",
  "clap",
  "deno_core",
  "deno_doc",


@@ -15,3 +15,8 @@ exclude = [
 codegen-units = 1
 lto = true
 opt-level = 'z' # Optimize for size
+
+[profile.bench]
+codegen-units = 1
+lto = true
+opt-level = 'z' # Optimize for size


@@ -14,6 +14,11 @@ default-run = "deno"
 name = "deno"
 path = "main.rs"
+
+[[bench]]
+name = "deno_bench"
+harness = false
+path = "./bench/main.rs"
 
 [build-dependencies]
 deno_core = { path = "../core", version = "0.54.0" }
 deno_web = { path = "../op_crates/web", version = "0.4.0" }
@@ -78,6 +83,8 @@ fwdansi = "1.1.0"
 nix = "0.17.0"
 
 [dev-dependencies]
+# Used in benchmark
+chrono = "0.4"
 os_pipe = "0.9.2"
 # Used for testing inspector. Keep in-sync with warp.
 tokio-tungstenite = { version = "0.10.1", features = ["connect"] }


@@ -1,4 +1,4 @@
-// Used for benchmarking Deno's networking. See tools/http_benchmark.py
+// Used for benchmarking Deno's networking.
 // TODO Replace this with a real HTTP server once
 // https://github.com/denoland/deno/issues/726 is completed.
 // Note: this is a keep-alive server.


@@ -1,4 +1,4 @@
-// Used for benchmarking Deno's tcp proxy performance. See tools/http_benchmark.py
+// Used for benchmarking Deno's tcp proxy performance.
 const addr = Deno.args[0] || "127.0.0.1:4500";
 const originAddr = Deno.args[1] || "127.0.0.1:4501";

cli/bench/http.rs Normal file

@@ -0,0 +1,303 @@
// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.

use super::Result;
use std::{
  collections::HashMap, path::PathBuf, process::Command, time::Duration,
};

pub use test_util::{parse_wrk_output, WrkOutput as HttpBenchmarkResult};

// Some of the benchmarks in this file have been renamed. In case the history
// somehow gets messed up:
// "node_http" was once called "node"
// "deno_tcp" was once called "deno"
// "deno_http" was once called "deno_net_http"

const DURATION: &str = "20s";

pub(crate) fn benchmark(
  target_path: &PathBuf,
) -> Result<HashMap<String, HttpBenchmarkResult>> {
  let deno_exe = test_util::deno_exe_path();
  let deno_exe = deno_exe.to_str().unwrap();

  let hyper_hello_exe = target_path.join("test_server");
  let hyper_hello_exe = hyper_hello_exe.to_str().unwrap();

  let core_http_bin_ops_exe = target_path.join("examples/http_bench_bin_ops");
  let core_http_bin_ops_exe = core_http_bin_ops_exe.to_str().unwrap();

  let core_http_json_ops_exe = target_path.join("examples/http_bench_json_ops");
  let core_http_json_ops_exe = core_http_json_ops_exe.to_str().unwrap();

  let mut res = HashMap::new();

  // "deno_tcp" was once called "deno"
  res.insert("deno_tcp".to_string(), deno_tcp(deno_exe)?);
  // res.insert("deno_udp".to_string(), deno_udp(deno_exe)?);
  res.insert("deno_http".to_string(), deno_http(deno_exe)?);
  // TODO(ry) deno_proxy disabled to make fetch() standards compliant.
  // res.insert("deno_proxy".to_string(), deno_http_proxy(deno_exe) hyper_hello_exe))
  res.insert(
    "deno_proxy_tcp".to_string(),
    deno_tcp_proxy(deno_exe, hyper_hello_exe)?,
  );
  // "core_http_bin_ops" was once called "deno_core_single"
  // "core_http_bin_ops" was once called "deno_core_http_bench"
  res.insert(
    "core_http_bin_ops".to_string(),
    core_http_bin_ops(core_http_bin_ops_exe)?,
  );
  res.insert(
    "core_http_json_ops".to_string(),
    core_http_json_ops(core_http_json_ops_exe)?,
  );
  // "node_http" was once called "node"
  res.insert("node_http".to_string(), node_http()?);
  res.insert("node_proxy".to_string(), node_http_proxy(hyper_hello_exe)?);
  res.insert(
    "node_proxy_tcp".to_string(),
    node_tcp_proxy(hyper_hello_exe)?,
  );
  res.insert("node_tcp".to_string(), node_tcp()?);
  res.insert("hyper".to_string(), hyper_http(hyper_hello_exe)?);

  Ok(res)
}

fn run(
  server_cmd: &[&str],
  port: u16,
  env: Option<Vec<(String, String)>>,
  origin_cmd: Option<&[&str]>,
) -> Result<HttpBenchmarkResult> {
  // Wait for port 4544 to become available.
  // TODO Need to use SO_REUSEPORT with tokio::net::TcpListener.
  std::thread::sleep(Duration::from_secs(5));

  let mut origin = None;
  if let Some(cmd) = origin_cmd {
    let mut com = Command::new(cmd[0]);
    com.args(&cmd[1..]);
    if let Some(env) = env.clone() {
      com.envs(env);
    }
    origin = Some(com.spawn()?);
  };

  println!("{}", server_cmd.join(" "));
  let mut server = {
    let mut com = Command::new(server_cmd[0]);
    com.args(&server_cmd[1..]);
    if let Some(env) = env {
      com.envs(env);
    }
    com.spawn()?
  };

  std::thread::sleep(Duration::from_secs(5)); // wait for server to wake up. TODO racy.

  let wrk = test_util::prebuilt_tool_path("wrk");
  assert!(wrk.is_file());
  let wrk_cmd = &[
    wrk.to_str().unwrap(),
    "-d",
    DURATION,
    "--latency",
    &format!("http://127.0.0.1:{}/", port),
  ];
  println!("{}", wrk_cmd.join(" "));
  let output = test_util::run_collect(wrk_cmd, None, None, None, true).0;
  println!("{}", output);

  assert!(
    server.try_wait()?.map_or(true, |s| s.success()),
    "server ended with error"
  );
  server.kill()?;
  if let Some(mut origin) = origin {
    origin.kill()?;
  }

  Ok(parse_wrk_output(&output))
}

fn get_port() -> u16 {
  static mut NEXT_PORT: u16 = 4544;
  let port = unsafe { NEXT_PORT };
  unsafe {
    NEXT_PORT += 1;
  }
  port
}

fn server_addr(port: u16) -> String {
  format!("0.0.0.0:{}", port)
}

fn deno_tcp(deno_exe: &str) -> Result<HttpBenchmarkResult> {
  let port = get_port();
  println!("http_benchmark testing DENO tcp.");
  run(
    &[
      deno_exe,
      "run",
      "--allow-net",
      "cli/bench/deno_tcp.ts",
      &server_addr(port),
    ],
    port,
    None,
    None,
  )
}

fn deno_tcp_proxy(
  deno_exe: &str,
  hyper_exe: &str,
) -> Result<HttpBenchmarkResult> {
  let port = get_port();
  let origin_port = get_port();

  println!("http_proxy_benchmark testing DENO using net/tcp.");
  run(
    &[
      deno_exe,
      "run",
      "--allow-net",
      "--reload",
      "--unstable",
      "cli/bench/deno_tcp_proxy.ts",
      &server_addr(port),
      &server_addr(origin_port),
    ],
    port,
    None,
    Some(&[hyper_exe, &origin_port.to_string()]),
  )
}

fn deno_http(deno_exe: &str) -> Result<HttpBenchmarkResult> {
  let port = get_port();
  println!("http_benchmark testing DENO using net/http.");
  run(
    &[
      deno_exe,
      "run",
      "--allow-net",
      "--reload",
      "--unstable",
      "std/http/http_bench.ts",
      &server_addr(port),
    ],
    port,
    None,
    None,
  )
}

#[allow(dead_code)]
fn deno_http_proxy(
  deno_exe: &str,
  hyper_exe: &str,
) -> Result<HttpBenchmarkResult> {
  let port = get_port();
  let origin_port = get_port();

  println!("http_proxy_benchmark testing DENO using net/http.");
  run(
    &[
      deno_exe,
      "run",
      "--allow-net",
      "--reload",
      "--unstable",
      "cli/bench/deno_http_proxy.ts",
      &server_addr(port),
      &server_addr(origin_port),
    ],
    port,
    None,
    Some(&[hyper_exe, &origin_port.to_string()]),
  )
}

fn core_http_bin_ops(exe: &str) -> Result<HttpBenchmarkResult> {
  println!("http_benchmark testing CORE http_bench_bin_ops");
  run(&[exe], 4544, None, None)
}

fn core_http_json_ops(exe: &str) -> Result<HttpBenchmarkResult> {
  println!("http_benchmark testing CORE http_bench_json_ops");
  run(&[exe], 4544, None, None)
}

fn node_http() -> Result<HttpBenchmarkResult> {
  let port = get_port();
  println!("http_benchmark testing NODE.");
  run(
    &["node", "cli/bench/node_http.js", &port.to_string()],
    port,
    None,
    None,
  )
}

fn node_http_proxy(hyper_exe: &str) -> Result<HttpBenchmarkResult> {
  let port = get_port();
  let origin_port = get_port();
  let origin_port = origin_port.to_string();

  println!("http_proxy_benchmark testing NODE.");
  run(
    &[
      "node",
      "cli/bench/node_http_proxy.js",
      &port.to_string(),
      &origin_port,
    ],
    port,
    None,
    Some(&[hyper_exe, &origin_port]),
  )
}

fn node_tcp_proxy(exe: &str) -> Result<HttpBenchmarkResult> {
  let port = get_port();
  let origin_port = get_port();
  let origin_port = origin_port.to_string();

  println!("http_proxy_benchmark testing NODE tcp.");
  run(
    &[
      "node",
      "cli/bench/node_tcp_proxy.js",
      &port.to_string(),
      &origin_port,
    ],
    port,
    None,
    Some(&[exe, &origin_port]),
  )
}

fn node_tcp() -> Result<HttpBenchmarkResult> {
  let port = get_port();
  println!("http_benchmark testing node_tcp.js");
  run(
    &["node", "cli/bench/node_tcp.js", &port.to_string()],
    port,
    None,
    None,
  )
}

fn hyper_http(exe: &str) -> Result<HttpBenchmarkResult> {
  let port = get_port();
  println!("http_benchmark testing RUST hyper");
  run(&[exe, &format!("{}", port)], port, None, None)
}

cli/bench/main.rs Normal file

@@ -0,0 +1,462 @@
// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.

use serde_json::{self, map::Map, Number, Value};
use std::{
  convert::From,
  env, fs,
  path::PathBuf,
  process::{Command, Stdio},
};

mod http;
mod throughput;

fn read_json(filename: &str) -> Result<serde_json::Value> {
  let f = fs::File::open(filename)?;
  Ok(serde_json::from_reader(f)?)
}

fn write_json(filename: &str, value: &serde_json::Value) -> Result<()> {
  let f = fs::File::create(filename)?;
  serde_json::to_writer(f, value)?;
  Ok(())
}

/// The list of the tuples of the benchmark name, arguments and return code
const EXEC_TIME_BENCHMARKS: &[(&str, &[&str], Option<i32>)] = &[
  ("hello", &["run", "cli/tests/002_hello.ts"], None),
  (
    "relative_import",
    &["run", "cli/tests/003_relative_import.ts"],
    None,
  ),
  ("error_001", &["run", "cli/tests/error_001.ts"], Some(1)),
  (
    "cold_hello",
    &["run", "--reload", "cli/tests/002_hello.ts"],
    None,
  ),
  (
    "cold_relative_import",
    &["run", "--reload", "cli/tests/003_relative_import.ts"],
    None,
  ),
  (
    "workers_startup",
    &["run", "--allow-read", "cli/tests/workers_startup_bench.ts"],
    None,
  ),
  (
    "workers_round_robin",
    &[
      "run",
      "--allow-read",
      "cli/tests/workers_round_robin_bench.ts",
    ],
    None,
  ),
  (
    "text_decoder",
    &["run", "cli/tests/text_decoder_perf.js"],
    None,
  ),
  (
    "text_encoder",
    &["run", "cli/tests/text_encoder_perf.js"],
    None,
  ),
  (
    "check",
    &["cache", "--reload", "std/examples/chat/server_test.ts"],
    None,
  ),
  (
    "no_check",
    &[
      "cache",
      "--reload",
      "--no-check",
      "std/examples/chat/server_test.ts",
    ],
    None,
  ),
];

const RESULT_KEYS: &[&str] =
  &["mean", "stddev", "user", "system", "min", "max"];

fn run_exec_time(deno_exe: &PathBuf, target_dir: &PathBuf) -> Result<Value> {
  let hyperfine_exe = test_util::prebuilt_tool_path("hyperfine");

  let benchmark_file = target_dir.join("hyperfine_results.json");
  let benchmark_file = benchmark_file.to_str().unwrap();

  let mut command = [
    hyperfine_exe.to_str().unwrap(),
    "--export-json",
    benchmark_file,
    "--warmup",
    "3",
  ]
  .iter()
  .map(|s| s.to_string())
  .collect::<Vec<_>>();

  for (_, args, return_code) in EXEC_TIME_BENCHMARKS {
    let ret_code_test = if let Some(code) = return_code {
      // Bash test which asserts the return code value of the previous command
      // $? contains the return code of the previous command
      format!("; test $? -eq {}", code)
    } else {
      "".to_string()
    };
    command.push(format!(
      "{} {} {}",
      deno_exe.to_str().unwrap(),
      args.join(" "),
      ret_code_test
    ));
  }

  test_util::run(
    &command.iter().map(|s| s.as_ref()).collect::<Vec<_>>(),
    None,
    None,
    None,
    true,
  );

  let mut results = Map::new();
  let hyperfine_results = read_json(benchmark_file)?;
  for ((name, _, _), data) in EXEC_TIME_BENCHMARKS.iter().zip(
    hyperfine_results
      .as_object()
      .unwrap()
      .get("results")
      .unwrap()
      .as_array()
      .unwrap(),
  ) {
    let data = data.as_object().unwrap().clone();
    results.insert(
      name.to_string(),
      Value::Object(
        data
          .into_iter()
          .filter(|(key, _)| RESULT_KEYS.contains(&key.as_str()))
          .collect::<Map<String, Value>>(),
      ),
    );
  }

  Ok(Value::Object(results))
}

const BINARY_TARGET_FILES: &[&str] =
  &["CLI_SNAPSHOT.bin", "COMPILER_SNAPSHOT.bin"];
fn get_binary_sizes(target_dir: &PathBuf) -> Result<Value> {
  let mut sizes = Map::new();
  let mut mtimes = std::collections::HashMap::new();

  sizes.insert(
    "deno".to_string(),
    Value::Number(Number::from(test_util::deno_exe_path().metadata()?.len())),
  );

  // Because cargo's OUT_DIR is not predictable, search the build tree for
  // snapshot related files.
  for file in walkdir::WalkDir::new(target_dir) {
    if file.is_err() {
      continue;
    }
    let file = file.unwrap();
    let filename = file.file_name().to_str().unwrap().to_string();

    if !BINARY_TARGET_FILES.contains(&filename.as_str()) {
      continue;
    }

    let meta = file.metadata()?;
    let file_mtime = meta.modified()?;

    // If multiple copies of a file are found, use the most recent one.
    if let Some(stored_mtime) = mtimes.get(&filename) {
      if *stored_mtime > file_mtime {
        continue;
      }
    }

    mtimes.insert(filename.clone(), file_mtime);
    sizes.insert(filename, Value::Number(Number::from(meta.len())));
  }

  Ok(Value::Object(sizes))
}

const BUNDLES: &[(&str, &str)] = &[
  ("file_server", "./std/http/file_server.ts"),
  ("gist", "./std/examples/gist.ts"),
];
fn bundle_benchmark(deno_exe: &PathBuf) -> Result<Value> {
  let mut sizes = Map::new();

  for (name, url) in BUNDLES {
    let path = format!("{}.bundle.js", name);
    test_util::run(
      &[
        deno_exe.to_str().unwrap(),
        "bundle",
        "--unstable",
        url,
        &path,
      ],
      None,
      None,
      None,
      true,
    );

    let file = PathBuf::from(path);
    assert!(file.is_file());
    sizes.insert(
      name.to_string(),
      Value::Number(Number::from(file.metadata()?.len())),
    );
    let _ = fs::remove_file(file);
  }

  Ok(Value::Object(sizes))
}

fn run_throughput(deno_exe: &PathBuf) -> Result<Value> {
  let mut m = Map::new();

  m.insert("100M_tcp".to_string(), throughput::tcp(deno_exe, 100)?);
  m.insert("100M_cat".to_string(), throughput::cat(deno_exe, 100)?);
  m.insert("10M_tcp".to_string(), throughput::tcp(deno_exe, 10)?);
  m.insert("10M_cat".to_string(), throughput::cat(deno_exe, 10)?);

  Ok(Value::Object(m))
}

fn run_http(
  target_dir: &PathBuf,
  new_data: &mut Map<String, Value>,
) -> Result<()> {
  let stats = http::benchmark(target_dir)?;

  new_data.insert(
    "req_per_sec".to_string(),
    Value::Object(
      stats
        .iter()
        .map(|(name, result)| {
          (name.clone(), Value::Number(Number::from(result.requests)))
        })
        .collect::<Map<String, Value>>(),
    ),
  );

  new_data.insert(
    "max_latency".to_string(),
    Value::Object(
      stats
        .iter()
        .map(|(name, result)| {
          (
            name.clone(),
            Value::Number(Number::from_f64(result.latency).unwrap()),
          )
        })
        .collect::<Map<String, Value>>(),
    ),
  );

  Ok(())
}

fn run_strace_benchmarks(
  deno_exe: &PathBuf,
  new_data: &mut Map<String, Value>,
) -> Result<()> {
  use std::io::Read;

  let mut thread_count = Map::new();
  let mut syscall_count = Map::new();

  for (name, args, _) in EXEC_TIME_BENCHMARKS {
    let mut file = tempfile::NamedTempFile::new()?;

    Command::new("strace")
      .args(&[
        "-c",
        "-f",
        "-o",
        file.path().to_str().unwrap(),
        deno_exe.to_str().unwrap(),
      ])
      .args(args.iter())
      .stdout(Stdio::null())
      .spawn()?
      .wait()?;

    let mut output = String::new();
    file.as_file_mut().read_to_string(&mut output)?;

    let strace_result = test_util::parse_strace_output(&output);
    thread_count.insert(
      name.to_string(),
      Value::Number(Number::from(
        strace_result.get("clone").unwrap().calls + 1,
      )),
    );
    syscall_count.insert(
      name.to_string(),
      Value::Number(Number::from(strace_result.get("total").unwrap().calls)),
    );
  }

  new_data.insert("thread_count".to_string(), Value::Object(thread_count));
  new_data.insert("syscall_count".to_string(), Value::Object(syscall_count));

  Ok(())
}

fn run_max_mem_benchmark(deno_exe: &PathBuf) -> Result<Value> {
  let mut results = Map::new();

  for (name, args, return_code) in EXEC_TIME_BENCHMARKS {
    let proc = Command::new("time")
      .args(&["-v", deno_exe.to_str().unwrap()])
      .args(args.iter())
      .stdout(Stdio::null())
      .stderr(Stdio::piped())
      .spawn()?;

    let proc_result = proc.wait_with_output()?;
    if let Some(code) = return_code {
      assert_eq!(proc_result.status.code().unwrap(), *code);
    }
    let out = String::from_utf8(proc_result.stderr)?;

    results.insert(
      name.to_string(),
      Value::Number(Number::from(test_util::parse_max_mem(&out).unwrap())),
    );
  }

  Ok(Value::Object(results))
}

/*
 TODO(SyrupThinker)
 Switch to the #[bench] attribute once
 it is stabilized.
 Before that the #[test] tests won't be run because
 we replace the harness with our own runner here.
*/
fn main() -> Result<()> {
  if env::args().find(|s| s == "--bench").is_none() {
    return Ok(());
  }

  println!("Starting Deno benchmark");

  let target_dir = test_util::target_dir();
  let deno_exe = test_util::deno_exe_path();
  env::set_current_dir(&test_util::root_path())?;

  let mut new_data: Map<String, Value> = Map::new();
  new_data.insert(
    "created_at".to_string(),
    Value::String(
      chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Secs, true),
    ),
  );
  new_data.insert(
    "sha1".to_string(),
    Value::String(
      test_util::run_collect(
        &["git", "rev-parse", "HEAD"],
        None,
        None,
        None,
        true,
      )
      .0
      .trim()
      .to_string(),
    ),
  );

  // TODO(ry) The "benchmark" benchmark should actually be called "exec_time".
  // When this is changed, the historical data in gh-pages branch needs to be
  // changed too.
  new_data.insert(
    "benchmark".to_string(),
    run_exec_time(&deno_exe, &target_dir)?,
  );

  new_data.insert("binary_size".to_string(), get_binary_sizes(&target_dir)?);
  new_data.insert("bundle_size".to_string(), bundle_benchmark(&deno_exe)?);

  // Cannot run throughput benchmark on windows because they don't have nc or
  // pipe.
  if cfg!(not(target_os = "windows")) {
    new_data.insert("throughput".to_string(), run_throughput(&deno_exe)?);
    run_http(&target_dir, &mut new_data)?;
  }

  if cfg!(target_os = "linux") {
    run_strace_benchmarks(&deno_exe, &mut new_data)?;
    new_data
      .insert("max_memory".to_string(), run_max_mem_benchmark(&deno_exe)?);
  }

  println!("===== <BENCHMARK RESULTS>");
  serde_json::to_writer_pretty(std::io::stdout(), &new_data)?;
  println!("\n===== </BENCHMARK RESULTS>");

  if let Some(filename) = target_dir.join("bench.json").to_str() {
    write_json(filename, &Value::Object(new_data))?;
  } else {
    eprintln!("Cannot write bench.json, path is invalid");
  }

  Ok(())
}

#[derive(Debug)]
enum Error {
  Io(std::io::Error),
  Serde(serde_json::error::Error),
  FromUtf8(std::string::FromUtf8Error),
  Walkdir(walkdir::Error),
}

impl From<std::io::Error> for Error {
  fn from(ioe: std::io::Error) -> Self {
    Error::Io(ioe)
  }
}

impl From<serde_json::error::Error> for Error {
  fn from(sje: serde_json::error::Error) -> Self {
    Error::Serde(sje)
  }
}

impl From<std::string::FromUtf8Error> for Error {
  fn from(fue: std::string::FromUtf8Error) -> Self {
    Error::FromUtf8(fue)
  }
}

impl From<walkdir::Error> for Error {
  fn from(wde: walkdir::Error) -> Self {
    Error::Walkdir(wde)
  }
}

pub(crate) type Result<T> = std::result::Result<T, Error>;

cli/bench/throughput.rs Normal file

@@ -0,0 +1,62 @@
// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.

use super::Result;
use serde_json::{Number, Value};
use std::{
  path::PathBuf,
  process::Command,
  time::{Duration, Instant},
};

const MB: usize = 1024 * 1024;
const SERVER_ADDR: &str = "0.0.0.0:4544";
const CLIENT_ADDR: &str = "127.0.0.1 4544";

pub(crate) fn cat(deno_exe: &PathBuf, megs: usize) -> Result<Value> {
  let size = megs * MB;
  let shell_cmd = format!(
    "{} run --allow-read cli/tests/cat.ts /dev/zero | head -c {}",
    deno_exe.to_str().unwrap(),
    size
  );
  println!("{}", shell_cmd);
  let cmd = &["sh", "-c", &shell_cmd];

  let start = Instant::now();
  let _ = test_util::run_collect(cmd, None, None, None, true);
  let end = Instant::now();

  Ok(Value::Number(
    Number::from_f64((end - start).as_secs_f64()).unwrap(),
  ))
}

pub(crate) fn tcp(deno_exe: &PathBuf, megs: usize) -> Result<Value> {
  let size = megs * MB;
  let shell_cmd = format!("head -c {} /dev/zero | nc {}", size, CLIENT_ADDR);
  println!("{}", shell_cmd);
  let cmd = &["sh", "-c", &shell_cmd];

  // Run deno echo server in the background.
  let mut echo_server = Command::new(deno_exe.to_str().unwrap())
    .args(&[
      "run",
      "--allow-net",
      "cli/tests/echo_server.ts",
      SERVER_ADDR,
    ])
    .spawn()?;

  std::thread::sleep(Duration::from_secs(5)); // wait for deno to wake up. TODO racy.

  let start = Instant::now();
  let _ = test_util::run_collect(cmd, None, None, None, true);
  let end = Instant::now();

  echo_server.kill()?;

  Ok(Value::Number(
    Number::from_f64((end - start).as_secs_f64()).unwrap(),
  ))
}


@@ -272,11 +272,6 @@ grault",
   ));
 }
-#[test]
-fn benchmark_test() {
-  util::run_python_script("tools/benchmark_test.py")
-}
 #[test]
 fn deno_dir_test() {
   use std::fs::remove_dir_all;


@@ -10,6 +10,7 @@ use os_pipe::pipe;
 #[cfg(unix)]
 pub use pty;
 use regex::Regex;
+use std::collections::HashMap;
 use std::env;
 use std::io::Read;
 use std::io::Write;
@@ -57,10 +58,18 @@ pub fn root_path() -> PathBuf {
   PathBuf::from(concat!(env!("CARGO_MANIFEST_DIR"), "/.."))
 }
 
+pub fn prebuilt_path() -> PathBuf {
+  third_party_path().join("prebuilt")
+}
+
 pub fn tests_path() -> PathBuf {
   root_path().join("cli").join("tests")
 }
 
+pub fn third_party_path() -> PathBuf {
+  root_path().join("third_party")
+}
+
 pub fn target_dir() -> PathBuf {
   let current_exe = std::env::current_exe().unwrap();
   let target_dir = current_exe.parent().unwrap().parent().unwrap();
@@ -77,6 +86,24 @@ pub fn deno_exe_path() -> PathBuf {
   p
 }
 
+pub fn prebuilt_tool_path(tool: &str) -> PathBuf {
+  let mut exe = tool.to_string();
+  exe.push_str(if cfg!(windows) { ".exe" } else { "" });
+  prebuilt_path().join(platform_dir_name()).join(exe)
+}
+
+fn platform_dir_name() -> &'static str {
+  if cfg!(target_os = "linux") {
+    "linux64"
+  } else if cfg!(target_os = "macos") {
+    "mac"
+  } else if cfg!(target_os = "windows") {
+    "win"
+  } else {
+    unreachable!()
+  }
+}
+
 pub fn test_server_path() -> PathBuf {
   let mut p = target_dir().join("test_server");
   if cfg!(windows) {
@@ -578,6 +605,76 @@ pub fn strip_ansi_codes(s: &str) -> std::borrow::Cow<str> {
   STRIP_ANSI_RE.replace_all(s, "")
 }
 
+pub fn run(
+  cmd: &[&str],
+  input: Option<&[&str]>,
+  envs: Option<Vec<(String, String)>>,
+  current_dir: Option<&str>,
+  expect_success: bool,
+) {
+  let mut process_builder = Command::new(cmd[0]);
+  process_builder.args(&cmd[1..]).stdin(Stdio::piped());
+  if let Some(dir) = current_dir {
+    process_builder.current_dir(dir);
+  }
+  if let Some(envs) = envs {
+    process_builder.envs(envs);
+  }
+  let mut prog = process_builder.spawn().expect("failed to spawn script");
+  if let Some(lines) = input {
+    let stdin = prog.stdin.as_mut().expect("failed to get stdin");
+    stdin
+      .write_all(lines.join("\n").as_bytes())
+      .expect("failed to write to stdin");
+  }
+  let status = prog.wait().expect("failed to wait on child");
+  if expect_success != status.success() {
+    panic!("Unexpected exit code: {:?}", status.code());
+  }
+}
+
+pub fn run_collect(
+  cmd: &[&str],
+  input: Option<&[&str]>,
+  envs: Option<Vec<(String, String)>>,
+  current_dir: Option<&str>,
+  expect_success: bool,
+) -> (String, String) {
+  let mut process_builder = Command::new(cmd[0]);
+  process_builder
+    .args(&cmd[1..])
+    .stdin(Stdio::piped())
+    .stdout(Stdio::piped())
+    .stderr(Stdio::piped());
+  if let Some(dir) = current_dir {
+    process_builder.current_dir(dir);
+  }
+  if let Some(envs) = envs {
+    process_builder.envs(envs);
+  }
+  let mut prog = process_builder.spawn().expect("failed to spawn script");
+  if let Some(lines) = input {
+    let stdin = prog.stdin.as_mut().expect("failed to get stdin");
+    stdin
+      .write_all(lines.join("\n").as_bytes())
+      .expect("failed to write to stdin");
+  }
+  let Output {
+    stdout,
+    stderr,
+    status,
+  } = prog.wait_with_output().expect("failed to wait on child");
+  let stdout = String::from_utf8(stdout).unwrap();
+  let stderr = String::from_utf8(stderr).unwrap();
+  if expect_success != status.success() {
+    eprintln!("stdout: <<<{}>>>", stdout);
+    eprintln!("stderr: <<<{}>>>", stderr);
+    panic!("Unexpected exit code: {:?}", status.code());
+  }
+  (stdout, stderr)
+}
+
 pub fn run_and_collect_output(
   expect_success: bool,
   args: &str,
@@ -855,45 +952,248 @@ pub fn test_pty(args: &str, output_path: &str, input: &[u8]) {
   }
 }
-#[test]
-fn test_wildcard_match() {
-  let fixtures = vec![
-    ("foobarbaz", "foobarbaz", true),
-    ("[WILDCARD]", "foobarbaz", true),
-    ("foobar", "foobarbaz", false),
-    ("foo[WILDCARD]baz", "foobarbaz", true),
-    ("foo[WILDCARD]baz", "foobazbar", false),
-    ("foo[WILDCARD]baz[WILDCARD]qux", "foobarbazqatqux", true),
-    ("foo[WILDCARD]", "foobar", true),
-    ("foo[WILDCARD]baz[WILDCARD]", "foobarbazqat", true),
-    // check with different line endings
-    ("foo[WILDCARD]\nbaz[WILDCARD]\n", "foobar\nbazqat\n", true),
-    (
-      "foo[WILDCARD]\nbaz[WILDCARD]\n",
-      "foobar\r\nbazqat\r\n",
-      true,
-    ),
-    (
-      "foo[WILDCARD]\r\nbaz[WILDCARD]\n",
-      "foobar\nbazqat\r\n",
-      true,
-    ),
-    (
-      "foo[WILDCARD]\r\nbaz[WILDCARD]\r\n",
-      "foobar\nbazqat\n",
-      true,
-    ),
-    (
-      "foo[WILDCARD]\r\nbaz[WILDCARD]\r\n",
-      "foobar\r\nbazqat\r\n",
-      true,
-    ),
-  ];
-  // Iterate through the fixture lists, testing each one
-  for (pattern, string, expected) in fixtures {
-    let actual = wildcard_match(pattern, string);
-    dbg!(pattern, string, expected);
-    assert_eq!(actual, expected);
+pub struct WrkOutput {
+  pub latency: f64,
+  pub requests: u64,
+}
+
+pub fn parse_wrk_output(output: &str) -> WrkOutput {
+  lazy_static! {
+    static ref REQUESTS_RX: Regex =
+      Regex::new(r"Requests/sec:\s+(\d+)").unwrap();
+    static ref LATENCY_RX: Regex =
+      Regex::new(r"\s+99%(?:\s+(\d+.\d+)([a-z]+))").unwrap();
+  }
+  let mut requests = None;
+  let mut latency = None;
+  for line in output.lines() {
+    if requests == None {
+      if let Some(cap) = REQUESTS_RX.captures(line) {
+        requests =
+          Some(str::parse::<u64>(cap.get(1).unwrap().as_str()).unwrap());
+      }
+    }
+    if latency == None {
+      if let Some(cap) = LATENCY_RX.captures(line) {
+        let time = cap.get(1).unwrap();
+        let unit = cap.get(2).unwrap();
+        latency = Some(
+          str::parse::<f64>(time.as_str()).unwrap()
+            * match unit.as_str() {
+              "ms" => 1.0,
+              "us" => 0.001,
+              "s" => 1000.0,
+              _ => unreachable!(),
+            },
+        );
+      }
+    }
+  }
+  WrkOutput {
+    requests: requests.unwrap(),
+    latency: latency.unwrap(),
+  }
+}
+
+pub struct StraceOutput {
+  pub percent_time: f64,
+  pub seconds: f64,
+  pub usecs_per_call: Option<u64>,
+  pub calls: u64,
+  pub errors: u64,
+}
+
+pub fn parse_strace_output(output: &str) -> HashMap<String, StraceOutput> {
+  let mut summary = HashMap::new();
+  // Filter out non-relevant lines. See the error log at
+  // https://github.com/denoland/deno/pull/3715/checks?check_run_id=397365887
+  // This is checked in testdata/strace_summary2.out
+  let mut lines = output
+    .lines()
+    .filter(|line| !line.is_empty() && !line.contains("detached ..."));
+  let count = lines.clone().count();
+  if count < 4 {
+    return summary;
+  }
+  let total_line = lines.next_back().unwrap();
+  lines.next_back(); // Drop separator
+  let data_lines = lines.skip(2);
+  for line in data_lines {
+    let syscall_fields = line.split_whitespace().collect::<Vec<_>>();
+    let len = syscall_fields.len();
+    let syscall_name = syscall_fields.last().unwrap();
+    if 5 <= len && len <= 6 {
+      summary.insert(
+        syscall_name.to_string(),
+        StraceOutput {
+          percent_time: str::parse::<f64>(syscall_fields[0]).unwrap(),
+          seconds: str::parse::<f64>(syscall_fields[1]).unwrap(),
+          usecs_per_call: Some(str::parse::<u64>(syscall_fields[2]).unwrap()),
+          calls: str::parse::<u64>(syscall_fields[3]).unwrap(),
+          errors: if syscall_fields.len() < 6 {
+            0
+          } else {
+            str::parse::<u64>(syscall_fields[4]).unwrap()
+          },
+        },
+      );
+    }
+  }
+  let total_fields = total_line.split_whitespace().collect::<Vec<_>>();
+  summary.insert(
+    "total".to_string(),
+    StraceOutput {
+      percent_time: str::parse::<f64>(total_fields[0]).unwrap(),
+      seconds: str::parse::<f64>(total_fields[1]).unwrap(),
+      usecs_per_call: None,
+      calls: str::parse::<u64>(total_fields[2]).unwrap(),
+      errors: str::parse::<u64>(total_fields[3]).unwrap(),
+    },
+  );
+  summary
+}
+
+pub fn parse_max_mem(output: &str) -> Option<u64> {
+  // Takes the output from "time -v" as input and extracts the 'maximum
+  // resident set size' and returns it in bytes.
+  for line in output.lines() {
+    if line
+      .to_lowercase()
+      .contains("maximum resident set size (kbytes)")
+    {
+      let value = line.split(": ").nth(1).unwrap();
+      return Some(str::parse::<u64>(value).unwrap() * 1024);
+    }
+  }
+  None
+}
+
+#[cfg(test)]
+mod tests {
+  use super::*;
+
+  #[test]
+  fn parse_wrk_output_1() {
+    const TEXT: &str = include_str!("./testdata/wrk1.txt");
+    let wrk = parse_wrk_output(TEXT);
+    assert_eq!(wrk.requests, 1837);
+    assert!((wrk.latency - 6.25).abs() < f64::EPSILON);
+  }
+
+  #[test]
+  fn parse_wrk_output_2() {
+    const TEXT: &str = include_str!("./testdata/wrk2.txt");
+    let wrk = parse_wrk_output(TEXT);
+    assert_eq!(wrk.requests, 53435);
+    assert!((wrk.latency - 6.22).abs() < f64::EPSILON);
+  }
+
+  #[test]
+  fn parse_wrk_output_3() {
+    const TEXT: &str = include_str!("./testdata/wrk3.txt");
+    let wrk = parse_wrk_output(TEXT);
+    assert_eq!(wrk.requests, 96037);
+    assert!((wrk.latency - 6.36).abs() < f64::EPSILON);
+  }
+
+  #[test]
+  fn strace_parse_1() {
+    const TEXT: &str = include_str!("./testdata/strace_summary.out");
+    let strace = parse_strace_output(TEXT);
+    // first syscall line
+    let munmap = strace.get("munmap").unwrap();
+    assert_eq!(munmap.calls, 60);
+    assert_eq!(munmap.errors, 0);
+    // line with errors
+    assert_eq!(strace.get("mkdir").unwrap().errors, 2);
+    // last syscall line
+    let prlimit = strace.get("prlimit64").unwrap();
+    assert_eq!(prlimit.calls, 2);
+    assert!((prlimit.percent_time - 0.0).abs() < f64::EPSILON);
+    // summary line
+    assert_eq!(strace.get("total").unwrap().calls, 704);
+    assert_eq!(strace.get("total").unwrap().errors, 5);
+  }
+
+  #[test]
+  fn strace_parse_2() {
+    const TEXT: &str = include_str!("./testdata/strace_summary2.out");
+    let strace = parse_strace_output(TEXT);
+    // first syscall line
+    let futex = strace.get("futex").unwrap();
+    assert_eq!(futex.calls, 449);
+    assert_eq!(futex.errors, 94);
+    // summary line
+    assert_eq!(strace.get("total").unwrap().calls, 821);
+    assert_eq!(strace.get("total").unwrap().errors, 107);
+  }
+
+  #[test]
+  fn test_wildcard_match() {
+    let fixtures = vec![
+      ("foobarbaz", "foobarbaz", true),
+      ("[WILDCARD]", "foobarbaz", true),
+      ("foobar", "foobarbaz", false),
+      ("foo[WILDCARD]baz", "foobarbaz", true),
+      ("foo[WILDCARD]baz", "foobazbar", false),
+      ("foo[WILDCARD]baz[WILDCARD]qux", "foobarbazqatqux", true),
+      ("foo[WILDCARD]", "foobar", true),
+      ("foo[WILDCARD]baz[WILDCARD]", "foobarbazqat", true),
+      // check with different line endings
+      ("foo[WILDCARD]\nbaz[WILDCARD]\n", "foobar\nbazqat\n", true),
+      (
+        "foo[WILDCARD]\nbaz[WILDCARD]\n",
+        "foobar\r\nbazqat\r\n",
+        true,
+      ),
+      (
+        "foo[WILDCARD]\r\nbaz[WILDCARD]\n",
+        "foobar\nbazqat\r\n",
+        true,
+      ),
+      (
+        "foo[WILDCARD]\r\nbaz[WILDCARD]\r\n",
+        "foobar\nbazqat\n",
+        true,
+      ),
+      (
+        "foo[WILDCARD]\r\nbaz[WILDCARD]\r\n",
+        "foobar\r\nbazqat\r\n",
+        true,
+      ),
+    ];
+    // Iterate through the fixture lists, testing each one
+    for (pattern, string, expected) in fixtures {
+      let actual = wildcard_match(pattern, string);
+      dbg!(pattern, string, expected);
+      assert_eq!(actual, expected);
+    }
+  }
+
+  #[test]
+  fn max_mem_parse() {
+    const TEXT: &str = include_str!("./testdata/time.out");
+    let size = parse_max_mem(TEXT);
+    assert_eq!(size, Some(120380 * 1024));
   }
 }


@@ -1,277 +0,0 @@
#!/usr/bin/env python
# Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
# Performs benchmark and append data to //website/data.json.
# If //website/data.json doesn't exist, this script tries to import it from
# gh-pages branch.
# To view the results locally run target/debug/test_server and visit
# http://localhost:4545/website

import os
import sys
import json
import time
import tempfile
import subprocess
from util import build_path, executable_suffix, root_path, run, run_output
import third_party
from http_benchmark import http_benchmark
import throughput_benchmark

# The list of the tuples of the benchmark name, arguments and return code
exec_time_benchmarks = [
    ("hello", ["run", "cli/tests/002_hello.ts"], None),
    ("relative_import", ["run", "cli/tests/003_relative_import.ts"], None),
    ("error_001", ["run", "cli/tests/error_001.ts"], 1),
    ("cold_hello", ["run", "--reload", "cli/tests/002_hello.ts"], None),
    ("cold_relative_import",
     ["run", "--reload", "cli/tests/003_relative_import.ts"], None),
    ("workers_startup",
     ["run", "--allow-read", "cli/tests/workers_startup_bench.ts"], None),
    ("workers_round_robin",
     ["run", "--allow-read", "cli/tests/workers_round_robin_bench.ts"], None),
    ("text_decoder", ["run", "cli/tests/text_decoder_perf.js"], None),
    ("text_encoder", ["run", "cli/tests/text_encoder_perf.js"], None),
    ("check", ["cache", "--reload", "std/examples/chat/server_test.ts"], None),
    ("no_check",
     ["cache", "--reload", "--no-check",
      "std/examples/chat/server_test.ts"], None),
]


def read_json(filename):
    with open(filename) as json_file:
        return json.load(json_file)


def write_json(filename, data):
    with open(filename, 'w') as outfile:
        json.dump(data, outfile)


def get_binary_sizes(build_dir):
    sizes = {}
    mtimes = {}
    # The deno executable should be located at the root of the build tree.
    deno_exe = os.path.join(build_dir, "deno" + executable_suffix)
    sizes["deno"] = os.path.getsize(deno_exe)
    # Because cargo's OUT_DIR is not predictable, search the build tree for
    # snapshot related files.
    for parent_dir, _, file_names in os.walk(build_dir):
        for file_name in file_names:
            if not file_name in [
                    "CLI_SNAPSHOT.bin",
                    "COMPILER_SNAPSHOT.bin",
            ]:
                continue
            file_path = os.path.join(parent_dir, file_name)
            file_mtime = os.path.getmtime(file_path)
            # If multiple copies of a file are found, use the most recent one.
            if file_name in mtimes and mtimes[file_name] > file_mtime:
                continue
            mtimes[file_name] = file_mtime
            sizes[file_name] = os.path.getsize(file_path)
    return sizes


def get_strace_summary_text(test_args):
    f = tempfile.NamedTemporaryFile()
    cmd = ["strace", "-c", "-f", "-o", f.name] + test_args
    try:
        subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        pass
    return f.read()


def strace_parse(summary_text):
    summary = {}
    # clear empty lines
    lines = list(filter(lambda x: x and x != "\n", summary_text.split("\n")))
    # Filter out non-relevant lines. See the error log at
    # https://github.com/denoland/deno/pull/3715/checks?check_run_id=397365887
    # This is checked in tools/testdata/strace_summary2.out
    lines = [x for x in lines if x.find("detached ...") == -1]
    if len(lines) < 4:
        return {}  # malformed summary
    lines, total_line = lines[2:-2], lines[-1]
    # data to dict for each line
    for line in lines:
        syscall_fields = line.split()
        syscall_name = syscall_fields[-1]
        syscall_dict = {}
        if 5 <= len(syscall_fields) <= 6:
            syscall_dict = {
                "% time": float(syscall_fields[0]),
                "seconds": float(syscall_fields[1]),
                "usecs/call": int(syscall_fields[2]),
                "calls": int(syscall_fields[3])
            }
            syscall_dict["errors"] = 0 if len(syscall_fields) < 6 else int(
                syscall_fields[4])
        summary[syscall_name] = syscall_dict
    # record overall (total) data
    total_fields = total_line.split()
    summary["total"] = {
        "% time": float(total_fields[0]),
        "seconds": float(total_fields[1]),
        "calls": int(total_fields[2]),
        "errors": int(total_fields[3])
    }
    return summary


def get_strace_summary(test_args):
    s = get_strace_summary_text(test_args)
    try:
        return strace_parse(s)
    except ValueError:
        print "error parsing strace"
        print "----- <strace> -------"
        print s
        print "----- </strace> ------"


def run_throughput(deno_exe):
    m = {}
    m["100M_tcp"] = throughput_benchmark.tcp(deno_exe, 100)
    m["100M_cat"] = throughput_benchmark.cat(deno_exe, 100)
    m["10M_tcp"] = throughput_benchmark.tcp(deno_exe, 10)
    m["10M_cat"] = throughput_benchmark.cat(deno_exe, 10)
    return m


# "thread_count" and "syscall_count" are both calculated here.
def run_strace_benchmarks(deno_exe, new_data):
    thread_count = {}
    syscall_count = {}
    for (name, args, _) in exec_time_benchmarks:
        s = get_strace_summary([deno_exe] + args)
        thread_count[name] = s["clone"]["calls"] + 1
        syscall_count[name] = s["total"]["calls"]
    new_data["thread_count"] = thread_count
    new_data["syscall_count"] = syscall_count


# Takes the output from "/usr/bin/time -v" as input and extracts the 'maximum
# resident set size' and returns it in bytes.
def find_max_mem_in_bytes(time_v_output):
    for line in time_v_output.split('\n'):
        if 'maximum resident set size (kbytes)' in line.lower():
            _, value = line.split(': ')
            return int(value) * 1024


def run_max_mem_benchmark(deno_exe):
    results = {}
    for (name, args, return_code) in exec_time_benchmarks:
        cmd = ["/usr/bin/time", "-v", deno_exe] + args
        try:
            out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            if (return_code is e.returncode):
                pass
            else:
                raise e
        mem = find_max_mem_in_bytes(out)
        results[name] = mem
    return results


def run_exec_time(deno_exe, build_dir):
    hyperfine_exe = third_party.get_prebuilt_tool_path("hyperfine")
    benchmark_file = os.path.join(build_dir, "hyperfine_results.json")

    def benchmark_command(deno_exe, args, return_code):
        # Bash test which asserts the return code value of the previous command
        # $? contains the return code of the previous command
        return_code_test = "; test $? -eq {}".format(
            return_code) if return_code is not None else ""
        return "{} {}{}".format(deno_exe, " ".join(args), return_code_test)

    run([hyperfine_exe, "--export-json", benchmark_file, "--warmup", "3"] + [
        benchmark_command(deno_exe, args, return_code)
        for (_, args, return_code) in exec_time_benchmarks
    ])
    hyperfine_results = read_json(benchmark_file)
    results = {}
    for [[name, _, _], data] in zip(exec_time_benchmarks,
                                    hyperfine_results["results"]):
        results[name] = {
            "mean": data["mean"],
            "stddev": data["stddev"],
            "user": data["user"],
            "system": data["system"],
            "min": data["min"],
            "max": data["max"]
        }
    return results


def run_http(build_dir, new_data):
    stats = http_benchmark(build_dir)
    new_data["req_per_sec"] = {k: v["req_per_sec"] for k, v in stats.items()}
    new_data["max_latency"] = {k: v["max_latency"] for k, v in stats.items()}


def bundle_benchmark(deno_exe):
    bundles = {
        "file_server": "./std/http/file_server.ts",
        "gist": "./std/examples/gist.ts",
    }

    sizes = {}

    for name, url in bundles.items():
        # bundle
        path = name + ".bundle.js"
        run([deno_exe, "bundle", "--unstable", url, path])
        # get size of bundle
        assert os.path.exists(path)
        sizes[name] = os.path.getsize(path)
        # remove bundle
        os.remove(path)

    return sizes


def main():
    build_dir = build_path()
    sha1 = run_output(["git", "rev-parse", "HEAD"],
                      exit_on_fail=True).out.strip()

    deno_exe = os.path.join(build_dir, "deno")

    os.chdir(root_path)

    new_data = {
        "created_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "sha1": sha1,
    }

    # TODO(ry) The "benchmark" benchmark should actually be called "exec_time".
    # When this is changed, the historical data in gh-pages branch needs to be
    # changed too.
    new_data["benchmark"] = run_exec_time(deno_exe, build_dir)

    new_data["binary_size"] = get_binary_sizes(build_dir)
    new_data["bundle_size"] = bundle_benchmark(deno_exe)

    # Cannot run throughput benchmark on windows because they don't have nc or
    # pipe.
    if os.name != 'nt':
        new_data["throughput"] = run_throughput(deno_exe)
        run_http(build_dir, new_data)

    if "linux" in sys.platform:
        run_strace_benchmarks(deno_exe, new_data)
        new_data["max_memory"] = run_max_mem_benchmark(deno_exe)

    print "===== <BENCHMARK RESULTS>"
    print json.dumps(new_data, indent=2)
    print "===== </BENCHMARK RESULTS>"

    write_json(os.path.join(build_dir, "bench.json"), new_data)


if __name__ == '__main__':
    main()


@@ -1,66 +0,0 @@
#!/usr/bin/env python
# Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
import sys
import os
import unittest

import benchmark
from test_util import DenoTestCase, run_tests


class TestBenchmark(DenoTestCase):
    def test_strace_parse(self):
        with open(
                os.path.join(sys.path[0], "testdata/strace_summary.out"),
                "r") as f:
            summary = benchmark.strace_parse(f.read())
        # first syscall line
        assert summary["munmap"]["calls"] == 60
        assert summary["munmap"]["errors"] == 0
        # line with errors
        assert summary["mkdir"]["errors"] == 2
        # last syscall line
        assert summary["prlimit64"]["calls"] == 2
        assert summary["prlimit64"]["% time"] == 0
        # summary line
        assert summary["total"]["calls"] == 704

    def test_strace_parse2(self):
        with open(
                os.path.join(sys.path[0], "testdata/strace_summary2.out"),
                "r") as f:
            summary = benchmark.strace_parse(f.read())
        # first syscall line
        assert summary["futex"]["calls"] == 449
        assert summary["futex"]["errors"] == 94
        # summary line
        assert summary["total"]["calls"] == 821

    def test_max_mem_parse(self):
        with open(os.path.join(sys.path[0], "testdata/time.out"), "r") as f:
            data = f.read()
        assert benchmark.find_max_mem_in_bytes(data) == 120380 * 1024

    def test_binary_size(self):
        binary_size_dict = benchmark.get_binary_sizes(self.build_dir)
        assert binary_size_dict["deno"] > 0
        assert binary_size_dict["CLI_SNAPSHOT.bin"] > 0

    @unittest.skipIf("linux" not in sys.platform,
                     "strace only supported on linux")
    def test_strace(self):
        new_data = {}
        benchmark.run_strace_benchmarks(self.deno_exe, new_data)
        assert "thread_count" in new_data
        assert "syscall_count" in new_data

        s = new_data["thread_count"]
        assert "hello" in s
        assert s["hello"] > 1

        s = new_data["syscall_count"]
        assert "hello" in s
        assert s["hello"] > 1


if __name__ == '__main__':
    run_tests()


@@ -1,215 +0,0 @@
#!/usr/bin/env python
# Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
import os
import sys
import time
import subprocess
import util
import third_party

# Some of the benchmarks in this file have been renamed. In case the history
# somehow gets messed up:
# "node_http" was once called "node"
# "deno_tcp" was once called "deno"
# "deno_http" was once called "deno_net_http"

DURATION = "20s"
NEXT_PORT = 4544


def server_addr(port):
    return "0.0.0.0:%s" % port


def get_port():
    global NEXT_PORT
    port = NEXT_PORT
    NEXT_PORT += 1
    # Return port as str because all usages below are as a str and having it an
    # integer just adds complexity.
    return str(port)


def deno_tcp(deno_exe):
    port = get_port()
    deno_cmd = [
        # TODO(lucacasonato): remove unstable when stabilized
        deno_exe,
        "run",
        "--allow-net",
        "tools/deno_tcp.ts",
        server_addr(port)
    ]
    print "http_benchmark testing DENO tcp."
    return run(deno_cmd, port)


def deno_http(deno_exe):
    port = get_port()
    deno_cmd = [
        deno_exe, "run", "--allow-net", "--reload", "--unstable",
        "std/http/http_bench.ts",
        server_addr(port)
    ]
    print "http_benchmark testing DENO using net/http."
    return run(deno_cmd, port)


def deno_tcp_proxy(deno_exe, hyper_hello_exe):
    port = get_port()
    origin_port = get_port()
    deno_cmd = [
        deno_exe, "run", "--allow-net", "tools/deno_tcp_proxy.ts",
        server_addr(port),
        server_addr(origin_port)
    ]
    print "http_proxy_benchmark testing DENO using net/tcp."
    return run(
        deno_cmd,
        port,
        origin_cmd=http_proxy_origin(hyper_hello_exe, origin_port))


def deno_http_proxy(deno_exe, hyper_hello_exe):
    port = get_port()
    origin_port = get_port()
    deno_cmd = [
        deno_exe, "run", "--allow-net", "tools/deno_http_proxy.ts",
        server_addr(port),
        server_addr(origin_port)
    ]
    print "http_proxy_benchmark testing DENO using net/http."
    return run(
        deno_cmd,
        port,
        origin_cmd=http_proxy_origin(hyper_hello_exe, origin_port))


def core_http_bin_ops(exe):
    print "http_benchmark testing CORE http_bench_bin_ops"
    return run([exe], 4544)


def core_http_json_ops(exe):
    print "http_benchmark testing CORE http_bench_json_ops"
    return run([exe], 4544)


def node_http():
    port = get_port()
    node_cmd = ["node", "tools/node_http.js", port]
    print "http_benchmark testing NODE."
    return run(node_cmd, port)


def node_http_proxy(hyper_hello_exe):
    port = get_port()
    origin_port = get_port()
    node_cmd = ["node", "tools/node_http_proxy.js", port, origin_port]
    print "http_proxy_benchmark testing NODE."
    return run(node_cmd, port, None,
               http_proxy_origin(hyper_hello_exe, origin_port))


def node_tcp_proxy(hyper_hello_exe):
    port = get_port()
    origin_port = get_port()
    node_cmd = ["node", "tools/node_tcp_proxy.js", port, origin_port]
    print "http_proxy_benchmark testing NODE tcp."
    return run(node_cmd, port, None,
               http_proxy_origin(hyper_hello_exe, origin_port))


def node_tcp():
    port = get_port()
    node_cmd = ["node", "tools/node_tcp.js", port]
    print "http_benchmark testing node_tcp.js"
    return run(node_cmd, port)


def http_proxy_origin(hyper_hello_exe, port):
    return [hyper_hello_exe, port]


def hyper_http(hyper_hello_exe):
    port = get_port()
    hyper_cmd = [hyper_hello_exe, port]
    print "http_benchmark testing RUST hyper."
    return run(hyper_cmd, port)


def http_benchmark(build_dir):
    deno_exe = os.path.join(build_dir, "deno")
    hyper_hello_exe = os.path.join(build_dir, "test_server")
    core_http_bin_ops_exe = os.path.join(build_dir,
                                         "examples/http_bench_bin_ops")
    core_http_json_ops_exe = os.path.join(build_dir,
                                          "examples/http_bench_json_ops")
    return {
        # "deno_tcp" was once called "deno"
        "deno_tcp": deno_tcp(deno_exe),
        # "deno_udp": deno_udp(deno_exe),
        "deno_http": deno_http(deno_exe),
        # TODO(ry) deno_proxy disabled to make fetch() standards compliant.
        # "deno_proxy": deno_http_proxy(deno_exe, hyper_hello_exe),
        "deno_proxy_tcp": deno_tcp_proxy(deno_exe, hyper_hello_exe),
        # "core_http_bin_ops" was once called "deno_core_single"
        # "core_http_bin_ops" was once called "deno_core_http_bench"
        "core_http_bin_ops": core_http_bin_ops(core_http_bin_ops_exe),
        "core_http_json_ops": core_http_json_ops(core_http_json_ops_exe),
        # "node_http" was once called "node"
        "node_http": node_http(),
        "node_proxy": node_http_proxy(hyper_hello_exe),
        "node_proxy_tcp": node_tcp_proxy(hyper_hello_exe),
        "node_tcp": node_tcp(),
        "hyper": hyper_http(hyper_hello_exe)
    }


def run(server_cmd, port, merge_env=None, origin_cmd=None):
    # Run deno echo server in the background.
    if merge_env is None:
        env = None
    else:
        env = os.environ.copy()
        for key, value in merge_env.iteritems():
            env[key] = value

    # Wait for port 4544 to become available.
    # TODO Need to use SO_REUSEPORT with tokio::net::TcpListener.
    time.sleep(5)

    origin = None
    if origin_cmd is not None:
        origin = subprocess.Popen(origin_cmd, env=env)

    print server_cmd
    server = subprocess.Popen(server_cmd, env=env)

    time.sleep(5)  # wait for server to wake up. TODO racy.

    try:
        wrk = third_party.get_prebuilt_tool_path("wrk")
        assert os.path.exists(wrk)
        cmd = "%s -d %s --latency http://127.0.0.1:%s/" % (wrk, DURATION, port)
        print cmd
        output = subprocess.check_output(cmd, shell=True)
        stats = util.parse_wrk_output(output)
        print output
        return stats
    finally:
        server_retcode = server.poll()
        if server_retcode is not None and server_retcode != 0:
            print "server ended with error"
            sys.exit(1)
        server.kill()
        if origin is not None:
            origin.kill()


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage ./tools/http_benchmark.py target/debug/deno"
        sys.exit(1)
    deno_http(sys.argv[1])


@@ -72,6 +72,7 @@ def eslint():
         ":!:cli/tests/encoding/**",
         ":!:cli/dts/**",
         ":!:cli/tsc/*typescript.js",
+        ":!:cli/bench/node*.js",
     ])
     if source_files:
         max_command_len = 30000


@@ -1,64 +0,0 @@
#!/usr/bin/env python
# Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
# Performs benchmark and append data to //website/data.json.
# If //website/data.json doesn't exist, this script tries to import it from
# gh-pages branch.
# To view the results locally run target/debug/test_server and visit
# http://localhost:4545/website

import os
import sys
import time
import subprocess
import util

MB = 1024 * 1024
SERVER_ADDR = "0.0.0.0:4544"
CLIENT_ADDR = "127.0.0.1 4544"


def cat(deno_exe, megs):
    size = megs * MB
    start = time.time()
    cmd = deno_exe + " run --allow-read "
    cmd += "cli/tests/cat.ts /dev/zero | head -c %s " % size
    print cmd
    subprocess.check_output(cmd, shell=True)
    end = time.time()
    return end - start


def tcp(deno_exe, megs):
    size = megs * MB

    # Run deno echo server in the background.
    args = [
        deno_exe, "run", "--allow-net", "cli/tests/echo_server.ts", SERVER_ADDR
    ]
    print args
    echo_server = subprocess.Popen(args)

    time.sleep(5)  # wait for deno to wake up. TODO racy.
    try:
        start = time.time()
        nc_cmd = "nc " + CLIENT_ADDR
        cmd = ("head -c %s /dev/zero " % size) + " | " + nc_cmd
        print cmd
        subprocess.check_output(cmd, shell=True)
        end = time.time()
        return end - start
    finally:
        echo_server.kill()


def main():
    deno_exe = sys.argv[1]
    megs = int(sys.argv[2])
    if not deno_exe or not megs:
        print "Usage ./tools/throughput_benchmark.py target/debug/deno 100"
        sys.exit(1)
    secs = tcp(sys.argv[1], megs)
    print secs, "seconds"


if __name__ == '__main__':
    main()


@@ -361,20 +361,6 @@ def extract_max_latency_in_milliseconds(pattern, string):
     return num * 1000
-
-
-def parse_wrk_output(output):
-    stats = {}
-    stats['req_per_sec'] = None
-    stats['max_latency'] = None
-    for line in output.split("\n"):
-        if stats['req_per_sec'] is None:
-            stats['req_per_sec'] = extract_number(r'Requests/sec:\s+(\d+)',
-                                                  line)
-        if stats['max_latency'] is None:
-            stats['max_latency'] = extract_max_latency_in_milliseconds(
-                r'\s+99%(?:\s+(\d+.\d+)([a-z]+))', line)
-    return stats
 
 
 def platform():
     return {"linux2": "linux", "darwin": "mac", "win32": "win"}[sys.platform]


@@ -2,8 +2,7 @@
 import os
 
 from test_util import DenoTestCase, run_tests
-from util import (parse_exit_code, shell_quote_win, parse_wrk_output,
-                  root_path)
+from util import (parse_exit_code, shell_quote_win, root_path)
 
 
 class TestUtil(DenoTestCase):
@@ -21,22 +20,6 @@ class TestUtil(DenoTestCase):
         assert shell_quote_win(
             'a"b""c\\d\\"e\\\\') == '"a""b""""c\\d\\\\""e\\\\\\\\"'
-
-    def test_parse_wrk_output(self):
-        f = open(os.path.join(root_path, "tools/testdata/wrk1.txt"))
-        stats = parse_wrk_output(f.read())
-        assert stats['req_per_sec'] == 1837
-        assert stats['max_latency'] == 6.25
-        f2 = open(os.path.join(root_path, "tools/testdata/wrk2.txt"))
-        stats2 = parse_wrk_output(f2.read())
-        assert stats2['req_per_sec'] == 53435
-        assert stats2['max_latency'] == 6.22
-        f3 = open(os.path.join(root_path, "tools/testdata/wrk3.txt"))
-        stats3 = parse_wrk_output(f3.read())
-        assert stats3['req_per_sec'] == 96037
-        assert stats3['max_latency'] == 6.36
-
     def test_executable_exists(self):
         assert os.path.exists(self.deno_exe)