
feat(cli): Add dot test reporter (#19804)

This commit adds a "dot" reporter to the "deno test" subcommand,
which can be activated using the "--dot" flag.

It provides concise output using:
- "." for a passing test
- "," for an ignored test
- "!" for a failing test

User output is silenced and not printed to the console.

In non-TTY environments, each result is printed on a separate line.
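
For illustration, here is a minimal Rust sketch of that marker scheme. The "Outcome" enum and "marker" function are hypothetical names used only for this example; the real implementation is the DotTestReporter added in this commit (see the reporters diff below).

// Hypothetical sketch (not the reporter's actual types): maps a test outcome
// to the marker characters described above.
enum Outcome {
    Passed,
    Ignored,
    Failed,
}

fn marker(outcome: Outcome) -> &'static str {
    match outcome {
        Outcome::Passed => ".",  // passing test
        Outcome::Ignored => ",", // ignored test
        Outcome::Failed => "!",  // failing test
    }
}

fn main() {
    // In a TTY the markers stream on one line; in non-TTY environments the
    // real reporter prints one marker per line instead.
    for outcome in [Outcome::Passed, Outcome::Ignored, Outcome::Failed] {
        print!("{}", marker(outcome));
    }
    println!();
}

The reporter is invoked as "deno test --dot"; as the flag-parsing changes below show, "--dot" conflicts with "--junit" and forces the log level to Error so user output stays silent.
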
Bartek Iwańczuk 2023-08-02 18:38:10 +02:00 committed by GitHub
parent d9c85e016f
commit 029bdf0cd5
13 changed files with 705 additions and 308 deletions


@@ -218,6 +218,15 @@ pub struct TaskFlags {
pub task: Option<String>,
}
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub enum TestReporterConfig {
#[default]
Pretty,
Dot,
// Contains path to write to or "-" to print to stdout.
Junit(String),
}
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct TestFlags {
pub doc: bool,
@@ -231,7 +240,7 @@ pub struct TestFlags {
pub concurrent_jobs: Option<NonZeroUsize>,
pub trace_ops: bool,
pub watch: Option<WatchFlags>,
pub junit_path: Option<String>,
pub reporter: TestReporterConfig,
}
#[derive(Clone, Debug, Eq, PartialEq)]
@@ -1877,6 +1886,13 @@ Directory arguments are expanded to all contained files matching the glob
.require_equals(true)
.default_missing_value("-")
)
.arg(
Arg::new("dot-reporter")
.long("dot")
.conflicts_with("junit")
.help("Use 'dot' test reporter with a concise output format")
.action(ArgAction::SetTrue),
)
)
}
@@ -3078,6 +3094,18 @@ fn test_parse(flags: &mut Flags, matches: &mut ArgMatches) {
};
let junit_path = matches.remove_one::<String>("junit");
let dot_reporter = matches.get_flag("dot-reporter");
if dot_reporter {
flags.log_level = Some(Level::Error);
}
let reporter = if dot_reporter {
TestReporterConfig::Dot
} else if let Some(path) = junit_path {
TestReporterConfig::Junit(path)
} else {
TestReporterConfig::Pretty
};
flags.subcommand = DenoSubcommand::Test(TestFlags {
no_run,
@@ -3091,7 +3119,7 @@ fn test_parse(flags: &mut Flags, matches: &mut ArgMatches) {
concurrent_jobs,
trace_ops,
watch: watch_arg_parse(matches),
junit_path,
reporter,
});
}
@@ -5995,7 +6023,7 @@ mod tests {
trace_ops: true,
coverage_dir: Some("cov".to_string()),
watch: Default::default(),
junit_path: None,
reporter: Default::default(),
}),
unstable: true,
no_prompt: true,
@@ -6061,6 +6089,7 @@ mod tests {
Flags {
subcommand: DenoSubcommand::Test(TestFlags {
no_run: false,
reporter: Default::default(),
doc: false,
fail_fast: None,
filter: None,
@@ -6074,7 +6103,6 @@ mod tests {
trace_ops: false,
coverage_dir: None,
watch: Default::default(),
junit_path: None,
}),
type_check_mode: TypeCheckMode::Local,
no_prompt: true,
@@ -6107,7 +6135,7 @@ mod tests {
trace_ops: false,
coverage_dir: None,
watch: Default::default(),
junit_path: None,
reporter: Default::default(),
}),
type_check_mode: TypeCheckMode::Local,
no_prompt: true,
@@ -6144,7 +6172,7 @@ mod tests {
trace_ops: false,
coverage_dir: None,
watch: Default::default(),
junit_path: None,
reporter: Default::default(),
}),
no_prompt: true,
type_check_mode: TypeCheckMode::Local,
@@ -6154,6 +6182,40 @@ mod tests {
);
}
#[test]
fn test_dot() {
let r = flags_from_vec(svec!["deno", "test", "--dot"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Test(TestFlags {
no_run: false,
doc: false,
fail_fast: None,
filter: None,
reporter: TestReporterConfig::Dot,
allow_none: false,
shuffle: None,
files: FileFlags {
include: vec![],
ignore: vec![],
},
concurrent_jobs: None,
trace_ops: false,
coverage_dir: None,
watch: Default::default(),
}),
no_prompt: true,
type_check_mode: TypeCheckMode::Local,
log_level: Some(Level::Error),
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "test", "--dot", "--junit"]);
assert!(r.is_err());
}
#[test]
fn test_shuffle() {
let r = flags_from_vec(svec!["deno", "test", "--shuffle=1"]);
@@ -6175,7 +6237,7 @@ mod tests {
trace_ops: false,
coverage_dir: None,
watch: Default::default(),
junit_path: None,
reporter: Default::default(),
}),
no_prompt: true,
type_check_mode: TypeCheckMode::Local,
@@ -6207,7 +6269,7 @@ mod tests {
watch: Some(WatchFlags {
no_clear_screen: false,
}),
junit_path: None,
reporter: Default::default(),
}),
no_prompt: true,
type_check_mode: TypeCheckMode::Local,
@@ -6238,7 +6300,7 @@ mod tests {
watch: Some(WatchFlags {
no_clear_screen: false,
}),
junit_path: None,
reporter: Default::default(),
}),
no_prompt: true,
type_check_mode: TypeCheckMode::Local,
@@ -6271,7 +6333,7 @@ mod tests {
watch: Some(WatchFlags {
no_clear_screen: true,
}),
junit_path: None,
reporter: Default::default(),
}),
type_check_mode: TypeCheckMode::Local,
no_prompt: true,
@@ -6301,7 +6363,7 @@ mod tests {
trace_ops: false,
coverage_dir: None,
watch: Default::default(),
junit_path: Some("-".to_string()),
reporter: TestReporterConfig::Junit("-".to_string()),
}),
type_check_mode: TypeCheckMode::Local,
no_prompt: true,
@@ -6331,7 +6393,7 @@ mod tests {
trace_ops: false,
coverage_dir: None,
watch: Default::default(),
junit_path: Some("junit.xml".to_string()),
reporter: TestReporterConfig::Junit("junit.xml".to_string()),
}),
type_check_mode: TypeCheckMode::Local,
no_prompt: true,


@@ -227,7 +227,7 @@ pub struct TestOptions {
pub shuffle: Option<u64>,
pub concurrent_jobs: NonZeroUsize,
pub trace_ops: bool,
pub junit_path: Option<String>,
pub reporter: TestReporterConfig,
}
impl TestOptions {
@@ -252,7 +252,7 @@ impl TestOptions {
no_run: test_flags.no_run,
shuffle: test_flags.shuffle,
trace_ops: test_flags.trace_ops,
junit_path: test_flags.junit_path,
reporter: test_flags.reporter,
})
}
}


@@ -700,7 +700,7 @@ impl LspTestReporter {
let err_string = format!(
"Uncaught error from {}: {}\nThis error was not caught from a test and caused the test runner to fail on the referenced module.\nIt most likely originated from a dangling promise, event/timeout handler or top-level code.",
origin,
test::format_test_error(js_error)
test::fmt::format_test_error(js_error)
);
let messages = as_test_messages(err_string, false);
for t in stack.iter().rev() {


@@ -333,6 +333,24 @@ itest!(steps_ignored_steps {
output: "test/steps/ignored_steps.out",
});
itest!(steps_dot_passing_steps {
args: "test --dot test/steps/passing_steps.ts",
exit_code: 0,
output: "test/steps/passing_steps.dot.out",
});
itest!(steps_dot_failing_steps {
args: "test --dot test/steps/failing_steps.ts",
exit_code: 1,
output: "test/steps/failing_steps.dot.out",
});
itest!(steps_dot_ignored_steps {
args: "test --dot test/steps/ignored_steps.ts",
exit_code: 0,
output: "test/steps/ignored_steps.dot.out",
});
itest!(steps_invalid_usage {
args: "test test/steps/invalid_usage.ts",
exit_code: 1,


@@ -0,0 +1,54 @@
[WILDCARD]
!
.
!
!
!
!
!
!
!
ERRORS
nested failure ... step 1 ... inner 1 => ./test/steps/failing_steps.ts:[WILDCARD]
error: Error: Failed.
throw new Error("Failed.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
multiple test step failures ... step 1 => ./test/steps/failing_steps.ts:[WILDCARD]
error: Error: Fail.
throw new Error("Fail.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
multiple test step failures ... step 2 => ./test/steps/failing_steps.ts:[WILDCARD]
error: Error: Fail.
await t.step("step 2", () => Promise.reject(new Error("Fail.")));
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
failing step in failing test ... step 1 => ./test/steps/failing_steps.ts:[WILDCARD]
error: Error: Fail.
throw new Error("Fail.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
failing step in failing test => ./test/steps/failing_steps.ts:[WILDCARD]
error: Error: Fail test.
throw new Error("Fail test.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
FAILURES
nested failure ... step 1 ... inner 1 => ./test/steps/failing_steps.ts:[WILDCARD]
multiple test step failures ... step 1 => ./test/steps/failing_steps.ts:[WILDCARD]
multiple test step failures ... step 2 => ./test/steps/failing_steps.ts:[WILDCARD]
failing step in failing test ... step 1 => ./test/steps/failing_steps.ts:[WILDCARD]
failing step in failing test => ./test/steps/failing_steps.ts:[WILDCARD]
FAILED | 0 passed (1 step) | 3 failed (5 steps) ([WILDCARD])
error: Test failed


@@ -0,0 +1,6 @@
[WILDCARD]
,
.
.
ok | 1 passed (1 step) | 0 failed | 0 ignored (1 step) [WILDCARD]


@@ -0,0 +1,17 @@
[WILDCARD]
.
.
.
.
.
.
.
.
.
.
.
.
.
ok | 6 passed (21 steps) | 0 failed ([WILDCARD])

cli/tools/test/fmt.rs (new file, 74 lines)

@@ -0,0 +1,74 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use super::*;
pub fn to_relative_path_or_remote_url(cwd: &Url, path_or_url: &str) -> String {
let url = Url::parse(path_or_url).unwrap();
if url.scheme() == "file" {
if let Some(mut r) = cwd.make_relative(&url) {
if !r.starts_with("../") {
r = format!("./{r}");
}
return r;
}
}
path_or_url.to_string()
}
fn abbreviate_test_error(js_error: &JsError) -> JsError {
let mut js_error = js_error.clone();
let frames = std::mem::take(&mut js_error.frames);
// check if there are any stack frames coming from user code
let should_filter = frames.iter().any(|f| {
if let Some(file_name) = &f.file_name {
!(file_name.starts_with("[ext:") || file_name.starts_with("ext:"))
} else {
true
}
});
if should_filter {
let mut frames = frames
.into_iter()
.rev()
.skip_while(|f| {
if let Some(file_name) = &f.file_name {
file_name.starts_with("[ext:") || file_name.starts_with("ext:")
} else {
false
}
})
.collect::<Vec<_>>();
frames.reverse();
js_error.frames = frames;
} else {
js_error.frames = frames;
}
js_error.cause = js_error
.cause
.as_ref()
.map(|e| Box::new(abbreviate_test_error(e)));
js_error.aggregated = js_error
.aggregated
.as_ref()
.map(|es| es.iter().map(abbreviate_test_error).collect());
js_error
}
// This function prettifies `JsError` and applies some changes specifically for
// test runner purposes:
//
// - filter out stack frames:
// - if stack trace consists of mixed user and internal code, the frames
// below the first user code frame are filtered out
// - if stack trace consists only of internal code it is preserved as is
pub fn format_test_error(js_error: &JsError) -> String {
let mut js_error = abbreviate_test_error(js_error);
js_error.exception_message = js_error
.exception_message
.trim_start_matches("Uncaught ")
.to_string();
format_js_error(&js_error)
}


@@ -4,6 +4,7 @@ use crate::args::CliOptions;
use crate::args::FilesConfig;
use crate::args::Flags;
use crate::args::TestFlags;
use crate::args::TestReporterConfig;
use crate::colors;
use crate::display;
use crate::factory::CliFactory;
@@ -80,8 +81,12 @@ use tokio::sync::mpsc::unbounded_channel;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::mpsc::WeakUnboundedSender;
pub mod fmt;
mod reporters;
pub use fmt::format_test_error;
use reporters::CompoundTestReporter;
use reporters::DotTestReporter;
use reporters::JunitTestReporter;
use reporters::PrettyTestReporter;
use reporters::TestReporter;
@@ -355,7 +360,7 @@ struct TestSpecifiersOptions {
fail_fast: Option<NonZeroUsize>,
log_level: Option<log::Level>,
specifier: TestSpecifierOptions,
junit_path: Option<String>,
reporter: TestReporterConfig,
}
#[derive(Debug, Clone)]
@@ -388,81 +393,31 @@ impl TestSummary {
}
fn get_test_reporter(options: &TestSpecifiersOptions) -> Box<dyn TestReporter> {
let pretty = Box::new(PrettyTestReporter::new(
options.concurrent_jobs.get() > 1,
options.log_level != Some(Level::Error),
));
if let Some(junit_path) = &options.junit_path {
let junit = Box::new(JunitTestReporter::new(junit_path.clone()));
// If junit is writing to stdout, only enable the junit reporter
if junit_path == "-" {
junit
} else {
Box::new(CompoundTestReporter::new(vec![pretty, junit]))
let parallel = options.concurrent_jobs.get() > 1;
match &options.reporter {
TestReporterConfig::Dot => Box::new(DotTestReporter::new()),
TestReporterConfig::Pretty => Box::new(PrettyTestReporter::new(
parallel,
options.log_level != Some(Level::Error),
)),
TestReporterConfig::Junit(path) => {
let junit = Box::new(JunitTestReporter::new(path.clone()));
// If junit is writing to stdout, only enable the junit reporter
if path == "-" {
junit
} else {
Box::new(CompoundTestReporter::new(vec![
Box::new(PrettyTestReporter::new(
parallel,
options.log_level != Some(Level::Error),
)),
junit,
]))
}
}
} else {
pretty
}
}
fn abbreviate_test_error(js_error: &JsError) -> JsError {
let mut js_error = js_error.clone();
let frames = std::mem::take(&mut js_error.frames);
// check if there are any stack frames coming from user code
let should_filter = frames.iter().any(|f| {
if let Some(file_name) = &f.file_name {
!(file_name.starts_with("[ext:") || file_name.starts_with("ext:"))
} else {
true
}
});
if should_filter {
let mut frames = frames
.into_iter()
.rev()
.skip_while(|f| {
if let Some(file_name) = &f.file_name {
file_name.starts_with("[ext:") || file_name.starts_with("ext:")
} else {
false
}
})
.collect::<Vec<_>>();
frames.reverse();
js_error.frames = frames;
} else {
js_error.frames = frames;
}
js_error.cause = js_error
.cause
.as_ref()
.map(|e| Box::new(abbreviate_test_error(e)));
js_error.aggregated = js_error
.aggregated
.as_ref()
.map(|es| es.iter().map(abbreviate_test_error).collect());
js_error
}
// This function prettifies `JsError` and applies some changes specifically for
// test runner purposes:
//
// - filter out stack frames:
// - if stack trace consists of mixed user and internal code, the frames
// below the first user code frame are filtered out
// - if stack trace consists only of internal code it is preserved as is
pub fn format_test_error(js_error: &JsError) -> String {
let mut js_error = abbreviate_test_error(js_error);
js_error.exception_message = js_error
.exception_message
.trim_start_matches("Uncaught ")
.to_string();
format_js_error(&js_error)
}
/// Test a single specifier as documentation containing test programs, an executable test module or
/// both.
pub async fn test_specifier(
@@ -1206,7 +1161,7 @@ pub async fn run_tests(
concurrent_jobs: test_options.concurrent_jobs,
fail_fast: test_options.fail_fast,
log_level,
junit_path: test_options.junit_path,
reporter: test_options.reporter,
specifier: TestSpecifierOptions {
filter: TestFilter::from_flag(&test_options.filter),
shuffle: test_options.shuffle,
@@ -1337,7 +1292,7 @@ pub async fn run_tests_with_watch(
concurrent_jobs: test_options.concurrent_jobs,
fail_fast: test_options.fail_fast,
log_level,
junit_path: test_options.junit_path,
reporter: test_options.reporter,
specifier: TestSpecifierOptions {
filter: TestFilter::from_flag(&test_options.filter),
shuffle: test_options.shuffle,


@@ -0,0 +1,210 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use super::fmt::format_test_error;
use super::fmt::to_relative_path_or_remote_url;
use super::*;
pub(super) fn format_test_step_ancestry(
desc: &TestStepDescription,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) -> String {
let root;
let mut ancestor_names = vec![];
let mut current_desc = desc;
loop {
if let Some(step_desc) = test_steps.get(&current_desc.parent_id) {
ancestor_names.push(&step_desc.name);
current_desc = step_desc;
} else {
root = tests.get(&current_desc.parent_id).unwrap();
break;
}
}
ancestor_names.reverse();
let mut result = String::new();
result.push_str(&root.name);
result.push_str(" ... ");
for name in ancestor_names {
result.push_str(name);
result.push_str(" ... ");
}
result.push_str(&desc.name);
result
}
pub fn format_test_for_summary(cwd: &Url, desc: &TestDescription) -> String {
format!(
"{} {}",
&desc.name,
colors::gray(format!(
"=> {}:{}:{}",
to_relative_path_or_remote_url(cwd, &desc.location.file_name),
desc.location.line_number,
desc.location.column_number
))
)
}
pub fn format_test_step_for_summary(
cwd: &Url,
desc: &TestStepDescription,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) -> String {
let long_name = format_test_step_ancestry(desc, tests, test_steps);
format!(
"{} {}",
long_name,
colors::gray(format!(
"=> {}:{}:{}",
to_relative_path_or_remote_url(cwd, &desc.location.file_name),
desc.location.line_number,
desc.location.column_number
))
)
}
pub(super) fn report_sigint(
cwd: &Url,
tests_pending: &HashSet<usize>,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
if tests_pending.is_empty() {
return;
}
let mut formatted_pending = BTreeSet::new();
for id in tests_pending {
if let Some(desc) = tests.get(id) {
formatted_pending.insert(format_test_for_summary(cwd, desc));
}
if let Some(desc) = test_steps.get(id) {
formatted_pending
.insert(format_test_step_for_summary(cwd, desc, tests, test_steps));
}
}
println!(
"\n{} The following tests were pending:\n",
colors::intense_blue("SIGINT")
);
for entry in formatted_pending {
println!("{}", entry);
}
println!();
}
pub(super) fn report_summary(
cwd: &Url,
summary: &TestSummary,
elapsed: &Duration,
) {
if !summary.failures.is_empty() || !summary.uncaught_errors.is_empty() {
#[allow(clippy::type_complexity)] // Type alias doesn't look better here
let mut failures_by_origin: BTreeMap<
String,
(Vec<(&TestDescription, &TestFailure)>, Option<&JsError>),
> = BTreeMap::default();
let mut failure_titles = vec![];
for (description, failure) in &summary.failures {
let (failures, _) = failures_by_origin
.entry(description.origin.clone())
.or_default();
failures.push((description, failure));
}
for (origin, js_error) in &summary.uncaught_errors {
let (_, uncaught_error) =
failures_by_origin.entry(origin.clone()).or_default();
let _ = uncaught_error.insert(js_error.as_ref());
}
// note: the trailing whitespace is intentional to get a red background
println!("\n{}\n", colors::white_bold_on_red(" ERRORS "));
for (origin, (failures, uncaught_error)) in failures_by_origin {
for (description, failure) in failures {
if !failure.hide_in_summary() {
let failure_title = format_test_for_summary(cwd, description);
println!("{}", &failure_title);
println!("{}: {}", colors::red_bold("error"), failure.to_string());
println!();
failure_titles.push(failure_title);
}
}
if let Some(js_error) = uncaught_error {
let failure_title = format!(
"{} (uncaught error)",
to_relative_path_or_remote_url(cwd, &origin)
);
println!("{}", &failure_title);
println!(
"{}: {}",
colors::red_bold("error"),
format_test_error(js_error)
);
println!("This error was not caught from a test and caused the test runner to fail on the referenced module.");
println!("It most likely originated from a dangling promise, event/timeout handler or top-level code.");
println!();
failure_titles.push(failure_title);
}
}
// note: the trailing whitespace is intentional to get a red background
println!("{}\n", colors::white_bold_on_red(" FAILURES "));
for failure_title in failure_titles {
println!("{failure_title}");
}
}
let status = if summary.has_failed() {
colors::red("FAILED").to_string()
} else {
colors::green("ok").to_string()
};
let get_steps_text = |count: usize| -> String {
if count == 0 {
String::new()
} else if count == 1 {
" (1 step)".to_string()
} else {
format!(" ({count} steps)")
}
};
let mut summary_result = String::new();
write!(
summary_result,
"{} passed{} | {} failed{}",
summary.passed,
get_steps_text(summary.passed_steps),
summary.failed,
get_steps_text(summary.failed_steps),
)
.unwrap();
let ignored_steps = get_steps_text(summary.ignored_steps);
if summary.ignored > 0 || !ignored_steps.is_empty() {
write!(
summary_result,
" | {} ignored{}",
summary.ignored, ignored_steps
)
.unwrap()
}
if summary.measured > 0 {
write!(summary_result, " | {} measured", summary.measured,).unwrap();
}
if summary.filtered_out > 0 {
write!(summary_result, " | {} filtered out", summary.filtered_out).unwrap()
};
println!(
"\n{} | {} {}\n",
status,
summary_result,
colors::gray(format!("({})", display::human_elapsed(elapsed.as_millis()))),
);
}


@@ -0,0 +1,207 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use super::common;
use super::fmt::to_relative_path_or_remote_url;
use super::*;
pub struct DotTestReporter {
n: usize,
width: usize,
cwd: Url,
summary: TestSummary,
}
impl DotTestReporter {
pub fn new() -> DotTestReporter {
let console_width = if let Some(size) = crate::util::console::console_size()
{
size.cols as usize
} else {
0
};
let console_width = (console_width as f32 * 0.8) as usize;
DotTestReporter {
n: 0,
width: console_width,
cwd: Url::from_directory_path(std::env::current_dir().unwrap()).unwrap(),
summary: TestSummary::new(),
}
}
fn print_status(&mut self, status: String) {
// Non-TTY console prints every result on a separate line.
if self.width == 0 {
println!("{}", status);
return;
}
if self.n != 0 && self.n % self.width == 0 {
println!();
}
self.n += 1;
print!("{}", status);
}
fn print_test_step_result(&mut self, result: &TestStepResult) {
let status = match result {
TestStepResult::Ok => fmt_ok(),
TestStepResult::Ignored => fmt_ignored(),
TestStepResult::Failed(_failure) => fmt_failed(),
};
self.print_status(status);
}
fn print_test_result(&mut self, result: &TestResult) {
let status = match result {
TestResult::Ok => fmt_ok(),
TestResult::Ignored => fmt_ignored(),
TestResult::Failed(_failure) => fmt_failed(),
TestResult::Cancelled => fmt_cancelled(),
};
self.print_status(status);
}
}
fn fmt_ok() -> String {
colors::gray(".").to_string()
}
fn fmt_ignored() -> String {
colors::cyan(",").to_string()
}
fn fmt_failed() -> String {
colors::red_bold("!").to_string()
}
fn fmt_cancelled() -> String {
colors::gray("!").to_string()
}
impl TestReporter for DotTestReporter {
fn report_register(&mut self, _description: &TestDescription) {}
fn report_plan(&mut self, plan: &TestPlan) {
self.summary.total += plan.total;
self.summary.filtered_out += plan.filtered_out;
}
fn report_wait(&mut self, _description: &TestDescription) {
// flush for faster feedback when line buffered
std::io::stdout().flush().unwrap();
}
fn report_output(&mut self, _output: &[u8]) {}
fn report_result(
&mut self,
description: &TestDescription,
result: &TestResult,
_elapsed: u64,
) {
match &result {
TestResult::Ok => {
self.summary.passed += 1;
}
TestResult::Ignored => {
self.summary.ignored += 1;
}
TestResult::Failed(failure) => {
self.summary.failed += 1;
self
.summary
.failures
.push((description.clone(), failure.clone()));
}
TestResult::Cancelled => {
self.summary.failed += 1;
}
}
self.print_test_result(result);
}
fn report_uncaught_error(&mut self, origin: &str, error: Box<JsError>) {
self.summary.failed += 1;
self
.summary
.uncaught_errors
.push((origin.to_string(), error));
println!(
"Uncaught error from {} {}",
to_relative_path_or_remote_url(&self.cwd, origin),
colors::red("FAILED")
);
}
fn report_step_register(&mut self, _description: &TestStepDescription) {}
fn report_step_wait(&mut self, _description: &TestStepDescription) {
// flush for faster feedback when line buffered
std::io::stdout().flush().unwrap();
}
fn report_step_result(
&mut self,
desc: &TestStepDescription,
result: &TestStepResult,
_elapsed: u64,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
match &result {
TestStepResult::Ok => {
self.summary.passed_steps += 1;
}
TestStepResult::Ignored => {
self.summary.ignored_steps += 1;
}
TestStepResult::Failed(failure) => {
self.summary.failed_steps += 1;
self.summary.failures.push((
TestDescription {
id: desc.id,
name: common::format_test_step_ancestry(desc, tests, test_steps),
ignore: false,
only: false,
origin: desc.origin.clone(),
location: desc.location.clone(),
},
failure.clone(),
))
}
}
self.print_test_step_result(result);
}
fn report_summary(
&mut self,
elapsed: &Duration,
_tests: &IndexMap<usize, TestDescription>,
_test_steps: &IndexMap<usize, TestStepDescription>,
) {
common::report_summary(&self.cwd, &self.summary, elapsed);
}
fn report_sigint(
&mut self,
tests_pending: &HashSet<usize>,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
common::report_sigint(&self.cwd, tests_pending, tests, test_steps);
}
fn flush_report(
&mut self,
_elapsed: &Duration,
_tests: &IndexMap<usize, TestDescription>,
_test_steps: &IndexMap<usize, TestStepDescription>,
) -> anyhow::Result<()> {
Ok(())
}
}


@@ -2,11 +2,14 @@
use super::*;
mod common;
mod compound;
mod dot;
mod junit;
mod pretty;
pub use compound::CompoundTestReporter;
pub use dot::DotTestReporter;
pub use junit::JunitTestReporter;
pub use pretty::PrettyTestReporter;


@@ -1,5 +1,7 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use super::common;
use super::fmt::to_relative_path_or_remote_url;
use super::*;
pub struct PrettyTestReporter {
@@ -39,7 +41,7 @@ impl PrettyTestReporter {
"{}",
colors::gray(format!(
"{} => ",
self.to_relative_path_or_remote_url(&description.origin)
to_relative_path_or_remote_url(&self.cwd, &description.origin)
))
);
}
@@ -50,19 +52,6 @@ impl PrettyTestReporter {
self.scope_test_id = Some(description.id);
}
fn to_relative_path_or_remote_url(&self, path_or_url: &str) -> String {
let url = Url::parse(path_or_url).unwrap();
if url.scheme() == "file" {
if let Some(mut r) = self.cwd.make_relative(&url) {
if !r.starts_with("../") {
r = format!("./{r}");
}
return r;
}
}
path_or_url.to_string()
}
fn force_report_step_wait(&mut self, description: &TestStepDescription) {
self.write_output_end();
if !self.in_new_line {
@@ -137,68 +126,6 @@ impl PrettyTestReporter {
self.did_have_user_output = false;
}
}
fn format_test_step_ancestry(
&self,
desc: &TestStepDescription,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) -> String {
let root;
let mut ancestor_names = vec![];
let mut current_desc = desc;
loop {
if let Some(step_desc) = test_steps.get(&current_desc.parent_id) {
ancestor_names.push(&step_desc.name);
current_desc = step_desc;
} else {
root = tests.get(&current_desc.parent_id).unwrap();
break;
}
}
ancestor_names.reverse();
let mut result = String::new();
result.push_str(&root.name);
result.push_str(" ... ");
for name in ancestor_names {
result.push_str(name);
result.push_str(" ... ");
}
result.push_str(&desc.name);
result
}
fn format_test_for_summary(&self, desc: &TestDescription) -> String {
format!(
"{} {}",
&desc.name,
colors::gray(format!(
"=> {}:{}:{}",
self.to_relative_path_or_remote_url(&desc.location.file_name),
desc.location.line_number,
desc.location.column_number
))
)
}
fn format_test_step_for_summary(
&self,
desc: &TestStepDescription,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) -> String {
let long_name = self.format_test_step_ancestry(desc, tests, test_steps);
format!(
"{} {}",
long_name,
colors::gray(format!(
"=> {}:{}:{}",
self.to_relative_path_or_remote_url(&desc.location.file_name),
desc.location.line_number,
desc.location.column_number
))
)
}
}
impl TestReporter for PrettyTestReporter {
@@ -216,7 +143,7 @@ impl TestReporter for PrettyTestReporter {
"running {} {} from {}",
plan.total,
inflection,
self.to_relative_path_or_remote_url(&plan.origin)
to_relative_path_or_remote_url(&self.cwd, &plan.origin)
))
);
self.in_new_line = true;
@@ -314,7 +241,7 @@ impl TestReporter for PrettyTestReporter {
}
println!(
"Uncaught error from {} {}",
self.to_relative_path_or_remote_url(origin),
to_relative_path_or_remote_url(&self.cwd, origin),
colors::red("FAILED")
);
self.in_new_line = true;
@@ -349,7 +276,7 @@ impl TestReporter for PrettyTestReporter {
self.summary.failures.push((
TestDescription {
id: desc.id,
name: self.format_test_step_ancestry(desc, tests, test_steps),
name: common::format_test_step_ancestry(desc, tests, test_steps),
ignore: false,
only: false,
origin: desc.origin.clone(),
@@ -366,9 +293,9 @@ impl TestReporter for PrettyTestReporter {
"{} {} ...",
colors::gray(format!(
"{} =>",
self.to_relative_path_or_remote_url(&desc.origin)
to_relative_path_or_remote_url(&self.cwd, &desc.origin)
)),
self.format_test_step_ancestry(desc, tests, test_steps)
common::format_test_step_ancestry(desc, tests, test_steps)
);
self.in_new_line = false;
self.scope_test_id = Some(desc.id);
@@ -398,123 +325,7 @@ impl TestReporter for PrettyTestReporter {
_tests: &IndexMap<usize, TestDescription>,
_test_steps: &IndexMap<usize, TestStepDescription>,
) {
if !self.summary.failures.is_empty()
|| !self.summary.uncaught_errors.is_empty()
{
#[allow(clippy::type_complexity)] // Type alias doesn't look better here
let mut failures_by_origin: BTreeMap<
String,
(Vec<(&TestDescription, &TestFailure)>, Option<&JsError>),
> = BTreeMap::default();
let mut failure_titles = vec![];
for (description, failure) in &self.summary.failures {
let (failures, _) = failures_by_origin
.entry(description.origin.clone())
.or_default();
failures.push((description, failure));
}
for (origin, js_error) in &self.summary.uncaught_errors {
let (_, uncaught_error) =
failures_by_origin.entry(origin.clone()).or_default();
let _ = uncaught_error.insert(js_error.as_ref());
}
// note: the trailing whitespace is intentional to get a red background
println!("\n{}\n", colors::white_bold_on_red(" ERRORS "));
for (origin, (failures, uncaught_error)) in failures_by_origin {
for (description, failure) in failures {
if !failure.hide_in_summary() {
let failure_title = self.format_test_for_summary(description);
println!("{}", &failure_title);
println!("{}: {}", colors::red_bold("error"), failure.to_string());
println!();
failure_titles.push(failure_title);
}
}
if let Some(js_error) = uncaught_error {
let failure_title = format!(
"{} (uncaught error)",
self.to_relative_path_or_remote_url(&origin)
);
println!("{}", &failure_title);
println!(
"{}: {}",
colors::red_bold("error"),
format_test_error(js_error)
);
println!("This error was not caught from a test and caused the test runner to fail on the referenced module.");
println!("It most likely originated from a dangling promise, event/timeout handler or top-level code.");
println!();
failure_titles.push(failure_title);
}
}
// note: the trailing whitespace is intentional to get a red background
println!("{}\n", colors::white_bold_on_red(" FAILURES "));
for failure_title in failure_titles {
println!("{failure_title}");
}
}
let status = if self.summary.has_failed() {
colors::red("FAILED").to_string()
} else {
colors::green("ok").to_string()
};
let get_steps_text = |count: usize| -> String {
if count == 0 {
String::new()
} else if count == 1 {
" (1 step)".to_string()
} else {
format!(" ({count} steps)")
}
};
let mut summary_result = String::new();
write!(
summary_result,
"{} passed{} | {} failed{}",
self.summary.passed,
get_steps_text(self.summary.passed_steps),
self.summary.failed,
get_steps_text(self.summary.failed_steps),
)
.unwrap();
let ignored_steps = get_steps_text(self.summary.ignored_steps);
if self.summary.ignored > 0 || !ignored_steps.is_empty() {
write!(
summary_result,
" | {} ignored{}",
self.summary.ignored, ignored_steps
)
.unwrap()
}
if self.summary.measured > 0 {
write!(summary_result, " | {} measured", self.summary.measured,).unwrap();
}
if self.summary.filtered_out > 0 {
write!(
summary_result,
" | {} filtered out",
self.summary.filtered_out
)
.unwrap()
};
println!(
"\n{} | {} {}\n",
status,
summary_result,
colors::gray(format!(
"({})",
display::human_elapsed(elapsed.as_millis())
)),
);
common::report_summary(&self.cwd, &self.summary, elapsed);
self.in_new_line = true;
}
@@ -524,27 +335,7 @@ impl TestReporter for PrettyTestReporter {
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
if tests_pending.is_empty() {
return;
}
let mut formatted_pending = BTreeSet::new();
for id in tests_pending {
if let Some(desc) = tests.get(id) {
formatted_pending.insert(self.format_test_for_summary(desc));
}
if let Some(desc) = test_steps.get(id) {
formatted_pending
.insert(self.format_test_step_for_summary(desc, tests, test_steps));
}
}
println!(
"\n{} The following tests were pending:\n",
colors::intense_blue("SIGINT")
);
for entry in formatted_pending {
println!("{}", entry);
}
println!();
common::report_sigint(&self.cwd, tests_pending, tests, test_steps);
self.in_new_line = true;
}