
feat(test): Represent uncaught errors (#14513)

This commit adds better reporting of uncaught errors
in the top-level scope of test files. The change affects
both the console runner and the LSP runner.
Nayeem Rahman 2022-05-09 10:44:50 +01:00 committed by GitHub
parent ab728e9ccf
commit 23efc4fcab
12 changed files with 316 additions and 53 deletions
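
The mechanism is the same in both runners: when executing a whole test module returns an error, the runner checks whether it can be downcast to a JsError and, if so, forwards it over the event channel as a new UncaughtError event rather than tearing down the run. Below is a minimal, self-contained sketch of that pattern; JsError, TestEvent, and forward_module_error here are simplified stand-ins for illustration (the real code downcasts deno_core's AnyError inside the spawn_blocking closures shown in the diffs that follow).

use std::error::Error;
use std::fmt;

// Stand-in for deno_core::error::JsError.
#[derive(Debug)]
struct JsError(String);

impl fmt::Display for JsError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl Error for JsError {}

// Stand-in for the runner's TestEvent enum (only the new variant).
#[derive(Debug)]
enum TestEvent {
    UncaughtError(String, Box<JsError>),
}

// Hypothetical helper showing the downcast-and-forward pattern: a JS error
// from a test module becomes an event; any other error still fails the run.
fn forward_module_error(
    origin: String,
    result: Result<(), Box<dyn Error>>,
) -> Result<Option<TestEvent>, Box<dyn Error>> {
    match result {
        Ok(()) => Ok(None),
        Err(error) if error.is::<JsError>() => Ok(Some(TestEvent::UncaughtError(
            origin,
            error.downcast::<JsError>().unwrap(),
        ))),
        Err(error) => Err(error),
    }
}

fn main() {
    let err: Box<dyn Error> = Box::new(JsError("Error: baz".into()));
    let event = forward_module_error("./test/uncaught_errors_3.ts".into(), Err(err));
    println!("{:?}", event);
}

Non-JS failures (for example an I/O error while loading the module) still propagate as Err, matching the else branch in both spawn_blocking closures below.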

View file

@@ -20,6 +20,7 @@ use crate::tools::test::TestEventSender;
use deno_core::anyhow::anyhow;
use deno_core::error::AnyError;
use deno_core::error::JsError;
use deno_core::futures::future;
use deno_core::futures::stream;
use deno_core::futures::StreamExt;
@@ -181,7 +182,7 @@ async fn test_specifier(
permissions: Permissions,
specifier: ModuleSpecifier,
mode: test::TestMode,
sender: TestEventSender,
sender: &TestEventSender,
token: CancellationToken,
options: Option<Value>,
) -> Result<(), AnyError> {
@@ -337,22 +338,32 @@ impl TestRun {
let specifier = specifier.clone();
let ps = ps.clone();
let permissions = permissions.clone();
let sender = sender.clone();
let mut sender = sender.clone();
let options = self.filters.get(&specifier).map(|f| f.as_test_options());
let token = self.token.clone();
tokio::task::spawn_blocking(move || {
let future = test_specifier(
let origin = specifier.to_string();
let file_result = run_basic(test_specifier(
ps,
permissions,
specifier,
test::TestMode::Executable,
sender,
&sender,
token,
options,
);
run_basic(future)
));
if let Err(error) = file_result {
if error.is::<JsError>() {
sender.send(test::TestEvent::UncaughtError(
origin,
Box::new(error.downcast::<JsError>().unwrap()),
))?;
} else {
return Err(error);
}
}
Ok(())
})
});
@@ -404,6 +415,11 @@ impl TestRun {
reporter.report_result(&description, &result, elapsed);
}
test::TestEvent::UncaughtError(origin, error) => {
reporter.report_uncaught_error(&origin, &error);
summary.failed += 1;
summary.uncaught_errors.push((origin, error));
}
test::TestEvent::StepWait(description) => {
reporter.report_step_wait(&description);
}
@@ -805,6 +821,37 @@ impl test::TestReporter for LspTestReporter {
}
}
fn report_uncaught_error(&mut self, origin: &str, js_error: &JsError) {
if self.current_origin == Some(origin.to_string()) {
self.current_origin = None;
}
let stack = self.stack.remove(origin).unwrap_or_default();
let err_string = format!(
"Uncaught error from {}: {}\nThis error was not caught from a test and caused the test runner to fail on the referenced module.\nIt most likely originated from a dangling promise, event/timeout handler or top-level code.",
origin,
test::format_test_error(js_error)
);
let messages = as_test_messages(err_string, false);
for t in stack.iter().rev() {
match t {
TestOrTestStepDescription::TestDescription(desc) => {
self.progress(lsp_custom::TestRunProgressMessage::Failed {
test: desc.into(),
messages: messages.clone(),
duration: None,
});
}
TestOrTestStepDescription::TestStepDescription(desc) => {
self.progress(lsp_custom::TestRunProgressMessage::Failed {
test: desc.into(),
messages: messages.clone(),
duration: None,
});
}
}
}
}
fn report_step_wait(&mut self, desc: &test::TestStepDescription) {
if !self.includes_step(desc) {
self.add_step(desc);

View file

@@ -165,7 +165,7 @@ pub enum TestRunProgressMessage {
End,
}
#[derive(Debug, Deserialize, Serialize)]
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct TestMessage {
pub message: lsp::MarkupContent,

View file

@@ -368,3 +368,9 @@ fn file_protocol() {
})
.run();
}
itest!(uncaught_errors {
args: "test --quiet test/uncaught_errors_1.ts test/uncaught_errors_2.ts test/uncaught_errors_3.ts",
output: "test/uncaught_errors.out",
exit_code: 1,
});

View file

@@ -1,7 +1,9 @@
Uncaught error from ./compat/test_runner/top_level_fail_cjs.js FAILED
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
ERRORS
error: Uncaught (in promise) AssertionError: Values are not strictly equal:
./compat/test_runner/top_level_fail_cjs.js (uncaught error)
error: (in promise) AssertionError: Values are not strictly equal:
[Diff] Actual / Expected
@@ -10,4 +12,16 @@ error: Uncaught (in promise) AssertionError: Values are not strictly equal:
- 10
+ 20
[WILDCARD]
Error.captureStackTrace(this, stackStartFn || stackStartFunction);
^
at [WILDCARD]
This error was not caught from a test and caused the test runner to fail on the referenced module.
It most likely originated from a dangling promise, event/timeout handler or top-level code.
FAILURES
./compat/test_runner/top_level_fail_cjs.js (uncaught error)
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
error: Test failed

View file

@@ -1,7 +1,9 @@
Uncaught error from ./compat/test_runner/top_level_fail_esm.mjs FAILED
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
ERRORS
error: Uncaught AssertionError: Values are not strictly equal:
./compat/test_runner/top_level_fail_esm.mjs (uncaught error)
error: AssertionError: Values are not strictly equal:
[Diff] Actual / Expected
@@ -10,4 +12,16 @@ error: Uncaught AssertionError: Values are not strictly equal:
- 10
+ 20
[WILDCARD]
Error.captureStackTrace(this, stackStartFn || stackStartFunction);
^
at [WILDCARD]
This error was not caught from a test and caused the test runner to fail on the referenced module.
It most likely originated from a dangling promise, event/timeout handler or top-level code.
FAILURES
./compat/test_runner/top_level_fail_esm.mjs (uncaught error)
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
error: Test failed

View file

@@ -1,8 +1,19 @@
Uncaught error from ./test/no_check.ts FAILED
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
ERRORS
error: Uncaught TypeError: Cannot read properties of undefined (reading 'fn')
./test/no_check.ts (uncaught error)
error: TypeError: Cannot read properties of undefined (reading 'fn')
Deno.test();
^
at [WILDCARD]
at [WILDCARD]/test/no_check.ts:1:6
This error was not caught from a test and caused the test runner to fail on the referenced module.
It most likely originated from a dangling promise, event/timeout handler or top-level code.
FAILURES
./test/no_check.ts (uncaught error)
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
error: Test failed

View file

@@ -0,0 +1,58 @@
running 3 tests from ./test/uncaught_errors_1.ts
foo 1 ... FAILED ([WILDCARD])
foo 2 ... ok ([WILDCARD])
foo 3 ...
Uncaught error from ./test/uncaught_errors_1.ts FAILED
running 3 tests from ./test/uncaught_errors_2.ts
bar 1 ... ok ([WILDCARD])
bar 2 ... FAILED ([WILDCARD])
bar 3 ... FAILED ([WILDCARD])
Uncaught error from ./test/uncaught_errors_3.ts FAILED
ERRORS
foo 1 => ./test/uncaught_errors_1.ts:1:6
error: Error: foo 1 message
throw new Error("foo 1 message");
^
at [WILDCARD]/test/uncaught_errors_1.ts:2:9
./test/uncaught_errors_1.ts (uncaught error)
error: (in promise) Error: foo 3 message
Promise.reject(new Error("foo 3 message"));
^
at [WILDCARD]/test/uncaught_errors_1.ts:8:18
This error was not caught from a test and caused the test runner to fail on the referenced module.
It most likely originated from a dangling promise, event/timeout handler or top-level code.
bar 2 => ./test/uncaught_errors_2.ts:3:6
error: Error: bar 2
throw new Error("bar 2");
^
at [WILDCARD]/test/uncaught_errors_2.ts:4:9
bar 3 => ./test/uncaught_errors_2.ts:6:6
error: Error: bar 3 message
throw new Error("bar 3 message");
^
at [WILDCARD]/test/uncaught_errors_2.ts:7:9
./test/uncaught_errors_3.ts (uncaught error)
error: Error: baz
throw new Error("baz");
^
at [WILDCARD]/test/uncaught_errors_3.ts:1:7
This error was not caught from a test and caused the test runner to fail on the referenced module.
It most likely originated from a dangling promise, event/timeout handler or top-level code.
FAILURES
foo 1 => ./test/uncaught_errors_1.ts:1:6
./test/uncaught_errors_1.ts (uncaught error)
bar 2 => ./test/uncaught_errors_2.ts:3:6
bar 3 => ./test/uncaught_errors_2.ts:6:6
./test/uncaught_errors_3.ts (uncaught error)
test result: FAILED. 2 passed; 5 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
error: Test failed

View file

@@ -0,0 +1,9 @@
Deno.test("foo 1", () => {
throw new Error("foo 1 message");
});
Deno.test("foo 2", () => {});
Deno.test("foo 3", () => {
Promise.reject(new Error("foo 3 message"));
});

View file

@@ -0,0 +1,8 @@
Deno.test("bar 1", () => {});
Deno.test("bar 2", () => {
throw new Error("bar 2");
});
Deno.test("bar 3", () => {
throw new Error("bar 3 message");
});

View file

@@ -0,0 +1 @@
throw new Error("baz");

View file

@@ -1,10 +1,22 @@
Check [WILDCARD]/test/unhandled_rejection.ts
Uncaught error from ./test/unhandled_rejection.ts FAILED
test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
ERRORS
error: Uncaught (in promise) Error: rejection
./test/unhandled_rejection.ts (uncaught error)
error: (in promise) Error: rejection
reject(new Error("rejection"));
^
at [WILDCARD]/test/unhandled_rejection.ts:2:10
at new Promise (<anonymous>)
at [WILDCARD]/test/unhandled_rejection.ts:1:1
This error was not caught from a test and caused the test runner to fail on the referenced module.
It most likely originated from a dangling promise, event/timeout handler or top-level code.
FAILURES
./test/unhandled_rejection.ts (uncaught error)
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
error: Test failed

View file

@@ -51,6 +51,7 @@ use rand::seq::SliceRandom;
use rand::SeedableRng;
use regex::Regex;
use serde::Deserialize;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::io::Read;
@@ -149,6 +150,7 @@ pub enum TestEvent {
Wait(TestDescription),
Output(Vec<u8>),
Result(TestDescription, TestResult, u64),
UncaughtError(String, Box<JsError>),
StepWait(TestStepDescription),
StepResult(TestStepDescription, TestStepResult, u64),
}
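
As the later hunks in this file show, the new variant travels over the runner's event channel from per-file worker threads to the single event loop that updates the reporter and summary. A rough std-only sketch of that flow, with std::sync::mpsc standing in for the runner's TestEventSender and a plain String standing in for Box<JsError>:

use std::sync::mpsc;
use std::thread;

// Stand-in event type; the real TestEvent carries a Box<JsError>.
enum TestEvent {
    UncaughtError(String, String),
}

fn main() {
    let (sender, receiver) = mpsc::channel();

    // Producer side: a per-file worker reports a module-level error
    // instead of aborting the whole run.
    let worker = thread::spawn(move || {
        sender
            .send(TestEvent::UncaughtError(
                "./test/uncaught_errors_3.ts".to_string(),
                "Error: baz".to_string(),
            ))
            .unwrap();
    });

    // Consumer side: the event loop feeds the reporter and bumps the
    // failure count, mirroring the new match arms added further down.
    let mut failed = 0;
    for event in receiver {
        match event {
            TestEvent::UncaughtError(origin, error) => {
                println!("uncaught error from {}: {}", origin, error);
                failed += 1;
            }
        }
    }
    worker.join().unwrap();
    println!("{} failed", failed);
}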
@@ -166,6 +168,7 @@ pub struct TestSummary {
pub filtered_out: usize,
pub measured: usize,
pub failures: Vec<(TestDescription, Box<JsError>)>,
pub uncaught_errors: Vec<(String, Box<JsError>)>,
}
#[derive(Debug, Clone, Deserialize)]
@@ -192,6 +195,7 @@ impl TestSummary {
filtered_out: 0,
measured: 0,
failures: Vec::new(),
uncaught_errors: Vec::new(),
}
}
@@ -214,6 +218,7 @@ pub trait TestReporter {
result: &TestResult,
elapsed: u64,
);
fn report_uncaught_error(&mut self, origin: &str, error: &JsError);
fn report_step_wait(&mut self, description: &TestStepDescription);
fn report_step_result(
&mut self,
@@ -233,10 +238,11 @@ struct PrettyTestReporter {
concurrent: bool,
echo_output: bool,
deferred_step_output: HashMap<TestDescription, Vec<DeferredStepOutput>>,
in_test_count: usize,
in_new_line: bool,
last_wait_output_level: usize,
cwd: Url,
did_have_user_output: bool,
started_tests: bool,
}
impl PrettyTestReporter {
@@ -244,16 +250,18 @@ impl PrettyTestReporter {
PrettyTestReporter {
concurrent,
echo_output,
in_test_count: 0,
in_new_line: true,
deferred_step_output: HashMap::new(),
last_wait_output_level: 0,
cwd: Url::from_directory_path(std::env::current_dir().unwrap()).unwrap(),
did_have_user_output: false,
started_tests: false,
}
}
fn force_report_wait(&mut self, description: &TestDescription) {
print!("{} ...", description.name);
self.in_new_line = false;
// flush for faster feedback when line buffered
std::io::stdout().flush().unwrap();
self.last_wait_output_level = 0;
@@ -278,6 +286,7 @@ impl PrettyTestReporter {
println!();
}
print!("{}{} ...", " ".repeat(description.level), description.name);
self.in_new_line = false;
// flush for faster feedback when line buffered
std::io::stdout().flush().unwrap();
self.last_wait_output_level = description.level;
@@ -316,11 +325,13 @@ impl PrettyTestReporter {
println!("{}{}", " ".repeat(description.level + 1), line);
}
}
self.in_new_line = true;
}
fn write_output_end(&mut self) -> bool {
if self.did_have_user_output {
println!("{}", colors::gray("----- output end -----"));
self.in_new_line = true;
self.did_have_user_output = false;
true
} else {
@@ -341,13 +352,14 @@ impl TestReporter for PrettyTestReporter {
self.to_relative_path_or_remote_url(&plan.origin)
))
);
self.in_new_line = true;
}
fn report_wait(&mut self, description: &TestDescription) {
if !self.concurrent {
self.force_report_wait(description);
}
self.in_test_count += 1;
self.started_tests = true;
}
fn report_output(&mut self, output: &[u8]) {
@@ -355,10 +367,11 @@
return;
}
if !self.did_have_user_output && self.in_test_count > 0 {
if !self.did_have_user_output && self.started_tests {
self.did_have_user_output = true;
println!();
println!("{}", colors::gray("------- output -------"));
self.in_new_line = true;
}
// output everything to stdout in order to prevent
@@ -372,8 +385,6 @@
result: &TestResult,
elapsed: u64,
) {
self.in_test_count -= 1;
if self.concurrent {
self.force_report_wait(description);
@@ -414,6 +425,21 @@
status,
colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
);
self.in_new_line = true;
}
fn report_uncaught_error(&mut self, origin: &str, _error: &JsError) {
if !self.in_new_line {
println!();
}
println!(
"Uncaught error from {} {}",
self.to_relative_path_or_remote_url(origin),
colors::red("FAILED")
);
self.in_new_line = true;
self.last_wait_output_level = 0;
self.did_have_user_output = false;
}
fn report_step_wait(&mut self, description: &TestStepDescription) {
@@ -450,31 +476,65 @@
}
fn report_summary(&mut self, summary: &TestSummary, elapsed: &Duration) {
if !summary.failures.is_empty() {
if !summary.failures.is_empty() || !summary.uncaught_errors.is_empty() {
#[allow(clippy::type_complexity)] // Type alias doesn't look better here
let mut failures_by_origin: BTreeMap<
String,
(Vec<(&TestDescription, &JsError)>, Option<&JsError>),
> = BTreeMap::default();
let mut failure_titles = vec![];
println!("\n{}\n", colors::white_bold_on_red(" ERRORS "));
for (description, js_error) in &summary.failures {
let failure_title = format!(
"{} {}",
&description.name,
colors::gray(format!(
"=> {}:{}:{}",
self
.to_relative_path_or_remote_url(&description.location.file_name),
description.location.line_number,
description.location.column_number
))
);
println!("{}", &failure_title);
println!(
"{}: {}",
colors::red_bold("error"),
format_test_error(js_error)
);
println!();
failure_titles.push(failure_title);
let (failures, _) = failures_by_origin
.entry(description.origin.clone())
.or_default();
failures.push((description, js_error.as_ref()));
}
for (origin, js_error) in &summary.uncaught_errors {
let (_, uncaught_error) =
failures_by_origin.entry(origin.clone()).or_default();
let _ = uncaught_error.insert(js_error.as_ref());
}
println!("\n{}\n", colors::white_bold_on_red(" ERRORS "));
for (origin, (failures, uncaught_error)) in failures_by_origin {
for (description, js_error) in failures {
let failure_title = format!(
"{} {}",
&description.name,
colors::gray(format!(
"=> {}:{}:{}",
self.to_relative_path_or_remote_url(
&description.location.file_name
),
description.location.line_number,
description.location.column_number
))
);
println!("{}", &failure_title);
println!(
"{}: {}",
colors::red_bold("error"),
format_test_error(js_error)
);
println!();
failure_titles.push(failure_title);
}
if let Some(js_error) = uncaught_error {
let failure_title = format!(
"{} (uncaught error)",
self.to_relative_path_or_remote_url(&origin)
);
println!("{}", &failure_title);
println!(
"{}: {}",
colors::red_bold("error"),
format_test_error(js_error)
);
println!("This error was not caught from a test and caused the test runner to fail on the referenced module.");
println!("It most likely originated from a dangling promise, event/timeout handler or top-level code.");
println!();
failure_titles.push(failure_title);
}
}
println!("{}\n", colors::white_bold_on_red(" FAILURES "));
for failure_title in failure_titles {
println!("{}", failure_title);
@@ -510,6 +570,7 @@
colors::gray(
format!("({})", display::human_elapsed(elapsed.as_millis()))),
);
self.in_new_line = true;
}
}
@@ -586,7 +647,7 @@ async fn test_specifier(
permissions: Permissions,
specifier: ModuleSpecifier,
mode: TestMode,
sender: TestEventSender,
sender: &TestEventSender,
options: TestSpecifierOptions,
) -> Result<(), AnyError> {
let mut worker = create_main_worker(
@@ -959,14 +1020,30 @@ async fn test_specifiers(
let permissions = permissions.clone();
let specifier = specifier.clone();
let mode = mode.clone();
let sender = sender.clone();
let mut sender = sender.clone();
let options = options.clone();
tokio::task::spawn_blocking(move || {
let future =
test_specifier(ps, permissions, specifier, mode, sender, options);
run_basic(future)
let origin = specifier.to_string();
let file_result = run_basic(test_specifier(
ps,
permissions,
specifier,
mode,
&sender,
options,
));
if let Err(error) = file_result {
if error.is::<JsError>() {
sender.send(TestEvent::UncaughtError(
origin,
Box::new(error.downcast::<JsError>().unwrap()),
))?;
} else {
return Err(error);
}
}
Ok(())
})
});
@@ -1021,6 +1098,12 @@
reporter.report_result(&description, &result, elapsed);
}
TestEvent::UncaughtError(origin, error) => {
reporter.report_uncaught_error(&origin, &error);
summary.failed += 1;
summary.uncaught_errors.push((origin, error));
}
TestEvent::StepWait(description) => {
reporter.report_step_wait(&description);
}