
feat(test): Show Deno.test() call locations for failures (#14484)

Nayeem Rahman 2022-05-05 00:15:54 +01:00 committed by GitHub
parent 6a21fe745a
commit ca134d25e1
20 changed files with 187 additions and 189 deletions
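The change is easiest to see in the updated test expectations below: failure entries of the form "./test/fail.ts > test 0" become "test 0 => ./test/fail.ts:1:6", i.e. each failure is now labelled with the file, line and column of the Deno.test() call that registered it. As a rough sketch (the source of ./test/fail.ts is not part of this diff, so the body shown here is an assumption), a test file such as

Deno.test("test 0", () => {
  throw new Error();
});

would now produce a failure summary entry like

test 0 => ./test/fail.ts:1:6

where 1:6 points at the Deno.test() call itself rather than at the throw site inside the test body.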


@@ -4,8 +4,8 @@ Failed assertion ... FAILED ([WILDCARD])
failures:
./compat/test_runner/cjs.js > Failed assertion
AssertionError: Values are not strictly equal:
Failed assertion => ./compat/test_runner/cjs.js:[WILDCARD]
error: AssertionError: Values are not strictly equal:
[Diff] Actual / Expected
@@ -20,8 +20,7 @@ AssertionError: Values are not strictly equal:
failures:
./compat/test_runner/cjs.js
Failed assertion
Failed assertion => ./compat/test_runner/cjs.js:[WILDCARD]
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -4,8 +4,8 @@ Failed assertion ... FAILED ([WILDCARD])
failures:
./compat/test_runner/esm.mjs > Failed assertion
AssertionError: Values are not strictly equal:
Failed assertion => ./compat/test_runner/esm.mjs:[WILDCARD]
error: AssertionError: Values are not strictly equal:
[Diff] Actual / Expected
@@ -20,8 +20,7 @@ AssertionError: Values are not strictly equal:
failures:
./compat/test_runner/esm.mjs
Failed assertion
Failed assertion => ./compat/test_runner/esm.mjs:[WILDCARD]
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -3,8 +3,8 @@ aggregate ... FAILED ([WILDCARD])
failures:
./test/aggregate_error.ts > aggregate
AggregateError
aggregate => ./test/aggregate_error.ts:[WILDCARD]
error: AggregateError
Error: Error 1
at [WILDCARD]/testdata/test/aggregate_error.ts:2:18
Error: Error 2
@@ -15,8 +15,7 @@ AggregateError
failures:
./test/aggregate_error.ts
aggregate
aggregate => ./test/aggregate_error.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -10,43 +10,42 @@ hrtime ... FAILED [WILDCARD]
failures:
./test/allow_none.ts > read
PermissionDenied: Can't escalate parent thread permissions
read => ./test/allow_none.ts:[WILDCARD]
error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
./test/allow_none.ts > write
PermissionDenied: Can't escalate parent thread permissions
write => ./test/allow_none.ts:[WILDCARD]
error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
./test/allow_none.ts > net
PermissionDenied: Can't escalate parent thread permissions
net => ./test/allow_none.ts:[WILDCARD]
error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
./test/allow_none.ts > env
PermissionDenied: Can't escalate parent thread permissions
env => ./test/allow_none.ts:[WILDCARD]
error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
./test/allow_none.ts > run
PermissionDenied: Can't escalate parent thread permissions
run => ./test/allow_none.ts:[WILDCARD]
error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
./test/allow_none.ts > ffi
PermissionDenied: Can't escalate parent thread permissions
ffi => ./test/allow_none.ts:[WILDCARD]
error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
./test/allow_none.ts > hrtime
PermissionDenied: Can't escalate parent thread permissions
hrtime => ./test/allow_none.ts:[WILDCARD]
error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
failures:
./test/allow_none.ts
read
write
net
env
run
ffi
hrtime
read => ./test/allow_none.ts:[WILDCARD]
write => ./test/allow_none.ts:[WILDCARD]
net => ./test/allow_none.ts:[WILDCARD]
env => ./test/allow_none.ts:[WILDCARD]
run => ./test/allow_none.ts:[WILDCARD]
ffi => ./test/allow_none.ts:[WILDCARD]
hrtime => ./test/allow_none.ts:[WILDCARD]
test result: FAILED. 0 passed; 7 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]


@@ -6,22 +6,22 @@ exit(2) ... FAILED ([WILDCARD])
failures:
./test/exit_sanitizer.ts > exit(0)
AssertionError: Test case attempted to exit with exit code: 0
exit(0) => ./test/exit_sanitizer.ts:[WILDCARD]
error: AssertionError: Test case attempted to exit with exit code: 0
Deno.exit(0);
^
at [WILDCARD]
at [WILDCARD]/test/exit_sanitizer.ts:2:8
./test/exit_sanitizer.ts > exit(1)
AssertionError: Test case attempted to exit with exit code: 1
exit(1) => ./test/exit_sanitizer.ts:[WILDCARD]
error: AssertionError: Test case attempted to exit with exit code: 1
Deno.exit(1);
^
at [WILDCARD]
at [WILDCARD]/test/exit_sanitizer.ts:6:8
./test/exit_sanitizer.ts > exit(2)
AssertionError: Test case attempted to exit with exit code: 2
exit(2) => ./test/exit_sanitizer.ts:[WILDCARD]
error: AssertionError: Test case attempted to exit with exit code: 2
Deno.exit(2);
^
at [WILDCARD]
@@ -29,10 +29,9 @@ AssertionError: Test case attempted to exit with exit code: 2
failures:
./test/exit_sanitizer.ts
exit(0)
exit(1)
exit(2)
exit(0) => ./test/exit_sanitizer.ts:[WILDCARD]
exit(1) => ./test/exit_sanitizer.ts:[WILDCARD]
exit(2) => ./test/exit_sanitizer.ts:[WILDCARD]
test result: FAILED. 0 passed; 3 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -13,79 +13,78 @@ test 9 ... FAILED ([WILDCARD])
failures:
./test/fail.ts > test 0
Error
test 0 => ./test/fail.ts:1:6
error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:2:9
./test/fail.ts > test 1
Error
test 1 => ./test/fail.ts:4:6
error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:5:9
./test/fail.ts > test 2
Error
test 2 => ./test/fail.ts:7:6
error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:8:9
./test/fail.ts > test 3
Error
test 3 => ./test/fail.ts:10:6
error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:11:9
./test/fail.ts > test 4
Error
test 4 => ./test/fail.ts:13:6
error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:14:9
./test/fail.ts > test 5
Error
test 5 => ./test/fail.ts:16:6
error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:17:9
./test/fail.ts > test 6
Error
test 6 => ./test/fail.ts:19:6
error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:20:9
./test/fail.ts > test 7
Error
test 7 => ./test/fail.ts:22:6
error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:23:9
./test/fail.ts > test 8
Error
test 8 => ./test/fail.ts:25:6
error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:26:9
./test/fail.ts > test 9
Error
test 9 => ./test/fail.ts:28:6
error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:29:9
failures:
./test/fail.ts
test 0
test 1
test 2
test 3
test 4
test 5
test 6
test 7
test 8
test 9
test 0 => ./test/fail.ts:1:6
test 1 => ./test/fail.ts:4:6
test 2 => ./test/fail.ts:7:6
test 3 => ./test/fail.ts:10:6
test 4 => ./test/fail.ts:13:6
test 5 => ./test/fail.ts:16:6
test 6 => ./test/fail.ts:19:6
test 7 => ./test/fail.ts:22:6
test 8 => ./test/fail.ts:25:6
test 9 => ./test/fail.ts:28:6
test result: FAILED. 0 passed; 10 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -4,16 +4,15 @@ test 1 ... FAILED ([WILDCARD])
failures:
./test/fail_fast.ts > test 1
Error
test 1 => ./test/fail_fast.ts:[WILDCARD]
error: Error
throw new Error();
^
at [WILDCARD]/test/fail_fast.ts:2:9
failures:
./test/fail_fast.ts
test 1
test 1 => ./test/fail_fast.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -5,19 +5,19 @@ test test 2 ... FAILED ([WILDCARD])
failures:
test 1
Error
test 1 => ./test/fail_fast_with_val.ts:[WILDCARD]
error: Error
at [WILDCARD]/test/fail_fast_with_val.ts:2:9
at [WILDCARD]
test 2
Error
test 2 => ./test/fail_fast_with_val.ts:[WILDCARD]
error: Error
at [WILDCARD]/test/fail_fast_with_val.ts:5:9
at [WILDCARD]
failures:
test 1
test 2
test 1 => ./test/fail_fast_with_val.ts:[WILDCARD]
test 2 => ./test/fail_fast_with_val.ts:[WILDCARD]
test result: FAILED. 0 passed; 2 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -5,16 +5,15 @@ success ... ok ([WILDCARD])
failures:
./test/finally_timeout.ts > error
Error: fail
error => ./test/finally_timeout.ts:[WILDCARD]
error: Error: fail
throw new Error("fail");
^
at [WILDCARD]/test/finally_timeout.ts:4:11
failures:
./test/finally_timeout.ts
error
error => ./test/finally_timeout.ts:[WILDCARD]
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -3,14 +3,13 @@ no prompt ... FAILED ([WILDCARD]ms)
failures:
./test/no_prompt_by_default.ts > no prompt
PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
no prompt => ./test/no_prompt_by_default.ts:[WILDCARD]
error: PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
[WILDCARD]
failures:
./test/no_prompt_by_default.ts
no prompt
no prompt => ./test/no_prompt_by_default.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD]ms)


@@ -3,14 +3,13 @@ no prompt ... FAILED ([WILDCARD]ms)
failures:
./test/no_prompt_with_denied_perms.ts > no prompt
PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
no prompt => ./test/no_prompt_with_denied_perms.ts:[WILDCARD]
error: PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
[WILDCARD]
failures:
./test/no_prompt_with_denied_perms.ts
no prompt
no prompt => ./test/no_prompt_with_denied_perms.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD]ms)


@@ -4,8 +4,8 @@ test 1 ... FAILED [WILDCARD]
failures:
./test/ops_sanitizer_missing_details.ts > test 1
Test case is leaking async ops.
test 1 => ./test/ops_sanitizer_missing_details.ts:[WILDCARD]
error: Test case is leaking async ops.
- 1 async operation to op_write was started in this test, but never completed.
@@ -13,8 +13,7 @@ To get more details where ops were leaked, run again with --trace-ops flag.
failures:
./test/ops_sanitizer_missing_details.ts
test 1
test 1 => ./test/ops_sanitizer_missing_details.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]


@@ -5,8 +5,8 @@ test 2 ... FAILED ([WILDCARD])
failures:
./test/ops_sanitizer_multiple_timeout_tests.ts > test 1
Test case is leaking async ops.
test 1 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
error: Test case is leaking async ops.
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operations were started here:
at [WILDCARD]
@@ -21,8 +21,8 @@ Test case is leaking async ops.
at [WILDCARD]/testdata/test/ops_sanitizer_multiple_timeout_tests.ts:8:27
at [WILDCARD]
./test/ops_sanitizer_multiple_timeout_tests.ts > test 2
Test case is leaking async ops.
test 2 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
error: Test case is leaking async ops.
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operations were started here:
at [WILDCARD]
@@ -39,9 +39,8 @@ Test case is leaking async ops.
failures:
./test/ops_sanitizer_multiple_timeout_tests.ts
test 1
test 2
test 1 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
test 2 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
test result: FAILED. 0 passed; 2 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -5,15 +5,15 @@ test 2 ... FAILED ([WILDCARD])
failures:
./test/ops_sanitizer_multiple_timeout_tests.ts > test 1
Test case is leaking async ops.
test 1 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
error: Test case is leaking async ops.
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call.
To get more details where ops were leaked, run again with --trace-ops flag.
./test/ops_sanitizer_multiple_timeout_tests.ts > test 2
Test case is leaking async ops.
test 2 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
error: Test case is leaking async ops.
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call.
@@ -21,9 +21,8 @@ To get more details where ops were leaked, run again with --trace-ops flag.
failures:
./test/ops_sanitizer_multiple_timeout_tests.ts
test 1
test 2
test 1 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
test 2 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
test result: FAILED. 0 passed; 2 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -5,8 +5,8 @@ leak interval ... FAILED ([WILDCARD])
failures:
./test/ops_sanitizer_unstable.ts > leak interval
Test case is leaking async ops.
leak interval => ./test/ops_sanitizer_unstable.ts:[WILDCARD]
error: Test case is leaking async ops.
- 1 async operation to sleep for a duration was started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operation was started here:
at [WILDCARD]
@@ -16,8 +16,7 @@ Test case is leaking async ops.
failures:
./test/ops_sanitizer_unstable.ts
leak interval
leak interval => ./test/ops_sanitizer_unstable.ts:[WILDCARD]
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -4,8 +4,8 @@ leak ... FAILED ([WILDCARD])
failures:
./test/resource_sanitizer.ts > leak
AssertionError: Test case is leaking 2 resources:
leak => ./test/resource_sanitizer.ts:[WILDCARD]
error: AssertionError: Test case is leaking 2 resources:
- The stdin pipe (rid 0) was opened before the test started, but was closed during the test. Do not close resources in a test that were not created during that test.
- A file (rid 3) was opened during the test, but not closed during the test. Close the file handle by calling `file.close()`.
@@ -14,8 +14,7 @@ AssertionError: Test case is leaking 2 resources:
failures:
./test/resource_sanitizer.ts
leak
leak => ./test/resource_sanitizer.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -3,7 +3,7 @@ running 3 tests from ./test/steps/failing_steps.ts
nested failure ...
step 1 ...
inner 1 ... FAILED ([WILDCARD])
Error: Failed.
error: Error: Failed.
throw new Error("Failed.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
@@ -13,12 +13,12 @@ nested failure ...
FAILED ([WILDCARD])
multiple test step failures ...
step 1 ... FAILED ([WILDCARD])
Error: Fail.
error: Error: Fail.
throw new Error("Fail.");
^
[WILDCARD]
step 2 ... FAILED ([WILDCARD])
Error: Fail.
error: Error: Fail.
await t.step("step 2", () => Promise.reject(new Error("Fail.")));
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
@@ -26,7 +26,7 @@ multiple test step failures ...
FAILED ([WILDCARD])
failing step in failing test ...
step 1 ... FAILED ([WILDCARD])
Error: Fail.
error: Error: Fail.
throw new Error("Fail.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
@@ -35,28 +35,27 @@ FAILED ([WILDCARD])
failures:
./test/steps/failing_steps.ts > nested failure
Error: 1 test step failed.
nested failure => ./test/steps/failing_steps.ts:[WILDCARD]
error: Error: 1 test step failed.
at runTest (deno:runtime/js/40_testing.js:[WILDCARD])
at async Object.runTests (deno:runtime/js/40_testing.js:[WILDCARD])
./test/steps/failing_steps.ts > multiple test step failures
Error: 2 test steps failed.
multiple test step failures => ./test/steps/failing_steps.ts:[WILDCARD]
error: Error: 2 test steps failed.
at runTest (deno:runtime/js/40_testing.js:[WILDCARD])
at async Object.runTests (deno:runtime/js/40_testing.js:[WILDCARD])
./test/steps/failing_steps.ts > failing step in failing test
Error: Fail test.
failing step in failing test => ./test/steps/failing_steps.ts:[WILDCARD]
error: Error: Fail test.
throw new Error("Fail test.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
failures:
./test/steps/failing_steps.ts
nested failure
multiple test step failures
failing step in failing test
nested failure => ./test/steps/failing_steps.ts:[WILDCARD]
multiple test step failures => ./test/steps/failing_steps.ts:[WILDCARD]
failing step in failing test => ./test/steps/failing_steps.ts:[WILDCARD]
test result: FAILED. 0 passed (1 step); 3 failed (5 steps); 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -9,11 +9,11 @@ FAILED ([WILDCARD])
inner missing await ...
step ...
inner ... pending ([WILDCARD])
Error: Parent scope completed before test step finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
error: Error: Parent scope completed before test step finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
at [WILDCARD]
at async TestContext.step [WILDCARD]
FAILED ([WILDCARD])
Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
error: Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
await t.step("step", (t) => {
^
at [WILDCARD]
@@ -22,7 +22,7 @@ FAILED ([WILDCARD])
parallel steps with sanitizers ...
step 1 ... pending ([WILDCARD])
step 2 ... FAILED ([WILDCARD])
Error: Cannot start test step while another test step with sanitizers is running.
error: Error: Cannot start test step while another test step with sanitizers is running.
* parallel steps with sanitizers > step 1
await t.step("step 2", () => {});
^
@@ -32,7 +32,7 @@ FAILED ([WILDCARD])
parallel steps when first has sanitizer ...
step 1 ... pending ([WILDCARD])
step 2 ... FAILED ([WILDCARD])
Error: Cannot start test step while another test step with sanitizers is running.
error: Error: Cannot start test step while another test step with sanitizers is running.
* parallel steps when first has sanitizer > step 1
await t.step({
^
@@ -42,7 +42,7 @@ FAILED ([WILDCARD])
parallel steps when second has sanitizer ...
step 1 ... ok ([WILDCARD])
step 2 ... FAILED ([WILDCARD])
Error: Cannot start test step with sanitizers while another test step is running.
error: Error: Cannot start test step with sanitizers while another test step is running.
* parallel steps when second has sanitizer > step 1
await t.step({
^
@@ -55,7 +55,7 @@ parallel steps where only inner tests have sanitizers ...
ok ([WILDCARD])
step 2 ...
step inner ... FAILED ([WILDCARD])
Error: Cannot start test step with sanitizers while another test step is running.
error: Error: Cannot start test step with sanitizers while another test step is running.
* parallel steps where only inner tests have sanitizers > step 1
await t.step({
^
@@ -66,49 +66,48 @@ FAILED ([WILDCARD])
failures:
./test/steps/invalid_usage.ts > capturing
Error: Cannot run test step after parent scope has finished execution. Ensure any `.step(...)` calls are executed before their parent scope completes execution.
capturing => ./test/steps/invalid_usage.ts:[WILDCARD]
error: Error: Cannot run test step after parent scope has finished execution. Ensure any `.step(...)` calls are executed before their parent scope completes execution.
await capturedContext.step("next step", () => {});
^
at TestContext.step ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
./test/steps/invalid_usage.ts > top level missing await
Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
top level missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
error: Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
at postValidation [WILDCARD]
at testStepSanitizer ([WILDCARD])
[WILDCARD]
./test/steps/invalid_usage.ts > inner missing await
Error: 1 test step failed.
inner missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
error: Error: 1 test step failed.
at [WILDCARD]
./test/steps/invalid_usage.ts > parallel steps with sanitizers
Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
parallel steps with sanitizers => ./test/steps/invalid_usage.ts:[WILDCARD]
error: Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
at postValidation [WILDCARD]
at testStepSanitizer ([WILDCARD])
[WILDCARD]
./test/steps/invalid_usage.ts > parallel steps when first has sanitizer
Error: 1 test step failed.
parallel steps when first has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
error: Error: 1 test step failed.
at runTest ([WILDCARD])
at [WILDCARD]
./test/steps/invalid_usage.ts > parallel steps when second has sanitizer
Error: 1 test step failed.
parallel steps when second has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
error: Error: 1 test step failed.
at runTest ([WILDCARD])
at [WILDCARD]
failures:
./test/steps/invalid_usage.ts
capturing
top level missing await
inner missing await
parallel steps with sanitizers
parallel steps when first has sanitizer
parallel steps when second has sanitizer
parallel steps where only inner tests have sanitizers
capturing => ./test/steps/invalid_usage.ts:[WILDCARD]
top level missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
inner missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
parallel steps with sanitizers => ./test/steps/invalid_usage.ts:[WILDCARD]
parallel steps when first has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
parallel steps when second has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
parallel steps where only inner tests have sanitizers => ./test/steps/invalid_usage.ts:[WILDCARD]
test result: FAILED. 0 passed (4 steps); 7 failed (10 steps); 0 ignored; 0 measured; 0 filtered out ([WILDCARD])


@@ -51,7 +51,6 @@ use rand::seq::SliceRandom;
use rand::SeedableRng;
use regex::Regex;
use serde::Deserialize;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::io::Read;
@@ -76,11 +75,20 @@ pub enum TestMode {
Both,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
#[serde(rename_all = "camelCase")]
pub struct TestLocation {
pub file_name: String,
pub line_number: u32,
pub column_number: u32,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
#[serde(rename_all = "camelCase")]
pub struct TestDescription {
pub origin: String,
pub name: String,
pub location: TestLocation,
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
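Because of #[serde(rename_all = "camelCase")], the TestLocation and TestDescription structs above deserialize the plain object built on the JS side (see the runtime JS hunks at the end of this commit). A hypothetical TypeScript mirror of that shape, shown only for reference:

// Hypothetical TS mirror of the Rust structs above; the camelCase field
// names are what serde maps onto file_name / line_number / column_number.
interface TestLocation {
  fileName: string;
  lineNumber: number;
  columnNumber: number;
}

interface TestDescription {
  origin: string;
  name: string;
  location: TestLocation;
}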
@@ -303,6 +311,7 @@ impl PrettyTestReporter {
if let Some(js_error) = result.error() {
let err_string = format_test_error(js_error);
let err_string = format!("{}: {}", colors::red_bold("error"), err_string);
for line in err_string.lines() {
println!("{}{}", " ".repeat(description.level + 1), line);
}
@@ -442,38 +451,33 @@ impl TestReporter for PrettyTestReporter {
fn report_summary(&mut self, summary: &TestSummary, elapsed: &Duration) {
if !summary.failures.is_empty() {
let mut failure_titles = vec![];
println!("\nfailures:\n");
for (description, js_error) in &summary.failures {
println!(
"{} {} {}",
colors::gray(
self.to_relative_path_or_remote_url(&description.origin)
),
colors::gray(">"),
description.name
let failure_title = format!(
"{} {}",
&description.name,
colors::gray(format!(
"=> {}:{}:{}",
self
.to_relative_path_or_remote_url(&description.location.file_name),
description.location.line_number,
description.location.column_number
))
);
println!("{}", &failure_title);
println!(
"{}: {}",
colors::red_bold("error"),
format_test_error(js_error)
);
println!("{}", format_test_error(js_error));
println!();
}
let mut grouped_by_origin: BTreeMap<String, Vec<String>> =
BTreeMap::default();
for (description, _) in &summary.failures {
let test_names = grouped_by_origin
.entry(description.origin.clone())
.or_default();
test_names.push(description.name.clone());
failure_titles.push(failure_title);
}
println!("failures:\n");
for (origin, test_names) in &grouped_by_origin {
println!(
"\t{}",
colors::gray(self.to_relative_path_or_remote_url(origin))
);
for test_name in test_names {
println!("\t{}", test_name);
}
for failure_title in failure_titles {
println!("{}", failure_title);
}
}


@@ -635,6 +635,16 @@
);
}
const jsError = Deno.core.destructureError(new Error());
// Note: There might pop up a case where one of the filename, line number or
// column number from the caller isn't defined. We assume never for now.
// Make `TestDescription::location` optional if such a case is found.
testDef.location = {
fileName: jsError.frames[1].fileName,
lineNumber: jsError.frames[1].lineNumber,
columnNumber: jsError.frames[1].columnNumber,
};
ArrayPrototypePush(tests, testDef);
}
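Deno.core.destructureError() is an internal binding that returns an error's stack as structured frames; frames[1] is presumably the user's Deno.test() call site, since frames[0] belongs to the registering function that created the Error. A rough standalone sketch of the same technique using only a plain Error stack string (hypothetical helper names, not the runtime's actual code):

// Hypothetical sketch: recover the caller's location by parsing new Error().stack.
// Stack line 0 is "Error", line 1 is registerTest itself, line 2 is the caller,
// which is roughly what jsError.frames[1] gives the runtime above.
function registerTest(name: string, _fn: () => void | Promise<void>) {
  const frame = (new Error().stack ?? "").split("\n")[2] ?? "";
  const m = frame.match(/([^()\s]+):(\d+):(\d+)\)?$/);
  const location = m
    ? { fileName: m[1], lineNumber: Number(m[2]), columnNumber: Number(m[3]) }
    : undefined; // the comment in the hunk above assumes this never happens
  console.log(name, location);
}

// Called from, say, line 1 of a test module, this logs something like:
//   test 0 { fileName: "file:///.../my_test.ts", lineNumber: 1, columnNumber: 1 }
registerTest("test 0", () => {});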
@@ -1097,6 +1107,7 @@
const description = {
origin,
name: test.name,
location: test.location,
};
const earlier = DateNow();