mirror of
https://github.com/denoland/deno.git
synced 2025-01-03 04:48:52 -05:00
feat(test): print pending tests on sigint (#18246)
This commit is contained in:
  parent fe88b53e50 · commit 8a4865c379
16 changed files with 649 additions and 584 deletions
cli/js/40_testing.js

@@ -142,7 +142,8 @@ function assertOps(fn) {
     const pre = core.metrics();
     const preTraces = new Map(core.opCallTraces);
     try {
-      await fn(desc);
+      const innerResult = await fn(desc);
+      if (innerResult) return innerResult;
     } finally {
       // Defer until next event loop turn - that way timeouts and intervals
       // cleared can actually be removed from resource table, otherwise
@@ -150,9 +151,6 @@ function assertOps(fn) {
       await opSanitizerDelay();
       await opSanitizerDelay();
     }
 
-    if (shouldSkipSanitizers(desc)) return;
-
     const post = core.metrics();
     const postTraces = new Map(core.opCallTraces);
-
@@ -161,7 +159,7 @@ function assertOps(fn) {
     const dispatchedDiff = post.opsDispatchedAsync - pre.opsDispatchedAsync;
     const completedDiff = post.opsCompletedAsync - pre.opsCompletedAsync;
 
-    if (dispatchedDiff === completedDiff) return;
+    if (dispatchedDiff === completedDiff) return null;
 
     const details = [];
     for (const key in post.ops) {
@@ -215,19 +213,7 @@ function assertOps(fn) {
         );
       }
     }
 
-    let msg = `Test case is leaking async ops.
-
- - ${ArrayPrototypeJoin(details, "\n - ")}`;
-
-    if (!core.isOpCallTracingEnabled()) {
-      msg +=
-        `\n\nTo get more details where ops were leaked, run again with --trace-ops flag.`;
-    } else {
-      msg += "\n";
-    }
-
-    throw assert(false, msg);
+    return { failed: { leakedOps: [details, core.isOpCallTracingEnabled()] } };
   };
 }
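For context: the hunks above change the op sanitizer's contract from "throw an AssertionError" to "resolve to a structured failure value". A minimal TypeScript sketch of that convention follows; the names (withOpSanitizer, countPendingOps) are hypothetical stand-ins, not Deno internals:

    // A sanitizer wrapper now resolves to `null` on success and to a
    // structured failure object on leak, instead of throwing.
    type SanitizerResult =
      | null
      | { failed: { leakedOps: [string[], boolean] } };

    async function withOpSanitizer(
      fn: () => Promise<void>,
      countPendingOps: () => number, // stands in for core.metrics()
    ): Promise<SanitizerResult> {
      const before = countPendingOps();
      await fn();
      const after = countPendingOps();
      if (after === before) return null; // mirrors `return null` above
      const details = [`${after - before} async operation(s) never completed`];
      const tracingEnabled = false; // stands in for core.isOpCallTracingEnabled()
      return { failed: { leakedOps: [details, tracingEnabled] } };
    }

Returning a value rather than throwing lets the runner keep reporting results for the remaining tests, which is what makes the SIGINT pending-test report possible.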
@@ -372,12 +358,8 @@ function assertResources(fn) {
   /** @param desc {TestDescription | TestStepDescription} */
   return async function resourceSanitizer(desc) {
     const pre = core.resources();
-    await fn(desc);
-
-    if (shouldSkipSanitizers(desc)) {
-      return;
-    }
-
+    const innerResult = await fn(desc);
+    if (innerResult) return innerResult;
     const post = core.resources();
 
     const allResources = new Set([
@@ -404,14 +386,10 @@ function assertResources(fn) {
         ArrayPrototypePush(details, detail);
       }
     }
 
-    const message = `Test case is leaking ${details.length} resource${
-      details.length === 1 ? "" : "s"
-    }:
-
- - ${details.join("\n - ")}
-`;
-    assert(details.length === 0, message);
+    if (details.length == 0) {
+      return null;
+    }
+    return { failed: { leakedResources: details } };
   };
 }
@@ -429,9 +407,8 @@ function assertExit(fn, isTest) {
     });
 
     try {
-      await fn(...new SafeArrayIterator(params));
-    } catch (err) {
-      throw err;
+      const innerResult = await fn(...new SafeArrayIterator(params));
+      if (innerResult) return innerResult;
     } finally {
       setExitHandler(null);
     }
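Each sanitizer above now forwards `innerResult` because the wrappers nest, and the innermost failure must surface through every layer. A short TypeScript sketch of why that pattern works (the names compose/outerCheck are illustrative, not from the patch):

    type Failure = { failed: unknown };
    type Wrapped = () => Promise<Failure | null>;

    function compose(outerCheck: () => Failure | null, inner: Wrapped): Wrapped {
      return async () => {
        const innerResult = await inner();
        if (innerResult) return innerResult; // first failure wins, as above
        return outerCheck(); // otherwise each layer may contribute its own
      };
    }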
@@ -441,81 +418,54 @@
 function assertTestStepScopes(fn) {
   /** @param desc {TestDescription | TestStepDescription} */
   return async function testStepSanitizer(desc) {
-    preValidation();
-    // only report waiting after pre-validation
-    if (canStreamReporting(desc) && "parent" in desc) {
-      stepReportWait(desc);
-    }
+    function getRunningStepDescs() {
+      const results = [];
+      let childDesc = desc;
+      while (childDesc.parent != null) {
+        const state = MapPrototypeGet(testStates, childDesc.parent.id);
+        for (const siblingDesc of state.children) {
+          if (siblingDesc.id == childDesc.id) {
+            continue;
+          }
+          const siblingState = MapPrototypeGet(testStates, siblingDesc.id);
+          if (!siblingState.completed) {
+            ArrayPrototypePush(results, siblingDesc);
+          }
+        }
+        childDesc = childDesc.parent;
+      }
+      return results;
+    }
+    const runningStepDescs = getRunningStepDescs();
+    const runningStepDescsWithSanitizers = ArrayPrototypeFilter(
+      runningStepDescs,
+      (d) => usesSanitizer(d),
+    );
+
+    if (runningStepDescsWithSanitizers.length > 0) {
+      return {
+        failed: {
+          overlapsWithSanitizers: runningStepDescsWithSanitizers.map(
+            getFullName,
+          ),
+        },
+      };
+    }
+
+    if (usesSanitizer(desc) && runningStepDescs.length > 0) {
+      return {
+        failed: { hasSanitizersAndOverlaps: runningStepDescs.map(getFullName) },
+      };
+    }
     await fn(MapPrototypeGet(testStates, desc.id).context);
-    testStepPostValidation(desc);
-
-    function preValidation() {
-      const runningStepDescs = getRunningStepDescs();
-      const runningStepDescsWithSanitizers = ArrayPrototypeFilter(
-        runningStepDescs,
-        (d) => usesSanitizer(d),
-      );
-
-      if (runningStepDescsWithSanitizers.length > 0) {
-        throw new Error(
-          "Cannot start test step while another test step with sanitizers is running.\n" +
-            runningStepDescsWithSanitizers
-              .map((d) => ` * ${getFullName(d)}`)
-              .join("\n"),
-        );
-      }
-
-      if (usesSanitizer(desc) && runningStepDescs.length > 0) {
-        throw new Error(
-          "Cannot start test step with sanitizers while another test step is running.\n" +
-            runningStepDescs.map((d) => ` * ${getFullName(d)}`).join("\n"),
-        );
-      }
-    }
-
-    function getRunningStepDescs() {
-      const results = [];
-      let childDesc = desc;
-      while (childDesc.parent != null) {
-        const state = MapPrototypeGet(testStates, childDesc.parent.id);
-        for (const siblingDesc of state.children) {
-          if (siblingDesc.id == childDesc.id) {
-            continue;
-          }
-          const siblingState = MapPrototypeGet(testStates, siblingDesc.id);
-          if (!siblingState.finalized) {
-            ArrayPrototypePush(results, siblingDesc);
-          }
-        }
-        childDesc = childDesc.parent;
-      }
-      return results;
-    }
+    for (const childDesc of MapPrototypeGet(testStates, desc.id).children) {
+      if (!MapPrototypeGet(testStates, childDesc.id).completed) {
+        return { failed: "incompleteSteps" };
+      }
+    }
   };
 }
-
-function testStepPostValidation(desc) {
-  // check for any running steps
-  for (const childDesc of MapPrototypeGet(testStates, desc.id).children) {
-    if (MapPrototypeGet(testStates, childDesc.id).status == "pending") {
-      throw new Error(
-        "There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).",
-      );
-    }
-  }
-
-  // check if an ancestor already completed
-  let currentDesc = desc.parent;
-  while (currentDesc != null) {
-    if (MapPrototypeGet(testStates, currentDesc.id).finalized) {
-      throw new Error(
-        "Parent scope completed before test step finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).",
-      );
-    }
-    currentDesc = currentDesc.parent;
-  }
-}
 
 function pledgePermissions(permissions) {
   return ops.op_pledge_test_permissions(
     serializePermissions(permissions),
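Taken together, the sanitizers above now produce a small set of structured `failed` payloads. An illustrative TypeScript union of the shapes that actually appear in this diff (the type name is ours; the camelCase keys are what serde's `rename_all = "camelCase"` on the Rust `TestFailure` enum, introduced later in this patch, deserializes):

    type TestFailureMessage =
      | { failed: "incompleteSteps" }
      | { failed: "incomplete" }
      | { failed: { failedSteps: number } }
      | { failed: { jsError: unknown } } // core.destructureError(error)
      | { failed: { leakedOps: [string[], boolean] } }
      | { failed: { leakedResources: string[] } }
      | { failed: { overlapsWithSanitizers: string[] } }
      | { failed: { hasSanitizersAndOverlaps: string[] } };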
@@ -573,18 +523,14 @@ function withPermissions(fn, permissions) {
  * @typedef {{
  *   context: TestContext,
  *   children: TestStepDescription[],
- *   finalized: boolean,
+ *   completed: boolean,
  * }} TestState
  *
  * @typedef {{
  *   context: TestContext,
  *   children: TestStepDescription[],
- *   finalized: boolean,
- *   status: "pending" | "ok" | "failed" | "ignored",
- *   error: unknown,
- *   elapsed: number | null,
- *   reportedWait: boolean,
- *   reportedResult: boolean,
+ *   completed: boolean,
+ *   failed: boolean,
  * }} TestStepState
  *
  * @typedef {{
@@ -701,13 +647,6 @@ function test(
 
   // Delete this prop in case the user passed it. It's used to detect steps.
   delete testDesc.parent;
-  testDesc.fn = wrapTestFnWithSanitizers(testDesc.fn, testDesc);
-  if (testDesc.permissions) {
-    testDesc.fn = withPermissions(
-      testDesc.fn,
-      testDesc.permissions,
-    );
-  }
   testDesc.origin = getTestOrigin();
   const jsError = core.destructureError(new Error());
   testDesc.location = {
@@ -724,7 +663,7 @@ function test(
   MapPrototypeSet(testStates, testDesc.id, {
     context: createTestContext(testDesc),
     children: [],
-    finalized: false,
+    completed: false,
   });
 }
@@ -832,28 +771,20 @@ async function runTest(desc) {
   if (desc.ignore) {
     return "ignored";
   }
 
+  let testFn = wrapTestFnWithSanitizers(desc.fn, desc);
+  if (!("parent" in desc) && desc.permissions) {
+    testFn = withPermissions(
+      testFn,
+      desc.permissions,
+    );
+  }
   try {
-    await desc.fn(desc);
-    const failCount = failedChildStepsCount(desc);
-    return failCount === 0 ? "ok" : {
-      "failed": core.destructureError(
-        new Error(
-          `${failCount} test step${failCount === 1 ? "" : "s"} failed.`,
-        ),
-      ),
-    };
+    const result = await testFn(desc);
+    if (result) return result;
+    const failedSteps = failedChildStepsCount(desc);
+    return failedSteps === 0 ? "ok" : { failed: { failedSteps } };
   } catch (error) {
-    return {
-      "failed": core.destructureError(error),
-    };
-  } finally {
-    const state = MapPrototypeGet(testStates, desc.id);
-    state.finalized = true;
-    // ensure the children report their result
-    for (const childDesc of state.children) {
-      stepReportResult(childDesc);
-    }
+    return { failed: { jsError: core.destructureError(error) } };
   }
 }
@@ -1094,6 +1025,11 @@ async function runTests({
       const earlier = DateNow();
       const result = await runTest(desc);
       const elapsed = DateNow() - earlier;
+      const state = MapPrototypeGet(testStates, desc.id);
+      state.completed = true;
+      for (const childDesc of state.children) {
+        stepReportResult(childDesc, { failed: "incomplete" }, 0);
+      }
       ops.op_dispatch_test_event({
         result: [desc.id, result, elapsed],
       });
@@ -1153,7 +1089,7 @@ async function runBenchmarks() {
 
 function getFullName(desc) {
   if ("parent" in desc) {
-    return `${desc.parent.name} > ${desc.name}`;
+    return `${getFullName(desc.parent)} ... ${desc.name}`;
   }
   return desc.name;
 }
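The change just above makes full names recursive, which is what produces the "test ... step 1 ... step 2" labels in the new SIGINT output further down. A self-contained TypeScript illustration (the Desc interface is ours):

    interface Desc {
      name: string;
      parent?: Desc;
    }

    function getFullName(desc: Desc): string {
      if (desc.parent) {
        return `${getFullName(desc.parent)} ... ${desc.name}`;
      }
      return desc.name;
    }

    const step2 = {
      name: "step 2",
      parent: { name: "step 1", parent: { name: "test" } },
    };
    console.log(getFullName(step2)); // "test ... step 1 ... step 2"
    // The old version printed only one level: "step 1 > step 2".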
@@ -1162,74 +1098,23 @@ function usesSanitizer(desc) {
   return desc.sanitizeResources || desc.sanitizeOps || desc.sanitizeExit;
 }
 
-function canStreamReporting(desc) {
-  let currentDesc = desc;
-  while (currentDesc != null) {
-    if (!usesSanitizer(currentDesc)) {
-      return false;
-    }
-    currentDesc = currentDesc.parent;
-  }
-  for (const childDesc of MapPrototypeGet(testStates, desc.id).children) {
-    const state = MapPrototypeGet(testStates, childDesc.id);
-    if (!usesSanitizer(childDesc) && !state.finalized) {
-      return false;
-    }
-  }
-  return true;
-}
-
-function stepReportWait(desc) {
+function stepReportResult(desc, result, elapsed) {
   const state = MapPrototypeGet(testStates, desc.id);
-  if (state.reportedWait) {
-    return;
-  }
-  ops.op_dispatch_test_event({ stepWait: desc.id });
-  state.reportedWait = true;
-}
-
-function stepReportResult(desc) {
-  const state = MapPrototypeGet(testStates, desc.id);
-  if (state.reportedResult) {
-    return;
-  }
-  stepReportWait(desc);
   for (const childDesc of state.children) {
-    stepReportResult(childDesc);
-  }
-  let result;
-  if (state.status == "pending" || state.status == "failed") {
-    result = {
-      [state.status]: state.error && core.destructureError(state.error),
-    };
-  } else {
-    result = state.status;
+    stepReportResult(childDesc, { failed: "incomplete" }, 0);
   }
   ops.op_dispatch_test_event({
-    stepResult: [desc.id, result, state.elapsed],
+    stepResult: [desc.id, result, elapsed],
   });
-  state.reportedResult = true;
 }
 
 function failedChildStepsCount(desc) {
   return ArrayPrototypeFilter(
     MapPrototypeGet(testStates, desc.id).children,
-    (d) => MapPrototypeGet(testStates, d.id).status === "failed",
+    (d) => MapPrototypeGet(testStates, d.id).failed,
   ).length;
 }
 
-/** If a test validation error already occurred then don't bother checking
- * the sanitizers as that will create extra noise.
- */
-function shouldSkipSanitizers(desc) {
-  try {
-    testStepPostValidation(desc);
-    return false;
-  } catch {
-    return true;
-  }
-}
-
 /** @param desc {TestDescription | TestStepDescription} */
 function createTestContext(desc) {
   let parent;
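The new stepReportResult walks the step tree recursively: when a result is reported for a test or step, any children still listed under it are flushed first as `{ failed: "incomplete" }` with zero elapsed time. A TypeScript sketch of that recursion with illustrative types (the real code dispatches ops and uses primordials):

    interface StepState {
      children: StepState[];
      report: (result: unknown, elapsed: number) => void;
    }

    function stepReportResult(state: StepState, result: unknown, elapsed: number) {
      for (const child of state.children) {
        // Unfinished descendants are reported before their parent's result.
        stepReportResult(child, { failed: "incomplete" }, 0);
      }
      state.report(result, elapsed);
    }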
@@ -1266,7 +1151,7 @@ function createTestContext(desc) {
      * @param maybeFn {((t: TestContext) => void | Promise<void>) | undefined}
      */
     async step(nameOrFnOrOptions, maybeFn) {
-      if (MapPrototypeGet(testStates, desc.id).finalized) {
+      if (MapPrototypeGet(testStates, desc.id).completed) {
         throw new Error(
           "Cannot run test step after parent scope has finished execution. " +
             "Ensure any `.step(...)` calls are executed before their parent scope completes execution.",
@@ -1322,12 +1207,8 @@ function createTestContext(desc) {
       const state = {
         context: createTestContext(stepDesc),
         children: [],
-        finalized: false,
-        status: "pending",
-        error: null,
-        elapsed: null,
-        reportedWait: false,
-        reportedResult: false,
+        failed: false,
         completed: false,
       };
       MapPrototypeSet(testStates, stepDesc.id, state);
       ArrayPrototypePush(
@@ -1335,56 +1216,14 @@ function createTestContext(desc) {
         stepDesc,
       );
 
-      try {
-        if (stepDesc.ignore) {
-          state.status = "ignored";
-          state.finalized = true;
-          if (canStreamReporting(stepDesc)) {
-            stepReportResult(stepDesc);
-          }
-          return false;
-        }
-
-        const testFn = wrapTestFnWithSanitizers(stepDesc.fn, stepDesc);
-        const start = DateNow();
-
-        try {
-          await testFn(stepDesc);
-
-          if (failedChildStepsCount(stepDesc) > 0) {
-            state.status = "failed";
-          } else {
-            state.status = "ok";
-          }
-        } catch (error) {
-          state.error = error;
-          state.status = "failed";
-        }
-
-        state.elapsed = DateNow() - start;
-
-        if (MapPrototypeGet(testStates, stepDesc.parent.id).finalized) {
-          // always point this test out as one that was still running
-          // if the parent step finalized
-          state.status = "pending";
-        }
-
-        state.finalized = true;
-
-        if (state.reportedWait && canStreamReporting(stepDesc)) {
-          stepReportResult(stepDesc);
-        }
-
-        return state.status === "ok";
-      } finally {
-        if (canStreamReporting(stepDesc.parent)) {
-          const parentState = MapPrototypeGet(testStates, stepDesc.parent.id);
-          // flush any buffered steps
-          for (const childDesc of parentState.children) {
-            stepReportResult(childDesc);
-          }
-        }
-      }
+      ops.op_dispatch_test_event({ stepWait: stepDesc.id });
+      const earlier = DateNow();
+      const result = await runTest(stepDesc);
+      const elapsed = DateNow() - earlier;
+      state.failed = !!result.failed;
+      state.completed = true;
+      stepReportResult(stepDesc, result, elapsed);
+      return result == "ok";
     },
   };
 }
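The rewrite above collapses `t.step()`'s private status machine into the same `runTest()` pipeline used for top-level tests: emit a wait event, run, then report. A hedged TypeScript sketch of that flow (runStep, runTest, and dispatch here are illustrative parameters, not the patch's exact signatures):

    async function runStep(
      stepId: number,
      runTest: (id: number) => Promise<"ok" | { failed: unknown }>,
      dispatch: (event: unknown) => void,
    ): Promise<boolean> {
      dispatch({ stepWait: stepId });
      const earlier = Date.now();
      const result = await runTest(stepId);
      const elapsed = Date.now() - earlier;
      dispatch({ stepResult: [stepId, result, elapsed] });
      return result == "ok";
    }

Because every step now flows through one pipeline, the Rust side can render a step's pending/incomplete state uniformly, including at SIGINT time.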
cli/lsp/testing/execution.rs

@@ -439,9 +439,6 @@ impl TestRun {
             test::TestStepResult::Failed(_) => {
               summary.failed_steps += 1;
             }
-            test::TestStepResult::Pending(_) => {
-              summary.pending_steps += 1;
-            }
           }
           reporter.report_step_result(
             test_steps.get(&id).unwrap(),
@@ -449,6 +446,7 @@ impl TestRun {
             duration,
           );
         }
+        test::TestEvent::Sigint => {}
       }
     }
 
@@ -716,11 +714,10 @@ impl LspTestReporter {
           test: desc.into(),
         })
       }
-      test::TestResult::Failed(js_error) => {
-        let err_string = test::format_test_error(js_error);
+      test::TestResult::Failed(failure) => {
         self.progress(lsp_custom::TestRunProgressMessage::Failed {
           test: desc.into(),
-          messages: as_test_messages(err_string, false),
+          messages: as_test_messages(failure.to_string(), false),
           duration: Some(elapsed as u32),
         })
       }
@@ -830,24 +827,13 @@ impl LspTestReporter {
           test: desc.into(),
         })
       }
-      test::TestStepResult::Failed(js_error) => {
-        let messages = if let Some(js_error) = js_error {
-          let err_string = test::format_test_error(js_error);
-          as_test_messages(err_string, false)
-        } else {
-          vec![]
-        };
+      test::TestStepResult::Failed(failure) => {
         self.progress(lsp_custom::TestRunProgressMessage::Failed {
           test: desc.into(),
-          messages,
+          messages: as_test_messages(failure.to_string(), false),
           duration: Some(elapsed as u32),
         })
       }
-      test::TestStepResult::Pending(_) => {
-        self.progress(lsp_custom::TestRunProgressMessage::Enqueued {
-          test: desc.into(),
-        })
-      }
     }
   }
cli/tests/integration/test_tests.rs

@@ -4,6 +4,7 @@ use deno_core::url::Url;
 use test_util as util;
 use util::assert_contains;
 use util::env_vars_for_npm_tests;
+use util::wildcard_match;
 use util::TestContext;
 
 #[test]
@@ -444,6 +445,26 @@ itest!(parallel_output {
   exit_code: 1,
 });
 
+#[test]
+fn sigint_with_hanging_test() {
+  util::with_pty(
+    &[
+      "test",
+      "--quiet",
+      "--no-check",
+      "test/sigint_with_hanging_test.ts",
+    ],
+    |mut console| {
+      std::thread::sleep(std::time::Duration::from_secs(1));
+      console.write_line("\x03");
+      wildcard_match(
+        include_str!("../testdata/test/sigint_with_hanging_test.out"),
+        &console.read_all_output(),
+      );
+    },
+  );
+}
+
 itest!(package_json_basic {
   args: "test",
   output: "package_json/basic/lib.test.out",
cli/tests/testdata/test/ops_sanitizer_missing_details.out

@@ -5,12 +5,9 @@ test 1 ... FAILED [WILDCARD]
 ERRORS
 
 test 1 => ./test/ops_sanitizer_missing_details.ts:[WILDCARD]
-error: AssertionError: Test case is leaking async ops.
-
- - 1 async operation to op_write was started in this test, but never completed.
-
+error: Leaking async ops:
+ - 1 async operation to op_write was started in this test, but never completed.
 To get more details where ops were leaked, run again with --trace-ops flag.
-    at [WILDCARD]
 
 FAILURES
 
cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests.out

@@ -6,9 +6,8 @@ test 2 ... FAILED ([WILDCARD])
 ERRORS
 
 test 1 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
-error: AssertionError: Test case is leaking async ops.
-
- - 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operations were started here:
+error: Leaking async ops:
+ - 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operations were started here:
     at [WILDCARD]
     at setTimeout ([WILDCARD])
     at test ([WILDCARD]/testdata/test/ops_sanitizer_multiple_timeout_tests.ts:4:3)
|
@ -21,12 +20,9 @@ error: AssertionError: Test case is leaking async ops.
|
|||
at [WILDCARD]/testdata/test/ops_sanitizer_multiple_timeout_tests.ts:8:27
|
||||
at [WILDCARD]
|
||||
|
||||
at [WILDCARD]
|
||||
|
||||
test 2 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
|
||||
error: AssertionError: Test case is leaking async ops.
|
||||
|
||||
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operations were started here:
|
||||
error: Leaking async ops:
|
||||
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operations were started here:
|
||||
at [WILDCARD]
|
||||
at setTimeout ([WILDCARD])
|
||||
at test ([WILDCARD]/testdata/test/ops_sanitizer_multiple_timeout_tests.ts:4:3)
|
||||
|
@ -39,8 +35,6 @@ error: AssertionError: Test case is leaking async ops.
|
|||
at [WILDCARD]/testdata/test/ops_sanitizer_multiple_timeout_tests.ts:10:27
|
||||
at [WILDCARD]
|
||||
|
||||
at [WILDCARD]
|
||||
|
||||
FAILURES
|
||||
|
||||
test 1 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
|
||||
|
|
|
cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests_no_trace.out

@@ -6,20 +6,14 @@ test 2 ... FAILED ([WILDCARD])
 ERRORS
 
 test 1 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
-error: AssertionError: Test case is leaking async ops.
-
- - 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call.
-
+error: Leaking async ops:
+ - 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call.
 To get more details where ops were leaked, run again with --trace-ops flag.
-    at [WILDCARD]
 
 test 2 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
-error: AssertionError: Test case is leaking async ops.
-
- - 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call.
-
+error: Leaking async ops:
+ - 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call.
 To get more details where ops were leaked, run again with --trace-ops flag.
-    at [WILDCARD]
 
 FAILURES
 
cli/tests/testdata/test/ops_sanitizer_unstable.out

@@ -6,16 +6,13 @@ leak interval ... FAILED ([WILDCARD])
 ERRORS
 
 leak interval => ./test/ops_sanitizer_unstable.ts:[WILDCARD]
-error: AssertionError: Test case is leaking async ops.
-
- - 1 async operation to sleep for a duration was started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operation was started here:
+error: Leaking async ops:
+ - 1 async operation to sleep for a duration was started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operation was started here:
     at [WILDCARD]
     at setInterval ([WILDCARD])
     at [WILDCARD]/testdata/test/ops_sanitizer_unstable.ts:3:3
     at [WILDCARD]
-
-    at [WILDCARD]
 
 FAILURES
 
 leak interval => ./test/ops_sanitizer_unstable.ts:[WILDCARD]
cli/tests/testdata/test/parallel_output.out (vendored, 53 lines)
@@ -12,45 +12,44 @@ Hello, world! (from step 4)
 ./test/parallel_output.ts => step output ... ok ([WILDCARD]ms)
 ./test/parallel_output.ts => step failures ... step 1 ... ok ([WILDCARD]ms)
 ./test/parallel_output.ts => step failures ... step 2 ... FAILED ([WILDCARD]ms)
-error: Error: Fail.
-    throw new Error("Fail.");
-          ^
-    at file:///[WILDCARD]/test/parallel_output.ts:15:11
-    at [WILDCARD]
-    at file:///[WILDCARD]/test/parallel_output.ts:14:11
 ./test/parallel_output.ts => step failures ... step 3 ... FAILED ([WILDCARD]ms)
-error: Error: Fail.
-    await t.step("step 3", () => Promise.reject(new Error("Fail.")));
-          ^
-    at file:///[WILDCARD]/test/parallel_output.ts:17:47
-    at [WILDCARD]
-    at file:///[WILDCARD]/test/parallel_output.ts:17:11
-./test/parallel_output.ts => step failures ... FAILED ([WILDCARD]ms)
+./test/parallel_output.ts => step failures ... FAILED (due to 2 failed steps) ([WILDCARD]ms)
 ./test/parallel_output.ts => step nested failure ... step 1 ... inner 1 ... ok ([WILDCARD]ms)
 ./test/parallel_output.ts => step nested failure ... step 1 ... inner 2 ... FAILED ([WILDCARD]ms)
-error: Error: Failed.
-      throw new Error("Failed.");
-            ^
-    at file:///[WILDCARD]/test/parallel_output.ts:24:13
-    at [WILDCARD]
-    at file:///[WILDCARD]/test/parallel_output.ts:23:13
-./test/parallel_output.ts => step nested failure ... step 1 ... FAILED ([WILDCARD]ms)
-./test/parallel_output.ts => step nested failure ... FAILED ([WILDCARD]ms)
+./test/parallel_output.ts => step nested failure ... step 1 ... FAILED (due to 1 failed step) ([WILDCARD]ms)
+./test/parallel_output.ts => step nested failure ... FAILED (due to 1 failed step) ([WILDCARD]ms)
 
 ERRORS
 
-step failures => ./test/parallel_output.ts:12:6
-error: Error: 2 test steps failed.
+step failures ... step 2 => ./test/parallel_output.ts:14:11
+error: Error: Fail.
+    throw new Error("Fail.");
+          ^
+    at file:///[WILDCARD]/test/parallel_output.ts:15:11
+    at [WILDCARD]
+    at file:///[WILDCARD]/test/parallel_output.ts:14:11
 
-step nested failure => ./test/parallel_output.ts:20:6
-error: Error: 1 test step failed.
+step failures ... step 3 => ./test/parallel_output.ts:17:11
+error: Error: Fail.
+    await t.step("step 3", () => Promise.reject(new Error("Fail.")));
+          ^
+    at file:///[WILDCARD]/test/parallel_output.ts:17:47
+    at [WILDCARD]
+    at file:///[WILDCARD]/test/parallel_output.ts:17:11
+
+step nested failure ... step 1 ... inner 2 => ./test/parallel_output.ts:23:13
+error: Error: Failed.
+      throw new Error("Failed.");
+            ^
+    at file:///[WILDCARD]/test/parallel_output.ts:24:13
+    at [WILDCARD]
+    at file:///[WILDCARD]/test/parallel_output.ts:23:13
 
 FAILURES
 
-step failures => ./test/parallel_output.ts:12:6
-step nested failure => ./test/parallel_output.ts:20:6
+step failures ... step 2 => ./test/parallel_output.ts:14:11
+step failures ... step 3 => ./test/parallel_output.ts:17:11
+step nested failure ... step 1 ... inner 2 => ./test/parallel_output.ts:23:13
 
 FAILED | 1 passed (6 steps) | 2 failed (4 steps) ([WILDCARD]ms)
 
cli/tests/testdata/test/resource_sanitizer.out

@@ -5,12 +5,9 @@ leak ... FAILED ([WILDCARD])
 ERRORS
 
 leak => ./test/resource_sanitizer.ts:[WILDCARD]
-error: AssertionError: Test case is leaking 2 resources:
-
- - The stdin pipe (rid 0) was opened before the test started, but was closed during the test. Do not close resources in a test that were not created during that test.
- - A file (rid 3) was opened during the test, but not closed during the test. Close the file handle by calling `file.close()`.
-
-    at [WILDCARD]
+error: Leaking resources:
+ - The stdin pipe (rid 0) was opened before the test started, but was closed during the test. Do not close resources in a test that were not created during that test.
+ - A file (rid 3) was opened during the test, but not closed during the test. Close the file handle by calling `file.close()`.
 
 FAILURES
 
cli/tests/testdata/test/sigint_with_hanging_test.out (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
+running 1 test from [WILDCARD]/hanging_test.ts
+test ...
+step 1 ...
+step 2 ...
+SIGINT The following tests were pending:
+
+test => [WILDCARD]/hanging_test.ts:3:6
+test ... step 1 => [WILDCARD]/hanging_test.ts:9:13
+test ... step 1 ... step 2 => [WILDCARD]/hanging_test.ts:10:15
+
cli/tests/testdata/test/sigint_with_hanging_test.ts (vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
+setInterval(() => {}, 10000);
+
+Deno.test({
+  name: "test",
+  sanitizeOps: false,
+  sanitizeExit: false,
+  sanitizeResources: false,
+  async fn(t) {
+    await t.step("step 1", async (t) => {
+      await t.step("step 2", async () => {
+        await new Promise(() => {});
+      });
+    });
+  },
+});
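To reproduce the fixture's behavior by hand, run `deno test --quiet --no-check test/sigint_with_hanging_test.ts` and press Ctrl+C; the runner prints the pending-test list shown in the .out file above. For illustration only, user code can observe SIGINT itself with a standard Deno API (this sketch is unrelated to the runner's internal handler):

    Deno.addSignalListener("SIGINT", () => {
      console.log("got SIGINT");
      Deno.exit(130); // conventional exit code for SIGINT
    });
    await new Promise(() => {}); // hang forever, like the test above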
cli/tests/testdata/test/steps/failing_steps.out (vendored, 61 lines)
@@ -3,47 +3,42 @@ running 3 tests from ./test/steps/failing_steps.ts
 nested failure ...
   step 1 ...
     inner 1 ... FAILED ([WILDCARD])
 error: Error: Failed.
       throw new Error("Failed.");
             ^
     at [WILDCARD]/failing_steps.ts:[WILDCARD]
 [WILDCARD]
     inner 2 ... ok ([WILDCARD])
-  step 1 ... FAILED ([WILDCARD])
-nested failure ... FAILED ([WILDCARD])
+  step 1 ... FAILED (due to 1 failed step) ([WILDCARD])
+nested failure ... FAILED (due to 1 failed step) ([WILDCARD])
 multiple test step failures ...
   step 1 ... FAILED ([WILDCARD])
 error: Error: Fail.
     throw new Error("Fail.");
           ^
 [WILDCARD]
   step 2 ... FAILED ([WILDCARD])
 error: Error: Fail.
     await t.step("step 2", () => Promise.reject(new Error("Fail.")));
           ^
     at [WILDCARD]/failing_steps.ts:[WILDCARD]
 [WILDCARD]
-multiple test step failures ... FAILED ([WILDCARD])
+multiple test step failures ... FAILED (due to 2 failed steps) ([WILDCARD])
 failing step in failing test ...
   step 1 ... FAILED ([WILDCARD])
 error: Error: Fail.
     throw new Error("Fail.");
           ^
     at [WILDCARD]/failing_steps.ts:[WILDCARD]
     at [WILDCARD]
 failing step in failing test ... FAILED ([WILDCARD])
 
 ERRORS
 
-nested failure => ./test/steps/failing_steps.ts:[WILDCARD]
-error: Error: 1 test step failed.
-    at runTest (ext:cli/40_testing.js:[WILDCARD])
-    at async runTests (ext:cli/40_testing.js:[WILDCARD])
+nested failure ... step 1 ... inner 1 => ./test/steps/failing_steps.ts:[WILDCARD]
+error: Error: Failed.
+      throw new Error("Failed.");
+            ^
+    at [WILDCARD]/failing_steps.ts:[WILDCARD]
 
-multiple test step failures => ./test/steps/failing_steps.ts:[WILDCARD]
-error: Error: 2 test steps failed.
-    at runTest (ext:cli/40_testing.js:[WILDCARD])
-    at async runTests (ext:cli/40_testing.js:[WILDCARD])
+multiple test step failures ... step 1 => ./test/steps/failing_steps.ts:[WILDCARD]
+error: Error: Fail.
+    throw new Error("Fail.");
+          ^
+    at [WILDCARD]/failing_steps.ts:[WILDCARD]
+
+multiple test step failures ... step 2 => ./test/steps/failing_steps.ts:[WILDCARD]
+error: Error: Fail.
+    await t.step("step 2", () => Promise.reject(new Error("Fail.")));
+          ^
+    at [WILDCARD]/failing_steps.ts:[WILDCARD]
+
+failing step in failing test ... step 1 => ./test/steps/failing_steps.ts:[WILDCARD]
+error: Error: Fail.
+    throw new Error("Fail.");
+          ^
+    at [WILDCARD]/failing_steps.ts:[WILDCARD]
 
 failing step in failing test => ./test/steps/failing_steps.ts:[WILDCARD]
 error: Error: Fail test.
@@ -53,8 +48,10 @@ error: Error: Fail test.
 
 FAILURES
 
-nested failure => ./test/steps/failing_steps.ts:[WILDCARD]
-multiple test step failures => ./test/steps/failing_steps.ts:[WILDCARD]
+nested failure ... step 1 ... inner 1 => ./test/steps/failing_steps.ts:[WILDCARD]
+multiple test step failures ... step 1 => ./test/steps/failing_steps.ts:[WILDCARD]
+multiple test step failures ... step 2 => ./test/steps/failing_steps.ts:[WILDCARD]
+failing step in failing test ... step 1 => ./test/steps/failing_steps.ts:[WILDCARD]
 failing step in failing test => ./test/steps/failing_steps.ts:[WILDCARD]
 
 FAILED | 0 passed (1 step) | 3 failed (5 steps) ([WILDCARD])
 
cli/tests/testdata/test/steps/invalid_usage.out (vendored, 110 lines)
@@ -4,65 +4,33 @@ capturing ...
   some step ... ok ([WILDCARD])
 capturing ... FAILED ([WILDCARD])
 top level missing await ...
-  step ... pending ([WILDCARD])
-top level missing await ... FAILED ([WILDCARD])
+  step ... INCOMPLETE
+top level missing await ... FAILED (due to incomplete steps) ([WILDCARD])
 inner missing await ...
   step ...
-    inner ... pending ([WILDCARD])
-error: Error: Parent scope completed before test step finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
-    at [WILDCARD]
-    at async TestContext.step [WILDCARD]
-  step ... FAILED ([WILDCARD])
-error: Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
-    await t.step("step", (t) => {
-          ^
-    at [WILDCARD]
-    at async fn ([WILDCARD]/invalid_usage.ts:[WILDCARD])
-inner missing await ... FAILED ([WILDCARD])
+    inner ... INCOMPLETE
+  step ... FAILED (due to incomplete steps) ([WILDCARD])
+inner missing await ... FAILED (due to 1 failed step) ([WILDCARD])
 parallel steps with sanitizers ...
-  step 1 ... pending ([WILDCARD])
+  step 1 ... INCOMPLETE
   step 2 ... FAILED ([WILDCARD])
-error: Error: Cannot start test step while another test step with sanitizers is running.
- * parallel steps with sanitizers > step 1
-    await t.step("step 2", () => {});
-          ^
-    at [WILDCARD]
-    at [WILDCARD]/invalid_usage.ts:[WILDCARD]
-parallel steps with sanitizers ... FAILED ([WILDCARD])
+parallel steps with sanitizers ... FAILED (due to incomplete steps) ([WILDCARD])
 parallel steps when first has sanitizer ...
-  step 1 ... pending ([WILDCARD])
+  step 1 ... ok ([WILDCARD])
   step 2 ... FAILED ([WILDCARD])
-error: Error: Cannot start test step while another test step with sanitizers is running.
- * parallel steps when first has sanitizer > step 1
-    await t.step({
-          ^
-    at [WILDCARD]
-    at [WILDCARD]/invalid_usage.ts:[WILDCARD]
-parallel steps when first has sanitizer ... FAILED ([WILDCARD])
+parallel steps when first has sanitizer ... FAILED (due to 1 failed step) ([WILDCARD])
 parallel steps when second has sanitizer ...
   step 1 ... ok ([WILDCARD])
   step 2 ... FAILED ([WILDCARD])
-error: Error: Cannot start test step with sanitizers while another test step is running.
- * parallel steps when second has sanitizer > step 1
-    await t.step({
-          ^
-    at [WILDCARD]
-    at [WILDCARD]/invalid_usage.ts:[WILDCARD]
-parallel steps when second has sanitizer ... FAILED ([WILDCARD])
+parallel steps when second has sanitizer ... FAILED (due to 1 failed step) ([WILDCARD])
 parallel steps where only inner tests have sanitizers ...
   step 1 ...
     step inner ... ok ([WILDCARD])
   step 1 ... ok ([WILDCARD])
   step 2 ...
     step inner ... FAILED ([WILDCARD])
-error: Error: Cannot start test step with sanitizers while another test step is running.
- * parallel steps where only inner tests have sanitizers > step 1
-    await t.step({
-          ^
-    at [WILDCARD]
-    at [WILDCARD]/invalid_usage.ts:[WILDCARD]
-  step 2 ... FAILED ([WILDCARD])
-parallel steps where only inner tests have sanitizers ... FAILED ([WILDCARD])
+  step 2 ... FAILED (due to 1 failed step) ([WILDCARD])
+parallel steps where only inner tests have sanitizers ... FAILED (due to 1 failed step) ([WILDCARD])
 
 ERRORS
 
@@ -73,38 +41,42 @@ error: Error: Cannot run test step after parent scope has finished execution. En
     at TestContext.step ([WILDCARD])
     at [WILDCARD]/invalid_usage.ts:[WILDCARD]
 
-top level missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
-error: Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
-[WILDCARD]
+top level missing await ... step => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Didn't complete before parent. Await step with `await t.step(...)`.
 
-inner missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
-error: Error: 1 test step failed.
-    at [WILDCARD]
+inner missing await ... step ... inner => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Didn't complete before parent. Await step with `await t.step(...)`.
 
-parallel steps with sanitizers => ./test/steps/invalid_usage.ts:[WILDCARD]
-error: Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
-[WILDCARD]
+parallel steps with sanitizers ... step 2 => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Started test step while another test step with sanitizers was running:
+ * parallel steps with sanitizers ... step 1
 
-parallel steps when first has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
-error: Error: 1 test step failed.
-    at runTest ([WILDCARD])
-    at [WILDCARD]
+parallel steps with sanitizers ... step 1 => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Didn't complete before parent. Await step with `await t.step(...)`.
 
-parallel steps when second has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
-error: Error: 1 test step failed.
-    at runTest ([WILDCARD])
-    at [WILDCARD]
+parallel steps when first has sanitizer ... step 2 => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Started test step while another test step with sanitizers was running:
+ * parallel steps when first has sanitizer ... step 1
+
+parallel steps when second has sanitizer ... step 2 => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Started test step with sanitizers while another test step was running:
+ * parallel steps when second has sanitizer ... step 1
+
+parallel steps where only inner tests have sanitizers ... step 2 ... step inner => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Started test step with sanitizers while another test step was running:
+ * parallel steps where only inner tests have sanitizers ... step 1
 
 FAILURES
 
-capturing => ./test/steps/invalid_usage.ts:[WILDCARD]
-top level missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
-inner missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
-parallel steps with sanitizers => ./test/steps/invalid_usage.ts:[WILDCARD]
-parallel steps when first has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
-parallel steps when second has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
-parallel steps where only inner tests have sanitizers => ./test/steps/invalid_usage.ts:[WILDCARD]
+capturing => ./test/steps/invalid_usage.ts:3:6
+top level missing await ... step => ./test/steps/invalid_usage.ts:[WILDCARD]
+inner missing await ... step ... inner => ./test/steps/invalid_usage.ts:[WILDCARD]
+parallel steps with sanitizers ... step 2 => ./test/steps/invalid_usage.ts:[WILDCARD]
+parallel steps with sanitizers ... step 1 => ./test/steps/invalid_usage.ts:[WILDCARD]
+parallel steps when first has sanitizer ... step 2 => ./test/steps/invalid_usage.ts:[WILDCARD]
+parallel steps when second has sanitizer ... step 2 => ./test/steps/invalid_usage.ts:[WILDCARD]
+parallel steps where only inner tests have sanitizers ... step 2 ... step inner => ./test/steps/invalid_usage.ts:[WILDCARD]
 
-FAILED | 0 passed (4 steps) | 7 failed (10 steps) ([WILDCARD])
+FAILED | 0 passed (5 steps) | 7 failed (9 steps) ([WILDCARD])
 
 error: Test failed
cli/tests/testdata/test/steps/invalid_usage.ts

@@ -11,7 +11,7 @@ Deno.test("capturing", async (t) => {
 
 Deno.test("top level missing await", (t) => {
   t.step("step", () => {
-    return new Promise((resolve) => setTimeout(resolve, 10));
+    return new Promise(() => {});
   });
 });
 
cli/tests/unit_node/process_test.ts

@@ -155,24 +155,29 @@ Deno.test({
   name: "process.on signal",
   ignore: Deno.build.os == "windows",
   async fn() {
-    const promise = deferred();
-    let c = 0;
-    const listener = () => {
-      c += 1;
-    };
-    process.on("SIGINT", listener);
-    setTimeout(async () => {
-      // Sends SIGINT 3 times.
-      for (const _ of Array(3)) {
-        await delay(20);
-        Deno.kill(Deno.pid, "SIGINT");
-      }
+    const process = new Deno.Command(Deno.execPath(), {
+      args: [
+        "eval",
+        `
+        import process from "node:process";
+        setInterval(() => {}, 1000);
+        process.on("SIGINT", () => {
+          console.log("foo");
+        });
+        `,
+      ],
+      stdout: "piped",
+      stderr: "null",
+    }).spawn();
+    await delay(500);
+    for (const _ of Array(3)) {
+      process.kill("SIGINT");
       await delay(20);
-      Deno.removeSignalListener("SIGINT", listener);
-      promise.resolve();
-    });
-    await promise;
-    assertEquals(c, 3);
+    }
+    await delay(20);
+    process.kill("SIGTERM");
+    const output = await process.output();
+    assertEquals(new TextDecoder().decode(output.stdout), "foo\nfoo\nfoo\n");
   },
 });
 
@@ -180,24 +185,35 @@ Deno.test({
   name: "process.off signal",
   ignore: Deno.build.os == "windows",
   async fn() {
-    const promise = deferred();
-    let c = 0;
-    const listener = () => {
-      c += 1;
-      process.off("SIGINT", listener);
-    };
-    process.on("SIGINT", listener);
-    setTimeout(async () => {
-      // Sends SIGINT 3 times.
-      for (const _ of Array(3)) {
-        await delay(20);
-        Deno.kill(Deno.pid, "SIGINT");
-      }
+    const process = new Deno.Command(Deno.execPath(), {
+      args: [
+        "eval",
+        `
+        import process from "node:process";
+        setInterval(() => {}, 1000);
+        const listener = () => {
+          console.log("foo");
+          process.off("SIGINT")
+        };
+        process.on("SIGINT", listener);
+        `,
+      ],
+      stdout: "piped",
+      stderr: "null",
+    }).spawn();
+    await delay(500);
+    for (const _ of Array(3)) {
+      try {
+        process.kill("SIGINT");
+      } catch { /* should die after the first one */ }
       await delay(20);
-      promise.resolve();
-    });
-    await promise;
-    assertEquals(c, 1);
+    }
+    await delay(20);
+    try {
+      process.kill("SIGTERM");
+    } catch { /* should be dead, avoid hanging just in case */ }
+    const output = await process.output();
+    assertEquals(new TextDecoder().decode(output.stdout), "foo\n");
   },
 });
 
cli/tools/test.rs

@@ -39,6 +39,7 @@ use deno_runtime::permissions::Permissions;
 use deno_runtime::permissions::PermissionsContainer;
 use deno_runtime::tokio_util::run_local;
 use indexmap::IndexMap;
+use indexmap::IndexSet;
 use log::Level;
 use rand::rngs::SmallRng;
 use rand::seq::SliceRandom;
@@ -47,6 +48,8 @@ use regex::Regex;
 use serde::Deserialize;
+use std::cell::RefCell;
 use std::collections::BTreeMap;
 use std::collections::BTreeSet;
+use std::collections::HashMap;
 use std::collections::HashSet;
 use std::fmt::Write as _;
 use std::io::Read;
@@ -58,8 +61,10 @@ use std::sync::atomic::AtomicUsize;
 use std::sync::Arc;
 use std::time::Duration;
 use std::time::Instant;
+use tokio::signal;
 use tokio::sync::mpsc::unbounded_channel;
 use tokio::sync::mpsc::UnboundedSender;
+use tokio::sync::mpsc::WeakUnboundedSender;
 
 /// The test mode is used to determine how a specifier is to be tested.
 #[derive(Debug, Clone, Eq, PartialEq)]
@@ -154,13 +159,100 @@ pub enum TestOutput {
   Bytes(Vec<u8>),
 }
 
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub enum TestFailure {
+  JsError(Box<JsError>),
+  FailedSteps(usize),
+  IncompleteSteps,
+  LeakedOps(Vec<String>, bool), // Details, isOpCallTracingEnabled
+  LeakedResources(Vec<String>), // Details
+  // The rest are for steps only.
+  Incomplete,
+  OverlapsWithSanitizers(IndexSet<String>), // Long names of overlapped tests
+  HasSanitizersAndOverlaps(IndexSet<String>), // Long names of overlapped tests
+}
+
+impl ToString for TestFailure {
+  fn to_string(&self) -> String {
+    match self {
+      TestFailure::JsError(js_error) => format_test_error(js_error),
+      TestFailure::FailedSteps(1) => "1 test step failed.".to_string(),
+      TestFailure::FailedSteps(n) => format!("{} test steps failed.", n),
+      TestFailure::IncompleteSteps => "Completed while steps were still running. Ensure all steps are awaited with `await t.step(...)`.".to_string(),
+      TestFailure::Incomplete => "Didn't complete before parent. Await step with `await t.step(...)`.".to_string(),
+      TestFailure::LeakedOps(details, is_op_call_tracing_enabled) => {
+        let mut string = "Leaking async ops:".to_string();
+        for detail in details {
+          string.push_str(&format!("\n - {}", detail));
+        }
+        if !is_op_call_tracing_enabled {
+          string.push_str("\nTo get more details where ops were leaked, run again with --trace-ops flag.");
+        }
+        string
+      }
+      TestFailure::LeakedResources(details) => {
+        let mut string = "Leaking resources:".to_string();
+        for detail in details {
+          string.push_str(&format!("\n - {}", detail));
+        }
+        string
+      }
+      TestFailure::OverlapsWithSanitizers(long_names) => {
+        let mut string = "Started test step while another test step with sanitizers was running:".to_string();
+        for long_name in long_names {
+          string.push_str(&format!("\n * {}", long_name));
+        }
+        string
+      }
+      TestFailure::HasSanitizersAndOverlaps(long_names) => {
+        let mut string = "Started test step with sanitizers while another test step was running:".to_string();
+        for long_name in long_names {
+          string.push_str(&format!("\n * {}", long_name));
+        }
+        string
+      }
+    }
+  }
+}
+
+impl TestFailure {
+  fn format_label(&self) -> String {
+    match self {
+      TestFailure::Incomplete => colors::gray("INCOMPLETE").to_string(),
+      _ => colors::red("FAILED").to_string(),
+    }
+  }
+
+  fn format_inline_summary(&self) -> Option<String> {
+    match self {
+      TestFailure::FailedSteps(1) => Some("due to 1 failed step".to_string()),
+      TestFailure::FailedSteps(n) => Some(format!("due to {} failed steps", n)),
+      TestFailure::IncompleteSteps => {
+        Some("due to incomplete steps".to_string())
+      }
+      _ => None,
+    }
+  }
+
+  fn hide_in_summary(&self) -> bool {
+    // These failure variants are hidden in summaries because they are caused
+    // by child errors that will be summarized separately.
+    matches!(
+      self,
+      TestFailure::FailedSteps(_) | TestFailure::IncompleteSteps
+    )
+  }
+}
+
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Debug, Clone, PartialEq, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub enum TestResult {
   Ok,
   Ignored,
-  Failed(Box<JsError>),
+  Failed(TestFailure),
   Cancelled,
 }
 
@@ -193,18 +285,7 @@ impl TestStepDescription {
 pub enum TestStepResult {
   Ok,
   Ignored,
-  Failed(Option<Box<JsError>>),
-  Pending(Option<Box<JsError>>),
-}
-
-impl TestStepResult {
-  fn error(&self) -> Option<&JsError> {
-    match self {
-      TestStepResult::Failed(Some(error)) => Some(error),
-      TestStepResult::Pending(Some(error)) => Some(error),
-      _ => None,
-    }
-  }
+  Failed(TestFailure),
 }
 
 #[derive(Debug, Clone, Eq, PartialEq, Deserialize)]
@@ -228,6 +309,7 @@ pub enum TestEvent {
   StepRegister(TestStepDescription),
   StepWait(usize),
   StepResult(usize, TestStepResult, u64),
+  Sigint,
 }
 
 #[derive(Debug, Clone, Deserialize)]
@@ -238,11 +320,10 @@ pub struct TestSummary {
   pub ignored: usize,
   pub passed_steps: usize,
   pub failed_steps: usize,
-  pub pending_steps: usize,
   pub ignored_steps: usize,
   pub filtered_out: usize,
   pub measured: usize,
-  pub failures: Vec<(TestDescription, Box<JsError>)>,
+  pub failures: Vec<(TestDescription, TestFailure)>,
   pub uncaught_errors: Vec<(String, Box<JsError>)>,
 }
 
@@ -262,7 +343,6 @@ impl TestSummary {
       ignored: 0,
       passed_steps: 0,
       failed_steps: 0,
-      pending_steps: 0,
       ignored_steps: 0,
       filtered_out: 0,
       measured: 0,
@@ -280,10 +360,12 @@ struct PrettyTestReporter {
   parallel: bool,
   echo_output: bool,
   in_new_line: bool,
-  last_wait_id: Option<usize>,
+  scope_test_id: Option<usize>,
   cwd: Url,
   did_have_user_output: bool,
   started_tests: bool,
+  child_results_buffer:
+    HashMap<usize, IndexMap<usize, (TestStepDescription, TestStepResult, u64)>>,
 }
 
 impl PrettyTestReporter {
@@ -292,10 +374,11 @@ impl PrettyTestReporter {
       parallel,
       echo_output,
       in_new_line: true,
-      last_wait_id: None,
+      scope_test_id: None,
       cwd: Url::from_directory_path(std::env::current_dir().unwrap()).unwrap(),
       did_have_user_output: false,
      started_tests: false,
+      child_results_buffer: Default::default(),
     }
   }
 
@@ -316,7 +399,7 @@ impl PrettyTestReporter {
     self.in_new_line = false;
     // flush for faster feedback when line buffered
     std::io::stdout().flush().unwrap();
-    self.last_wait_id = Some(description.id);
+    self.scope_test_id = Some(description.id);
   }
 
   fn to_relative_path_or_remote_url(&self, path_or_url: &str) -> String {
@@ -341,7 +424,7 @@ impl PrettyTestReporter {
     self.in_new_line = false;
     // flush for faster feedback when line buffered
     std::io::stdout().flush().unwrap();
-    self.last_wait_id = Some(description.id);
+    self.scope_test_id = Some(description.id);
   }
 
   fn force_report_step_result(
@@ -350,32 +433,53 @@ impl PrettyTestReporter {
     result: &TestStepResult,
     elapsed: u64,
   ) {
-    let status = match result {
-      TestStepResult::Ok => colors::green("ok").to_string(),
-      TestStepResult::Ignored => colors::yellow("ignored").to_string(),
-      TestStepResult::Pending(_) => colors::gray("pending").to_string(),
-      TestStepResult::Failed(_) => colors::red("FAILED").to_string(),
-    };
-
     self.write_output_end();
-    if self.in_new_line || self.last_wait_id != Some(description.id) {
+    if self.in_new_line || self.scope_test_id != Some(description.id) {
       self.force_report_step_wait(description);
     }
 
-    println!(
-      " {} {}",
-      status,
-      colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
-    );
-
-    if let Some(js_error) = result.error() {
-      let err_string = format_test_error(js_error);
-      let err_string = format!("{}: {}", colors::red_bold("error"), err_string);
-      for line in err_string.lines() {
-        println!("{}{}", "  ".repeat(description.level + 1), line);
-      }
-    }
+    if !self.parallel {
+      let child_results = self
+        .child_results_buffer
+        .remove(&description.id)
+        .unwrap_or_default();
+      for (desc, result, elapsed) in child_results.values() {
+        self.force_report_step_result(desc, result, *elapsed);
+      }
+      if !child_results.is_empty() {
+        self.force_report_step_wait(description);
+      }
+    }
+
+    let status = match &result {
+      TestStepResult::Ok => colors::green("ok").to_string(),
+      TestStepResult::Ignored => colors::yellow("ignored").to_string(),
+      TestStepResult::Failed(failure) => failure.format_label(),
+    };
+    print!(" {}", status);
+    if let TestStepResult::Failed(failure) = result {
+      if let Some(inline_summary) = failure.format_inline_summary() {
+        print!(" ({})", inline_summary)
+      }
+    }
+    if !matches!(result, TestStepResult::Failed(TestFailure::Incomplete)) {
+      print!(
+        " {}",
+        colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
+      );
+    }
+    println!();
     self.in_new_line = true;
+    if self.parallel {
+      self.scope_test_id = None;
+    } else {
+      self.scope_test_id = Some(description.parent_id);
+    }
+    self
+      .child_results_buffer
+      .entry(description.parent_id)
+      .or_default()
+      .remove(&description.id);
   }
 
   fn write_output_end(&mut self) {
@@ -442,23 +546,28 @@ impl PrettyTestReporter {
     }
 
     self.write_output_end();
-    if self.in_new_line || self.last_wait_id != Some(description.id) {
+    if self.in_new_line || self.scope_test_id != Some(description.id) {
       self.force_report_wait(description);
     }
 
     let status = match result {
       TestResult::Ok => colors::green("ok").to_string(),
       TestResult::Ignored => colors::yellow("ignored").to_string(),
-      TestResult::Failed(_) => colors::red("FAILED").to_string(),
+      TestResult::Failed(failure) => failure.format_label(),
       TestResult::Cancelled => colors::gray("cancelled").to_string(),
     };
 
+    print!(" {}", status);
+    if let TestResult::Failed(failure) = result {
+      if let Some(inline_summary) = failure.format_inline_summary() {
+        print!(" ({})", inline_summary)
+      }
+    }
     println!(
-      " {} {}",
-      status,
+      " {}",
       colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
     );
     self.in_new_line = true;
+    self.scope_test_id = None;
   }
 
   fn report_uncaught_error(&mut self, origin: &str, _error: &JsError) {
@@ -477,14 +586,14 @@ impl PrettyTestReporter {
   fn report_step_register(&mut self, _description: &TestStepDescription) {}
 
   fn report_step_wait(&mut self, description: &TestStepDescription) {
-    if !self.parallel {
+    if !self.parallel && self.scope_test_id == Some(description.parent_id) {
      self.force_report_step_wait(description);
     }
   }
 
   fn report_step_result(
     &mut self,
-    description: &TestStepDescription,
+    desc: &TestStepDescription,
     result: &TestStepResult,
     elapsed: u64,
     tests: &IndexMap<usize, TestDescription>,
@ -492,35 +601,34 @@ impl PrettyTestReporter {
|
|||
) {
|
||||
if self.parallel {
|
||||
self.write_output_end();
|
||||
let root;
|
||||
let mut ancestor_names = vec![];
|
||||
let mut current_desc = description;
|
||||
loop {
|
||||
if let Some(step_desc) = test_steps.get(¤t_desc.parent_id) {
|
||||
ancestor_names.push(&step_desc.name);
|
||||
current_desc = step_desc;
|
||||
} else {
|
||||
root = tests.get(¤t_desc.parent_id).unwrap();
|
||||
break;
|
||||
}
|
||||
}
|
||||
ancestor_names.reverse();
|
||||
print!(
|
||||
"{}",
|
||||
"{} {} ...",
|
||||
colors::gray(format!(
|
||||
"{} =>",
|
||||
self.to_relative_path_or_remote_url(&description.origin)
|
||||
))
|
||||
self.to_relative_path_or_remote_url(&desc.origin)
|
||||
)),
|
||||
self.format_test_step_ancestry(desc, tests, test_steps)
|
||||
);
|
||||
print!(" {} ...", root.name);
|
||||
for name in ancestor_names {
|
||||
print!(" {name} ...");
|
||||
}
|
||||
print!(" {} ...", description.name);
|
||||
self.in_new_line = false;
|
||||
self.last_wait_id = Some(description.id);
|
||||
self.scope_test_id = Some(desc.id);
|
||||
self.force_report_step_result(desc, result, elapsed);
|
||||
} else {
|
||||
let sibling_results =
|
||||
self.child_results_buffer.entry(desc.parent_id).or_default();
|
||||
if self.scope_test_id == Some(desc.id)
|
||||
|| self.scope_test_id == Some(desc.parent_id)
|
||||
{
|
||||
let sibling_results = std::mem::take(sibling_results);
|
||||
self.force_report_step_result(desc, result, elapsed);
|
||||
// Flush buffered sibling results.
|
||||
for (desc, result, elapsed) in sibling_results.values() {
|
||||
self.force_report_step_result(desc, result, *elapsed);
|
||||
}
|
||||
} else {
|
||||
sibling_results
|
||||
.insert(desc.id, (desc.clone(), result.clone(), elapsed));
|
||||
}
|
||||
}
|
||||
self.force_report_step_result(description, result, elapsed);
|
||||
}
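The non-parallel branch above buffers a step result keyed by its parent whenever the step is out of scope, and flushes the buffered siblings once the parent becomes the active scope. A minimal self-contained sketch of that buffering pattern, with hypothetical simplified types standing in for the reporter's real ones:

// --- illustrative sketch (not part of the diff) ---
use std::collections::HashMap;

// Hypothetical stand-ins for the reporter's id and result types.
type StepId = usize;
type ParentId = usize;

struct Buffer {
  // Results that arrived while another test was the active scope,
  // keyed by parent so they can be flushed together.
  child_results_buffer: HashMap<ParentId, HashMap<StepId, String>>,
  scope_test_id: Option<usize>,
}

impl Buffer {
  fn report(&mut self, id: StepId, parent: ParentId, line: String) {
    let siblings = self.child_results_buffer.entry(parent).or_default();
    if self.scope_test_id == Some(id) || self.scope_test_id == Some(parent) {
      // In scope: print this result first, then flush buffered siblings,
      // mirroring the order used in the diff above.
      let siblings = std::mem::take(siblings);
      println!("{line}");
      for buffered in siblings.into_values() {
        println!("{buffered}");
      }
    } else {
      // Out of scope: hold the result until the parent scope is active.
      siblings.insert(id, line);
    }
  }
}

fn main() {
  let mut buf = Buffer {
    child_results_buffer: HashMap::new(),
    scope_test_id: Some(1),
  };
  buf.report(7, 2, "step a ... ok".to_string()); // buffered (parent 2 not in scope)
  buf.scope_test_id = Some(2);
  buf.report(8, 2, "step b ... ok".to_string()); // prints b, then flushes a
}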

  fn report_summary(&mut self, summary: &TestSummary, elapsed: &Duration) {

@ -528,14 +636,14 @@ impl PrettyTestReporter {
    #[allow(clippy::type_complexity)] // Type alias doesn't look better here
    let mut failures_by_origin: BTreeMap<
      String,
      (Vec<(&TestDescription, &JsError)>, Option<&JsError>),
      (Vec<(&TestDescription, &TestFailure)>, Option<&JsError>),
    > = BTreeMap::default();
    let mut failure_titles = vec![];
    for (description, js_error) in &summary.failures {
    for (description, failure) in &summary.failures {
      let (failures, _) = failures_by_origin
        .entry(description.origin.clone())
        .or_default();
      failures.push((description, js_error.as_ref()));
      failures.push((description, failure));
    }
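The loop above groups failures by origin through BTreeMap's entry API, which also means origins later print in sorted order. A hedged, minimal example of the same grouping pattern with placeholder data:

// --- illustrative sketch (not part of the diff) ---
use std::collections::BTreeMap;

fn main() {
  // (origin, failure message) pairs, a stand-in for summary.failures.
  let failures = [
    ("b.ts", "assertion failed"),
    ("a.ts", "leaked ops"),
    ("b.ts", "timed out"),
  ];
  let mut by_origin: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
  for (origin, msg) in failures {
    by_origin.entry(origin).or_default().push(msg);
  }
  // BTreeMap iterates keys in sorted order: a.ts before b.ts.
  for (origin, msgs) in &by_origin {
    println!("{origin}: {msgs:?}");
  }
}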
    for (origin, js_error) in &summary.uncaught_errors {
      let (_, uncaught_error) =

@ -544,27 +652,14 @@ impl PrettyTestReporter {
    }
    println!("\n{}\n", colors::white_bold_on_red(" ERRORS "));
    for (origin, (failures, uncaught_error)) in failures_by_origin {
      for (description, js_error) in failures {
        let failure_title = format!(
          "{} {}",
          &description.name,
          colors::gray(format!(
            "=> {}:{}:{}",
            self.to_relative_path_or_remote_url(
              &description.location.file_name
            ),
            description.location.line_number,
            description.location.column_number
          ))
        );
        println!("{}", &failure_title);
        println!(
          "{}: {}",
          colors::red_bold("error"),
          format_test_error(js_error)
        );
        println!();
        failure_titles.push(failure_title);
      for (description, failure) in failures {
        if !failure.hide_in_summary() {
          let failure_title = self.format_test_for_summary(description);
          println!("{}", &failure_title);
          println!("{}: {}", colors::red_bold("error"), failure.to_string());
          println!();
          failure_titles.push(failure_title);
        }
      }
      if let Some(js_error) = uncaught_error {
        let failure_title = format!(

@ -613,7 +708,7 @@ impl PrettyTestReporter {
        summary.passed,
        get_steps_text(summary.passed_steps),
        summary.failed,
        get_steps_text(summary.failed_steps + summary.pending_steps),
        get_steps_text(summary.failed_steps),
      )
      .unwrap();

@ -647,6 +742,98 @@ impl PrettyTestReporter {
    );
    self.in_new_line = true;
  }

  fn report_sigint(
    &mut self,
    tests_pending: &HashSet<usize>,
    tests: &IndexMap<usize, TestDescription>,
    test_steps: &IndexMap<usize, TestStepDescription>,
  ) {
    if tests_pending.is_empty() {
      return;
    }
    let mut formatted_pending = BTreeSet::new();
    for id in tests_pending {
      if let Some(desc) = tests.get(id) {
        formatted_pending.insert(self.format_test_for_summary(desc));
      }
      if let Some(desc) = test_steps.get(id) {
        formatted_pending
          .insert(self.format_test_step_for_summary(desc, tests, test_steps));
      }
    }
    println!(
      "\n{} The following tests were pending:\n",
      colors::intense_blue("SIGINT")
    );
    for entry in formatted_pending {
      println!("{}", entry);
    }
    println!();
    self.in_new_line = true;
  }
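For reference, given the format strings above (pending entries collected into a BTreeSet, so they print deduplicated and sorted), the SIGINT report should render roughly like this, with hypothetical test names and locations:

SIGINT The following tests were pending:

my test => ./tests/example_test.ts:5:1
my test ... inner step => ./tests/example_test.ts:9:3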

  fn format_test_step_ancestry(
    &self,
    desc: &TestStepDescription,
    tests: &IndexMap<usize, TestDescription>,
    test_steps: &IndexMap<usize, TestStepDescription>,
  ) -> String {
    let root;
    let mut ancestor_names = vec![];
    let mut current_desc = desc;
    loop {
      if let Some(step_desc) = test_steps.get(&current_desc.parent_id) {
        ancestor_names.push(&step_desc.name);
        current_desc = step_desc;
      } else {
        root = tests.get(&current_desc.parent_id).unwrap();
        break;
      }
    }
    ancestor_names.reverse();
    let mut result = String::new();
    result.push_str(&root.name);
    result.push_str(" ... ");
    for name in ancestor_names {
      result.push_str(name);
      result.push_str(" ... ");
    }
    result.push_str(&desc.name);
    result
  }
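The walk above follows parent_id links up to the root test, then reverses so names are joined from the outside in. A step nested two levels deep therefore formats as, for example (hypothetical names):

root test ... outer step ... inner step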

  fn format_test_for_summary(&self, desc: &TestDescription) -> String {
    format!(
      "{} {}",
      &desc.name,
      colors::gray(format!(
        "=> {}:{}:{}",
        self.to_relative_path_or_remote_url(&desc.location.file_name),
        desc.location.line_number,
        desc.location.column_number
      ))
    )
  }

  fn format_test_step_for_summary(
    &self,
    desc: &TestStepDescription,
    tests: &IndexMap<usize, TestDescription>,
    test_steps: &IndexMap<usize, TestStepDescription>,
  ) -> String {
    let long_name = self.format_test_step_ancestry(desc, tests, test_steps);
    format!(
      "{} {}",
      long_name,
      colors::gray(format!(
        "=> {}:{}:{}",
        self.to_relative_path_or_remote_url(&desc.location.file_name),
        desc.location.line_number,
        desc.location.column_number
      ))
    )
  }
}

fn abbreviate_test_error(js_error: &JsError) -> JsError {

@ -1007,6 +1194,12 @@ async fn test_specifiers(
  let sender = TestEventSender::new(sender);
  let concurrent_jobs = options.concurrent_jobs;

  let sender_ = sender.downgrade();
  let sigint_handler_handle = tokio::task::spawn(async move {
    signal::ctrl_c().await.unwrap();
    sender_.upgrade().map(|s| s.send(TestEvent::Sigint).ok());
  });
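The spawned task above is the usual tokio Ctrl-C wiring: wait for the signal once, then try to push an event through a weak sender so the handler never keeps the channel alive on its own. A minimal standalone sketch of the same pattern, assuming a recent tokio with mpsc sender downgrade support and using a hypothetical Event enum in place of TestEvent:

// --- illustrative sketch (not part of the diff) ---
use tokio::signal;
use tokio::sync::mpsc;

#[derive(Debug)]
enum Event {
  Sigint,
}

#[tokio::main]
async fn main() {
  let (sender, mut receiver) = mpsc::unbounded_channel::<Event>();

  // Hold only a weak handle so this task does not keep the channel open.
  let weak_sender = sender.downgrade();
  let sigint_handle = tokio::task::spawn(async move {
    signal::ctrl_c().await.unwrap();
    // upgrade() fails once every strong sender has been dropped.
    weak_sender.upgrade().map(|s| s.send(Event::Sigint).ok());
  });

  // ... an event loop would consume `receiver` here ...
  drop(sender);
  assert!(receiver.recv().await.is_none()); // channel closed cleanly
  sigint_handle.abort();
}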

  let join_handles =
    specifiers_with_mode
      .into_iter()

@ -1060,6 +1253,7 @@ async fn test_specifiers(
  let earlier = Instant::now();
  let mut tests = IndexMap::new();
  let mut test_steps = IndexMap::new();
  let mut tests_started = HashSet::new();
  let mut tests_with_result = HashSet::new();
  let mut summary = TestSummary::new();
  let mut used_only = false;

@ -1083,7 +1277,9 @@ async fn test_specifiers(
      }

      TestEvent::Wait(id) => {
        reporter.report_wait(tests.get(&id).unwrap());
        if tests_started.insert(id) {
          reporter.report_wait(tests.get(&id).unwrap());
        }
      }

      TestEvent::Output(output) => {

@ -1100,9 +1296,11 @@ async fn test_specifiers(
          TestResult::Ignored => {
            summary.ignored += 1;
          }
          TestResult::Failed(error) => {
          TestResult::Failed(failure) => {
            summary.failed += 1;
            summary.failures.push((description.clone(), error.clone()));
            summary
              .failures
              .push((description.clone(), failure.clone()));
          }
          TestResult::Cancelled => {
            unreachable!("should be handled in TestEvent::UncaughtError");

@ -1130,36 +1328,65 @@ async fn test_specifiers(
      }

      TestEvent::StepWait(id) => {
        reporter.report_step_wait(test_steps.get(&id).unwrap());
        if tests_started.insert(id) {
          reporter.report_step_wait(test_steps.get(&id).unwrap());
        }
      }

      TestEvent::StepResult(id, result, duration) => {
        match &result {
          TestStepResult::Ok => {
            summary.passed_steps += 1;
        if tests_with_result.insert(id) {
          let description = test_steps.get(&id).unwrap();
          match &result {
            TestStepResult::Ok => {
              summary.passed_steps += 1;
            }
            TestStepResult::Ignored => {
              summary.ignored_steps += 1;
            }
            TestStepResult::Failed(failure) => {
              summary.failed_steps += 1;
              summary.failures.push((
                TestDescription {
                  id: description.id,
                  name: reporter.format_test_step_ancestry(
                    description,
                    &tests,
                    &test_steps,
                  ),
                  origin: description.origin.clone(),
                  location: description.location.clone(),
                },
                failure.clone(),
              ))
            }
          }
          TestStepResult::Ignored => {
            summary.ignored_steps += 1;
          }
          TestStepResult::Failed(_) => {
            summary.failed_steps += 1;
          }
          TestStepResult::Pending(_) => {
            summary.pending_steps += 1;
          }
        }

        reporter.report_step_result(
          test_steps.get(&id).unwrap(),
          &result,
          duration,
          reporter.report_step_result(
            description,
            &result,
            duration,
            &tests,
            &test_steps,
          );
        }
      }

      TestEvent::Sigint => {
        reporter.report_sigint(
          &tests_started
            .difference(&tests_with_result)
            .copied()
            .collect(),
          &tests,
          &test_steps,
        );
        std::process::exit(130);
      }
    }
  }

  sigint_handler_handle.abort();

  let elapsed = Instant::now().duration_since(earlier);
  reporter.report_summary(&summary, &elapsed);
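The Sigint arm above derives the pending set as started-minus-finished over the two HashSets maintained by the event loop. A small self-contained example of that set difference with placeholder ids:

// --- illustrative sketch (not part of the diff) ---
use std::collections::HashSet;

fn main() {
  let tests_started: HashSet<usize> = [1, 2, 3].into_iter().collect();
  let tests_with_result: HashSet<usize> = [1].into_iter().collect();

  // Tests that were reported as started but never produced a result.
  let pending: HashSet<usize> = tests_started
    .difference(&tests_with_result)
    .copied()
    .collect();

  let expected: HashSet<usize> = [2, 3].into_iter().collect();
  assert_eq!(pending, expected);
}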

@ -1598,6 +1825,10 @@ impl TestEventSender {
    Ok(())
  }

  fn downgrade(&self) -> WeakUnboundedSender<TestEvent> {
    self.sender.downgrade()
  }
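Exposing downgrade() is what lets the SIGINT task observe the channel without extending its lifetime: once every strong sender is dropped, the receiver loop ends and upgrade() on the weak handle yields None. A tiny sketch of that property, assuming a recent tokio with WeakUnboundedSender:

// --- illustrative sketch (not part of the diff) ---
use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
  let (sender, mut receiver) = mpsc::unbounded_channel::<u32>();
  let weak = sender.downgrade();

  drop(sender); // last strong sender gone
  assert!(weak.upgrade().is_none()); // weak handle cannot revive the channel
  assert!(receiver.recv().await.is_none()); // receiver sees the channel as closed
}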

  fn flush_stdout_and_stderr(&mut self) -> Result<(), AnyError> {
    self.stdout_writer.flush()?;
    self.stderr_writer.flush()?;