
chore(tests/specs): ability to have sub tests in file (#23667)

Allows writing named sub-tests. These are:

1. Filterable on the command line via `cargo test ...`
2. Run in parallel
3. Use a fresh temp and deno dir for each test (unlike steps)
David Sherret 2024-05-03 00:49:42 -04:00 committed by GitHub
parent b7945a218e
commit 3e98ea4e69
13 changed files with 383 additions and 202 deletions
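
For orientation before the diffs, here is a minimal sketch of the multi-test manifest shape this change introduces (the test names, script names, and output files below are hypothetical; the `tests` object itself follows the README update further down):

```jsonc
{
  // each named entry becomes its own sub-test, run in parallel
  // and given a fresh temp dir and deno dir (unlike steps)
  "tests": {
    "run": {
      "args": "run main.ts",
      "output": "main.out"
    },
    "info": {
      "args": "info main.ts",
      "output": "main_info.out"
    }
  }
}
```

Because the runner names each sub-test `{parent}::{name}` (see `into_collected_tests` in the spec test runner diff), a single sub-test should be selectable with something like `cargo test some_dir::run`, though the exact filter string depends on how the parent test name is derived from the directory.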

Cargo.lock (generated)

@@ -2634,9 +2634,9 @@ checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f"
 [[package]]
 name = "file_test_runner"
-version = "0.4.1"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f33b00489de0a5fd03df89aefe9fa55da5da3c1a207ea19cd381d1de7e6204b"
+checksum = "cc644d2903f00e5f0e5d34dca805c7a100b09a1d257e07697101d90eb10d3351"
 dependencies = [
  "anyhow",
  "crossbeam-channel",


@@ -43,7 +43,7 @@ deno_lockfile.workspace = true
 deno_terminal.workspace = true
 deno_tls.workspace = true
 fastwebsockets = { workspace = true, features = ["upgrade", "unstable-split"] }
-file_test_runner = "0.4.1"
+file_test_runner = "0.5.0"
 flaky_test = "=0.1.0"
 http.workspace = true
 http-body-util.workspace = true


@@ -28,7 +28,7 @@ cargo test test_name
 ## `__test__.json` file
 
-This file describes the test to execute and the steps to execute. A basic
+This file describes the test(s) to execute and the steps to execute. A basic
 example looks like:
 
 ```json
@@ -57,6 +57,23 @@ Or another example that runs multiple steps:
 }
 ```
 
+Or if you want to run several tests at the same time:
+
+```json
+{
+  "tests": {
+    "ignore_dir": {
+      "args": "run script.ts",
+      "output": "script.out"
+    },
+    "some_other_test": {
+      "args": "run other.ts",
+      "output": "other.out"
+    }
+  }
+}
+```
+
 ### Top level properties
 
 - `base` - The base config to use for the test. Options:
@@ -71,13 +88,12 @@ Or another example that runs multiple steps:
 ### Step properties
 
 When writing a single step, these may be at the top level rather than nested in
-a "steps" array.
+a "steps" array or "tests" object.
 
 - `args` - A string (that will be spilt on whitespace into an args array) or an
   array of arguments.
-- `output` - Path to use to assert the output.
-- `cleanDenoDir` (boolean) - Whether to empty the deno_dir before running the
-  step.
+- `output` - Path to use to assert the output or text (must end with an .out
+  extension) _or_ text to pattern match against the output.
 - `flaky` - Step should be repeated until success a maximum of 3 times.
 - `if` (`"windows"`, `"linux"`, `"mac"`, `"unix"`) - Whether to run this step.
 - `exitCode` (number) - Expected exit code.
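
To illustrate the updated `output` property documented above, a hypothetical manifest could assert against either an `.out` file or inline pattern text. Whether inline text supports the same `[WILDCARD]`-style matching used in `.out` files is an assumption here:

```jsonc
{
  "tests": {
    // assert the program output against the contents of main.out
    "output_file": {
      "args": "run --quiet main.ts",
      "output": "main.out"
    },
    // assert the program output against inline pattern text
    // ([WILDCARD] matching assumed to apply as in .out files)
    "output_inline": {
      "args": "run --quiet main.ts",
      "output": "Hello [WILDCARD]\n"
    }
  }
}
```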


@@ -1,19 +1,21 @@
 {
-  "steps": [{
+  "tests": {
+    "ignore": {
       "args": "bench --ignore=collect/ignore collect",
       "output": "collect.out"
-  }, {
-    "cleanDenoDir": true,
+    },
+    "config_sub_dir": {
       "args": "bench --config collect/deno.jsonc collect",
       "output": "collect.out"
-  }, {
-    "cleanDenoDir": true,
+    },
+    "config_sub_dir_with_exclude": {
       "args": "bench --config collect/deno2.jsonc collect",
       "output": "collect2.out"
-  }, {
-    "cleanDenoDir": true,
+    },
+    "config_malformed": {
       "args": "bench --config collect/deno.malformed.jsonc",
       "exitCode": 1,
       "output": "collect_with_malformed_config.out"
-  }]
+    }
+  }
 }


@@ -1,28 +1,34 @@
 {
-  "steps": [{
+  "tests": {
+    "run_import_map": {
       "args": "run --quiet --reload --import-map=import_map.json test.ts",
       "output": "run.out"
-  }, {
+    },
+    "un_invalid_import_map": {
       "args": "run --quiet --reload --import-map=import_map_invalid.json --config=config.json test.ts",
       "output": "flag_has_precedence.out",
       "exitCode": 1
-  }, {
+    },
+    "run_config": {
       "args": "run --reload --config=config.json test.ts",
       "output": "config.out"
-  }, {
-    "cleanDenoDir": true,
+    },
+    "cache": {
       "args": "cache --quiet --reload --import-map=import_map.json test.ts",
       "output": "cache.out"
-  }, {
-    "cleanDenoDir": true,
+    },
+    "info": {
       "args": "info --quiet --import-map=import_map.json test.ts",
       "output": "info.out"
-  }, {
+    },
+    "unmapped_bare_specifier": {
       "args": "run --quiet --reload --import-map=import_map.json unmapped_bare_specifier.ts",
       "output": "unmapped_bare_specifier.out",
       "exitCode": 1
-  }, {
+    },
+    "data_url": {
       "args": "run --quiet --reload --import-map import_map.json import_data_url.ts",
       "output": "import_data_url.out"
-  }]
+    }
+  }
 }


@@ -1,10 +1,12 @@
 {
-  "steps": [{
+  "tests": {
+    "run": {
       "args": "run main.ts",
       "output": "main.out"
-  }, {
-    "cleanDenoDir": true,
+    },
+    "info": {
       "args": "info main.ts",
       "output": "main_info.out"
-  }]
+    }
+  }
 }


@@ -1,12 +1,14 @@
 {
-  "steps": [{
+  "tests": {
+    "analyzable": {
       "args": "run -A analyzable.ts",
       "output": "analyzable.out",
       "exitCode": 1
-  }, {
-    "cleanDenoDir": true,
+    },
+    "non_analyzable": {
       "args": "run -A nonanalyzable.ts",
       "output": "nonanalyzable.out",
       "exitCode": 1
-  }]
+    }
+  }
 }


@@ -1,10 +1,12 @@
 {
-  "steps": [{
+  "tests": {
+    "run": {
       "args": "run --log-level=debug main.ts",
       "output": "main.out"
-  }, {
-    "cleanDenoDir": true,
+    },
+    "info": {
       "args": "info main.ts",
       "output": "main_info.out"
-  }]
+    }
+  }
 }


@@ -1,10 +1,12 @@
 {
-  "steps": [{
+  "tests": {
+    "run": {
       "args": "run --log-level=debug main.ts",
       "output": "main.out"
-  }, {
-    "cleanDenoDir": true,
+    },
+    "info": {
       "args": "info main.ts",
       "output": "main_info.out"
-  }]
+    }
+  }
 }


@@ -1,13 +1,16 @@
 {
-  "steps": [{
+  "tests": {
+    "single": {
       "args": "run main.ts",
       "output": "main.out"
-  }, {
-    "cleanDenoDir": true,
+    },
+    "single_info": {
       "args": "info main.ts",
       "output": "main_info.out"
-  }, {
+    },
+    "multiple": {
       "args": "run --quiet multiple.ts",
       "output": "multiple.out"
-  }]
+    }
+  }
 }


@@ -1,6 +1,7 @@
 // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
 
 use std::cell::RefCell;
+use std::collections::BTreeMap;
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::panic::AssertUnwindSafe;
@@ -10,9 +11,14 @@ use std::sync::Arc;
 use deno_core::anyhow::Context;
 use deno_core::serde_json;
 use file_test_runner::collection::collect_tests_or_exit;
+use file_test_runner::collection::strategies::FileTestMapperStrategy;
 use file_test_runner::collection::strategies::TestPerDirectoryCollectionStrategy;
 use file_test_runner::collection::CollectOptions;
+use file_test_runner::collection::CollectTestsError;
+use file_test_runner::collection::CollectedCategoryOrTest;
 use file_test_runner::collection::CollectedTest;
+use file_test_runner::collection::CollectedTestCategory;
+use file_test_runner::TestResult;
 use serde::Deserialize;
 use test_util::tests_path;
 use test_util::PathRef;
@@ -27,6 +33,8 @@ enum VecOrString {
   String(String),
 }
 
+type JsonMap = serde_json::Map<String, serde_json::Value>;
+
 #[derive(Clone, Deserialize)]
 #[serde(deny_unknown_fields, rename_all = "camelCase")]
 struct MultiTestMetaData {
@@ -40,6 +48,68 @@ struct MultiTestMetaData {
   pub base: Option<String>,
   #[serde(default)]
   pub envs: HashMap<String, String>,
+  #[serde(default)]
+  pub tests: BTreeMap<String, JsonMap>,
+}
+
+impl MultiTestMetaData {
+  pub fn into_collected_tests(
+    mut self,
+    parent_test: &CollectedTest,
+  ) -> Vec<CollectedTest<serde_json::Value>> {
+    fn merge_json_value(
+      multi_test_meta_data: &MultiTestMetaData,
+      value: &mut JsonMap,
+    ) {
+      if let Some(base) = &multi_test_meta_data.base {
+        if !value.contains_key("base") {
+          value.insert("base".to_string(), base.clone().into());
+        }
+      }
+      if multi_test_meta_data.temp_dir && !value.contains_key("tempDir") {
+        value.insert("tempDir".to_string(), true.into());
+      }
+      if !multi_test_meta_data.envs.is_empty() {
+        if !value.contains_key("envs") {
+          value.insert("envs".to_string(), JsonMap::default().into());
+        }
+        let envs_obj = value.get_mut("envs").unwrap().as_object_mut().unwrap();
+        for (key, value) in &multi_test_meta_data.envs {
+          if !envs_obj.contains_key(key) {
+            envs_obj.insert(key.into(), value.clone().into());
+          }
+        }
+      }
+    }
+
+    let mut collected_tests = Vec::with_capacity(self.tests.len());
+    for (name, mut json_data) in std::mem::take(&mut self.tests) {
+      merge_json_value(&self, &mut json_data);
+      collected_tests.push(CollectedTest {
+        name: format!("{}::{}", parent_test.name, name),
+        path: parent_test.path.clone(),
+        data: serde_json::Value::Object(json_data),
+      });
+    }
+
+    collected_tests
+  }
+}
+
+#[derive(Clone, Deserialize)]
+#[serde(deny_unknown_fields, rename_all = "camelCase")]
+struct MultiStepMetaData {
+  /// Whether to copy all the non-assertion files in the current
+  /// test directory to a temporary directory before running the
+  /// steps.
+  #[serde(default)]
+  pub temp_dir: bool,
+  /// The base environment to use for the test.
+  #[serde(default)]
+  pub base: Option<String>,
+  #[serde(default)]
+  pub envs: HashMap<String, String>,
+  #[serde(default)]
   pub steps: Vec<StepMetaData>,
 }
@@ -55,8 +125,8 @@ struct SingleTestMetaData {
 }
 
 impl SingleTestMetaData {
-  pub fn into_multi(self) -> MultiTestMetaData {
-    MultiTestMetaData {
+  pub fn into_multi(self) -> MultiStepMetaData {
+    MultiStepMetaData {
       base: self.base,
       temp_dir: self.temp_dir,
       envs: Default::default(),
@@ -68,9 +138,6 @@ impl SingleTestMetaData {
 #[derive(Clone, Deserialize)]
 #[serde(deny_unknown_fields, rename_all = "camelCase")]
 struct StepMetaData {
-  /// Whether to clean the deno_dir before running the step.
-  #[serde(default)]
-  pub clean_deno_dir: bool,
   /// If the test should be retried multiple times on failure.
   #[serde(default)]
   pub flaky: bool,
@@ -87,10 +154,14 @@ struct StepMetaData {
 }
 
 pub fn main() {
-  let root_category = collect_tests_or_exit(CollectOptions {
-    base: tests_path().join("specs").to_path_buf(),
-    strategy: Box::new(TestPerDirectoryCollectionStrategy {
-      file_name: MANIFEST_FILE_NAME.to_string(),
-    }),
-    filter_override: None,
-  });
+  let root_category =
+    collect_tests_or_exit::<serde_json::Value>(CollectOptions {
+      base: tests_path().join("specs").to_path_buf(),
+      strategy: Box::new(FileTestMapperStrategy {
+        base_strategy: TestPerDirectoryCollectionStrategy {
+          file_name: MANIFEST_FILE_NAME.to_string(),
+        },
+        map: map_test_within_file,
+      }),
+      filter_override: None,
+    });
@@ -103,49 +174,106 @@ pub fn main() {
   file_test_runner::run_tests(
     &root_category,
     file_test_runner::RunOptions { parallel: true },
-    Arc::new(|test| {
-      let diagnostic_logger = Rc::new(RefCell::new(Vec::<u8>::new()));
-      let result = file_test_runner::TestResult::from_maybe_panic(
-        AssertUnwindSafe(|| run_test(test, diagnostic_logger.clone())),
-      );
+    Arc::new(run_test),
+  );
+}
+
+/// Maps a __test__.jsonc file to a category of tests if it contains a "test" object.
+fn map_test_within_file(
+  test: CollectedTest,
+) -> Result<CollectedCategoryOrTest<serde_json::Value>, CollectTestsError> {
+  let test_path = PathRef::new(&test.path);
+  let metadata_value = test_path.read_jsonc_value();
+  if metadata_value
+    .as_object()
+    .map(|o| o.contains_key("tests"))
+    .unwrap_or(false)
+  {
+    let data: MultiTestMetaData = serde_json::from_value(metadata_value)
+      .with_context(|| format!("Failed deserializing {}", test_path))
+      .map_err(CollectTestsError::Other)?;
+    Ok(CollectedCategoryOrTest::Category(CollectedTestCategory {
+      children: data
+        .into_collected_tests(&test)
+        .into_iter()
+        .map(CollectedCategoryOrTest::Test)
+        .collect(),
+      name: test.name,
+      path: test.path,
+    }))
+  } else {
+    Ok(CollectedCategoryOrTest::Test(CollectedTest {
+      name: test.name,
+      path: test.path,
+      data: metadata_value,
+    }))
+  }
+}
+
+fn run_test(test: &CollectedTest<serde_json::Value>) -> TestResult {
+  let cwd = PathRef::new(&test.path).parent();
+  let metadata_value = test.data.clone();
+  let diagnostic_logger = Rc::new(RefCell::new(Vec::<u8>::new()));
+  let result = TestResult::from_maybe_panic(AssertUnwindSafe(|| {
+    run_test_inner(metadata_value, &cwd, diagnostic_logger.clone())
+  }));
   match result {
-    file_test_runner::TestResult::Passed
-    | file_test_runner::TestResult::Ignored => result,
-    file_test_runner::TestResult::Failed {
+    TestResult::Failed {
       output: panic_output,
     } => {
       let mut output = diagnostic_logger.borrow().clone();
       output.push(b'\n');
       output.extend(panic_output);
-      file_test_runner::TestResult::Failed { output }
+      TestResult::Failed { output }
     }
-    file_test_runner::TestResult::Steps(_) => unreachable!(),
+    TestResult::Passed | TestResult::Ignored | TestResult::SubTests(_) => {
+      result
+    }
   }
-    }),
-  );
 }
 
-fn run_test(test: &CollectedTest, diagnostic_logger: Rc<RefCell<Vec<u8>>>) {
-  let metadata_path = PathRef::new(&test.path);
-  let metadata_value = metadata_path.read_jsonc_value();
+fn run_test_inner(
+  metadata_value: serde_json::Value,
+  cwd: &PathRef,
+  diagnostic_logger: Rc<RefCell<Vec<u8>>>,
+) {
+  let metadata = deserialize_value(metadata_value);
+  let context = test_context_from_metadata(&metadata, cwd, diagnostic_logger);
+  for step in metadata.steps.iter().filter(|s| should_run_step(s)) {
+    let run_func = || run_step(step, &metadata, cwd, &context);
+    if step.flaky {
+      run_flaky(run_func);
+    } else {
+      run_func();
+    }
+  }
+}
+
+fn deserialize_value(metadata_value: serde_json::Value) -> MultiStepMetaData {
   // checking for "steps" leads to a more targeted error message
   // instead of when deserializing an untagged enum
-  let metadata = if metadata_value
+  if metadata_value
     .as_object()
-    .and_then(|o| o.get("steps"))
-    .is_some()
+    .map(|o| o.contains_key("steps"))
+    .unwrap_or(false)
   {
-    serde_json::from_value::<MultiTestMetaData>(metadata_value)
+    serde_json::from_value::<MultiStepMetaData>(metadata_value)
   } else {
     serde_json::from_value::<SingleTestMetaData>(metadata_value)
      .map(|s| s.into_multi())
   }
-  .with_context(|| format!("Failed to parse {}", metadata_path))
-  .unwrap();
+  .context("Failed to parse test spec")
+  .unwrap()
+}
+
+fn test_context_from_metadata(
+  metadata: &MultiStepMetaData,
+  cwd: &PathRef,
+  diagnostic_logger: Rc<RefCell<Vec<u8>>>,
+) -> test_util::TestContext {
   let mut builder = TestContextBuilder::new();
   builder = builder.logging_capture(diagnostic_logger);
-  let cwd = PathRef::new(test.path.parent().unwrap());
 
   if metadata.temp_dir {
     builder = builder.use_temp_cwd();
@@ -171,18 +299,10 @@ fn run_test(test: &CollectedTest, diagnostic_logger: Rc<RefCell<Vec<u8>>>) {
     // copy all the files in the cwd to a temp directory
     // excluding the metadata and assertion files
     let temp_dir = context.temp_dir().path();
-    let assertion_paths = resolve_test_and_assertion_files(&cwd, &metadata);
+    let assertion_paths = resolve_test_and_assertion_files(cwd, metadata);
     cwd.copy_to_recursive_with_exclusions(temp_dir, &assertion_paths);
   }
 
-  for step in metadata.steps.iter().filter(|s| should_run_step(s)) {
-    let run_func = || run_step(step, &metadata, &cwd, &context);
-    if step.flaky {
-      run_flaky(run_func);
-    } else {
-      run_func();
-    }
-  }
+  context
 }
 
 fn should_run_step(step: &StepMetaData) -> bool {
@@ -213,14 +333,10 @@ fn run_flaky(action: impl Fn()) {
 fn run_step(
   step: &StepMetaData,
-  metadata: &MultiTestMetaData,
+  metadata: &MultiStepMetaData,
   cwd: &PathRef,
   context: &test_util::TestContext,
 ) {
-  if step.clean_deno_dir {
-    context.deno_dir().path().remove_dir_all();
-  }
-
   let command = context
     .new_command()
     .envs(metadata.envs.iter().chain(step.envs.iter()));
@@ -248,7 +364,7 @@ fn run_step(
 fn resolve_test_and_assertion_files(
   dir: &PathRef,
-  metadata: &MultiTestMetaData,
+  metadata: &MultiStepMetaData,
 ) -> HashSet<PathRef> {
   let mut result = HashSet::with_capacity(metadata.steps.len() + 1);
   result.insert(dir.join(MANIFEST_FILE_NAME));


@@ -1,21 +1,24 @@
 {
-  "steps": [{
+  "tests": {
+    "run": {
       "args": "run --allow-read --allow-env main.js",
       "output": "main.out"
-  }, {
-    "cleanDenoDir": true,
+    },
+    "test": {
       "args": "test --allow-read --allow-env test.js",
       "output": "test.out"
-  }, {
-    "cleanDenoDir": true,
+    },
+    "eval": {
       "args": [
         "eval",
         "import chalk from 'npm:chalk@5'; console.log(chalk.green('chalk esm loads'));"
       ],
       "output": "main.out"
-  }, {
+    },
+    "bundle": {
       "args": "bundle --quiet main.js",
       "output": "bundle.out",
       "exitCode": 1
-  }]
+    }
+  }
 }


@@ -21,9 +21,6 @@
         }
       }]
     },
-    "cleanDenoDir": {
-      "type": "boolean"
-    },
     "cwd": {
       "type": "string"
     },
@@ -55,8 +52,8 @@
         "type": "integer"
       }
     }
-  }
   },
+  "single_or_multi_step_test": {
   "oneOf": [{
     "required": ["steps"],
     "properties": {
@@ -99,4 +96,34 @@
       "$ref": "#/definitions/single_test"
     }]
   }]
+  },
+  "multi_test": {
+    "required": ["tests"],
+    "properties": {
+      "tempDir": {
+        "type": "boolean"
+      },
+      "base": {
+        "type": "string"
+      },
+      "envs": {
+        "type": "object",
+        "additionalProperties": {
+          "type": "string"
+        }
+      },
+      "tests": {
+        "type": "object",
+        "additionalProperties": {
+          "$ref": "#/definitions/single_or_multi_step_test"
+        }
+      }
+    }
+  }
+ },
+ "oneOf": [{
+  "$ref": "#/definitions/single_or_multi_step_test"
+ }, {
+  "$ref": "#/definitions/multi_test"
+ }]
 }