mirror of
https://github.com/denoland/deno.git
synced 2024-11-21 15:04:11 -05:00
Improve integration test harness (#1142)
This commit is contained in:
parent
e93d686e9d
commit
4e07783663
34 changed files with 188 additions and 65 deletions
2
tests/001_hello.test
Normal file
2
tests/001_hello.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/001_hello.js --reload
|
||||
output: tests/002_hello.ts.out
|
2
tests/002_hello.test
Normal file
2
tests/002_hello.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/002_hello.ts --reload
|
||||
output: tests/002_hello.ts.out
|
2
tests/003_relative_import.test
Normal file
2
tests/003_relative_import.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/003_relative_import.ts --reload
|
||||
output: tests/003_relative_import.ts.out
|
2
tests/004_set_timeout.test
Normal file
2
tests/004_set_timeout.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/004_set_timeout.ts --reload
|
||||
output: tests/004_set_timeout.ts.out
|
2
tests/005_more_imports.test
Normal file
2
tests/005_more_imports.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/005_more_imports.ts --reload
|
||||
output: tests/005_more_imports.ts.out
|
2
tests/006_url_imports.test
Normal file
2
tests/006_url_imports.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/006_url_imports.ts --reload
|
||||
output: tests/006_url_imports.ts.out
|
2
tests/010_set_interval.test
Normal file
2
tests/010_set_interval.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/010_set_interval.ts --reload
|
||||
output: tests/010_set_interval.ts.out
|
2
tests/012_async.test
Normal file
2
tests/012_async.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/012_async.ts --reload
|
||||
output: tests/012_async.ts.out
|
2
tests/013_dynamic_import.test
Normal file
2
tests/013_dynamic_import.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/013_dynamic_import.ts --reload
|
||||
output: tests/013_dynamic_import.ts.out
|
2
tests/014_duplicate_import.test
Normal file
2
tests/014_duplicate_import.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/014_duplicate_import.ts --reload
|
||||
output: tests/014_duplicate_import.ts.out
|
2
tests/015_import_no_ext.test
Normal file
2
tests/015_import_no_ext.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/015_import_no_ext.ts --reload
|
||||
output: tests/015_import_no_ext.ts.out
|
2
tests/016_double_await.test
Normal file
2
tests/016_double_await.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/016_double_await.ts --reload
|
||||
output: tests/016_double_await.ts.out
|
2
tests/017_import_redirect.test
Normal file
2
tests/017_import_redirect.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/017_import_redirect.ts --reload
|
||||
output: tests/017_import_redirect.ts.out
|
2
tests/018_async_catch.test
Normal file
2
tests/018_async_catch.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/018_async_catch.ts --reload
|
||||
output: tests/018_async_catch.ts.out
|
2
tests/019_media_types.test
Normal file
2
tests/019_media_types.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/019_media_types.ts --reload
|
||||
output: tests/019_media_types.ts.out
|
2
tests/020_json_modules.test
Normal file
2
tests/020_json_modules.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/020_json_modules.ts --reload
|
||||
output: tests/020_json_modules.ts.out
|
15
tests/README.md
Normal file
15
tests/README.md
Normal file
|
@ -0,0 +1,15 @@
|
|||
# Integration Tests
|
||||
|
||||
This path contains integration tests. When the integration tests are run, the
|
||||
test harness will execute tests which are defined in a `.test` file and located
|
||||
in the base of this path.
|
||||
|
||||
A `.test` file is a simple configuration format where each option is specified
|
||||
on a single line. The key is the string to the left of the `:` delimiter and
|
||||
the value is the string to the right.
|
||||
|
||||
| Key | Required | Description |
|
||||
| ----------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `args`      | Yes      | Specifies the command line arguments for the test. This should typically be the input script for the test and a `--reload` to help ensure Deno doesn't leverage the cache. |
|
||||
| `output`    | Yes      | This is a text file which represents the output of the command. The string `[WILDCARD]` can be used in the output to specify ranges of text where any output is accepted. |
|
||||
| `exit_code` | No | If not present, it is assumed the script would exit normally (`0`). If specified, the harness will ensure the proper code is received. |
|
3
tests/async_error.test
Normal file
3
tests/async_error.test
Normal file
|
@ -0,0 +1,3 @@
|
|||
exit_code: 1
|
||||
args: tests/async_error.ts --reload
|
||||
output: tests/async_error.ts.out
|
3
tests/error_001.test
Normal file
3
tests/error_001.test
Normal file
|
@ -0,0 +1,3 @@
|
|||
args: tests/error_001.ts --reload
|
||||
exit_code: 1
|
||||
output: tests/error_001.ts.out
|
3
tests/error_002.test
Normal file
3
tests/error_002.test
Normal file
|
@ -0,0 +1,3 @@
|
|||
args: tests/error_002.ts --reload
|
||||
exit_code: 1
|
||||
output: tests/error_002.ts.out
|
3
tests/error_003_typescript.test
Normal file
3
tests/error_003_typescript.test
Normal file
|
@ -0,0 +1,3 @@
|
|||
args: tests/error_003_typescript.ts --reload
|
||||
exit_code: 1
|
||||
output: tests/error_003_typescript.ts.out
|
3
tests/error_004_missing_module.test
Normal file
3
tests/error_004_missing_module.test
Normal file
|
@ -0,0 +1,3 @@
|
|||
args: tests/error_004_missing_module.ts --reload
|
||||
exit_code: 1
|
||||
output: tests/error_004_missing_module.ts.out
|
3
tests/error_005_missing_dynamic_import.test
Normal file
3
tests/error_005_missing_dynamic_import.test
Normal file
|
@ -0,0 +1,3 @@
|
|||
args: tests/error_005_missing_dynamic_import.ts --reload
|
||||
exit_code: 1
|
||||
output: tests/error_005_missing_dynamic_import.ts.out
|
3
tests/error_006_import_ext_failure.test
Normal file
3
tests/error_006_import_ext_failure.test
Normal file
|
@ -0,0 +1,3 @@
|
|||
args: tests/error_006_import_ext_failure.ts --reload
|
||||
exit_code: 1
|
||||
output: tests/error_006_import_ext_failure.ts.out
|
3
tests/error_007_any.test
Normal file
3
tests/error_007_any.test
Normal file
|
@ -0,0 +1,3 @@
|
|||
args: tests/error_007_any.ts --reload
|
||||
exit_code: 1
|
||||
output: tests/error_007_any.ts.out
|
3
tests/error_008_checkjs.test
Normal file
3
tests/error_008_checkjs.test
Normal file
|
@ -0,0 +1,3 @@
|
|||
args: tests/error_008_checkjs.js --reload
|
||||
exit_code: 1
|
||||
output: tests/error_008_checkjs.js.out
|
3
tests/exit_error42.test
Normal file
3
tests/exit_error42.test
Normal file
|
@ -0,0 +1,3 @@
|
|||
exit_code: 42
|
||||
args: tests/exit_error42.ts --reload
|
||||
output: tests/exit_error42.ts.out
|
2
tests/https_import.test
Normal file
2
tests/https_import.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: tests/https_import.ts --reload
|
||||
output: tests/https_import.ts.out
|
15
tests/types.out
Normal file
15
tests/types.out
Normal file
|
@ -0,0 +1,15 @@
|
|||
// Copyright 2018 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
/// <reference no-default-lib="true" />
|
||||
/// <reference lib="esnext" />
|
||||
[WILDCARD]
|
||||
declare module "deno" {
|
||||
[WILDCARD]
|
||||
}
|
||||
|
||||
declare interface Window {
|
||||
[WILDCARD]
|
||||
}
|
||||
|
||||
declare const window: Window;
|
||||
[WILDCARD]
|
2
tests/types.test
Normal file
2
tests/types.test
Normal file
|
@ -0,0 +1,2 @@
|
|||
args: --types
|
||||
output: tests/types.out
|
|
@ -1,63 +0,0 @@
|
|||
#!/usr/bin/env python
# Copyright 2018 the Deno authors. All rights reserved. MIT license.
# Given a deno executable, this script executes several integration tests
# with it. The tests are stored in //tests/ and each script has a corresponding
# .out file which specifies what the stdout should be.
#
# Usage: check_output_test.py [path to deno executable]
import os
import sys
import subprocess
from util import pattern_match, parse_exit_code

root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
tests_path = os.path.join(root_path, "tests")


def check_output_test(deno_exe_filename):
    """Run every tests/*.out case against the given deno executable.

    For each tests/<script>.out file, runs `deno <script> --reload` and
    checks both the exit code (encoded in the script name, decoded by
    parse_exit_code) and stdout (matched by pattern_match, which supports
    [WILDCARD] ranges). Exits the process with status 1 on the first
    failing test.
    """
    assert os.path.isfile(deno_exe_filename)
    outs = sorted(filename for filename in os.listdir(tests_path)
                  if filename.endswith(".out"))
    assert len(outs) > 1
    tests = [(os.path.splitext(filename)[0], filename) for filename in outs]
    for (script, out_filename) in tests:
        script_abs = os.path.join(tests_path, script)
        out_abs = os.path.join(tests_path, out_filename)
        with open(out_abs, 'r') as f:
            expected_out = f.read()
        cmd = [deno_exe_filename, script_abs, "--reload"]
        expected_code = parse_exit_code(script)
        # Single-argument parenthesized print works under Python 2 and 3.
        print(" ".join(cmd))
        actual_code = 0
        try:
            actual_out = subprocess.check_output(cmd, universal_newlines=True)
        except subprocess.CalledProcessError as e:
            actual_code = e.returncode
            actual_out = e.output
            if expected_code == 0:
                print("Expected success but got error. Output:")
                print(actual_out)
                sys.exit(1)

        if expected_code != actual_code:
            print("Expected exit code %d but got %d" % (expected_code,
                                                        actual_code))
            print("Output:")
            print(actual_out)
            sys.exit(1)

        # Truthiness check instead of comparing against the literal True:
        # a truthy non-True return from pattern_match must also count as a
        # match.
        if not pattern_match(expected_out, actual_out):
            print("Expected output does not match actual.")
            print("Expected: " + expected_out)
            print("Actual: " + actual_out)
            sys.exit(1)


def main(argv):
    check_output_test(argv[1])


if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
78
tools/integration_tests.py
Executable file
78
tools/integration_tests.py
Executable file
|
@ -0,0 +1,78 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2018 the Deno authors. All rights reserved. MIT license.
|
||||
# Given a deno executable, this script executes several integration tests with
|
||||
# it. The tests are stored in /tests/ and each is specified in a .yaml file
|
||||
# where a description, command line, and output are specified. Optionally an
|
||||
# exit code can be specified.
|
||||
#
|
||||
# Usage: integration_tests.py [path to deno executable]
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import subprocess
|
||||
from util import pattern_match, green_ok, red_failed
|
||||
|
||||
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
||||
tests_path = os.path.join(root_path, "tests")
|
||||
|
||||
|
||||
def read_test(file_name):
    """Parse a `.test` file into a dict mapping option name -> value.

    Each non-empty line has the form `key: value`. The split is capped at
    the first `:<whitespace>` so values that themselves contain a colon
    followed by a space (e.g. URLs or descriptions) are kept intact;
    without maxsplit, re.split would yield three or more parts and the
    two-name unpack below would raise ValueError.
    """
    with open(file_name, "r") as f:
        test_file = f.read()
    lines = test_file.splitlines()
    test_dict = {}
    for line in lines:
        if not line.strip():
            # Tolerate blank lines rather than crashing on the unpack.
            continue
        key, value = re.split(r":\s+", line, maxsplit=1)
        test_dict[key] = value
    return test_dict
|
||||
|
||||
|
||||
def integration_tests(deno_executable):
    """Run every tests/*.test case against the given deno executable.

    Each .test file provides `args` (the command line), `output` (path,
    relative to the repo root, of a file holding the expected stdout —
    may contain [WILDCARD] ranges), and optionally `exit_code` (defaults
    to 0). Exits the process with status 1 on the first failing test.
    """
    assert os.path.isfile(deno_executable)
    tests = sorted(filename for filename in os.listdir(tests_path)
                   if filename.endswith(".test"))
    assert len(tests) > 0
    for test_filename in tests:
        test_abs = os.path.join(tests_path, test_filename)
        test = read_test(test_abs)
        exit_code = int(test.get("exit_code", 0))
        args = test.get("args", "").split(" ")
        output_abs = os.path.join(root_path, test.get("output", ""))
        with open(output_abs, 'r') as f:
            expected_out = f.read()
        cmd = [deno_executable] + args
        # Single-argument parenthesized print works under Python 2 and 3.
        print("test %s" % test_filename)
        print(" ".join(cmd))
        actual_code = 0
        try:
            actual_out = subprocess.check_output(cmd, universal_newlines=True)
        except subprocess.CalledProcessError as e:
            actual_code = e.returncode
            actual_out = e.output

        if exit_code != actual_code:
            print("... " + red_failed())
            print("Expected exit code %d but got %d" % (exit_code,
                                                        actual_code))
            print("Output:")
            print(actual_out)
            sys.exit(1)

        # Truthiness check instead of comparing against the literal True:
        # a truthy non-True return from pattern_match must also count as a
        # match.
        if not pattern_match(expected_out, actual_out):
            print("... " + red_failed())
            print("Expected output does not match actual.")
            print("Expected output: \n" + expected_out)
            print("Actual output: \n" + actual_out)
            sys.exit(1)

        print("... " + green_ok())
|
||||
|
||||
|
||||
def main(argv):
    """Entry point: argv[1] must be the path to the deno executable."""
    deno_exe = argv[1]
    integration_tests(deno_exe)


if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
|
@ -4,7 +4,7 @@
|
|||
# Usage: ./tools/test.py out/Debug
|
||||
import os
|
||||
import sys
|
||||
from check_output_test import check_output_test
|
||||
from integration_tests import integration_tests
|
||||
from deno_dir_test import deno_dir_test
|
||||
from setup_test import setup_test
|
||||
from util import build_path, enable_ansi_colors, executable_suffix, run, rmtree
|
||||
|
@ -58,7 +58,7 @@ def main(argv):
|
|||
|
||||
unit_tests(deno_exe)
|
||||
|
||||
check_output_test(deno_exe)
|
||||
integration_tests(deno_exe)
|
||||
|
||||
# TODO We currently skip testing the prompt in Windows completely.
|
||||
# Windows does not support the pty module used for testing the permission
|
||||
|
|
|
@ -6,6 +6,10 @@ import stat
|
|||
import sys
|
||||
import subprocess
|
||||
|
||||
RESET = "\x1b[0m"
|
||||
FG_RED = "\x1b[31m"
|
||||
FG_GREEN = "\x1b[32m"
|
||||
|
||||
executable_suffix = ".exe" if os.name == "nt" else ""
|
||||
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
||||
|
||||
|
@ -73,6 +77,14 @@ def shell_quote(arg):
|
|||
return quote(arg)
|
||||
|
||||
|
||||
def red_failed():
    """Return the word FAILED wrapped in ANSI red color codes."""
    return FG_RED + "FAILED" + RESET
|
||||
|
||||
|
||||
def green_ok():
    """Return the word ok wrapped in ANSI green color codes."""
    return FG_GREEN + "ok" + RESET
|
||||
|
||||
|
||||
def remove_and_symlink(target, name, target_is_dir=False):
|
||||
try:
|
||||
# On Windows, directory symlink can only be removed with rmdir().
|
||||
|
|
Loading…
Reference in a new issue