Mirror of https://github.com/denoland/deno.git (synced 2024-12-01 16:51:13 -05:00)

Commit bc44ae696d: Merge branch 'main' into support_create_connection
99 changed files with 3493 additions and 1049 deletions
.github/workflows/ci.generate.ts (vendored, 2 changed lines)
@@ -5,7 +5,7 @@ import { stringify } from "jsr:@std/yaml@^0.221/stringify";
 // Bump this number when you want to purge the cache.
 // Note: the tools/release/01_bump_crate_versions.ts script will update this version
 // automatically via regex, so ensure that this line maintains this format.
-const cacheVersion = 24;
+const cacheVersion = 25;

 const ubuntuX86Runner = "ubuntu-24.04";
 const ubuntuX86XlRunner = "ubuntu-24.04-xl";
.github/workflows/ci.yml (vendored, 8 changed lines)
@@ -361,8 +361,8 @@ jobs:
 path: |-
 ~/.cargo/registry/index
 ~/.cargo/registry/cache
-key: '24-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}'
-restore-keys: '24-cargo-home-${{ matrix.os }}-${{ matrix.arch }}'
+key: '25-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}'
+restore-keys: '25-cargo-home-${{ matrix.os }}-${{ matrix.arch }}'
 if: '!(matrix.skip)'
 - name: Restore cache build output (PR)
 uses: actions/cache/restore@v4
@@ -375,7 +375,7 @@ jobs:
 !./target/*/*.zip
 !./target/*/*.tar.gz
 key: never_saved
-restore-keys: '24-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-'
+restore-keys: '25-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-'
 - name: Apply and update mtime cache
 if: '!(matrix.skip) && (!startsWith(github.ref, ''refs/tags/''))'
 uses: ./.github/mtime_cache
@@ -685,7 +685,7 @@ jobs:
 !./target/*/*.zip
 !./target/*/*.sha256sum
 !./target/*/*.tar.gz
-key: '24-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}'
+key: '25-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}'
 publish-canary:
 name: publish canary
 runs-on: ubuntu-24.04
Cargo.lock (generated, 307 changed lines)
@@ -765,6 +765,8 @@ dependencies = [
 "fastwebsockets",
 "file_test_runner",
 "flaky_test",
+"hickory-client",
+"hickory-server",
 "http 1.1.0",
 "http-body-util",
 "hyper 1.4.1",
@@ -778,8 +780,6 @@ dependencies = [
 "serde",
 "test_server",
 "tokio",
-"trust-dns-client",
-"trust-dns-server",
 "url",
 "uuid",
 "zeromq",
@@ -1154,7 +1154,7 @@ dependencies = [

 [[package]]
 name = "deno"
-version = "2.0.5"
+version = "2.0.6"
 dependencies = [
 "anstream",
 "async-trait",
@@ -1323,7 +1323,7 @@ dependencies = [

 [[package]]
 name = "deno_bench_util"
-version = "0.170.0"
+version = "0.171.0"
 dependencies = [
 "bencher",
 "deno_core",
@@ -1332,7 +1332,7 @@ dependencies = [

 [[package]]
 name = "deno_broadcast_channel"
-version = "0.170.0"
+version = "0.171.0"
 dependencies = [
 "async-trait",
 "deno_core",
@@ -1343,7 +1343,7 @@ dependencies = [

 [[package]]
 name = "deno_cache"
-version = "0.108.0"
+version = "0.109.0"
 dependencies = [
 "async-trait",
 "deno_core",
@@ -1376,7 +1376,7 @@ dependencies = [

 [[package]]
 name = "deno_canvas"
-version = "0.45.0"
+version = "0.46.0"
 dependencies = [
 "deno_core",
 "deno_webgpu",
@@ -1411,7 +1411,7 @@ dependencies = [

 [[package]]
 name = "deno_console"
-version = "0.176.0"
+version = "0.177.0"
 dependencies = [
 "deno_core",
 ]
@@ -1456,7 +1456,7 @@ checksum = "a13951ea98c0a4c372f162d669193b4c9d991512de9f2381dd161027f34b26b1"

 [[package]]
 name = "deno_cron"
-version = "0.56.0"
+version = "0.57.0"
 dependencies = [
 "anyhow",
 "async-trait",
@@ -1469,7 +1469,7 @@ dependencies = [

 [[package]]
 name = "deno_crypto"
-version = "0.190.0"
+version = "0.191.0"
 dependencies = [
 "aes",
 "aes-gcm",
@@ -1531,7 +1531,7 @@ dependencies = [

 [[package]]
 name = "deno_fetch"
-version = "0.200.0"
+version = "0.201.0"
 dependencies = [
 "base64 0.21.7",
 "bytes",
@@ -1564,7 +1564,7 @@ dependencies = [

 [[package]]
 name = "deno_ffi"
-version = "0.163.0"
+version = "0.164.0"
 dependencies = [
 "deno_core",
 "deno_permissions",
@@ -1584,7 +1584,7 @@ dependencies = [

 [[package]]
 name = "deno_fs"
-version = "0.86.0"
+version = "0.87.0"
 dependencies = [
 "async-trait",
 "base32",
@@ -1635,7 +1635,7 @@ dependencies = [

 [[package]]
 name = "deno_http"
-version = "0.174.0"
+version = "0.175.0"
 dependencies = [
 "async-compression",
 "async-trait",
@@ -1674,7 +1674,7 @@ dependencies = [

 [[package]]
 name = "deno_io"
-version = "0.86.0"
+version = "0.87.0"
 dependencies = [
 "async-trait",
 "deno_core",
@@ -1695,7 +1695,7 @@ dependencies = [

 [[package]]
 name = "deno_kv"
-version = "0.84.0"
+version = "0.85.0"
 dependencies = [
 "anyhow",
 "async-trait",
@@ -1767,7 +1767,7 @@ dependencies = [

 [[package]]
 name = "deno_napi"
-version = "0.107.0"
+version = "0.108.0"
 dependencies = [
 "deno_core",
 "deno_permissions",
@@ -1795,24 +1795,24 @@ dependencies = [

 [[package]]
 name = "deno_net"
-version = "0.168.0"
+version = "0.169.0"
 dependencies = [
 "deno_core",
 "deno_permissions",
 "deno_tls",
+"hickory-proto",
+"hickory-resolver",
 "pin-project",
 "rustls-tokio-stream",
 "serde",
 "socket2",
 "thiserror",
 "tokio",
-"trust-dns-proto",
-"trust-dns-resolver",
 ]

 [[package]]
 name = "deno_node"
-version = "0.113.0"
+version = "0.114.0"
 dependencies = [
 "aead-gcm-stream",
 "aes",
@@ -1961,7 +1961,7 @@ dependencies = [

 [[package]]
 name = "deno_permissions"
-version = "0.36.0"
+version = "0.37.0"
 dependencies = [
 "deno_core",
 "deno_path_util",
@@ -1979,7 +1979,7 @@ dependencies = [

 [[package]]
 name = "deno_resolver"
-version = "0.8.0"
+version = "0.9.0"
 dependencies = [
 "anyhow",
 "base32",
@@ -1995,7 +1995,7 @@ dependencies = [

 [[package]]
 name = "deno_runtime"
-version = "0.185.0"
+version = "0.186.0"
 dependencies = [
 "color-print",
 "deno_ast",
@@ -2113,7 +2113,7 @@ dependencies = [

 [[package]]
 name = "deno_tls"
-version = "0.163.0"
+version = "0.164.0"
 dependencies = [
 "deno_core",
 "deno_native_certs",
@@ -2162,7 +2162,7 @@ dependencies = [

 [[package]]
 name = "deno_url"
-version = "0.176.0"
+version = "0.177.0"
 dependencies = [
 "deno_bench_util",
 "deno_console",
@@ -2174,7 +2174,7 @@ dependencies = [

 [[package]]
 name = "deno_web"
-version = "0.207.0"
+version = "0.208.0"
 dependencies = [
 "async-trait",
 "base64-simd 0.8.0",
@@ -2196,7 +2196,7 @@ dependencies = [

 [[package]]
 name = "deno_webgpu"
-version = "0.143.0"
+version = "0.144.0"
 dependencies = [
 "deno_core",
 "raw-window-handle",
@@ -2209,7 +2209,7 @@ dependencies = [

 [[package]]
 name = "deno_webidl"
-version = "0.176.0"
+version = "0.177.0"
 dependencies = [
 "deno_bench_util",
 "deno_core",
@@ -2217,7 +2217,7 @@ dependencies = [

 [[package]]
 name = "deno_websocket"
-version = "0.181.0"
+version = "0.182.0"
 dependencies = [
 "bytes",
 "deno_core",
@@ -2239,7 +2239,7 @@ dependencies = [

 [[package]]
 name = "deno_webstorage"
-version = "0.171.0"
+version = "0.172.0"
 dependencies = [
 "deno_core",
 "deno_web",
@@ -2639,15 +2639,6 @@ dependencies = [
 "text_lines",
 ]

-[[package]]
-name = "drain"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d105028bd2b5dfcb33318fd79a445001ead36004dd8dffef1bdd7e493d8bc1e"
-dependencies = [
-"tokio",
-]
-
 [[package]]
 name = "dsa"
 version = "0.6.3"
@@ -3545,6 +3536,92 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "dfa686283ad6dd069f105e5ab091b04c62850d3e4cf5d67debad1933f55023df"

+[[package]]
+name = "hickory-client"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bab9683b08d8f8957a857b0236455d80e1886eaa8c6178af556aa7871fb61b55"
+dependencies = [
+"cfg-if",
+"data-encoding",
+"futures-channel",
+"futures-util",
+"hickory-proto",
+"once_cell",
+"radix_trie",
+"rand",
+"thiserror",
+"tokio",
+"tracing",
+]
+
+[[package]]
+name = "hickory-proto"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512"
+dependencies = [
+"async-trait",
+"cfg-if",
+"data-encoding",
+"enum-as-inner",
+"futures-channel",
+"futures-io",
+"futures-util",
+"idna 0.4.0",
+"ipnet",
+"once_cell",
+"rand",
+"serde",
+"thiserror",
+"tinyvec",
+"tokio",
+"tracing",
+"url",
+]
+
+[[package]]
+name = "hickory-resolver"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243"
+dependencies = [
+"cfg-if",
+"futures-util",
+"hickory-proto",
+"ipconfig",
+"lru-cache",
+"once_cell",
+"parking_lot",
+"rand",
+"resolv-conf",
+"serde",
+"smallvec",
+"thiserror",
+"tokio",
+"tracing",
+]
+
+[[package]]
+name = "hickory-server"
+version = "0.24.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9be0e43c556b9b3fdb6c7c71a9a32153a2275d02419e3de809e520bfcfe40c37"
+dependencies = [
+"async-trait",
+"bytes",
+"cfg-if",
+"enum-as-inner",
+"futures-util",
+"hickory-proto",
+"serde",
+"thiserror",
+"time",
+"tokio",
+"tokio-util",
+"tracing",
+]
+
 [[package]]
 name = "hkdf"
 version = "0.12.4"
@@ -4484,7 +4561,7 @@ dependencies = [

 [[package]]
 name = "napi_sym"
-version = "0.106.0"
+version = "0.107.0"
 dependencies = [
 "quote",
 "serde",
@@ -4539,7 +4616,7 @@ dependencies = [

 [[package]]
 name = "node_resolver"
-version = "0.15.0"
+version = "0.16.0"
 dependencies = [
 "anyhow",
 "async-trait",
@@ -6147,15 +6224,6 @@ dependencies = [
 "syn 2.0.72",
 ]

-[[package]]
-name = "serde_spanned"
-version = "0.6.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0"
-dependencies = [
-"serde",
-]
-
 [[package]]
 name = "serde_urlencoded"
 version = "0.7.1"
@@ -7122,6 +7190,7 @@ dependencies = [
 "console_static_text",
 "deno_unsync",
 "denokv_proto",
+"faster-hex",
 "fastwebsockets",
 "flate2",
 "futures",
@@ -7369,40 +7438,6 @@ dependencies = [
 "serde",
 ]

-[[package]]
-name = "toml"
-version = "0.7.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257"
-dependencies = [
-"serde",
-"serde_spanned",
-"toml_datetime",
-"toml_edit",
-]
-
-[[package]]
-name = "toml_datetime"
-version = "0.6.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf"
-dependencies = [
-"serde",
-]
-
-[[package]]
-name = "toml_edit"
-version = "0.19.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
-dependencies = [
-"indexmap",
-"serde",
-"serde_spanned",
-"toml_datetime",
-"winnow 0.5.40",
-]
-
 [[package]]
 name = "tower"
 version = "0.4.13"
@@ -7492,95 +7527,6 @@ dependencies = [
 "stable_deref_trait",
 ]

-[[package]]
-name = "trust-dns-client"
-version = "0.23.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14135e72c7e6d4c9b6902d4437881a8598f0145dbb2e3f86f92dbad845b61e63"
-dependencies = [
-"cfg-if",
-"data-encoding",
-"futures-channel",
-"futures-util",
-"once_cell",
-"radix_trie",
-"rand",
-"thiserror",
-"tokio",
-"tracing",
-"trust-dns-proto",
-]
-
-[[package]]
-name = "trust-dns-proto"
-version = "0.23.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374"
-dependencies = [
-"async-trait",
-"cfg-if",
-"data-encoding",
-"enum-as-inner",
-"futures-channel",
-"futures-io",
-"futures-util",
-"idna 0.4.0",
-"ipnet",
-"once_cell",
-"rand",
-"serde",
-"smallvec",
-"thiserror",
-"tinyvec",
-"tokio",
-"tracing",
-"url",
-]
-
-[[package]]
-name = "trust-dns-resolver"
-version = "0.23.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6"
-dependencies = [
-"cfg-if",
-"futures-util",
-"ipconfig",
-"lru-cache",
-"once_cell",
-"parking_lot",
-"rand",
-"resolv-conf",
-"serde",
-"smallvec",
-"thiserror",
-"tokio",
-"tracing",
-"trust-dns-proto",
-]
-
-[[package]]
-name = "trust-dns-server"
-version = "0.23.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c540f73c2b2ec2f6c54eabd0900e7aafb747a820224b742f556e8faabb461bc7"
-dependencies = [
-"async-trait",
-"bytes",
-"cfg-if",
-"drain",
-"enum-as-inner",
-"futures-executor",
-"futures-util",
-"serde",
-"thiserror",
-"time",
-"tokio",
-"toml 0.7.8",
-"tracing",
-"trust-dns-proto",
-]
-
 [[package]]
 name = "try-lock"
 version = "0.2.5"
@@ -8330,15 +8276,6 @@ version = "0.52.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"

-[[package]]
-name = "winnow"
-version = "0.5.40"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876"
-dependencies = [
-"memchr",
-]
-
 [[package]]
 name = "winnow"
 version = "0.6.15"
@@ -8374,7 +8311,7 @@ version = "0.1.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b68db261ef59e9e52806f688020631e987592bd83619edccda9c47d42cde4f6c"
 dependencies = [
-"toml 0.5.11",
+"toml",
 ]

 [[package]]
@@ -8451,7 +8388,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2a6a39b6b5ba0d02c910d05d7fbc366a4befb8901ea107dcde9c1c97acb8a366"
 dependencies = [
 "rowan",
-"winnow 0.6.15",
+"winnow",
 ]

 [[package]]
Cargo.toml (56 changed lines)
@@ -48,16 +48,16 @@ repository = "https://github.com/denoland/deno"
 deno_ast = { version = "=0.43.3", features = ["transpiling"] }
 deno_core = { version = "0.318.0" }

-deno_bench_util = { version = "0.170.0", path = "./bench_util" }
+deno_bench_util = { version = "0.171.0", path = "./bench_util" }
 deno_lockfile = "=0.23.1"
 deno_media_type = { version = "0.2.0", features = ["module_specifier"] }
 deno_npm = "=0.25.4"
 deno_path_util = "=0.2.1"
-deno_permissions = { version = "0.36.0", path = "./runtime/permissions" }
-deno_runtime = { version = "0.185.0", path = "./runtime" }
+deno_permissions = { version = "0.37.0", path = "./runtime/permissions" }
+deno_runtime = { version = "0.186.0", path = "./runtime" }
 deno_semver = "=0.5.16"
 deno_terminal = "0.2.0"
-napi_sym = { version = "0.106.0", path = "./ext/napi/sym" }
+napi_sym = { version = "0.107.0", path = "./ext/napi/sym" }
 test_util = { package = "test_server", path = "./tests/util/server" }

 denokv_proto = "0.8.1"
@@ -66,32 +66,32 @@ denokv_remote = "0.8.1"
 denokv_sqlite = { default-features = false, version = "0.8.2" }

 # exts
-deno_broadcast_channel = { version = "0.170.0", path = "./ext/broadcast_channel" }
-deno_cache = { version = "0.108.0", path = "./ext/cache" }
-deno_canvas = { version = "0.45.0", path = "./ext/canvas" }
-deno_console = { version = "0.176.0", path = "./ext/console" }
-deno_cron = { version = "0.56.0", path = "./ext/cron" }
-deno_crypto = { version = "0.190.0", path = "./ext/crypto" }
-deno_fetch = { version = "0.200.0", path = "./ext/fetch" }
-deno_ffi = { version = "0.163.0", path = "./ext/ffi" }
-deno_fs = { version = "0.86.0", path = "./ext/fs" }
-deno_http = { version = "0.174.0", path = "./ext/http" }
-deno_io = { version = "0.86.0", path = "./ext/io" }
-deno_kv = { version = "0.84.0", path = "./ext/kv" }
-deno_napi = { version = "0.107.0", path = "./ext/napi" }
-deno_net = { version = "0.168.0", path = "./ext/net" }
-deno_node = { version = "0.113.0", path = "./ext/node" }
-deno_tls = { version = "0.163.0", path = "./ext/tls" }
-deno_url = { version = "0.176.0", path = "./ext/url" }
-deno_web = { version = "0.207.0", path = "./ext/web" }
-deno_webgpu = { version = "0.143.0", path = "./ext/webgpu" }
-deno_webidl = { version = "0.176.0", path = "./ext/webidl" }
-deno_websocket = { version = "0.181.0", path = "./ext/websocket" }
-deno_webstorage = { version = "0.171.0", path = "./ext/webstorage" }
+deno_broadcast_channel = { version = "0.171.0", path = "./ext/broadcast_channel" }
+deno_cache = { version = "0.109.0", path = "./ext/cache" }
+deno_canvas = { version = "0.46.0", path = "./ext/canvas" }
+deno_console = { version = "0.177.0", path = "./ext/console" }
+deno_cron = { version = "0.57.0", path = "./ext/cron" }
+deno_crypto = { version = "0.191.0", path = "./ext/crypto" }
+deno_fetch = { version = "0.201.0", path = "./ext/fetch" }
+deno_ffi = { version = "0.164.0", path = "./ext/ffi" }
+deno_fs = { version = "0.87.0", path = "./ext/fs" }
+deno_http = { version = "0.175.0", path = "./ext/http" }
+deno_io = { version = "0.87.0", path = "./ext/io" }
+deno_kv = { version = "0.85.0", path = "./ext/kv" }
+deno_napi = { version = "0.108.0", path = "./ext/napi" }
+deno_net = { version = "0.169.0", path = "./ext/net" }
+deno_node = { version = "0.114.0", path = "./ext/node" }
+deno_tls = { version = "0.164.0", path = "./ext/tls" }
+deno_url = { version = "0.177.0", path = "./ext/url" }
+deno_web = { version = "0.208.0", path = "./ext/web" }
+deno_webgpu = { version = "0.144.0", path = "./ext/webgpu" }
+deno_webidl = { version = "0.177.0", path = "./ext/webidl" }
+deno_websocket = { version = "0.182.0", path = "./ext/websocket" }
+deno_webstorage = { version = "0.172.0", path = "./ext/webstorage" }

 # resolvers
-deno_resolver = { version = "0.8.0", path = "./resolvers/deno" }
-node_resolver = { version = "0.15.0", path = "./resolvers/node" }
+deno_resolver = { version = "0.9.0", path = "./resolvers/deno" }
+node_resolver = { version = "0.16.0", path = "./resolvers/node" }

 aes = "=0.8.3"
 anyhow = "1.0.57"
Releases.md (12 changed lines)
@@ -6,6 +6,18 @@ https://github.com/denoland/deno/releases
 We also have one-line install commands at:
 https://github.com/denoland/deno_install

+### 2.0.6 / 2024.11.10
+
+- feat(ext/http): abort event when request is cancelled (#26781)
+- feat(ext/http): abort signal when request is cancelled (#26761)
+- feat(lsp): auto-import completions from byonm dependencies (#26680)
+- fix(ext/cache): don't panic when creating cache (#26780)
+- fix(ext/node): better inspector support (#26471)
+- fix(fmt): don't use self-closing tags in HTML (#26754)
+- fix(install): cache jsr deps from all workspace config files (#26779)
+- fix(node:zlib): gzip & gzipSync should accept ArrayBuffer (#26762)
+- fix: performance.timeOrigin (#26787)
+
 ### 2.0.5 / 2024.11.05

 - fix(add): better error message when adding package that only has pre-release
@@ -2,7 +2,7 @@

 [package]
 name = "deno_bench_util"
-version = "0.170.0"
+version = "0.171.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -2,7 +2,7 @@

 [package]
 name = "deno"
-version = "2.0.5"
+version = "2.0.6"
 authors.workspace = true
 default-run = "deno"
 edition.workspace = true
@@ -88,6 +88,10 @@ fn get_resolution_error_class(err: &ResolutionError) -> &'static str {
 }
 }

+fn get_try_from_int_error_class(_: &std::num::TryFromIntError) -> &'static str {
+"TypeError"
+}
+
 pub fn get_error_class_name(e: &AnyError) -> &'static str {
 deno_runtime::errors::get_error_class_name(e)
 .or_else(|| {
@@ -106,5 +110,9 @@ pub fn get_error_class_name(e: &AnyError) -> &'static str {
 e.downcast_ref::<ResolutionError>()
 .map(get_resolution_error_class)
 })
+.or_else(|| {
+e.downcast_ref::<std::num::TryFromIntError>()
+.map(get_try_from_int_error_class)
+})
 .unwrap_or("Error")
 }
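The hunk above makes integer-narrowing failures surface as JavaScript TypeErrors by adding one more downcast to the error-class chain. A minimal standalone sketch of that downcast-and-map pattern follows; it uses the anyhow crate as a stand-in for deno_core's AnyError, which is an assumption of the sketch, not something this diff adds.

    // Standalone sketch, not Deno code: map a concrete error type to a class
    // name by downcasting through an anyhow::Error (stand-in for AnyError).
    use anyhow::Error as AnyError; // assumed dependency: anyhow

    fn get_try_from_int_error_class(_: &std::num::TryFromIntError) -> &'static str {
        "TypeError"
    }

    fn error_class_name(e: &AnyError) -> &'static str {
        e.downcast_ref::<std::num::TryFromIntError>()
            .map(get_try_from_int_error_class)
            .unwrap_or("Error")
    }

    fn main() {
        // u8::try_from(500) fails, producing the error type the chain recognizes.
        let err: AnyError = u8::try_from(500u16).unwrap_err().into();
        assert_eq!(error_class_name(&err), "TypeError");
        println!("{}", error_class_name(&err));
    }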
@@ -2,7 +2,6 @@

 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering;
-use std::time;

 use deno_core::error::generic_error;
 use deno_core::error::type_error;
@@ -13,6 +12,7 @@ use deno_core::ModuleSpecifier;
 use deno_core::OpState;
 use deno_runtime::deno_permissions::ChildPermissionsArg;
 use deno_runtime::deno_permissions::PermissionsContainer;
+use deno_runtime::deno_web::StartTime;
 use tokio::sync::mpsc::UnboundedSender;
 use uuid::Uuid;

@@ -56,7 +56,7 @@ struct PermissionsHolder(Uuid, PermissionsContainer);
 pub fn op_pledge_test_permissions(
 state: &mut OpState,
 #[serde] args: ChildPermissionsArg,
-) -> Result<Uuid, AnyError> {
+) -> Result<Uuid, deno_runtime::deno_permissions::ChildPermissionError> {
 let token = Uuid::new_v4();
 let parent_permissions = state.borrow_mut::<PermissionsContainer>();
 let worker_permissions = parent_permissions.create_child_permissions(args)?;
@@ -147,8 +147,8 @@ fn op_dispatch_bench_event(state: &mut OpState, #[serde] event: BenchEvent) {

 #[op2(fast)]
 #[number]
-fn op_bench_now(state: &mut OpState) -> Result<u64, AnyError> {
-let ns = state.borrow::<time::Instant>().elapsed().as_nanos();
+fn op_bench_now(state: &mut OpState) -> Result<u64, std::num::TryFromIntError> {
+let ns = state.borrow::<StartTime>().elapsed().as_nanos();
 let ns_u64 = u64::try_from(ns)?;
 Ok(ns_u64)
 }
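op_bench_now now reads the shared StartTime and returns the narrowing error directly instead of a boxed AnyError. A standalone sketch of that fallible nanosecond conversion, using only the standard library (std::time::Instant stands in here for deno_web's StartTime):

    use std::time::Instant;

    // Converting u128 nanoseconds to u64 is the only fallible step, so the
    // function can expose TryFromIntError instead of a type-erased error.
    fn elapsed_ns(start: &Instant) -> Result<u64, std::num::TryFromIntError> {
        let ns: u128 = start.elapsed().as_nanos();
        u64::try_from(ns) // only overflows after roughly 584 years
    }

    fn main() {
        let start = Instant::now();
        println!("{:?}", elapsed_ns(&start));
    }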
@@ -46,7 +46,7 @@ pub fn op_jupyter_input(
 state: &mut OpState,
 #[string] prompt: String,
 is_password: bool,
-) -> Result<Option<String>, AnyError> {
+) -> Option<String> {
 let (last_execution_request, stdin_connection_proxy) = {
 (
 state.borrow::<Arc<Mutex<Option<JupyterMessage>>>>().clone(),
@@ -58,11 +58,11 @@ pub fn op_jupyter_input(
 if let Some(last_request) = maybe_last_request {
 let JupyterMessageContent::ExecuteRequest(msg) = &last_request.content
 else {
-return Ok(None);
+return None;
 };

 if !msg.allow_stdin {
-return Ok(None);
+return None;
 }

 let content = InputRequest {
@@ -73,7 +73,7 @@ pub fn op_jupyter_input(
 let msg = JupyterMessage::new(content, Some(&last_request));

 let Ok(()) = stdin_connection_proxy.lock().tx.send(msg) else {
-return Ok(None);
+return None;
 };

 // Need to spawn a separate thread here, because `blocking_recv()` can't
@@ -82,17 +82,25 @@ pub fn op_jupyter_input(
 stdin_connection_proxy.lock().rx.blocking_recv()
 });
 let Ok(Some(response)) = join_handle.join() else {
-return Ok(None);
+return None;
 };

 let JupyterMessageContent::InputReply(msg) = response.content else {
-return Ok(None);
+return None;
 };

-return Ok(Some(msg.value));
+return Some(msg.value);
 }

-Ok(None)
+None
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum JupyterBroadcastError {
+#[error(transparent)]
+SerdeJson(serde_json::Error),
+#[error(transparent)]
+ZeroMq(AnyError),
 }

 #[op2(async)]
@@ -102,7 +110,7 @@ pub async fn op_jupyter_broadcast(
 #[serde] content: serde_json::Value,
 #[serde] metadata: serde_json::Value,
 #[serde] buffers: Vec<deno_core::JsBuffer>,
-) -> Result<(), AnyError> {
+) -> Result<(), JupyterBroadcastError> {
 let (iopub_connection, last_execution_request) = {
 let s = state.borrow();

@@ -125,36 +133,35 @@ pub async fn op_jupyter_broadcast(
 content,
 err
 );
-err
+JupyterBroadcastError::SerdeJson(err)
 })?;

 let jupyter_message = JupyterMessage::new(content, Some(&last_request))
 .with_metadata(metadata)
 .with_buffers(buffers.into_iter().map(|b| b.to_vec().into()).collect());

-iopub_connection.lock().send(jupyter_message).await?;
+iopub_connection
+.lock()
+.send(jupyter_message)
+.await
+.map_err(JupyterBroadcastError::ZeroMq)?;
 }

 Ok(())
 }

 #[op2(fast)]
-pub fn op_print(
-state: &mut OpState,
-#[string] msg: &str,
-is_err: bool,
-) -> Result<(), AnyError> {
+pub fn op_print(state: &mut OpState, #[string] msg: &str, is_err: bool) {
 let sender = state.borrow_mut::<mpsc::UnboundedSender<StreamContent>>();

 if is_err {
 if let Err(err) = sender.send(StreamContent::stderr(msg)) {
 log::error!("Failed to send stderr message: {}", err);
 }
-return Ok(());
+return;
 }

 if let Err(err) = sender.send(StreamContent::stdout(msg)) {
 log::error!("Failed to send stdout message: {}", err);
 }
-Ok(())
 }
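The broadcast op above swaps a catch-all AnyError for a dedicated thiserror enum so each failure keeps its concrete type. A self-contained sketch of that pattern follows; thiserror, serde_json, and anyhow (standing in for deno_core's AnyError) are assumed dependencies, and parse_content is an illustrative helper, not part of the diff.

    use anyhow::Error as AnyError; // stand-in for deno_core::error::AnyError

    // Same shape as the enum added in the hunk: transparent wrappers keep the
    // underlying Display and source of each failure.
    #[derive(Debug, thiserror::Error)]
    pub enum JupyterBroadcastError {
        #[error(transparent)]
        SerdeJson(serde_json::Error),
        #[error(transparent)]
        ZeroMq(AnyError),
    }

    // Hypothetical call site showing how errors get tagged with map_err.
    fn parse_content(raw: &str) -> Result<serde_json::Value, JupyterBroadcastError> {
        serde_json::from_str(raw).map_err(JupyterBroadcastError::SerdeJson)
    }

    fn main() {
        match parse_content("{ not json") {
            Ok(v) => println!("{v}"),
            Err(e) => println!("broadcast failed: {e}"),
        }
    }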
@@ -51,7 +51,7 @@ struct PermissionsHolder(Uuid, PermissionsContainer);
 pub fn op_pledge_test_permissions(
 state: &mut OpState,
 #[serde] args: ChildPermissionsArg,
-) -> Result<Uuid, AnyError> {
+) -> Result<Uuid, deno_runtime::deno_permissions::ChildPermissionError> {
 let token = Uuid::new_v4();
 let parent_permissions = state.borrow_mut::<PermissionsContainer>();
 let worker_permissions = parent_permissions.create_child_permissions(args)?;
@@ -150,7 +150,7 @@ fn op_register_test_step(
 #[smi] parent_id: usize,
 #[smi] root_id: usize,
 #[string] root_name: String,
-) -> Result<usize, AnyError> {
+) -> usize {
 let id = NEXT_ID.fetch_add(1, Ordering::SeqCst);
 let origin = state.borrow::<ModuleSpecifier>().to_string();
 let description = TestStepDescription {
@@ -169,7 +169,7 @@ fn op_register_test_step(
 };
 let sender = state.borrow_mut::<TestEventSender>();
 sender.send(TestEvent::StepRegister(description)).ok();
-Ok(id)
+id
 }

 #[op2(fast)]
@@ -1032,7 +1032,7 @@ fn get_resolved_markup_fmt_config(
 max_attrs_per_line: None,
 prefer_attrs_single_line: false,
 html_normal_self_closing: None,
-html_void_self_closing: Some(true),
+html_void_self_closing: None,
 component_self_closing: None,
 svg_self_closing: None,
 mathml_self_closing: None,
@@ -44,7 +44,11 @@ pub async fn cache_top_level_deps(

 let mut seen_reqs = std::collections::HashSet::new();

-for entry in import_map.imports().entries() {
+for entry in import_map.imports().entries().chain(
+import_map
+.scopes()
+.flat_map(|scope| scope.imports.entries()),
+) {
 let Some(specifier) = entry.value else {
 continue;
 };
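The loop above now walks the top-level imports and every scope's imports as one chained iterator, which is what lets jsr dependencies declared under scopes get cached too. A standalone sketch of that chain/flat_map traversal over simplified stand-in types (not the real import_map crate API):

    use std::collections::BTreeMap;

    // Stand-in for an import map: top-level imports plus per-scope imports.
    struct ImportMap {
        imports: BTreeMap<String, String>,
        scopes: BTreeMap<String, BTreeMap<String, String>>,
    }

    // One pass over every mapped specifier, top-level and scoped alike.
    fn all_specifiers(map: &ImportMap) -> impl Iterator<Item = &String> {
        map.imports
            .values()
            .chain(map.scopes.values().flat_map(|scope| scope.values()))
    }

    fn main() {
        let mut scopes = BTreeMap::new();
        scopes.insert(
            "./vendor/".to_string(),
            BTreeMap::from([("a".to_string(), "jsr:@std/path".to_string())]),
        );
        let map = ImportMap {
            imports: BTreeMap::from([("b".to_string(), "jsr:@std/fs".to_string())]),
            scopes,
        };
        for specifier in all_specifiers(&map) {
            println!("{specifier}");
        }
    }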
@@ -2,7 +2,7 @@

 [package]
 name = "deno_broadcast_channel"
-version = "0.170.0"
+version = "0.171.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
ext/cache/Cargo.toml (vendored, 2 changed lines)
@@ -2,7 +2,7 @@

 [package]
 name = "deno_cache"
-version = "0.108.0"
+version = "0.109.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
ext/cache/lib.rs (vendored, 6 changed lines)
@@ -33,7 +33,9 @@ pub enum CacheError {
 }

 #[derive(Clone)]
-pub struct CreateCache<C: Cache + 'static>(pub Arc<dyn Fn() -> C>);
+pub struct CreateCache<C: Cache + 'static>(
+pub Arc<dyn Fn() -> Result<C, CacheError>>,
+);

 deno_core::extension!(deno_cache,
 deps = [ deno_webidl, deno_web, deno_url, deno_fetch ],
@@ -231,7 +233,7 @@ where
 if let Some(cache) = state.try_borrow::<CA>() {
 Ok(cache.clone())
 } else if let Some(create_cache) = state.try_borrow::<CreateCache<CA>>() {
-let cache = create_cache.0();
+let cache = create_cache.0()?;
 state.put(cache);
 Ok(state.borrow::<CA>().clone())
 } else {
ext/cache/sqlite.rs (vendored, 19 changed lines)
@@ -42,7 +42,7 @@ pub struct SqliteBackedCache {
 }

 impl SqliteBackedCache {
-pub fn new(cache_storage_dir: PathBuf) -> Self {
+pub fn new(cache_storage_dir: PathBuf) -> Result<Self, CacheError> {
 {
 std::fs::create_dir_all(&cache_storage_dir)
 .expect("failed to create cache dir");
@@ -57,18 +57,14 @@ impl SqliteBackedCache {
 PRAGMA synchronous=NORMAL;
 PRAGMA optimize;
 ";
-connection
-.execute_batch(initial_pragmas)
-.expect("failed to execute pragmas");
-connection
-.execute(
+connection.execute_batch(initial_pragmas)?;
+connection.execute(
 "CREATE TABLE IF NOT EXISTS cache_storage (
 id INTEGER PRIMARY KEY,
 cache_name TEXT NOT NULL UNIQUE
 )",
 (),
-)
-.expect("failed to create cache_storage table");
+)?;
 connection
 .execute(
 "CREATE TABLE IF NOT EXISTS request_response_list (
@@ -86,12 +82,11 @@ impl SqliteBackedCache {
 UNIQUE (cache_id, request_url)
 )",
 (),
-)
-.expect("failed to create request_response_list table");
-SqliteBackedCache {
+)?;
+Ok(SqliteBackedCache {
 connection: Arc::new(Mutex::new(connection)),
 cache_storage_dir,
-}
+})
 }
 }
 }
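Together with the CreateCache change in ext/cache/lib.rs above, this makes cache construction fallible end to end: the stored factory returns Result and the ? operator replaces the .expect() calls, which is the "don't panic when creating cache" fix listed in the 2.0.6 notes. A simplified standalone sketch of that fallible-factory shape (all types here are stand-ins, not the real deno_cache ones):

    use std::sync::Arc;

    #[derive(Debug)]
    struct CacheError(String); // stand-in for deno_cache::CacheError

    struct SqliteCache {
        dir: std::path::PathBuf,
    }

    impl SqliteCache {
        // Construction reports failures instead of panicking.
        fn new(dir: std::path::PathBuf) -> Result<Self, CacheError> {
            std::fs::create_dir_all(&dir).map_err(|e| CacheError(e.to_string()))?;
            Ok(SqliteCache { dir })
        }
    }

    // Same shape as the new CreateCache: the factory itself is fallible.
    struct CreateCache<C>(Arc<dyn Fn() -> Result<C, CacheError>>);

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let dir = std::env::temp_dir().join("cache_demo");
        let create = CreateCache(Arc::new(move || SqliteCache::new(dir.clone())));

        // Callers propagate the error rather than unwrapping inside the factory.
        let factory: &dyn Fn() -> Result<SqliteCache, CacheError> = &*create.0;
        let cache = factory().map_err(|e| format!("{e:?}"))?;
        println!("cache at {}", cache.dir.display());
        Ok(())
    }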
@@ -2,7 +2,7 @@

 [package]
 name = "deno_canvas"
-version = "0.45.0"
+version = "0.46.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -2,7 +2,7 @@

 [package]
 name = "deno_console"
-version = "0.176.0"
+version = "0.177.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -2,7 +2,7 @@

 [package]
 name = "deno_cron"
-version = "0.56.0"
+version = "0.57.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -2,7 +2,7 @@

 [package]
 name = "deno_crypto"
-version = "0.190.0"
+version = "0.191.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -269,12 +269,6 @@ class Request {
 /** @type {AbortSignal} */
 get [_signal]() {
 const signal = this[_signalCache];
-// This signal not been created yet, and the request is still in progress
-if (signal === undefined) {
-const signal = newSignal();
-this[_signalCache] = signal;
-return signal;
-}
 // This signal has not been created yet, but the request has already completed
 if (signal === false) {
 const signal = newSignal();
@@ -282,6 +276,18 @@ class Request {
 signal[signalAbort](signalAbortError);
 return signal;
 }
+
+// This signal not been created yet, and the request is still in progress
+if (signal === undefined) {
+const signal = newSignal();
+this[_signalCache] = signal;
+this[_request].onCancel?.(() => {
+signal[signalAbort](signalAbortError);
+});
+
+return signal;
+}
+
 return signal;
 }
 get [_mimeType]() {
@@ -2,7 +2,7 @@

 [package]
 name = "deno_fetch"
-version = "0.200.0"
+version = "0.201.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -2,7 +2,7 @@

 [package]
 name = "deno_ffi"
-version = "0.163.0"
+version = "0.164.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -2,7 +2,7 @@

 [package]
 name = "deno_fs"
-version = "0.86.0"
+version = "0.87.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -14,6 +14,7 @@ import {
 op_http_get_request_headers,
 op_http_get_request_method_and_url,
 op_http_read_request_body,
+op_http_request_on_cancel,
 op_http_serve,
 op_http_serve_on,
 op_http_set_promise_complete,
@@ -373,6 +374,18 @@ class InnerRequest {
 get external() {
 return this.#external;
 }
+
+onCancel(callback) {
+if (this.#external === null) {
+callback();
+return;
+}
+
+PromisePrototypeThen(
+op_http_request_on_cancel(this.#external),
+callback,
+);
+}
 }

 class CallbackContext {
@@ -2,7 +2,7 @@

 [package]
 name = "deno_http"
-version = "0.174.0"
+version = "0.175.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -564,6 +564,7 @@ fn is_request_compressible(
 match accept_encoding.to_str() {
 // Firefox and Chrome send this -- no need to parse
 Ok("gzip, deflate, br") => return Compression::Brotli,
+Ok("gzip, deflate, br, zstd") => return Compression::Brotli,
 Ok("gzip") => return Compression::GZip,
 Ok("br") => return Compression::Brotli,
 _ => (),
@@ -700,6 +701,27 @@ fn set_response(
 http.complete();
 }

+#[op2(fast)]
+pub fn op_http_get_request_cancelled(external: *const c_void) -> bool {
+let http =
+// SAFETY: op is called with external.
+unsafe { clone_external!(external, "op_http_get_request_cancelled") };
+http.cancelled()
+}
+
+#[op2(async)]
+pub async fn op_http_request_on_cancel(external: *const c_void) {
+let http =
+// SAFETY: op is called with external.
+unsafe { clone_external!(external, "op_http_request_on_cancel") };
+let (tx, rx) = tokio::sync::oneshot::channel();
+
+http.on_cancel(tx);
+drop(http);
+
+rx.await.ok();
+}
+
 /// Returned promise resolves when body streaming finishes.
 /// Call [`op_http_close_after_finish`] when done with the external.
 #[op2(async)]
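The one-line compression change above extends a fast path that matches common Accept-Encoding header values verbatim, now including the ", zstd" variant, before any real parsing runs. A standalone sketch of that shape (the enum and function are illustrative, not deno_http's actual items):

    #[derive(Debug, PartialEq)]
    enum Compression {
        None,
        GZip,
        Brotli,
    }

    // Exact matches for headers that browsers commonly send skip the parser.
    fn pick_compression(accept_encoding: &str) -> Compression {
        match accept_encoding {
            "gzip, deflate, br" | "gzip, deflate, br, zstd" => Compression::Brotli,
            "gzip" => Compression::GZip,
            "br" => Compression::Brotli,
            _ => Compression::None, // a full Accept-Encoding parser would run here
        }
    }

    fn main() {
        assert_eq!(pick_compression("gzip, deflate, br, zstd"), Compression::Brotli);
        println!("{:?}", pick_compression("gzip"));
    }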
@@ -112,7 +112,9 @@ deno_core::extension!(
 http_next::op_http_close_after_finish,
 http_next::op_http_get_request_header,
 http_next::op_http_get_request_headers,
+http_next::op_http_request_on_cancel,
 http_next::op_http_get_request_method_and_url<HTTP>,
+http_next::op_http_get_request_cancelled,
 http_next::op_http_read_request_body,
 http_next::op_http_serve_on<HTTP>,
 http_next::op_http_serve<HTTP>,
@@ -27,6 +27,7 @@ use std::rc::Rc;
use std::task::Context;
use std::task::Poll;
use std::task::Waker;
+use tokio::sync::oneshot;

pub type Request = hyper::Request<Incoming>;
pub type Response = hyper::Response<HttpRecordResponse>;

@@ -211,6 +212,7 @@ pub struct UpgradeUnavailableError;

struct HttpRecordInner {
server_state: SignallingRc<HttpServerState>,
+closed_channel: Option<oneshot::Sender<()>>,
request_info: HttpConnectionProperties,
request_parts: http::request::Parts,
request_body: Option<RequestBodyState>,

@@ -276,6 +278,7 @@ impl HttpRecord {
response_body_finished: false,
response_body_waker: None,
trailers: None,
+closed_channel: None,
been_dropped: false,
finished: false,
needs_close_after_finish: false,

@@ -312,6 +315,10 @@ impl HttpRecord {
RefMut::map(self.self_mut(), |inner| &mut inner.needs_close_after_finish)
}

+pub fn on_cancel(&self, sender: oneshot::Sender<()>) {
+self.self_mut().closed_channel = Some(sender);
+}
+
fn recycle(self: Rc<Self>) {
assert!(
Rc::strong_count(&self) == 1,

@@ -390,6 +397,9 @@ impl HttpRecord {
inner.been_dropped = true;
// The request body might include actual resources.
inner.request_body.take();
+if let Some(closed_channel) = inner.closed_channel.take() {
+let _ = closed_channel.send(());
+}
}

/// Complete this record, potentially expunging it if it is fully complete (ie: cancelled as well).
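The hunks above wire request cancellation through a one-shot channel: on_cancel stores a Sender on the record, and the drop path fires it. A minimal sketch of that pattern on its own, assuming only tokio with the sync and rt features (the Record type and method names are illustrative stand-ins, not the diff's actual types):

    use tokio::sync::oneshot;

    /// Illustrative stand-in for the record's cancellation slot.
    struct Record {
        closed_channel: Option<oneshot::Sender<()>>,
    }

    impl Record {
        fn on_cancel(&mut self, sender: oneshot::Sender<()>) {
            // Store the sender; firing it later signals "request went away".
            self.closed_channel = Some(sender);
        }

        fn cancel(&mut self) {
            // Mirrors the drop path above: take() the sender and ignore the
            // error if nobody is listening any more.
            if let Some(tx) = self.closed_channel.take() {
                let _ = tx.send(());
            }
        }
    }

    #[tokio::main]
    async fn main() {
        let (tx, rx) = oneshot::channel();
        let mut record = Record { closed_channel: None };
        record.on_cancel(tx);
        record.cancel();
        // Resolves once cancel() fires; Err means the sender was dropped unused.
        rx.await.ok();
        println!("request cancelled");
    }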
@@ -2,7 +2,7 @@

[package]
name = "deno_io"
-version = "0.86.0"
+version = "0.87.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@@ -2,7 +2,7 @@

[package]
name = "deno_kv"
-version = "0.84.0"
+version = "0.85.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@@ -2,7 +2,7 @@

[package]
name = "deno_napi"
-version = "0.107.0"
+version = "0.108.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@@ -2,7 +2,7 @@

[package]
name = "napi_sym"
-version = "0.106.0"
+version = "0.107.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@@ -2,7 +2,7 @@

[package]
name = "deno_net"
-version = "0.168.0"
+version = "0.169.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@ -17,11 +17,11 @@ path = "lib.rs"
|
||||||
deno_core.workspace = true
|
deno_core.workspace = true
|
||||||
deno_permissions.workspace = true
|
deno_permissions.workspace = true
|
||||||
deno_tls.workspace = true
|
deno_tls.workspace = true
|
||||||
|
hickory-proto = "0.24"
|
||||||
|
hickory-resolver = { version = "0.24", features = ["tokio-runtime", "serde-config"] }
|
||||||
pin-project.workspace = true
|
pin-project.workspace = true
|
||||||
rustls-tokio-stream.workspace = true
|
rustls-tokio-stream.workspace = true
|
||||||
serde.workspace = true
|
serde.workspace = true
|
||||||
socket2.workspace = true
|
socket2.workspace = true
|
||||||
thiserror.workspace = true
|
thiserror.workspace = true
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
trust-dns-proto = "0.23"
|
|
||||||
trust-dns-resolver = { version = "0.23", features = ["tokio-runtime", "serde-config"] }
|
|
||||||
|
|
|
@ -18,6 +18,16 @@ use deno_core::OpState;
|
||||||
use deno_core::RcRef;
|
use deno_core::RcRef;
|
||||||
use deno_core::Resource;
|
use deno_core::Resource;
|
||||||
use deno_core::ResourceId;
|
use deno_core::ResourceId;
|
||||||
|
use hickory_proto::rr::rdata::caa::Value;
|
||||||
|
use hickory_proto::rr::record_data::RData;
|
||||||
|
use hickory_proto::rr::record_type::RecordType;
|
||||||
|
use hickory_resolver::config::NameServerConfigGroup;
|
||||||
|
use hickory_resolver::config::ResolverConfig;
|
||||||
|
use hickory_resolver::config::ResolverOpts;
|
||||||
|
use hickory_resolver::error::ResolveError;
|
||||||
|
use hickory_resolver::error::ResolveErrorKind;
|
||||||
|
use hickory_resolver::system_conf;
|
||||||
|
use hickory_resolver::AsyncResolver;
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
use serde::Serialize;
|
use serde::Serialize;
|
||||||
use socket2::Domain;
|
use socket2::Domain;
|
||||||
|
@ -33,16 +43,6 @@ use std::rc::Rc;
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
use tokio::net::TcpStream;
|
use tokio::net::TcpStream;
|
||||||
use tokio::net::UdpSocket;
|
use tokio::net::UdpSocket;
|
||||||
use trust_dns_proto::rr::rdata::caa::Value;
|
|
||||||
use trust_dns_proto::rr::record_data::RData;
|
|
||||||
use trust_dns_proto::rr::record_type::RecordType;
|
|
||||||
use trust_dns_resolver::config::NameServerConfigGroup;
|
|
||||||
use trust_dns_resolver::config::ResolverConfig;
|
|
||||||
use trust_dns_resolver::config::ResolverOpts;
|
|
||||||
use trust_dns_resolver::error::ResolveError;
|
|
||||||
use trust_dns_resolver::error::ResolveErrorKind;
|
|
||||||
use trust_dns_resolver::system_conf;
|
|
||||||
use trust_dns_resolver::AsyncResolver;
|
|
||||||
|
|
||||||
#[derive(Serialize, Clone, Debug)]
|
#[derive(Serialize, Clone, Debug)]
|
||||||
#[serde(rename_all = "camelCase")]
|
#[serde(rename_all = "camelCase")]
|
||||||
|
@ -828,6 +828,21 @@ mod tests {
|
||||||
use deno_core::JsRuntime;
|
use deno_core::JsRuntime;
|
||||||
use deno_core::RuntimeOptions;
|
use deno_core::RuntimeOptions;
|
||||||
use deno_permissions::PermissionCheckError;
|
use deno_permissions::PermissionCheckError;
|
||||||
|
use hickory_proto::rr::rdata::a::A;
|
||||||
|
use hickory_proto::rr::rdata::aaaa::AAAA;
|
||||||
|
use hickory_proto::rr::rdata::caa::KeyValue;
|
||||||
|
use hickory_proto::rr::rdata::caa::CAA;
|
||||||
|
use hickory_proto::rr::rdata::mx::MX;
|
||||||
|
use hickory_proto::rr::rdata::name::ANAME;
|
||||||
|
use hickory_proto::rr::rdata::name::CNAME;
|
||||||
|
use hickory_proto::rr::rdata::name::NS;
|
||||||
|
use hickory_proto::rr::rdata::name::PTR;
|
||||||
|
use hickory_proto::rr::rdata::naptr::NAPTR;
|
||||||
|
use hickory_proto::rr::rdata::srv::SRV;
|
||||||
|
use hickory_proto::rr::rdata::txt::TXT;
|
||||||
|
use hickory_proto::rr::rdata::SOA;
|
||||||
|
use hickory_proto::rr::record_data::RData;
|
||||||
|
use hickory_proto::rr::Name;
|
||||||
use socket2::SockRef;
|
use socket2::SockRef;
|
||||||
use std::net::Ipv4Addr;
|
use std::net::Ipv4Addr;
|
||||||
use std::net::Ipv6Addr;
|
use std::net::Ipv6Addr;
|
||||||
|
@ -836,21 +851,6 @@ mod tests {
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::sync::Mutex;
|
use std::sync::Mutex;
|
||||||
use trust_dns_proto::rr::rdata::a::A;
|
|
||||||
use trust_dns_proto::rr::rdata::aaaa::AAAA;
|
|
||||||
use trust_dns_proto::rr::rdata::caa::KeyValue;
|
|
||||||
use trust_dns_proto::rr::rdata::caa::CAA;
|
|
||||||
use trust_dns_proto::rr::rdata::mx::MX;
|
|
||||||
use trust_dns_proto::rr::rdata::name::ANAME;
|
|
||||||
use trust_dns_proto::rr::rdata::name::CNAME;
|
|
||||||
use trust_dns_proto::rr::rdata::name::NS;
|
|
||||||
use trust_dns_proto::rr::rdata::name::PTR;
|
|
||||||
use trust_dns_proto::rr::rdata::naptr::NAPTR;
|
|
||||||
use trust_dns_proto::rr::rdata::srv::SRV;
|
|
||||||
use trust_dns_proto::rr::rdata::txt::TXT;
|
|
||||||
use trust_dns_proto::rr::rdata::SOA;
|
|
||||||
use trust_dns_proto::rr::record_data::RData;
|
|
||||||
use trust_dns_proto::rr::Name;
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn rdata_to_return_record_a() {
|
fn rdata_to_return_record_a() {
|
||||||
|
|
|
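The dependency and import changes above swap trust-dns for its hickory rename while keeping the same module layout. A minimal lookup sketch, assuming hickory-resolver 0.24 with the tokio-runtime feature enabled as in the Cargo.toml hunk (the host name and error handling are illustrative):

    use hickory_resolver::config::{ResolverConfig, ResolverOpts};
    use hickory_resolver::TokioAsyncResolver;

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        // Default config points at public resolvers; ResolverOpts carries
        // timeouts, ndots, etc. Construction is infallible in this version
        // (older trust-dns releases returned a Result here).
        let resolver =
            TokioAsyncResolver::tokio(ResolverConfig::default(), ResolverOpts::default());

        // lookup_ip resolves A/AAAA records; failures surface as ResolveError.
        let response = resolver.lookup_ip("example.com.").await?;
        for addr in response.iter() {
            println!("{addr}");
        }
        Ok(())
    }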
@@ -2,7 +2,7 @@

[package]
name = "deno_node"
-version = "0.113.0"
+version = "0.114.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@@ -47,6 +47,11 @@ pub trait NodePermissions {
url: &Url,
api_name: &str,
) -> Result<(), PermissionCheckError>;
+fn check_net(
+&mut self,
+host: (&str, Option<u16>),
+api_name: &str,
+) -> Result<(), PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
#[inline(always)]
fn check_read(

@@ -90,6 +95,14 @@ impl NodePermissions for deno_permissions::PermissionsContainer {
deno_permissions::PermissionsContainer::check_net_url(self, url, api_name)
}

+fn check_net(
+&mut self,
+host: (&str, Option<u16>),
+api_name: &str,
+) -> Result<(), PermissionCheckError> {
+deno_permissions::PermissionsContainer::check_net(self, &host, api_name)
+}
+
#[inline(always)]
fn check_read_with_api_name(
&mut self,
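The new check_net trait method takes a (host, optional port) pair instead of a URL, so callers can check bare host names. A self-contained sketch of that calling convention, using a toy mirror of the trait shape added above (the type names, hosts, and api_name strings are illustrative, not Deno's real ones):

    // Toy mirror of the trait shape from the hunk above.
    #[derive(Debug)]
    struct PermissionCheckError(String);

    trait NodePermissions {
        fn check_net(
            &mut self,
            host: (&str, Option<u16>),
            api_name: &str,
        ) -> Result<(), PermissionCheckError>;
    }

    struct AllowAll;

    impl NodePermissions for AllowAll {
        fn check_net(
            &mut self,
            _host: (&str, Option<u16>),
            _api_name: &str,
        ) -> Result<(), PermissionCheckError> {
            // A real implementation would consult the permission set here.
            Ok(())
        }
    }

    fn main() {
        let mut perms = AllowAll;
        // The port is optional, so both forms type-check.
        perms.check_net(("example.com", Some(443)), "node:net.connect").unwrap();
        perms.check_net(("localhost", None), "node:dns.lookup").unwrap();
    }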
@@ -398,6 +411,15 @@ deno_core::extension!(deno_node,
ops::process::op_node_process_kill,
ops::process::op_process_abort,
ops::tls::op_get_root_certificates,
+ops::inspector::op_inspector_open<P>,
+ops::inspector::op_inspector_close,
+ops::inspector::op_inspector_url,
+ops::inspector::op_inspector_wait,
+ops::inspector::op_inspector_connect<P>,
+ops::inspector::op_inspector_dispatch,
+ops::inspector::op_inspector_disconnect,
+ops::inspector::op_inspector_emit_protocol_event,
+ops::inspector::op_inspector_enabled,
],
esm_entry_point = "ext:deno_node/02_init.js",
esm = [

@@ -606,8 +628,8 @@ deno_core::extension!(deno_node,
"node:http" = "http.ts",
"node:http2" = "http2.ts",
"node:https" = "https.ts",
-"node:inspector" = "inspector.ts",
-"node:inspector/promises" = "inspector.ts",
+"node:inspector" = "inspector.js",
+"node:inspector/promises" = "inspector/promises.js",
"node:module" = "01_require.js",
"node:net" = "net.ts",
"node:os" = "os.ts",
@@ -4,9 +4,6 @@ use aes::cipher::block_padding::Pkcs7;
use aes::cipher::BlockDecryptMut;
use aes::cipher::BlockEncryptMut;
use aes::cipher::KeyIvInit;
-use deno_core::error::range_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::Resource;
use digest::generic_array::GenericArray;
use digest::KeyInit;

@@ -50,8 +47,22 @@ pub struct DecipherContext {
decipher: Rc<RefCell<Decipher>>,
}

+#[derive(Debug, thiserror::Error)]
+pub enum CipherContextError {
+#[error("Cipher context is already in use")]
+ContextInUse,
+#[error("{0}")]
+Resource(deno_core::error::AnyError),
+#[error(transparent)]
+Cipher(#[from] CipherError),
+}
+
impl CipherContext {
-pub fn new(algorithm: &str, key: &[u8], iv: &[u8]) -> Result<Self, AnyError> {
+pub fn new(
+algorithm: &str,
+key: &[u8],
+iv: &[u8],
+) -> Result<Self, CipherContextError> {
Ok(Self {
cipher: Rc::new(RefCell::new(Cipher::new(algorithm, key, iv)?)),
})

@@ -74,16 +85,31 @@ impl CipherContext {
auto_pad: bool,
input: &[u8],
output: &mut [u8],
-) -> Result<Tag, AnyError> {
+) -> Result<Tag, CipherContextError> {
Rc::try_unwrap(self.cipher)
-.map_err(|_| type_error("Cipher context is already in use"))?
+.map_err(|_| CipherContextError::ContextInUse)?
.into_inner()
.r#final(auto_pad, input, output)
+.map_err(Into::into)
}
}

+#[derive(Debug, thiserror::Error)]
+pub enum DecipherContextError {
+#[error("Decipher context is already in use")]
+ContextInUse,
+#[error("{0}")]
+Resource(deno_core::error::AnyError),
+#[error(transparent)]
+Decipher(#[from] DecipherError),
+}
+
impl DecipherContext {
-pub fn new(algorithm: &str, key: &[u8], iv: &[u8]) -> Result<Self, AnyError> {
+pub fn new(
+algorithm: &str,
+key: &[u8],
+iv: &[u8],
+) -> Result<Self, DecipherContextError> {
Ok(Self {
decipher: Rc::new(RefCell::new(Decipher::new(algorithm, key, iv)?)),
})
@@ -103,11 +129,12 @@ impl DecipherContext {
input: &[u8],
output: &mut [u8],
auth_tag: &[u8],
-) -> Result<(), AnyError> {
+) -> Result<(), DecipherContextError> {
Rc::try_unwrap(self.decipher)
-.map_err(|_| type_error("Decipher context is already in use"))?
+.map_err(|_| DecipherContextError::ContextInUse)?
.into_inner()
.r#final(auto_pad, input, output, auth_tag)
+.map_err(Into::into)
}
}

@@ -123,12 +150,26 @@ impl Resource for DecipherContext {
}
}

+#[derive(Debug, thiserror::Error)]
+pub enum CipherError {
+#[error("IV length must be 12 bytes")]
+InvalidIvLength,
+#[error("Invalid key length")]
+InvalidKeyLength,
+#[error("Invalid initialization vector")]
+InvalidInitializationVector,
+#[error("Cannot pad the input data")]
+CannotPadInputData,
+#[error("Unknown cipher {0}")]
+UnknownCipher(String),
+}
+
impl Cipher {
fn new(
algorithm_name: &str,
key: &[u8],
iv: &[u8],
-) -> Result<Self, AnyError> {
+) -> Result<Self, CipherError> {
use Cipher::*;
Ok(match algorithm_name {
"aes-128-cbc" => {
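The enums above follow the thiserror pattern used throughout this refactor: each variant carries its own message, and #[from] or transparent variants let the ? operator convert inner errors. A self-contained sketch of the same pattern, assuming only the thiserror crate (the enum, variant, and function names are illustrative, not the diff's own):

    use thiserror::Error;

    #[derive(Debug, Error)]
    enum ContextError {
        #[error("context is already in use")]
        ContextInUse,
        // #[from] generates From<ParseIntError>, so `?` converts automatically.
        #[error(transparent)]
        Parse(#[from] std::num::ParseIntError),
    }

    fn parse_key(raw: &str, in_use: bool) -> Result<u32, ContextError> {
        if in_use {
            return Err(ContextError::ContextInUse);
        }
        // The ParseIntError from str::parse is routed through the #[from] variant.
        Ok(raw.parse::<u32>()?)
    }

    fn main() {
        // Display output comes from the #[error(...)] attributes.
        println!("{}", parse_key("42", true).unwrap_err());
        println!("{:?}", parse_key("42", false));
    }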
@@ -139,7 +180,7 @@ impl Cipher {
"aes-256-ecb" => Aes256Ecb(Box::new(ecb::Encryptor::new(key.into()))),
"aes-128-gcm" => {
if iv.len() != 12 {
-return Err(type_error("IV length must be 12 bytes"));
+return Err(CipherError::InvalidIvLength);
}

let cipher =

@@ -149,7 +190,7 @@ impl Cipher {
}
"aes-256-gcm" => {
if iv.len() != 12 {
-return Err(type_error("IV length must be 12 bytes"));
+return Err(CipherError::InvalidIvLength);
}

let cipher =

@@ -159,15 +200,15 @@ impl Cipher {
}
"aes256" | "aes-256-cbc" => {
if key.len() != 32 {
-return Err(range_error("Invalid key length"));
+return Err(CipherError::InvalidKeyLength);
}
if iv.len() != 16 {
-return Err(type_error("Invalid initialization vector"));
+return Err(CipherError::InvalidInitializationVector);
}

Aes256Cbc(Box::new(cbc::Encryptor::new(key.into(), iv.into())))
}
-_ => return Err(type_error(format!("Unknown cipher {algorithm_name}"))),
+_ => return Err(CipherError::UnknownCipher(algorithm_name.to_string())),
})
}

@@ -235,14 +276,14 @@ impl Cipher {
auto_pad: bool,
input: &[u8],
output: &mut [u8],
-) -> Result<Tag, AnyError> {
+) -> Result<Tag, CipherError> {
assert!(input.len() < 16);
use Cipher::*;
match (self, auto_pad) {
(Aes128Cbc(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
-.map_err(|_| type_error("Cannot pad the input data"))?;
+.map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes128Cbc(mut encryptor), false) => {

@@ -255,7 +296,7 @@ impl Cipher {
(Aes128Ecb(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
-.map_err(|_| type_error("Cannot pad the input data"))?;
+.map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes128Ecb(mut encryptor), false) => {

@@ -268,7 +309,7 @@ impl Cipher {
(Aes192Ecb(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
-.map_err(|_| type_error("Cannot pad the input data"))?;
+.map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes192Ecb(mut encryptor), false) => {

@@ -281,7 +322,7 @@ impl Cipher {
(Aes256Ecb(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
-.map_err(|_| type_error("Cannot pad the input data"))?;
+.map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes256Ecb(mut encryptor), false) => {

@@ -296,7 +337,7 @@ impl Cipher {
(Aes256Cbc(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
-.map_err(|_| type_error("Cannot pad the input data"))?;
+.map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes256Cbc(mut encryptor), false) => {

@@ -319,12 +360,32 @@ impl Cipher {
}
}

+#[derive(Debug, thiserror::Error)]
+pub enum DecipherError {
+#[error("IV length must be 12 bytes")]
+InvalidIvLength,
+#[error("Invalid key length")]
+InvalidKeyLength,
+#[error("Invalid initialization vector")]
+InvalidInitializationVector,
+#[error("Cannot unpad the input data")]
+CannotUnpadInputData,
+#[error("Failed to authenticate data")]
+DataAuthenticationFailed,
+#[error("setAutoPadding(false) not supported for Aes128Gcm yet")]
+SetAutoPaddingFalseAes128GcmUnsupported,
+#[error("setAutoPadding(false) not supported for Aes256Gcm yet")]
+SetAutoPaddingFalseAes256GcmUnsupported,
+#[error("Unknown cipher {0}")]
+UnknownCipher(String),
+}
+
impl Decipher {
fn new(
algorithm_name: &str,
key: &[u8],
iv: &[u8],
-) -> Result<Self, AnyError> {
+) -> Result<Self, DecipherError> {
use Decipher::*;
Ok(match algorithm_name {
"aes-128-cbc" => {
"aes-256-ecb" => Aes256Ecb(Box::new(ecb::Decryptor::new(key.into()))),
|
"aes-256-ecb" => Aes256Ecb(Box::new(ecb::Decryptor::new(key.into()))),
|
||||||
"aes-128-gcm" => {
|
"aes-128-gcm" => {
|
||||||
if iv.len() != 12 {
|
if iv.len() != 12 {
|
||||||
return Err(type_error("IV length must be 12 bytes"));
|
return Err(DecipherError::InvalidIvLength);
|
||||||
}
|
}
|
||||||
|
|
||||||
let decipher =
|
let decipher =
|
||||||
|
@ -345,7 +406,7 @@ impl Decipher {
|
||||||
}
|
}
|
||||||
"aes-256-gcm" => {
|
"aes-256-gcm" => {
|
||||||
if iv.len() != 12 {
|
if iv.len() != 12 {
|
||||||
return Err(type_error("IV length must be 12 bytes"));
|
return Err(DecipherError::InvalidIvLength);
|
||||||
}
|
}
|
||||||
|
|
||||||
let decipher =
|
let decipher =
|
||||||
|
@ -355,15 +416,17 @@ impl Decipher {
|
||||||
}
|
}
|
||||||
"aes256" | "aes-256-cbc" => {
|
"aes256" | "aes-256-cbc" => {
|
||||||
if key.len() != 32 {
|
if key.len() != 32 {
|
||||||
return Err(range_error("Invalid key length"));
|
return Err(DecipherError::InvalidKeyLength);
|
||||||
}
|
}
|
||||||
if iv.len() != 16 {
|
if iv.len() != 16 {
|
||||||
return Err(type_error("Invalid initialization vector"));
|
return Err(DecipherError::InvalidInitializationVector);
|
||||||
}
|
}
|
||||||
|
|
||||||
Aes256Cbc(Box::new(cbc::Decryptor::new(key.into(), iv.into())))
|
Aes256Cbc(Box::new(cbc::Decryptor::new(key.into(), iv.into())))
|
||||||
}
|
}
|
||||||
_ => return Err(type_error(format!("Unknown cipher {algorithm_name}"))),
|
_ => {
|
||||||
|
return Err(DecipherError::UnknownCipher(algorithm_name.to_string()))
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -432,14 +495,14 @@ impl Decipher {
|
||||||
input: &[u8],
|
input: &[u8],
|
||||||
output: &mut [u8],
|
output: &mut [u8],
|
||||||
auth_tag: &[u8],
|
auth_tag: &[u8],
|
||||||
) -> Result<(), AnyError> {
|
) -> Result<(), DecipherError> {
|
||||||
use Decipher::*;
|
use Decipher::*;
|
||||||
match (self, auto_pad) {
|
match (self, auto_pad) {
|
||||||
(Aes128Cbc(decryptor), true) => {
|
(Aes128Cbc(decryptor), true) => {
|
||||||
assert!(input.len() == 16);
|
assert!(input.len() == 16);
|
||||||
let _ = (*decryptor)
|
let _ = (*decryptor)
|
||||||
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
|
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
|
||||||
.map_err(|_| type_error("Cannot unpad the input data"))?;
|
.map_err(|_| DecipherError::CannotUnpadInputData)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
(Aes128Cbc(mut decryptor), false) => {
|
(Aes128Cbc(mut decryptor), false) => {
|
||||||
|
@ -453,7 +516,7 @@ impl Decipher {
|
||||||
assert!(input.len() == 16);
|
assert!(input.len() == 16);
|
||||||
let _ = (*decryptor)
|
let _ = (*decryptor)
|
||||||
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
|
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
|
||||||
.map_err(|_| type_error("Cannot unpad the input data"))?;
|
.map_err(|_| DecipherError::CannotUnpadInputData)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
(Aes128Ecb(mut decryptor), false) => {
|
(Aes128Ecb(mut decryptor), false) => {
|
||||||
|
@ -467,7 +530,7 @@ impl Decipher {
|
||||||
assert!(input.len() == 16);
|
assert!(input.len() == 16);
|
||||||
let _ = (*decryptor)
|
let _ = (*decryptor)
|
||||||
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
|
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
|
||||||
.map_err(|_| type_error("Cannot unpad the input data"))?;
|
.map_err(|_| DecipherError::CannotUnpadInputData)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
(Aes192Ecb(mut decryptor), false) => {
|
(Aes192Ecb(mut decryptor), false) => {
|
||||||
|
@ -481,7 +544,7 @@ impl Decipher {
|
||||||
assert!(input.len() == 16);
|
assert!(input.len() == 16);
|
||||||
let _ = (*decryptor)
|
let _ = (*decryptor)
|
||||||
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
|
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
|
||||||
.map_err(|_| type_error("Cannot unpad the input data"))?;
|
.map_err(|_| DecipherError::CannotUnpadInputData)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
(Aes256Ecb(mut decryptor), false) => {
|
(Aes256Ecb(mut decryptor), false) => {
|
||||||
|
@ -496,28 +559,28 @@ impl Decipher {
|
||||||
if tag.as_slice() == auth_tag {
|
if tag.as_slice() == auth_tag {
|
||||||
Ok(())
|
Ok(())
|
||||||
} else {
|
} else {
|
||||||
Err(type_error("Failed to authenticate data"))
|
Err(DecipherError::DataAuthenticationFailed)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
(Aes128Gcm(_), false) => Err(type_error(
|
(Aes128Gcm(_), false) => {
|
||||||
"setAutoPadding(false) not supported for Aes256Gcm yet",
|
Err(DecipherError::SetAutoPaddingFalseAes128GcmUnsupported)
|
||||||
)),
|
}
|
||||||
(Aes256Gcm(decipher), true) => {
|
(Aes256Gcm(decipher), true) => {
|
||||||
let tag = decipher.finish();
|
let tag = decipher.finish();
|
||||||
if tag.as_slice() == auth_tag {
|
if tag.as_slice() == auth_tag {
|
||||||
Ok(())
|
Ok(())
|
||||||
} else {
|
} else {
|
||||||
Err(type_error("Failed to authenticate data"))
|
Err(DecipherError::DataAuthenticationFailed)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
(Aes256Gcm(_), false) => Err(type_error(
|
(Aes256Gcm(_), false) => {
|
||||||
"setAutoPadding(false) not supported for Aes256Gcm yet",
|
Err(DecipherError::SetAutoPaddingFalseAes256GcmUnsupported)
|
||||||
)),
|
}
|
||||||
(Aes256Cbc(decryptor), true) => {
|
(Aes256Cbc(decryptor), true) => {
|
||||||
assert!(input.len() == 16);
|
assert!(input.len() == 16);
|
||||||
let _ = (*decryptor)
|
let _ = (*decryptor)
|
||||||
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
|
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
|
||||||
.map_err(|_| type_error("Cannot unpad the input data"))?;
|
.map_err(|_| DecipherError::CannotUnpadInputData)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
(Aes256Cbc(mut decryptor), false) => {
|
(Aes256Cbc(mut decryptor), false) => {
|
||||||
|
|
|
@@ -1,6 +1,4 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::generic_error;
-use deno_core::error::AnyError;
use deno_core::GarbageCollected;
use digest::Digest;
use digest::DynDigest;

@@ -19,7 +17,7 @@ impl Hasher {
pub fn new(
algorithm: &str,
output_length: Option<usize>,
-) -> Result<Self, AnyError> {
+) -> Result<Self, HashError> {
let hash = Hash::new(algorithm, output_length)?;

Ok(Self {

@@ -44,7 +42,7 @@ impl Hasher {
pub fn clone_inner(
&self,
output_length: Option<usize>,
-) -> Result<Option<Self>, AnyError> {
+) -> Result<Option<Self>, HashError> {
let hash = self.hash.borrow();
let Some(hash) = hash.as_ref() else {
return Ok(None);

@@ -184,11 +182,19 @@ pub enum Hash {

use Hash::*;

+#[derive(Debug, thiserror::Error)]
+pub enum HashError {
+#[error("Output length mismatch for non-extendable algorithm")]
+OutputLengthMismatch,
+#[error("Digest method not supported: {0}")]
+DigestMethodUnsupported(String),
+}
+
impl Hash {
pub fn new(
algorithm_name: &str,
output_length: Option<usize>,
-) -> Result<Self, AnyError> {
+) -> Result<Self, HashError> {
match algorithm_name {
"shake128" => return Ok(Shake128(Default::default(), output_length)),
"shake256" => return Ok(Shake256(Default::default(), output_length)),

@@ -201,17 +207,13 @@ impl Hash {
let digest: D = Digest::new();
if let Some(length) = output_length {
if length != digest.output_size() {
-return Err(generic_error(
-"Output length mismatch for non-extendable algorithm",
-));
+return Err(HashError::OutputLengthMismatch);
}
}
FixedSize(Box::new(digest))
},
_ => {
-return Err(generic_error(format!(
-"Digest method not supported: {algorithm_name}"
-)))
+return Err(HashError::DigestMethodUnsupported(algorithm_name.to_string()))
}
);

@@ -243,14 +245,12 @@ impl Hash {
pub fn clone_hash(
&self,
output_length: Option<usize>,
-) -> Result<Self, AnyError> {
+) -> Result<Self, HashError> {
let hash = match self {
FixedSize(context) => {
if let Some(length) = output_length {
if length != context.output_size() {
-return Err(generic_error(
-"Output length mismatch for non-extendable algorithm",
-));
+return Err(HashError::OutputLengthMismatch);
}
}
FixedSize(context.box_clone())

(File diff suppressed because it is too large.)
@@ -1,7 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use deno_core::error::generic_error;
use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::unsync::spawn_blocking;
use deno_core::JsBuffer;

@@ -34,14 +33,14 @@ use rsa::Pkcs1v15Encrypt;
use rsa::RsaPrivateKey;
use rsa::RsaPublicKey;

-mod cipher;
+pub mod cipher;
mod dh;
-mod digest;
+pub mod digest;
pub mod keys;
mod md5_sha1;
mod pkcs3;
mod primes;
-mod sign;
+pub mod sign;
pub mod x509;

use self::digest::match_fixed_digest_with_eager_block_buffer;

@@ -58,38 +57,31 @@ pub fn op_node_check_prime(
pub fn op_node_check_prime_bytes(
#[anybuffer] bytes: &[u8],
#[number] checks: usize,
-) -> Result<bool, AnyError> {
+) -> bool {
let candidate = BigInt::from_bytes_be(num_bigint::Sign::Plus, bytes);
-Ok(primes::is_probably_prime(&candidate, checks))
+primes::is_probably_prime(&candidate, checks)
}

#[op2(async)]
pub async fn op_node_check_prime_async(
#[bigint] num: i64,
#[number] checks: usize,
-) -> Result<bool, AnyError> {
+) -> Result<bool, tokio::task::JoinError> {
// TODO(@littledivy): use rayon for CPU-bound tasks
-Ok(
-spawn_blocking(move || {
-primes::is_probably_prime(&BigInt::from(num), checks)
-})
-.await?,
-)
+spawn_blocking(move || primes::is_probably_prime(&BigInt::from(num), checks))
+.await
}

#[op2(async)]
pub fn op_node_check_prime_bytes_async(
#[anybuffer] bytes: &[u8],
#[number] checks: usize,
-) -> Result<impl Future<Output = Result<bool, AnyError>>, AnyError> {
+) -> impl Future<Output = Result<bool, tokio::task::JoinError>> {
let candidate = BigInt::from_bytes_be(num_bigint::Sign::Plus, bytes);
// TODO(@littledivy): use rayon for CPU-bound tasks
-Ok(async move {
-Ok(
-spawn_blocking(move || primes::is_probably_prime(&candidate, checks))
-.await?,
-)
-})
+async move {
+spawn_blocking(move || primes::is_probably_prime(&candidate, checks)).await
+}
}

#[op2]
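The prime-check ops above now return the JoinError from the blocking task directly instead of wrapping everything in AnyError. A minimal sketch of that shape with plain tokio (the primality closure is a trial-division stand-in, not Deno's real check):

    use tokio::task::{spawn_blocking, JoinError};

    // CPU-bound work runs on the blocking pool; awaiting the handle yields
    // Result<T, JoinError>, which the caller can propagate with `?`.
    async fn is_probably_prime_async(n: u64) -> Result<bool, JoinError> {
        spawn_blocking(move || {
            if n < 2 {
                return false;
            }
            (2..).take_while(|d| d * d <= n).all(|d| n % d != 0)
        })
        .await
    }

    #[tokio::main]
    async fn main() -> Result<(), JoinError> {
        println!("{}", is_probably_prime_async(97).await?);
        Ok(())
    }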
@@ -97,7 +89,7 @@ pub fn op_node_check_prime_bytes_async(
pub fn op_node_create_hash(
#[string] algorithm: &str,
output_length: Option<u32>,
-) -> Result<digest::Hasher, AnyError> {
+) -> Result<digest::Hasher, digest::HashError> {
digest::Hasher::new(algorithm, output_length.map(|l| l as usize))
}

@@ -145,17 +137,31 @@ pub fn op_node_hash_digest_hex(
pub fn op_node_hash_clone(
#[cppgc] hasher: &digest::Hasher,
output_length: Option<u32>,
-) -> Result<Option<digest::Hasher>, AnyError> {
+) -> Result<Option<digest::Hasher>, digest::HashError> {
hasher.clone_inner(output_length.map(|l| l as usize))
}

+#[derive(Debug, thiserror::Error)]
+pub enum PrivateEncryptDecryptError {
+#[error(transparent)]
+Pkcs8(#[from] pkcs8::Error),
+#[error(transparent)]
+Spki(#[from] spki::Error),
+#[error(transparent)]
+Utf8(#[from] std::str::Utf8Error),
+#[error(transparent)]
+Rsa(#[from] rsa::Error),
+#[error("Unknown padding")]
+UnknownPadding,
+}
+
#[op2]
#[serde]
pub fn op_node_private_encrypt(
#[serde] key: StringOrBuffer,
#[serde] msg: StringOrBuffer,
#[smi] padding: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, PrivateEncryptDecryptError> {
let key = RsaPrivateKey::from_pkcs8_pem((&key).try_into()?)?;

let mut rng = rand::thread_rng();

@@ -172,7 +178,7 @@ pub fn op_node_private_encrypt(
.encrypt(&mut rng, Oaep::new::<sha1::Sha1>(), &msg)?
.into(),
),
-_ => Err(type_error("Unknown padding")),
+_ => Err(PrivateEncryptDecryptError::UnknownPadding),
}
}

@@ -182,13 +188,13 @@ pub fn op_node_private_decrypt(
#[serde] key: StringOrBuffer,
#[serde] msg: StringOrBuffer,
#[smi] padding: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, PrivateEncryptDecryptError> {
let key = RsaPrivateKey::from_pkcs8_pem((&key).try_into()?)?;

match padding {
1 => Ok(key.decrypt(Pkcs1v15Encrypt, &msg)?.into()),
4 => Ok(key.decrypt(Oaep::new::<sha1::Sha1>(), &msg)?.into()),
-_ => Err(type_error("Unknown padding")),
+_ => Err(PrivateEncryptDecryptError::UnknownPadding),
}
}

@@ -198,7 +204,7 @@ pub fn op_node_public_encrypt(
#[serde] key: StringOrBuffer,
#[serde] msg: StringOrBuffer,
#[smi] padding: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, PrivateEncryptDecryptError> {
let key = RsaPublicKey::from_public_key_pem((&key).try_into()?)?;

let mut rng = rand::thread_rng();

@@ -209,7 +215,7 @@ pub fn op_node_public_encrypt(
.encrypt(&mut rng, Oaep::new::<sha1::Sha1>(), &msg)?
.into(),
),
-_ => Err(type_error("Unknown padding")),
+_ => Err(PrivateEncryptDecryptError::UnknownPadding),
}
}

@@ -220,7 +226,7 @@ pub fn op_node_create_cipheriv(
#[string] algorithm: &str,
#[buffer] key: &[u8],
#[buffer] iv: &[u8],
-) -> Result<u32, AnyError> {
+) -> Result<u32, cipher::CipherContextError> {
let context = cipher::CipherContext::new(algorithm, key, iv)?;
Ok(state.resource_table.add(context))
}

@@ -262,11 +268,14 @@ pub fn op_node_cipheriv_final(
auto_pad: bool,
#[buffer] input: &[u8],
#[anybuffer] output: &mut [u8],
-) -> Result<Option<Vec<u8>>, AnyError> {
+) -> Result<Option<Vec<u8>>, cipher::CipherContextError> {
-let context = state.resource_table.take::<cipher::CipherContext>(rid)?;
+let context = state
+.resource_table
+.take::<cipher::CipherContext>(rid)
+.map_err(cipher::CipherContextError::Resource)?;
let context = Rc::try_unwrap(context)
-.map_err(|_| type_error("Cipher context is already in use"))?;
+.map_err(|_| cipher::CipherContextError::ContextInUse)?;
-context.r#final(auto_pad, input, output)
+context.r#final(auto_pad, input, output).map_err(Into::into)
}

#[op2]

@@ -274,10 +283,13 @@ pub fn op_node_cipheriv_final(
pub fn op_node_cipheriv_take(
state: &mut OpState,
#[smi] rid: u32,
-) -> Result<Option<Vec<u8>>, AnyError> {
+) -> Result<Option<Vec<u8>>, cipher::CipherContextError> {
-let context = state.resource_table.take::<cipher::CipherContext>(rid)?;
+let context = state
+.resource_table
+.take::<cipher::CipherContext>(rid)
+.map_err(cipher::CipherContextError::Resource)?;
let context = Rc::try_unwrap(context)
-.map_err(|_| type_error("Cipher context is already in use"))?;
+.map_err(|_| cipher::CipherContextError::ContextInUse)?;
Ok(context.take_tag())
}

@@ -288,7 +300,7 @@ pub fn op_node_create_decipheriv(
#[string] algorithm: &str,
#[buffer] key: &[u8],
#[buffer] iv: &[u8],
-) -> Result<u32, AnyError> {
+) -> Result<u32, cipher::DecipherContextError> {
let context = cipher::DecipherContext::new(algorithm, key, iv)?;
Ok(state.resource_table.add(context))
}

@@ -326,10 +338,13 @@ pub fn op_node_decipheriv_decrypt(
pub fn op_node_decipheriv_take(
state: &mut OpState,
#[smi] rid: u32,
-) -> Result<(), AnyError> {
+) -> Result<(), cipher::DecipherContextError> {
-let context = state.resource_table.take::<cipher::DecipherContext>(rid)?;
+let context = state
+.resource_table
+.take::<cipher::DecipherContext>(rid)
+.map_err(cipher::DecipherContextError::Resource)?;
Rc::try_unwrap(context)
-.map_err(|_| type_error("Cipher context is already in use"))?;
+.map_err(|_| cipher::DecipherContextError::ContextInUse)?;
Ok(())
}

@@ -341,11 +356,16 @@ pub fn op_node_decipheriv_final(
#[buffer] input: &[u8],
#[anybuffer] output: &mut [u8],
#[buffer] auth_tag: &[u8],
-) -> Result<(), AnyError> {
+) -> Result<(), cipher::DecipherContextError> {
-let context = state.resource_table.take::<cipher::DecipherContext>(rid)?;
+let context = state
+.resource_table
+.take::<cipher::DecipherContext>(rid)
+.map_err(cipher::DecipherContextError::Resource)?;
let context = Rc::try_unwrap(context)
-.map_err(|_| type_error("Cipher context is already in use"))?;
+.map_err(|_| cipher::DecipherContextError::ContextInUse)?;
-context.r#final(auto_pad, input, output, auth_tag)
+context
+.r#final(auto_pad, input, output, auth_tag)
+.map_err(Into::into)
}

#[op2]

@@ -356,7 +376,7 @@ pub fn op_node_sign(
#[string] digest_type: &str,
#[smi] pss_salt_length: Option<u32>,
#[smi] dsa_signature_encoding: u32,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, sign::KeyObjectHandlePrehashedSignAndVerifyError> {
handle.sign_prehashed(
digest_type,
digest,

@@ -373,7 +393,7 @@ pub fn op_node_verify(
#[buffer] signature: &[u8],
#[smi] pss_salt_length: Option<u32>,
#[smi] dsa_signature_encoding: u32,
-) -> Result<bool, AnyError> {
+) -> Result<bool, sign::KeyObjectHandlePrehashedSignAndVerifyError> {
handle.verify_prehashed(
digest_type,
digest,

@@ -383,13 +403,21 @@ pub fn op_node_verify(
)
}

+#[derive(Debug, thiserror::Error)]
+pub enum Pbkdf2Error {
+#[error("unsupported digest: {0}")]
+UnsupportedDigest(String),
+#[error(transparent)]
+Join(#[from] tokio::task::JoinError),
+}
+
fn pbkdf2_sync(
password: &[u8],
salt: &[u8],
iterations: u32,
algorithm_name: &str,
derived_key: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), Pbkdf2Error> {
match_fixed_digest_with_eager_block_buffer!(
algorithm_name,
fn <D>() {

@@ -397,10 +425,7 @@ fn pbkdf2_sync(
Ok(())
},
_ => {
-Err(type_error(format!(
-"unsupported digest: {}",
-algorithm_name
-)))
+Err(Pbkdf2Error::UnsupportedDigest(algorithm_name.to_string()))
}
)
}

@@ -424,7 +449,7 @@ pub async fn op_node_pbkdf2_async(
#[smi] iterations: u32,
#[string] digest: String,
#[number] keylen: usize,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, Pbkdf2Error> {
spawn_blocking(move || {
let mut derived_key = vec![0; keylen];
pbkdf2_sync(&password, &salt, iterations, &digest, &mut derived_key)

@@ -450,15 +475,27 @@ pub async fn op_node_fill_random_async(#[smi] len: i32) -> ToJsBuffer {
.unwrap()
}

+#[derive(Debug, thiserror::Error)]
+pub enum HkdfError {
+#[error("expected secret key")]
+ExpectedSecretKey,
+#[error("HKDF-Expand failed")]
+HkdfExpandFailed,
+#[error("Unsupported digest: {0}")]
+UnsupportedDigest(String),
+#[error(transparent)]
+Join(#[from] tokio::task::JoinError),
+}
+
fn hkdf_sync(
digest_algorithm: &str,
handle: &KeyObjectHandle,
salt: &[u8],
info: &[u8],
okm: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), HkdfError> {
let Some(ikm) = handle.as_secret_key() else {
-return Err(type_error("expected secret key"));
+return Err(HkdfError::ExpectedSecretKey);
};

match_fixed_digest_with_eager_block_buffer!(

@@ -466,10 +503,10 @@ fn hkdf_sync(
fn <D>() {
let hk = Hkdf::<D>::new(Some(salt), ikm);
hk.expand(info, okm)
-.map_err(|_| type_error("HKDF-Expand failed"))
+.map_err(|_| HkdfError::HkdfExpandFailed)
},
_ => {
-Err(type_error(format!("Unsupported digest: {}", digest_algorithm)))
+Err(HkdfError::UnsupportedDigest(digest_algorithm.to_string()))
}
)
}

@@ -481,7 +518,7 @@ pub fn op_node_hkdf(
#[buffer] salt: &[u8],
#[buffer] info: &[u8],
#[buffer] okm: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), HkdfError> {
hkdf_sync(digest_algorithm, handle, salt, info, okm)
}

@@ -493,7 +530,7 @@ pub async fn op_node_hkdf_async(
#[buffer] salt: JsBuffer,
#[buffer] info: JsBuffer,
#[number] okm_len: usize,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, HkdfError> {
let handle = handle.clone();
spawn_blocking(move || {
let mut okm = vec![0u8; okm_len];
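hkdf_sync above drives the hkdf crate's expand step. A small self-contained sketch of the same two calls, assuming the hkdf and sha2 crates (the salt, input keying material, and info values are illustrative):

    use hkdf::Hkdf;
    use sha2::Sha256;

    fn main() {
        let salt = b"salt";
        let ikm = b"input keying material";
        let info = b"context info";

        // Extract-and-expand: new() runs HKDF-Extract, expand() fills okm.
        let hk = Hkdf::<Sha256>::new(Some(salt), ikm);
        let mut okm = [0u8; 32];
        hk.expand(info, &mut okm)
            .expect("32 bytes is a valid output length for SHA-256");

        println!("{}", okm.iter().map(|b| format!("{b:02x}")).collect::<String>());
    }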
@@ -509,27 +546,24 @@ pub fn op_node_dh_compute_secret(
#[buffer] prime: JsBuffer,
#[buffer] private_key: JsBuffer,
#[buffer] their_public_key: JsBuffer,
-) -> Result<ToJsBuffer, AnyError> {
+) -> ToJsBuffer {
let pubkey: BigUint = BigUint::from_bytes_be(their_public_key.as_ref());
let privkey: BigUint = BigUint::from_bytes_be(private_key.as_ref());
let primei: BigUint = BigUint::from_bytes_be(prime.as_ref());
let shared_secret: BigUint = pubkey.modpow(&privkey, &primei);

-Ok(shared_secret.to_bytes_be().into())
+shared_secret.to_bytes_be().into()
}

#[op2(fast)]
#[number]
-pub fn op_node_random_int(
-#[number] min: i64,
-#[number] max: i64,
-) -> Result<i64, AnyError> {
+pub fn op_node_random_int(#[number] min: i64, #[number] max: i64) -> i64 {
let mut rng = rand::thread_rng();
// Uniform distribution is required to avoid Modulo Bias
// https://en.wikipedia.org/wiki/Fisher–Yates_shuffle#Modulo_bias
let dist = Uniform::from(min..max);

-Ok(dist.sample(&mut rng))
+dist.sample(&mut rng)
}

#[allow(clippy::too_many_arguments)]
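op_node_random_int above relies on rand's Uniform distribution to avoid modulo bias. A standalone sketch of the same sampling, assuming the rand crate (the bounds in main are illustrative):

    use rand::distributions::{Distribution, Uniform};

    fn random_int(min: i64, max: i64) -> i64 {
        let mut rng = rand::thread_rng();
        // Uniform::from(min..max) samples the half-open range without the bias
        // that a plain `value % (max - min)` reduction would introduce.
        let dist = Uniform::from(min..max);
        dist.sample(&mut rng)
    }

    fn main() {
        println!("{}", random_int(0, 10));
    }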
@ -542,7 +576,7 @@ fn scrypt(
|
||||||
parallelization: u32,
|
parallelization: u32,
|
||||||
_maxmem: u32,
|
_maxmem: u32,
|
||||||
output_buffer: &mut [u8],
|
output_buffer: &mut [u8],
|
||||||
) -> Result<(), AnyError> {
|
) -> Result<(), deno_core::error::AnyError> {
|
||||||
// Construct Params
|
// Construct Params
|
||||||
let params = scrypt::Params::new(
|
let params = scrypt::Params::new(
|
||||||
cost as u8,
|
cost as u8,
|
||||||
|
@ -573,7 +607,7 @@ pub fn op_node_scrypt_sync(
|
||||||
#[smi] parallelization: u32,
|
#[smi] parallelization: u32,
|
||||||
#[smi] maxmem: u32,
|
#[smi] maxmem: u32,
|
||||||
#[anybuffer] output_buffer: &mut [u8],
|
#[anybuffer] output_buffer: &mut [u8],
|
||||||
) -> Result<(), AnyError> {
|
) -> Result<(), deno_core::error::AnyError> {
|
||||||
scrypt(
|
scrypt(
|
||||||
password,
|
password,
|
||||||
salt,
|
salt,
|
||||||
|
@ -586,6 +620,14 @@ pub fn op_node_scrypt_sync(
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum ScryptAsyncError {
|
||||||
|
#[error(transparent)]
|
||||||
|
Join(#[from] tokio::task::JoinError),
|
||||||
|
#[error(transparent)]
|
||||||
|
Other(deno_core::error::AnyError),
|
||||||
|
}
|
||||||
|
|
||||||
#[op2(async)]
|
#[op2(async)]
|
||||||
#[serde]
|
#[serde]
|
||||||
pub async fn op_node_scrypt_async(
|
pub async fn op_node_scrypt_async(
|
||||||
|
@ -596,10 +638,11 @@ pub async fn op_node_scrypt_async(
|
||||||
#[smi] block_size: u32,
|
#[smi] block_size: u32,
|
||||||
#[smi] parallelization: u32,
|
#[smi] parallelization: u32,
|
||||||
#[smi] maxmem: u32,
|
#[smi] maxmem: u32,
|
||||||
) -> Result<ToJsBuffer, AnyError> {
|
) -> Result<ToJsBuffer, ScryptAsyncError> {
|
||||||
spawn_blocking(move || {
|
spawn_blocking(move || {
|
||||||
let mut output_buffer = vec![0u8; keylen as usize];
|
let mut output_buffer = vec![0u8; keylen as usize];
|
||||||
let res = scrypt(
|
|
||||||
|
scrypt(
|
||||||
password,
|
password,
|
||||||
salt,
|
salt,
|
||||||
keylen,
|
keylen,
|
||||||
|
@ -608,25 +651,30 @@ pub async fn op_node_scrypt_async(
|
||||||
parallelization,
|
parallelization,
|
||||||
maxmem,
|
maxmem,
|
||||||
&mut output_buffer,
|
&mut output_buffer,
|
||||||
);
|
)
|
||||||
|
.map(|_| output_buffer.into())
|
||||||
if res.is_ok() {
|
.map_err(ScryptAsyncError::Other)
|
||||||
Ok(output_buffer.into())
|
|
||||||
} else {
|
|
||||||
// TODO(lev): rethrow the error?
|
|
||||||
Err(generic_error("scrypt failure"))
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
.await?
|
.await?
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum EcdhEncodePubKey {
|
||||||
|
#[error("Invalid public key")]
|
||||||
|
InvalidPublicKey,
|
||||||
|
#[error("Unsupported curve")]
|
||||||
|
UnsupportedCurve,
|
||||||
|
#[error(transparent)]
|
||||||
|
Sec1(#[from] sec1::Error),
|
||||||
|
}
|
||||||
|
|
||||||
#[op2]
|
#[op2]
|
||||||
#[buffer]
|
#[buffer]
|
||||||
pub fn op_node_ecdh_encode_pubkey(
|
pub fn op_node_ecdh_encode_pubkey(
|
||||||
#[string] curve: &str,
|
#[string] curve: &str,
|
||||||
#[buffer] pubkey: &[u8],
|
#[buffer] pubkey: &[u8],
|
||||||
compress: bool,
|
compress: bool,
|
||||||
) -> Result<Vec<u8>, AnyError> {
|
) -> Result<Vec<u8>, EcdhEncodePubKey> {
|
||||||
use elliptic_curve::sec1::FromEncodedPoint;
|
use elliptic_curve::sec1::FromEncodedPoint;
|
||||||
|
|
||||||
match curve {
|
match curve {
|
@ -639,7 +687,7 @@ pub fn op_node_ecdh_encode_pubkey(
       );
       // CtOption does not expose its variants.
       if pubkey.is_none().into() {
-        return Err(type_error("Invalid public key"));
+        return Err(EcdhEncodePubKey::InvalidPublicKey);
       }
 
       let pubkey = pubkey.unwrap();
@ -652,7 +700,7 @@ pub fn op_node_ecdh_encode_pubkey(
       );
       // CtOption does not expose its variants.
       if pubkey.is_none().into() {
-        return Err(type_error("Invalid public key"));
+        return Err(EcdhEncodePubKey::InvalidPublicKey);
       }
 
       let pubkey = pubkey.unwrap();
@ -665,7 +713,7 @@ pub fn op_node_ecdh_encode_pubkey(
       );
       // CtOption does not expose its variants.
       if pubkey.is_none().into() {
-        return Err(type_error("Invalid public key"));
+        return Err(EcdhEncodePubKey::InvalidPublicKey);
      }
 
       let pubkey = pubkey.unwrap();
@ -678,14 +726,14 @@ pub fn op_node_ecdh_encode_pubkey(
       );
       // CtOption does not expose its variants.
       if pubkey.is_none().into() {
-        return Err(type_error("Invalid public key"));
+        return Err(EcdhEncodePubKey::InvalidPublicKey);
       }
 
       let pubkey = pubkey.unwrap();
 
       Ok(pubkey.to_encoded_point(compress).as_ref().to_vec())
     }
-    &_ => Err(type_error("Unsupported curve")),
+    &_ => Err(EcdhEncodePubKey::UnsupportedCurve),
   }
 }
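EcdhEncodePubKey's transparent Sec1 variant above relies on thiserror's #[from] to generate a From impl, so `?` converts the library error without a map_err. A tiny illustration with a hypothetical enum and std's ParseIntError standing in for sec1::Error:

#[derive(Debug, thiserror::Error)]
pub enum EncodePubKeyError {
  #[error("Invalid public key")]
  InvalidPublicKey,
  #[error(transparent)]
  Parse(#[from] std::num::ParseIntError),
}

fn decode_len(text: &str) -> Result<usize, EncodePubKeyError> {
  // ParseIntError converts into EncodePubKeyError via the #[from] impl.
  let len: usize = text.parse()?;
  if len == 0 {
    return Err(EncodePubKeyError::InvalidPublicKey);
  }
  Ok(len)
}

The #[error(transparent)] attribute also forwards Display to the wrapped error, so the message seen by callers is the library's own.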
||||||
|
|
||||||
|
@ -695,7 +743,7 @@ pub fn op_node_ecdh_generate_keys(
|
||||||
#[buffer] pubbuf: &mut [u8],
|
#[buffer] pubbuf: &mut [u8],
|
||||||
#[buffer] privbuf: &mut [u8],
|
#[buffer] privbuf: &mut [u8],
|
||||||
#[string] format: &str,
|
#[string] format: &str,
|
||||||
) -> Result<(), AnyError> {
|
) -> Result<(), deno_core::error::AnyError> {
|
||||||
let mut rng = rand::thread_rng();
|
let mut rng = rand::thread_rng();
|
||||||
let compress = format == "compressed";
|
let compress = format == "compressed";
|
||||||
match curve {
|
match curve {
|
||||||
|
@ -742,7 +790,7 @@ pub fn op_node_ecdh_compute_secret(
|
||||||
#[buffer] this_priv: Option<JsBuffer>,
|
#[buffer] this_priv: Option<JsBuffer>,
|
||||||
#[buffer] their_pub: &mut [u8],
|
#[buffer] their_pub: &mut [u8],
|
||||||
#[buffer] secret: &mut [u8],
|
#[buffer] secret: &mut [u8],
|
||||||
) -> Result<(), AnyError> {
|
) {
|
||||||
match curve {
|
match curve {
|
||||||
"secp256k1" => {
|
"secp256k1" => {
|
||||||
let their_public_key =
|
let their_public_key =
|
||||||
|
@ -760,8 +808,6 @@ pub fn op_node_ecdh_compute_secret(
|
||||||
their_public_key.as_affine(),
|
their_public_key.as_affine(),
|
||||||
);
|
);
|
||||||
secret.copy_from_slice(shared_secret.raw_secret_bytes());
|
secret.copy_from_slice(shared_secret.raw_secret_bytes());
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
"prime256v1" | "secp256r1" => {
|
"prime256v1" | "secp256r1" => {
|
||||||
let their_public_key =
|
let their_public_key =
|
||||||
|
@ -776,8 +822,6 @@ pub fn op_node_ecdh_compute_secret(
|
||||||
their_public_key.as_affine(),
|
their_public_key.as_affine(),
|
||||||
);
|
);
|
||||||
secret.copy_from_slice(shared_secret.raw_secret_bytes());
|
secret.copy_from_slice(shared_secret.raw_secret_bytes());
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
"secp384r1" => {
|
"secp384r1" => {
|
||||||
let their_public_key =
|
let their_public_key =
|
||||||
|
@ -792,8 +836,6 @@ pub fn op_node_ecdh_compute_secret(
|
||||||
their_public_key.as_affine(),
|
their_public_key.as_affine(),
|
||||||
);
|
);
|
||||||
secret.copy_from_slice(shared_secret.raw_secret_bytes());
|
secret.copy_from_slice(shared_secret.raw_secret_bytes());
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
"secp224r1" => {
|
"secp224r1" => {
|
||||||
let their_public_key =
|
let their_public_key =
|
||||||
|
@ -808,8 +850,6 @@ pub fn op_node_ecdh_compute_secret(
|
||||||
their_public_key.as_affine(),
|
their_public_key.as_affine(),
|
||||||
);
|
);
|
||||||
secret.copy_from_slice(shared_secret.raw_secret_bytes());
|
secret.copy_from_slice(shared_secret.raw_secret_bytes());
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
&_ => todo!(),
|
&_ => todo!(),
|
||||||
}
|
}
|
||||||
|
@ -820,7 +860,7 @@ pub fn op_node_ecdh_compute_public_key(
|
||||||
#[string] curve: &str,
|
#[string] curve: &str,
|
||||||
#[buffer] privkey: &[u8],
|
#[buffer] privkey: &[u8],
|
||||||
#[buffer] pubkey: &mut [u8],
|
#[buffer] pubkey: &mut [u8],
|
||||||
) -> Result<(), AnyError> {
|
) {
|
||||||
match curve {
|
match curve {
|
||||||
"secp256k1" => {
|
"secp256k1" => {
|
||||||
let this_private_key =
|
let this_private_key =
|
||||||
|
@ -828,8 +868,6 @@ pub fn op_node_ecdh_compute_public_key(
|
||||||
.expect("bad private key");
|
.expect("bad private key");
|
||||||
let public_key = this_private_key.public_key();
|
let public_key = this_private_key.public_key();
|
||||||
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
|
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
"prime256v1" | "secp256r1" => {
|
"prime256v1" | "secp256r1" => {
|
||||||
let this_private_key =
|
let this_private_key =
|
||||||
|
@ -837,7 +875,6 @@ pub fn op_node_ecdh_compute_public_key(
|
||||||
.expect("bad private key");
|
.expect("bad private key");
|
||||||
let public_key = this_private_key.public_key();
|
let public_key = this_private_key.public_key();
|
||||||
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
|
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
"secp384r1" => {
|
"secp384r1" => {
|
||||||
let this_private_key =
|
let this_private_key =
|
||||||
|
@ -845,7 +882,6 @@ pub fn op_node_ecdh_compute_public_key(
|
||||||
.expect("bad private key");
|
.expect("bad private key");
|
||||||
let public_key = this_private_key.public_key();
|
let public_key = this_private_key.public_key();
|
||||||
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
|
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
"secp224r1" => {
|
"secp224r1" => {
|
||||||
let this_private_key =
|
let this_private_key =
|
||||||
|
@ -853,7 +889,6 @@ pub fn op_node_ecdh_compute_public_key(
|
||||||
.expect("bad private key");
|
.expect("bad private key");
|
||||||
let public_key = this_private_key.public_key();
|
let public_key = this_private_key.public_key();
|
||||||
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
|
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
&_ => todo!(),
|
&_ => todo!(),
|
||||||
}
|
}
|
@ -874,8 +909,20 @@ pub fn op_node_gen_prime(#[number] size: usize) -> ToJsBuffer {
 #[serde]
 pub async fn op_node_gen_prime_async(
   #[number] size: usize,
-) -> Result<ToJsBuffer, AnyError> {
-  Ok(spawn_blocking(move || gen_prime(size)).await?)
+) -> Result<ToJsBuffer, tokio::task::JoinError> {
+  spawn_blocking(move || gen_prime(size)).await
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum DiffieHellmanError {
+  #[error("Expected private key")]
+  ExpectedPrivateKey,
+  #[error("Expected public key")]
+  ExpectedPublicKey,
+  #[error("DH parameters mismatch")]
+  DhParametersMismatch,
+  #[error("Unsupported key type for diffie hellman, or key type mismatch")]
+  UnsupportedKeyTypeForDiffieHellmanOrKeyTypeMismatch,
 }
 
 #[op2]
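Since gen_prime itself cannot fail here, the only remaining failure is the join, so the op now returns tokio::task::JoinError directly. A stripped-down sketch of the same shape (illustrative names only):

use tokio::task::{spawn_blocking, JoinError};

// Stand-in for the blocking, infallible prime generation.
fn gen_value(size: usize) -> Vec<u8> {
  vec![0u8; size]
}

pub async fn gen_value_async(size: usize) -> Result<Vec<u8>, JoinError> {
  // No Ok(...) wrapper or `?` needed: the JoinError is the whole error type.
  spawn_blocking(move || gen_value(size)).await
}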
@ -883,15 +930,16 @@ pub async fn op_node_gen_prime_async(
 pub fn op_node_diffie_hellman(
   #[cppgc] private: &KeyObjectHandle,
   #[cppgc] public: &KeyObjectHandle,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, DiffieHellmanError> {
   let private = private
     .as_private_key()
-    .ok_or_else(|| type_error("Expected private key"))?;
+    .ok_or(DiffieHellmanError::ExpectedPrivateKey)?;
   let public = public
     .as_public_key()
-    .ok_or_else(|| type_error("Expected public key"))?;
+    .ok_or(DiffieHellmanError::ExpectedPublicKey)?;
 
-  let res = match (private, &*public) {
+  let res =
+    match (private, &*public) {
       (
         AsymmetricPrivateKey::Ec(EcPrivateKey::P224(private)),
         AsymmetricPublicKey::Ec(EcPublicKey::P224(public)),
@ -934,7 +982,7 @@ pub fn op_node_diffie_hellman(
       if private.params.prime != public.params.prime
         || private.params.base != public.params.base
       {
-        return Err(type_error("DH parameters mismatch"));
+        return Err(DiffieHellmanError::DhParametersMismatch);
       }
 
       // OSIP - Octet-String-to-Integer primitive
@ -949,51 +997,67 @@ pub fn op_node_diffie_hellman(
 
       shared_secret.to_bytes_be().into()
     }
-    _ => {
-      return Err(type_error(
-        "Unsupported key type for diffie hellman, or key type mismatch",
-      ))
-    }
+    _ => return Err(
+      DiffieHellmanError::UnsupportedKeyTypeForDiffieHellmanOrKeyTypeMismatch,
+    ),
   };
 
   Ok(res)
 }
 
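With unit-like variants such as DiffieHellmanError::ExpectedPrivateKey, constructing the error is free, so the closure form ok_or_else is no longer needed. A small sketch with a hypothetical error type:

// Constructing a unit struct/variant has no cost, so ok_or(...) is fine;
// ok_or_else(|| ...) only pays off when building the error is expensive.
#[derive(Debug, thiserror::Error)]
#[error("Expected private key")]
pub struct ExpectedPrivateKey;

fn require_private(key: Option<&[u8]>) -> Result<&[u8], ExpectedPrivateKey> {
  key.ok_or(ExpectedPrivateKey)
}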
+#[derive(Debug, thiserror::Error)]
+pub enum SignEd25519Error {
+  #[error("Expected private key")]
+  ExpectedPrivateKey,
+  #[error("Expected Ed25519 private key")]
+  ExpectedEd25519PrivateKey,
+  #[error("Invalid Ed25519 private key")]
+  InvalidEd25519PrivateKey,
+}
+
 #[op2(fast)]
 pub fn op_node_sign_ed25519(
   #[cppgc] key: &KeyObjectHandle,
   #[buffer] data: &[u8],
   #[buffer] signature: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), SignEd25519Error> {
   let private = key
     .as_private_key()
-    .ok_or_else(|| type_error("Expected private key"))?;
+    .ok_or(SignEd25519Error::ExpectedPrivateKey)?;
 
   let ed25519 = match private {
     AsymmetricPrivateKey::Ed25519(private) => private,
-    _ => return Err(type_error("Expected Ed25519 private key")),
+    _ => return Err(SignEd25519Error::ExpectedEd25519PrivateKey),
   };
 
   let pair = Ed25519KeyPair::from_seed_unchecked(ed25519.as_bytes().as_slice())
-    .map_err(|_| type_error("Invalid Ed25519 private key"))?;
+    .map_err(|_| SignEd25519Error::InvalidEd25519PrivateKey)?;
   signature.copy_from_slice(pair.sign(data).as_ref());
 
   Ok(())
 }
 
+#[derive(Debug, thiserror::Error)]
+pub enum VerifyEd25519Error {
+  #[error("Expected public key")]
+  ExpectedPublicKey,
+  #[error("Expected Ed25519 public key")]
+  ExpectedEd25519PublicKey,
+}
+
 #[op2(fast)]
 pub fn op_node_verify_ed25519(
   #[cppgc] key: &KeyObjectHandle,
   #[buffer] data: &[u8],
   #[buffer] signature: &[u8],
-) -> Result<bool, AnyError> {
+) -> Result<bool, VerifyEd25519Error> {
   let public = key
     .as_public_key()
-    .ok_or_else(|| type_error("Expected public key"))?;
+    .ok_or(VerifyEd25519Error::ExpectedPublicKey)?;
 
   let ed25519 = match &*public {
     AsymmetricPublicKey::Ed25519(public) => public,
-    _ => return Err(type_error("Expected Ed25519 public key")),
+    _ => return Err(VerifyEd25519Error::ExpectedEd25519PublicKey),
   };
 
   let verified = ring::signature::UnparsedPublicKey::new(
@ -1,7 +1,4 @@
 // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::generic_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
 use rand::rngs::OsRng;
 use rsa::signature::hazmat::PrehashSigner as _;
 use rsa::signature::hazmat::PrehashVerifier as _;
@ -26,7 +23,7 @@ use elliptic_curve::FieldBytesSize;
 fn dsa_signature<C: elliptic_curve::PrimeCurve>(
   encoding: u32,
   signature: ecdsa::Signature<C>,
-) -> Result<Box<[u8]>, AnyError>
+) -> Result<Box<[u8]>, KeyObjectHandlePrehashedSignAndVerifyError>
 where
   MaxSize<C>: ArrayLength<u8>,
   <FieldBytesSize<C> as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
@ -36,10 +33,54 @@ where
     0 => Ok(signature.to_der().to_bytes().to_vec().into_boxed_slice()),
     // IEEE P1363
     1 => Ok(signature.to_bytes().to_vec().into_boxed_slice()),
-    _ => Err(type_error("invalid DSA signature encoding")),
+    _ => Err(
+      KeyObjectHandlePrehashedSignAndVerifyError::InvalidDsaSignatureEncoding,
+    ),
   }
 }
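dsa_signature keeps the same encoding switch (0 = DER, 1 = IEEE P1363) while reporting the typed InvalidDsaSignatureEncoding variant. A simplified, self-contained sketch of that switch, with hypothetical names and plain byte vectors in place of ecdsa::Signature<C>:

#[derive(Debug, thiserror::Error)]
pub enum SignatureEncodeError {
  #[error("invalid DSA signature encoding")]
  InvalidEncoding,
}

fn encode_signature(
  encoding: u32,
  der: Vec<u8>,   // ASN.1 DER form of the signature
  p1363: Vec<u8>, // fixed-size r || s form
) -> Result<Box<[u8]>, SignatureEncodeError> {
  match encoding {
    0 => Ok(der.into_boxed_slice()),
    1 => Ok(p1363.into_boxed_slice()),
    _ => Err(SignatureEncodeError::InvalidEncoding),
  }
}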
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum KeyObjectHandlePrehashedSignAndVerifyError {
|
||||||
|
#[error("invalid DSA signature encoding")]
|
||||||
|
InvalidDsaSignatureEncoding,
|
||||||
|
#[error("key is not a private key")]
|
||||||
|
KeyIsNotPrivate,
|
||||||
|
#[error("digest not allowed for RSA signature: {0}")]
|
||||||
|
DigestNotAllowedForRsaSignature(String),
|
||||||
|
#[error("failed to sign digest with RSA")]
|
||||||
|
FailedToSignDigestWithRsa,
|
||||||
|
#[error("digest not allowed for RSA-PSS signature: {0}")]
|
||||||
|
DigestNotAllowedForRsaPssSignature(String),
|
||||||
|
#[error("failed to sign digest with RSA-PSS")]
|
||||||
|
FailedToSignDigestWithRsaPss,
|
||||||
|
#[error("failed to sign digest with DSA")]
|
||||||
|
FailedToSignDigestWithDsa,
|
||||||
|
#[error("rsa-pss with different mf1 hash algorithm and hash algorithm is not supported")]
|
||||||
|
RsaPssHashAlgorithmUnsupported,
|
||||||
|
#[error(
|
||||||
|
"private key does not allow {actual} to be used, expected {expected}"
|
||||||
|
)]
|
||||||
|
PrivateKeyDisallowsUsage { actual: String, expected: String },
|
||||||
|
#[error("failed to sign digest")]
|
||||||
|
FailedToSignDigest,
|
||||||
|
#[error("x25519 key cannot be used for signing")]
|
||||||
|
X25519KeyCannotBeUsedForSigning,
|
||||||
|
#[error("Ed25519 key cannot be used for prehashed signing")]
|
||||||
|
Ed25519KeyCannotBeUsedForPrehashedSigning,
|
||||||
|
#[error("DH key cannot be used for signing")]
|
||||||
|
DhKeyCannotBeUsedForSigning,
|
||||||
|
#[error("key is not a public or private key")]
|
||||||
|
KeyIsNotPublicOrPrivate,
|
||||||
|
#[error("Invalid DSA signature")]
|
||||||
|
InvalidDsaSignature,
|
||||||
|
#[error("x25519 key cannot be used for verification")]
|
||||||
|
X25519KeyCannotBeUsedForVerification,
|
||||||
|
#[error("Ed25519 key cannot be used for prehashed verification")]
|
||||||
|
Ed25519KeyCannotBeUsedForPrehashedVerification,
|
||||||
|
#[error("DH key cannot be used for verification")]
|
||||||
|
DhKeyCannotBeUsedForVerification,
|
||||||
|
}
|
||||||
|
|
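The PrivateKeyDisallowsUsage variant above carries named fields instead of a format!()-built string; thiserror renders the message from them. A minimal sketch with a hypothetical, trimmed-down enum:

#[derive(Debug, thiserror::Error)]
pub enum PrehashedSignError {
  #[error("private key does not allow {actual} to be used, expected {expected}")]
  PrivateKeyDisallowsUsage { actual: String, expected: String },
}

fn check_digest(actual: &str, expected: &str) -> Result<(), PrehashedSignError> {
  if actual != expected {
    // The Display impl interpolates the fields into the message above.
    return Err(PrehashedSignError::PrivateKeyDisallowsUsage {
      actual: actual.to_string(),
      expected: expected.to_string(),
    });
  }
  Ok(())
}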
||||||
impl KeyObjectHandle {
|
impl KeyObjectHandle {
|
||||||
pub fn sign_prehashed(
|
pub fn sign_prehashed(
|
||||||
&self,
|
&self,
|
||||||
|
@ -47,10 +88,10 @@ impl KeyObjectHandle {
|
||||||
digest: &[u8],
|
digest: &[u8],
|
||||||
pss_salt_length: Option<u32>,
|
pss_salt_length: Option<u32>,
|
||||||
dsa_signature_encoding: u32,
|
dsa_signature_encoding: u32,
|
||||||
) -> Result<Box<[u8]>, AnyError> {
|
) -> Result<Box<[u8]>, KeyObjectHandlePrehashedSignAndVerifyError> {
|
||||||
let private_key = self
|
let private_key = self
|
||||||
.as_private_key()
|
.as_private_key()
|
||||||
.ok_or_else(|| type_error("key is not a private key"))?;
|
.ok_or(KeyObjectHandlePrehashedSignAndVerifyError::KeyIsNotPrivate)?;
|
||||||
|
|
||||||
match private_key {
|
match private_key {
|
||||||
AsymmetricPrivateKey::Rsa(key) => {
|
AsymmetricPrivateKey::Rsa(key) => {
|
||||||
|
@ -63,17 +104,14 @@ impl KeyObjectHandle {
|
||||||
rsa::pkcs1v15::Pkcs1v15Sign::new::<D>()
|
rsa::pkcs1v15::Pkcs1v15Sign::new::<D>()
|
||||||
},
|
},
|
||||||
_ => {
|
_ => {
|
||||||
return Err(type_error(format!(
|
return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string()))
|
||||||
"digest not allowed for RSA signature: {}",
|
|
||||||
digest_type
|
|
||||||
)))
|
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
};
|
};
|
||||||
|
|
||||||
let signature = signer
|
let signature = signer
|
||||||
.sign(Some(&mut OsRng), key, digest)
|
.sign(Some(&mut OsRng), key, digest)
|
||||||
.map_err(|_| generic_error("failed to sign digest with RSA"))?;
|
.map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithRsa)?;
|
||||||
Ok(signature.into())
|
Ok(signature.into())
|
||||||
}
|
}
|
||||||
AsymmetricPrivateKey::RsaPss(key) => {
|
AsymmetricPrivateKey::RsaPss(key) => {
|
||||||
|
@ -81,9 +119,7 @@ impl KeyObjectHandle {
|
||||||
let mut salt_length = None;
|
let mut salt_length = None;
|
||||||
if let Some(details) = &key.details {
|
if let Some(details) = &key.details {
|
||||||
if details.hash_algorithm != details.mf1_hash_algorithm {
|
if details.hash_algorithm != details.mf1_hash_algorithm {
|
||||||
return Err(type_error(
|
return Err(KeyObjectHandlePrehashedSignAndVerifyError::RsaPssHashAlgorithmUnsupported);
|
||||||
"rsa-pss with different mf1 hash algorithm and hash algorithm is not supported",
|
|
||||||
));
|
|
||||||
}
|
}
|
||||||
hash_algorithm = Some(details.hash_algorithm);
|
hash_algorithm = Some(details.hash_algorithm);
|
||||||
salt_length = Some(details.salt_length as usize);
|
salt_length = Some(details.salt_length as usize);
|
||||||
|
@ -96,10 +132,10 @@ impl KeyObjectHandle {
|
||||||
fn <D>(algorithm: Option<RsaPssHashAlgorithm>) {
|
fn <D>(algorithm: Option<RsaPssHashAlgorithm>) {
|
||||||
if let Some(hash_algorithm) = hash_algorithm.take() {
|
if let Some(hash_algorithm) = hash_algorithm.take() {
|
||||||
if Some(hash_algorithm) != algorithm {
|
if Some(hash_algorithm) != algorithm {
|
||||||
return Err(type_error(format!(
|
return Err(KeyObjectHandlePrehashedSignAndVerifyError::PrivateKeyDisallowsUsage {
|
||||||
"private key does not allow {} to be used, expected {}",
|
actual: digest_type.to_string(),
|
||||||
digest_type, hash_algorithm.as_str()
|
expected: hash_algorithm.as_str().to_string(),
|
||||||
)));
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if let Some(salt_length) = salt_length {
|
if let Some(salt_length) = salt_length {
|
||||||
|
@ -109,15 +145,12 @@ impl KeyObjectHandle {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
_ => {
|
_ => {
|
||||||
return Err(type_error(format!(
|
return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaPssSignature(digest_type.to_string()));
|
||||||
"digest not allowed for RSA-PSS signature: {}",
|
|
||||||
digest_type
|
|
||||||
)))
|
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
let signature = pss
|
let signature = pss
|
||||||
.sign(Some(&mut OsRng), &key.key, digest)
|
.sign(Some(&mut OsRng), &key.key, digest)
|
||||||
.map_err(|_| generic_error("failed to sign digest with RSA-PSS"))?;
|
.map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithRsaPss)?;
|
||||||
Ok(signature.into())
|
Ok(signature.into())
|
||||||
}
|
}
|
||||||
AsymmetricPrivateKey::Dsa(key) => {
|
AsymmetricPrivateKey::Dsa(key) => {
|
||||||
|
@ -127,15 +160,12 @@ impl KeyObjectHandle {
|
||||||
key.sign_prehashed_rfc6979::<D>(digest)
|
key.sign_prehashed_rfc6979::<D>(digest)
|
||||||
},
|
},
|
||||||
_ => {
|
_ => {
|
||||||
return Err(type_error(format!(
|
return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string()))
|
||||||
"digest not allowed for RSA signature: {}",
|
|
||||||
digest_type
|
|
||||||
)))
|
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
||||||
let signature =
|
let signature =
|
||||||
res.map_err(|_| generic_error("failed to sign digest with DSA"))?;
|
res.map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithDsa)?;
|
||||||
Ok(signature.into())
|
Ok(signature.into())
|
||||||
}
|
}
|
||||||
AsymmetricPrivateKey::Ec(key) => match key {
|
AsymmetricPrivateKey::Ec(key) => match key {
|
||||||
|
@ -143,7 +173,7 @@ impl KeyObjectHandle {
|
||||||
let signing_key = p224::ecdsa::SigningKey::from(key);
|
let signing_key = p224::ecdsa::SigningKey::from(key);
|
||||||
let signature: p224::ecdsa::Signature = signing_key
|
let signature: p224::ecdsa::Signature = signing_key
|
||||||
.sign_prehash(digest)
|
.sign_prehash(digest)
|
||||||
.map_err(|_| type_error("failed to sign digest"))?;
|
.map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?;
|
||||||
|
|
||||||
dsa_signature(dsa_signature_encoding, signature)
|
dsa_signature(dsa_signature_encoding, signature)
|
||||||
}
|
}
|
||||||
|
@ -151,7 +181,7 @@ impl KeyObjectHandle {
|
||||||
let signing_key = p256::ecdsa::SigningKey::from(key);
|
let signing_key = p256::ecdsa::SigningKey::from(key);
|
||||||
let signature: p256::ecdsa::Signature = signing_key
|
let signature: p256::ecdsa::Signature = signing_key
|
||||||
.sign_prehash(digest)
|
.sign_prehash(digest)
|
||||||
.map_err(|_| type_error("failed to sign digest"))?;
|
.map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?;
|
||||||
|
|
||||||
dsa_signature(dsa_signature_encoding, signature)
|
dsa_signature(dsa_signature_encoding, signature)
|
||||||
}
|
}
|
||||||
|
@ -159,19 +189,17 @@ impl KeyObjectHandle {
|
||||||
let signing_key = p384::ecdsa::SigningKey::from(key);
|
let signing_key = p384::ecdsa::SigningKey::from(key);
|
||||||
let signature: p384::ecdsa::Signature = signing_key
|
let signature: p384::ecdsa::Signature = signing_key
|
||||||
.sign_prehash(digest)
|
.sign_prehash(digest)
|
||||||
.map_err(|_| type_error("failed to sign digest"))?;
|
.map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?;
|
||||||
|
|
||||||
dsa_signature(dsa_signature_encoding, signature)
|
dsa_signature(dsa_signature_encoding, signature)
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
AsymmetricPrivateKey::X25519(_) => {
|
AsymmetricPrivateKey::X25519(_) => {
|
||||||
Err(type_error("x25519 key cannot be used for signing"))
|
Err(KeyObjectHandlePrehashedSignAndVerifyError::X25519KeyCannotBeUsedForSigning)
|
||||||
}
|
}
|
||||||
AsymmetricPrivateKey::Ed25519(_) => Err(type_error(
|
AsymmetricPrivateKey::Ed25519(_) => Err(KeyObjectHandlePrehashedSignAndVerifyError::Ed25519KeyCannotBeUsedForPrehashedSigning),
|
||||||
"Ed25519 key cannot be used for prehashed signing",
|
|
||||||
)),
|
|
||||||
AsymmetricPrivateKey::Dh(_) => {
|
AsymmetricPrivateKey::Dh(_) => {
|
||||||
Err(type_error("DH key cannot be used for signing"))
|
Err(KeyObjectHandlePrehashedSignAndVerifyError::DhKeyCannotBeUsedForSigning)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -183,10 +211,10 @@ impl KeyObjectHandle {
|
||||||
signature: &[u8],
|
signature: &[u8],
|
||||||
pss_salt_length: Option<u32>,
|
pss_salt_length: Option<u32>,
|
||||||
dsa_signature_encoding: u32,
|
dsa_signature_encoding: u32,
|
||||||
) -> Result<bool, AnyError> {
|
) -> Result<bool, KeyObjectHandlePrehashedSignAndVerifyError> {
|
||||||
let public_key = self
|
let public_key = self.as_public_key().ok_or(
|
||||||
.as_public_key()
|
KeyObjectHandlePrehashedSignAndVerifyError::KeyIsNotPublicOrPrivate,
|
||||||
.ok_or_else(|| type_error("key is not a public or private key"))?;
|
)?;
|
||||||
|
|
||||||
match &*public_key {
|
match &*public_key {
|
||||||
AsymmetricPublicKey::Rsa(key) => {
|
AsymmetricPublicKey::Rsa(key) => {
|
||||||
|
@ -199,10 +227,7 @@ impl KeyObjectHandle {
|
||||||
rsa::pkcs1v15::Pkcs1v15Sign::new::<D>()
|
rsa::pkcs1v15::Pkcs1v15Sign::new::<D>()
|
||||||
},
|
},
|
||||||
_ => {
|
_ => {
|
||||||
return Err(type_error(format!(
|
return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string()))
|
||||||
"digest not allowed for RSA signature: {}",
|
|
||||||
digest_type
|
|
||||||
)))
|
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
};
|
};
|
||||||
|
@ -214,9 +239,7 @@ impl KeyObjectHandle {
|
||||||
let mut salt_length = None;
|
let mut salt_length = None;
|
||||||
if let Some(details) = &key.details {
|
if let Some(details) = &key.details {
|
||||||
if details.hash_algorithm != details.mf1_hash_algorithm {
|
if details.hash_algorithm != details.mf1_hash_algorithm {
|
||||||
return Err(type_error(
|
return Err(KeyObjectHandlePrehashedSignAndVerifyError::RsaPssHashAlgorithmUnsupported);
|
||||||
"rsa-pss with different mf1 hash algorithm and hash algorithm is not supported",
|
|
||||||
));
|
|
||||||
}
|
}
|
||||||
hash_algorithm = Some(details.hash_algorithm);
|
hash_algorithm = Some(details.hash_algorithm);
|
||||||
salt_length = Some(details.salt_length as usize);
|
salt_length = Some(details.salt_length as usize);
|
||||||
|
@ -229,10 +252,10 @@ impl KeyObjectHandle {
|
||||||
fn <D>(algorithm: Option<RsaPssHashAlgorithm>) {
|
fn <D>(algorithm: Option<RsaPssHashAlgorithm>) {
|
||||||
if let Some(hash_algorithm) = hash_algorithm.take() {
|
if let Some(hash_algorithm) = hash_algorithm.take() {
|
||||||
if Some(hash_algorithm) != algorithm {
|
if Some(hash_algorithm) != algorithm {
|
||||||
return Err(type_error(format!(
|
return Err(KeyObjectHandlePrehashedSignAndVerifyError::PrivateKeyDisallowsUsage {
|
||||||
"private key does not allow {} to be used, expected {}",
|
actual: digest_type.to_string(),
|
||||||
digest_type, hash_algorithm.as_str()
|
expected: hash_algorithm.as_str().to_string(),
|
||||||
)));
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if let Some(salt_length) = salt_length {
|
if let Some(salt_length) = salt_length {
|
||||||
|
@ -242,17 +265,14 @@ impl KeyObjectHandle {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
_ => {
|
_ => {
|
||||||
return Err(type_error(format!(
|
return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaPssSignature(digest_type.to_string()));
|
||||||
"digest not allowed for RSA-PSS signature: {}",
|
|
||||||
digest_type
|
|
||||||
)))
|
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
Ok(pss.verify(&key.key, digest, signature).is_ok())
|
Ok(pss.verify(&key.key, digest, signature).is_ok())
|
||||||
}
|
}
|
||||||
AsymmetricPublicKey::Dsa(key) => {
|
AsymmetricPublicKey::Dsa(key) => {
|
||||||
let signature = dsa::Signature::from_der(signature)
|
let signature = dsa::Signature::from_der(signature)
|
||||||
.map_err(|_| type_error("Invalid DSA signature"))?;
|
.map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::InvalidDsaSignature)?;
|
||||||
Ok(key.verify_prehash(digest, &signature).is_ok())
|
Ok(key.verify_prehash(digest, &signature).is_ok())
|
||||||
}
|
}
|
||||||
AsymmetricPublicKey::Ec(key) => match key {
|
AsymmetricPublicKey::Ec(key) => match key {
|
||||||
|
@ -294,13 +314,11 @@ impl KeyObjectHandle {
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
AsymmetricPublicKey::X25519(_) => {
|
AsymmetricPublicKey::X25519(_) => {
|
||||||
Err(type_error("x25519 key cannot be used for verification"))
|
Err(KeyObjectHandlePrehashedSignAndVerifyError::X25519KeyCannotBeUsedForVerification)
|
||||||
}
|
}
|
||||||
AsymmetricPublicKey::Ed25519(_) => Err(type_error(
|
AsymmetricPublicKey::Ed25519(_) => Err(KeyObjectHandlePrehashedSignAndVerifyError::Ed25519KeyCannotBeUsedForPrehashedVerification),
|
||||||
"Ed25519 key cannot be used for prehashed verification",
|
|
||||||
)),
|
|
||||||
AsymmetricPublicKey::Dh(_) => {
|
AsymmetricPublicKey::Dh(_) => {
|
||||||
Err(type_error("DH key cannot be used for verification"))
|
Err(KeyObjectHandlePrehashedSignAndVerifyError::DhKeyCannotBeUsedForVerification)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,11 +1,11 @@
|
||||||
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
||||||
|
|
||||||
use deno_core::error::AnyError;
|
|
||||||
use deno_core::op2;
|
use deno_core::op2;
|
||||||
|
|
||||||
use x509_parser::der_parser::asn1_rs::Any;
|
use x509_parser::der_parser::asn1_rs::Any;
|
||||||
use x509_parser::der_parser::asn1_rs::Tag;
|
use x509_parser::der_parser::asn1_rs::Tag;
|
||||||
use x509_parser::der_parser::oid::Oid;
|
use x509_parser::der_parser::oid::Oid;
|
||||||
|
pub use x509_parser::error::X509Error;
|
||||||
use x509_parser::extensions;
|
use x509_parser::extensions;
|
||||||
use x509_parser::pem;
|
use x509_parser::pem;
|
||||||
use x509_parser::prelude::*;
|
use x509_parser::prelude::*;
|
||||||
|
@ -65,7 +65,7 @@ impl<'a> Deref for CertificateView<'a> {
|
||||||
#[cppgc]
|
#[cppgc]
|
||||||
pub fn op_node_x509_parse(
|
pub fn op_node_x509_parse(
|
||||||
#[buffer] buf: &[u8],
|
#[buffer] buf: &[u8],
|
||||||
) -> Result<Certificate, AnyError> {
|
) -> Result<Certificate, X509Error> {
|
||||||
let source = match pem::parse_x509_pem(buf) {
|
let source = match pem::parse_x509_pem(buf) {
|
||||||
Ok((_, pem)) => CertificateSources::Pem(pem),
|
Ok((_, pem)) => CertificateSources::Pem(pem),
|
||||||
Err(_) => CertificateSources::Der(buf.to_vec().into_boxed_slice()),
|
Err(_) => CertificateSources::Der(buf.to_vec().into_boxed_slice()),
|
||||||
|
@ -81,7 +81,7 @@ pub fn op_node_x509_parse(
|
||||||
X509Certificate::from_der(buf).map(|(_, cert)| cert)?
|
X509Certificate::from_der(buf).map(|(_, cert)| cert)?
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
Ok::<_, AnyError>(CertificateView { cert })
|
Ok::<_, X509Error>(CertificateView { cert })
|
||||||
},
|
},
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
|
@ -89,23 +89,23 @@ pub fn op_node_x509_parse(
|
||||||
}
|
}
|
||||||
|
|
||||||
#[op2(fast)]
|
#[op2(fast)]
|
||||||
pub fn op_node_x509_ca(#[cppgc] cert: &Certificate) -> Result<bool, AnyError> {
|
pub fn op_node_x509_ca(#[cppgc] cert: &Certificate) -> bool {
|
||||||
let cert = cert.inner.get().deref();
|
let cert = cert.inner.get().deref();
|
||||||
Ok(cert.is_ca())
|
cert.is_ca()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[op2(fast)]
|
#[op2(fast)]
|
||||||
pub fn op_node_x509_check_email(
|
pub fn op_node_x509_check_email(
|
||||||
#[cppgc] cert: &Certificate,
|
#[cppgc] cert: &Certificate,
|
||||||
#[string] email: &str,
|
#[string] email: &str,
|
||||||
) -> Result<bool, AnyError> {
|
) -> bool {
|
||||||
let cert = cert.inner.get().deref();
|
let cert = cert.inner.get().deref();
|
||||||
let subject = cert.subject();
|
let subject = cert.subject();
|
||||||
if subject
|
if subject
|
||||||
.iter_email()
|
.iter_email()
|
||||||
.any(|e| e.as_str().unwrap_or("") == email)
|
.any(|e| e.as_str().unwrap_or("") == email)
|
||||||
{
|
{
|
||||||
return Ok(true);
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
let subject_alt = cert
|
let subject_alt = cert
|
||||||
|
@ -121,62 +121,60 @@ pub fn op_node_x509_check_email(
|
||||||
for name in &subject_alt.general_names {
|
for name in &subject_alt.general_names {
|
||||||
if let extensions::GeneralName::RFC822Name(n) = name {
|
if let extensions::GeneralName::RFC822Name(n) = name {
|
||||||
if *n == email {
|
if *n == email {
|
||||||
return Ok(true);
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(false)
|
false
|
||||||
}
|
}
|
||||||
|
|
||||||
#[op2]
|
#[op2]
|
||||||
#[string]
|
#[string]
|
||||||
pub fn op_node_x509_fingerprint(
|
pub fn op_node_x509_fingerprint(#[cppgc] cert: &Certificate) -> Option<String> {
|
||||||
#[cppgc] cert: &Certificate,
|
cert.fingerprint::<sha1::Sha1>()
|
||||||
) -> Result<Option<String>, AnyError> {
|
|
||||||
Ok(cert.fingerprint::<sha1::Sha1>())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[op2]
|
#[op2]
|
||||||
#[string]
|
#[string]
|
||||||
pub fn op_node_x509_fingerprint256(
|
pub fn op_node_x509_fingerprint256(
|
||||||
#[cppgc] cert: &Certificate,
|
#[cppgc] cert: &Certificate,
|
||||||
) -> Result<Option<String>, AnyError> {
|
) -> Option<String> {
|
||||||
Ok(cert.fingerprint::<sha2::Sha256>())
|
cert.fingerprint::<sha2::Sha256>()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[op2]
|
#[op2]
|
||||||
#[string]
|
#[string]
|
||||||
pub fn op_node_x509_fingerprint512(
|
pub fn op_node_x509_fingerprint512(
|
||||||
#[cppgc] cert: &Certificate,
|
#[cppgc] cert: &Certificate,
|
||||||
) -> Result<Option<String>, AnyError> {
|
) -> Option<String> {
|
||||||
Ok(cert.fingerprint::<sha2::Sha512>())
|
cert.fingerprint::<sha2::Sha512>()
|
||||||
}
|
}
|
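The fingerprint ops above drop Result entirely and return Option<String>, since the only failure mode is "no value". An illustrative helper (not the module's code) in the same style, assuming the sha2 crate:

fn fingerprint_hex(der: &[u8]) -> Option<String> {
  use sha2::{Digest, Sha256};
  if der.is_empty() {
    return None;
  }
  let digest = Sha256::digest(der);
  // Colon-separated upper-case hex, the usual certificate fingerprint style.
  Some(
    digest
      .iter()
      .map(|b| format!("{b:02X}"))
      .collect::<Vec<_>>()
      .join(":"),
  )
}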
||||||
|
|
||||||
#[op2]
|
#[op2]
|
||||||
#[string]
|
#[string]
|
||||||
pub fn op_node_x509_get_issuer(
|
pub fn op_node_x509_get_issuer(
|
||||||
#[cppgc] cert: &Certificate,
|
#[cppgc] cert: &Certificate,
|
||||||
) -> Result<String, AnyError> {
|
) -> Result<String, X509Error> {
|
||||||
let cert = cert.inner.get().deref();
|
let cert = cert.inner.get().deref();
|
||||||
Ok(x509name_to_string(cert.issuer(), oid_registry())?)
|
x509name_to_string(cert.issuer(), oid_registry())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[op2]
|
#[op2]
|
||||||
#[string]
|
#[string]
|
||||||
pub fn op_node_x509_get_subject(
|
pub fn op_node_x509_get_subject(
|
||||||
#[cppgc] cert: &Certificate,
|
#[cppgc] cert: &Certificate,
|
||||||
) -> Result<String, AnyError> {
|
) -> Result<String, X509Error> {
|
||||||
let cert = cert.inner.get().deref();
|
let cert = cert.inner.get().deref();
|
||||||
Ok(x509name_to_string(cert.subject(), oid_registry())?)
|
x509name_to_string(cert.subject(), oid_registry())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[op2]
|
#[op2]
|
||||||
#[cppgc]
|
#[cppgc]
|
||||||
pub fn op_node_x509_public_key(
|
pub fn op_node_x509_public_key(
|
||||||
#[cppgc] cert: &Certificate,
|
#[cppgc] cert: &Certificate,
|
||||||
) -> Result<KeyObjectHandle, AnyError> {
|
) -> Result<KeyObjectHandle, super::keys::X509PublicKeyError> {
|
||||||
let cert = cert.inner.get().deref();
|
let cert = cert.inner.get().deref();
|
||||||
let public_key = &cert.tbs_certificate.subject_pki;
|
let public_key = &cert.tbs_certificate.subject_pki;
|
||||||
|
|
||||||
|
@ -245,37 +243,29 @@ fn x509name_to_string(
|
||||||
|
|
||||||
#[op2]
|
#[op2]
|
||||||
#[string]
|
#[string]
|
||||||
pub fn op_node_x509_get_valid_from(
|
pub fn op_node_x509_get_valid_from(#[cppgc] cert: &Certificate) -> String {
|
||||||
#[cppgc] cert: &Certificate,
|
|
||||||
) -> Result<String, AnyError> {
|
|
||||||
let cert = cert.inner.get().deref();
|
let cert = cert.inner.get().deref();
|
||||||
Ok(cert.validity().not_before.to_string())
|
cert.validity().not_before.to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[op2]
|
#[op2]
|
||||||
#[string]
|
#[string]
|
||||||
pub fn op_node_x509_get_valid_to(
|
pub fn op_node_x509_get_valid_to(#[cppgc] cert: &Certificate) -> String {
|
||||||
#[cppgc] cert: &Certificate,
|
|
||||||
) -> Result<String, AnyError> {
|
|
||||||
let cert = cert.inner.get().deref();
|
let cert = cert.inner.get().deref();
|
||||||
Ok(cert.validity().not_after.to_string())
|
cert.validity().not_after.to_string()
|
||||||
}
|
}
|
||||||
|
|
||||||
#[op2]
|
#[op2]
|
||||||
#[string]
|
#[string]
|
||||||
pub fn op_node_x509_get_serial_number(
|
pub fn op_node_x509_get_serial_number(#[cppgc] cert: &Certificate) -> String {
|
||||||
#[cppgc] cert: &Certificate,
|
|
||||||
) -> Result<String, AnyError> {
|
|
||||||
let cert = cert.inner.get().deref();
|
let cert = cert.inner.get().deref();
|
||||||
let mut s = cert.serial.to_str_radix(16);
|
let mut s = cert.serial.to_str_radix(16);
|
||||||
s.make_ascii_uppercase();
|
s.make_ascii_uppercase();
|
||||||
Ok(s)
|
s
|
||||||
}
|
}
|
||||||
|
|
||||||
#[op2(fast)]
|
#[op2(fast)]
|
||||||
pub fn op_node_x509_key_usage(
|
pub fn op_node_x509_key_usage(#[cppgc] cert: &Certificate) -> u16 {
|
||||||
#[cppgc] cert: &Certificate,
|
|
||||||
) -> Result<u16, AnyError> {
|
|
||||||
let cert = cert.inner.get().deref();
|
let cert = cert.inner.get().deref();
|
||||||
let key_usage = cert
|
let key_usage = cert
|
||||||
.extensions()
|
.extensions()
|
||||||
|
@ -286,5 +276,5 @@ pub fn op_node_x509_key_usage(
|
||||||
_ => None,
|
_ => None,
|
||||||
});
|
});
|
||||||
|
|
||||||
Ok(key_usage.map(|k| k.flags).unwrap_or(0))
|
key_usage.map(|k| k.flags).unwrap_or(0)
|
||||||
}
|
}
|
||||||
|
|
161
ext/node/ops/inspector.rs
Normal file
|
@ -0,0 +1,161 @@
|
||||||
|
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
||||||
|
|
||||||
|
use crate::NodePermissions;
|
||||||
|
use deno_core::anyhow::Error;
|
||||||
|
use deno_core::error::generic_error;
|
||||||
|
use deno_core::futures::channel::mpsc;
|
||||||
|
use deno_core::op2;
|
||||||
|
use deno_core::v8;
|
||||||
|
use deno_core::GarbageCollected;
|
||||||
|
use deno_core::InspectorSessionKind;
|
||||||
|
use deno_core::InspectorSessionOptions;
|
||||||
|
use deno_core::JsRuntimeInspector;
|
||||||
|
use deno_core::OpState;
|
||||||
|
use std::cell::RefCell;
|
||||||
|
use std::rc::Rc;
|
||||||
|
|
||||||
|
#[op2(fast)]
|
||||||
|
pub fn op_inspector_enabled() -> bool {
|
||||||
|
// TODO: hook up to InspectorServer
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
#[op2]
|
||||||
|
pub fn op_inspector_open<P>(
|
||||||
|
_state: &mut OpState,
|
||||||
|
_port: Option<u16>,
|
||||||
|
#[string] _host: Option<String>,
|
||||||
|
) -> Result<(), Error>
|
||||||
|
where
|
||||||
|
P: NodePermissions + 'static,
|
||||||
|
{
|
||||||
|
// TODO: hook up to InspectorServer
|
||||||
|
/*
|
||||||
|
let server = state.borrow_mut::<InspectorServer>();
|
||||||
|
if let Some(host) = host {
|
||||||
|
server.set_host(host);
|
||||||
|
}
|
||||||
|
if let Some(port) = port {
|
||||||
|
server.set_port(port);
|
||||||
|
}
|
||||||
|
state
|
||||||
|
.borrow_mut::<P>()
|
||||||
|
.check_net((server.host(), Some(server.port())), "inspector.open")?;
|
||||||
|
*/
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[op2(fast)]
|
||||||
|
pub fn op_inspector_close() {
|
||||||
|
// TODO: hook up to InspectorServer
|
||||||
|
}
|
||||||
|
|
||||||
|
#[op2]
|
||||||
|
#[string]
|
||||||
|
pub fn op_inspector_url() -> Option<String> {
|
||||||
|
// TODO: hook up to InspectorServer
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
#[op2(fast)]
|
||||||
|
pub fn op_inspector_wait(state: &OpState) -> bool {
|
||||||
|
match state.try_borrow::<Rc<RefCell<JsRuntimeInspector>>>() {
|
||||||
|
Some(inspector) => {
|
||||||
|
inspector
|
||||||
|
.borrow_mut()
|
||||||
|
.wait_for_session_and_break_on_next_statement();
|
||||||
|
true
|
||||||
|
}
|
||||||
|
None => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[op2(fast)]
|
||||||
|
pub fn op_inspector_emit_protocol_event(
|
||||||
|
#[string] _event_name: String,
|
||||||
|
#[string] _params: String,
|
||||||
|
) {
|
||||||
|
// TODO: inspector channel & protocol notifications
|
||||||
|
}
|
||||||
|
|
||||||
|
struct JSInspectorSession {
|
||||||
|
tx: RefCell<Option<mpsc::UnboundedSender<String>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl GarbageCollected for JSInspectorSession {}
|
||||||
|
|
||||||
|
#[op2]
|
||||||
|
#[cppgc]
|
||||||
|
pub fn op_inspector_connect<'s, P>(
|
||||||
|
isolate: *mut v8::Isolate,
|
||||||
|
scope: &mut v8::HandleScope<'s>,
|
||||||
|
state: &mut OpState,
|
||||||
|
connect_to_main_thread: bool,
|
||||||
|
callback: v8::Local<'s, v8::Function>,
|
||||||
|
) -> Result<JSInspectorSession, Error>
|
||||||
|
where
|
||||||
|
P: NodePermissions + 'static,
|
||||||
|
{
|
||||||
|
state
|
||||||
|
.borrow_mut::<P>()
|
||||||
|
.check_sys("inspector", "inspector.Session.connect")?;
|
||||||
|
|
||||||
|
if connect_to_main_thread {
|
||||||
|
return Err(generic_error("connectToMainThread not supported"));
|
||||||
|
}
|
||||||
|
|
||||||
|
let context = scope.get_current_context();
|
||||||
|
let context = v8::Global::new(scope, context);
|
||||||
|
let callback = v8::Global::new(scope, callback);
|
||||||
|
|
||||||
|
let inspector = state
|
||||||
|
.borrow::<Rc<RefCell<JsRuntimeInspector>>>()
|
||||||
|
.borrow_mut();
|
||||||
|
|
||||||
|
let tx = inspector.create_raw_session(
|
||||||
|
InspectorSessionOptions {
|
||||||
|
kind: InspectorSessionKind::NonBlocking {
|
||||||
|
wait_for_disconnect: false,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// The inspector connection does not keep the event loop alive but
|
||||||
|
// when the inspector sends a message to the frontend, the JS that
|
||||||
|
// that runs may keep the event loop alive so we have to call back
|
||||||
|
// synchronously, instead of using the usual LocalInspectorSession
|
||||||
|
// UnboundedReceiver<InspectorMsg> API.
|
||||||
|
Box::new(move |message| {
|
||||||
|
// SAFETY: This function is called directly by the inspector, so
|
||||||
|
// 1) The isolate is still valid
|
||||||
|
// 2) We are on the same thread as the Isolate
|
||||||
|
let scope = unsafe { &mut v8::CallbackScope::new(&mut *isolate) };
|
||||||
|
let context = v8::Local::new(scope, context.clone());
|
||||||
|
let scope = &mut v8::ContextScope::new(scope, context);
|
||||||
|
let scope = &mut v8::TryCatch::new(scope);
|
||||||
|
let recv = v8::undefined(scope);
|
||||||
|
if let Some(message) = v8::String::new(scope, &message.content) {
|
||||||
|
let callback = v8::Local::new(scope, callback.clone());
|
||||||
|
callback.call(scope, recv.into(), &[message.into()]);
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(JSInspectorSession {
|
||||||
|
tx: RefCell::new(Some(tx)),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[op2(fast)]
|
||||||
|
pub fn op_inspector_dispatch(
|
||||||
|
#[cppgc] session: &JSInspectorSession,
|
||||||
|
#[string] message: String,
|
||||||
|
) {
|
||||||
|
if let Some(tx) = &*session.tx.borrow() {
|
||||||
|
let _ = tx.unbounded_send(message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[op2(fast)]
|
||||||
|
pub fn op_inspector_disconnect(#[cppgc] session: &JSInspectorSession) {
|
||||||
|
drop(session.tx.borrow_mut().take());
|
||||||
|
}
|
|
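The new inspector ops keep the raw session as little more than an optional message sender: op_inspector_dispatch forwards a string, op_inspector_disconnect drops the sender. A self-contained sketch of that handle, assuming the futures crate's mpsc channel in place of deno_core's re-export:

use std::cell::RefCell;
use futures::channel::mpsc;

struct SessionHandle {
  tx: RefCell<Option<mpsc::UnboundedSender<String>>>,
}

impl SessionHandle {
  // Forward a protocol message to the live session, if any.
  fn dispatch(&self, message: String) {
    if let Some(tx) = &*self.tx.borrow() {
      let _ = tx.unbounded_send(message);
    }
  }

  // Dropping the sender is all it takes to end the session.
  fn disconnect(&self) {
    drop(self.tx.borrow_mut().take());
  }
}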
@ -7,6 +7,7 @@ pub mod fs;
|
||||||
pub mod http;
|
pub mod http;
|
||||||
pub mod http2;
|
pub mod http2;
|
||||||
pub mod idna;
|
pub mod idna;
|
||||||
|
pub mod inspector;
|
||||||
pub mod ipc;
|
pub mod ipc;
|
||||||
pub mod os;
|
pub mod os;
|
||||||
pub mod process;
|
pub mod process;
|
||||||
|
|
|
@ -14,6 +14,7 @@ import { nextTick } from "ext:deno_node/_next_tick.ts";
|
||||||
import {
|
import {
|
||||||
isAnyArrayBuffer,
|
isAnyArrayBuffer,
|
||||||
isArrayBufferView,
|
isArrayBufferView,
|
||||||
|
isUint8Array,
|
||||||
} from "ext:deno_node/internal/util/types.ts";
|
} from "ext:deno_node/internal/util/types.ts";
|
||||||
|
|
||||||
var kRangeErrorMessage = "Cannot create final Buffer. It would be larger " +
|
var kRangeErrorMessage = "Cannot create final Buffer. It would be larger " +
|
||||||
|
@ -158,6 +159,12 @@ export const inflateRawSync = function (buffer, opts) {
|
||||||
function sanitizeInput(input) {
|
function sanitizeInput(input) {
|
||||||
if (typeof input === "string") input = Buffer.from(input);
|
if (typeof input === "string") input = Buffer.from(input);
|
||||||
|
|
||||||
|
if (isArrayBufferView(input) && !isUint8Array(input)) {
|
||||||
|
input = Buffer.from(input.buffer, input.byteOffset, input.byteLength);
|
||||||
|
} else if (isAnyArrayBuffer(input)) {
|
||||||
|
input = Buffer.from(input);
|
||||||
|
}
|
||||||
|
|
||||||
if (
|
if (
|
||||||
!Buffer.isBuffer(input) &&
|
!Buffer.isBuffer(input) &&
|
||||||
(input.buffer && !input.buffer.constructor === ArrayBuffer)
|
(input.buffer && !input.buffer.constructor === ArrayBuffer)
|
||||||
|
|
210
ext/node/polyfills/inspector.js
Normal file
|
@ -0,0 +1,210 @@
|
||||||
|
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
||||||
|
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
|
||||||
|
|
||||||
|
import process from "node:process";
|
||||||
|
import { EventEmitter } from "node:events";
|
||||||
|
import { primordials } from "ext:core/mod.js";
|
||||||
|
import {
|
||||||
|
op_get_extras_binding_object,
|
||||||
|
op_inspector_close,
|
||||||
|
op_inspector_connect,
|
||||||
|
op_inspector_disconnect,
|
||||||
|
op_inspector_dispatch,
|
||||||
|
op_inspector_emit_protocol_event,
|
||||||
|
op_inspector_enabled,
|
||||||
|
op_inspector_open,
|
||||||
|
op_inspector_url,
|
||||||
|
op_inspector_wait,
|
||||||
|
} from "ext:core/ops";
|
||||||
|
import {
|
||||||
|
isUint32,
|
||||||
|
validateFunction,
|
||||||
|
validateInt32,
|
||||||
|
validateObject,
|
||||||
|
validateString,
|
||||||
|
} from "ext:deno_node/internal/validators.mjs";
|
||||||
|
import {
|
||||||
|
ERR_INSPECTOR_ALREADY_ACTIVATED,
|
||||||
|
ERR_INSPECTOR_ALREADY_CONNECTED,
|
||||||
|
ERR_INSPECTOR_CLOSED,
|
||||||
|
ERR_INSPECTOR_COMMAND,
|
||||||
|
ERR_INSPECTOR_NOT_ACTIVE,
|
||||||
|
ERR_INSPECTOR_NOT_CONNECTED,
|
||||||
|
ERR_INSPECTOR_NOT_WORKER,
|
||||||
|
} from "ext:deno_node/internal/errors.ts";
|
||||||
|
|
||||||
|
const {
|
||||||
|
SymbolDispose,
|
||||||
|
JSONParse,
|
||||||
|
JSONStringify,
|
||||||
|
SafeMap,
|
||||||
|
} = primordials;
|
||||||
|
|
||||||
|
class Session extends EventEmitter {
|
||||||
|
#connection = null;
|
||||||
|
#nextId = 1;
|
||||||
|
#messageCallbacks = new SafeMap();
|
||||||
|
|
||||||
|
connect() {
|
||||||
|
if (this.#connection) {
|
||||||
|
throw new ERR_INSPECTOR_ALREADY_CONNECTED("The inspector session");
|
||||||
|
}
|
||||||
|
this.#connection = op_inspector_connect(false, (m) => this.#onMessage(m));
|
||||||
|
}
|
||||||
|
|
||||||
|
connectToMainThread() {
|
||||||
|
if (isMainThread) {
|
||||||
|
throw new ERR_INSPECTOR_NOT_WORKER();
|
||||||
|
}
|
||||||
|
if (this.#connection) {
|
||||||
|
throw new ERR_INSPECTOR_ALREADY_CONNECTED("The inspector session");
|
||||||
|
}
|
||||||
|
this.#connection = op_inspector_connect(true, (m) => this.#onMessage(m));
|
||||||
|
}
|
||||||
|
|
||||||
|
#onMessage(message) {
|
||||||
|
const parsed = JSONParse(message);
|
||||||
|
try {
|
||||||
|
if (parsed.id) {
|
||||||
|
const callback = this.#messageCallbacks.get(parsed.id);
|
||||||
|
this.#messageCallbacks.delete(parsed.id);
|
||||||
|
if (callback) {
|
||||||
|
if (parsed.error) {
|
||||||
|
return callback(
|
||||||
|
new ERR_INSPECTOR_COMMAND(
|
||||||
|
parsed.error.code,
|
||||||
|
parsed.error.message,
|
||||||
|
),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
callback(null, parsed.result);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
this.emit(parsed.method, parsed);
|
||||||
|
this.emit("inspectorNotification", parsed);
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
process.emitWarning(error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
post(method, params, callback) {
|
||||||
|
validateString(method, "method");
|
||||||
|
if (!callback && typeof params === "function") {
|
||||||
|
callback = params;
|
||||||
|
params = null;
|
||||||
|
}
|
||||||
|
if (params) {
|
||||||
|
validateObject(params, "params");
|
||||||
|
}
|
||||||
|
if (callback) {
|
||||||
|
validateFunction(callback, "callback");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!this.#connection) {
|
||||||
|
throw new ERR_INSPECTOR_NOT_CONNECTED();
|
||||||
|
}
|
||||||
|
const id = this.#nextId++;
|
||||||
|
const message = { id, method };
|
||||||
|
if (params) {
|
||||||
|
message.params = params;
|
||||||
|
}
|
||||||
|
if (callback) {
|
||||||
|
this.#messageCallbacks.set(id, callback);
|
||||||
|
}
|
||||||
|
op_inspector_dispatch(this.#connection, JSONStringify(message));
|
||||||
|
}
|
||||||
|
|
||||||
|
disconnect() {
|
||||||
|
if (!this.#connection) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
op_inspector_disconnect(this.#connection);
|
||||||
|
this.#connection = null;
|
||||||
|
// deno-lint-ignore prefer-primordials
|
||||||
|
for (const callback of this.#messageCallbacks.values()) {
|
||||||
|
process.nextTick(callback, new ERR_INSPECTOR_CLOSED());
|
||||||
|
}
|
||||||
|
this.#messageCallbacks.clear();
|
||||||
|
this.#nextId = 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function open(port, host, wait) {
|
||||||
|
if (op_inspector_enabled()) {
|
||||||
|
throw new ERR_INSPECTOR_ALREADY_ACTIVATED();
|
||||||
|
}
|
||||||
|
// inspectorOpen() currently does not typecheck its arguments and adding
|
||||||
|
// such checks would be a potentially breaking change. However, the native
|
||||||
|
// open() function requires the port to fit into a 16-bit unsigned integer,
|
||||||
|
// causing an integer overflow otherwise, so we at least need to prevent that.
|
||||||
|
if (isUint32(port)) {
|
||||||
|
validateInt32(port, "port", 0, 65535);
|
||||||
|
} else {
|
||||||
|
// equiv of handling args[0]->IsUint32()
|
||||||
|
port = undefined;
|
||||||
|
}
|
||||||
|
if (typeof host !== "string") {
|
||||||
|
// equiv of handling args[1]->IsString()
|
||||||
|
host = undefined;
|
||||||
|
}
|
||||||
|
op_inspector_open(port, host);
|
||||||
|
if (wait) {
|
||||||
|
op_inspector_wait();
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
__proto__: null,
|
||||||
|
[SymbolDispose]() {
|
||||||
|
_debugEnd();
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
function close() {
|
||||||
|
op_inspector_close();
|
||||||
|
}
|
||||||
|
|
||||||
|
function url() {
|
||||||
|
return op_inspector_url();
|
||||||
|
}
|
||||||
|
|
||||||
|
function waitForDebugger() {
|
||||||
|
if (!op_inspector_wait()) {
|
||||||
|
throw new ERR_INSPECTOR_NOT_ACTIVE();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function broadcastToFrontend(eventName, params) {
|
||||||
|
validateString(eventName, "eventName");
|
||||||
|
if (params) {
|
||||||
|
validateObject(params, "params");
|
||||||
|
}
|
||||||
|
op_inspector_emit_protocol_event(eventName, JSONStringify(params ?? {}));
|
||||||
|
}
|
||||||
|
|
||||||
|
const Network = {
|
||||||
|
requestWillBeSent: (params) =>
|
||||||
|
broadcastToFrontend("Network.requestWillBeSent", params),
|
||||||
|
responseReceived: (params) =>
|
||||||
|
broadcastToFrontend("Network.responseReceived", params),
|
||||||
|
loadingFinished: (params) =>
|
||||||
|
broadcastToFrontend("Network.loadingFinished", params),
|
||||||
|
loadingFailed: (params) =>
|
||||||
|
broadcastToFrontend("Network.loadingFailed", params),
|
||||||
|
};
|
||||||
|
|
||||||
|
const console = op_get_extras_binding_object().console;
|
||||||
|
|
||||||
|
export { close, console, Network, open, Session, url, waitForDebugger };
|
||||||
|
|
||||||
|
export default {
|
||||||
|
open,
|
||||||
|
close,
|
||||||
|
url,
|
||||||
|
waitForDebugger,
|
||||||
|
console,
|
||||||
|
Session,
|
||||||
|
Network,
|
||||||
|
};
|
|
@ -1,82 +0,0 @@
|
||||||
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
|
||||||
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
|
|
||||||
|
|
||||||
import { EventEmitter } from "node:events";
|
|
||||||
import { notImplemented } from "ext:deno_node/_utils.ts";
|
|
||||||
import { primordials } from "ext:core/mod.js";
|
|
||||||
|
|
||||||
const {
|
|
||||||
SafeMap,
|
|
||||||
} = primordials;
|
|
||||||
|
|
||||||
class Session extends EventEmitter {
|
|
||||||
#connection = null;
|
|
||||||
#nextId = 1;
|
|
||||||
#messageCallbacks = new SafeMap();
|
|
||||||
|
|
||||||
/** Connects the session to the inspector back-end. */
|
|
||||||
connect() {
|
|
||||||
notImplemented("inspector.Session.prototype.connect");
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Connects the session to the main thread
|
|
||||||
* inspector back-end. */
|
|
||||||
connectToMainThread() {
|
|
||||||
notImplemented("inspector.Session.prototype.connectToMainThread");
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Posts a message to the inspector back-end. */
|
|
||||||
post(
|
|
||||||
_method: string,
|
|
||||||
_params?: Record<string, unknown>,
|
|
||||||
_callback?: (...args: unknown[]) => void,
|
|
||||||
) {
|
|
||||||
notImplemented("inspector.Session.prototype.post");
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Immediately closes the session, all pending
|
|
||||||
* message callbacks will be called with an
|
|
||||||
* error.
|
|
||||||
*/
|
|
||||||
disconnect() {
|
|
||||||
notImplemented("inspector.Session.prototype.disconnect");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Activates inspector on host and port.
|
|
||||||
* See https://nodejs.org/api/inspector.html#inspectoropenport-host-wait */
|
|
||||||
function open(_port?: number, _host?: string, _wait?: boolean) {
|
|
||||||
notImplemented("inspector.Session.prototype.open");
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Deactivate the inspector. Blocks until there are no active connections.
|
|
||||||
* See https://nodejs.org/api/inspector.html#inspectorclose */
|
|
||||||
function close() {
|
|
||||||
notImplemented("inspector.Session.prototype.close");
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Return the URL of the active inspector, or undefined if there is none.
|
|
||||||
* See https://nodejs.org/api/inspector.html#inspectorurl */
|
|
||||||
function url() {
|
|
||||||
// TODO(kt3k): returns undefined for now, which means the inspector is not activated.
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Blocks until a client (existing or connected later) has sent Runtime.runIfWaitingForDebugger command.
|
|
||||||
* See https://nodejs.org/api/inspector.html#inspectorwaitfordebugger */
|
|
||||||
function waitForDebugger() {
|
|
||||||
notImplemented("inspector.wairForDebugger");
|
|
||||||
}
|
|
||||||
|
|
||||||
const console = globalThis.console;
|
|
||||||
|
|
||||||
export { close, console, open, Session, url, waitForDebugger };
|
|
||||||
|
|
||||||
export default {
|
|
||||||
close,
|
|
||||||
console,
|
|
||||||
open,
|
|
||||||
Session,
|
|
||||||
url,
|
|
||||||
waitForDebugger,
|
|
||||||
};
|
|
20
ext/node/polyfills/inspector/promises.js
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
||||||
|
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
|
||||||
|
|
||||||
|
import inspector from "node:inspector";
|
||||||
|
import { promisify } from "ext:deno_node/internal/util.mjs";
|
||||||
|
|
||||||
|
class Session extends inspector.Session {
|
||||||
|
constructor() {
|
||||||
|
super();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Session.prototype.post = promisify(inspector.Session.prototype.post);
|
||||||
|
|
||||||
|
export * from "node:inspector";
|
||||||
|
export { Session };
|
||||||
|
|
||||||
|
export default {
|
||||||
|
...inspector,
|
||||||
|
Session,
|
||||||
|
};
|
|
@@ -18,7 +18,7 @@
 */

 import { primordials } from "ext:core/mod.js";
-const { JSONStringify, SymbolFor } = primordials;
+const { JSONStringify, SafeArrayIterator, SymbolFor } = primordials;
 import { format, inspect } from "ext:deno_node/internal/util/inspect.mjs";
 import { codes } from "ext:deno_node/internal/error_codes.ts";
 import {
@@ -1874,6 +1874,11 @@ export class ERR_SOCKET_CLOSED extends NodeError {
     super("ERR_SOCKET_CLOSED", `Socket is closed`);
   }
 }
+export class ERR_SOCKET_CONNECTION_TIMEOUT extends NodeError {
+  constructor() {
+    super("ERR_SOCKET_CONNECTION_TIMEOUT", `Socket connection timeout`);
+  }
+}
 export class ERR_SOCKET_DGRAM_IS_CONNECTED extends NodeError {
   constructor() {
     super("ERR_SOCKET_DGRAM_IS_CONNECTED", `Already connected`);
@@ -2633,11 +2638,30 @@ export function aggregateTwoErrors(
   }
   return innerError || outerError;
 }
+
+export class NodeAggregateError extends AggregateError {
+  code: string;
+  constructor(errors, message) {
+    super(new SafeArrayIterator(errors), message);
+    this.code = errors[0]?.code;
+  }
+
+  get [kIsNodeError]() {
+    return true;
+  }
+
+  // deno-lint-ignore adjacent-overload-signatures
+  get ["constructor"]() {
+    return AggregateError;
+  }
+}
+
 codes.ERR_IPC_CHANNEL_CLOSED = ERR_IPC_CHANNEL_CLOSED;
 codes.ERR_INVALID_ARG_TYPE = ERR_INVALID_ARG_TYPE;
 codes.ERR_INVALID_ARG_VALUE = ERR_INVALID_ARG_VALUE;
 codes.ERR_OUT_OF_RANGE = ERR_OUT_OF_RANGE;
 codes.ERR_SOCKET_BAD_PORT = ERR_SOCKET_BAD_PORT;
+codes.ERR_SOCKET_CONNECTION_TIMEOUT = ERR_SOCKET_CONNECTION_TIMEOUT;
 codes.ERR_BUFFER_OUT_OF_BOUNDS = ERR_BUFFER_OUT_OF_BOUNDS;
 codes.ERR_UNKNOWN_ENCODING = ERR_UNKNOWN_ENCODING;
 codes.ERR_PARSE_ARGS_INVALID_OPTION_VALUE = ERR_PARSE_ARGS_INVALID_OPTION_VALUE;
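When every address attempt fails, the connection logic later in this diff destroys the socket with the NodeAggregateError defined above (or with ERR_SOCKET_CONNECTION_TIMEOUT when no attempt produced an individual error). A rough sketch of what a caller observes follows; the host and port are placeholders and the exact codes depend on the network.

// Illustrative sketch, not part of the diff.
import net from "node:net";

const socket = net.connect({ host: "localhost", port: 9, autoSelectFamily: true });
socket.on("error", (err: NodeJS.ErrnoException) => {
  if (err instanceof AggregateError) {
    // err.errors holds one error per attempted address; err.code mirrors the
    // code of the first one (e.g. "ECONNREFUSED").
    console.log(err.code, err.errors.length);
  } else {
    console.log(err.code); // e.g. "ERR_SOCKET_CONNECTION_TIMEOUT"
  }
});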
@@ -95,4 +95,5 @@ export function makeSyncWrite(fd: number) {
   };
 }

+export const kReinitializeHandle = Symbol("kReinitializeHandle");
 export const normalizedArgsSymbol = Symbol("normalizedArgs");
@@ -530,10 +530,12 @@ export function mapSysErrnoToUvErrno(sysErrno: number): number {

 export const UV_EAI_MEMORY = codeMap.get("EAI_MEMORY")!;
 export const UV_EBADF = codeMap.get("EBADF")!;
+export const UV_ECANCELED = codeMap.get("ECANCELED")!;
 export const UV_EEXIST = codeMap.get("EEXIST");
 export const UV_EINVAL = codeMap.get("EINVAL")!;
 export const UV_ENOENT = codeMap.get("ENOENT");
 export const UV_ENOTSOCK = codeMap.get("ENOTSOCK")!;
+export const UV_ETIMEDOUT = codeMap.get("ETIMEDOUT")!;
 export const UV_UNKNOWN = codeMap.get("UNKNOWN")!;

 export function errname(errno: number): string {
@@ -31,6 +31,7 @@ import {
   isIP,
   isIPv4,
   isIPv6,
+  kReinitializeHandle,
   normalizedArgsSymbol,
 } from "ext:deno_node/internal/net.ts";
 import { Duplex } from "node:stream";
@@ -50,9 +51,11 @@ import {
   ERR_SERVER_ALREADY_LISTEN,
   ERR_SERVER_NOT_RUNNING,
   ERR_SOCKET_CLOSED,
+  ERR_SOCKET_CONNECTION_TIMEOUT,
   errnoException,
   exceptionWithHostPort,
   genericNodeError,
+  NodeAggregateError,
   uvExceptionWithHostPort,
 } from "ext:deno_node/internal/errors.ts";
 import type { ErrnoException } from "ext:deno_node/internal/errors.ts";
@@ -80,6 +83,7 @@ import { Buffer } from "node:buffer";
 import type { LookupOneOptions } from "ext:deno_node/internal/dns/utils.ts";
 import {
   validateAbortSignal,
+  validateBoolean,
   validateFunction,
   validateInt32,
   validateNumber,
@@ -100,13 +104,25 @@ import { ShutdownWrap } from "ext:deno_node/internal_binding/stream_wrap.ts";
 import { assert } from "ext:deno_node/_util/asserts.ts";
 import { isWindows } from "ext:deno_node/_util/os.ts";
 import { ADDRCONFIG, lookup as dnsLookup } from "node:dns";
-import { codeMap } from "ext:deno_node/internal_binding/uv.ts";
+import {
+  codeMap,
+  UV_ECANCELED,
+  UV_ETIMEDOUT,
+} from "ext:deno_node/internal_binding/uv.ts";
 import { guessHandleType } from "ext:deno_node/internal_binding/util.ts";
 import { debuglog } from "ext:deno_node/internal/util/debuglog.ts";
 import type { DuplexOptions } from "ext:deno_node/_stream.d.ts";
 import type { BufferEncoding } from "ext:deno_node/_global.d.ts";
 import type { Abortable } from "ext:deno_node/_events.d.ts";
 import { channel } from "node:diagnostics_channel";
+import { primordials } from "ext:core/mod.js";
+
+const {
+  ArrayPrototypeIncludes,
+  ArrayPrototypePush,
+  FunctionPrototypeBind,
+  MathMax,
+} = primordials;

 let debug = debuglog("net", (fn) => {
   debug = fn;
@@ -120,6 +136,9 @@ const kBytesWritten = Symbol("kBytesWritten");
 const DEFAULT_IPV4_ADDR = "0.0.0.0";
 const DEFAULT_IPV6_ADDR = "::";

+let autoSelectFamilyDefault = true;
+let autoSelectFamilyAttemptTimeoutDefault = 250;
+
 type Handle = TCP | Pipe;

 interface HandleOptions {
@@ -214,6 +233,8 @@ interface TcpSocketConnectOptions extends ConnectOptions {
   hints?: number;
   family?: number;
   lookup?: LookupFunction;
+  autoSelectFamily?: boolean | undefined;
+  autoSelectFamilyAttemptTimeout?: number | undefined;
 }

 interface IpcSocketConnectOptions extends ConnectOptions {
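The two options added to TcpSocketConnectOptions above are the per-call knobs for the new Happy Eyeballs-style behavior. A small usage sketch with example values (these are not the defaults):

// Illustrative sketch, not part of the diff.
import net from "node:net";

const socket = net.connect({
  host: "localhost",
  port: 8080,
  autoSelectFamily: true, // try IPv6 and IPv4 results alternately
  autoSelectFamilyAttemptTimeout: 300, // ms before moving to the next address
});
socket.on("connect", () => {
  // Populated only when more than one address was actually attempted;
  // entries look like "::1:8080" or "127.0.0.1:8080".
  console.log(socket.autoSelectFamilyAttemptedAddresses);
  socket.end();
});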
@@ -316,12 +337,6 @@ export function _normalizeArgs(args: unknown[]): NormalizedArgs {
   return arr;
 }

-function _isTCPConnectWrap(
-  req: TCPConnectWrap | PipeConnectWrap,
-): req is TCPConnectWrap {
-  return "localAddress" in req && "localPort" in req;
-}
-
 function _afterConnect(
   status: number,
   // deno-lint-ignore no-explicit-any
@@ -372,7 +387,7 @@ function _afterConnect(
     socket.connecting = false;
     let details;

-    if (_isTCPConnectWrap(req)) {
+    if (req.localAddress && req.localPort) {
       details = req.localAddress + ":" + req.localPort;
     }

@@ -384,7 +399,7 @@ function _afterConnect(
       details,
     );

-    if (_isTCPConnectWrap(req)) {
+    if (details) {
       ex.localAddress = req.localAddress;
       ex.localPort = req.localPort;
     }
@@ -393,6 +408,107 @@ function _afterConnect(
   }
 }

+function _createConnectionError(req, status) {
+  let details;
+
+  if (req.localAddress && req.localPort) {
+    details = req.localAddress + ":" + req.localPort;
+  }
+
+  const ex = exceptionWithHostPort(
+    status,
+    "connect",
+    req.address,
+    req.port,
+    details,
+  );
+  if (details) {
+    ex.localAddress = req.localAddress;
+    ex.localPort = req.localPort;
+  }
+
+  return ex;
+}
+
+function _afterConnectMultiple(
+  context,
+  current,
+  status,
+  handle,
+  req,
+  readable,
+  writable,
+) {
+  debug(
+    "connect/multiple: connection attempt to %s:%s completed with status %s",
+    req.address,
+    req.port,
+    status,
+  );
+
+  // Make sure another connection is not spawned
+  clearTimeout(context[kTimeout]);
+
+  // One of the connection has completed and correctly dispatched but after timeout, ignore this one
+  if (status === 0 && current !== context.current - 1) {
+    debug(
+      "connect/multiple: ignoring successful but timedout connection to %s:%s",
+      req.address,
+      req.port,
+    );
+    handle.close();
+    return;
+  }
+
+  const self = context.socket;
+
+  // Some error occurred, add to the list of exceptions
+  if (status !== 0) {
+    const ex = _createConnectionError(req, status);
+    ArrayPrototypePush(context.errors, ex);
+
+    self.emit(
+      "connectionAttemptFailed",
+      req.address,
+      req.port,
+      req.addressType,
+      ex,
+    );
+
+    // Try the next address, unless we were aborted
+    if (context.socket.connecting) {
+      _internalConnectMultiple(context, status === UV_ECANCELED);
+    }
+
+    return;
+  }
+
+  _afterConnect(status, self._handle, req, readable, writable);
+}
+
+function _internalConnectMultipleTimeout(context, req, handle) {
+  debug(
+    "connect/multiple: connection to %s:%s timed out",
+    req.address,
+    req.port,
+  );
+  context.socket.emit(
+    "connectionAttemptTimeout",
+    req.address,
+    req.port,
+    req.addressType,
+  );
+
+  req.oncomplete = undefined;
+  ArrayPrototypePush(context.errors, _createConnectionError(req, UV_ETIMEDOUT));
+  handle.close();
+
+  // Try the next address, unless we were aborted
+  if (context.socket.connecting) {
+    _internalConnectMultiple(context);
+  }
+}
+
 function _checkBindError(err: number, port: number, handle: TCP) {
   // EADDRINUSE may not be reported until we call `listen()` or `connect()`.
   // To complicate matters, a failed `bind()` followed by `listen()` or `connect()`
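The helpers above emit the diagnostic events Node defines for this flow: "connectionAttempt" when an address is tried, "connectionAttemptFailed" when an attempt errors, and "connectionAttemptTimeout" when the per-attempt timer fires. A minimal listener sketch (host and port are placeholders):

// Illustrative sketch, not part of the diff.
import net from "node:net";

const socket = net.connect({ host: "localhost", port: 8080, autoSelectFamily: true });
socket.on("connectionAttempt", (address: string, port: number, family: number) => {
  console.log(`trying ${address}:${port} (IPv${family})`);
});
socket.on(
  "connectionAttemptFailed",
  (address: string, port: number, family: number, err: NodeJS.ErrnoException) => {
    console.log(`attempt to ${address}:${port} failed: ${err.code}`);
  },
);
socket.on("connectionAttemptTimeout", (address: string, port: number, family: number) => {
  console.log(`attempt to ${address}:${port} timed out, trying the next address`);
});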
@@ -495,6 +611,131 @@ function _internalConnect(
   }
 }

+function _internalConnectMultiple(context, canceled?: boolean) {
+  clearTimeout(context[kTimeout]);
+  const self = context.socket;
+
+  // We were requested to abort. Stop all operations
+  if (self._aborted) {
+    return;
+  }
+
+  // All connections have been tried without success, destroy with error
+  if (canceled || context.current === context.addresses.length) {
+    if (context.errors.length === 0) {
+      self.destroy(new ERR_SOCKET_CONNECTION_TIMEOUT());
+      return;
+    }
+
+    self.destroy(new NodeAggregateError(context.errors));
+    return;
+  }
+
+  assert(self.connecting);
+
+  const current = context.current++;
+
+  if (current > 0) {
+    self[kReinitializeHandle](new TCP(TCPConstants.SOCKET));
+  }
+
+  const { localPort, port, flags } = context;
+  const { address, family: addressType } = context.addresses[current];
+  let localAddress;
+  let err;
+
+  if (localPort) {
+    if (addressType === 4) {
+      localAddress = DEFAULT_IPV4_ADDR;
+      err = self._handle.bind(localAddress, localPort);
+    } else { // addressType === 6
+      localAddress = DEFAULT_IPV6_ADDR;
+      err = self._handle.bind6(localAddress, localPort, flags);
+    }
+
+    debug(
+      "connect/multiple: binding to localAddress: %s and localPort: %d (addressType: %d)",
+      localAddress,
+      localPort,
+      addressType,
+    );
+
+    err = _checkBindError(err, localPort, self._handle);
+    if (err) {
+      ArrayPrototypePush(
+        context.errors,
+        exceptionWithHostPort(err, "bind", localAddress, localPort),
+      );
+      _internalConnectMultiple(context);
+      return;
+    }
+  }
+
+  debug(
+    "connect/multiple: attempting to connect to %s:%d (addressType: %d)",
+    address,
+    port,
+    addressType,
+  );
+  self.emit("connectionAttempt", address, port, addressType);
+
+  const req = new TCPConnectWrap();
+  req.oncomplete = FunctionPrototypeBind(
+    _afterConnectMultiple,
+    undefined,
+    context,
+    current,
+  );
+  req.address = address;
+  req.port = port;
+  req.localAddress = localAddress;
+  req.localPort = localPort;
+  req.addressType = addressType;
+
+  ArrayPrototypePush(
+    self.autoSelectFamilyAttemptedAddresses,
+    `${address}:${port}`,
+  );
+
+  if (addressType === 4) {
+    err = self._handle.connect(req, address, port);
+  } else {
+    err = self._handle.connect6(req, address, port);
+  }
+
+  if (err) {
+    const sockname = self._getsockname();
+    let details;
+
+    if (sockname) {
+      details = sockname.address + ":" + sockname.port;
+    }
+
+    const ex = exceptionWithHostPort(err, "connect", address, port, details);
+    ArrayPrototypePush(context.errors, ex);
+
+    self.emit("connectionAttemptFailed", address, port, addressType, ex);
+    _internalConnectMultiple(context);
+    return;
+  }
+
+  if (current < context.addresses.length - 1) {
+    debug(
+      "connect/multiple: setting the attempt timeout to %d ms",
+      context.timeout,
+    );
+
+    // If the attempt has not returned an error, start the connection timer
+    context[kTimeout] = setTimeout(
+      _internalConnectMultipleTimeout,
+      context.timeout,
+      context,
+      req,
+      self._handle,
+    );
+  }
+}
+
 // Provide a better error message when we call end() as a result
 // of the other side sending a FIN. The standard "write after end"
 // is overly vague, and makes it seem like the user's code is to blame.
@@ -597,7 +838,7 @@ function _lookupAndConnect(
 ) {
   const { localAddress, localPort } = options;
   const host = options.host || "localhost";
-  let { port } = options;
+  let { port, autoSelectFamilyAttemptTimeout, autoSelectFamily } = options;

   if (localAddress && !isIP(localAddress)) {
     throw new ERR_INVALID_IP_ADDRESS(localAddress);
@@ -621,6 +862,22 @@ function _lookupAndConnect(

   port |= 0;

+  if (autoSelectFamily != null) {
+    validateBoolean(autoSelectFamily, "options.autoSelectFamily");
+  } else {
+    autoSelectFamily = autoSelectFamilyDefault;
+  }
+
+  if (autoSelectFamilyAttemptTimeout !== undefined) {
+    validateInt32(autoSelectFamilyAttemptTimeout);
+
+    if (autoSelectFamilyAttemptTimeout < 10) {
+      autoSelectFamilyAttemptTimeout = 10;
+    }
+  } else {
+    autoSelectFamilyAttemptTimeout = autoSelectFamilyAttemptTimeoutDefault;
+  }
+
   // If host is an IP, skip performing a lookup
   const addressType = isIP(host);
   if (addressType) {
@@ -649,6 +906,7 @@ function _lookupAndConnect(
   const dnsOpts = {
     family: options.family,
     hints: options.hints || 0,
+    all: false,
   };

   if (
@@ -665,6 +923,31 @@ function _lookupAndConnect(
   self._host = host;
   const lookup = options.lookup || dnsLookup;

+  if (
+    dnsOpts.family !== 4 && dnsOpts.family !== 6 && !localAddress &&
+    autoSelectFamily
+  ) {
+    debug("connect: autodetecting");
+
+    dnsOpts.all = true;
+    defaultTriggerAsyncIdScope(self[asyncIdSymbol], function () {
+      _lookupAndConnectMultiple(
+        self,
+        asyncIdSymbol,
+        lookup,
+        host,
+        options,
+        dnsOpts,
+        port,
+        localAddress,
+        localPort,
+        autoSelectFamilyAttemptTimeout,
+      );
+    });
+
+    return;
+  }
+
   defaultTriggerAsyncIdScope(self[asyncIdSymbol], function () {
     lookup(
       host,
@@ -723,6 +1006,143 @@ function _lookupAndConnect(
   });
 }

+function _lookupAndConnectMultiple(
+  self: Socket,
+  asyncIdSymbol: number,
+  // deno-lint-ignore no-explicit-any
+  lookup: any,
+  host: string,
+  options: TcpSocketConnectOptions,
+  dnsopts,
+  port: number,
+  localAddress: string,
+  localPort: number,
+  timeout: number | undefined,
+) {
+  defaultTriggerAsyncIdScope(self[asyncIdSymbol], function emitLookup() {
+    lookup(host, dnsopts, function emitLookup(err, addresses) {
+      // It's possible we were destroyed while looking this up.
+      // XXX it would be great if we could cancel the promise returned by
+      // the look up.
+      if (!self.connecting) {
+        return;
+      } else if (err) {
+        self.emit("lookup", err, undefined, undefined, host);
+
+        // net.createConnection() creates a net.Socket object and immediately
+        // calls net.Socket.connect() on it (that's us). There are no event
+        // listeners registered yet so defer the error event to the next tick.
+        nextTick(_connectErrorNT, self, err);
+        return;
+      }
+
+      // Filter addresses by only keeping the one which are either IPv4 or IPV6.
+      // The first valid address determines which group has preference on the
+      // alternate family sorting which happens later.
+      const validAddresses = [[], []];
+      const validIps = [[], []];
+      let destinations;
+      for (let i = 0, l = addresses.length; i < l; i++) {
+        const address = addresses[i];
+        const { address: ip, family: addressType } = address;
+        self.emit("lookup", err, ip, addressType, host);
+        // It's possible we were destroyed while looking this up.
+        if (!self.connecting) {
+          return;
+        }
+        if (isIP(ip) && (addressType === 4 || addressType === 6)) {
+          destinations ||= addressType === 6 ? { 6: 0, 4: 1 } : { 4: 0, 6: 1 };
+
+          const destination = destinations[addressType];
+
+          // Only try an address once
+          if (!ArrayPrototypeIncludes(validIps[destination], ip)) {
+            ArrayPrototypePush(validAddresses[destination], address);
+            ArrayPrototypePush(validIps[destination], ip);
+          }
+        }
+      }
+
+      // When no AAAA or A records are available, fail on the first one
+      if (!validAddresses[0].length && !validAddresses[1].length) {
+        const { address: firstIp, family: firstAddressType } = addresses[0];
+
+        if (!isIP(firstIp)) {
+          err = new ERR_INVALID_IP_ADDRESS(firstIp);
+          nextTick(_connectErrorNT, self, err);
+        } else if (firstAddressType !== 4 && firstAddressType !== 6) {
+          err = new ERR_INVALID_ADDRESS_FAMILY(
+            firstAddressType,
+            options.host,
+            options.port,
+          );
+          nextTick(_connectErrorNT, self, err);
+        }
+
+        return;
+      }
+
+      // Sort addresses alternating families
+      const toAttempt = [];
+      for (
+        let i = 0,
+          l = MathMax(validAddresses[0].length, validAddresses[1].length);
+        i < l;
+        i++
+      ) {
+        if (i in validAddresses[0]) {
+          ArrayPrototypePush(toAttempt, validAddresses[0][i]);
+        }
+        if (i in validAddresses[1]) {
+          ArrayPrototypePush(toAttempt, validAddresses[1][i]);
+        }
+      }
+
+      if (toAttempt.length === 1) {
+        debug(
+          "connect/multiple: only one address found, switching back to single connection",
+        );
+        const { address: ip, family: addressType } = toAttempt[0];
+
+        self._unrefTimer();
+        defaultTriggerAsyncIdScope(
+          self[asyncIdSymbol],
+          _internalConnect,
+          self,
+          ip,
+          port,
+          addressType,
+          localAddress,
+          localPort,
+        );
+
+        return;
+      }
+
+      self.autoSelectFamilyAttemptedAddresses = [];
+      debug("connect/multiple: will try the following addresses", toAttempt);
+
+      const context = {
+        socket: self,
+        addresses: toAttempt,
+        current: 0,
+        port,
+        localPort,
+        timeout,
+        [kTimeout]: null,
+        errors: [],
+      };
+
+      self._unrefTimer();
+      defaultTriggerAsyncIdScope(
+        self[asyncIdSymbol],
+        _internalConnectMultiple,
+        context,
+      );
+    });
+  });
+}
+
 function _afterShutdown(this: ShutdownWrap<TCP>) {
   // deno-lint-ignore no-explicit-any
   const self: any = this.handle[ownerSymbol];
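The "Sort addresses alternating families" step above is the core of the Happy Eyeballs ordering: whichever family the first usable DNS record has goes first, then the two family buckets are interleaved. A simplified standalone helper illustrating the resulting order (deduplication of repeated IPs is omitted here; the addresses are documentation placeholders):

// Illustrative sketch, not part of the diff.
type LookupAddress = { address: string; family: number };

function interleaveByFamily(addresses: LookupAddress[]): LookupAddress[] {
  const firstFamily = addresses[0]?.family === 6 ? 6 : 4;
  const primary = addresses.filter((a) => a.family === firstFamily);
  const secondary = addresses.filter((a) => a.family !== firstFamily);
  const out: LookupAddress[] = [];
  for (let i = 0; i < Math.max(primary.length, secondary.length); i++) {
    if (i < primary.length) out.push(primary[i]);
    if (i < secondary.length) out.push(secondary[i]);
  }
  return out;
}

// ["2001:db8::1", "2001:db8::2", "192.0.2.1"] -> 2001:db8::1, 192.0.2.1, 2001:db8::2
console.log(interleaveByFamily([
  { address: "2001:db8::1", family: 6 },
  { address: "2001:db8::2", family: 6 },
  { address: "192.0.2.1", family: 4 },
]));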
@@ -782,6 +1202,7 @@ export class Socket extends Duplex {
   // deno-lint-ignore no-explicit-any
   _parent: any = null;
   _isNpmAgent = false;
+  autoSelectFamilyAttemptedAddresses: AddressInfo[] | undefined = undefined;

   constructor(options: SocketOptions | number) {
     if (typeof options === "number") {
@@ -1564,6 +1985,16 @@ export class Socket extends Duplex {
   set _handle(v: Handle | null) {
     this[kHandle] = v;
   }
+
+  // deno-lint-ignore no-explicit-any
+  [kReinitializeHandle](handle: any) {
+    this._handle?.close();
+
+    this._handle = handle;
+    this._handle[ownerSymbol] = this;
+
+    _initSocketHandle(this);
+  }
 }

 export const Stream = Socket;
@@ -1611,6 +2042,33 @@ export function connect(...args: unknown[]) {

 export const createConnection = connect;

+/** https://docs.deno.com/api/node/net/#namespace_getdefaultautoselectfamily */
+export function getDefaultAutoSelectFamily() {
+  return autoSelectFamilyDefault;
+}
+
+/** https://docs.deno.com/api/node/net/#namespace_setdefaultautoselectfamily */
+export function setDefaultAutoSelectFamily(value: boolean) {
+  validateBoolean(value, "value");
+  autoSelectFamilyDefault = value;
+}
+
+/** https://docs.deno.com/api/node/net/#namespace_getdefaultautoselectfamilyattempttimeout */
+export function getDefaultAutoSelectFamilyAttemptTimeout() {
+  return autoSelectFamilyAttemptTimeoutDefault;
+}
+
+/** https://docs.deno.com/api/node/net/#namespace_setdefaultautoselectfamilyattempttimeout */
+export function setDefaultAutoSelectFamilyAttemptTimeout(value: number) {
+  validateInt32(value, "value", 1);
+
+  if (value < 10) {
+    value = 10;
+  }
+
+  autoSelectFamilyAttemptTimeoutDefault = value;
+}
+
 export interface ListenOptions extends Abortable {
   fd?: number;
   port?: number | undefined;
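The four exports above control the process-wide defaults that _lookupAndConnect falls back to when a connect() call does not pass the options explicitly. A short usage sketch; the logged values reflect the defaults set earlier in this file (true and 250 ms) and the 10 ms floor applied by the setter:

// Illustrative sketch, not part of the diff.
import net from "node:net";

console.log(net.getDefaultAutoSelectFamily()); // true
console.log(net.getDefaultAutoSelectFamilyAttemptTimeout()); // 250

net.setDefaultAutoSelectFamily(false); // opt out globally
net.setDefaultAutoSelectFamilyAttemptTimeout(5); // clamped to the 10 ms minimum
console.log(net.getDefaultAutoSelectFamilyAttemptTimeout()); // 10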
@@ -2496,15 +2954,19 @@ export { BlockList, isIP, isIPv4, isIPv6, SocketAddress };
 export default {
   _createServerHandle,
   _normalizeArgs,
-  isIP,
-  isIPv4,
-  isIPv6,
   BlockList,
-  SocketAddress,
   connect,
   createConnection,
   createServer,
+  getDefaultAutoSelectFamily,
+  getDefaultAutoSelectFamilyAttemptTimeout,
+  isIP,
+  isIPv4,
+  isIPv6,
   Server,
+  setDefaultAutoSelectFamily,
+  setDefaultAutoSelectFamilyAttemptTimeout,
   Socket,
+  SocketAddress,
   Stream,
 };

@@ -2,7 +2,7 @@

 [package]
 name = "deno_tls"
-version = "0.163.0"
+version = "0.164.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true

@@ -2,7 +2,7 @@

 [package]
 name = "deno_url"
-version = "0.176.0"
+version = "0.177.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -1,12 +1,9 @@
 // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

 import { core, primordials } from "ext:core/mod.js";
-import { op_defer, op_now } from "ext:core/ops";
+import { op_defer } from "ext:core/ops";
 const {
-  Uint8Array,
-  Uint32Array,
   PromisePrototypeThen,
-  TypedArrayPrototypeGetBuffer,
   TypeError,
   indirectEval,
   ReflectApply,
@@ -18,13 +15,6 @@ const {

 import * as webidl from "ext:deno_webidl/00_webidl.js";

-const hrU8 = new Uint8Array(8);
-const hr = new Uint32Array(TypedArrayPrototypeGetBuffer(hrU8));
-function opNow() {
-  op_now(hrU8);
-  return (hr[0] * 1000 + hr[1] / 1e6);
-}
-
 // ---------------------------------------------------------------------------

 function checkThis(thisArg) {
@@ -151,7 +141,6 @@ export {
   clearInterval,
   clearTimeout,
   defer,
-  opNow,
   refTimer,
   setImmediate,
   setInterval,

@@ -1,6 +1,7 @@
 // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

 import { primordials } from "ext:core/mod.js";
+import { op_now, op_time_origin } from "ext:core/ops";
 const {
   ArrayPrototypeFilter,
   ArrayPrototypePush,
@@ -10,19 +11,34 @@ const {
   Symbol,
   SymbolFor,
   TypeError,
+  TypedArrayPrototypeGetBuffer,
+  Uint8Array,
+  Uint32Array,
 } = primordials;

 import * as webidl from "ext:deno_webidl/00_webidl.js";
 import { structuredClone } from "./02_structured_clone.js";
 import { createFilteredInspectProxy } from "ext:deno_console/01_console.js";
 import { EventTarget } from "./02_event.js";
-import { opNow } from "./02_timers.js";
 import { DOMException } from "./01_dom_exception.js";

 const illegalConstructorKey = Symbol("illegalConstructorKey");
 let performanceEntries = [];
 let timeOrigin;

+const hrU8 = new Uint8Array(8);
+const hr = new Uint32Array(TypedArrayPrototypeGetBuffer(hrU8));
+
+function setTimeOrigin() {
+  op_time_origin(hrU8);
+  timeOrigin = hr[0] * 1000 + hr[1] / 1e6;
+}
+
+function now() {
+  op_now(hrU8);
+  return hr[0] * 1000 + hr[1] / 1e6;
+}
+
 webidl.converters["PerformanceMarkOptions"] = webidl
   .createDictionaryConverter(
     "PerformanceMarkOptions",
@@ -90,10 +106,6 @@ webidl.converters["DOMString or PerformanceMeasureOptions"] = (
   return webidl.converters.DOMString(V, prefix, context, opts);
 };

-function setTimeOrigin(origin) {
-  timeOrigin = origin;
-}
-
 function findMostRecent(
   name,
   type,
@@ -135,8 +147,6 @@ function filterByNameType(
   );
 }

-const now = opNow;
-
 const _name = Symbol("[[name]]");
 const _entryType = Symbol("[[entryType]]");
 const _startTime = Symbol("[[startTime]]");
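The now() and setTimeOrigin() helpers above share an 8-byte scratch buffer that the op fills with two native-endian u32 values: whole seconds first, then the sub-second nanoseconds. A small sketch of the same decode using a DataView, which makes the layout explicit (the little-endian flag is an assumption about the host):

// Illustrative sketch, not part of the diff.
function decodeHrBuffer(buf: Uint8Array): number {
  const view = new DataView(buf.buffer, buf.byteOffset, 8);
  const littleEndian = true; // assumes a little-endian host; the op writes native-endian bytes
  const seconds = view.getUint32(0, littleEndian);
  const subsecNanos = view.getUint32(4, littleEndian);
  return seconds * 1000 + subsecNanos / 1e6; // milliseconds, like now()
}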
@@ -2,7 +2,7 @@

 [package]
 name = "deno_web"
-version = "0.207.0"
+version = "0.208.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true

@@ -52,7 +52,8 @@ pub use crate::message_port::Transferable;

 use crate::timers::op_defer;
 use crate::timers::op_now;
-use crate::timers::StartTime;
+use crate::timers::op_time_origin;
+pub use crate::timers::StartTime;
 pub use crate::timers::TimersPermission;

 deno_core::extension!(deno_web,
@@ -84,6 +85,7 @@ deno_core::extension!(deno_web,
     compression::op_compression_write,
     compression::op_compression_finish,
     op_now<P>,
+    op_time_origin<P>,
     op_defer,
     stream_resource::op_readable_stream_resource_allocate,
     stream_resource::op_readable_stream_resource_allocate_sized,
@@ -123,7 +125,7 @@ deno_core::extension!(deno_web,
     if let Some(location) = options.maybe_location {
       state.put(Location(location));
     }
-    state.put(StartTime::now());
+    state.put(StartTime::default());
   }
 );

@@ -4,7 +4,10 @@

 use deno_core::op2;
 use deno_core::OpState;
+use std::time::Duration;
 use std::time::Instant;
+use std::time::SystemTime;
+use std::time::UNIX_EPOCH;

 pub trait TimersPermission {
   fn allow_hrtime(&mut self) -> bool;
@@ -17,21 +20,28 @@ impl TimersPermission for deno_permissions::PermissionsContainer {
   }
 }

-pub type StartTime = Instant;
+pub struct StartTime(Instant);

-// Returns a milliseconds and nanoseconds subsec
-// since the start time of the deno runtime.
-// If the High precision flag is not set, the
-// nanoseconds are rounded on 2ms.
-#[op2(fast)]
-pub fn op_now<TP>(state: &mut OpState, #[buffer] buf: &mut [u8])
+impl Default for StartTime {
+  fn default() -> Self {
+    Self(Instant::now())
+  }
+}
+
+impl std::ops::Deref for StartTime {
+  type Target = Instant;
+
+  fn deref(&self) -> &Self::Target {
+    &self.0
+  }
+}
+
+fn expose_time<TP>(state: &mut OpState, duration: Duration, out: &mut [u8])
 where
   TP: TimersPermission + 'static,
 {
-  let start_time = state.borrow::<StartTime>();
-  let elapsed = start_time.elapsed();
-  let seconds = elapsed.as_secs();
-  let mut subsec_nanos = elapsed.subsec_nanos();
+  let seconds = duration.as_secs() as u32;
+  let mut subsec_nanos = duration.subsec_nanos();

   // If the permission is not enabled
   // Round the nano result on 2 milliseconds
@@ -40,14 +50,33 @@ where
     let reduced_time_precision = 2_000_000; // 2ms in nanoseconds
     subsec_nanos -= subsec_nanos % reduced_time_precision;
   }
-  if buf.len() < 8 {
-    return;
-  }
-  let buf: &mut [u32] =
-    // SAFETY: buffer is at least 8 bytes long.
-    unsafe { std::slice::from_raw_parts_mut(buf.as_mut_ptr() as _, 2) };
-  buf[0] = seconds as u32;
-  buf[1] = subsec_nanos;
+
+  if out.len() >= 8 {
+    out[0..4].copy_from_slice(&seconds.to_ne_bytes());
+    out[4..8].copy_from_slice(&subsec_nanos.to_ne_bytes());
+  }
+}
+
+#[op2(fast)]
+pub fn op_now<TP>(state: &mut OpState, #[buffer] buf: &mut [u8])
+where
+  TP: TimersPermission + 'static,
+{
+  let start_time = state.borrow::<StartTime>();
+  let elapsed = start_time.elapsed();
+  expose_time::<TP>(state, elapsed, buf);
+}
+
+#[op2(fast)]
+pub fn op_time_origin<TP>(state: &mut OpState, #[buffer] buf: &mut [u8])
+where
+  TP: TimersPermission + 'static,
+{
+  // https://w3c.github.io/hr-time/#dfn-estimated-monotonic-time-of-the-unix-epoch
+  let wall_time = SystemTime::now();
+  let monotonic_time = state.borrow::<StartTime>().elapsed();
+  let epoch = wall_time.duration_since(UNIX_EPOCH).unwrap() - monotonic_time;
+  expose_time::<TP>(state, epoch, buf);
 }

 #[allow(clippy::unused_async)]
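op_time_origin implements the hr-time "estimated monotonic time of the Unix epoch": the wall clock sampled once, minus the monotonic time already elapsed since the runtime started. From JS that relationship can be sanity-checked like this (a small drift is expected, especially with the 2 ms rounding applied when the hrtime permission is not granted):

// Illustrative sketch, not part of the diff.
const drift = Math.abs(performance.timeOrigin + performance.now() - Date.now());
console.log(`hr clock vs wall clock drift: ${drift.toFixed(3)} ms`);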
@@ -2,7 +2,7 @@

 [package]
 name = "deno_webgpu"
-version = "0.143.0"
+version = "0.144.0"
 authors = ["the Deno authors"]
 edition.workspace = true
 license = "MIT"

@@ -2,7 +2,7 @@

 [package]
 name = "deno_webidl"
-version = "0.176.0"
+version = "0.177.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true

@@ -2,7 +2,7 @@

 [package]
 name = "deno_websocket"
-version = "0.181.0"
+version = "0.182.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true

@@ -2,7 +2,7 @@

 [package]
 name = "deno_webstorage"
-version = "0.171.0"
+version = "0.172.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true

@@ -2,7 +2,7 @@

 [package]
 name = "deno_resolver"
-version = "0.8.0"
+version = "0.9.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true

@@ -2,7 +2,7 @@

 [package]
 name = "node_resolver"
-version = "0.15.0"
+version = "0.16.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true

@@ -2,7 +2,7 @@

 [package]
 name = "deno_runtime"
-version = "0.185.0"
+version = "0.186.0"
 authors.workspace = true
 edition.workspace = true
 license.workspace = true
@@ -623,7 +623,7 @@ fn get_ffi_call_error_class(e: &CallError) -> &'static str {
 fn get_webstorage_class_name(e: &WebStorageError) -> &'static str {
   match e {
     WebStorageError::ContextNotSupported => "DOMExceptionNotSupportedError",
-    WebStorageError::Sqlite(_) => todo!(),
+    WebStorageError::Sqlite(_) => "Error",
     WebStorageError::Io(e) => get_io_error_class(e),
     WebStorageError::StorageExceeded => "DOMExceptionQuotaExceededError",
   }
@@ -1051,6 +1051,34 @@ mod node {
   use super::get_serde_json_error_class;
   use super::get_url_parse_error_class;
   pub use deno_node::ops::blocklist::BlocklistError;
+  pub use deno_node::ops::crypto::cipher::CipherContextError;
+  pub use deno_node::ops::crypto::cipher::CipherError;
+  pub use deno_node::ops::crypto::cipher::DecipherContextError;
+  pub use deno_node::ops::crypto::cipher::DecipherError;
+  pub use deno_node::ops::crypto::digest::HashError;
+  pub use deno_node::ops::crypto::keys::AsymmetricPrivateKeyDerError;
+  pub use deno_node::ops::crypto::keys::AsymmetricPrivateKeyError;
+  pub use deno_node::ops::crypto::keys::AsymmetricPublicKeyDerError;
+  pub use deno_node::ops::crypto::keys::AsymmetricPublicKeyError;
+  pub use deno_node::ops::crypto::keys::AsymmetricPublicKeyJwkError;
+  pub use deno_node::ops::crypto::keys::EcJwkError;
+  pub use deno_node::ops::crypto::keys::EdRawError;
+  pub use deno_node::ops::crypto::keys::ExportPrivateKeyPemError;
+  pub use deno_node::ops::crypto::keys::ExportPublicKeyPemError;
+  pub use deno_node::ops::crypto::keys::GenerateRsaPssError;
+  pub use deno_node::ops::crypto::keys::RsaJwkError;
+  pub use deno_node::ops::crypto::keys::RsaPssParamsParseError;
+  pub use deno_node::ops::crypto::keys::X509PublicKeyError;
+  pub use deno_node::ops::crypto::sign::KeyObjectHandlePrehashedSignAndVerifyError;
+  pub use deno_node::ops::crypto::x509::X509Error;
+  pub use deno_node::ops::crypto::DiffieHellmanError;
+  pub use deno_node::ops::crypto::EcdhEncodePubKey;
+  pub use deno_node::ops::crypto::HkdfError;
+  pub use deno_node::ops::crypto::Pbkdf2Error;
+  pub use deno_node::ops::crypto::PrivateEncryptDecryptError;
+  pub use deno_node::ops::crypto::ScryptAsyncError;
+  pub use deno_node::ops::crypto::SignEd25519Error;
+  pub use deno_node::ops::crypto::VerifyEd25519Error;
   pub use deno_node::ops::fs::FsError;
   pub use deno_node::ops::http2::Http2Error;
   pub use deno_node::ops::idna::IdnaError;
@@ -1189,6 +1217,324 @@ mod node {
       ZlibError::Other(e) => get_error_class_name(e).unwrap_or("Error"),
     }
   }
+
+  pub fn get_crypto_cipher_context_error(
+    e: &CipherContextError,
+  ) -> &'static str {
+    match e {
+      CipherContextError::ContextInUse => "TypeError",
+      CipherContextError::Cipher(e) => get_crypto_cipher_error(e),
+      CipherContextError::Resource(e) => {
+        get_error_class_name(e).unwrap_or("Error")
+      }
+    }
+  }
+
+  pub fn get_crypto_cipher_error(e: &CipherError) -> &'static str {
+    match e {
+      CipherError::InvalidIvLength => "TypeError",
+      CipherError::InvalidKeyLength => "RangeError",
+      CipherError::InvalidInitializationVector => "TypeError",
+      CipherError::CannotPadInputData => "TypeError",
+      CipherError::UnknownCipher(_) => "TypeError",
+    }
+  }
+
+  pub fn get_crypto_decipher_context_error(
+    e: &DecipherContextError,
+  ) -> &'static str {
+    match e {
+      DecipherContextError::ContextInUse => "TypeError",
+      DecipherContextError::Decipher(e) => get_crypto_decipher_error(e),
+      DecipherContextError::Resource(e) => {
+        get_error_class_name(e).unwrap_or("Error")
+      }
+    }
+  }
+
+  pub fn get_crypto_decipher_error(e: &DecipherError) -> &'static str {
+    match e {
+      DecipherError::InvalidIvLength => "TypeError",
+      DecipherError::InvalidKeyLength => "RangeError",
+      DecipherError::InvalidInitializationVector => "TypeError",
+      DecipherError::CannotUnpadInputData => "TypeError",
+      DecipherError::DataAuthenticationFailed => "TypeError",
+      DecipherError::SetAutoPaddingFalseAes128GcmUnsupported => "TypeError",
+      DecipherError::SetAutoPaddingFalseAes256GcmUnsupported => "TypeError",
+      DecipherError::UnknownCipher(_) => "TypeError",
+    }
+  }
+
+  pub fn get_x509_error(_: &X509Error) -> &'static str {
+    "Error"
+  }
+
+  pub fn get_crypto_key_object_handle_prehashed_sign_and_verify_error(
+    e: &KeyObjectHandlePrehashedSignAndVerifyError,
+  ) -> &'static str {
+    match e {
+      KeyObjectHandlePrehashedSignAndVerifyError::InvalidDsaSignatureEncoding => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::KeyIsNotPrivate => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(_) => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithRsa => "Error",
+      KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaPssSignature(_) => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithRsaPss => "Error",
+      KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithDsa => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::RsaPssHashAlgorithmUnsupported => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::PrivateKeyDisallowsUsage { .. } => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::X25519KeyCannotBeUsedForSigning => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::Ed25519KeyCannotBeUsedForPrehashedSigning => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::DhKeyCannotBeUsedForSigning => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::KeyIsNotPublicOrPrivate => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::InvalidDsaSignature => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::X25519KeyCannotBeUsedForVerification => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::Ed25519KeyCannotBeUsedForPrehashedVerification => "TypeError",
+      KeyObjectHandlePrehashedSignAndVerifyError::DhKeyCannotBeUsedForVerification => "TypeError",
+    }
+  }
+
+  pub fn get_crypto_hash_error(_: &HashError) -> &'static str {
+    "Error"
+  }
+
+  pub fn get_asymmetric_public_key_jwk_error(
+    e: &AsymmetricPublicKeyJwkError,
+  ) -> &'static str {
+    match e {
+      AsymmetricPublicKeyJwkError::UnsupportedJwkEcCurveP224 => "TypeError",
+      AsymmetricPublicKeyJwkError::JwkExportNotImplementedForKeyType => {
+        "TypeError"
+      }
+      AsymmetricPublicKeyJwkError::KeyIsNotAsymmetricPublicKey => "TypeError",
+    }
+  }
+
+  pub fn get_generate_rsa_pss_error(_: &GenerateRsaPssError) -> &'static str {
+    "TypeError"
+  }
+
+  pub fn get_asymmetric_private_key_der_error(
+    e: &AsymmetricPrivateKeyDerError,
+  ) -> &'static str {
+    match e {
+      AsymmetricPrivateKeyDerError::KeyIsNotAsymmetricPrivateKey => "TypeError",
+      AsymmetricPrivateKeyDerError::InvalidRsaPrivateKey => "TypeError",
+      AsymmetricPrivateKeyDerError::ExportingNonRsaPrivateKeyAsPkcs1Unsupported => "TypeError",
+      AsymmetricPrivateKeyDerError::InvalidEcPrivateKey => "TypeError",
+      AsymmetricPrivateKeyDerError::ExportingNonEcPrivateKeyAsSec1Unsupported => "TypeError",
+      AsymmetricPrivateKeyDerError::ExportingNonRsaPssPrivateKeyAsPkcs8Unsupported => "Error",
+      AsymmetricPrivateKeyDerError::InvalidDsaPrivateKey => "TypeError",
+      AsymmetricPrivateKeyDerError::InvalidX25519PrivateKey => "TypeError",
+      AsymmetricPrivateKeyDerError::InvalidEd25519PrivateKey => "TypeError",
+      AsymmetricPrivateKeyDerError::InvalidDhPrivateKey => "TypeError",
+      AsymmetricPrivateKeyDerError::UnsupportedKeyType(_) => "TypeError",
+    }
+  }
+
+  pub fn get_asymmetric_public_key_der_error(
+    _: &AsymmetricPublicKeyDerError,
+  ) -> &'static str {
+    "TypeError"
+  }
+
+  pub fn get_export_public_key_pem_error(
+    e: &ExportPublicKeyPemError,
+  ) -> &'static str {
+    match e {
+      ExportPublicKeyPemError::AsymmetricPublicKeyDer(e) => {
+        get_asymmetric_public_key_der_error(e)
+      }
+      ExportPublicKeyPemError::VeryLargeData => "TypeError",
+      ExportPublicKeyPemError::Der(_) => "Error",
+    }
+  }
+
+  pub fn get_export_private_key_pem_error(
+    e: &ExportPrivateKeyPemError,
+  ) -> &'static str {
+    match e {
+      ExportPrivateKeyPemError::AsymmetricPublicKeyDer(e) => {
+        get_asymmetric_private_key_der_error(e)
+      }
+      ExportPrivateKeyPemError::VeryLargeData => "TypeError",
+      ExportPrivateKeyPemError::Der(_) => "Error",
+    }
+  }
+
+  pub fn get_x509_public_key_error(e: &X509PublicKeyError) -> &'static str {
+    match e {
+      X509PublicKeyError::X509(_) => "Error",
+      X509PublicKeyError::Rsa(_) => "Error",
+      X509PublicKeyError::Asn1(_) => "Error",
+      X509PublicKeyError::Ec(_) => "Error",
+      X509PublicKeyError::UnsupportedEcNamedCurve => "TypeError",
+      X509PublicKeyError::MissingEcParameters => "TypeError",
+      X509PublicKeyError::MalformedDssPublicKey => "TypeError",
+      X509PublicKeyError::UnsupportedX509KeyType => "TypeError",
+    }
+  }
+
+  pub fn get_rsa_jwk_error(e: &RsaJwkError) -> &'static str {
+    match e {
+      RsaJwkError::Base64(_) => "Error",
+      RsaJwkError::Rsa(_) => "Error",
+      RsaJwkError::MissingRsaPrivateComponent => "TypeError",
+    }
+  }
+
+  pub fn get_ec_jwk_error(e: &EcJwkError) -> &'static str {
+    match e {
+      EcJwkError::Ec(_) => "Error",
+      EcJwkError::UnsupportedCurve(_) => "TypeError",
+    }
+  }
+
+  pub fn get_ed_raw_error(e: &EdRawError) -> &'static str {
+    match e {
+      EdRawError::Ed25519Signature(_) => "Error",
+      EdRawError::InvalidEd25519Key => "TypeError",
+      EdRawError::UnsupportedCurve => "TypeError",
+    }
+  }
+
+  pub fn get_pbkdf2_error(e: &Pbkdf2Error) -> &'static str {
+    match e {
+      Pbkdf2Error::UnsupportedDigest(_) => "TypeError",
+      Pbkdf2Error::Join(_) => "Error",
+    }
+  }
+
+  pub fn get_scrypt_async_error(e: &ScryptAsyncError) -> &'static str {
+    match e {
+      ScryptAsyncError::Join(_) => "Error",
+      ScryptAsyncError::Other(e) => get_error_class_name(e).unwrap_or("Error"),
+    }
+  }
+
+  pub fn get_hkdf_error_error(e: &HkdfError) -> &'static str {
+    match e {
+      HkdfError::ExpectedSecretKey => "TypeError",
+      HkdfError::HkdfExpandFailed => "TypeError",
+      HkdfError::UnsupportedDigest(_) => "TypeError",
+      HkdfError::Join(_) => "Error",
+    }
+  }
+
+  pub fn get_rsa_pss_params_parse_error(
+    _: &RsaPssParamsParseError,
+  ) -> &'static str {
+    "TypeError"
+  }
+
+  pub fn get_asymmetric_private_key_error(
+    e: &AsymmetricPrivateKeyError,
+  ) -> &'static str {
+    match e {
+      AsymmetricPrivateKeyError::InvalidPemPrivateKeyInvalidUtf8(_) => "TypeError",
+      AsymmetricPrivateKeyError::InvalidEncryptedPemPrivateKey => "TypeError",
+      AsymmetricPrivateKeyError::InvalidPemPrivateKey => "TypeError",
+      AsymmetricPrivateKeyError::EncryptedPrivateKeyRequiresPassphraseToDecrypt => "TypeError",
+      AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey => "TypeError",
+      AsymmetricPrivateKeyError::InvalidSec1PrivateKey => "TypeError",
+      AsymmetricPrivateKeyError::UnsupportedPemLabel(_) => "TypeError",
+      AsymmetricPrivateKeyError::RsaPssParamsParse(e) => get_rsa_pss_params_parse_error(e),
+      AsymmetricPrivateKeyError::InvalidEncryptedPkcs8PrivateKey => "TypeError",
+      AsymmetricPrivateKeyError::InvalidPkcs8PrivateKey => "TypeError",
+      AsymmetricPrivateKeyError::Pkcs1PrivateKeyDoesNotSupportEncryptionWithPassphrase => "TypeError",
+      AsymmetricPrivateKeyError::Sec1PrivateKeyDoesNotSupportEncryptionWithPassphrase => "TypeError",
+      AsymmetricPrivateKeyError::UnsupportedEcNamedCurve => "TypeError",
+      AsymmetricPrivateKeyError::InvalidPrivateKey => "TypeError",
+      AsymmetricPrivateKeyError::InvalidDsaPrivateKey => "TypeError",
+      AsymmetricPrivateKeyError::MalformedOrMissingNamedCurveInEcParameters => "TypeError",
+      AsymmetricPrivateKeyError::UnsupportedKeyType(_) => "TypeError",
+      AsymmetricPrivateKeyError::UnsupportedKeyFormat(_) => "TypeError",
+      AsymmetricPrivateKeyError::InvalidX25519PrivateKey => "TypeError",
+      AsymmetricPrivateKeyError::X25519PrivateKeyIsWrongLength => "TypeError",
+      AsymmetricPrivateKeyError::InvalidEd25519PrivateKey => "TypeError",
+      AsymmetricPrivateKeyError::MissingDhParameters => "TypeError",
+      AsymmetricPrivateKeyError::UnsupportedPrivateKeyOid => "TypeError",
+    }
+  }
+
+  pub fn get_asymmetric_public_key_error(
+    e: &AsymmetricPublicKeyError,
+  ) -> &'static str {
+    match e {
+      AsymmetricPublicKeyError::InvalidPemPrivateKeyInvalidUtf8(_) => {
+        "TypeError"
+      }
+      AsymmetricPublicKeyError::InvalidPemPublicKey => "TypeError",
+      AsymmetricPublicKeyError::InvalidPkcs1PublicKey => "TypeError",
+      AsymmetricPublicKeyError::AsymmetricPrivateKey(e) => {
+        get_asymmetric_private_key_error(e)
+      }
+      AsymmetricPublicKeyError::InvalidX509Certificate => "TypeError",
+      AsymmetricPublicKeyError::X509(_) => "Error",
+      AsymmetricPublicKeyError::X509PublicKey(e) => {
+        get_x509_public_key_error(e)
+      }
+      AsymmetricPublicKeyError::UnsupportedPemLabel(_) => "TypeError",
+      AsymmetricPublicKeyError::InvalidSpkiPublicKey => "TypeError",
+      AsymmetricPublicKeyError::UnsupportedKeyType(_) => "TypeError",
+      AsymmetricPublicKeyError::UnsupportedKeyFormat(_) => "TypeError",
+      AsymmetricPublicKeyError::Spki(_) => "Error",
+      AsymmetricPublicKeyError::Pkcs1(_) => "Error",
+      AsymmetricPublicKeyError::RsaPssParamsParse(_) => "TypeError",
+      AsymmetricPublicKeyError::MalformedDssPublicKey => "TypeError",
+      AsymmetricPublicKeyError::MalformedOrMissingNamedCurveInEcParameters => {
+        "TypeError"
+      }
+      AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInEcSpki => {
+        "TypeError"
+      }
+      AsymmetricPublicKeyError::Ec(_) => "Error",
+      AsymmetricPublicKeyError::UnsupportedEcNamedCurve => "TypeError",
+      AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInX25519Spki => {
+        "TypeError"
+      }
+      AsymmetricPublicKeyError::X25519PublicKeyIsTooShort => "TypeError",
+      AsymmetricPublicKeyError::InvalidEd25519PublicKey => "TypeError",
+      AsymmetricPublicKeyError::MissingDhParameters => "TypeError",
+      AsymmetricPublicKeyError::MalformedDhParameters => "TypeError",
+      AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInDhSpki => {
+        "TypeError"
+      }
+      AsymmetricPublicKeyError::UnsupportedPrivateKeyOid => "TypeError",
+    }
+  }
+
+  pub fn get_private_encrypt_decrypt_error(
+    e: &PrivateEncryptDecryptError,
+  ) -> &'static str {
+    match e {
+      PrivateEncryptDecryptError::Pkcs8(_) => "Error",
+      PrivateEncryptDecryptError::Spki(_) => "Error",
+      PrivateEncryptDecryptError::Utf8(_) => "Error",
+      PrivateEncryptDecryptError::Rsa(_) => "Error",
+      PrivateEncryptDecryptError::UnknownPadding => "TypeError",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_ecdh_encode_pub_key_error(e: &EcdhEncodePubKey) -> &'static str {
|
||||||
|
match e {
|
||||||
|
EcdhEncodePubKey::InvalidPublicKey => "TypeError",
|
||||||
|
EcdhEncodePubKey::UnsupportedCurve => "TypeError",
|
||||||
|
EcdhEncodePubKey::Sec1(_) => "Error",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_diffie_hellman_error(_: &DiffieHellmanError) -> &'static str {
|
||||||
|
"TypeError"
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_sign_ed25519_error(_: &SignEd25519Error) -> &'static str {
|
||||||
|
"TypeError"
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get_verify_ed25519_error(_: &VerifyEd25519Error) -> &'static str {
|
||||||
|
"TypeError"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_os_error(error: &OsError) -> &'static str {
|
fn get_os_error(error: &OsError) -> &'static str {
|
||||||
|
@ -1273,6 +1619,114 @@ pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> {
e.downcast_ref::<node::ZlibError>()
.map(node::get_zlib_error)
})
.or_else(|| {
e.downcast_ref::<node::CipherError>()
.map(node::get_crypto_cipher_error)
})
.or_else(|| {
e.downcast_ref::<node::CipherContextError>()
.map(node::get_crypto_cipher_context_error)
})
.or_else(|| {
e.downcast_ref::<node::DecipherError>()
.map(node::get_crypto_decipher_error)
})
.or_else(|| {
e.downcast_ref::<node::DecipherContextError>()
.map(node::get_crypto_decipher_context_error)
})
.or_else(|| {
e.downcast_ref::<node::X509Error>()
.map(node::get_x509_error)
})
.or_else(|| {
e.downcast_ref::<node::KeyObjectHandlePrehashedSignAndVerifyError>()
.map(node::get_crypto_key_object_handle_prehashed_sign_and_verify_error)
})
.or_else(|| {
e.downcast_ref::<node::HashError>()
.map(node::get_crypto_hash_error)
})
.or_else(|| {
e.downcast_ref::<node::AsymmetricPublicKeyJwkError>()
.map(node::get_asymmetric_public_key_jwk_error)
})
.or_else(|| {
e.downcast_ref::<node::GenerateRsaPssError>()
.map(node::get_generate_rsa_pss_error)
})
.or_else(|| {
e.downcast_ref::<node::AsymmetricPrivateKeyDerError>()
.map(node::get_asymmetric_private_key_der_error)
})
.or_else(|| {
e.downcast_ref::<node::AsymmetricPublicKeyDerError>()
.map(node::get_asymmetric_public_key_der_error)
})
.or_else(|| {
e.downcast_ref::<node::ExportPublicKeyPemError>()
.map(node::get_export_public_key_pem_error)
})
.or_else(|| {
e.downcast_ref::<node::ExportPrivateKeyPemError>()
.map(node::get_export_private_key_pem_error)
})
.or_else(|| {
e.downcast_ref::<node::RsaJwkError>()
.map(node::get_rsa_jwk_error)
})
.or_else(|| {
e.downcast_ref::<node::EcJwkError>()
.map(node::get_ec_jwk_error)
})
.or_else(|| {
e.downcast_ref::<node::EdRawError>()
.map(node::get_ed_raw_error)
})
.or_else(|| {
e.downcast_ref::<node::Pbkdf2Error>()
.map(node::get_pbkdf2_error)
})
.or_else(|| {
e.downcast_ref::<node::ScryptAsyncError>()
.map(node::get_scrypt_async_error)
})
.or_else(|| {
e.downcast_ref::<node::HkdfError>()
.map(node::get_hkdf_error_error)
})
.or_else(|| {
e.downcast_ref::<node::RsaPssParamsParseError>()
.map(node::get_rsa_pss_params_parse_error)
})
.or_else(|| {
e.downcast_ref::<node::AsymmetricPrivateKeyError>()
.map(node::get_asymmetric_private_key_error)
})
.or_else(|| {
e.downcast_ref::<node::AsymmetricPublicKeyError>()
.map(node::get_asymmetric_public_key_error)
})
.or_else(|| {
e.downcast_ref::<node::PrivateEncryptDecryptError>()
.map(node::get_private_encrypt_decrypt_error)
})
.or_else(|| {
e.downcast_ref::<node::EcdhEncodePubKey>()
.map(node::get_ecdh_encode_pub_key_error)
})
.or_else(|| {
e.downcast_ref::<node::DiffieHellmanError>()
.map(node::get_diffie_hellman_error)
})
.or_else(|| {
e.downcast_ref::<node::SignEd25519Error>()
.map(node::get_sign_ed25519_error)
})
.or_else(|| {
e.downcast_ref::<node::VerifyEd25519Error>()
.map(node::get_verify_ed25519_error)
})
.or_else(|| e.downcast_ref::<NApiError>().map(get_napi_error_class))
.or_else(|| e.downcast_ref::<WebError>().map(get_web_error_class))
.or_else(|| {
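
Note (not part of the commit): the chain above follows one pattern throughout — try to downcast the boxed error to each known type and map it to a JavaScript error class name, falling back when nothing matches. A minimal, self-contained sketch of that pattern, using hypothetical stand-in error types rather than the real ext/node ones:

use std::error::Error;
use std::fmt;

// Hypothetical stand-ins for the ext/node error types referenced above.
#[derive(Debug)]
struct TypeLikeError;
#[derive(Debug)]
struct GenericLikeError;

impl fmt::Display for TypeLikeError {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    write!(f, "type-like error")
  }
}
impl fmt::Display for GenericLikeError {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    write!(f, "generic-like error")
  }
}
impl Error for TypeLikeError {}
impl Error for GenericLikeError {}

// Same shape as get_error_class_name: probe each known type in turn and
// map it to a class name, otherwise return None.
fn error_class(e: &(dyn Error + 'static)) -> Option<&'static str> {
  e.downcast_ref::<TypeLikeError>()
    .map(|_| "TypeError")
    .or_else(|| e.downcast_ref::<GenericLikeError>().map(|_| "Error"))
}

fn main() {
  let err: Box<dyn Error> = Box::new(TypeLikeError);
  assert_eq!(error_class(err.as_ref()), Some("TypeError"));
}
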
@ -27,7 +27,6 @@ const {
ArrayPrototypeForEach,
ArrayPrototypeIncludes,
ArrayPrototypeMap,
DateNow,
Error,
ErrorPrototype,
FunctionPrototypeBind,

@ -642,7 +641,7 @@ function bootstrapMainRuntime(runtimeOptions, warmup = false) {

removeImportedOps();

performance.setTimeOrigin(DateNow());
performance.setTimeOrigin();
globalThis_ = globalThis;

// Remove bootstrapping data from the global scope

@ -858,7 +857,7 @@ function bootstrapWorkerRuntime(
7: nodeDebug,
} = runtimeOptions;

performance.setTimeOrigin(DateNow());
performance.setTimeOrigin();
globalThis_ = globalThis;

// Remove bootstrapping data from the global scope
@ -2,7 +2,7 @@

[package]
name = "deno_permissions"
version = "0.36.0"
version = "0.37.0"
authors.workspace = true
edition.workspace = true
license.workspace = true

@ -82,6 +82,13 @@ impl deno_node::NodePermissions for Permissions {
) -> Result<(), PermissionCheckError> {
unreachable!("snapshotting!")
}
fn check_net(
&mut self,
_host: (&str, Option<u16>),
_api_name: &str,
) -> Result<(), PermissionCheckError> {
unreachable!("snapshotting!")
}
fn check_read_path<'a>(
&mut self,
_path: &'a Path,
@ -562,7 +562,7 @@ impl WebWorker {
extension_transpiler: Some(Rc::new(|specifier, source| {
maybe_transpile_source(specifier, source)
})),
inspector: services.maybe_inspector_server.is_some(),
inspector: true,
feature_checker: Some(services.feature_checker),
op_metrics_factory_fn,
import_meta_resolve_callback: Some(Box::new(

@ -579,18 +579,18 @@ impl WebWorker {
js_runtime.op_state().borrow_mut().put(op_summary_metrics);
}

// Put inspector handle into the op state so we can put a breakpoint when
// executing a CJS entrypoint.
let op_state = js_runtime.op_state();
let inspector = js_runtime.inspector();
op_state.borrow_mut().put(inspector);

if let Some(server) = services.maybe_inspector_server {
server.register_inspector(
options.main_module.to_string(),
&mut js_runtime,
false,
);

// Put inspector handle into the op state so we can put a breakpoint when
// executing a CJS entrypoint.
let op_state = js_runtime.op_state();
let inspector = js_runtime.inspector();
op_state.borrow_mut().put(inspector);
}

let (internal_handle, external_handle) = {

@ -488,7 +488,7 @@ impl MainWorker {
extension_transpiler: Some(Rc::new(|specifier, source| {
maybe_transpile_source(specifier, source)
})),
inspector: options.maybe_inspector_server.is_some(),
inspector: true,
is_main: true,
feature_checker: Some(services.feature_checker.clone()),
op_metrics_factory_fn,

@ -546,6 +546,12 @@ impl MainWorker {
js_runtime.op_state().borrow_mut().put(op_summary_metrics);
}

// Put inspector handle into the op state so we can put a breakpoint when
// executing a CJS entrypoint.
let op_state = js_runtime.op_state();
let inspector = js_runtime.inspector();
op_state.borrow_mut().put(inspector);

if let Some(server) = options.maybe_inspector_server.clone() {
server.register_inspector(
main_module.to_string(),

@ -553,13 +559,8 @@ impl MainWorker {
options.should_break_on_first_statement
|| options.should_wait_for_inspector_session,
);

// Put inspector handle into the op state so we can put a breakpoint when
// executing a CJS entrypoint.
let op_state = js_runtime.op_state();
let inspector = js_runtime.inspector();
op_state.borrow_mut().put(inspector);
}

let (
bootstrap_fn_global,
dispatch_load_event_fn_global,
@ -47,6 +47,8 @@ deno_tls.workspace = true
fastwebsockets = { workspace = true, features = ["upgrade", "unstable-split"] }
file_test_runner = "0.7.2"
flaky_test = "=0.2.2"
hickory-client = "=0.24"
hickory-server = "=0.24"
http.workspace = true
http-body-util.workspace = true
hyper.workspace = true

@ -60,8 +62,6 @@ serde.workspace = true
test_util.workspace = true
tokio.workspace = true
tower-lsp.workspace = true
trust-dns-client = "=0.23.2"
trust-dns-server = "=0.23.2"
url.workspace = true
uuid = { workspace = true, features = ["serde"] }
zeromq.workspace = true

@ -16,12 +16,11 @@ use deno_tls::rustls;
use deno_tls::rustls::ClientConnection;
use deno_tls::rustls_pemfile;
use deno_tls::TlsStream;
use hickory_client::serialize::txt::Parser;
use pretty_assertions::assert_eq;
use test_util as util;
use test_util::itest;
use test_util::TempDir;
use trust_dns_client::serialize::txt::Lexer;
use trust_dns_client::serialize::txt::Parser;
use util::assert_contains;
use util::assert_not_contains;
use util::PathRef;
@ -2175,6 +2174,11 @@ fn basic_auth_tokens() {

#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_resolve_dns() {
use hickory_server::authority::Catalog;
use hickory_server::authority::ZoneType;
use hickory_server::proto::rr::Name;
use hickory_server::store::in_memory::InMemoryAuthority;
use hickory_server::ServerFuture;
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::Arc;

@ -2182,11 +2186,6 @@ async fn test_resolve_dns() {
use tokio::net::TcpListener;
use tokio::net::UdpSocket;
use tokio::sync::oneshot;
use trust_dns_server::authority::Catalog;
use trust_dns_server::authority::ZoneType;
use trust_dns_server::proto::rr::Name;
use trust_dns_server::store::in_memory::InMemoryAuthority;
use trust_dns_server::ServerFuture;

const DNS_PORT: u16 = 4553;

@ -2196,9 +2195,12 @@ async fn test_resolve_dns() {
util::testdata_path().join("run/resolve_dns.zone.in"),
)
.unwrap();
let lexer = Lexer::new(&zone_file);
let records =
Parser::new().parse(lexer, Some(Name::from_str("example.com").unwrap()));
let records = Parser::new(
&zone_file,
None,
Some(Name::from_str("example.com").unwrap()),
)
.parse();
if records.is_err() {
panic!("failed to parse: {:?}", records.err())
}
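
Note (not part of the commit): the hunks above swap the archived trust-dns crates for their hickory renames, and zone-file parsing changes from the old two-step Lexer + Parser call to a single constructor that takes the zone text directly. A hedged sketch of the new call shape, assuming the hickory 0.24 API exactly as it is used in the test above:

use std::str::FromStr;

use hickory_client::serialize::txt::Parser;
use hickory_server::proto::rr::Name;

fn parse_zone(zone_file: &str) {
  // Parser::new takes the zone text, an optional file path, and an optional
  // origin, replacing the trust-dns Lexer::new + Parser::new().parse(..) pair.
  let origin = Name::from_str("example.com").unwrap();
  let records = Parser::new(zone_file, None, Some(origin)).parse();
  if records.is_err() {
    panic!("failed to parse: {:?}", records.err());
  }
}
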
@ -77,6 +77,7 @@
"test-fs-rmdir-recursive.js",
"test-fs-write-file.js",
"test-http-url.parse-https.request.js",
"test-net-autoselectfamily.js",
"test-net-better-error-messages-path.js",
"test-net-connect-buffer.js",
"test-net-connect-buffer2.js",

@ -404,6 +405,7 @@
"test-http-url.parse-only-support-http-https-protocol.js",
"test-icu-transcode.js",
"test-net-access-byteswritten.js",
"test-net-autoselectfamily.js",
"test-net-better-error-messages-listen-path.js",
"test-net-better-error-messages-path.js",
"test-net-better-error-messages-port-hostname.js",

@ -1767,7 +1767,6 @@ NOTE: This file should not be manually edited. Please edit `tests/node_compat/co
- [parallel/test-net-autoselectfamily-commandline-option.js](https://github.com/nodejs/node/tree/v18.12.1/test/parallel/test-net-autoselectfamily-commandline-option.js)
- [parallel/test-net-autoselectfamily-default.js](https://github.com/nodejs/node/tree/v18.12.1/test/parallel/test-net-autoselectfamily-default.js)
- [parallel/test-net-autoselectfamily-ipv4first.js](https://github.com/nodejs/node/tree/v18.12.1/test/parallel/test-net-autoselectfamily-ipv4first.js)
- [parallel/test-net-autoselectfamily.js](https://github.com/nodejs/node/tree/v18.12.1/test/parallel/test-net-autoselectfamily.js)
- [parallel/test-net-better-error-messages-listen.js](https://github.com/nodejs/node/tree/v18.12.1/test/parallel/test-net-better-error-messages-listen.js)
- [parallel/test-net-binary.js](https://github.com/nodejs/node/tree/v18.12.1/test/parallel/test-net-binary.js)
- [parallel/test-net-bind-twice.js](https://github.com/nodejs/node/tree/v18.12.1/test/parallel/test-net-bind-twice.js)
@ -19,6 +19,7 @@ import { magenta } from "@std/fmt/colors";
import { pooledMap } from "@std/async/pool";
import { dirname, fromFileUrl, join } from "@std/path";
import { assertEquals, fail } from "@std/assert";
import { distinct } from "@std/collections";
import {
config,
getPathsFromTestSuites,

@ -36,6 +37,9 @@ const testPaths = partitionParallelTestPaths(
getPathsFromTestSuites(config.ignore),
),
);
testPaths.sequential = distinct(testPaths.sequential);
testPaths.parallel = distinct(testPaths.parallel);

const cwd = new URL(".", import.meta.url);
const windowsIgnorePaths = new Set(
getPathsFromTestSuites(config.windowsIgnore),

@ -473,6 +473,7 @@ const pwdCommand = isWindows ?

module.exports = {
allowGlobals,
defaultAutoSelectFamilyAttemptTimeout: 2500,
expectsError,
expectWarning,
getArrayBufferViews,

312
tests/node_compat/test/parallel/test-net-autoselectfamily.js
Normal file
|
@ -0,0 +1,312 @@
|
||||||
|
// deno-fmt-ignore-file
|
||||||
|
// deno-lint-ignore-file
|
||||||
|
|
||||||
|
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
|
||||||
|
// Taken from Node 18.12.1
|
||||||
|
// This file is automatically generated by `tests/node_compat/runner/setup.ts`. Do not modify this file manually.
|
||||||
|
|
||||||
|
'use strict';
|
||||||
|
|
||||||
|
const common = require('../common');
|
||||||
|
const { parseDNSPacket, writeDNSPacket } = require('../common/dns');
|
||||||
|
|
||||||
|
const assert = require('assert');
|
||||||
|
const dgram = require('dgram');
|
||||||
|
const { Resolver } = require('dns');
|
||||||
|
const { createConnection, createServer } = require('net');
|
||||||
|
|
||||||
|
// Test that happy eyeballs algorithm is properly implemented.
|
||||||
|
|
||||||
|
// Purposely not using setDefaultAutoSelectFamilyAttemptTimeout here to test the
|
||||||
|
// parameter is correctly used in options.
|
||||||
|
//
|
||||||
|
// Some of the machines in the CI need more time to establish connection
|
||||||
|
const autoSelectFamilyAttemptTimeout = common.defaultAutoSelectFamilyAttemptTimeout;
|
||||||
|
|
||||||
|
function _lookup(resolver, hostname, options, cb) {
|
||||||
|
resolver.resolve(hostname, 'ANY', (err, replies) => {
|
||||||
|
assert.notStrictEqual(options.family, 4);
|
||||||
|
|
||||||
|
if (err) {
|
||||||
|
return cb(err);
|
||||||
|
}
|
||||||
|
|
||||||
|
const hosts = replies
|
||||||
|
.map((r) => ({ address: r.address, family: r.type === 'AAAA' ? 6 : 4 }))
|
||||||
|
.sort((a, b) => b.family - a.family);
|
||||||
|
|
||||||
|
if (options.all === true) {
|
||||||
|
return cb(null, hosts);
|
||||||
|
}
|
||||||
|
|
||||||
|
return cb(null, hosts[0].address, hosts[0].family);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function createDnsServer(ipv6Addrs, ipv4Addrs, cb) {
|
||||||
|
if (!Array.isArray(ipv6Addrs)) {
|
||||||
|
ipv6Addrs = [ipv6Addrs];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!Array.isArray(ipv4Addrs)) {
|
||||||
|
ipv4Addrs = [ipv4Addrs];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a DNS server which replies with a AAAA and a A record for the same host
|
||||||
|
const socket = dgram.createSocket('udp4');
|
||||||
|
|
||||||
|
// TODO(kt3k): We use common.mustCallAtLeast instead of common.mustCall
|
||||||
|
// because Deno sends multiple requests to the DNS server.
|
||||||
|
// This can be addressed if Deno.resolveDns supports ANY record type.
|
||||||
|
// See https://github.com/denoland/deno/issues/14492
|
||||||
|
socket.on('message', common.mustCallAtLeast((msg, { address, port }) => {
|
||||||
|
const parsed = parseDNSPacket(msg);
|
||||||
|
const domain = parsed.questions[0].domain;
|
||||||
|
assert.strictEqual(domain, 'example.org');
|
||||||
|
|
||||||
|
socket.send(writeDNSPacket({
|
||||||
|
id: parsed.id,
|
||||||
|
questions: parsed.questions,
|
||||||
|
answers: [
|
||||||
|
...ipv6Addrs.map((address) => ({ type: 'AAAA', address, ttl: 123, domain: 'example.org' })),
|
||||||
|
...ipv4Addrs.map((address) => ({ type: 'A', address, ttl: 123, domain: 'example.org' })),
|
||||||
|
]
|
||||||
|
}), port, address);
|
||||||
|
}));
|
||||||
|
|
||||||
|
socket.bind(0, () => {
|
||||||
|
const resolver = new Resolver();
|
||||||
|
resolver.setServers([`127.0.0.1:${socket.address().port}`]);
|
||||||
|
|
||||||
|
cb({ dnsServer: socket, lookup: _lookup.bind(null, resolver) });
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that IPV4 is reached if IPV6 is not reachable
|
||||||
|
{
|
||||||
|
createDnsServer('::1', '127.0.0.1', common.mustCall(function({ dnsServer, lookup }) {
|
||||||
|
const ipv4Server = createServer((socket) => {
|
||||||
|
socket.on('data', common.mustCall(() => {
|
||||||
|
socket.write('response-ipv4');
|
||||||
|
socket.end();
|
||||||
|
}));
|
||||||
|
});
|
||||||
|
|
||||||
|
ipv4Server.listen(0, '127.0.0.1', common.mustCall(() => {
|
||||||
|
const port = ipv4Server.address().port;
|
||||||
|
|
||||||
|
const connection = createConnection({
|
||||||
|
host: 'example.org',
|
||||||
|
port: port,
|
||||||
|
lookup,
|
||||||
|
autoSelectFamily: true,
|
||||||
|
autoSelectFamilyAttemptTimeout,
|
||||||
|
});
|
||||||
|
|
||||||
|
let response = '';
|
||||||
|
connection.setEncoding('utf-8');
|
||||||
|
|
||||||
|
connection.on('ready', common.mustCall(() => {
|
||||||
|
assert.deepStrictEqual(connection.autoSelectFamilyAttemptedAddresses, [`::1:${port}`, `127.0.0.1:${port}`]);
|
||||||
|
}));
|
||||||
|
|
||||||
|
connection.on('data', (chunk) => {
|
||||||
|
response += chunk;
|
||||||
|
});
|
||||||
|
|
||||||
|
connection.on('end', common.mustCall(() => {
|
||||||
|
assert.strictEqual(response, 'response-ipv4');
|
||||||
|
ipv4Server.close();
|
||||||
|
dnsServer.close();
|
||||||
|
}));
|
||||||
|
|
||||||
|
connection.write('request');
|
||||||
|
}));
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that only the last successful connection is established.
|
||||||
|
{
|
||||||
|
createDnsServer(
|
||||||
|
['2606:4700::6810:85e5', '2606:4700::6810:84e5', "::1"],
|
||||||
|
// TODO(kt3k): Comment out ipv4 addresses to make the test pass faster.
|
||||||
|
// Enable this when Deno.connect() call becomes cancellable.
|
||||||
|
// See https://github.com/denoland/deno/issues/26819
|
||||||
|
// ['104.20.22.46', '104.20.23.46', '127.0.0.1'],
|
||||||
|
['127.0.0.1'],
|
||||||
|
common.mustCall(function({ dnsServer, lookup }) {
|
||||||
|
const ipv4Server = createServer((socket) => {
|
||||||
|
socket.on('data', common.mustCall(() => {
|
||||||
|
socket.write('response-ipv4');
|
||||||
|
socket.end();
|
||||||
|
}));
|
||||||
|
});
|
||||||
|
|
||||||
|
ipv4Server.listen(0, '127.0.0.1', common.mustCall(() => {
|
||||||
|
const port = ipv4Server.address().port;
|
||||||
|
|
||||||
|
const connection = createConnection({
|
||||||
|
host: 'example.org',
|
||||||
|
port: port,
|
||||||
|
lookup,
|
||||||
|
autoSelectFamily: true,
|
||||||
|
autoSelectFamilyAttemptTimeout,
|
||||||
|
});
|
||||||
|
|
||||||
|
let response = '';
|
||||||
|
connection.setEncoding('utf-8');
|
||||||
|
|
||||||
|
connection.on('ready', common.mustCall(() => {
|
||||||
|
assert.deepStrictEqual(
|
||||||
|
connection.autoSelectFamilyAttemptedAddresses,
|
||||||
|
[
|
||||||
|
`2606:4700::6810:85e5:${port}`,
|
||||||
|
`104.20.22.46:${port}`,
|
||||||
|
`2606:4700::6810:84e5:${port}`,
|
||||||
|
`104.20.23.46:${port}`,
|
||||||
|
`::1:${port}`,
|
||||||
|
`127.0.0.1:${port}`,
|
||||||
|
]
|
||||||
|
);
|
||||||
|
}));
|
||||||
|
|
||||||
|
connection.on('data', (chunk) => {
|
||||||
|
response += chunk;
|
||||||
|
});
|
||||||
|
|
||||||
|
connection.on('end', common.mustCall(() => {
|
||||||
|
assert.strictEqual(response, 'response-ipv4');
|
||||||
|
ipv4Server.close();
|
||||||
|
dnsServer.close();
|
||||||
|
}));
|
||||||
|
|
||||||
|
connection.write('request');
|
||||||
|
}));
|
||||||
|
})
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that IPV4 is NOT reached if IPV6 is reachable
|
||||||
|
if (common.hasIPv6) {
|
||||||
|
createDnsServer('::1', '127.0.0.1', common.mustCall(function({ dnsServer, lookup }) {
|
||||||
|
const ipv4Server = createServer((socket) => {
|
||||||
|
socket.on('data', common.mustNotCall(() => {
|
||||||
|
socket.write('response-ipv4');
|
||||||
|
socket.end();
|
||||||
|
}));
|
||||||
|
});
|
||||||
|
|
||||||
|
const ipv6Server = createServer((socket) => {
|
||||||
|
socket.on('data', common.mustCall(() => {
|
||||||
|
socket.write('response-ipv6');
|
||||||
|
socket.end();
|
||||||
|
}));
|
||||||
|
});
|
||||||
|
|
||||||
|
ipv4Server.listen(0, '127.0.0.1', common.mustCall(() => {
|
||||||
|
const port = ipv4Server.address().port;
|
||||||
|
|
||||||
|
ipv6Server.listen(port, '::1', common.mustCall(() => {
|
||||||
|
const connection = createConnection({
|
||||||
|
host: 'example.org',
|
||||||
|
port,
|
||||||
|
lookup,
|
||||||
|
autoSelectFamily: true,
|
||||||
|
autoSelectFamilyAttemptTimeout,
|
||||||
|
});
|
||||||
|
|
||||||
|
let response = '';
|
||||||
|
connection.setEncoding('utf-8');
|
||||||
|
|
||||||
|
connection.on('ready', common.mustCall(() => {
|
||||||
|
assert.deepStrictEqual(connection.autoSelectFamilyAttemptedAddresses, [`::1:${port}`]);
|
||||||
|
}));
|
||||||
|
|
||||||
|
connection.on('data', (chunk) => {
|
||||||
|
response += chunk;
|
||||||
|
});
|
||||||
|
|
||||||
|
connection.on('end', common.mustCall(() => {
|
||||||
|
assert.strictEqual(response, 'response-ipv6');
|
||||||
|
ipv4Server.close();
|
||||||
|
ipv6Server.close();
|
||||||
|
dnsServer.close();
|
||||||
|
}));
|
||||||
|
|
||||||
|
connection.write('request');
|
||||||
|
}));
|
||||||
|
}));
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that when all errors are returned when no connections succeeded
|
||||||
|
{
|
||||||
|
createDnsServer('::1', '127.0.0.1', common.mustCall(function({ dnsServer, lookup }) {
|
||||||
|
const connection = createConnection({
|
||||||
|
host: 'example.org',
|
||||||
|
port: 10,
|
||||||
|
lookup,
|
||||||
|
autoSelectFamily: true,
|
||||||
|
autoSelectFamilyAttemptTimeout,
|
||||||
|
});
|
||||||
|
|
||||||
|
connection.on('ready', common.mustNotCall());
|
||||||
|
connection.on('error', common.mustCall((error) => {
|
||||||
|
assert.deepStrictEqual(connection.autoSelectFamilyAttemptedAddresses, ['::1:10', '127.0.0.1:10']);
|
||||||
|
assert.strictEqual(error.constructor.name, 'AggregateError');
|
||||||
|
assert.strictEqual(error.errors.length, 2);
|
||||||
|
|
||||||
|
const errors = error.errors.map((e) => e.message);
|
||||||
|
assert.ok(errors.includes('connect ECONNREFUSED 127.0.0.1:10'));
|
||||||
|
|
||||||
|
if (common.hasIPv6) {
|
||||||
|
assert.ok(errors.includes('connect ECONNREFUSED ::1:10'));
|
||||||
|
}
|
||||||
|
|
||||||
|
dnsServer.close();
|
||||||
|
}));
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that the option can be disabled
|
||||||
|
{
|
||||||
|
createDnsServer('::1', '127.0.0.1', common.mustCall(function({ dnsServer, lookup }) {
|
||||||
|
const ipv4Server = createServer((socket) => {
|
||||||
|
socket.on('data', common.mustCall(() => {
|
||||||
|
socket.write('response-ipv4');
|
||||||
|
socket.end();
|
||||||
|
}));
|
||||||
|
});
|
||||||
|
|
||||||
|
ipv4Server.listen(0, '127.0.0.1', common.mustCall(() => {
|
||||||
|
const port = ipv4Server.address().port;
|
||||||
|
|
||||||
|
const connection = createConnection({
|
||||||
|
host: 'example.org',
|
||||||
|
port,
|
||||||
|
lookup,
|
||||||
|
autoSelectFamily: false,
|
||||||
|
});
|
||||||
|
|
||||||
|
connection.on('ready', common.mustNotCall());
|
||||||
|
connection.on('error', common.mustCall((error) => {
|
||||||
|
assert.strictEqual(connection.autoSelectFamilyAttemptedAddresses, undefined);
|
||||||
|
|
||||||
|
if (common.hasIPv6) {
|
||||||
|
assert.strictEqual(error.code, 'ECONNREFUSED');
|
||||||
|
assert.strictEqual(error.message, `connect ECONNREFUSED ::1:${port}`);
|
||||||
|
} else if (error.code === 'EAFNOSUPPORT') {
|
||||||
|
assert.strictEqual(error.message, `connect EAFNOSUPPORT ::1:${port} - Local (undefined:undefined)`);
|
||||||
|
} else if (error.code === 'EUNATCH') {
|
||||||
|
assert.strictEqual(error.message, `connect EUNATCH ::1:${port} - Local (:::0)`);
|
||||||
|
} else {
|
||||||
|
assert.strictEqual(error.code, 'EADDRNOTAVAIL');
|
||||||
|
assert.strictEqual(error.message, `connect EADDRNOTAVAIL ::1:${port} - Local (:::0)`);
|
||||||
|
}
|
||||||
|
|
||||||
|
ipv4Server.close();
|
||||||
|
dnsServer.close();
|
||||||
|
}));
|
||||||
|
}));
|
||||||
|
}));
|
||||||
|
}
|
|
@ -2,7 +2,7 @@
|
||||||
"name": "@denotest/node-addon",
|
"name": "@denotest/node-addon",
|
||||||
"version": "1.0.0",
|
"version": "1.0.0",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"install": "node-gyp configure build"
|
"install": "node-gyp configure --verbose build"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"node-gyp": "10.1.0"
|
"node-gyp": "10.1.0"
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
<div class="container">content</div>
|
<div class="container">content<br></div>
|
||||||
|
|
||||||
<script>
|
<script>
|
||||||
let counter = 0;
|
let counter = 0;
|
||||||
|
|
|
@ -0,0 +1,9 @@
|
||||||
|
{
|
||||||
|
"tempDir": true,
|
||||||
|
"steps": [
|
||||||
|
{
|
||||||
|
"args": "install",
|
||||||
|
"output": "install.out"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
|
@ -0,0 +1,6 @@
|
||||||
|
{
|
||||||
|
"workspace": ["packages/foo", "packages/bar"],
|
||||||
|
"imports": {
|
||||||
|
"@denotest/subtract": "jsr:@denotest/subtract@^1.0.0"
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,10 @@
|
||||||
|
[UNORDERED_START]
|
||||||
|
Download http://localhost:4260/@denotest%2fesm-basic
|
||||||
|
Download http://localhost:4260/@denotest/esm-basic/1.0.0.tgz
|
||||||
|
Download http://127.0.0.1:4250/@denotest/subtract/meta.json
|
||||||
|
Download http://127.0.0.1:4250/@denotest/add/meta.json
|
||||||
|
Download http://127.0.0.1:4250/@denotest/add/1.0.0_meta.json
|
||||||
|
Download http://127.0.0.1:4250/@denotest/subtract/1.0.0_meta.json
|
||||||
|
Download http://127.0.0.1:4250/@denotest/add/1.0.0/mod.ts
|
||||||
|
Download http://127.0.0.1:4250/@denotest/subtract/1.0.0/mod.ts
|
||||||
|
[UNORDERED_END]
|
|
@ -0,0 +1,5 @@
|
||||||
|
{
|
||||||
|
"imports": {
|
||||||
|
"@denotest/esm-basic": "npm:@denotest/esm-basic@^1.0.0"
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,5 @@
|
||||||
|
{
|
||||||
|
"imports": {
|
||||||
|
"@denotest/add": "jsr:@denotest/add@^1.0.0"
|
||||||
|
}
|
||||||
|
}
|
BIN
tests/testdata/assets/node-gyp/node-v20.11.1-headers.tar.gz
vendored
Normal file
BIN
tests/testdata/assets/node-gyp/node-v20.11.1-headers.tar.gz
vendored
Normal file
Binary file not shown.
BIN
tests/testdata/assets/node-gyp/v20.11.1__win-x64__node.lib.tar.gz
vendored
Normal file
BIN
tests/testdata/assets/node-gyp/v20.11.1__win-x64__node.lib.tar.gz
vendored
Normal file
Binary file not shown.
|
@ -14,27 +14,31 @@ Deno.test(
|
||||||
const enc = new TextEncoder();
|
const enc = new TextEncoder();
|
||||||
const cwd = await Deno.makeTempDir({ prefix: "deno_command_test" });
|
const cwd = await Deno.makeTempDir({ prefix: "deno_command_test" });
|
||||||
|
|
||||||
|
const exitCodeFileLock = "deno_was_here.lock";
|
||||||
const exitCodeFile = "deno_was_here";
|
const exitCodeFile = "deno_was_here";
|
||||||
const programFile = "poll_exit.ts";
|
const programFile = "poll_exit.ts";
|
||||||
const program = `
|
const program = `
|
||||||
|
const file = await Deno.open("${exitCodeFileLock}", { write: true, create: true });
|
||||||
async function tryExit() {
|
async function tryExit() {
|
||||||
|
await file.lock(true);
|
||||||
try {
|
try {
|
||||||
const code = parseInt(await Deno.readTextFile("${exitCodeFile}"));
|
const code = parseInt(await Deno.readTextFile("${exitCodeFile}"));
|
||||||
Deno.exit(code);
|
Deno.exit(code);
|
||||||
} catch {
|
} catch {
|
||||||
// Retry if we got here before deno wrote the file.
|
// Retry if we got here before deno wrote the file.
|
||||||
setTimeout(tryExit, 0.01);
|
setTimeout(tryExit, 0.01);
|
||||||
|
} finally {
|
||||||
|
await file.unlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
tryExit();
|
tryExit();
|
||||||
`;
|
`;
|
||||||
|
|
||||||
Deno.writeFileSync(`${cwd}/${programFile}`, enc.encode(program));
|
Deno.writeFileSync(`${cwd}/${programFile}`, enc.encode(program));
|
||||||
|
|
||||||
const command = new Deno.Command(Deno.execPath(), {
|
const command = new Deno.Command(Deno.execPath(), {
|
||||||
cwd,
|
cwd,
|
||||||
args: ["run", "--allow-read", programFile],
|
args: ["run", "-RW", programFile],
|
||||||
stdout: "inherit",
|
stdout: "inherit",
|
||||||
stderr: "inherit",
|
stderr: "inherit",
|
||||||
});
|
});
|
||||||
|
@ -43,12 +47,18 @@ tryExit();
|
||||||
// Write the expected exit code *after* starting deno.
|
// Write the expected exit code *after* starting deno.
|
||||||
// This is how we verify that `Child` is actually asynchronous.
|
// This is how we verify that `Child` is actually asynchronous.
|
||||||
const code = 84;
|
const code = 84;
|
||||||
Deno.writeFileSync(`${cwd}/${exitCodeFile}`, enc.encode(`${code}`));
|
|
||||||
|
|
||||||
|
await using file = await Deno.open(`${cwd}/${exitCodeFileLock}`, {
|
||||||
|
write: true,
|
||||||
|
create: true,
|
||||||
|
});
|
||||||
|
await file.lock(true);
|
||||||
|
Deno.writeFileSync(`${cwd}/${exitCodeFile}`, enc.encode(`${code}`));
|
||||||
|
await file.unlock();
|
||||||
const status = await child.status;
|
const status = await child.status;
|
||||||
await Deno.remove(cwd, { recursive: true });
|
await Deno.remove(cwd, { recursive: true });
|
||||||
assertEquals(status.success, false);
|
|
||||||
assertEquals(status.code, code);
|
assertEquals(status.code, code);
|
||||||
|
assertEquals(status.success, false);
|
||||||
assertEquals(status.signal, null);
|
assertEquals(status.signal, null);
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
|
|
@ -4270,3 +4270,60 @@ Deno.test({
|
||||||
assertEquals(hostname, "0.0.0.0");
|
assertEquals(hostname, "0.0.0.0");
|
||||||
await server.shutdown();
|
await server.shutdown();
|
||||||
});
|
});
|
||||||
|
|
||||||
|
Deno.test({
|
||||||
|
name: "AbortSignal aborted when request is cancelled",
|
||||||
|
}, async () => {
|
||||||
|
const { promise, resolve } = Promise.withResolvers<void>();
|
||||||
|
|
||||||
|
let cancelled = false;
|
||||||
|
|
||||||
|
const server = Deno.serve({
|
||||||
|
hostname: "0.0.0.0",
|
||||||
|
port: servePort,
|
||||||
|
onListen: () => resolve(),
|
||||||
|
}, async (request) => {
|
||||||
|
request.signal.addEventListener("abort", () => cancelled = true);
|
||||||
|
assert(!request.signal.aborted);
|
||||||
|
await new Promise((resolve) => setTimeout(resolve, 3000)); // abort during waiting
|
||||||
|
assert(request.signal.aborted);
|
||||||
|
return new Response("Ok");
|
||||||
|
});
|
||||||
|
|
||||||
|
await promise;
|
||||||
|
await fetch(`http://localhost:${servePort}/`, {
|
||||||
|
signal: AbortSignal.timeout(1000),
|
||||||
|
}).catch(() => {});
|
||||||
|
|
||||||
|
await server.shutdown();
|
||||||
|
|
||||||
|
assert(cancelled);
|
||||||
|
});
|
||||||
|
|
||||||
|
Deno.test({
|
||||||
|
name: "AbortSignal event aborted when request is cancelled",
|
||||||
|
}, async () => {
|
||||||
|
const { promise, resolve } = Promise.withResolvers<void>();
|
||||||
|
|
||||||
|
const server = Deno.serve({
|
||||||
|
hostname: "0.0.0.0",
|
||||||
|
port: servePort,
|
||||||
|
onListen: () => resolve(),
|
||||||
|
}, async (request) => {
|
||||||
|
const { promise: promiseAbort, resolve: resolveAbort } = Promise
|
||||||
|
.withResolvers<void>();
|
||||||
|
request.signal.addEventListener("abort", () => resolveAbort());
|
||||||
|
assert(!request.signal.aborted);
|
||||||
|
|
||||||
|
await promiseAbort;
|
||||||
|
|
||||||
|
return new Response("Ok");
|
||||||
|
});
|
||||||
|
|
||||||
|
await promise;
|
||||||
|
await fetch(`http://localhost:${servePort}/`, {
|
||||||
|
signal: AbortSignal.timeout(100),
|
||||||
|
}).catch(() => {});
|
||||||
|
|
||||||
|
await server.shutdown();
|
||||||
|
});
|
||||||
|
|
|
@ -10,6 +10,11 @@ import * as net from "node:net";
|
||||||
import { assert, assertEquals } from "@std/assert";
|
import { assert, assertEquals } from "@std/assert";
|
||||||
import { curlRequest } from "../unit/test_util.ts";
|
import { curlRequest } from "../unit/test_util.ts";
|
||||||
|
|
||||||
|
// Increase the timeout for the auto select family to avoid flakiness
|
||||||
|
net.setDefaultAutoSelectFamilyAttemptTimeout(
|
||||||
|
net.getDefaultAutoSelectFamilyAttemptTimeout() * 30,
|
||||||
|
);
|
||||||
|
|
||||||
for (const url of ["http://localhost:4246", "https://localhost:4247"]) {
|
for (const url of ["http://localhost:4246", "https://localhost:4247"]) {
|
||||||
Deno.test(`[node/http2 client] ${url}`, {
|
Deno.test(`[node/http2 client] ${url}`, {
|
||||||
ignore: Deno.build.os === "windows",
|
ignore: Deno.build.os === "windows",
|
||||||
|
|
|
@ -10,6 +10,7 @@ import {
|
||||||
createBrotliCompress,
|
createBrotliCompress,
|
||||||
createBrotliDecompress,
|
createBrotliDecompress,
|
||||||
createDeflate,
|
createDeflate,
|
||||||
|
gzip,
|
||||||
gzipSync,
|
gzipSync,
|
||||||
unzipSync,
|
unzipSync,
|
||||||
} from "node:zlib";
|
} from "node:zlib";
|
||||||
|
@ -210,3 +211,17 @@ Deno.test("createBrotliCompress params", async () => {
|
||||||
);
|
);
|
||||||
assertEquals(output.length, input.length);
|
assertEquals(output.length, input.length);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
Deno.test("gzip() and gzipSync() accept ArrayBuffer", async () => {
|
||||||
|
const deffered = Promise.withResolvers<void>();
|
||||||
|
const buf = new ArrayBuffer(0);
|
||||||
|
let output: Buffer;
|
||||||
|
gzip(buf, (_err, data) => {
|
||||||
|
output = data;
|
||||||
|
deffered.resolve();
|
||||||
|
});
|
||||||
|
await deffered.promise;
|
||||||
|
assert(output! instanceof Buffer);
|
||||||
|
const outputSync = gzipSync(buf);
|
||||||
|
assert(outputSync instanceof Buffer);
|
||||||
|
});
|
||||||
|
|
|
@ -21,6 +21,7 @@ bytes.workspace = true
console_static_text.workspace = true
deno_unsync = "0"
denokv_proto.workspace = true
faster-hex.workspace = true
fastwebsockets.workspace = true
flate2 = { workspace = true, features = ["default"] }
futures.workspace = true

@ -28,6 +28,7 @@ use crate::fs::PathRef;
use crate::http_server;
use crate::jsr_registry_unset_url;
use crate::lsp::LspClientBuilder;
use crate::nodejs_org_mirror_unset_url;
use crate::npm_registry_unset_url;
use crate::pty::Pty;
use crate::strip_ansi_codes;

@ -843,6 +844,12 @@ impl TestCommandBuilder {
if !envs.contains_key("JSR_URL") {
envs.insert("JSR_URL".to_string(), jsr_registry_unset_url());
}
if !envs.contains_key("NODEJS_ORG_MIRROR") {
envs.insert(
"NODEJS_ORG_MIRROR".to_string(),
nodejs_org_mirror_unset_url(),
);
}
for key in &self.envs_remove {
envs.remove(key);
}
@ -52,6 +52,7 @@ static GUARD: Lazy<Mutex<HttpServerCount>> = Lazy::new(Default::default);
pub fn env_vars_for_npm_tests() -> Vec<(String, String)> {
vec![
("NPM_CONFIG_REGISTRY".to_string(), npm_registry_url()),
("NODEJS_ORG_MIRROR".to_string(), nodejs_org_mirror_url()),
("NO_COLOR".to_string(), "1".to_string()),
]
}

@ -130,6 +131,7 @@ pub fn env_vars_for_jsr_npm_tests() -> Vec<(String, String)> {
),
("DISABLE_JSR_PROVENANCE".to_string(), "true".to_string()),
("NO_COLOR".to_string(), "1".to_string()),
("NODEJS_ORG_MIRROR".to_string(), nodejs_org_mirror_url()),
]
}

@ -175,27 +177,41 @@ pub fn deno_config_path() -> PathRef {

/// Test server registry url.
pub fn npm_registry_url() -> String {
"http://localhost:4260/".to_string()
format!("http://localhost:{}/", servers::PUBLIC_NPM_REGISTRY_PORT)
}

pub fn npm_registry_unset_url() -> String {
"http://NPM_CONFIG_REGISTRY.is.unset".to_string()
}

pub fn nodejs_org_mirror_url() -> String {
format!(
"http://127.0.0.1:{}/",
servers::NODEJS_ORG_MIRROR_SERVER_PORT
)
}

pub fn nodejs_org_mirror_unset_url() -> String {
"http://NODEJS_ORG_MIRROR.is.unset".to_string()
}

pub fn jsr_registry_url() -> String {
"http://127.0.0.1:4250/".to_string()
format!("http://127.0.0.1:{}/", servers::JSR_REGISTRY_SERVER_PORT)
}

pub fn rekor_url() -> String {
"http://127.0.0.1:4251".to_string()
format!("http://127.0.0.1:{}", servers::PROVENANCE_MOCK_SERVER_PORT)
}

pub fn fulcio_url() -> String {
"http://127.0.0.1:4251".to_string()
format!("http://127.0.0.1:{}", servers::PROVENANCE_MOCK_SERVER_PORT)
}

pub fn gha_token_url() -> String {
"http://127.0.0.1:4251/gha_oidc?test=true".to_string()
format!(
"http://127.0.0.1:{}/gha_oidc?test=true",
servers::PROVENANCE_MOCK_SERVER_PORT
)
}

pub fn jsr_registry_unset_url() -> String {

@ -307,7 +323,7 @@ async fn get_tcp_listener_stream(
futures::stream::select_all(listeners)
}

pub const TEST_SERVERS_COUNT: usize = 32;
pub const TEST_SERVERS_COUNT: usize = 33;

#[derive(Default)]
struct HttpServerCount {

@ -565,6 +581,7 @@ pub fn deno_cmd_with_deno_dir(deno_dir: &TempDir) -> TestCommandBuilder {
TestCommandBuilder::new(deno_dir.clone())
.env("DENO_DIR", deno_dir.path())
.env("NPM_CONFIG_REGISTRY", npm_registry_unset_url())
.env("NODEJS_ORG_MIRROR", nodejs_org_mirror_unset_url())
.env("JSR_URL", jsr_registry_unset_url())
}
@ -39,6 +39,7 @@ use tokio::net::TcpStream;
mod grpc;
mod hyper_utils;
mod jsr_registry;
mod nodejs_org_mirror;
mod npm_registry;
mod ws;

@ -86,8 +87,9 @@ const WS_CLOSE_PORT: u16 = 4244;
const WS_PING_PORT: u16 = 4245;
const H2_GRPC_PORT: u16 = 4246;
const H2S_GRPC_PORT: u16 = 4247;
const JSR_REGISTRY_SERVER_PORT: u16 = 4250;
pub(crate) const JSR_REGISTRY_SERVER_PORT: u16 = 4250;
const PROVENANCE_MOCK_SERVER_PORT: u16 = 4251;
pub(crate) const PROVENANCE_MOCK_SERVER_PORT: u16 = 4251;
pub(crate) const NODEJS_ORG_MIRROR_SERVER_PORT: u16 = 4252;
pub(crate) const PUBLIC_NPM_REGISTRY_PORT: u16 = 4260;
pub(crate) const PRIVATE_NPM_REGISTRY_1_PORT: u16 = 4261;
pub(crate) const PRIVATE_NPM_REGISTRY_2_PORT: u16 = 4262;

@ -147,6 +149,10 @@ pub async fn run_all_servers() {
let private_npm_registry_3_server_futs =
npm_registry::private_npm_registry3(PRIVATE_NPM_REGISTRY_3_PORT);

// for serving node header files to node-gyp in tests
let node_js_mirror_server_fut =
nodejs_org_mirror::nodejs_org_mirror(NODEJS_ORG_MIRROR_SERVER_PORT);

let mut futures = vec![
redirect_server_fut.boxed_local(),
ws_server_fut.boxed_local(),

@ -172,6 +178,7 @@ pub async fn run_all_servers() {
h2_grpc_server_fut.boxed_local(),
registry_server_fut.boxed_local(),
provenance_mock_server_fut.boxed_local(),
node_js_mirror_server_fut.boxed_local(),
];
futures.extend(npm_registry_server_futs);
futures.extend(private_npm_registry_1_server_futs);

245
tests/util/server/src/servers/nodejs_org_mirror.rs
Normal file
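
Note (not part of the commit): the new nodejs_org_mirror.rs server introduced below serves node-gyp header tarballs and builds SHASUMS-style checksum lines with sha2 plus the newly added faster-hex dependency. A condensed, hedged sketch of that checksum helper, assuming the same crate APIs the file below relies on:

// Sketch only: mirrors the sha2 + faster_hex usage in NodeJsMirror::get_checksum.
fn checksum_line(file_name: &str, bytes: &[u8]) -> String {
  use sha2::Digest;

  let mut hasher = sha2::Sha256::new();
  hasher.update(bytes);
  let checksum = faster_hex::hex_string(hasher.finalize().as_ref());
  // The mirror emits "<checksum> <file>" lines, one per served artifact.
  format!("{checksum} {file_name}")
}
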
@@ -0,0 +1,245 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

//! Server for NodeJS header tarballs, used by `node-gyp` in tests to download headers
//!
//! Loads from `testdata/assets`, if we update our node version in `process.versions` we'll need to
//! update the header tarball there.

#![allow(clippy::print_stderr)]

use std::collections::HashMap;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::sync::LazyLock;

use bytes::Bytes;
use http::Response;
use http::StatusCode;
use http_body_util::combinators::UnsyncBoxBody;
use http_body_util::Full;
use parking_lot::Mutex;

use crate::servers::hyper_utils::run_server;
use crate::servers::hyper_utils::ServerKind;
use crate::servers::hyper_utils::ServerOptions;
use crate::servers::string_body;
use crate::testdata_path;
use crate::PathRef;

/// a little helper extension trait to log errors but convert to option
trait OkWarn<T, E> {
  fn ok_warn(self) -> Option<T>;
}

impl<T, E> OkWarn<T, E> for Result<T, E>
where
  E: std::fmt::Display,
{
  fn ok_warn(self) -> Option<T> {
    self
      .inspect_err(|err| {
        eprintln!(
          "test_server warning: error occurred in nodejs_org_mirror.rs: {err}"
        )
      })
      .ok()
  }
}

pub static NODEJS_MIRROR: LazyLock<NodeJsMirror> =
  LazyLock::new(NodeJsMirror::default);

#[derive(Default)]
pub struct NodeJsMirror {
  cache: Mutex<HashMap<String, Bytes>>,
  checksum_cache: Mutex<HashMap<String, String>>,
}

fn asset_file_path(file: &str) -> PathRef {
  testdata_path().join("assets").join("node-gyp").join(file)
}

impl NodeJsMirror {
  pub fn get_header_bytes(&self, file: &str) -> Option<Bytes> {
    let mut cache = self.cache.lock();
    let entry = cache.entry(file.to_owned());
    match entry {
      std::collections::hash_map::Entry::Occupied(occupied) => {
        Some(occupied.get().clone())
      }
      std::collections::hash_map::Entry::Vacant(vacant) => {
        let contents = asset_file_path(file);
        let contents = contents
          .read_to_bytes_if_exists()
          .ok_warn()
          .map(Bytes::from)?;
        vacant.insert(contents.clone());
        Some(contents)
      }
    }
  }

  fn get_checksum(&self, file: &str, bytes: Bytes) -> String {
    use sha2::Digest;
    if let Some(checksum) = self.checksum_cache.lock().get(file).cloned() {
      return checksum;
    }
    let mut hasher = sha2::Sha256::new();
    hasher.update(&bytes);
    let checksum = faster_hex::hex_string(hasher.finalize().as_ref());
    self
      .checksum_cache
      .lock()
      .insert(file.to_owned(), checksum.clone());
    checksum
  }

  pub fn get_checksum_file(&self, version: &str) -> Option<String> {
    let mut entries = Vec::with_capacity(2);

    let header_file = header_tar_name(version);
    let header_bytes = self.get_header_bytes(&header_file)?;
    let header_checksum = self.get_checksum(&header_file, header_bytes);
    entries.push((header_file, header_checksum));

    if cfg!(windows) {
      if !cfg!(target_arch = "x86_64") {
        panic!("unsupported target arch on windows, only support x86_64");
      }
      let Some(bytes) = self.get_node_lib_bytes(version, "win-x64") else {
        eprintln!("test server failed to get node lib");
        return None;
      };
      {
        let file = format!("{version}/win-x64/node.lib");
        let checksum = self.get_checksum(&file, bytes);
        let filename_for_checksum =
          file.trim_start_matches(&format!("{version}/"));
        entries.push((filename_for_checksum.to_owned(), checksum));
      }
    }

    Some(
      entries
        .into_iter()
        .map(|(file, checksum)| format!("{checksum} {file}"))
        .collect::<Vec<_>>()
        .join("\n"),
    )
  }

  pub fn get_node_lib_bytes(
    &self,
    version: &str,
    platform: &str,
  ) -> Option<Bytes> {
    let mut cache = self.cache.lock();
    let file_name = format!("{version}/{platform}/node.lib");
    let entry = cache.entry(file_name);
    match entry {
      std::collections::hash_map::Entry::Occupied(occupied) => {
        Some(occupied.get().clone())
      }
      std::collections::hash_map::Entry::Vacant(vacant) => {
        let tarball_filename =
          format!("{version}__{platform}__node.lib.tar.gz");
        let contents = asset_file_path(&tarball_filename);
        let contents = contents.read_to_bytes_if_exists().ok_warn()?;
        let extracted = Bytes::from(extract_tarball(&contents)?);
        vacant.insert(extracted.clone());
        Some(extracted)
      }
    }
  }
}

fn header_tar_name(version: &str) -> String {
  format!("node-{version}-headers.tar.gz")
}

fn extract_tarball(compressed: &[u8]) -> Option<Vec<u8>> {
  let mut out = Vec::with_capacity(compressed.len());
  let decoder = flate2::read::GzDecoder::new(compressed);
  let mut archive = tar::Archive::new(decoder);
  for file in archive.entries().ok_warn()? {
    let mut file = file.ok_warn()?;

    std::io::copy(&mut file, &mut out).ok_warn()?;
  }
  Some(out)
}

/// Server for node JS header tarballs, used by `node-gyp` in tests
pub async fn nodejs_org_mirror(port: u16) {
  let addr = SocketAddr::from(([127, 0, 0, 1], port));

  run_server(
    ServerOptions {
      addr,
      error_msg: "nodejs mirror server error",
      kind: ServerKind::Auto,
    },
    |req| async move {
      let path = req.uri().path();
      if path.contains("-headers.tar.gz")
        || path.contains("SHASUMS256.txt")
        || path.contains("node.lib")
      {
        let mut parts = path.split('/');
        let _ = parts.next(); // empty
        let Some(version) = parts.next() else {
          return not_found(format!("missing node version in path: {path}"));
        };
        let Some(file) = parts.next() else {
          return not_found(format!("missing file version in path: {path}"));
        };
        if file == "SHASUMS256.txt" {
          let Some(checksum_file) = NODEJS_MIRROR.get_checksum_file(version)
          else {
            return not_found(format!("failed to get header checksum: {path}"));
          };
          return Ok(Response::new(string_body(&checksum_file)));
        } else if !file.contains("headers") {
          let platform = file;
          let Some(file) = parts.next() else {
            return not_found("expected file");
          };
          if file != "node.lib" {
            return not_found(format!(
              "unexpected file name, expected node.lib, got: {file}"
            ));
          }
          let Some(bytes) = NODEJS_MIRROR.get_node_lib_bytes(version, platform)
          else {
            return not_found("expected node lib bytes");
          };

          return Ok(Response::new(UnsyncBoxBody::new(Full::new(bytes))));
        }

        let Some(bytes) = NODEJS_MIRROR.get_header_bytes(file) else {
          return not_found(format!(
            "couldn't find headers for version {version}, missing file: {file}"
          ));
        };
        Ok(Response::new(UnsyncBoxBody::new(Full::new(bytes))))
      } else {
        not_found(format!("unexpected request path: {path}"))
      }
    },
  )
  .await
}

fn not_found(
  msg: impl AsRef<str>,
) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
  let msg = msg.as_ref();
  eprintln!(
    "test_server warning: error likely occurred in nodejs_org_mirror.rs: {msg}"
  );
  Response::builder()
    .status(StatusCode::NOT_FOUND)
    .body(string_body(msg))
    .map_err(|e| e.into())
}
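
For context on how this new test server would be exercised, here is a minimal usage sketch. It is not part of the commit: the tokio runtime, the reqwest client, the port number, and the node version string are all illustrative assumptions, and the request only succeeds if a matching header tarball exists under testdata/assets/node-gyp.

// Minimal usage sketch (not part of this commit). Assumes a tokio runtime and
// the `reqwest` crate; the port and version below are placeholders.
async fn fetch_shasums_example() -> Result<(), Box<dyn std::error::Error>> {
  let port = 4045; // arbitrary free port chosen for this sketch
  tokio::spawn(nodejs_org_mirror(port));
  // give the server a moment to bind before sending the first request
  tokio::time::sleep(std::time::Duration::from_millis(100)).await;
  // the handler serves /{version}/SHASUMS256.txt,
  // /{version}/node-{version}-headers.tar.gz and /{version}/{platform}/node.lib
  let url = format!("http://127.0.0.1:{port}/v22.0.0/SHASUMS256.txt");
  let body = reqwest::get(url).await?.text().await?;
  // each line is "<sha256> <file>", as produced by get_checksum_file
  assert!(body.contains("-headers.tar.gz"));
  Ok(())
}

Note that get_checksum_file only adds a node.lib entry when the test server itself is built for Windows x86_64, mirroring what node-gyp downloads on that platform.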