
Merge branch 'main' into support_create_connection

Commit 765c481cb8 by Yoshiya Hinosawa, 2024-10-18 16:17:15 +09:00, committed by GitHub
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
66 changed files with 1030 additions and 612 deletions


@ -5,7 +5,7 @@ import { stringify } from "jsr:@std/yaml@^0.221/stringify";
// Bump this number when you want to purge the cache. // Bump this number when you want to purge the cache.
// Note: the tools/release/01_bump_crate_versions.ts script will update this version // Note: the tools/release/01_bump_crate_versions.ts script will update this version
// automatically via regex, so ensure that this line maintains this format. // automatically via regex, so ensure that this line maintains this format.
const cacheVersion = 20; const cacheVersion = 21;
const ubuntuX86Runner = "ubuntu-22.04"; const ubuntuX86Runner = "ubuntu-22.04";
const ubuntuX86XlRunner = "ubuntu-22.04-xl"; const ubuntuX86XlRunner = "ubuntu-22.04-xl";


@ -361,8 +361,8 @@ jobs:
path: |- path: |-
~/.cargo/registry/index ~/.cargo/registry/index
~/.cargo/registry/cache ~/.cargo/registry/cache
key: '20-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}' key: '21-cargo-home-${{ matrix.os }}-${{ matrix.arch }}-${{ hashFiles(''Cargo.lock'') }}'
restore-keys: '20-cargo-home-${{ matrix.os }}-${{ matrix.arch }}' restore-keys: '21-cargo-home-${{ matrix.os }}-${{ matrix.arch }}'
if: '!(matrix.skip)' if: '!(matrix.skip)'
- name: Restore cache build output (PR) - name: Restore cache build output (PR)
uses: actions/cache/restore@v4 uses: actions/cache/restore@v4
@ -375,7 +375,7 @@ jobs:
!./target/*/*.zip !./target/*/*.zip
!./target/*/*.tar.gz !./target/*/*.tar.gz
key: never_saved key: never_saved
restore-keys: '20-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-' restore-keys: '21-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-'
- name: Apply and update mtime cache - name: Apply and update mtime cache
if: '!(matrix.skip) && (!startsWith(github.ref, ''refs/tags/''))' if: '!(matrix.skip) && (!startsWith(github.ref, ''refs/tags/''))'
uses: ./.github/mtime_cache uses: ./.github/mtime_cache
@ -685,7 +685,7 @@ jobs:
!./target/*/*.zip !./target/*/*.zip
!./target/*/*.sha256sum !./target/*/*.sha256sum
!./target/*/*.tar.gz !./target/*/*.tar.gz
key: '20-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}' key: '21-cargo-target-${{ matrix.os }}-${{ matrix.arch }}-${{ matrix.profile }}-${{ matrix.job }}-${{ github.sha }}'
publish-canary: publish-canary:
name: publish canary name: publish canary
runs-on: ubuntu-22.04 runs-on: ubuntu-22.04

Cargo.lock (generated)

@ -1154,7 +1154,7 @@ dependencies = [
[[package]] [[package]]
name = "deno" name = "deno"
version = "2.0.1" version = "2.0.2"
dependencies = [ dependencies = [
"anstream", "anstream",
"async-trait", "async-trait",
@ -1328,7 +1328,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_bench_util" name = "deno_bench_util"
version = "0.166.0" version = "0.167.0"
dependencies = [ dependencies = [
"bencher", "bencher",
"deno_core", "deno_core",
@ -1337,7 +1337,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_broadcast_channel" name = "deno_broadcast_channel"
version = "0.166.0" version = "0.167.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"deno_core", "deno_core",
@ -1348,7 +1348,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_cache" name = "deno_cache"
version = "0.104.0" version = "0.105.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"deno_core", "deno_core",
@ -1381,7 +1381,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_canvas" name = "deno_canvas"
version = "0.41.0" version = "0.42.0"
dependencies = [ dependencies = [
"deno_core", "deno_core",
"deno_webgpu", "deno_webgpu",
@ -1416,16 +1416,16 @@ dependencies = [
[[package]] [[package]]
name = "deno_console" name = "deno_console"
version = "0.172.0" version = "0.173.0"
dependencies = [ dependencies = [
"deno_core", "deno_core",
] ]
[[package]] [[package]]
name = "deno_core" name = "deno_core"
version = "0.313.0" version = "0.314.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29f36be738d78e39b6603a6b07f1cf91e28baf3681f87205f07482999e0d0bc2" checksum = "1fcd11ab87426c611b7170138a768dad7170c8fb66d8095b773d25e58fd254ea"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"bincode", "bincode",
@ -1461,7 +1461,7 @@ checksum = "a13951ea98c0a4c372f162d669193b4c9d991512de9f2381dd161027f34b26b1"
[[package]] [[package]]
name = "deno_cron" name = "deno_cron"
version = "0.52.0" version = "0.53.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"async-trait", "async-trait",
@ -1474,7 +1474,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_crypto" name = "deno_crypto"
version = "0.186.0" version = "0.187.0"
dependencies = [ dependencies = [
"aes", "aes",
"aes-gcm", "aes-gcm",
@ -1534,7 +1534,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_fetch" name = "deno_fetch"
version = "0.196.0" version = "0.197.0"
dependencies = [ dependencies = [
"base64 0.21.7", "base64 0.21.7",
"bytes", "bytes",
@ -1566,7 +1566,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_ffi" name = "deno_ffi"
version = "0.159.0" version = "0.160.0"
dependencies = [ dependencies = [
"deno_core", "deno_core",
"deno_permissions", "deno_permissions",
@ -1585,7 +1585,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_fs" name = "deno_fs"
version = "0.82.0" version = "0.83.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"base32", "base32",
@ -1635,7 +1635,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_http" name = "deno_http"
version = "0.170.0" version = "0.171.0"
dependencies = [ dependencies = [
"async-compression", "async-compression",
"async-trait", "async-trait",
@ -1674,7 +1674,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_io" name = "deno_io"
version = "0.82.0" version = "0.83.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"deno_core", "deno_core",
@ -1695,7 +1695,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_kv" name = "deno_kv"
version = "0.80.0" version = "0.81.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"async-trait", "async-trait",
@ -1720,6 +1720,7 @@ dependencies = [
"rand", "rand",
"rusqlite", "rusqlite",
"serde", "serde",
"thiserror",
"url", "url",
] ]
@ -1766,11 +1767,12 @@ dependencies = [
[[package]] [[package]]
name = "deno_napi" name = "deno_napi"
version = "0.103.0" version = "0.104.0"
dependencies = [ dependencies = [
"deno_core", "deno_core",
"deno_permissions", "deno_permissions",
"libloading 0.7.4", "libloading 0.7.4",
"thiserror",
] ]
[[package]] [[package]]
@ -1788,7 +1790,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_net" name = "deno_net"
version = "0.164.0" version = "0.165.0"
dependencies = [ dependencies = [
"deno_core", "deno_core",
"deno_permissions", "deno_permissions",
@ -1797,6 +1799,7 @@ dependencies = [
"rustls-tokio-stream", "rustls-tokio-stream",
"serde", "serde",
"socket2", "socket2",
"thiserror",
"tokio", "tokio",
"trust-dns-proto", "trust-dns-proto",
"trust-dns-resolver", "trust-dns-resolver",
@ -1804,7 +1807,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_node" name = "deno_node"
version = "0.109.0" version = "0.110.0"
dependencies = [ dependencies = [
"aead-gcm-stream", "aead-gcm-stream",
"aes", "aes",
@ -1913,9 +1916,9 @@ dependencies = [
[[package]] [[package]]
name = "deno_ops" name = "deno_ops"
version = "0.189.0" version = "0.190.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8f998ad1d5b36064109367ffe67b1088385eb3d8025efc95e445bc013a147a2" checksum = "a48a3e06cace18a2c49e148da067678c6af80e70757a8c3991301397cf6b9919"
dependencies = [ dependencies = [
"proc-macro-rules", "proc-macro-rules",
"proc-macro2", "proc-macro2",
@ -1953,7 +1956,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_permissions" name = "deno_permissions"
version = "0.32.0" version = "0.33.0"
dependencies = [ dependencies = [
"deno_core", "deno_core",
"deno_path_util", "deno_path_util",
@ -1970,7 +1973,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_resolver" name = "deno_resolver"
version = "0.4.0" version = "0.5.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"base32", "base32",
@ -1986,7 +1989,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_runtime" name = "deno_runtime"
version = "0.181.0" version = "0.182.0"
dependencies = [ dependencies = [
"color-print", "color-print",
"deno_ast", "deno_ast",
@ -2102,7 +2105,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_tls" name = "deno_tls"
version = "0.159.0" version = "0.160.0"
dependencies = [ dependencies = [
"deno_core", "deno_core",
"deno_native_certs", "deno_native_certs",
@ -2151,7 +2154,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_url" name = "deno_url"
version = "0.172.0" version = "0.173.0"
dependencies = [ dependencies = [
"deno_bench_util", "deno_bench_util",
"deno_console", "deno_console",
@ -2163,7 +2166,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_web" name = "deno_web"
version = "0.203.0" version = "0.204.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"base64-simd 0.8.0", "base64-simd 0.8.0",
@ -2178,13 +2181,14 @@ dependencies = [
"flate2", "flate2",
"futures", "futures",
"serde", "serde",
"thiserror",
"tokio", "tokio",
"uuid", "uuid",
] ]
[[package]] [[package]]
name = "deno_webgpu" name = "deno_webgpu"
version = "0.139.0" version = "0.140.0"
dependencies = [ dependencies = [
"deno_core", "deno_core",
"raw-window-handle", "raw-window-handle",
@ -2196,7 +2200,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_webidl" name = "deno_webidl"
version = "0.172.0" version = "0.173.0"
dependencies = [ dependencies = [
"deno_bench_util", "deno_bench_util",
"deno_core", "deno_core",
@ -2204,7 +2208,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_websocket" name = "deno_websocket"
version = "0.177.0" version = "0.178.0"
dependencies = [ dependencies = [
"bytes", "bytes",
"deno_core", "deno_core",
@ -2225,7 +2229,7 @@ dependencies = [
[[package]] [[package]]
name = "deno_webstorage" name = "deno_webstorage"
version = "0.167.0" version = "0.168.0"
dependencies = [ dependencies = [
"deno_core", "deno_core",
"deno_web", "deno_web",
@ -4511,7 +4515,7 @@ dependencies = [
[[package]] [[package]]
name = "napi_sym" name = "napi_sym"
version = "0.102.0" version = "0.103.0"
dependencies = [ dependencies = [
"quote", "quote",
"serde", "serde",
@ -4580,7 +4584,7 @@ dependencies = [
[[package]] [[package]]
name = "node_resolver" name = "node_resolver"
version = "0.11.0" version = "0.12.0"
dependencies = [ dependencies = [
"anyhow", "anyhow",
"async-trait", "async-trait",
@ -6211,9 +6215,9 @@ dependencies = [
[[package]] [[package]]
name = "serde_v8" name = "serde_v8"
version = "0.222.0" version = "0.223.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "27130b5cd87f6f06228940a1f3a7ecc988ea13d1bede1398a48d74cb59dabc9a" checksum = "c127bb9f2024433d06789b242477c808fd7f7dc4c3278576dd5bc99c4e5c75ff"
dependencies = [ dependencies = [
"num-bigint", "num-bigint",
"serde", "serde",


@ -46,18 +46,18 @@ repository = "https://github.com/denoland/deno"
[workspace.dependencies] [workspace.dependencies]
deno_ast = { version = "=0.42.2", features = ["transpiling"] } deno_ast = { version = "=0.42.2", features = ["transpiling"] }
deno_core = { version = "0.313.0" } deno_core = { version = "0.314.1" }
deno_bench_util = { version = "0.166.0", path = "./bench_util" } deno_bench_util = { version = "0.167.0", path = "./bench_util" }
deno_lockfile = "=0.23.1" deno_lockfile = "=0.23.1"
deno_media_type = { version = "0.1.4", features = ["module_specifier"] } deno_media_type = { version = "0.1.4", features = ["module_specifier"] }
deno_npm = "=0.25.4" deno_npm = "=0.25.4"
deno_path_util = "=0.2.1" deno_path_util = "=0.2.1"
deno_permissions = { version = "0.32.0", path = "./runtime/permissions" } deno_permissions = { version = "0.33.0", path = "./runtime/permissions" }
deno_runtime = { version = "0.181.0", path = "./runtime" } deno_runtime = { version = "0.182.0", path = "./runtime" }
deno_semver = "=0.5.14" deno_semver = "=0.5.14"
deno_terminal = "0.2.0" deno_terminal = "0.2.0"
napi_sym = { version = "0.102.0", path = "./cli/napi/sym" } napi_sym = { version = "0.103.0", path = "./cli/napi/sym" }
test_util = { package = "test_server", path = "./tests/util/server" } test_util = { package = "test_server", path = "./tests/util/server" }
denokv_proto = "0.8.1" denokv_proto = "0.8.1"
@ -66,32 +66,32 @@ denokv_remote = "0.8.1"
denokv_sqlite = { default-features = false, version = "0.8.2" } denokv_sqlite = { default-features = false, version = "0.8.2" }
# exts # exts
deno_broadcast_channel = { version = "0.166.0", path = "./ext/broadcast_channel" } deno_broadcast_channel = { version = "0.167.0", path = "./ext/broadcast_channel" }
deno_cache = { version = "0.104.0", path = "./ext/cache" } deno_cache = { version = "0.105.0", path = "./ext/cache" }
deno_canvas = { version = "0.41.0", path = "./ext/canvas" } deno_canvas = { version = "0.42.0", path = "./ext/canvas" }
deno_console = { version = "0.172.0", path = "./ext/console" } deno_console = { version = "0.173.0", path = "./ext/console" }
deno_cron = { version = "0.52.0", path = "./ext/cron" } deno_cron = { version = "0.53.0", path = "./ext/cron" }
deno_crypto = { version = "0.186.0", path = "./ext/crypto" } deno_crypto = { version = "0.187.0", path = "./ext/crypto" }
deno_fetch = { version = "0.196.0", path = "./ext/fetch" } deno_fetch = { version = "0.197.0", path = "./ext/fetch" }
deno_ffi = { version = "0.159.0", path = "./ext/ffi" } deno_ffi = { version = "0.160.0", path = "./ext/ffi" }
deno_fs = { version = "0.82.0", path = "./ext/fs" } deno_fs = { version = "0.83.0", path = "./ext/fs" }
deno_http = { version = "0.170.0", path = "./ext/http" } deno_http = { version = "0.171.0", path = "./ext/http" }
deno_io = { version = "0.82.0", path = "./ext/io" } deno_io = { version = "0.83.0", path = "./ext/io" }
deno_kv = { version = "0.80.0", path = "./ext/kv" } deno_kv = { version = "0.81.0", path = "./ext/kv" }
deno_napi = { version = "0.103.0", path = "./ext/napi" } deno_napi = { version = "0.104.0", path = "./ext/napi" }
deno_net = { version = "0.164.0", path = "./ext/net" } deno_net = { version = "0.165.0", path = "./ext/net" }
deno_node = { version = "0.109.0", path = "./ext/node" } deno_node = { version = "0.110.0", path = "./ext/node" }
deno_tls = { version = "0.159.0", path = "./ext/tls" } deno_tls = { version = "0.160.0", path = "./ext/tls" }
deno_url = { version = "0.172.0", path = "./ext/url" } deno_url = { version = "0.173.0", path = "./ext/url" }
deno_web = { version = "0.203.0", path = "./ext/web" } deno_web = { version = "0.204.0", path = "./ext/web" }
deno_webgpu = { version = "0.139.0", path = "./ext/webgpu" } deno_webgpu = { version = "0.140.0", path = "./ext/webgpu" }
deno_webidl = { version = "0.172.0", path = "./ext/webidl" } deno_webidl = { version = "0.173.0", path = "./ext/webidl" }
deno_websocket = { version = "0.177.0", path = "./ext/websocket" } deno_websocket = { version = "0.178.0", path = "./ext/websocket" }
deno_webstorage = { version = "0.167.0", path = "./ext/webstorage" } deno_webstorage = { version = "0.168.0", path = "./ext/webstorage" }
# resolvers # resolvers
deno_resolver = { version = "0.4.0", path = "./resolvers/deno" } deno_resolver = { version = "0.5.0", path = "./resolvers/deno" }
node_resolver = { version = "0.11.0", path = "./resolvers/node" } node_resolver = { version = "0.12.0", path = "./resolvers/node" }
aes = "=0.8.3" aes = "=0.8.3"
anyhow = "1.0.57" anyhow = "1.0.57"


@ -6,6 +6,21 @@ https://github.com/denoland/deno/releases
We also have one-line install commands at: We also have one-line install commands at:
https://github.com/denoland/deno_install https://github.com/denoland/deno_install
### 2.0.2 / 2024.10.17
- fix(cli): set napi object property properly (#26344)
- fix(ext/node): add null check for kStreamBaseField (#26368)
- fix(install): don't attempt to cache specifiers that point to directories
(#26369)
- fix(jupyter): fix panics for overslow subtraction (#26371)
- fix(jupyter): update to the new logo (#26353)
- fix(net): don't try to set nodelay on upgrade streams (#26342)
- fix(node/fs): copyFile with `COPYFILE_EXCL` should not throw if the
destination doesn't exist (#26360)
- fix(node/http): normalize header names in `ServerResponse` (#26339)
- fix(runtime): send ws ping frames from inspector server (#26352)
- fix: don't warn on ignored signals on windows (#26332)
### 2.0.1 / 2024.10.16 ### 2.0.1 / 2024.10.16
- feat(lsp): "deno/didRefreshDenoConfigurationTree" notifications (#26215) - feat(lsp): "deno/didRefreshDenoConfigurationTree" notifications (#26215)


@ -2,7 +2,7 @@
[package] [package]
name = "deno_bench_util" name = "deno_bench_util"
version = "0.166.0" version = "0.167.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true


@ -2,7 +2,7 @@
[package] [package]
name = "deno" name = "deno"
version = "2.0.1" version = "2.0.2"
authors.workspace = true authors.workspace = true
default-run = "deno" default-run = "deno"
edition.workspace = true edition.workspace = true


@ -333,7 +333,7 @@ impl FileFetcher {
) )
})?; })?;
let bytes = blob.read_all().await?; let bytes = blob.read_all().await;
let headers = let headers =
HashMap::from([("content-type".to_string(), blob.media_type.clone())]); HashMap::from([("content-type".to_string(), blob.media_type.clone())]);


@ -2,7 +2,7 @@
[package] [package]
name = "napi_sym" name = "napi_sym"
version = "0.102.0" version = "0.103.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

Two binary image files changed (not shown): 1 KiB before / 1.4 KiB after, and 2 KiB before / 2.8 KiB after.


@ -1 +1,17 @@
<svg viewBox="0 0 30 30" fill="none" xmlns="http://www.w3.org/2000/svg"><g clip-path="url(#clip0_29_599)"><path d="M15 0C23.2843 0 30 6.71572 30 15C30 23.2843 23.2843 30 15 30C6.71572 30 0 23.2843 0 15C0 6.71572 6.71572 0 15 0Z" fill="currentColor"></path><path d="M14.6635 22.3394C14.2788 22.2357 13.8831 22.4584 13.7705 22.8381L13.7655 22.8558L12.7694 26.5472L12.7649 26.565C12.6711 26.9498 12.9011 27.3414 13.2858 27.4451C13.6704 27.549 14.0661 27.3263 14.1787 26.9465L14.1837 26.9289L15.1797 23.2375L15.1843 23.2196C15.1911 23.1919 15.1962 23.164 15.1997 23.1362L15.2026 23.1084L15.179 22.9888L15.1445 22.8166L15.1227 22.7091C15.076 22.619 15.0111 22.5396 14.932 22.4759C14.853 22.4123 14.7615 22.3658 14.6635 22.3394ZM7.7224 18.5379C7.70424 18.5741 7.68883 18.6123 7.67658 18.6522L7.66967 18.6763L6.67358 22.3677L6.669 22.3856C6.57525 22.7704 6.80524 23.1619 7.1899 23.2657C7.57451 23.3695 7.97026 23.1469 8.08287 22.7671L8.08779 22.7494L8.99096 19.4023C8.51793 19.1518 8.09336 18.8628 7.7224 18.5379ZM5.34707 14.2929C4.9624 14.1891 4.56666 14.4117 4.4541 14.7915L4.44912 14.8092L3.45303 18.5006L3.44846 18.5184C3.35471 18.9032 3.58469 19.2947 3.96936 19.3985C4.35397 19.5023 4.74971 19.2797 4.86232 18.8999L4.86725 18.8822L5.86334 15.1908L5.86791 15.173C5.96166 14.7882 5.73174 14.3967 5.34707 14.2929ZM27.682 13.4546C27.2973 13.3508 26.9015 13.5734 26.789 13.9532L26.784 13.9709L25.7879 17.6623L25.7833 17.6801C25.6896 18.0649 25.9196 18.4564 26.3042 18.5602C26.6889 18.664 27.0846 18.4414 27.1972 18.0616L27.2021 18.0439L28.1982 14.3525L28.2028 14.3347C28.2965 13.9499 28.0666 13.5584 27.682 13.4546ZM3.17781 8.52527C2.34361 10.0444 1.81243 11.7112 1.61377 13.4329C1.7088 13.5412 1.83381 13.619 1.97301 13.6563C2.35768 13.7602 2.75342 13.5375 2.86598 13.1577L2.87096 13.1401L3.86705 9.44865L3.87162 9.43084C3.96537 9.04599 3.73539 8.65447 3.35072 8.5507C3.2943 8.53547 3.23623 8.52694 3.17781 8.52527ZM25.159 8.5507C24.7744 8.44687 24.3786 8.66953 24.266 9.04933L24.2611 9.06697L23.265 12.7584L23.2604 12.7762C23.1667 13.161 23.3966 13.5526 23.7813 13.6563C24.1659 13.7602 24.5617 13.5375 24.6743 13.1577L24.6792 13.1401L25.6753 9.44865L25.6799 9.43084C25.7736 9.04599 25.5436 8.65447 25.159 8.5507Z" fill="white"></path><path d="M7.51285 5.04065C7.12824 4.93682 6.73249 5.15948 6.61988 5.53929L6.61495 5.55692L5.61886 9.24833L5.61429 9.26614C5.52054 9.65098 5.75052 10.0425 6.13519 10.1463C6.5198 10.2501 6.91554 10.0274 7.02816 9.64764L7.03308 9.63001L8.02917 5.9386L8.03374 5.92079C8.12749 5.53595 7.89751 5.14442 7.51285 5.04065ZM20.3116 5.73845C19.9269 5.63462 19.5312 5.85727 19.4186 6.23708L19.4136 6.25471L18.7443 8.73499C19.1779 8.94915 19.5917 9.20126 19.9809 9.48839L20.0453 9.53643L20.8279 6.63639L20.8324 6.61858C20.9262 6.23374 20.6963 5.84221 20.3116 5.73845ZM13.7968 1.57642C13.3296 1.61771 12.8647 1.68338 12.4043 1.77317L12.3066 1.79263L11.3782 5.23419L11.3736 5.252C11.2799 5.63684 11.5099 6.02837 11.8945 6.13214C12.2792 6.23596 12.6749 6.01331 12.7875 5.6335L12.7924 5.61587L13.7885 1.92446L13.7931 1.90665C13.8196 1.79831 13.8209 1.68533 13.7968 1.57642ZM22.9626 4.1263L22.7669 4.85169L22.7623 4.86944C22.6686 5.25429 22.8986 5.64581 23.2832 5.74958C23.6678 5.85341 24.0636 5.63075 24.1762 5.25095L24.1811 5.23331L24.2025 5.15462C23.8362 4.81205 23.4511 4.49009 23.0491 4.19022L22.9626 4.1263ZM17.1672 1.69677L16.8139 3.00593L16.8094 3.02374C16.7156 3.40858 16.9456 3.80011 17.3303 3.90388C17.7149 4.0077 18.1106 3.78505 18.2233 3.40524L18.2282 3.38761L18.6 2.00966C18.1624 1.88867 17.719 1.79001 17.2714 1.71405L17.1672 
1.69677Z" fill="white"></path><path d="M9.69085 24.6253C9.80341 24.2455 10.1992 24.0229 10.5838 24.1266C10.9685 24.2303 11.1984 24.6219 11.1047 25.0068L11.1001 25.0246L10.3872 27.6664L10.2876 27.6297C9.85836 27.4694 9.43765 27.2873 9.0271 27.0839L9.68587 24.6429L9.69085 24.6253Z" fill="white"></path><path d="M14.4141 8.49082C10.0522 8.49082 6.65918 11.2368 6.65918 14.6517C6.65918 17.8769 9.78123 19.9362 14.6211 19.8331C15.0327 19.8243 15.1517 20.1008 15.2856 20.4734C15.4196 20.846 15.7796 22.8097 16.0665 24.3117C16.3233 25.656 16.5842 27.0052 16.7834 28.3596C19.9439 27.9418 22.8663 26.3807 25.0076 24.0261L22.7237 15.5088C22.1544 13.4518 21.489 11.5564 19.7283 10.1794C18.3118 9.07166 16.5122 8.49082 14.4141 8.49082Z" fill="white"></path><path d="M15.3516 10.957C15.8694 10.957 16.2891 11.3767 16.2891 11.8945C16.2891 12.4123 15.8694 12.832 15.3516 12.832C14.8338 12.832 14.4141 12.4123 14.4141 11.8945C14.4141 11.3767 14.8338 10.957 15.3516 10.957Z" fill="currentColor"></path></g><defs><clipPath id="clip0_29_599"><rect width="30" height="30" fill="white"></rect></clipPath></defs></svg> <svg width="100%" height="100%" viewBox="0 0 441 441" version="1.1" xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" xmlns:serif="http://www.serif.com/"
style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2;">
<g transform="matrix(1.02631,-2.08167e-17,2.08167e-17,1.02631,-0.525826,-0.525138)">
<path
d="M37.965,296.635C26.441,271.766 20.009,244.065 20.009,214.873C20.009,207.318 20.439,199.863 21.278,192.531C22.129,185.123 23.39,177.852 25.036,170.742C34.286,130.852 55.801,95.64 85.384,69.301C110.233,47.207 140.674,31.444 174.043,24.299C187.212,21.486 200.872,20.006 214.875,20.006C219.783,20.011 224.727,20.2 229.701,20.579C253.285,22.38 275.571,28.317 295.904,37.625C312.305,45.143 327.486,54.87 341.064,66.426C375.17,95.48 398.957,135.953 406.867,181.369C408.757,192.255 409.742,203.45 409.742,214.873C409.738,219.789 409.548,224.74 409.168,229.721C407.731,248.545 403.659,266.542 397.34,283.379C388.521,306.83 375.308,328.136 358.706,346.294C337.113,368.342 309.673,378.152 286.755,377.744C270.09,377.447 253.784,370.816 242.516,361.114C226.42,347.253 219.918,331.409 217.69,313.729C217.136,309.334 217.461,297.358 219.748,289.066C221.453,282.885 225.777,270.948 232.1,265.727C224.703,262.541 215.183,255.604 212.182,252.274C211.445,251.455 211.54,250.174 212.2,249.292C212.861,248.41 214.02,248.062 215.057,248.435C221.416,250.618 229.161,252.771 237.327,254.137C248.067,255.932 261.424,258.194 274.955,258.859C307.946,260.479 342.407,245.67 353.103,216.207C363.798,186.744 359.649,157.602 321.279,140.121C282.909,122.64 265.185,101.856 234.183,89.32C213.934,81.131 191.396,85.992 168.257,98.78C105.931,133.223 50.092,242.048 75.833,342.873C76.201,344.252 75.58,345.705 74.328,346.392C73.156,347.036 71.713,346.852 70.741,345.962C63.25,337.731 56.454,328.857 50.445,319.433C45.796,312.139 41.623,304.524 37.965,296.635Z" />
</g>
<g transform="matrix(0.0920293,0.00428099,-0.00428099,0.0920293,-28.1272,-500.301)">
<path
d="M3053.7,5296.9C4371.65,5296.9 5441.66,6366.91 5441.66,7684.86C5441.66,9002.81 4371.65,10072.8 3053.7,10072.8C1735.75,10072.8 665.74,9002.81 665.74,7684.86C665.74,6366.91 1735.75,5296.9 3053.7,5296.9ZM3745.03,8143.22C3594.12,8142.82 3444.31,8124.57 3323.87,8110.15C3232.29,8099.18 3144.99,8079.23 3073.1,8058.23C3061.36,8054.62 3048.65,8059.09 3041.75,8069.24C3034.86,8079.4 3034.46,8093.71 3043.09,8102.44C3078.21,8137.94 3187.74,8210.21 3271.7,8241.83C3204.04,8303.2 3162.1,8438.28 3146.33,8507.94C3125.17,8601.4 3127.75,8734.83 3136.19,8783.45C3170.14,8979.04 3250.69,9151.99 3436.99,9297.9C3567.4,9400.03 3752.28,9465.38 3937.88,9460.06C4194.01,9452.71 4495.48,9328.51 4724.65,9070.17C5023.25,8710.58 5208.52,8252.45 5223.47,7749.5C5259.08,6551.9 4315.7,5550.69 3118.1,5515.08C1920.51,5479.47 919.301,6422.86 883.689,7620.45C865.246,8240.66 1109.37,8808.21 1515.43,9216.2C1526.73,9227.39 1544.21,9229.43 1557.78,9221.14C1571.35,9212.85 1577.51,9196.36 1572.7,9181.2C1234.07,8072.55 1799.11,6832.64 2474.84,6417.1C2725.71,6262.82 2973.99,6197.06 3203.56,6277.7C3555.04,6401.15 3763.03,6623.26 4199.06,6797.93C4635.09,6972.59 4696.35,7294.74 4592.58,7628.14C4488.81,7961.54 4113,8144.17 3745.03,8143.22ZM2917.17,6442.51C2777.75,6459.97 2693.93,6637.44 2687.08,6749.42C2680.18,6861.39 2744.03,7042.7 2926.19,7030.63C3139.52,7016.49 3195.89,6830.7 3164.24,6654.94C3140.48,6522.94 3033.73,6427.9 2917.17,6442.51Z"
style="fill:white;" />
</g>
<g transform="matrix(7.12289,0.543899,-0.543899,7.12289,-4867.49,-1040.55)">
<path
d="M721.316,105.751C722.813,105.518 724.225,106.703 724.582,108.395C725.058,110.649 724.402,113.065 721.658,113.329C719.314,113.555 718.422,111.242 718.468,109.796C718.513,108.35 719.525,106.03 721.316,105.751Z" />
</g>
</svg>

File size: 4.5 KiB before, 3.7 KiB after.


@ -329,7 +329,12 @@ impl JupyterServer {
}) })
.collect(); .collect();
(candidates, cursor_pos - prop_name.len()) if prop_name.len() > cursor_pos {
// TODO(bartlomieju): most likely not correct, but better than panicking because of sub with overflow
(candidates, cursor_pos)
} else {
(candidates, cursor_pos - prop_name.len())
}
} else { } else {
// combine results of declarations and globalThis properties // combine results of declarations and globalThis properties
let mut candidates = get_expression_property_names( let mut candidates = get_expression_property_names(
@ -349,7 +354,12 @@ impl JupyterServer {
candidates.sort(); candidates.sort();
candidates.dedup(); // make sure to sort first candidates.dedup(); // make sure to sort first
(candidates, cursor_pos - expr.len()) if expr.len() > cursor_pos {
// TODO(bartlomieju): most likely not correct, but better than panicking because of sub with overflow
(candidates, cursor_pos)
} else {
(candidates, cursor_pos - expr.len())
}
}; };
connection connection
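(Aside: the guards added above avoid a panic from unsigned-integer overflow when the completion prefix is longer than the reported cursor position. A minimal standalone sketch of the same idea using saturating_sub; the function and values below are illustrative, not the actual Jupyter server code.)

    // Illustrative only: clamp the offset so the subtraction can never underflow.
    fn completion_offset(cursor_pos: usize, prefix_len: usize) -> usize {
        // saturating_sub returns 0 instead of panicking (debug) or wrapping (release).
        cursor_pos.saturating_sub(prefix_len)
    }

    fn main() {
        assert_eq!(completion_offset(10, 4), 6);
        assert_eq!(completion_offset(2, 5), 0); // plain `2 - 5` would overflow here
    }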


@ -75,6 +75,13 @@ pub async fn cache_top_level_deps(
if entry.key.ends_with('/') && specifier.as_str().ends_with('/') { if entry.key.ends_with('/') && specifier.as_str().ends_with('/') {
continue; continue;
} }
if specifier.scheme() == "file" {
if let Ok(path) = specifier.to_file_path() {
if !path.is_file() {
continue;
}
}
}
roots.push(specifier.clone()); roots.push(specifier.clone());
} }
} }
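(For reference: the check added above skips `file:` specifiers that resolve to directories rather than regular files. A self-contained sketch of the same test, assuming the `url` crate; `should_cache_specifier` is an invented name, not the function used by the installer.)

    use url::Url;

    // Sketch: only treat a `file:` specifier as cacheable when it points at an
    // existing regular file; directories and unresolvable paths are skipped.
    fn should_cache_specifier(specifier: &Url) -> bool {
        if specifier.scheme() == "file" {
            if let Ok(path) = specifier.to_file_path() {
                return path.is_file();
            }
        }
        true
    }

    fn main() {
        let dir = Url::parse("file:///tmp/").unwrap();
        let remote = Url::parse("https://deno.land/x/mod.ts").unwrap();
        assert!(!should_cache_specifier(&dir)); // a directory, not a file
        assert!(should_cache_specifier(&remote)); // non-file schemes pass through
    }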


@ -2,7 +2,7 @@
[package] [package]
name = "deno_broadcast_channel" name = "deno_broadcast_channel"
version = "0.166.0" version = "0.167.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true


@ -2,7 +2,7 @@
[package] [package]
name = "deno_cache" name = "deno_cache"
version = "0.104.0" version = "0.105.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

ext/cache/lib.rs (vendored)

@ -28,7 +28,7 @@ pub enum CacheError {
Resource(deno_core::error::AnyError), Resource(deno_core::error::AnyError),
#[error(transparent)] #[error(transparent)]
Other(deno_core::error::AnyError), Other(deno_core::error::AnyError),
#[error(transparent)] #[error("{0}")]
Io(#[from] std::io::Error), Io(#[from] std::io::Error),
} }


@ -2,7 +2,7 @@
[package] [package]
name = "deno_canvas" name = "deno_canvas"
version = "0.41.0" version = "0.42.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true


@ -2,7 +2,7 @@
[package] [package]
name = "deno_console" name = "deno_console"
version = "0.172.0" version = "0.173.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true


@ -2,7 +2,7 @@
[package] [package]
name = "deno_cron" name = "deno_cron"
version = "0.52.0" version = "0.53.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true


@ -2,7 +2,7 @@
[package] [package]
name = "deno_crypto" name = "deno_crypto"
version = "0.186.0" version = "0.187.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true


@ -149,10 +149,7 @@ pub fn op_crypto_get_random_values(
#[buffer] out: &mut [u8], #[buffer] out: &mut [u8],
) -> Result<(), AnyError> { ) -> Result<(), AnyError> {
if out.len() > 65536 { if out.len() > 65536 {
return Err( return Err(custom_error("DOMExceptionQuotaExceededError", format!("The ArrayBufferView's byte length ({}) exceeds the number of bytes of entropy available via this API (65536)", out.len())));
deno_web::DomExceptionQuotaExceededError::new(&format!("The ArrayBufferView's byte length ({}) exceeds the number of bytes of entropy available via this API (65536)", out.len()))
.into(),
);
} }
let maybe_seeded_rng = state.try_borrow_mut::<StdRng>(); let maybe_seeded_rng = state.try_borrow_mut::<StdRng>();


@ -2,7 +2,7 @@
[package] [package]
name = "deno_fetch" name = "deno_fetch"
version = "0.196.0" version = "0.197.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true


@ -2,7 +2,7 @@
[package] [package]
name = "deno_ffi" name = "deno_ffi"
version = "0.159.0" version = "0.160.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true


@ -2,7 +2,7 @@
[package] [package]
name = "deno_fs" name = "deno_fs"
version = "0.82.0" version = "0.83.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true


@ -2,7 +2,7 @@
[package] [package]
name = "deno_http" name = "deno_http"
version = "0.170.0" version = "0.171.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true


@ -2,7 +2,7 @@
[package] [package]
name = "deno_io" name = "deno_io"
version = "0.82.0" version = "0.83.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true


@ -2,7 +2,7 @@
[package] [package]
name = "deno_kv" name = "deno_kv"
version = "0.80.0" version = "0.81.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
@ -36,6 +36,7 @@ prost.workspace = true
rand.workspace = true rand.workspace = true
rusqlite.workspace = true rusqlite.workspace = true
serde.workspace = true serde.workspace = true
thiserror.workspace = true
url.workspace = true url.workspace = true
[build-dependencies] [build-dependencies]


@ -12,15 +12,11 @@ use std::num::NonZeroU32;
use std::rc::Rc; use std::rc::Rc;
use std::time::Duration; use std::time::Duration;
use anyhow::bail;
use base64::prelude::BASE64_URL_SAFE; use base64::prelude::BASE64_URL_SAFE;
use base64::Engine; use base64::Engine;
use chrono::DateTime; use chrono::DateTime;
use chrono::Utc; use chrono::Utc;
use deno_core::anyhow::Context;
use deno_core::error::get_custom_error_class; use deno_core::error::get_custom_error_class;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::futures::StreamExt; use deno_core::futures::StreamExt;
use deno_core::op2; use deno_core::op2;
use deno_core::serde_v8::AnyValue; use deno_core::serde_v8::AnyValue;
@ -118,12 +114,72 @@ impl Resource for DatabaseWatcherResource {
} }
} }
#[derive(Debug, thiserror::Error)]
pub enum KvError {
#[error(transparent)]
DatabaseHandler(deno_core::error::AnyError),
#[error(transparent)]
Resource(deno_core::error::AnyError),
#[error("Too many ranges (max {0})")]
TooManyRanges(usize),
#[error("Too many entries (max {0})")]
TooManyEntries(usize),
#[error("Too many checks (max {0})")]
TooManyChecks(usize),
#[error("Too many mutations (max {0})")]
TooManyMutations(usize),
#[error("Too many keys (max {0})")]
TooManyKeys(usize),
#[error("limit must be greater than 0")]
InvalidLimit,
#[error("Invalid boundary key")]
InvalidBoundaryKey,
#[error("Key too large for read (max {0} bytes)")]
KeyTooLargeToRead(usize),
#[error("Key too large for write (max {0} bytes)")]
KeyTooLargeToWrite(usize),
#[error("Total mutation size too large (max {0} bytes)")]
TotalMutationTooLarge(usize),
#[error("Total key size too large (max {0} bytes)")]
TotalKeyTooLarge(usize),
#[error(transparent)]
Kv(deno_core::error::AnyError),
#[error(transparent)]
Io(#[from] std::io::Error),
#[error("Queue message not found")]
QueueMessageNotFound,
#[error("Start key is not in the keyspace defined by prefix")]
StartKeyNotInKeyspace,
#[error("End key is not in the keyspace defined by prefix")]
EndKeyNotInKeyspace,
#[error("Start key is greater than end key")]
StartKeyGreaterThanEndKey,
#[error("Invalid check")]
InvalidCheck(#[source] KvCheckError),
#[error("Invalid mutation")]
InvalidMutation(#[source] KvMutationError),
#[error("Invalid enqueue")]
InvalidEnqueue(#[source] std::io::Error),
#[error("key cannot be empty")]
EmptyKey, // TypeError
#[error("Value too large (max {0} bytes)")]
ValueTooLarge(usize), // TypeError
#[error("enqueue payload too large (max {0} bytes)")]
EnqueuePayloadTooLarge(usize), // TypeError
#[error("invalid cursor")]
InvalidCursor,
#[error("cursor out of bounds")]
CursorOutOfBounds,
#[error("Invalid range")]
InvalidRange,
}
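(The KvError enum above is part of this change's broader move from ad-hoc `type_error(format!(...))` values to typed errors derived with thiserror. A minimal sketch of the pattern; `DemoError` and its variants are invented for illustration, not part of deno_kv.)

    use thiserror::Error;

    #[derive(Debug, Error)]
    pub enum DemoError {
        // Display comes from the #[error(...)] attribute; {0} is the first tuple field.
        #[error("Too many ranges (max {0})")]
        TooManyRanges(usize),
        // `transparent` forwards Display and source() to the wrapped error,
        // and #[from] lets `?` convert an io::Error into this variant.
        #[error(transparent)]
        Io(#[from] std::io::Error),
    }

    fn read_ranges(requested: usize, max: usize) -> Result<(), DemoError> {
        if requested > max {
            // Constructing a variant replaces the old type_error(format!(...)) call.
            return Err(DemoError::TooManyRanges(max));
        }
        Ok(())
    }

    fn main() {
        let err = read_ranges(12, 10).unwrap_err();
        assert_eq!(err.to_string(), "Too many ranges (max 10)");

        let io_err = DemoError::from(std::io::Error::other("disk unavailable"));
        assert_eq!(io_err.to_string(), "disk unavailable");
    }

Since variants like `KvError::Resource` wrap a plain `deno_core::error::AnyError`, the tuple constructor doubles as a function, which is why the resource-table lookups in this file can convert with `.map_err(KvError::Resource)`.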
#[op2(async)] #[op2(async)]
#[smi] #[smi]
async fn op_kv_database_open<DBH>( async fn op_kv_database_open<DBH>(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[string] path: Option<String>, #[string] path: Option<String>,
) -> Result<ResourceId, AnyError> ) -> Result<ResourceId, KvError>
where where
DBH: DatabaseHandler + 'static, DBH: DatabaseHandler + 'static,
{ {
@ -134,7 +190,10 @@ where
.check_or_exit(UNSTABLE_FEATURE_NAME, "Deno.openKv"); .check_or_exit(UNSTABLE_FEATURE_NAME, "Deno.openKv");
state.borrow::<Rc<DBH>>().clone() state.borrow::<Rc<DBH>>().clone()
}; };
let db = handler.open(state.clone(), path).await?; let db = handler
.open(state.clone(), path)
.await
.map_err(KvError::DatabaseHandler)?;
let rid = state.borrow_mut().resource_table.add(DatabaseResource { let rid = state.borrow_mut().resource_table.add(DatabaseResource {
db, db,
cancel_handle: CancelHandle::new_rc(), cancel_handle: CancelHandle::new_rc(),
@ -184,8 +243,8 @@ enum ToV8Value {
} }
impl TryFrom<FromV8Value> for KvValue { impl TryFrom<FromV8Value> for KvValue {
type Error = AnyError; type Error = num_bigint::TryFromBigIntError<num_bigint::BigInt>;
fn try_from(value: FromV8Value) -> Result<Self, AnyError> { fn try_from(value: FromV8Value) -> Result<Self, Self::Error> {
Ok(match value { Ok(match value {
FromV8Value::V8(buf) => KvValue::V8(buf.to_vec()), FromV8Value::V8(buf) => KvValue::V8(buf.to_vec()),
FromV8Value::Bytes(buf) => KvValue::Bytes(buf.to_vec()), FromV8Value::Bytes(buf) => KvValue::Bytes(buf.to_vec()),
@ -214,8 +273,8 @@ struct ToV8KvEntry {
} }
impl TryFrom<KvEntry> for ToV8KvEntry { impl TryFrom<KvEntry> for ToV8KvEntry {
type Error = AnyError; type Error = std::io::Error;
fn try_from(entry: KvEntry) -> Result<Self, AnyError> { fn try_from(entry: KvEntry) -> Result<Self, Self::Error> {
Ok(ToV8KvEntry { Ok(ToV8KvEntry {
key: decode_key(&entry.key)? key: decode_key(&entry.key)?
.0 .0
@ -261,14 +320,16 @@ async fn op_kv_snapshot_read<DBH>(
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[serde] ranges: Vec<SnapshotReadRange>, #[serde] ranges: Vec<SnapshotReadRange>,
#[serde] consistency: V8Consistency, #[serde] consistency: V8Consistency,
) -> Result<Vec<Vec<ToV8KvEntry>>, AnyError> ) -> Result<Vec<Vec<ToV8KvEntry>>, KvError>
where where
DBH: DatabaseHandler + 'static, DBH: DatabaseHandler + 'static,
{ {
let db = { let db = {
let state = state.borrow(); let state = state.borrow();
let resource = let resource = state
state.resource_table.get::<DatabaseResource<DBH::DB>>(rid)?; .resource_table
.get::<DatabaseResource<DBH::DB>>(rid)
.map_err(KvError::Resource)?;
resource.db.clone() resource.db.clone()
}; };
@ -278,10 +339,7 @@ where
}; };
if ranges.len() > config.max_read_ranges { if ranges.len() > config.max_read_ranges {
return Err(type_error(format!( return Err(KvError::TooManyRanges(config.max_read_ranges));
"Too many ranges (max {})",
config.max_read_ranges
)));
} }
let mut total_entries = 0usize; let mut total_entries = 0usize;
@ -300,33 +358,32 @@ where
Ok(ReadRange { Ok(ReadRange {
start, start,
end, end,
limit: NonZeroU32::new(limit) limit: NonZeroU32::new(limit).ok_or(KvError::InvalidLimit)?,
.with_context(|| "limit must be greater than 0")?,
reverse, reverse,
}) })
}) })
.collect::<Result<Vec<_>, AnyError>>()?; .collect::<Result<Vec<_>, KvError>>()?;
if total_entries > config.max_read_entries { if total_entries > config.max_read_entries {
return Err(type_error(format!( return Err(KvError::TooManyEntries(config.max_read_entries));
"Too many entries (max {})",
config.max_read_entries
)));
} }
let opts = SnapshotReadOptions { let opts = SnapshotReadOptions {
consistency: consistency.into(), consistency: consistency.into(),
}; };
let output_ranges = db.snapshot_read(read_ranges, opts).await?; let output_ranges = db
.snapshot_read(read_ranges, opts)
.await
.map_err(KvError::Kv)?;
let output_ranges = output_ranges let output_ranges = output_ranges
.into_iter() .into_iter()
.map(|x| { .map(|x| {
x.entries x.entries
.into_iter() .into_iter()
.map(TryInto::try_into) .map(TryInto::try_into)
.collect::<Result<Vec<_>, AnyError>>() .collect::<Result<Vec<_>, std::io::Error>>()
}) })
.collect::<Result<Vec<_>, AnyError>>()?; .collect::<Result<Vec<_>, std::io::Error>>()?;
Ok(output_ranges) Ok(output_ranges)
} }
@ -345,7 +402,7 @@ impl<QMH: QueueMessageHandle + 'static> Resource for QueueMessageResource<QMH> {
async fn op_kv_dequeue_next_message<DBH>( async fn op_kv_dequeue_next_message<DBH>(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
) -> Result<Option<(ToJsBuffer, ResourceId)>, AnyError> ) -> Result<Option<(ToJsBuffer, ResourceId)>, KvError>
where where
DBH: DatabaseHandler + 'static, DBH: DatabaseHandler + 'static,
{ {
@ -358,17 +415,19 @@ where
if get_custom_error_class(&err) == Some("BadResource") { if get_custom_error_class(&err) == Some("BadResource") {
return Ok(None); return Ok(None);
} else { } else {
return Err(err); return Err(KvError::Resource(err));
} }
} }
}; };
resource.db.clone() resource.db.clone()
}; };
let Some(mut handle) = db.dequeue_next_message().await? else { let Some(mut handle) =
db.dequeue_next_message().await.map_err(KvError::Kv)?
else {
return Ok(None); return Ok(None);
}; };
let payload = handle.take_payload().await?.into(); let payload = handle.take_payload().await.map_err(KvError::Kv)?.into();
let handle_rid = { let handle_rid = {
let mut state = state.borrow_mut(); let mut state = state.borrow_mut();
state.resource_table.add(QueueMessageResource { handle }) state.resource_table.add(QueueMessageResource { handle })
@ -382,18 +441,18 @@ fn op_kv_watch<DBH>(
state: &mut OpState, state: &mut OpState,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[serde] keys: Vec<KvKey>, #[serde] keys: Vec<KvKey>,
) -> Result<ResourceId, AnyError> ) -> Result<ResourceId, KvError>
where where
DBH: DatabaseHandler + 'static, DBH: DatabaseHandler + 'static,
{ {
let resource = state.resource_table.get::<DatabaseResource<DBH::DB>>(rid)?; let resource = state
.resource_table
.get::<DatabaseResource<DBH::DB>>(rid)
.map_err(KvError::Resource)?;
let config = state.borrow::<Rc<KvConfig>>().clone(); let config = state.borrow::<Rc<KvConfig>>().clone();
if keys.len() > config.max_watched_keys { if keys.len() > config.max_watched_keys {
return Err(type_error(format!( return Err(KvError::TooManyKeys(config.max_watched_keys));
"Too many keys (max {})",
config.max_watched_keys
)));
} }
let keys: Vec<Vec<u8>> = keys let keys: Vec<Vec<u8>> = keys
@ -428,10 +487,13 @@ enum WatchEntry {
async fn op_kv_watch_next( async fn op_kv_watch_next(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
) -> Result<Option<Vec<WatchEntry>>, AnyError> { ) -> Result<Option<Vec<WatchEntry>>, KvError> {
let resource = { let resource = {
let state = state.borrow(); let state = state.borrow();
let resource = state.resource_table.get::<DatabaseWatcherResource>(rid)?; let resource = state
.resource_table
.get::<DatabaseWatcherResource>(rid)
.map_err(KvError::Resource)?;
resource.clone() resource.clone()
}; };
@ -457,7 +519,7 @@ async fn op_kv_watch_next(
return Ok(None); return Ok(None);
}; };
let entries = res?; let entries = res.map_err(KvError::Kv)?;
let entries = entries let entries = entries
.into_iter() .into_iter()
.map(|entry| { .map(|entry| {
@ -468,7 +530,7 @@ async fn op_kv_watch_next(
WatchKeyOutput::Unchanged => WatchEntry::Unchanged, WatchKeyOutput::Unchanged => WatchEntry::Unchanged,
}) })
}) })
.collect::<Result<_, anyhow::Error>>()?; .collect::<Result<_, KvError>>()?;
Ok(Some(entries)) Ok(Some(entries))
} }
@ -478,7 +540,7 @@ async fn op_kv_finish_dequeued_message<DBH>(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[smi] handle_rid: ResourceId, #[smi] handle_rid: ResourceId,
success: bool, success: bool,
) -> Result<(), AnyError> ) -> Result<(), KvError>
where where
DBH: DatabaseHandler + 'static, DBH: DatabaseHandler + 'static,
{ {
@ -487,9 +549,9 @@ where
let handle = state let handle = state
.resource_table .resource_table
.take::<QueueMessageResource<<<DBH>::DB as Database>::QMH>>(handle_rid) .take::<QueueMessageResource<<<DBH>::DB as Database>::QMH>>(handle_rid)
.map_err(|_| type_error("Queue message not found"))?; .map_err(|_| KvError::QueueMessageNotFound)?;
Rc::try_unwrap(handle) Rc::try_unwrap(handle)
.map_err(|_| type_error("Queue message not found"))? .map_err(|_| KvError::QueueMessageNotFound)?
.handle .handle
}; };
// if we fail to finish the message, there is not much we can do and the // if we fail to finish the message, there is not much we can do and the
@ -500,32 +562,52 @@ where
Ok(()) Ok(())
} }
#[derive(Debug, thiserror::Error)]
pub enum KvCheckError {
#[error("invalid versionstamp")]
InvalidVersionstamp,
#[error(transparent)]
Io(std::io::Error),
}
type V8KvCheck = (KvKey, Option<ByteString>); type V8KvCheck = (KvKey, Option<ByteString>);
fn check_from_v8(value: V8KvCheck) -> Result<Check, AnyError> { fn check_from_v8(value: V8KvCheck) -> Result<Check, KvCheckError> {
let versionstamp = match value.1 { let versionstamp = match value.1 {
Some(data) => { Some(data) => {
let mut out = [0u8; 10]; let mut out = [0u8; 10];
if data.len() != out.len() * 2 { if data.len() != out.len() * 2 {
bail!(type_error("invalid versionstamp")); return Err(KvCheckError::InvalidVersionstamp);
} }
faster_hex::hex_decode(&data, &mut out) faster_hex::hex_decode(&data, &mut out)
.map_err(|_| type_error("invalid versionstamp"))?; .map_err(|_| KvCheckError::InvalidVersionstamp)?;
Some(out) Some(out)
} }
None => None, None => None,
}; };
Ok(Check { Ok(Check {
key: encode_v8_key(value.0)?, key: encode_v8_key(value.0).map_err(KvCheckError::Io)?,
versionstamp, versionstamp,
}) })
} }
#[derive(Debug, thiserror::Error)]
pub enum KvMutationError {
#[error(transparent)]
BigInt(#[from] num_bigint::TryFromBigIntError<num_bigint::BigInt>),
#[error(transparent)]
Io(#[from] std::io::Error),
#[error("Invalid mutation '{0}' with value")]
InvalidMutationWithValue(String),
#[error("Invalid mutation '{0}' without value")]
InvalidMutationWithoutValue(String),
}
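(Note the two styles visible above: `KvMutationError` marks its wrapped errors with `#[from]`, so `?` converts them implicitly inside `mutation_from_v8`, while variants such as `KvError::InvalidCheck` use `#[source]` plus a custom message and require an explicit `map_err`. A small sketch of that difference with invented names.)

    use thiserror::Error;

    #[derive(Debug, Error)]
    enum Inner {
        #[error("inner failure")]
        Failure,
    }

    #[derive(Debug, Error)]
    enum Outer {
        // #[from] generates `impl From<Inner> for Outer`, so `?` converts implicitly.
        #[error(transparent)]
        Implicit(#[from] Inner),
        // #[source] only records the cause for Error::source(); callers must wrap
        // explicitly (e.g. `.map_err(Outer::Explicit)`) and Display stays custom.
        #[error("Invalid check")]
        Explicit(#[source] Inner),
    }

    fn inner_op() -> Result<(), Inner> {
        Err(Inner::Failure)
    }

    fn implicit() -> Result<(), Outer> {
        inner_op()?; // converted by the generated From impl
        Ok(())
    }

    fn explicit() -> Result<(), Outer> {
        inner_op().map_err(Outer::Explicit)
    }

    fn main() {
        assert_eq!(implicit().unwrap_err().to_string(), "inner failure");
        assert_eq!(explicit().unwrap_err().to_string(), "Invalid check");
    }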
type V8KvMutation = (KvKey, String, Option<FromV8Value>, Option<u64>); type V8KvMutation = (KvKey, String, Option<FromV8Value>, Option<u64>);
fn mutation_from_v8( fn mutation_from_v8(
(value, current_timstamp): (V8KvMutation, DateTime<Utc>), (value, current_timstamp): (V8KvMutation, DateTime<Utc>),
) -> Result<Mutation, AnyError> { ) -> Result<Mutation, KvMutationError> {
let key = encode_v8_key(value.0)?; let key = encode_v8_key(value.0)?;
let kind = match (value.1.as_str(), value.2) { let kind = match (value.1.as_str(), value.2) {
("set", Some(value)) => MutationKind::Set(value.try_into()?), ("set", Some(value)) => MutationKind::Set(value.try_into()?),
@ -542,10 +624,10 @@ fn mutation_from_v8(
MutationKind::SetSuffixVersionstampedKey(value.try_into()?) MutationKind::SetSuffixVersionstampedKey(value.try_into()?)
} }
(op, Some(_)) => { (op, Some(_)) => {
return Err(type_error(format!("Invalid mutation '{op}' with value"))) return Err(KvMutationError::InvalidMutationWithValue(op.to_string()))
} }
(op, None) => { (op, None) => {
return Err(type_error(format!("Invalid mutation '{op}' without value"))) return Err(KvMutationError::InvalidMutationWithoutValue(op.to_string()))
} }
}; };
Ok(Mutation { Ok(Mutation {
@ -562,7 +644,7 @@ type V8Enqueue = (JsBuffer, u64, Vec<KvKey>, Option<Vec<u32>>);
fn enqueue_from_v8( fn enqueue_from_v8(
value: V8Enqueue, value: V8Enqueue,
current_timestamp: DateTime<Utc>, current_timestamp: DateTime<Utc>,
) -> Result<Enqueue, AnyError> { ) -> Result<Enqueue, std::io::Error> {
Ok(Enqueue { Ok(Enqueue {
payload: value.0.to_vec(), payload: value.0.to_vec(),
deadline: current_timestamp deadline: current_timestamp
@ -597,7 +679,7 @@ impl RawSelector {
prefix: Option<KvKey>, prefix: Option<KvKey>,
start: Option<KvKey>, start: Option<KvKey>,
end: Option<KvKey>, end: Option<KvKey>,
) -> Result<Self, AnyError> { ) -> Result<Self, KvError> {
let prefix = prefix.map(encode_v8_key).transpose()?; let prefix = prefix.map(encode_v8_key).transpose()?;
let start = start.map(encode_v8_key).transpose()?; let start = start.map(encode_v8_key).transpose()?;
let end = end.map(encode_v8_key).transpose()?; let end = end.map(encode_v8_key).transpose()?;
@ -610,9 +692,7 @@ impl RawSelector {
}), }),
(Some(prefix), Some(start), None) => { (Some(prefix), Some(start), None) => {
if !start.starts_with(&prefix) || start.len() == prefix.len() { if !start.starts_with(&prefix) || start.len() == prefix.len() {
return Err(type_error( return Err(KvError::StartKeyNotInKeyspace);
"Start key is not in the keyspace defined by prefix",
));
} }
Ok(Self::Prefixed { Ok(Self::Prefixed {
prefix, prefix,
@ -622,9 +702,7 @@ impl RawSelector {
} }
(Some(prefix), None, Some(end)) => { (Some(prefix), None, Some(end)) => {
if !end.starts_with(&prefix) || end.len() == prefix.len() { if !end.starts_with(&prefix) || end.len() == prefix.len() {
return Err(type_error( return Err(KvError::EndKeyNotInKeyspace);
"End key is not in the keyspace defined by prefix",
));
} }
Ok(Self::Prefixed { Ok(Self::Prefixed {
prefix, prefix,
@ -634,7 +712,7 @@ impl RawSelector {
} }
(None, Some(start), Some(end)) => { (None, Some(start), Some(end)) => {
if start > end { if start > end {
return Err(type_error("Start key is greater than end key")); return Err(KvError::StartKeyGreaterThanEndKey);
} }
Ok(Self::Range { start, end }) Ok(Self::Range { start, end })
} }
@ -642,7 +720,7 @@ impl RawSelector {
let end = start.iter().copied().chain(Some(0)).collect(); let end = start.iter().copied().chain(Some(0)).collect();
Ok(Self::Range { start, end }) Ok(Self::Range { start, end })
} }
_ => Err(type_error("Invalid range")), _ => Err(KvError::InvalidRange),
} }
} }
@ -701,10 +779,10 @@ fn common_prefix_for_bytes<'a>(a: &'a [u8], b: &'a [u8]) -> &'a [u8] {
fn encode_cursor( fn encode_cursor(
selector: &RawSelector, selector: &RawSelector,
boundary_key: &[u8], boundary_key: &[u8],
) -> Result<String, AnyError> { ) -> Result<String, KvError> {
let common_prefix = selector.common_prefix(); let common_prefix = selector.common_prefix();
if !boundary_key.starts_with(common_prefix) { if !boundary_key.starts_with(common_prefix) {
return Err(type_error("Invalid boundary key")); return Err(KvError::InvalidBoundaryKey);
} }
Ok(BASE64_URL_SAFE.encode(&boundary_key[common_prefix.len()..])) Ok(BASE64_URL_SAFE.encode(&boundary_key[common_prefix.len()..]))
} }
@ -713,7 +791,7 @@ fn decode_selector_and_cursor(
selector: &RawSelector, selector: &RawSelector,
reverse: bool, reverse: bool,
cursor: Option<&ByteString>, cursor: Option<&ByteString>,
) -> Result<(Vec<u8>, Vec<u8>), AnyError> { ) -> Result<(Vec<u8>, Vec<u8>), KvError> {
let Some(cursor) = cursor else { let Some(cursor) = cursor else {
return Ok((selector.range_start_key(), selector.range_end_key())); return Ok((selector.range_start_key(), selector.range_end_key()));
}; };
@ -721,7 +799,7 @@ fn decode_selector_and_cursor(
let common_prefix = selector.common_prefix(); let common_prefix = selector.common_prefix();
let cursor = BASE64_URL_SAFE let cursor = BASE64_URL_SAFE
.decode(cursor) .decode(cursor)
.map_err(|_| type_error("invalid cursor"))?; .map_err(|_| KvError::InvalidCursor)?;
let first_key: Vec<u8>; let first_key: Vec<u8>;
let last_key: Vec<u8>; let last_key: Vec<u8>;
@ -746,13 +824,13 @@ fn decode_selector_and_cursor(
// Defend against out-of-bounds reading // Defend against out-of-bounds reading
if let Some(start) = selector.start() { if let Some(start) = selector.start() {
if &first_key[..] < start { if &first_key[..] < start {
return Err(type_error("cursor out of bounds")); return Err(KvError::CursorOutOfBounds);
} }
} }
if let Some(end) = selector.end() { if let Some(end) = selector.end() {
if &last_key[..] > end { if &last_key[..] > end {
return Err(type_error("cursor out of bounds")); return Err(KvError::CursorOutOfBounds);
} }
} }
@ -767,15 +845,17 @@ async fn op_kv_atomic_write<DBH>(
#[serde] checks: Vec<V8KvCheck>, #[serde] checks: Vec<V8KvCheck>,
#[serde] mutations: Vec<V8KvMutation>, #[serde] mutations: Vec<V8KvMutation>,
#[serde] enqueues: Vec<V8Enqueue>, #[serde] enqueues: Vec<V8Enqueue>,
) -> Result<Option<String>, AnyError> ) -> Result<Option<String>, KvError>
where where
DBH: DatabaseHandler + 'static, DBH: DatabaseHandler + 'static,
{ {
let current_timestamp = chrono::Utc::now(); let current_timestamp = chrono::Utc::now();
let db = { let db = {
let state = state.borrow(); let state = state.borrow();
let resource = let resource = state
state.resource_table.get::<DatabaseResource<DBH::DB>>(rid)?; .resource_table
.get::<DatabaseResource<DBH::DB>>(rid)
.map_err(KvError::Resource)?;
resource.db.clone() resource.db.clone()
}; };
@ -785,34 +865,28 @@ where
}; };
if checks.len() > config.max_checks { if checks.len() > config.max_checks {
return Err(type_error(format!( return Err(KvError::TooManyChecks(config.max_checks));
"Too many checks (max {})",
config.max_checks
)));
} }
if mutations.len() + enqueues.len() > config.max_mutations { if mutations.len() + enqueues.len() > config.max_mutations {
return Err(type_error(format!( return Err(KvError::TooManyMutations(config.max_mutations));
"Too many mutations (max {})",
config.max_mutations
)));
} }
let checks = checks let checks = checks
.into_iter() .into_iter()
.map(check_from_v8) .map(check_from_v8)
.collect::<Result<Vec<Check>, AnyError>>() .collect::<Result<Vec<Check>, KvCheckError>>()
.with_context(|| "invalid check")?; .map_err(KvError::InvalidCheck)?;
let mutations = mutations let mutations = mutations
.into_iter() .into_iter()
.map(|mutation| mutation_from_v8((mutation, current_timestamp))) .map(|mutation| mutation_from_v8((mutation, current_timestamp)))
.collect::<Result<Vec<Mutation>, AnyError>>() .collect::<Result<Vec<Mutation>, KvMutationError>>()
.with_context(|| "Invalid mutation")?; .map_err(KvError::InvalidMutation)?;
let enqueues = enqueues let enqueues = enqueues
.into_iter() .into_iter()
.map(|e| enqueue_from_v8(e, current_timestamp)) .map(|e| enqueue_from_v8(e, current_timestamp))
.collect::<Result<Vec<Enqueue>, AnyError>>() .collect::<Result<Vec<Enqueue>, std::io::Error>>()
.with_context(|| "invalid enqueue")?; .map_err(KvError::InvalidEnqueue)?;
let mut total_payload_size = 0usize; let mut total_payload_size = 0usize;
let mut total_key_size = 0usize; let mut total_key_size = 0usize;
@ -823,7 +897,7 @@ where
.chain(mutations.iter().map(|m| &m.key)) .chain(mutations.iter().map(|m| &m.key))
{ {
if key.is_empty() { if key.is_empty() {
return Err(type_error("key cannot be empty")); return Err(KvError::EmptyKey);
} }
total_payload_size += check_write_key_size(key, &config)?; total_payload_size += check_write_key_size(key, &config)?;
@ -847,17 +921,13 @@ where
} }
if total_payload_size > config.max_total_mutation_size_bytes { if total_payload_size > config.max_total_mutation_size_bytes {
return Err(type_error(format!( return Err(KvError::TotalMutationTooLarge(
"Total mutation size too large (max {} bytes)", config.max_total_mutation_size_bytes,
config.max_total_mutation_size_bytes ));
)));
} }
if total_key_size > config.max_total_key_size_bytes { if total_key_size > config.max_total_key_size_bytes {
return Err(type_error(format!( return Err(KvError::TotalKeyTooLarge(config.max_total_key_size_bytes));
"Total key size too large (max {} bytes)",
config.max_total_key_size_bytes
)));
} }
let atomic_write = AtomicWrite { let atomic_write = AtomicWrite {
@ -866,7 +936,7 @@ where
enqueues, enqueues,
}; };
let result = db.atomic_write(atomic_write).await?; let result = db.atomic_write(atomic_write).await.map_err(KvError::Kv)?;
Ok(result.map(|res| faster_hex::hex_string(&res.versionstamp))) Ok(result.map(|res| faster_hex::hex_string(&res.versionstamp)))
} }
@ -879,19 +949,16 @@ type EncodeCursorRangeSelector = (Option<KvKey>, Option<KvKey>, Option<KvKey>);
fn op_kv_encode_cursor( fn op_kv_encode_cursor(
#[serde] (prefix, start, end): EncodeCursorRangeSelector, #[serde] (prefix, start, end): EncodeCursorRangeSelector,
#[serde] boundary_key: KvKey, #[serde] boundary_key: KvKey,
) -> Result<String, AnyError> { ) -> Result<String, KvError> {
let selector = RawSelector::from_tuple(prefix, start, end)?; let selector = RawSelector::from_tuple(prefix, start, end)?;
let boundary_key = encode_v8_key(boundary_key)?; let boundary_key = encode_v8_key(boundary_key)?;
let cursor = encode_cursor(&selector, &boundary_key)?; let cursor = encode_cursor(&selector, &boundary_key)?;
Ok(cursor) Ok(cursor)
} }
fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), AnyError> { fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), KvError> {
if key.len() > config.max_read_key_size_bytes { if key.len() > config.max_read_key_size_bytes {
Err(type_error(format!( Err(KvError::KeyTooLargeToRead(config.max_read_key_size_bytes))
"Key too large for read (max {} bytes)",
config.max_read_key_size_bytes
)))
} else { } else {
Ok(()) Ok(())
} }
@ -900,12 +967,9 @@ fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), AnyError> {
fn check_write_key_size( fn check_write_key_size(
key: &[u8], key: &[u8],
config: &KvConfig, config: &KvConfig,
) -> Result<usize, AnyError> { ) -> Result<usize, KvError> {
if key.len() > config.max_write_key_size_bytes { if key.len() > config.max_write_key_size_bytes {
Err(type_error(format!( Err(KvError::KeyTooLargeToWrite(config.max_write_key_size_bytes))
"Key too large for write (max {} bytes)",
config.max_write_key_size_bytes
)))
} else { } else {
Ok(key.len()) Ok(key.len())
} }
@ -914,7 +978,7 @@ fn check_write_key_size(
fn check_value_size( fn check_value_size(
value: &KvValue, value: &KvValue,
config: &KvConfig, config: &KvConfig,
) -> Result<usize, AnyError> { ) -> Result<usize, KvError> {
let payload = match value { let payload = match value {
KvValue::Bytes(x) => x, KvValue::Bytes(x) => x,
KvValue::V8(x) => x, KvValue::V8(x) => x,
@ -922,10 +986,7 @@ fn check_value_size(
}; };
if payload.len() > config.max_value_size_bytes { if payload.len() > config.max_value_size_bytes {
Err(type_error(format!( Err(KvError::ValueTooLarge(config.max_value_size_bytes))
"Value too large (max {} bytes)",
config.max_value_size_bytes
)))
} else { } else {
Ok(payload.len()) Ok(payload.len())
} }
@ -934,12 +995,9 @@ fn check_value_size(
fn check_enqueue_payload_size( fn check_enqueue_payload_size(
payload: &[u8], payload: &[u8],
config: &KvConfig, config: &KvConfig,
) -> Result<usize, AnyError> { ) -> Result<usize, KvError> {
if payload.len() > config.max_value_size_bytes { if payload.len() > config.max_value_size_bytes {
Err(type_error(format!( Err(KvError::EnqueuePayloadTooLarge(config.max_value_size_bytes))
"enqueue payload too large (max {} bytes)",
config.max_value_size_bytes
)))
} else { } else {
Ok(payload.len()) Ok(payload.len())
} }


@ -2,7 +2,7 @@
[package] [package]
name = "deno_napi" name = "deno_napi"
version = "0.103.0" version = "0.104.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
@ -17,3 +17,4 @@ path = "lib.rs"
deno_core.workspace = true deno_core.workspace = true
deno_permissions.workspace = true deno_permissions.workspace = true
libloading = { version = "0.7" } libloading = { version = "0.7" }
thiserror.workspace = true


@ -6,8 +6,6 @@
#![deny(clippy::missing_safety_doc)] #![deny(clippy::missing_safety_doc)]
use core::ptr::NonNull; use core::ptr::NonNull;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op2; use deno_core::op2;
use deno_core::parking_lot::RwLock; use deno_core::parking_lot::RwLock;
use deno_core::url::Url; use deno_core::url::Url;
@ -20,6 +18,18 @@ use std::path::PathBuf;
use std::rc::Rc; use std::rc::Rc;
use std::thread_local; use std::thread_local;
#[derive(Debug, thiserror::Error)]
pub enum NApiError {
#[error("Invalid path")]
InvalidPath,
#[error(transparent)]
LibLoading(#[from] libloading::Error),
#[error("Unable to find register Node-API module at {}", .0.display())]
ModuleNotFound(PathBuf),
#[error(transparent)]
Permission(deno_core::error::AnyError),
}
#[cfg(unix)] #[cfg(unix)]
use libloading::os::unix::*; use libloading::os::unix::*;
@ -482,14 +492,20 @@ deno_core::extension!(deno_napi,
pub trait NapiPermissions { pub trait NapiPermissions {
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check(&mut self, path: &str) -> std::result::Result<PathBuf, AnyError>; fn check(
&mut self,
path: &str,
) -> Result<PathBuf, deno_core::error::AnyError>;
} }
// NOTE(bartlomieju): for now, NAPI uses `--allow-ffi` flag, but that might // NOTE(bartlomieju): for now, NAPI uses `--allow-ffi` flag, but that might
// change in the future. // change in the future.
impl NapiPermissions for deno_permissions::PermissionsContainer { impl NapiPermissions for deno_permissions::PermissionsContainer {
#[inline(always)] #[inline(always)]
fn check(&mut self, path: &str) -> Result<PathBuf, AnyError> { fn check(
&mut self,
path: &str,
) -> Result<PathBuf, deno_core::error::AnyError> {
deno_permissions::PermissionsContainer::check_ffi(self, path) deno_permissions::PermissionsContainer::check_ffi(self, path)
} }
} }
@ -512,7 +528,7 @@ fn op_napi_open<NP, 'scope>(
global: v8::Local<'scope, v8::Object>, global: v8::Local<'scope, v8::Object>,
buffer_constructor: v8::Local<'scope, v8::Function>, buffer_constructor: v8::Local<'scope, v8::Function>,
report_error: v8::Local<'scope, v8::Function>, report_error: v8::Local<'scope, v8::Function>,
) -> std::result::Result<v8::Local<'scope, v8::Value>, AnyError> ) -> Result<v8::Local<'scope, v8::Value>, NApiError>
where where
NP: NapiPermissions + 'static, NP: NapiPermissions + 'static,
{ {
@ -521,7 +537,7 @@ where
let (async_work_sender, cleanup_hooks, external_ops_tracker, path) = { let (async_work_sender, cleanup_hooks, external_ops_tracker, path) = {
let mut op_state = op_state.borrow_mut(); let mut op_state = op_state.borrow_mut();
let permissions = op_state.borrow_mut::<NP>(); let permissions = op_state.borrow_mut::<NP>();
let path = permissions.check(&path)?; let path = permissions.check(&path).map_err(NApiError::Permission)?;
let napi_state = op_state.borrow::<NapiState>(); let napi_state = op_state.borrow::<NapiState>();
( (
op_state.borrow::<V8CrossThreadTaskSpawner>().clone(), op_state.borrow::<V8CrossThreadTaskSpawner>().clone(),
@ -540,7 +556,7 @@ where
let type_tag = v8::Global::new(scope, type_tag); let type_tag = v8::Global::new(scope, type_tag);
let url_filename = let url_filename =
Url::from_file_path(&path).map_err(|_| type_error("Invalid path"))?; Url::from_file_path(&path).map_err(|_| NApiError::InvalidPath)?;
let env_shared = let env_shared =
EnvShared::new(napi_wrap, type_tag, format!("{url_filename}\0")); EnvShared::new(napi_wrap, type_tag, format!("{url_filename}\0"));
@ -565,17 +581,11 @@ where
// SAFETY: opening a DLL calls dlopen // SAFETY: opening a DLL calls dlopen
#[cfg(unix)] #[cfg(unix)]
let library = match unsafe { Library::open(Some(&path), flags) } { let library = unsafe { Library::open(Some(&path), flags) }?;
Ok(lib) => lib,
Err(e) => return Err(type_error(e.to_string())),
};
// SAFETY: opening a DLL calls dlopen // SAFETY: opening a DLL calls dlopen
#[cfg(not(unix))] #[cfg(not(unix))]
let library = match unsafe { Library::load_with_flags(&path, flags) } { let library = unsafe { Library::load_with_flags(&path, flags) }?;
Ok(lib) => lib,
Err(e) => return Err(type_error(e.to_string())),
};
let maybe_module = MODULE_TO_REGISTER.with(|cell| { let maybe_module = MODULE_TO_REGISTER.with(|cell| {
let mut slot = cell.borrow_mut(); let mut slot = cell.borrow_mut();
@ -610,10 +620,7 @@ where
// SAFETY: we are going blind, calling the register function on the other side. // SAFETY: we are going blind, calling the register function on the other side.
unsafe { init(env_ptr, exports.into()) } unsafe { init(env_ptr, exports.into()) }
} else { } else {
return Err(type_error(format!( return Err(NApiError::ModuleNotFound(path));
"Unable to find register Node-API module at {}",
path.display()
)));
}; };
let exports = maybe_exports.unwrap_or(exports.into()); let exports = maybe_exports.unwrap_or(exports.into());


@ -2,7 +2,7 @@
[package] [package]
name = "deno_net" name = "deno_net"
version = "0.164.0" version = "0.165.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
@ -21,6 +21,7 @@ pin-project.workspace = true
rustls-tokio-stream.workspace = true rustls-tokio-stream.workspace = true
serde.workspace = true serde.workspace = true
socket2.workspace = true socket2.workspace = true
thiserror.workspace = true
tokio.workspace = true tokio.workspace = true
trust-dns-proto = "0.23" trust-dns-proto = "0.23"
trust-dns-resolver = { version = "0.23", features = ["tokio-runtime", "serde-config"] } trust-dns-resolver = { version = "0.23", features = ["tokio-runtime", "serde-config"] }


@ -1,7 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use deno_core::error::generic_error; use deno_core::futures::TryFutureExt;
use deno_core::error::AnyError;
use deno_core::AsyncMutFuture; use deno_core::AsyncMutFuture;
use deno_core::AsyncRefCell; use deno_core::AsyncRefCell;
use deno_core::AsyncResult; use deno_core::AsyncResult;
@ -69,25 +68,36 @@ where
pub async fn read( pub async fn read(
self: Rc<Self>, self: Rc<Self>,
data: &mut [u8], data: &mut [u8],
) -> Result<usize, AnyError> { ) -> Result<usize, std::io::Error> {
let mut rd = self.rd_borrow_mut().await; let mut rd = self.rd_borrow_mut().await;
let nread = rd.read(data).try_or_cancel(self.cancel_handle()).await?; let nread = rd.read(data).try_or_cancel(self.cancel_handle()).await?;
Ok(nread) Ok(nread)
} }
pub async fn write(self: Rc<Self>, data: &[u8]) -> Result<usize, AnyError> { pub async fn write(
self: Rc<Self>,
data: &[u8],
) -> Result<usize, std::io::Error> {
let mut wr = self.wr_borrow_mut().await; let mut wr = self.wr_borrow_mut().await;
let nwritten = wr.write(data).await?; let nwritten = wr.write(data).await?;
Ok(nwritten) Ok(nwritten)
} }
pub async fn shutdown(self: Rc<Self>) -> Result<(), AnyError> { pub async fn shutdown(self: Rc<Self>) -> Result<(), std::io::Error> {
let mut wr = self.wr_borrow_mut().await; let mut wr = self.wr_borrow_mut().await;
wr.shutdown().await?; wr.shutdown().await?;
Ok(()) Ok(())
} }
} }
#[derive(Debug, thiserror::Error)]
pub enum MapError {
#[error("{0}")]
Io(std::io::Error),
#[error("Unable to get resources")]
NoResources,
}
pub type TcpStreamResource = pub type TcpStreamResource =
FullDuplexResource<tcp::OwnedReadHalf, tcp::OwnedWriteHalf>; FullDuplexResource<tcp::OwnedReadHalf, tcp::OwnedWriteHalf>;
@ -100,7 +110,7 @@ impl Resource for TcpStreamResource {
} }
fn shutdown(self: Rc<Self>) -> AsyncResult<()> { fn shutdown(self: Rc<Self>) -> AsyncResult<()> {
Box::pin(self.shutdown()) Box::pin(self.shutdown().map_err(Into::into))
} }
fn close(self: Rc<Self>) { fn close(self: Rc<Self>) {
@ -109,31 +119,30 @@ impl Resource for TcpStreamResource {
} }
impl TcpStreamResource { impl TcpStreamResource {
pub fn set_nodelay(self: Rc<Self>, nodelay: bool) -> Result<(), AnyError> { pub fn set_nodelay(self: Rc<Self>, nodelay: bool) -> Result<(), MapError> {
self.map_socket(Box::new(move |socket| Ok(socket.set_nodelay(nodelay)?))) self.map_socket(Box::new(move |socket| socket.set_nodelay(nodelay)))
} }
pub fn set_keepalive( pub fn set_keepalive(
self: Rc<Self>, self: Rc<Self>,
keepalive: bool, keepalive: bool,
) -> Result<(), AnyError> { ) -> Result<(), MapError> {
self self.map_socket(Box::new(move |socket| socket.set_keepalive(keepalive)))
.map_socket(Box::new(move |socket| Ok(socket.set_keepalive(keepalive)?)))
} }
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
fn map_socket( fn map_socket(
self: Rc<Self>, self: Rc<Self>,
map: Box<dyn FnOnce(SockRef) -> Result<(), AnyError>>, map: Box<dyn FnOnce(SockRef) -> Result<(), std::io::Error>>,
) -> Result<(), AnyError> { ) -> Result<(), MapError> {
if let Some(wr) = RcRef::map(self, |r| &r.wr).try_borrow() { if let Some(wr) = RcRef::map(self, |r| &r.wr).try_borrow() {
let stream = wr.as_ref().as_ref(); let stream = wr.as_ref().as_ref();
let socket = socket2::SockRef::from(stream); let socket = socket2::SockRef::from(stream);
return map(socket); return map(socket).map_err(MapError::Io);
} }
Err(generic_error("Unable to get resources")) Err(MapError::NoResources)
} }
} }
@ -153,7 +162,9 @@ impl UnixStreamResource {
unreachable!() unreachable!()
} }
#[allow(clippy::unused_async)] #[allow(clippy::unused_async)]
pub async fn shutdown(self: Rc<Self>) -> Result<(), AnyError> { pub async fn shutdown(
self: Rc<Self>,
) -> Result<(), deno_core::error::AnyError> {
unreachable!() unreachable!()
} }
pub fn cancel_read_ops(&self) { pub fn cancel_read_ops(&self) {
@ -170,7 +181,7 @@ impl Resource for UnixStreamResource {
} }
fn shutdown(self: Rc<Self>) -> AsyncResult<()> { fn shutdown(self: Rc<Self>) -> AsyncResult<()> {
Box::pin(self.shutdown()) Box::pin(self.shutdown().map_err(Into::into))
} }
fn close(self: Rc<Self>) { fn close(self: Rc<Self>) {
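
The io.rs hunks above narrow the concrete read/write/shutdown methods to std::io::Error (and the new MapError), while the Resource trait still works in terms of deno_core's AnyError; the two meet at the trait boundary via map_err(Into::into). A small sketch of that boundary conversion follows; anyhow::Error stands in for AnyError purely to keep the example self-contained, which is an assumption rather than the crate's actual setup.

// Sketch only: anyhow::Error stands in for deno_core::error::AnyError.
type AnyError = anyhow::Error;

struct Stream;

impl Stream {
  // The concrete method now reports a precise error type...
  fn shutdown(&self) -> Result<(), std::io::Error> {
    Ok(())
  }
}

// ...and the AnyError-facing surface widens it at the boundary,
// mirroring Box::pin(self.shutdown().map_err(Into::into)) in the diff.
fn shutdown_resource(s: &Stream) -> Result<(), AnyError> {
  s.shutdown().map_err(Into::into)
}

fn main() -> Result<(), AnyError> {
  shutdown_resource(&Stream)
}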

View file

@ -6,10 +6,6 @@ use crate::resolve_addr::resolve_addr;
use crate::resolve_addr::resolve_addr_sync; use crate::resolve_addr::resolve_addr_sync;
use crate::tcp::TcpListener; use crate::tcp::TcpListener;
use crate::NetPermissions; use crate::NetPermissions;
use deno_core::error::bad_resource;
use deno_core::error::custom_error;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::op2; use deno_core::op2;
use deno_core::CancelFuture; use deno_core::CancelFuture;
@ -43,6 +39,7 @@ use trust_dns_proto::rr::record_type::RecordType;
use trust_dns_resolver::config::NameServerConfigGroup; use trust_dns_resolver::config::NameServerConfigGroup;
use trust_dns_resolver::config::ResolverConfig; use trust_dns_resolver::config::ResolverConfig;
use trust_dns_resolver::config::ResolverOpts; use trust_dns_resolver::config::ResolverOpts;
use trust_dns_resolver::error::ResolveError;
use trust_dns_resolver::error::ResolveErrorKind; use trust_dns_resolver::error::ResolveErrorKind;
use trust_dns_resolver::system_conf; use trust_dns_resolver::system_conf;
use trust_dns_resolver::AsyncResolver; use trust_dns_resolver::AsyncResolver;
@ -68,11 +65,69 @@ impl From<SocketAddr> for IpAddr {
} }
} }
pub(crate) fn accept_err(e: std::io::Error) -> AnyError { #[derive(Debug, thiserror::Error)]
pub enum NetError {
#[error("Listener has been closed")]
ListenerClosed,
#[error("Listener already in use")]
ListenerBusy,
#[error("Socket has been closed")]
SocketClosed,
#[error("Socket has been closed")]
SocketClosedNotConnected,
#[error("Socket already in use")]
SocketBusy,
#[error("{0}")]
Io(#[from] std::io::Error),
#[error("Another accept task is ongoing")]
AcceptTaskOngoing,
#[error("{0}")]
Permission(deno_core::error::AnyError),
#[error("{0}")]
Resource(deno_core::error::AnyError),
#[error("No resolved address found")]
NoResolvedAddress,
#[error("{0}")]
AddrParse(#[from] std::net::AddrParseError),
#[error("{0}")]
Map(crate::io::MapError),
#[error("{0}")]
Canceled(#[from] deno_core::Canceled),
#[error("{0}")]
DnsNotFound(ResolveError),
#[error("{0}")]
DnsNotConnected(ResolveError),
#[error("{0}")]
DnsTimedOut(ResolveError),
#[error("{0}")]
Dns(#[from] ResolveError),
#[error("Provided record type is not supported")]
UnsupportedRecordType,
#[error("File name or path {0:?} is not valid UTF-8")]
InvalidUtf8(std::ffi::OsString),
#[error("unexpected key type")]
UnexpectedKeyType,
#[error("Invalid hostname: '{0}'")]
InvalidHostname(String), // TypeError
#[error("TCP stream is currently in use")]
TcpStreamBusy,
#[error("{0}")]
Rustls(#[from] deno_tls::rustls::Error),
#[error("{0}")]
Tls(#[from] deno_tls::TlsError),
#[error("Error creating TLS certificate: Deno.listenTls requires a key")]
ListenTlsRequiresKey, // InvalidData
#[error("{0}")]
RootCertStore(deno_core::anyhow::Error),
#[error("{0}")]
Reunite(tokio::net::tcp::ReuniteError),
}
pub(crate) fn accept_err(e: std::io::Error) -> NetError {
if let std::io::ErrorKind::Interrupted = e.kind() { if let std::io::ErrorKind::Interrupted = e.kind() {
bad_resource("Listener has been closed") NetError::ListenerClosed
} else { } else {
e.into() NetError::Io(e)
} }
} }
@ -81,15 +136,15 @@ pub(crate) fn accept_err(e: std::io::Error) -> AnyError {
pub async fn op_net_accept_tcp( pub async fn op_net_accept_tcp(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> { ) -> Result<(ResourceId, IpAddr, IpAddr), NetError> {
let resource = state let resource = state
.borrow() .borrow()
.resource_table .resource_table
.get::<NetworkListenerResource<TcpListener>>(rid) .get::<NetworkListenerResource<TcpListener>>(rid)
.map_err(|_| bad_resource("Listener has been closed"))?; .map_err(|_| NetError::ListenerClosed)?;
let listener = RcRef::map(&resource, |r| &r.listener) let listener = RcRef::map(&resource, |r| &r.listener)
.try_borrow_mut() .try_borrow_mut()
.ok_or_else(|| custom_error("Busy", "Another accept task is ongoing"))?; .ok_or_else(|| NetError::AcceptTaskOngoing)?;
let cancel = RcRef::map(resource, |r| &r.cancel); let cancel = RcRef::map(resource, |r| &r.cancel);
let (tcp_stream, _socket_addr) = listener let (tcp_stream, _socket_addr) = listener
.accept() .accept()
@ -112,12 +167,12 @@ pub async fn op_net_recv_udp(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[buffer] mut buf: JsBuffer, #[buffer] mut buf: JsBuffer,
) -> Result<(usize, IpAddr), AnyError> { ) -> Result<(usize, IpAddr), NetError> {
let resource = state let resource = state
.borrow_mut() .borrow_mut()
.resource_table .resource_table
.get::<UdpSocketResource>(rid) .get::<UdpSocketResource>(rid)
.map_err(|_| bad_resource("Socket has been closed"))?; .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let cancel_handle = RcRef::map(&resource, |r| &r.cancel); let cancel_handle = RcRef::map(&resource, |r| &r.cancel);
let (nread, remote_addr) = socket let (nread, remote_addr) = socket
@ -134,27 +189,29 @@ pub async fn op_net_send_udp<NP>(
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[serde] addr: IpAddr, #[serde] addr: IpAddr,
#[buffer] zero_copy: JsBuffer, #[buffer] zero_copy: JsBuffer,
) -> Result<usize, AnyError> ) -> Result<usize, NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
{ {
let mut s = state.borrow_mut(); let mut s = state.borrow_mut();
s.borrow_mut::<NP>().check_net( s.borrow_mut::<NP>()
&(&addr.hostname, Some(addr.port)), .check_net(
"Deno.DatagramConn.send()", &(&addr.hostname, Some(addr.port)),
)?; "Deno.DatagramConn.send()",
)
.map_err(NetError::Permission)?;
} }
let addr = resolve_addr(&addr.hostname, addr.port) let addr = resolve_addr(&addr.hostname, addr.port)
.await? .await?
.next() .next()
.ok_or_else(|| generic_error("No resolved address found"))?; .ok_or(NetError::NoResolvedAddress)?;
let resource = state let resource = state
.borrow_mut() .borrow_mut()
.resource_table .resource_table
.get::<UdpSocketResource>(rid) .get::<UdpSocketResource>(rid)
.map_err(|_| bad_resource("Socket has been closed"))?; .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let nwritten = socket.send_to(&zero_copy, &addr).await?; let nwritten = socket.send_to(&zero_copy, &addr).await?;
@ -167,12 +224,12 @@ pub async fn op_net_join_multi_v4_udp(
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[string] address: String, #[string] address: String,
#[string] multi_interface: String, #[string] multi_interface: String,
) -> Result<(), AnyError> { ) -> Result<(), NetError> {
let resource = state let resource = state
.borrow_mut() .borrow_mut()
.resource_table .resource_table
.get::<UdpSocketResource>(rid) .get::<UdpSocketResource>(rid)
.map_err(|_| bad_resource("Socket has been closed"))?; .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let addr = Ipv4Addr::from_str(address.as_str())?; let addr = Ipv4Addr::from_str(address.as_str())?;
@ -189,12 +246,12 @@ pub async fn op_net_join_multi_v6_udp(
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[string] address: String, #[string] address: String,
#[smi] multi_interface: u32, #[smi] multi_interface: u32,
) -> Result<(), AnyError> { ) -> Result<(), NetError> {
let resource = state let resource = state
.borrow_mut() .borrow_mut()
.resource_table .resource_table
.get::<UdpSocketResource>(rid) .get::<UdpSocketResource>(rid)
.map_err(|_| bad_resource("Socket has been closed"))?; .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let addr = Ipv6Addr::from_str(address.as_str())?; let addr = Ipv6Addr::from_str(address.as_str())?;
@ -210,12 +267,12 @@ pub async fn op_net_leave_multi_v4_udp(
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[string] address: String, #[string] address: String,
#[string] multi_interface: String, #[string] multi_interface: String,
) -> Result<(), AnyError> { ) -> Result<(), NetError> {
let resource = state let resource = state
.borrow_mut() .borrow_mut()
.resource_table .resource_table
.get::<UdpSocketResource>(rid) .get::<UdpSocketResource>(rid)
.map_err(|_| bad_resource("Socket has been closed"))?; .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let addr = Ipv4Addr::from_str(address.as_str())?; let addr = Ipv4Addr::from_str(address.as_str())?;
@ -232,12 +289,12 @@ pub async fn op_net_leave_multi_v6_udp(
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[string] address: String, #[string] address: String,
#[smi] multi_interface: u32, #[smi] multi_interface: u32,
) -> Result<(), AnyError> { ) -> Result<(), NetError> {
let resource = state let resource = state
.borrow_mut() .borrow_mut()
.resource_table .resource_table
.get::<UdpSocketResource>(rid) .get::<UdpSocketResource>(rid)
.map_err(|_| bad_resource("Socket has been closed"))?; .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let addr = Ipv6Addr::from_str(address.as_str())?; let addr = Ipv6Addr::from_str(address.as_str())?;
@ -253,16 +310,16 @@ pub async fn op_net_set_multi_loopback_udp(
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
is_v4_membership: bool, is_v4_membership: bool,
loopback: bool, loopback: bool,
) -> Result<(), AnyError> { ) -> Result<(), NetError> {
let resource = state let resource = state
.borrow_mut() .borrow_mut()
.resource_table .resource_table
.get::<UdpSocketResource>(rid) .get::<UdpSocketResource>(rid)
.map_err(|_| bad_resource("Socket has been closed"))?; .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
if is_v4_membership { if is_v4_membership {
socket.set_multicast_loop_v4(loopback)? socket.set_multicast_loop_v4(loopback)?;
} else { } else {
socket.set_multicast_loop_v6(loopback)?; socket.set_multicast_loop_v6(loopback)?;
} }
@ -275,12 +332,12 @@ pub async fn op_net_set_multi_ttl_udp(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[smi] ttl: u32, #[smi] ttl: u32,
) -> Result<(), AnyError> { ) -> Result<(), NetError> {
let resource = state let resource = state
.borrow_mut() .borrow_mut()
.resource_table .resource_table
.get::<UdpSocketResource>(rid) .get::<UdpSocketResource>(rid)
.map_err(|_| bad_resource("Socket has been closed"))?; .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
socket.set_multicast_ttl_v4(ttl)?; socket.set_multicast_ttl_v4(ttl)?;
@ -293,7 +350,7 @@ pub async fn op_net_set_multi_ttl_udp(
pub async fn op_net_connect_tcp<NP>( pub async fn op_net_connect_tcp<NP>(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[serde] addr: IpAddr, #[serde] addr: IpAddr,
) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> ) -> Result<(ResourceId, IpAddr, IpAddr), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
@ -304,7 +361,7 @@ where
pub async fn op_net_connect_tcp_inner<NP>( pub async fn op_net_connect_tcp_inner<NP>(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
addr: IpAddr, addr: IpAddr,
) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> ) -> Result<(ResourceId, IpAddr, IpAddr), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
@ -312,13 +369,14 @@ where
let mut state_ = state.borrow_mut(); let mut state_ = state.borrow_mut();
state_ state_
.borrow_mut::<NP>() .borrow_mut::<NP>()
.check_net(&(&addr.hostname, Some(addr.port)), "Deno.connect()")?; .check_net(&(&addr.hostname, Some(addr.port)), "Deno.connect()")
.map_err(NetError::Permission)?;
} }
let addr = resolve_addr(&addr.hostname, addr.port) let addr = resolve_addr(&addr.hostname, addr.port)
.await? .await?
.next() .next()
.ok_or_else(|| generic_error("No resolved address found"))?; .ok_or_else(|| NetError::NoResolvedAddress)?;
let tcp_stream = TcpStream::connect(&addr).await?; let tcp_stream = TcpStream::connect(&addr).await?;
let local_addr = tcp_stream.local_addr()?; let local_addr = tcp_stream.local_addr()?;
let remote_addr = tcp_stream.peer_addr()?; let remote_addr = tcp_stream.peer_addr()?;
@ -353,7 +411,7 @@ pub fn op_net_listen_tcp<NP>(
#[serde] addr: IpAddr, #[serde] addr: IpAddr,
reuse_port: bool, reuse_port: bool,
load_balanced: bool, load_balanced: bool,
) -> Result<(ResourceId, IpAddr), AnyError> ) -> Result<(ResourceId, IpAddr), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
@ -362,10 +420,11 @@ where
} }
state state
.borrow_mut::<NP>() .borrow_mut::<NP>()
.check_net(&(&addr.hostname, Some(addr.port)), "Deno.listen()")?; .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listen()")
.map_err(NetError::Permission)?;
let addr = resolve_addr_sync(&addr.hostname, addr.port)? let addr = resolve_addr_sync(&addr.hostname, addr.port)?
.next() .next()
.ok_or_else(|| generic_error("No resolved address found"))?; .ok_or_else(|| NetError::NoResolvedAddress)?;
let listener = if load_balanced { let listener = if load_balanced {
TcpListener::bind_load_balanced(addr) TcpListener::bind_load_balanced(addr)
@ -384,16 +443,17 @@ fn net_listen_udp<NP>(
addr: IpAddr, addr: IpAddr,
reuse_address: bool, reuse_address: bool,
loopback: bool, loopback: bool,
) -> Result<(ResourceId, IpAddr), AnyError> ) -> Result<(ResourceId, IpAddr), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
state state
.borrow_mut::<NP>() .borrow_mut::<NP>()
.check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenDatagram()")?; .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenDatagram()")
.map_err(NetError::Permission)?;
let addr = resolve_addr_sync(&addr.hostname, addr.port)? let addr = resolve_addr_sync(&addr.hostname, addr.port)?
.next() .next()
.ok_or_else(|| generic_error("No resolved address found"))?; .ok_or_else(|| NetError::NoResolvedAddress)?;
let domain = if addr.is_ipv4() { let domain = if addr.is_ipv4() {
Domain::IPV4 Domain::IPV4
@ -453,7 +513,7 @@ pub fn op_net_listen_udp<NP>(
#[serde] addr: IpAddr, #[serde] addr: IpAddr,
reuse_address: bool, reuse_address: bool,
loopback: bool, loopback: bool,
) -> Result<(ResourceId, IpAddr), AnyError> ) -> Result<(ResourceId, IpAddr), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
@ -468,7 +528,7 @@ pub fn op_node_unstable_net_listen_udp<NP>(
#[serde] addr: IpAddr, #[serde] addr: IpAddr,
reuse_address: bool, reuse_address: bool,
loopback: bool, loopback: bool,
) -> Result<(ResourceId, IpAddr), AnyError> ) -> Result<(ResourceId, IpAddr), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
@ -551,7 +611,7 @@ pub struct NameServer {
pub async fn op_dns_resolve<NP>( pub async fn op_dns_resolve<NP>(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[serde] args: ResolveAddrArgs, #[serde] args: ResolveAddrArgs,
) -> Result<Vec<DnsReturnRecord>, AnyError> ) -> Result<Vec<DnsReturnRecord>, NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
@ -587,7 +647,9 @@ where
let socker_addr = &ns.socket_addr; let socker_addr = &ns.socket_addr;
let ip = socker_addr.ip().to_string(); let ip = socker_addr.ip().to_string();
let port = socker_addr.port(); let port = socker_addr.port();
perm.check_net(&(ip, Some(port)), "Deno.resolveDns()")?; perm
.check_net(&(ip, Some(port)), "Deno.resolveDns()")
.map_err(NetError::Permission)?;
} }
} }
@ -618,22 +680,17 @@ where
}; };
lookup lookup
.map_err(|e| { .map_err(|e| match e.kind() {
let message = format!("{e}"); ResolveErrorKind::NoRecordsFound { .. } => NetError::DnsNotFound(e),
match e.kind() { ResolveErrorKind::Message("No connections available") => {
ResolveErrorKind::NoRecordsFound { .. } => { NetError::DnsNotConnected(e)
custom_error("NotFound", message)
}
ResolveErrorKind::Message("No connections available") => {
custom_error("NotConnected", message)
}
ResolveErrorKind::Timeout => custom_error("TimedOut", message),
_ => generic_error(message),
} }
ResolveErrorKind::Timeout => NetError::DnsTimedOut(e),
_ => NetError::Dns(e),
})? })?
.iter() .iter()
.filter_map(|rdata| rdata_to_return_record(record_type)(rdata).transpose()) .filter_map(|rdata| rdata_to_return_record(record_type)(rdata).transpose())
.collect::<Result<Vec<DnsReturnRecord>, AnyError>>() .collect::<Result<Vec<DnsReturnRecord>, NetError>>()
} }
#[op2(fast)] #[op2(fast)]
@ -641,7 +698,7 @@ pub fn op_set_nodelay(
state: &mut OpState, state: &mut OpState,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
nodelay: bool, nodelay: bool,
) -> Result<(), AnyError> { ) -> Result<(), NetError> {
op_set_nodelay_inner(state, rid, nodelay) op_set_nodelay_inner(state, rid, nodelay)
} }
@ -650,10 +707,12 @@ pub fn op_set_nodelay_inner(
state: &mut OpState, state: &mut OpState,
rid: ResourceId, rid: ResourceId,
nodelay: bool, nodelay: bool,
) -> Result<(), AnyError> { ) -> Result<(), NetError> {
let resource: Rc<TcpStreamResource> = let resource: Rc<TcpStreamResource> = state
state.resource_table.get::<TcpStreamResource>(rid)?; .resource_table
resource.set_nodelay(nodelay) .get::<TcpStreamResource>(rid)
.map_err(NetError::Resource)?;
resource.set_nodelay(nodelay).map_err(NetError::Map)
} }
#[op2(fast)] #[op2(fast)]
@ -661,7 +720,7 @@ pub fn op_set_keepalive(
state: &mut OpState, state: &mut OpState,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
keepalive: bool, keepalive: bool,
) -> Result<(), AnyError> { ) -> Result<(), NetError> {
op_set_keepalive_inner(state, rid, keepalive) op_set_keepalive_inner(state, rid, keepalive)
} }
@ -670,17 +729,19 @@ pub fn op_set_keepalive_inner(
state: &mut OpState, state: &mut OpState,
rid: ResourceId, rid: ResourceId,
keepalive: bool, keepalive: bool,
) -> Result<(), AnyError> { ) -> Result<(), NetError> {
let resource: Rc<TcpStreamResource> = let resource: Rc<TcpStreamResource> = state
state.resource_table.get::<TcpStreamResource>(rid)?; .resource_table
resource.set_keepalive(keepalive) .get::<TcpStreamResource>(rid)
.map_err(NetError::Resource)?;
resource.set_keepalive(keepalive).map_err(NetError::Map)
} }
fn rdata_to_return_record( fn rdata_to_return_record(
ty: RecordType, ty: RecordType,
) -> impl Fn(&RData) -> Result<Option<DnsReturnRecord>, AnyError> { ) -> impl Fn(&RData) -> Result<Option<DnsReturnRecord>, NetError> {
use RecordType::*; use RecordType::*;
move |r: &RData| -> Result<Option<DnsReturnRecord>, AnyError> { move |r: &RData| -> Result<Option<DnsReturnRecord>, NetError> {
let record = match ty { let record = match ty {
A => r.as_a().map(ToString::to_string).map(DnsReturnRecord::A), A => r.as_a().map(ToString::to_string).map(DnsReturnRecord::A),
AAAA => r AAAA => r
@ -761,12 +822,7 @@ fn rdata_to_return_record(
.collect(); .collect();
DnsReturnRecord::Txt(texts) DnsReturnRecord::Txt(texts)
}), }),
_ => { _ => return Err(NetError::UnsupportedRecordType),
return Err(custom_error(
"NotSupported",
"Provided record type is not supported",
))
}
}; };
Ok(record) Ok(record)
} }
@ -985,7 +1041,7 @@ mod tests {
&mut self, &mut self,
_host: &(T, Option<u16>), _host: &(T, Option<u16>),
_api_name: &str, _api_name: &str,
) -> Result<(), AnyError> { ) -> Result<(), deno_core::error::AnyError> {
Ok(()) Ok(())
} }
@ -993,7 +1049,7 @@ mod tests {
&mut self, &mut self,
p: &str, p: &str,
_api_name: &str, _api_name: &str,
) -> Result<PathBuf, AnyError> { ) -> Result<PathBuf, deno_core::error::AnyError> {
Ok(PathBuf::from(p)) Ok(PathBuf::from(p))
} }
@ -1001,7 +1057,7 @@ mod tests {
&mut self, &mut self,
p: &str, p: &str,
_api_name: &str, _api_name: &str,
) -> Result<PathBuf, AnyError> { ) -> Result<PathBuf, deno_core::error::AnyError> {
Ok(PathBuf::from(p)) Ok(PathBuf::from(p))
} }
@ -1009,7 +1065,7 @@ mod tests {
&mut self, &mut self,
p: &'a Path, p: &'a Path,
_api_name: &str, _api_name: &str,
) -> Result<Cow<'a, Path>, AnyError> { ) -> Result<Cow<'a, Path>, deno_core::error::AnyError> {
Ok(Cow::Borrowed(p)) Ok(Cow::Borrowed(p))
} }
} }
@ -1091,7 +1147,7 @@ mod tests {
let vals = result.unwrap(); let vals = result.unwrap();
rid = rid.or(Some(vals.0)); rid = rid.or(Some(vals.0));
} }
}; }
let rid = rid.unwrap(); let rid = rid.unwrap();
let state = runtime.op_state(); let state = runtime.op_state();
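
One detail worth noting in the ops.rs hunks above: variants whose source type occurs in exactly one place (Io, AddrParse, Canceled, Dns, ...) use #[from], letting the ? operator convert automatically, whereas Permission and Resource both wrap the same deno_core AnyError, so a From impl cannot be derived for them and call sites convert explicitly with map_err(NetError::Permission) or map_err(NetError::Resource). A minimal sketch of that distinction with an illustrative enum (not the real NetError), assuming thiserror as a dependency:

// Illustrative only; not the actual NetError from this commit.
#[derive(Debug, thiserror::Error)]
enum NetLikeError {
  // std::io::Error appears in exactly one variant, so #[from] is derivable
  // and the ? operator converts automatically.
  #[error("{0}")]
  Io(#[from] std::io::Error),
  // These two wrap the same inner type, so no From impl can be derived;
  // call sites map into them explicitly.
  #[error("{0}")]
  Permission(String),
  #[error("{0}")]
  Resource(String),
}

fn read_config(path: &str) -> Result<Vec<u8>, NetLikeError> {
  // Converts through the derived From<std::io::Error> impl.
  Ok(std::fs::read(path)?)
}

fn check_permission(allowed: bool) -> Result<(), NetLikeError> {
  if allowed {
    Ok(())
  } else {
    // Explicit mapping, mirroring .map_err(NetError::Permission) in the diff.
    Err(NetLikeError::Permission("net access denied".to_string()))
  }
}

fn main() -> Result<(), NetLikeError> {
  check_permission(true)?;
  let _bytes = read_config("Cargo.toml").unwrap_or_default();
  Ok(())
}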


@ -2,6 +2,7 @@
use crate::io::TcpStreamResource; use crate::io::TcpStreamResource;
use crate::ops::IpAddr; use crate::ops::IpAddr;
use crate::ops::NetError;
use crate::ops::TlsHandshakeInfo; use crate::ops::TlsHandshakeInfo;
use crate::raw::NetworkListenerResource; use crate::raw::NetworkListenerResource;
use crate::resolve_addr::resolve_addr; use crate::resolve_addr::resolve_addr;
@ -10,13 +11,7 @@ use crate::tcp::TcpListener;
use crate::DefaultTlsOptions; use crate::DefaultTlsOptions;
use crate::NetPermissions; use crate::NetPermissions;
use crate::UnsafelyIgnoreCertificateErrors; use crate::UnsafelyIgnoreCertificateErrors;
use deno_core::anyhow::anyhow; use deno_core::futures::TryFutureExt;
use deno_core::anyhow::bail;
use deno_core::error::bad_resource;
use deno_core::error::custom_error;
use deno_core::error::generic_error;
use deno_core::error::invalid_hostname;
use deno_core::error::AnyError;
use deno_core::op2; use deno_core::op2;
use deno_core::v8; use deno_core::v8;
use deno_core::AsyncRefCell; use deno_core::AsyncRefCell;
@ -118,20 +113,23 @@ impl TlsStreamResource {
pub async fn read( pub async fn read(
self: Rc<Self>, self: Rc<Self>,
data: &mut [u8], data: &mut [u8],
) -> Result<usize, AnyError> { ) -> Result<usize, std::io::Error> {
let mut rd = RcRef::map(&self, |r| &r.rd).borrow_mut().await; let mut rd = RcRef::map(&self, |r| &r.rd).borrow_mut().await;
let cancel_handle = RcRef::map(&self, |r| &r.cancel_handle); let cancel_handle = RcRef::map(&self, |r| &r.cancel_handle);
Ok(rd.read(data).try_or_cancel(cancel_handle).await?) rd.read(data).try_or_cancel(cancel_handle).await
} }
pub async fn write(self: Rc<Self>, data: &[u8]) -> Result<usize, AnyError> { pub async fn write(
self: Rc<Self>,
data: &[u8],
) -> Result<usize, std::io::Error> {
let mut wr = RcRef::map(self, |r| &r.wr).borrow_mut().await; let mut wr = RcRef::map(self, |r| &r.wr).borrow_mut().await;
let nwritten = wr.write(data).await?; let nwritten = wr.write(data).await?;
wr.flush().await?; wr.flush().await?;
Ok(nwritten) Ok(nwritten)
} }
pub async fn shutdown(self: Rc<Self>) -> Result<(), AnyError> { pub async fn shutdown(self: Rc<Self>) -> Result<(), std::io::Error> {
let mut wr = RcRef::map(self, |r| &r.wr).borrow_mut().await; let mut wr = RcRef::map(self, |r| &r.wr).borrow_mut().await;
wr.shutdown().await?; wr.shutdown().await?;
Ok(()) Ok(())
@ -139,7 +137,7 @@ impl TlsStreamResource {
pub async fn handshake( pub async fn handshake(
self: &Rc<Self>, self: &Rc<Self>,
) -> Result<TlsHandshakeInfo, AnyError> { ) -> Result<TlsHandshakeInfo, std::io::Error> {
if let Some(tls_info) = &*self.handshake_info.borrow() { if let Some(tls_info) = &*self.handshake_info.borrow() {
return Ok(tls_info.clone()); return Ok(tls_info.clone());
} }
@ -164,7 +162,7 @@ impl Resource for TlsStreamResource {
} }
fn shutdown(self: Rc<Self>) -> AsyncResult<()> { fn shutdown(self: Rc<Self>) -> AsyncResult<()> {
Box::pin(self.shutdown()) Box::pin(self.shutdown().map_err(Into::into))
} }
fn close(self: Rc<Self>) { fn close(self: Rc<Self>) {
@ -201,7 +199,7 @@ pub fn op_tls_key_null() -> TlsKeysHolder {
pub fn op_tls_key_static( pub fn op_tls_key_static(
#[string] cert: &str, #[string] cert: &str,
#[string] key: &str, #[string] key: &str,
) -> Result<TlsKeysHolder, AnyError> { ) -> Result<TlsKeysHolder, deno_tls::TlsError> {
let cert = load_certs(&mut BufReader::new(cert.as_bytes()))?; let cert = load_certs(&mut BufReader::new(cert.as_bytes()))?;
let key = load_private_keys(key.as_bytes())? let key = load_private_keys(key.as_bytes())?
.into_iter() .into_iter()
@ -236,9 +234,9 @@ pub fn op_tls_cert_resolver_resolve(
#[cppgc] lookup: &TlsKeyLookup, #[cppgc] lookup: &TlsKeyLookup,
#[string] sni: String, #[string] sni: String,
#[cppgc] key: &TlsKeysHolder, #[cppgc] key: &TlsKeysHolder,
) -> Result<(), AnyError> { ) -> Result<(), NetError> {
let TlsKeys::Static(key) = key.take() else { let TlsKeys::Static(key) = key.take() else {
bail!("unexpected key type"); return Err(NetError::UnexpectedKeyType);
}; };
lookup.resolve(sni, Ok(key)); lookup.resolve(sni, Ok(key));
Ok(()) Ok(())
@ -258,7 +256,7 @@ pub fn op_tls_cert_resolver_resolve_error(
pub fn op_tls_start<NP>( pub fn op_tls_start<NP>(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[serde] args: StartTlsArgs, #[serde] args: StartTlsArgs,
) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> ) -> Result<(ResourceId, IpAddr, IpAddr), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
@ -271,7 +269,9 @@ where
{ {
let mut s = state.borrow_mut(); let mut s = state.borrow_mut();
let permissions = s.borrow_mut::<NP>(); let permissions = s.borrow_mut::<NP>();
permissions.check_net(&(&hostname, Some(0)), "Deno.startTls()")?; permissions
.check_net(&(&hostname, Some(0)), "Deno.startTls()")
.map_err(NetError::Permission)?;
} }
let ca_certs = args let ca_certs = args
@ -281,7 +281,7 @@ where
.collect::<Vec<_>>(); .collect::<Vec<_>>();
let hostname_dns = ServerName::try_from(hostname.to_string()) let hostname_dns = ServerName::try_from(hostname.to_string())
.map_err(|_| invalid_hostname(&hostname))?; .map_err(|_| NetError::InvalidHostname(hostname))?;
let unsafely_ignore_certificate_errors = state let unsafely_ignore_certificate_errors = state
.borrow() .borrow()
@ -291,19 +291,21 @@ where
let root_cert_store = state let root_cert_store = state
.borrow() .borrow()
.borrow::<DefaultTlsOptions>() .borrow::<DefaultTlsOptions>()
.root_cert_store()?; .root_cert_store()
.map_err(NetError::RootCertStore)?;
let resource_rc = state let resource_rc = state
.borrow_mut() .borrow_mut()
.resource_table .resource_table
.take::<TcpStreamResource>(rid)?; .take::<TcpStreamResource>(rid)
.map_err(NetError::Resource)?;
// This TCP connection might be used somewhere else. If it's the case, we cannot proceed with the // This TCP connection might be used somewhere else. If it's the case, we cannot proceed with the
// process of starting a TLS connection on top of this TCP connection, so we just return a Busy error. // process of starting a TLS connection on top of this TCP connection, so we just return a Busy error.
// See also: https://github.com/denoland/deno/pull/16242 // See also: https://github.com/denoland/deno/pull/16242
let resource = Rc::try_unwrap(resource_rc) let resource =
.map_err(|_| custom_error("Busy", "TCP stream is currently in use"))?; Rc::try_unwrap(resource_rc).map_err(|_| NetError::TcpStreamBusy)?;
let (read_half, write_half) = resource.into_inner(); let (read_half, write_half) = resource.into_inner();
let tcp_stream = read_half.reunite(write_half)?; let tcp_stream = read_half.reunite(write_half).map_err(NetError::Reunite)?;
let local_addr = tcp_stream.local_addr()?; let local_addr = tcp_stream.local_addr()?;
let remote_addr = tcp_stream.peer_addr()?; let remote_addr = tcp_stream.peer_addr()?;
@ -345,7 +347,7 @@ pub async fn op_net_connect_tls<NP>(
#[serde] addr: IpAddr, #[serde] addr: IpAddr,
#[serde] args: ConnectTlsArgs, #[serde] args: ConnectTlsArgs,
#[cppgc] key_pair: &TlsKeysHolder, #[cppgc] key_pair: &TlsKeysHolder,
) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> ) -> Result<(ResourceId, IpAddr, IpAddr), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
@ -359,9 +361,14 @@ where
let mut s = state.borrow_mut(); let mut s = state.borrow_mut();
let permissions = s.borrow_mut::<NP>(); let permissions = s.borrow_mut::<NP>();
permissions permissions
.check_net(&(&addr.hostname, Some(addr.port)), "Deno.connectTls()")?; .check_net(&(&addr.hostname, Some(addr.port)), "Deno.connectTls()")
.map_err(NetError::Permission)?;
if let Some(path) = cert_file { if let Some(path) = cert_file {
Some(permissions.check_read(path, "Deno.connectTls()")?) Some(
permissions
.check_read(path, "Deno.connectTls()")
.map_err(NetError::Permission)?,
)
} else { } else {
None None
} }
@ -382,17 +389,18 @@ where
let root_cert_store = state let root_cert_store = state
.borrow() .borrow()
.borrow::<DefaultTlsOptions>() .borrow::<DefaultTlsOptions>()
.root_cert_store()?; .root_cert_store()
.map_err(NetError::RootCertStore)?;
let hostname_dns = if let Some(server_name) = args.server_name { let hostname_dns = if let Some(server_name) = args.server_name {
ServerName::try_from(server_name) ServerName::try_from(server_name)
} else { } else {
ServerName::try_from(addr.hostname.clone()) ServerName::try_from(addr.hostname.clone())
} }
.map_err(|_| invalid_hostname(&addr.hostname))?; .map_err(|_| NetError::InvalidHostname(addr.hostname.clone()))?;
let connect_addr = resolve_addr(&addr.hostname, addr.port) let connect_addr = resolve_addr(&addr.hostname, addr.port)
.await? .await?
.next() .next()
.ok_or_else(|| generic_error("No resolved address found"))?; .ok_or_else(|| NetError::NoResolvedAddress)?;
let tcp_stream = TcpStream::connect(connect_addr).await?; let tcp_stream = TcpStream::connect(connect_addr).await?;
let local_addr = tcp_stream.local_addr()?; let local_addr = tcp_stream.local_addr()?;
let remote_addr = tcp_stream.peer_addr()?; let remote_addr = tcp_stream.peer_addr()?;
@ -444,7 +452,7 @@ pub fn op_net_listen_tls<NP>(
#[serde] addr: IpAddr, #[serde] addr: IpAddr,
#[serde] args: ListenTlsArgs, #[serde] args: ListenTlsArgs,
#[cppgc] keys: &TlsKeysHolder, #[cppgc] keys: &TlsKeysHolder,
) -> Result<(ResourceId, IpAddr), AnyError> ) -> Result<(ResourceId, IpAddr), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
@ -455,12 +463,13 @@ where
{ {
let permissions = state.borrow_mut::<NP>(); let permissions = state.borrow_mut::<NP>();
permissions permissions
.check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenTls()")?; .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenTls()")
.map_err(NetError::Permission)?;
} }
let bind_addr = resolve_addr_sync(&addr.hostname, addr.port)? let bind_addr = resolve_addr_sync(&addr.hostname, addr.port)?
.next() .next()
.ok_or_else(|| generic_error("No resolved address found"))?; .ok_or(NetError::NoResolvedAddress)?;
let tcp_listener = if args.load_balanced { let tcp_listener = if args.load_balanced {
TcpListener::bind_load_balanced(bind_addr) TcpListener::bind_load_balanced(bind_addr)
@ -475,28 +484,24 @@ where
.map(|s| s.into_bytes()) .map(|s| s.into_bytes())
.collect(); .collect();
let listener = match keys.take() { let listener = match keys.take() {
TlsKeys::Null => Err(anyhow!("Deno.listenTls requires a key")), TlsKeys::Null => return Err(NetError::ListenTlsRequiresKey),
TlsKeys::Static(TlsKey(cert, key)) => { TlsKeys::Static(TlsKey(cert, key)) => {
let mut tls_config = ServerConfig::builder() let mut tls_config = ServerConfig::builder()
.with_no_client_auth() .with_no_client_auth()
.with_single_cert(cert, key) .with_single_cert(cert, key)?;
.map_err(|e| anyhow!(e))?;
tls_config.alpn_protocols = alpn; tls_config.alpn_protocols = alpn;
Ok(TlsListener { TlsListener {
tcp_listener, tcp_listener,
tls_config: Some(tls_config.into()), tls_config: Some(tls_config.into()),
server_config_provider: None, server_config_provider: None,
}) }
} }
TlsKeys::Resolver(resolver) => Ok(TlsListener { TlsKeys::Resolver(resolver) => TlsListener {
tcp_listener, tcp_listener,
tls_config: None, tls_config: None,
server_config_provider: Some(resolver.into_server_config_provider(alpn)), server_config_provider: Some(resolver.into_server_config_provider(alpn)),
}), },
} };
.map_err(|e| {
custom_error("InvalidData", "Error creating TLS certificate").context(e)
})?;
let tls_listener_resource = NetworkListenerResource::new(listener); let tls_listener_resource = NetworkListenerResource::new(listener);
@ -510,23 +515,23 @@ where
pub async fn op_net_accept_tls( pub async fn op_net_accept_tls(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> { ) -> Result<(ResourceId, IpAddr, IpAddr), NetError> {
let resource = state let resource = state
.borrow() .borrow()
.resource_table .resource_table
.get::<NetworkListenerResource<TlsListener>>(rid) .get::<NetworkListenerResource<TlsListener>>(rid)
.map_err(|_| bad_resource("Listener has been closed"))?; .map_err(|_| NetError::ListenerClosed)?;
let cancel_handle = RcRef::map(&resource, |r| &r.cancel); let cancel_handle = RcRef::map(&resource, |r| &r.cancel);
let listener = RcRef::map(&resource, |r| &r.listener) let listener = RcRef::map(&resource, |r| &r.listener)
.try_borrow_mut() .try_borrow_mut()
.ok_or_else(|| custom_error("Busy", "Another accept task is ongoing"))?; .ok_or_else(|| NetError::AcceptTaskOngoing)?;
let (tls_stream, remote_addr) = let (tls_stream, remote_addr) =
match listener.accept().try_or_cancel(&cancel_handle).await { match listener.accept().try_or_cancel(&cancel_handle).await {
Ok(tuple) => tuple, Ok(tuple) => tuple,
Err(err) if err.kind() == ErrorKind::Interrupted => { Err(err) if err.kind() == ErrorKind::Interrupted => {
return Err(bad_resource("Listener has been closed")); return Err(NetError::ListenerClosed);
} }
Err(err) => return Err(err.into()), Err(err) => return Err(err.into()),
}; };
@ -547,11 +552,11 @@ pub async fn op_net_accept_tls(
pub async fn op_tls_handshake( pub async fn op_tls_handshake(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
) -> Result<TlsHandshakeInfo, AnyError> { ) -> Result<TlsHandshakeInfo, NetError> {
let resource = state let resource = state
.borrow() .borrow()
.resource_table .resource_table
.get::<TlsStreamResource>(rid) .get::<TlsStreamResource>(rid)
.map_err(|_| bad_resource("Listener has been closed"))?; .map_err(|_| NetError::ListenerClosed)?;
resource.handshake().await resource.handshake().await.map_err(Into::into)
} }


@ -1,11 +1,9 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use crate::io::UnixStreamResource; use crate::io::UnixStreamResource;
use crate::ops::NetError;
use crate::raw::NetworkListenerResource; use crate::raw::NetworkListenerResource;
use crate::NetPermissions; use crate::NetPermissions;
use deno_core::error::bad_resource;
use deno_core::error::custom_error;
use deno_core::error::AnyError;
use deno_core::op2; use deno_core::op2;
use deno_core::AsyncRefCell; use deno_core::AsyncRefCell;
use deno_core::CancelHandle; use deno_core::CancelHandle;
@ -26,11 +24,8 @@ use tokio::net::UnixListener;
pub use tokio::net::UnixStream; pub use tokio::net::UnixStream;
/// A utility function to map OsStrings to Strings /// A utility function to map OsStrings to Strings
pub fn into_string(s: std::ffi::OsString) -> Result<String, AnyError> { pub fn into_string(s: std::ffi::OsString) -> Result<String, NetError> {
s.into_string().map_err(|s| { s.into_string().map_err(NetError::InvalidUtf8)
let message = format!("File name or path {s:?} is not valid UTF-8");
custom_error("InvalidData", message)
})
} }
pub struct UnixDatagramResource { pub struct UnixDatagramResource {
@ -63,15 +58,15 @@ pub struct UnixListenArgs {
pub async fn op_net_accept_unix( pub async fn op_net_accept_unix(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
) -> Result<(ResourceId, Option<String>, Option<String>), AnyError> { ) -> Result<(ResourceId, Option<String>, Option<String>), NetError> {
let resource = state let resource = state
.borrow() .borrow()
.resource_table .resource_table
.get::<NetworkListenerResource<UnixListener>>(rid) .get::<NetworkListenerResource<UnixListener>>(rid)
.map_err(|_| bad_resource("Listener has been closed"))?; .map_err(|_| NetError::ListenerClosed)?;
let listener = RcRef::map(&resource, |r| &r.listener) let listener = RcRef::map(&resource, |r| &r.listener)
.try_borrow_mut() .try_borrow_mut()
.ok_or_else(|| custom_error("Busy", "Listener already in use"))?; .ok_or(NetError::ListenerBusy)?;
let cancel = RcRef::map(resource, |r| &r.cancel); let cancel = RcRef::map(resource, |r| &r.cancel);
let (unix_stream, _socket_addr) = listener let (unix_stream, _socket_addr) = listener
.accept() .accept()
@ -95,7 +90,7 @@ pub async fn op_net_accept_unix(
pub async fn op_net_connect_unix<NP>( pub async fn op_net_connect_unix<NP>(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[string] address_path: String, #[string] address_path: String,
) -> Result<(ResourceId, Option<String>, Option<String>), AnyError> ) -> Result<(ResourceId, Option<String>, Option<String>), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
@ -103,10 +98,12 @@ where
let mut state_ = state.borrow_mut(); let mut state_ = state.borrow_mut();
let address_path = state_ let address_path = state_
.borrow_mut::<NP>() .borrow_mut::<NP>()
.check_read(&address_path, "Deno.connect()")?; .check_read(&address_path, "Deno.connect()")
.map_err(NetError::Permission)?;
_ = state_ _ = state_
.borrow_mut::<NP>() .borrow_mut::<NP>()
.check_write_path(&address_path, "Deno.connect()")?; .check_write_path(&address_path, "Deno.connect()")
.map_err(NetError::Permission)?;
address_path address_path
}; };
let unix_stream = UnixStream::connect(&address_path).await?; let unix_stream = UnixStream::connect(&address_path).await?;
@ -127,15 +124,15 @@ pub async fn op_net_recv_unixpacket(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[buffer] mut buf: JsBuffer, #[buffer] mut buf: JsBuffer,
) -> Result<(usize, Option<String>), AnyError> { ) -> Result<(usize, Option<String>), NetError> {
let resource = state let resource = state
.borrow() .borrow()
.resource_table .resource_table
.get::<UnixDatagramResource>(rid) .get::<UnixDatagramResource>(rid)
.map_err(|_| bad_resource("Socket has been closed"))?; .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket) let socket = RcRef::map(&resource, |r| &r.socket)
.try_borrow_mut() .try_borrow_mut()
.ok_or_else(|| custom_error("Busy", "Socket already in use"))?; .ok_or(NetError::SocketBusy)?;
let cancel = RcRef::map(resource, |r| &r.cancel); let cancel = RcRef::map(resource, |r| &r.cancel);
let (nread, remote_addr) = let (nread, remote_addr) =
socket.recv_from(&mut buf).try_or_cancel(cancel).await?; socket.recv_from(&mut buf).try_or_cancel(cancel).await?;
@ -150,24 +147,25 @@ pub async fn op_net_send_unixpacket<NP>(
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[string] address_path: String, #[string] address_path: String,
#[buffer] zero_copy: JsBuffer, #[buffer] zero_copy: JsBuffer,
) -> Result<usize, AnyError> ) -> Result<usize, NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
let address_path = { let address_path = {
let mut s = state.borrow_mut(); let mut s = state.borrow_mut();
s.borrow_mut::<NP>() s.borrow_mut::<NP>()
.check_write(&address_path, "Deno.DatagramConn.send()")? .check_write(&address_path, "Deno.DatagramConn.send()")
.map_err(NetError::Permission)?
}; };
let resource = state let resource = state
.borrow() .borrow()
.resource_table .resource_table
.get::<UnixDatagramResource>(rid) .get::<UnixDatagramResource>(rid)
.map_err(|_| custom_error("NotConnected", "Socket has been closed"))?; .map_err(|_| NetError::SocketClosedNotConnected)?;
let socket = RcRef::map(&resource, |r| &r.socket) let socket = RcRef::map(&resource, |r| &r.socket)
.try_borrow_mut() .try_borrow_mut()
.ok_or_else(|| custom_error("Busy", "Socket already in use"))?; .ok_or(NetError::SocketBusy)?;
let nwritten = socket.send_to(&zero_copy, address_path).await?; let nwritten = socket.send_to(&zero_copy, address_path).await?;
Ok(nwritten) Ok(nwritten)
@ -179,14 +177,18 @@ pub fn op_net_listen_unix<NP>(
state: &mut OpState, state: &mut OpState,
#[string] address_path: String, #[string] address_path: String,
#[string] api_name: String, #[string] api_name: String,
) -> Result<(ResourceId, Option<String>), AnyError> ) -> Result<(ResourceId, Option<String>), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
let permissions = state.borrow_mut::<NP>(); let permissions = state.borrow_mut::<NP>();
let api_call_expr = format!("{}()", api_name); let api_call_expr = format!("{}()", api_name);
let address_path = permissions.check_read(&address_path, &api_call_expr)?; let address_path = permissions
_ = permissions.check_write_path(&address_path, &api_call_expr)?; .check_read(&address_path, &api_call_expr)
.map_err(NetError::Permission)?;
_ = permissions
.check_write_path(&address_path, &api_call_expr)
.map_err(NetError::Permission)?;
let listener = UnixListener::bind(address_path)?; let listener = UnixListener::bind(address_path)?;
let local_addr = listener.local_addr()?; let local_addr = listener.local_addr()?;
let pathname = local_addr.as_pathname().map(pathstring).transpose()?; let pathname = local_addr.as_pathname().map(pathstring).transpose()?;
@ -198,14 +200,17 @@ where
pub fn net_listen_unixpacket<NP>( pub fn net_listen_unixpacket<NP>(
state: &mut OpState, state: &mut OpState,
address_path: String, address_path: String,
) -> Result<(ResourceId, Option<String>), AnyError> ) -> Result<(ResourceId, Option<String>), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
let permissions = state.borrow_mut::<NP>(); let permissions = state.borrow_mut::<NP>();
let address_path = let address_path = permissions
permissions.check_read(&address_path, "Deno.listenDatagram()")?; .check_read(&address_path, "Deno.listenDatagram()")
_ = permissions.check_write_path(&address_path, "Deno.listenDatagram()")?; .map_err(NetError::Permission)?;
_ = permissions
.check_write_path(&address_path, "Deno.listenDatagram()")
.map_err(NetError::Permission)?;
let socket = UnixDatagram::bind(address_path)?; let socket = UnixDatagram::bind(address_path)?;
let local_addr = socket.local_addr()?; let local_addr = socket.local_addr()?;
let pathname = local_addr.as_pathname().map(pathstring).transpose()?; let pathname = local_addr.as_pathname().map(pathstring).transpose()?;
@ -222,7 +227,7 @@ where
pub fn op_net_listen_unixpacket<NP>( pub fn op_net_listen_unixpacket<NP>(
state: &mut OpState, state: &mut OpState,
#[string] path: String, #[string] path: String,
) -> Result<(ResourceId, Option<String>), AnyError> ) -> Result<(ResourceId, Option<String>), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
@ -235,13 +240,13 @@ where
pub fn op_node_unstable_net_listen_unixpacket<NP>( pub fn op_node_unstable_net_listen_unixpacket<NP>(
state: &mut OpState, state: &mut OpState,
#[string] path: String, #[string] path: String,
) -> Result<(ResourceId, Option<String>), AnyError> ) -> Result<(ResourceId, Option<String>), NetError>
where where
NP: NetPermissions + 'static, NP: NetPermissions + 'static,
{ {
net_listen_unixpacket::<NP>(state, path) net_listen_unixpacket::<NP>(state, path)
} }
pub fn pathstring(pathname: &Path) -> Result<String, AnyError> { pub fn pathstring(pathname: &Path) -> Result<String, NetError> {
into_string(pathname.into()) into_string(pathname.into())
} }

View file

@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use deno_core::error::AnyError;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::net::ToSocketAddrs; use std::net::ToSocketAddrs;
use tokio::net::lookup_host; use tokio::net::lookup_host;
@ -9,7 +8,7 @@ use tokio::net::lookup_host;
pub async fn resolve_addr( pub async fn resolve_addr(
hostname: &str, hostname: &str,
port: u16, port: u16,
) -> Result<impl Iterator<Item = SocketAddr> + '_, AnyError> { ) -> Result<impl Iterator<Item = SocketAddr> + '_, std::io::Error> {
let addr_port_pair = make_addr_port_pair(hostname, port); let addr_port_pair = make_addr_port_pair(hostname, port);
let result = lookup_host(addr_port_pair).await?; let result = lookup_host(addr_port_pair).await?;
Ok(result) Ok(result)
@ -19,7 +18,7 @@ pub async fn resolve_addr(
pub fn resolve_addr_sync( pub fn resolve_addr_sync(
hostname: &str, hostname: &str,
port: u16, port: u16,
) -> Result<impl Iterator<Item = SocketAddr>, AnyError> { ) -> Result<impl Iterator<Item = SocketAddr>, std::io::Error> {
let addr_port_pair = make_addr_port_pair(hostname, port); let addr_port_pair = make_addr_port_pair(hostname, port);
let result = addr_port_pair.to_socket_addrs()?; let result = addr_port_pair.to_socket_addrs()?;
Ok(result) Ok(result)
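
The return type change above keeps the resolver on plain std::io::Error. A rough usage sketch (illustrative only; the real make_addr_port_pair also handles IPv6 literal bracketing, which the tuple form below sidesteps):

use std::net::{SocketAddr, ToSocketAddrs};

fn first_addr(hostname: &str, port: u16) -> Result<SocketAddr, std::io::Error> {
  // (hostname, port) resolves via the system resolver, much like lookup_host does asynchronously
  let mut addrs = (hostname, port).to_socket_addrs()?;
  addrs.next().ok_or_else(|| {
    std::io::Error::new(std::io::ErrorKind::NotFound, "no resolved address")
  })
}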

View file

@ -2,7 +2,7 @@
[package] [package]
name = "deno_node" name = "deno_node"
version = "0.109.0" version = "0.110.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

View file

@ -53,8 +53,9 @@ export function copyFile(
}, (e) => { }, (e) => {
if (e instanceof Deno.errors.NotFound) { if (e instanceof Deno.errors.NotFound) {
Deno.copyFile(srcStr, destStr).then(() => cb(null), cb); Deno.copyFile(srcStr, destStr).then(() => cb(null), cb);
} else {
cb(e);
} }
cb(e);
}); });
} else { } else {
Deno.copyFile(srcStr, destStr).then(() => cb(null), cb); Deno.copyFile(srcStr, destStr).then(() => cb(null), cb);
@ -83,8 +84,9 @@ export function copyFileSync(
} catch (e) { } catch (e) {
if (e instanceof Deno.errors.NotFound) { if (e instanceof Deno.errors.NotFound) {
Deno.copyFileSync(srcStr, destStr); Deno.copyFileSync(srcStr, destStr);
} else {
throw e;
} }
throw e;
} }
} else { } else {
Deno.copyFileSync(srcStr, destStr); Deno.copyFileSync(srcStr, destStr);

View file

@ -304,7 +304,7 @@ export class TCP extends ConnectionWrap {
* @return An error status code. * @return An error status code.
*/ */
setNoDelay(noDelay: boolean): number { setNoDelay(noDelay: boolean): number {
if ("setNoDelay" in this[kStreamBaseField]) { if (this[kStreamBaseField] && "setNoDelay" in this[kStreamBaseField]) {
this[kStreamBaseField].setNoDelay(noDelay); this[kStreamBaseField].setNoDelay(noDelay);
} }
return 0; return 0;

View file

@ -2,7 +2,7 @@
[package] [package]
name = "deno_tls" name = "deno_tls"
version = "0.159.0" version = "0.160.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

View file

@ -2,7 +2,7 @@
[package] [package]
name = "deno_url" name = "deno_url"
version = "0.172.0" version = "0.173.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

View file

@ -2,7 +2,7 @@
[package] [package]
name = "deno_web" name = "deno_web"
version = "0.203.0" version = "0.204.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
@ -23,6 +23,7 @@ encoding_rs.workspace = true
flate2 = { workspace = true, features = ["default"] } flate2 = { workspace = true, features = ["default"] }
futures.workspace = true futures.workspace = true
serde = "1.0.149" serde = "1.0.149"
thiserror.workspace = true
tokio.workspace = true tokio.workspace = true
uuid = { workspace = true, features = ["serde"] } uuid = { workspace = true, features = ["serde"] }

View file

@ -7,8 +7,6 @@ use std::rc::Rc;
use std::sync::Arc; use std::sync::Arc;
use async_trait::async_trait; use async_trait::async_trait;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op2; use deno_core::op2;
use deno_core::parking_lot::Mutex; use deno_core::parking_lot::Mutex;
use deno_core::url::Url; use deno_core::url::Url;
@ -19,6 +17,18 @@ use serde::Deserialize;
use serde::Serialize; use serde::Serialize;
use uuid::Uuid; use uuid::Uuid;
#[derive(Debug, thiserror::Error)]
pub enum BlobError {
#[error("Blob part not found")]
BlobPartNotFound,
#[error("start + len can not be larger than blob part size")]
SizeLargerThanBlobPart,
#[error("Blob URLs are not supported in this context")]
BlobURLsNotSupported,
#[error(transparent)]
Url(#[from] deno_core::url::ParseError),
}
use crate::Location; use crate::Location;
pub type PartMap = HashMap<Uuid, Arc<dyn BlobPart + Send + Sync>>; pub type PartMap = HashMap<Uuid, Arc<dyn BlobPart + Send + Sync>>;
@ -96,18 +106,18 @@ pub struct Blob {
impl Blob { impl Blob {
// TODO(lucacsonato): this should be a stream! // TODO(lucacsonato): this should be a stream!
pub async fn read_all(&self) -> Result<Vec<u8>, AnyError> { pub async fn read_all(&self) -> Vec<u8> {
let size = self.size(); let size = self.size();
let mut bytes = Vec::with_capacity(size); let mut bytes = Vec::with_capacity(size);
for part in &self.parts { for part in &self.parts {
let chunk = part.read().await?; let chunk = part.read().await;
bytes.extend_from_slice(chunk); bytes.extend_from_slice(chunk);
} }
assert_eq!(bytes.len(), size); assert_eq!(bytes.len(), size);
Ok(bytes) bytes
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@ -122,7 +132,7 @@ impl Blob {
#[async_trait] #[async_trait]
pub trait BlobPart: Debug { pub trait BlobPart: Debug {
// TODO(lucacsonato): this should be a stream! // TODO(lucacsonato): this should be a stream!
async fn read(&self) -> Result<&[u8], AnyError>; async fn read(&self) -> &[u8];
fn size(&self) -> usize; fn size(&self) -> usize;
} }
@ -137,8 +147,8 @@ impl From<Vec<u8>> for InMemoryBlobPart {
#[async_trait] #[async_trait]
impl BlobPart for InMemoryBlobPart { impl BlobPart for InMemoryBlobPart {
async fn read(&self) -> Result<&[u8], AnyError> { async fn read(&self) -> &[u8] {
Ok(&self.0) &self.0
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@ -155,9 +165,9 @@ pub struct SlicedBlobPart {
#[async_trait] #[async_trait]
impl BlobPart for SlicedBlobPart { impl BlobPart for SlicedBlobPart {
async fn read(&self) -> Result<&[u8], AnyError> { async fn read(&self) -> &[u8] {
let original = self.part.read().await?; let original = self.part.read().await;
Ok(&original[self.start..self.start + self.len]) &original[self.start..self.start + self.len]
} }
fn size(&self) -> usize { fn size(&self) -> usize {
@ -189,19 +199,17 @@ pub fn op_blob_slice_part(
state: &mut OpState, state: &mut OpState,
#[serde] id: Uuid, #[serde] id: Uuid,
#[serde] options: SliceOptions, #[serde] options: SliceOptions,
) -> Result<Uuid, AnyError> { ) -> Result<Uuid, BlobError> {
let blob_store = state.borrow::<Arc<BlobStore>>(); let blob_store = state.borrow::<Arc<BlobStore>>();
let part = blob_store let part = blob_store
.get_part(&id) .get_part(&id)
.ok_or_else(|| type_error("Blob part not found"))?; .ok_or(BlobError::BlobPartNotFound)?;
let SliceOptions { start, len } = options; let SliceOptions { start, len } = options;
let size = part.size(); let size = part.size();
if start + len > size { if start + len > size {
return Err(type_error( return Err(BlobError::SizeLargerThanBlobPart);
"start + len can not be larger than blob part size",
));
} }
let sliced_part = SlicedBlobPart { part, start, len }; let sliced_part = SlicedBlobPart { part, start, len };
@ -215,14 +223,14 @@ pub fn op_blob_slice_part(
pub async fn op_blob_read_part( pub async fn op_blob_read_part(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[serde] id: Uuid, #[serde] id: Uuid,
) -> Result<ToJsBuffer, AnyError> { ) -> Result<ToJsBuffer, BlobError> {
let part = { let part = {
let state = state.borrow(); let state = state.borrow();
let blob_store = state.borrow::<Arc<BlobStore>>(); let blob_store = state.borrow::<Arc<BlobStore>>();
blob_store.get_part(&id) blob_store.get_part(&id)
} }
.ok_or_else(|| type_error("Blob part not found"))?; .ok_or(BlobError::BlobPartNotFound)?;
let buf = part.read().await?; let buf = part.read().await;
Ok(ToJsBuffer::from(buf.to_vec())) Ok(ToJsBuffer::from(buf.to_vec()))
} }
@ -238,13 +246,13 @@ pub fn op_blob_create_object_url(
state: &mut OpState, state: &mut OpState,
#[string] media_type: String, #[string] media_type: String,
#[serde] part_ids: Vec<Uuid>, #[serde] part_ids: Vec<Uuid>,
) -> Result<String, AnyError> { ) -> Result<String, BlobError> {
let mut parts = Vec::with_capacity(part_ids.len()); let mut parts = Vec::with_capacity(part_ids.len());
let blob_store = state.borrow::<Arc<BlobStore>>(); let blob_store = state.borrow::<Arc<BlobStore>>();
for part_id in part_ids { for part_id in part_ids {
let part = blob_store let part = blob_store
.get_part(&part_id) .get_part(&part_id)
.ok_or_else(|| type_error("Blob part not found"))?; .ok_or(BlobError::BlobPartNotFound)?;
parts.push(part); parts.push(part);
} }
@ -263,7 +271,7 @@ pub fn op_blob_create_object_url(
pub fn op_blob_revoke_object_url( pub fn op_blob_revoke_object_url(
state: &mut OpState, state: &mut OpState,
#[string] url: &str, #[string] url: &str,
) -> Result<(), AnyError> { ) -> Result<(), BlobError> {
let url = Url::parse(url)?; let url = Url::parse(url)?;
let blob_store = state.borrow::<Arc<BlobStore>>(); let blob_store = state.borrow::<Arc<BlobStore>>();
blob_store.remove_object_url(&url); blob_store.remove_object_url(&url);
@ -287,15 +295,15 @@ pub struct ReturnBlobPart {
pub fn op_blob_from_object_url( pub fn op_blob_from_object_url(
state: &mut OpState, state: &mut OpState,
#[string] url: String, #[string] url: String,
) -> Result<Option<ReturnBlob>, AnyError> { ) -> Result<Option<ReturnBlob>, BlobError> {
let url = Url::parse(&url)?; let url = Url::parse(&url)?;
if url.scheme() != "blob" { if url.scheme() != "blob" {
return Ok(None); return Ok(None);
} }
let blob_store = state.try_borrow::<Arc<BlobStore>>().ok_or_else(|| { let blob_store = state
type_error("Blob URLs are not supported in this context.") .try_borrow::<Arc<BlobStore>>()
})?; .ok_or(BlobError::BlobURLsNotSupported)?;
if let Some(blob) = blob_store.get_object_url(url) { if let Some(blob) = blob_store.get_object_url(url) {
let parts = blob let parts = blob
.parts .parts
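
The BlobError enum above pairs hand-written variants with a #[from] conversion for URL parse failures, which is what keeps `Url::parse(url)?` usable inside the ops. A small illustrative sketch of both patterns (not from the patch; assumes the thiserror and url crates):

use std::collections::HashMap;

#[derive(Debug, thiserror::Error)]
enum BlobErrorSketch {
  #[error("Blob part not found")]
  BlobPartNotFound,
  // ParseError converts automatically, so `?` on Url::parse still works
  #[error(transparent)]
  Url(#[from] url::ParseError),
}

fn parse_object_url(input: &str) -> Result<url::Url, BlobErrorSketch> {
  Ok(url::Url::parse(input)?)
}

fn find_part(parts: &HashMap<u64, Vec<u8>>, id: u64) -> Result<&Vec<u8>, BlobErrorSketch> {
  // mirrors the `.ok_or(BlobError::BlobPartNotFound)` calls in the hunk
  parts.get(&id).ok_or(BlobErrorSketch::BlobPartNotFound)
}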

View file

@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op2; use deno_core::op2;
use flate2::write::DeflateDecoder; use flate2::write::DeflateDecoder;
use flate2::write::DeflateEncoder; use flate2::write::DeflateEncoder;
@ -13,6 +11,18 @@ use flate2::Compression;
use std::cell::RefCell; use std::cell::RefCell;
use std::io::Write; use std::io::Write;
#[derive(Debug, thiserror::Error)]
pub enum CompressionError {
#[error("Unsupported format")]
UnsupportedFormat,
#[error("resource is closed")]
ResourceClosed,
#[error(transparent)]
IoTypeError(std::io::Error),
#[error(transparent)]
Io(std::io::Error),
}
#[derive(Debug)] #[derive(Debug)]
struct CompressionResource(RefCell<Option<Inner>>); struct CompressionResource(RefCell<Option<Inner>>);
@ -34,7 +44,7 @@ enum Inner {
pub fn op_compression_new( pub fn op_compression_new(
#[string] format: &str, #[string] format: &str,
is_decoder: bool, is_decoder: bool,
) -> Result<CompressionResource, AnyError> { ) -> Result<CompressionResource, CompressionError> {
let w = Vec::new(); let w = Vec::new();
let inner = match (format, is_decoder) { let inner = match (format, is_decoder) {
("deflate", true) => Inner::DeflateDecoder(ZlibDecoder::new(w)), ("deflate", true) => Inner::DeflateDecoder(ZlibDecoder::new(w)),
@ -49,7 +59,7 @@ pub fn op_compression_new(
("gzip", false) => { ("gzip", false) => {
Inner::GzEncoder(GzEncoder::new(w, Compression::default())) Inner::GzEncoder(GzEncoder::new(w, Compression::default()))
} }
_ => return Err(type_error("Unsupported format")), _ => return Err(CompressionError::UnsupportedFormat),
}; };
Ok(CompressionResource(RefCell::new(Some(inner)))) Ok(CompressionResource(RefCell::new(Some(inner))))
} }
@ -59,40 +69,38 @@ pub fn op_compression_new(
pub fn op_compression_write( pub fn op_compression_write(
#[cppgc] resource: &CompressionResource, #[cppgc] resource: &CompressionResource,
#[anybuffer] input: &[u8], #[anybuffer] input: &[u8],
) -> Result<Vec<u8>, AnyError> { ) -> Result<Vec<u8>, CompressionError> {
let mut inner = resource.0.borrow_mut(); let mut inner = resource.0.borrow_mut();
let inner = inner let inner = inner.as_mut().ok_or(CompressionError::ResourceClosed)?;
.as_mut()
.ok_or_else(|| type_error("resource is closed"))?;
let out: Vec<u8> = match &mut *inner { let out: Vec<u8> = match &mut *inner {
Inner::DeflateDecoder(d) => { Inner::DeflateDecoder(d) => {
d.write_all(input).map_err(|e| type_error(e.to_string()))?; d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush()?; d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..) d.get_mut().drain(..)
} }
Inner::DeflateEncoder(d) => { Inner::DeflateEncoder(d) => {
d.write_all(input).map_err(|e| type_error(e.to_string()))?; d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush()?; d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..) d.get_mut().drain(..)
} }
Inner::DeflateRawDecoder(d) => { Inner::DeflateRawDecoder(d) => {
d.write_all(input).map_err(|e| type_error(e.to_string()))?; d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush()?; d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..) d.get_mut().drain(..)
} }
Inner::DeflateRawEncoder(d) => { Inner::DeflateRawEncoder(d) => {
d.write_all(input).map_err(|e| type_error(e.to_string()))?; d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush()?; d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..) d.get_mut().drain(..)
} }
Inner::GzDecoder(d) => { Inner::GzDecoder(d) => {
d.write_all(input).map_err(|e| type_error(e.to_string()))?; d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush()?; d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..) d.get_mut().drain(..)
} }
Inner::GzEncoder(d) => { Inner::GzEncoder(d) => {
d.write_all(input).map_err(|e| type_error(e.to_string()))?; d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush()?; d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..) d.get_mut().drain(..)
} }
} }
@ -105,27 +113,27 @@ pub fn op_compression_write(
pub fn op_compression_finish( pub fn op_compression_finish(
#[cppgc] resource: &CompressionResource, #[cppgc] resource: &CompressionResource,
report_errors: bool, report_errors: bool,
) -> Result<Vec<u8>, AnyError> { ) -> Result<Vec<u8>, CompressionError> {
let inner = resource let inner = resource
.0 .0
.borrow_mut() .borrow_mut()
.take() .take()
.ok_or_else(|| type_error("resource is closed"))?; .ok_or(CompressionError::ResourceClosed)?;
let out = match inner { let out = match inner {
Inner::DeflateDecoder(d) => { Inner::DeflateDecoder(d) => {
d.finish().map_err(|e| type_error(e.to_string())) d.finish().map_err(CompressionError::IoTypeError)
} }
Inner::DeflateEncoder(d) => { Inner::DeflateEncoder(d) => {
d.finish().map_err(|e| type_error(e.to_string())) d.finish().map_err(CompressionError::IoTypeError)
} }
Inner::DeflateRawDecoder(d) => { Inner::DeflateRawDecoder(d) => {
d.finish().map_err(|e| type_error(e.to_string())) d.finish().map_err(CompressionError::IoTypeError)
} }
Inner::DeflateRawEncoder(d) => { Inner::DeflateRawEncoder(d) => {
d.finish().map_err(|e| type_error(e.to_string())) d.finish().map_err(CompressionError::IoTypeError)
} }
Inner::GzDecoder(d) => d.finish().map_err(|e| type_error(e.to_string())), Inner::GzDecoder(d) => d.finish().map_err(CompressionError::IoTypeError),
Inner::GzEncoder(d) => d.finish().map_err(|e| type_error(e.to_string())), Inner::GzEncoder(d) => d.finish().map_err(CompressionError::IoTypeError),
}; };
match out { match out {
Err(err) => { Err(err) => {
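
CompressionError keeps two io-wrapping variants on purpose: write_all and finish failures map to the TypeError-classed IoTypeError, while flush failures stay on the plain Io variant and keep an io-derived class (the errors.rs hunk further down wires this up). A condensed, illustrative sketch of that split:

#[derive(Debug, thiserror::Error)]
enum CompressionErrorSketch {
  #[error(transparent)]
  IoTypeError(std::io::Error), // reported to JS as TypeError
  #[error(transparent)]
  Io(std::io::Error),          // reported with the usual io error class
}

fn class_of(e: &CompressionErrorSketch) -> &'static str {
  match e {
    CompressionErrorSketch::IoTypeError(_) => "TypeError",
    // simplified: the real mapping dispatches on the io::ErrorKind
    CompressionErrorSketch::Io(_) => "Error",
  }
}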

View file

@ -6,9 +6,6 @@ mod message_port;
mod stream_resource; mod stream_resource;
mod timers; mod timers;
use deno_core::error::range_error;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op2; use deno_core::op2;
use deno_core::url::Url; use deno_core::url::Url;
use deno_core::v8; use deno_core::v8;
@ -22,10 +19,14 @@ use encoding_rs::DecoderResult;
use encoding_rs::Encoding; use encoding_rs::Encoding;
use std::borrow::Cow; use std::borrow::Cow;
use std::cell::RefCell; use std::cell::RefCell;
use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
pub use blob::BlobError;
pub use compression::CompressionError;
pub use message_port::MessagePortError;
pub use stream_resource::StreamResourceError;
use crate::blob::op_blob_create_object_url; use crate::blob::op_blob_create_object_url;
use crate::blob::op_blob_create_part; use crate::blob::op_blob_create_part;
use crate::blob::op_blob_from_object_url; use crate::blob::op_blob_from_object_url;
@ -126,9 +127,27 @@ deno_core::extension!(deno_web,
} }
); );
#[derive(Debug, thiserror::Error)]
pub enum WebError {
#[error("Failed to decode base64")]
Base64Decode,
#[error("The encoding label provided ('{0}') is invalid.")]
InvalidEncodingLabel(String),
#[error("buffer exceeds maximum length")]
BufferTooLong,
#[error("Value too large to decode")]
ValueTooLarge,
#[error("Provided buffer too small")]
BufferTooSmall,
#[error("The encoded data is not valid")]
DataInvalid,
#[error(transparent)]
DataError(#[from] v8::DataError),
}
#[op2] #[op2]
#[serde] #[serde]
fn op_base64_decode(#[string] input: String) -> Result<ToJsBuffer, AnyError> { fn op_base64_decode(#[string] input: String) -> Result<ToJsBuffer, WebError> {
let mut s = input.into_bytes(); let mut s = input.into_bytes();
let decoded_len = forgiving_base64_decode_inplace(&mut s)?; let decoded_len = forgiving_base64_decode_inplace(&mut s)?;
s.truncate(decoded_len); s.truncate(decoded_len);
@ -137,7 +156,7 @@ fn op_base64_decode(#[string] input: String) -> Result<ToJsBuffer, AnyError> {
#[op2] #[op2]
#[serde] #[serde]
fn op_base64_atob(#[serde] mut s: ByteString) -> Result<ByteString, AnyError> { fn op_base64_atob(#[serde] mut s: ByteString) -> Result<ByteString, WebError> {
let decoded_len = forgiving_base64_decode_inplace(&mut s)?; let decoded_len = forgiving_base64_decode_inplace(&mut s)?;
s.truncate(decoded_len); s.truncate(decoded_len);
Ok(s) Ok(s)
@ -147,11 +166,9 @@ fn op_base64_atob(#[serde] mut s: ByteString) -> Result<ByteString, AnyError> {
#[inline] #[inline]
fn forgiving_base64_decode_inplace( fn forgiving_base64_decode_inplace(
input: &mut [u8], input: &mut [u8],
) -> Result<usize, AnyError> { ) -> Result<usize, WebError> {
let error = let decoded = base64_simd::forgiving_decode_inplace(input)
|| DomExceptionInvalidCharacterError::new("Failed to decode base64"); .map_err(|_| WebError::Base64Decode)?;
let decoded =
base64_simd::forgiving_decode_inplace(input).map_err(|_| error())?;
Ok(decoded.len()) Ok(decoded.len())
} }
@ -177,13 +194,9 @@ fn forgiving_base64_encode(s: &[u8]) -> String {
#[string] #[string]
fn op_encoding_normalize_label( fn op_encoding_normalize_label(
#[string] label: String, #[string] label: String,
) -> Result<String, AnyError> { ) -> Result<String, WebError> {
let encoding = Encoding::for_label_no_replacement(label.as_bytes()) let encoding = Encoding::for_label_no_replacement(label.as_bytes())
.ok_or_else(|| { .ok_or(WebError::InvalidEncodingLabel(label))?;
range_error(format!(
"The encoding label provided ('{label}') is invalid."
))
})?;
Ok(encoding.name().to_lowercase()) Ok(encoding.name().to_lowercase())
} }
@ -192,7 +205,7 @@ fn op_encoding_decode_utf8<'a>(
scope: &mut v8::HandleScope<'a>, scope: &mut v8::HandleScope<'a>,
#[anybuffer] zero_copy: &[u8], #[anybuffer] zero_copy: &[u8],
ignore_bom: bool, ignore_bom: bool,
) -> Result<v8::Local<'a, v8::String>, AnyError> { ) -> Result<v8::Local<'a, v8::String>, WebError> {
let buf = &zero_copy; let buf = &zero_copy;
let buf = if !ignore_bom let buf = if !ignore_bom
@ -216,7 +229,7 @@ fn op_encoding_decode_utf8<'a>(
// - https://github.com/v8/v8/blob/d68fb4733e39525f9ff0a9222107c02c28096e2a/include/v8.h#L3277-L3278 // - https://github.com/v8/v8/blob/d68fb4733e39525f9ff0a9222107c02c28096e2a/include/v8.h#L3277-L3278
match v8::String::new_from_utf8(scope, buf, v8::NewStringType::Normal) { match v8::String::new_from_utf8(scope, buf, v8::NewStringType::Normal) {
Some(text) => Ok(text), Some(text) => Ok(text),
None => Err(type_error("buffer exceeds maximum length")), None => Err(WebError::BufferTooLong),
} }
} }
@ -227,12 +240,9 @@ fn op_encoding_decode_single(
#[string] label: String, #[string] label: String,
fatal: bool, fatal: bool,
ignore_bom: bool, ignore_bom: bool,
) -> Result<U16String, AnyError> { ) -> Result<U16String, WebError> {
let encoding = Encoding::for_label(label.as_bytes()).ok_or_else(|| { let encoding = Encoding::for_label(label.as_bytes())
range_error(format!( .ok_or(WebError::InvalidEncodingLabel(label))?;
"The encoding label provided ('{label}') is invalid."
))
})?;
let mut decoder = if ignore_bom { let mut decoder = if ignore_bom {
encoding.new_decoder_without_bom_handling() encoding.new_decoder_without_bom_handling()
@ -242,7 +252,7 @@ fn op_encoding_decode_single(
let max_buffer_length = decoder let max_buffer_length = decoder
.max_utf16_buffer_length(data.len()) .max_utf16_buffer_length(data.len())
.ok_or_else(|| range_error("Value too large to decode."))?; .ok_or(WebError::ValueTooLarge)?;
let mut output = vec![0; max_buffer_length]; let mut output = vec![0; max_buffer_length];
@ -254,12 +264,8 @@ fn op_encoding_decode_single(
output.truncate(written); output.truncate(written);
Ok(output.into()) Ok(output.into())
} }
DecoderResult::OutputFull => { DecoderResult::OutputFull => Err(WebError::BufferTooSmall),
Err(range_error("Provided buffer too small.")) DecoderResult::Malformed(_, _) => Err(WebError::DataInvalid),
}
DecoderResult::Malformed(_, _) => {
Err(type_error("The encoded data is not valid."))
}
} }
} else { } else {
let (result, _, written, _) = let (result, _, written, _) =
@ -269,7 +275,7 @@ fn op_encoding_decode_single(
output.truncate(written); output.truncate(written);
Ok(output.into()) Ok(output.into())
} }
CoderResult::OutputFull => Err(range_error("Provided buffer too small.")), CoderResult::OutputFull => Err(WebError::BufferTooSmall),
} }
} }
} }
@ -280,12 +286,9 @@ fn op_encoding_new_decoder(
#[string] label: &str, #[string] label: &str,
fatal: bool, fatal: bool,
ignore_bom: bool, ignore_bom: bool,
) -> Result<TextDecoderResource, AnyError> { ) -> Result<TextDecoderResource, WebError> {
let encoding = Encoding::for_label(label.as_bytes()).ok_or_else(|| { let encoding = Encoding::for_label(label.as_bytes())
range_error(format!( .ok_or_else(|| WebError::InvalidEncodingLabel(label.to_string()))?;
"The encoding label provided ('{label}') is invalid."
))
})?;
let decoder = if ignore_bom { let decoder = if ignore_bom {
encoding.new_decoder_without_bom_handling() encoding.new_decoder_without_bom_handling()
@ -305,13 +308,13 @@ fn op_encoding_decode(
#[anybuffer] data: &[u8], #[anybuffer] data: &[u8],
#[cppgc] resource: &TextDecoderResource, #[cppgc] resource: &TextDecoderResource,
stream: bool, stream: bool,
) -> Result<U16String, AnyError> { ) -> Result<U16String, WebError> {
let mut decoder = resource.decoder.borrow_mut(); let mut decoder = resource.decoder.borrow_mut();
let fatal = resource.fatal; let fatal = resource.fatal;
let max_buffer_length = decoder let max_buffer_length = decoder
.max_utf16_buffer_length(data.len()) .max_utf16_buffer_length(data.len())
.ok_or_else(|| range_error("Value too large to decode."))?; .ok_or(WebError::ValueTooLarge)?;
let mut output = vec![0; max_buffer_length]; let mut output = vec![0; max_buffer_length];
@ -323,12 +326,8 @@ fn op_encoding_decode(
output.truncate(written); output.truncate(written);
Ok(output.into()) Ok(output.into())
} }
DecoderResult::OutputFull => { DecoderResult::OutputFull => Err(WebError::BufferTooSmall),
Err(range_error("Provided buffer too small.")) DecoderResult::Malformed(_, _) => Err(WebError::DataInvalid),
}
DecoderResult::Malformed(_, _) => {
Err(type_error("The encoded data is not valid."))
}
} }
} else { } else {
let (result, _, written, _) = let (result, _, written, _) =
@ -338,7 +337,7 @@ fn op_encoding_decode(
output.truncate(written); output.truncate(written);
Ok(output.into()) Ok(output.into())
} }
CoderResult::OutputFull => Err(range_error("Provided buffer too small.")), CoderResult::OutputFull => Err(WebError::BufferTooSmall),
} }
} }
} }
@ -356,7 +355,7 @@ fn op_encoding_encode_into(
input: v8::Local<v8::Value>, input: v8::Local<v8::Value>,
#[buffer] buffer: &mut [u8], #[buffer] buffer: &mut [u8],
#[buffer] out_buf: &mut [u32], #[buffer] out_buf: &mut [u32],
) -> Result<(), AnyError> { ) -> Result<(), WebError> {
let s = v8::Local::<v8::String>::try_from(input)?; let s = v8::Local::<v8::String>::try_from(input)?;
let mut nchars = 0; let mut nchars = 0;
@ -414,53 +413,4 @@ pub fn get_declaration() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("lib.deno_web.d.ts") PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("lib.deno_web.d.ts")
} }
#[derive(Debug)]
pub struct DomExceptionQuotaExceededError {
pub msg: String,
}
impl DomExceptionQuotaExceededError {
pub fn new(msg: &str) -> Self {
DomExceptionQuotaExceededError {
msg: msg.to_string(),
}
}
}
#[derive(Debug)]
pub struct DomExceptionInvalidCharacterError {
pub msg: String,
}
impl DomExceptionInvalidCharacterError {
pub fn new(msg: &str) -> Self {
DomExceptionInvalidCharacterError {
msg: msg.to_string(),
}
}
}
impl fmt::Display for DomExceptionQuotaExceededError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad(&self.msg)
}
}
impl fmt::Display for DomExceptionInvalidCharacterError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad(&self.msg)
}
}
impl std::error::Error for DomExceptionQuotaExceededError {}
impl std::error::Error for DomExceptionInvalidCharacterError {}
pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> {
e.downcast_ref::<DomExceptionQuotaExceededError>()
.map(|_| "DOMExceptionQuotaExceededError")
.or_else(|| {
e.downcast_ref::<DomExceptionInvalidCharacterError>()
.map(|_| "DOMExceptionInvalidCharacterError")
})
}
pub struct Location(pub Url); pub struct Location(pub Url);
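
With WebError in place, the encoding-label lookups above trade the range_error closures for a plain Option-to-typed-error conversion. A minimal illustrative sketch (assumes the thiserror and encoding_rs crates):

#[derive(Debug, thiserror::Error)]
enum WebErrorSketch {
  #[error("The encoding label provided ('{0}') is invalid.")]
  InvalidEncodingLabel(String),
}

fn lookup(label: &str) -> Result<&'static encoding_rs::Encoding, WebErrorSketch> {
  encoding_rs::Encoding::for_label(label.as_bytes())
    .ok_or_else(|| WebErrorSketch::InvalidEncodingLabel(label.to_string()))
}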

View file

@ -4,8 +4,6 @@ use std::borrow::Cow;
use std::cell::RefCell; use std::cell::RefCell;
use std::rc::Rc; use std::rc::Rc;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::op2; use deno_core::op2;
use deno_core::CancelFuture; use deno_core::CancelFuture;
@ -23,6 +21,20 @@ use tokio::sync::mpsc::unbounded_channel;
use tokio::sync::mpsc::UnboundedReceiver; use tokio::sync::mpsc::UnboundedReceiver;
use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::UnboundedSender;
#[derive(Debug, thiserror::Error)]
pub enum MessagePortError {
#[error("Invalid message port transfer")]
InvalidTransfer,
#[error("Message port is not ready for transfer")]
NotReady,
#[error("Can not transfer self message port")]
TransferSelf,
#[error(transparent)]
Canceled(#[from] deno_core::Canceled),
#[error(transparent)]
Resource(deno_core::error::AnyError),
}
pub enum Transferable { pub enum Transferable {
MessagePort(MessagePort), MessagePort(MessagePort),
ArrayBuffer(u32), ArrayBuffer(u32),
@ -40,7 +52,7 @@ impl MessagePort {
&self, &self,
state: &mut OpState, state: &mut OpState,
data: JsMessageData, data: JsMessageData,
) -> Result<(), AnyError> { ) -> Result<(), MessagePortError> {
let transferables = let transferables =
deserialize_js_transferables(state, data.transferables)?; deserialize_js_transferables(state, data.transferables)?;
@ -56,7 +68,7 @@ impl MessagePort {
pub async fn recv( pub async fn recv(
&self, &self,
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
) -> Result<Option<JsMessageData>, AnyError> { ) -> Result<Option<JsMessageData>, MessagePortError> {
let rx = &self.rx; let rx = &self.rx;
let maybe_data = poll_fn(|cx| { let maybe_data = poll_fn(|cx| {
@ -147,7 +159,7 @@ pub enum JsTransferable {
pub fn deserialize_js_transferables( pub fn deserialize_js_transferables(
state: &mut OpState, state: &mut OpState,
js_transferables: Vec<JsTransferable>, js_transferables: Vec<JsTransferable>,
) -> Result<Vec<Transferable>, AnyError> { ) -> Result<Vec<Transferable>, MessagePortError> {
let mut transferables = Vec::with_capacity(js_transferables.len()); let mut transferables = Vec::with_capacity(js_transferables.len());
for js_transferable in js_transferables { for js_transferable in js_transferables {
match js_transferable { match js_transferable {
@ -155,10 +167,10 @@ pub fn deserialize_js_transferables(
let resource = state let resource = state
.resource_table .resource_table
.take::<MessagePortResource>(id) .take::<MessagePortResource>(id)
.map_err(|_| type_error("Invalid message port transfer"))?; .map_err(|_| MessagePortError::InvalidTransfer)?;
resource.cancel.cancel(); resource.cancel.cancel();
let resource = Rc::try_unwrap(resource) let resource =
.map_err(|_| type_error("Message port is not ready for transfer"))?; Rc::try_unwrap(resource).map_err(|_| MessagePortError::NotReady)?;
transferables.push(Transferable::MessagePort(resource.port)); transferables.push(Transferable::MessagePort(resource.port));
} }
JsTransferable::ArrayBuffer(id) => { JsTransferable::ArrayBuffer(id) => {
@ -202,16 +214,19 @@ pub fn op_message_port_post_message(
state: &mut OpState, state: &mut OpState,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
#[serde] data: JsMessageData, #[serde] data: JsMessageData,
) -> Result<(), AnyError> { ) -> Result<(), MessagePortError> {
for js_transferable in &data.transferables { for js_transferable in &data.transferables {
if let JsTransferable::MessagePort(id) = js_transferable { if let JsTransferable::MessagePort(id) = js_transferable {
if *id == rid { if *id == rid {
return Err(type_error("Can not transfer self message port")); return Err(MessagePortError::TransferSelf);
} }
} }
} }
let resource = state.resource_table.get::<MessagePortResource>(rid)?; let resource = state
.resource_table
.get::<MessagePortResource>(rid)
.map_err(MessagePortError::Resource)?;
resource.port.send(state, data) resource.port.send(state, data)
} }
@ -220,7 +235,7 @@ pub fn op_message_port_post_message(
pub async fn op_message_port_recv_message( pub async fn op_message_port_recv_message(
state: Rc<RefCell<OpState>>, state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
) -> Result<Option<JsMessageData>, AnyError> { ) -> Result<Option<JsMessageData>, MessagePortError> {
let resource = { let resource = {
let state = state.borrow(); let state = state.borrow();
match state.resource_table.get::<MessagePortResource>(rid) { match state.resource_table.get::<MessagePortResource>(rid) {
@ -237,8 +252,11 @@ pub async fn op_message_port_recv_message(
pub fn op_message_port_recv_message_sync( pub fn op_message_port_recv_message_sync(
state: &mut OpState, // Rc<RefCell<OpState>>, state: &mut OpState, // Rc<RefCell<OpState>>,
#[smi] rid: ResourceId, #[smi] rid: ResourceId,
) -> Result<Option<JsMessageData>, AnyError> { ) -> Result<Option<JsMessageData>, MessagePortError> {
let resource = state.resource_table.get::<MessagePortResource>(rid)?; let resource = state
.resource_table
.get::<MessagePortResource>(rid)
.map_err(MessagePortError::Resource)?;
let mut rx = resource.port.rx.borrow_mut(); let mut rx = resource.port.rx.borrow_mut();
match rx.try_recv() { match rx.try_recv() {
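
The transfer path above leans on Rc::try_unwrap to take sole ownership of the port resource; a port that is still referenced elsewhere surfaces as MessagePortError::NotReady. In isolation (illustrative sketch):

use std::rc::Rc;

fn take_for_transfer<T>(resource: Rc<T>) -> Result<T, &'static str> {
  // fails (returning the Rc) while any other clone of the resource is alive
  Rc::try_unwrap(resource).map_err(|_| "Message port is not ready for transfer")
}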

View file

@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use bytes::BytesMut; use bytes::BytesMut;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::external; use deno_core::external;
use deno_core::op2; use deno_core::op2;
use deno_core::serde_v8::V8Slice; use deno_core::serde_v8::V8Slice;
@ -18,6 +16,7 @@ use deno_core::RcRef;
use deno_core::Resource; use deno_core::Resource;
use deno_core::ResourceId; use deno_core::ResourceId;
use futures::future::poll_fn; use futures::future::poll_fn;
use futures::TryFutureExt;
use std::borrow::Cow; use std::borrow::Cow;
use std::cell::RefCell; use std::cell::RefCell;
use std::cell::RefMut; use std::cell::RefMut;
@ -31,6 +30,14 @@ use std::task::Context;
use std::task::Poll; use std::task::Poll;
use std::task::Waker; use std::task::Waker;
#[derive(Debug, thiserror::Error)]
pub enum StreamResourceError {
#[error(transparent)]
Canceled(#[from] deno_core::Canceled),
#[error("{0}")]
Js(String),
}
// How many buffers we'll allow in the channel before we stop allowing writes. // How many buffers we'll allow in the channel before we stop allowing writes.
const BUFFER_CHANNEL_SIZE: u16 = 1024; const BUFFER_CHANNEL_SIZE: u16 = 1024;
@ -48,7 +55,7 @@ struct BoundedBufferChannelInner {
buffers: [MaybeUninit<V8Slice<u8>>; BUFFER_CHANNEL_SIZE as _], buffers: [MaybeUninit<V8Slice<u8>>; BUFFER_CHANNEL_SIZE as _],
ring_producer: u16, ring_producer: u16,
ring_consumer: u16, ring_consumer: u16,
error: Option<AnyError>, error: Option<StreamResourceError>,
current_size: usize, current_size: usize,
// TODO(mmastrac): we can math this field instead of accounting for it // TODO(mmastrac): we can math this field instead of accounting for it
len: usize, len: usize,
@ -141,7 +148,10 @@ impl BoundedBufferChannelInner {
self.len = 0; self.len = 0;
} }
pub fn read(&mut self, limit: usize) -> Result<Option<BufView>, AnyError> { pub fn read(
&mut self,
limit: usize,
) -> Result<Option<BufView>, StreamResourceError> {
// Empty buffers will return the error, if one exists, or None // Empty buffers will return the error, if one exists, or None
if self.len == 0 { if self.len == 0 {
if let Some(error) = self.error.take() { if let Some(error) = self.error.take() {
@ -230,7 +240,7 @@ impl BoundedBufferChannelInner {
Ok(()) Ok(())
} }
pub fn write_error(&mut self, error: AnyError) { pub fn write_error(&mut self, error: StreamResourceError) {
self.error = Some(error); self.error = Some(error);
if let Some(waker) = self.read_waker.take() { if let Some(waker) = self.read_waker.take() {
waker.wake(); waker.wake();
@ -306,7 +316,10 @@ impl BoundedBufferChannel {
self.inner.borrow_mut() self.inner.borrow_mut()
} }
pub fn read(&self, limit: usize) -> Result<Option<BufView>, AnyError> { pub fn read(
&self,
limit: usize,
) -> Result<Option<BufView>, StreamResourceError> {
self.inner().read(limit) self.inner().read(limit)
} }
@ -314,7 +327,7 @@ impl BoundedBufferChannel {
self.inner().write(buffer) self.inner().write(buffer)
} }
pub fn write_error(&self, error: AnyError) { pub fn write_error(&self, error: StreamResourceError) {
self.inner().write_error(error) self.inner().write_error(error)
} }
@ -358,7 +371,10 @@ impl ReadableStreamResource {
RcRef::map(self, |s| &s.cancel_handle).clone() RcRef::map(self, |s| &s.cancel_handle).clone()
} }
async fn read(self: Rc<Self>, limit: usize) -> Result<BufView, AnyError> { async fn read(
self: Rc<Self>,
limit: usize,
) -> Result<BufView, StreamResourceError> {
let cancel_handle = self.cancel_handle(); let cancel_handle = self.cancel_handle();
// Serialize all the reads using a task queue. // Serialize all the reads using a task queue.
let _read_permit = self.read_queue.acquire().await; let _read_permit = self.read_queue.acquire().await;
@ -387,7 +403,7 @@ impl Resource for ReadableStreamResource {
} }
fn read(self: Rc<Self>, limit: usize) -> AsyncResult<BufView> { fn read(self: Rc<Self>, limit: usize) -> AsyncResult<BufView> {
Box::pin(ReadableStreamResource::read(self, limit)) Box::pin(ReadableStreamResource::read(self, limit).map_err(|e| e.into()))
} }
fn close(self: Rc<Self>) { fn close(self: Rc<Self>) {
@ -550,7 +566,7 @@ pub fn op_readable_stream_resource_write_error(
) -> bool { ) -> bool {
let sender = get_sender(sender); let sender = get_sender(sender);
// We can always write an error, no polling required // We can always write an error, no polling required
sender.write_error(type_error(Cow::Owned(error))); sender.write_error(StreamResourceError::Js(error));
!sender.closed() !sender.closed()
} }
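
ReadableStreamResource::read now returns the typed StreamResourceError, so the Resource::read override converts it back into a boxed error at the trait boundary with map_err(|e| e.into()) from TryFutureExt. A stripped-down illustrative sketch of that adapter (assumes the futures, anyhow, and thiserror crates):

use futures::TryFutureExt;

#[derive(Debug, thiserror::Error)]
#[error("{0}")]
struct JsErrorSketch(String);

async fn typed_read(limit: usize) -> Result<Vec<u8>, JsErrorSketch> {
  Ok(Vec::with_capacity(limit))
}

fn boxed_read(
  limit: usize,
) -> impl std::future::Future<Output = Result<Vec<u8>, anyhow::Error>> {
  // the typed error converts into the boxed error only at the trait boundary
  typed_read(limit).map_err(|e| e.into())
}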

View file

@ -2,7 +2,7 @@
[package] [package]
name = "deno_webgpu" name = "deno_webgpu"
version = "0.139.0" version = "0.140.0"
authors = ["the Deno authors"] authors = ["the Deno authors"]
edition.workspace = true edition.workspace = true
license = "MIT" license = "MIT"

View file

@ -2,7 +2,7 @@
[package] [package]
name = "deno_webidl" name = "deno_webidl"
version = "0.172.0" version = "0.173.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

View file

@ -2,7 +2,7 @@
[package] [package]
name = "deno_websocket" name = "deno_websocket"
version = "0.177.0" version = "0.178.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

View file

@ -2,7 +2,7 @@
[package] [package]
name = "deno_webstorage" name = "deno_webstorage"
version = "0.167.0" version = "0.168.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

View file

@ -2,7 +2,7 @@
[package] [package]
name = "deno_resolver" name = "deno_resolver"
version = "0.4.0" version = "0.5.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

View file

@ -2,7 +2,7 @@
[package] [package]
name = "node_resolver" name = "node_resolver"
version = "0.11.0" version = "0.12.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

View file

@ -2,7 +2,7 @@
[package] [package]
name = "deno_runtime" name = "deno_runtime"
version = "0.181.0" version = "0.182.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

View file

@ -23,7 +23,17 @@ use deno_ffi::DlfcnError;
use deno_ffi::IRError; use deno_ffi::IRError;
use deno_ffi::ReprError; use deno_ffi::ReprError;
use deno_ffi::StaticError; use deno_ffi::StaticError;
use deno_kv::KvCheckError;
use deno_kv::KvError;
use deno_kv::KvMutationError;
use deno_napi::NApiError;
use deno_net::ops::NetError;
use deno_tls::TlsError; use deno_tls::TlsError;
use deno_web::BlobError;
use deno_web::CompressionError;
use deno_web::MessagePortError;
use deno_web::StreamResourceError;
use deno_web::WebError;
use deno_webstorage::WebStorageError; use deno_webstorage::WebStorageError;
use std::env; use std::env;
use std::error::Error; use std::error::Error;
@ -165,6 +175,70 @@ pub fn get_nix_error_class(error: &nix::Error) -> &'static str {
} }
} }
fn get_napi_error_class(e: &NApiError) -> &'static str {
match e {
NApiError::InvalidPath
| NApiError::LibLoading(_)
| NApiError::ModuleNotFound(_) => "TypeError",
NApiError::Permission(e) => get_error_class_name(e).unwrap_or("Error"),
}
}
fn get_web_error_class(e: &WebError) -> &'static str {
match e {
WebError::Base64Decode => "DOMExceptionInvalidCharacterError",
WebError::InvalidEncodingLabel(_) => "RangeError",
WebError::BufferTooLong => "TypeError",
WebError::ValueTooLarge => "RangeError",
WebError::BufferTooSmall => "RangeError",
WebError::DataInvalid => "TypeError",
WebError::DataError(_) => "Error",
}
}
fn get_web_compression_error_class(e: &CompressionError) -> &'static str {
match e {
CompressionError::UnsupportedFormat => "TypeError",
CompressionError::ResourceClosed => "TypeError",
CompressionError::IoTypeError(_) => "TypeError",
CompressionError::Io(e) => get_io_error_class(e),
}
}
fn get_web_message_port_error_class(e: &MessagePortError) -> &'static str {
match e {
MessagePortError::InvalidTransfer => "TypeError",
MessagePortError::NotReady => "TypeError",
MessagePortError::TransferSelf => "TypeError",
MessagePortError::Canceled(e) => {
let io_err: io::Error = e.to_owned().into();
get_io_error_class(&io_err)
}
MessagePortError::Resource(e) => get_error_class_name(e).unwrap_or("Error"),
}
}
fn get_web_stream_resource_error_class(
e: &StreamResourceError,
) -> &'static str {
match e {
StreamResourceError::Canceled(e) => {
let io_err: io::Error = e.to_owned().into();
get_io_error_class(&io_err)
}
StreamResourceError::Js(_) => "TypeError",
}
}
fn get_web_blob_error_class(e: &BlobError) -> &'static str {
match e {
BlobError::BlobPartNotFound => "TypeError",
BlobError::SizeLargerThanBlobPart => "TypeError",
BlobError::BlobURLsNotSupported => "TypeError",
BlobError::Url(_) => "Error",
}
}
fn get_ffi_repr_error_class(e: &ReprError) -> &'static str { fn get_ffi_repr_error_class(e: &ReprError) -> &'static str {
match e { match e {
ReprError::InvalidOffset => "TypeError", ReprError::InvalidOffset => "TypeError",
@ -292,11 +366,108 @@ fn get_broadcast_channel_error(error: &BroadcastChannelError) -> &'static str {
} }
} }
fn get_kv_error(error: &KvError) -> &'static str {
match error {
KvError::DatabaseHandler(e) | KvError::Resource(e) | KvError::Kv(e) => {
get_error_class_name(e).unwrap_or("Error")
}
KvError::TooManyRanges(_) => "TypeError",
KvError::TooManyEntries(_) => "TypeError",
KvError::TooManyChecks(_) => "TypeError",
KvError::TooManyMutations(_) => "TypeError",
KvError::TooManyKeys(_) => "TypeError",
KvError::InvalidLimit => "TypeError",
KvError::InvalidBoundaryKey => "TypeError",
KvError::KeyTooLargeToRead(_) => "TypeError",
KvError::KeyTooLargeToWrite(_) => "TypeError",
KvError::TotalMutationTooLarge(_) => "TypeError",
KvError::TotalKeyTooLarge(_) => "TypeError",
KvError::Io(e) => get_io_error_class(e),
KvError::QueueMessageNotFound => "TypeError",
KvError::StartKeyNotInKeyspace => "TypeError",
KvError::EndKeyNotInKeyspace => "TypeError",
KvError::StartKeyGreaterThanEndKey => "TypeError",
KvError::InvalidCheck(e) => match e {
KvCheckError::InvalidVersionstamp => "TypeError",
KvCheckError::Io(e) => get_io_error_class(e),
},
KvError::InvalidMutation(e) => match e {
KvMutationError::BigInt(_) => "Error",
KvMutationError::Io(e) => get_io_error_class(e),
KvMutationError::InvalidMutationWithValue(_) => "TypeError",
KvMutationError::InvalidMutationWithoutValue(_) => "TypeError",
},
KvError::InvalidEnqueue(e) => get_io_error_class(e),
KvError::EmptyKey => "TypeError",
KvError::ValueTooLarge(_) => "TypeError",
KvError::EnqueuePayloadTooLarge(_) => "TypeError",
KvError::InvalidCursor => "TypeError",
KvError::CursorOutOfBounds => "TypeError",
KvError::InvalidRange => "TypeError",
}
}
fn get_net_error(error: &NetError) -> &'static str {
match error {
NetError::ListenerClosed => "BadResource",
NetError::ListenerBusy => "Busy",
NetError::SocketClosed => "BadResource",
NetError::SocketClosedNotConnected => "NotConnected",
NetError::SocketBusy => "Busy",
NetError::Io(e) => get_io_error_class(e),
NetError::AcceptTaskOngoing => "Busy",
NetError::RootCertStore(e)
| NetError::Permission(e)
| NetError::Resource(e) => get_error_class_name(e).unwrap_or("Error"),
NetError::NoResolvedAddress => "Error",
NetError::AddrParse(_) => "Error",
NetError::Map(e) => get_net_map_error(e),
NetError::Canceled(e) => {
let io_err: io::Error = e.to_owned().into();
get_io_error_class(&io_err)
}
NetError::DnsNotFound(_) => "NotFound",
NetError::DnsNotConnected(_) => "NotConnected",
NetError::DnsTimedOut(_) => "TimedOut",
NetError::Dns(_) => "Error",
NetError::UnsupportedRecordType => "NotSupported",
NetError::InvalidUtf8(_) => "InvalidData",
NetError::UnexpectedKeyType => "Error",
NetError::InvalidHostname(_) => "TypeError",
NetError::TcpStreamBusy => "Busy",
NetError::Rustls(_) => "Error",
NetError::Tls(e) => get_tls_error_class(e),
NetError::ListenTlsRequiresKey => "InvalidData",
NetError::Reunite(_) => "Error",
}
}
fn get_net_map_error(error: &deno_net::io::MapError) -> &'static str {
match error {
deno_net::io::MapError::Io(e) => get_io_error_class(e),
deno_net::io::MapError::NoResources => "Error",
}
}
pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> { pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> {
deno_core::error::get_custom_error_class(e) deno_core::error::get_custom_error_class(e)
.or_else(|| deno_webgpu::error::get_error_class_name(e)) .or_else(|| deno_webgpu::error::get_error_class_name(e))
.or_else(|| deno_web::get_error_class_name(e))
.or_else(|| deno_websocket::get_network_error_class_name(e)) .or_else(|| deno_websocket::get_network_error_class_name(e))
.or_else(|| e.downcast_ref::<NApiError>().map(get_napi_error_class))
.or_else(|| e.downcast_ref::<WebError>().map(get_web_error_class))
.or_else(|| {
e.downcast_ref::<CompressionError>()
.map(get_web_compression_error_class)
})
.or_else(|| {
e.downcast_ref::<MessagePortError>()
.map(get_web_message_port_error_class)
})
.or_else(|| {
e.downcast_ref::<StreamResourceError>()
.map(get_web_stream_resource_error_class)
})
.or_else(|| e.downcast_ref::<BlobError>().map(get_web_blob_error_class))
.or_else(|| e.downcast_ref::<IRError>().map(|_| "TypeError")) .or_else(|| e.downcast_ref::<IRError>().map(|_| "TypeError"))
.or_else(|| e.downcast_ref::<ReprError>().map(get_ffi_repr_error_class)) .or_else(|| e.downcast_ref::<ReprError>().map(get_ffi_repr_error_class))
.or_else(|| { .or_else(|| {
@ -316,6 +487,12 @@ pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> {
.or_else(|| e.downcast_ref::<CronError>().map(get_cron_error_class)) .or_else(|| e.downcast_ref::<CronError>().map(get_cron_error_class))
.or_else(|| e.downcast_ref::<CanvasError>().map(get_canvas_error)) .or_else(|| e.downcast_ref::<CanvasError>().map(get_canvas_error))
.or_else(|| e.downcast_ref::<CacheError>().map(get_cache_error)) .or_else(|| e.downcast_ref::<CacheError>().map(get_cache_error))
.or_else(|| e.downcast_ref::<KvError>().map(get_kv_error))
.or_else(|| e.downcast_ref::<NetError>().map(get_net_error))
.or_else(|| {
e.downcast_ref::<deno_net::io::MapError>()
.map(get_net_map_error)
})
.or_else(|| { .or_else(|| {
e.downcast_ref::<BroadcastChannelError>() e.downcast_ref::<BroadcastChannelError>()
.map(get_broadcast_channel_error) .map(get_broadcast_channel_error)

View file

@ -23,6 +23,7 @@ use deno_core::InspectorSessionProxy;
use deno_core::JsRuntime; use deno_core::JsRuntime;
use fastwebsockets::Frame; use fastwebsockets::Frame;
use fastwebsockets::OpCode; use fastwebsockets::OpCode;
use fastwebsockets::Payload;
use fastwebsockets::WebSocket; use fastwebsockets::WebSocket;
use hyper::body::Bytes; use hyper::body::Bytes;
use hyper_util::rt::TokioIo; use hyper_util::rt::TokioIo;
@ -33,6 +34,7 @@ use std::pin::pin;
use std::process; use std::process;
use std::rc::Rc; use std::rc::Rc;
use std::thread; use std::thread;
use std::time::Duration;
use tokio::net::TcpListener; use tokio::net::TcpListener;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use uuid::Uuid; use uuid::Uuid;
@ -393,8 +395,13 @@ async fn pump_websocket_messages(
inbound_tx: UnboundedSender<String>, inbound_tx: UnboundedSender<String>,
mut outbound_rx: UnboundedReceiver<InspectorMsg>, mut outbound_rx: UnboundedReceiver<InspectorMsg>,
) { ) {
let mut ticker = tokio::time::interval(Duration::from_secs(30));
'pump: loop { 'pump: loop {
tokio::select! { tokio::select! {
_ = ticker.tick() => {
let _ = websocket.write_frame(Frame::new(true, OpCode::Ping, None, Payload::Borrowed(&[]))).await;
}
Some(msg) = outbound_rx.next() => { Some(msg) = outbound_rx.next() => {
let msg = Frame::text(msg.content.into_bytes().into()); let msg = Frame::text(msg.content.into_bytes().into());
let _ = websocket.write_frame(msg).await; let _ = websocket.write_frame(msg).await;
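
The new ticker arm keeps the inspector websocket alive by interleaving a ping every 30 seconds with the normal message pump. The shape of that loop, reduced to the tokio primitives (illustrative sketch; the actual ping frame is elided and the channel type is simplified):

use std::time::Duration;
use tokio::sync::mpsc::UnboundedReceiver;

async fn pump(mut outbound: UnboundedReceiver<String>) {
  let mut ticker = tokio::time::interval(Duration::from_secs(30));
  loop {
    tokio::select! {
      _ = ticker.tick() => {
        // a websocket ping frame would be written here to keep the connection alive
      }
      msg = outbound.recv() => {
        match msg {
          Some(_text) => { /* forward the frame over the websocket */ }
          None => break, // channel closed, stop pumping
        }
      }
    }
  }
}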

View file

@ -50,6 +50,7 @@ async fn op_worker_recv_message(
.recv(state.clone()) .recv(state.clone())
.or_cancel(handle.cancel) .or_cancel(handle.cancel)
.await? .await?
.map_err(|e| e.into())
} }
#[op2(fast)] #[op2(fast)]

View file

@ -134,7 +134,7 @@ pub fn op_worker_sync_fetch(
let mime_type = mime_type_essence(&blob.media_type); let mime_type = mime_type_essence(&blob.media_type);
let body = blob.read_all().await?; let body = blob.read_all().await;
(Bytes::from(body), Some(mime_type), script) (Bytes::from(body), Some(mime_type), script)
} }

View file

@ -359,7 +359,7 @@ async fn op_host_recv_message(
} }
Ok(ret) Ok(ret)
} }
Ok(Err(err)) => Err(err), Ok(Err(err)) => Err(err.into()),
Err(_) => { Err(_) => {
// The worker was terminated. // The worker was terminated.
Ok(None) Ok(None)

View file

@ -2,7 +2,7 @@
[package] [package]
name = "deno_permissions" name = "deno_permissions"
version = "0.32.0" version = "0.33.0"
authors.workspace = true authors.workspace = true
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true

View file

@ -35,11 +35,13 @@ use std::sync::Arc;
pub mod prompter; pub mod prompter;
use prompter::permission_prompt; use prompter::permission_prompt;
use prompter::PromptResponse;
use prompter::PERMISSION_EMOJI; use prompter::PERMISSION_EMOJI;
pub use prompter::set_prompt_callbacks; pub use prompter::set_prompt_callbacks;
pub use prompter::set_prompter;
pub use prompter::PermissionPrompter;
pub use prompter::PromptCallback; pub use prompter::PromptCallback;
pub use prompter::PromptResponse;
/// Fast exit from permission check routines if this permission /// Fast exit from permission check routines if this permission
/// is in the "fully-granted" state. /// is in the "fully-granted" state.

View file

@ -80,6 +80,10 @@ pub fn set_prompt_callbacks(
*MAYBE_AFTER_PROMPT_CALLBACK.lock() = Some(after_callback); *MAYBE_AFTER_PROMPT_CALLBACK.lock() = Some(after_callback);
} }
pub fn set_prompter(prompter: Box<dyn PermissionPrompter>) {
*PERMISSION_PROMPTER.lock() = prompter;
}
pub type PromptCallback = Box<dyn FnMut() + Send + Sync>; pub type PromptCallback = Box<dyn FnMut() + Send + Sync>;
pub trait PermissionPrompter: Send + Sync { pub trait PermissionPrompter: Send + Sync {
@ -476,8 +480,4 @@ pub mod tests {
STUB_PROMPT_VALUE.store(value, Ordering::SeqCst); STUB_PROMPT_VALUE.store(value, Ordering::SeqCst);
} }
} }
pub fn set_prompter(prompter: Box<dyn PermissionPrompter>) {
*PERMISSION_PROMPTER.lock() = prompter;
}
} }

View file

@ -0,0 +1,9 @@
{
"tempDir": true,
"steps": [
{
"args": "install",
"output": ""
}
]
}

View file

@ -0,0 +1,5 @@
{
"imports": {
"@assets": "./src/assets/"
}
}

View file

@ -1,11 +1,13 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
import { assert, assertEquals, assertThrows } from "@std/assert"; /// <reference lib="deno.ns" />
import { assert, assertEquals, assertRejects, assertThrows } from "@std/assert";
import { join } from "node:path"; import { join } from "node:path";
import { tmpdir } from "node:os"; import { tmpdir } from "node:os";
import { import {
closeSync, closeSync,
constants, constants,
copyFileSync,
createWriteStream, createWriteStream,
existsSync, existsSync,
lstatSync, lstatSync,
@ -20,6 +22,7 @@ import {
} from "node:fs"; } from "node:fs";
import { import {
constants as fsPromiseConstants, constants as fsPromiseConstants,
copyFile,
cp, cp,
FileHandle, FileHandle,
open, open,
@ -212,3 +215,21 @@ Deno.test("[node/fs] readSync works", () => {
assertEquals(bytesRead, 12); assertEquals(bytesRead, 12);
closeSync(fd!); closeSync(fd!);
}); });
Deno.test("[node/fs] copyFile COPYFILE_EXCL works", async () => {
const dir = mkdtempSync(join(tmpdir(), "foo-"));
const src = join(dir, "src.txt");
const dest = join(dir, "dest.txt");
await writeFile(src, "");
await copyFile(src, dest, fsPromiseConstants.COPYFILE_EXCL);
assert(existsSync(dest));
await assertRejects(() =>
copyFile(src, dest, fsPromiseConstants.COPYFILE_EXCL)
);
const dest2 = join(dir, "dest2.txt");
copyFileSync(src, dest2, fsPromiseConstants.COPYFILE_EXCL);
assert(existsSync(dest2));
assertThrows(() =>
copyFileSync(src, dest2, fsPromiseConstants.COPYFILE_EXCL)
);
});