// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::io::Read;
use std::sync::Arc;

use deno_config::deno_json::NodeModulesDirMode;
use deno_core::error::AnyError;
use deno_runtime::WorkerExecutionMode;

use crate::args::EvalFlags;
use crate::args::Flags;
use crate::args::WatchFlagsWithPaths;
use crate::factory::CliFactory;
use crate::file_fetcher::File;
use crate::util;
use crate::util::file_watcher::WatcherRestartMode;

pub mod hmr;
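
/// Warns when permission flags were passed after the script argument, where
/// they are forwarded to the script itself instead of being applied by the
/// Deno CLI.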
pub fn check_permission_before_script(flags: &Flags) {
  if !flags.has_permission() && flags.has_permission_in_argv() {
    log::warn!(
      "{}",
      crate::colors::yellow(
        r#"Permission flags have likely been incorrectly set after the script argument.
To grant permissions, set them before the script argument. For example:
    deno run --allow-read=. main.js"#
      )
    );
  }
}
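
/// Runs the resolved main module to completion (e.g. `deno run main.ts`) and
/// returns the worker's exit code, delegating to `run_with_watch` when watch
/// flags are present.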
pub async fn run_script(
  mode: WorkerExecutionMode,
  flags: Arc<Flags>,
  watch: Option<WatchFlagsWithPaths>,
) -> Result<i32, AnyError> {
  check_permission_before_script(&flags);

  if let Some(watch_flags) = watch {
    return run_with_watch(mode, flags, watch_flags).await;
  }

  // TODO(bartlomieju): actually I think it will also fail if there's an
  // import map specified and a bare specifier is used on the command line
  let factory = CliFactory::from_flags(flags);
  let cli_options = factory.cli_options()?;
  let deno_dir = factory.deno_dir()?;
  let http_client = factory.http_client_provider();

  // Run a background task that checks for available upgrades, or outputs a
  // message if an earlier run of this background task found a new version
  // of Deno.
  #[cfg(feature = "upgrade")]
  super::upgrade::check_for_upgrades(
    http_client.clone(),
    deno_dir.upgrade_check_file_path(),
  );

  let main_module = cli_options.resolve_main_module()?;

  maybe_npm_install(&factory).await?;

  let worker_factory = factory.create_cli_main_worker_factory().await?;
  let mut worker = worker_factory
    .create_main_worker(mode, main_module.clone())
    .await?;
  let exit_code = worker.run().await?;
  Ok(exit_code)
}
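
/// Runs a script whose source is read from stdin (e.g.
/// `cat main.ts | deno run -`) by injecting it into the file fetcher's
/// in-memory cache under the resolved main module specifier.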
pub async fn run_from_stdin(flags: Arc<Flags>) -> Result<i32, AnyError> {
  let factory = CliFactory::from_flags(flags);
  let cli_options = factory.cli_options()?;
  let main_module = cli_options.resolve_main_module()?;

  maybe_npm_install(&factory).await?;

  let file_fetcher = factory.file_fetcher()?;
  let worker_factory = factory.create_cli_main_worker_factory().await?;

  let mut source = Vec::new();
  std::io::stdin().read_to_end(&mut source)?;
  // Save a fake file into the file fetcher cache
  // to allow module access by the TS compiler.
  file_fetcher.insert_memory_files(File {
    specifier: main_module.clone(),
    maybe_headers: None,
    source: source.into(),
  });

  let mut worker = worker_factory
    .create_main_worker(WorkerExecutionMode::Run, main_module.clone())
    .await?;
  let exit_code = worker.run().await?;
  Ok(exit_code)
}
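
/// Runs the main module under the file watcher, restarting the worker when
/// watched paths change; when HMR is requested, the `hmr` module applies
/// module updates in place instead of restarting the worker.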
// TODO(bartlomieju): this function does not properly handle the `exit_code`
// set by the runtime code.
async fn run_with_watch(
  mode: WorkerExecutionMode,
  flags: Arc<Flags>,
  watch_flags: WatchFlagsWithPaths,
) -> Result<i32, AnyError> {
  util::file_watcher::watch_recv(
    flags,
    util::file_watcher::PrintConfig::new_with_banner(
      if watch_flags.hmr { "HMR" } else { "Watcher" },
      "Process",
      !watch_flags.no_clear_screen,
    ),
    WatcherRestartMode::Automatic,
    move |flags, watcher_communicator, _changed_paths| {
      Ok(async move {
        let factory = CliFactory::from_flags_for_watcher(
          flags,
          watcher_communicator.clone(),
        );
        let cli_options = factory.cli_options()?;
        let main_module = cli_options.resolve_main_module()?;

        maybe_npm_install(&factory).await?;

        let _ = watcher_communicator.watch_paths(cli_options.watch_paths());

        let mut worker = factory
          .create_cli_main_worker_factory()
          .await?
          .create_main_worker(mode, main_module.clone())
          .await?;

        if watch_flags.hmr {
          worker.run().await?;
        } else {
          worker.run_for_watcher().await?;
        }

        Ok(())
      })
    },
  )
  .await?;

  Ok(0)
}
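
/// Evaluates code passed directly on the command line (`deno eval "..."`);
/// with the print flag set, the code is wrapped in `console.log(...)` so the
/// resulting value is printed.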
pub async fn eval_command(
  flags: Arc<Flags>,
  eval_flags: EvalFlags,
) -> Result<i32, AnyError> {
  let factory = CliFactory::from_flags(flags);
  let cli_options = factory.cli_options()?;
  let file_fetcher = factory.file_fetcher()?;
  let main_module = cli_options.resolve_main_module()?;

  maybe_npm_install(&factory).await?;

  // Create a dummy source file.
  let source_code = if eval_flags.print {
    format!("console.log({})", eval_flags.code)
  } else {
    eval_flags.code
  };

  // Save a fake file into the file fetcher cache
  // to allow module access by the TS compiler.
  file_fetcher.insert_memory_files(File {
    specifier: main_module.clone(),
    maybe_headers: None,
    source: source_code.into_bytes().into(),
  });

  let worker_factory = factory.create_cli_main_worker_factory().await?;
  let mut worker = worker_factory
    .create_main_worker(WorkerExecutionMode::Eval, main_module.clone())
    .await?;
  let exit_code = worker.run().await?;
  Ok(exit_code)
}
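
/// Ensures top-level npm packages are installed when `node_modules_dir`
/// resolves to `NodeModulesDirMode::Auto` (an auto-managed node_modules
/// directory).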
pub async fn maybe_npm_install(factory: &CliFactory) -> Result<(), AnyError> {
  // Ensure an "npm install" is done if the user has explicitly
  // opted into using a managed node_modules directory.
  if factory.cli_options()?.node_modules_dir()?
    == Some(NodeModulesDirMode::Auto)
  {
    if let Some(npm_resolver) = factory.npm_resolver().await?.as_managed() {
      npm_resolver.ensure_top_level_package_json_install().await?;
    }
  }
  Ok(())
}