// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.

use crate::args::CliOptions;
use crate::args::DenoSubcommand;
use crate::args::Flags;
use crate::args::Lockfile;
use crate::args::StorageKeyResolver;
use crate::args::TsConfigType;
use crate::cache::Caches;
use crate::cache::DenoDir;
use crate::cache::EmitCache;
use crate::cache::HttpCache;
use crate::cache::NodeAnalysisCache;
use crate::cache::ParsedSourceCache;
use crate::emit::Emitter;
use crate::file_fetcher::FileFetcher;
use crate::graph_util::ModuleGraphBuilder;
use crate::graph_util::ModuleGraphContainer;
use crate::http_util::HttpClient;
use crate::module_loader::CliModuleLoaderFactory;
use crate::module_loader::ModuleLoadPreparer;
use crate::module_loader::NpmModuleLoader;
use crate::node::CliCjsEsmCodeAnalyzer;
use crate::node::CliNodeCodeTranslator;
use crate::npm::create_npm_fs_resolver;
use crate::npm::CliNpmRegistryApi;
use crate::npm::CliNpmResolver;
use crate::npm::NpmCache;
use crate::npm::NpmResolution;
use crate::npm::PackageJsonDepsInstaller;
use crate::resolver::CliGraphResolver;
use crate::tools::check::TypeChecker;
use crate::util::progress_bar::ProgressBar;
use crate::util::progress_bar::ProgressBarStyle;
use crate::worker::CliMainWorkerFactory;
use crate::worker::CliMainWorkerOptions;

use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
use deno_core::ModuleSpecifier;
use deno_runtime::deno_node;
use deno_runtime::deno_node::analyze::NodeCodeTranslator;
use deno_runtime::deno_node::NodeResolver;
use deno_runtime::deno_tls::rustls::RootCertStore;
use deno_runtime::deno_web::BlobStore;
use deno_runtime::inspector_server::InspectorServer;
use deno_semver::npm::NpmPackageReqReference;
use import_map::ImportMap;
use log::warn;
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
/// This structure used to represent the state of a single "deno" program
/// that was shared by all created workers. It has morphed into being the
/// "factory" for all objects, but is slowly being phased out.
pub struct ProcState {
  pub dir: DenoDir,
  pub caches: Arc<Caches>,
  pub file_fetcher: Arc<FileFetcher>,
  pub http_client: HttpClient,
  pub options: Arc<CliOptions>,
  pub emit_cache: EmitCache,
  pub emitter: Arc<Emitter>,
  pub graph_container: Arc<ModuleGraphContainer>,
  pub lockfile: Option<Arc<Mutex<Lockfile>>>,
  pub maybe_import_map: Option<Arc<ImportMap>>,
  pub maybe_inspector_server: Option<Arc<InspectorServer>>,
  pub root_cert_store: RootCertStore,
  pub blob_store: BlobStore,
  pub parsed_source_cache: Arc<ParsedSourceCache>,
  pub resolver: Arc<CliGraphResolver>,
  maybe_file_watcher_reporter: Option<FileWatcherReporter>,
  pub module_graph_builder: Arc<ModuleGraphBuilder>,
  pub module_load_preparer: Arc<ModuleLoadPreparer>,
  pub node_code_translator: Arc<CliNodeCodeTranslator>,
  pub node_fs: Arc<dyn deno_node::NodeFs>,
  pub node_resolver: Arc<NodeResolver>,
  pub npm_api: Arc<CliNpmRegistryApi>,
  pub npm_cache: Arc<NpmCache>,
  pub npm_resolver: Arc<CliNpmResolver>,
  pub npm_resolution: Arc<NpmResolution>,
  pub package_json_deps_installer: Arc<PackageJsonDepsInstaller>,
  pub cjs_resolutions: Arc<CjsResolutionStore>,
}

impl ProcState {
  pub async fn from_cli_options(
    options: Arc<CliOptions>,
  ) -> Result<Self, AnyError> {
    Self::build_with_sender(options, None).await
  }
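
  /// Convenience constructor that resolves `CliOptions` from raw `Flags`.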
  pub async fn from_flags(flags: Flags) -> Result<Self, AnyError> {
    Self::from_cli_options(Arc::new(CliOptions::from_flags(flags)?)).await
  }
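
  /// Like [`Self::from_flags`], but also wires up the channel used to notify
  /// the file watcher of files discovered during module loading.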
  pub async fn from_flags_for_file_watcher(
    flags: Flags,
    files_to_watch_sender: tokio::sync::mpsc::UnboundedSender<Vec<PathBuf>>,
  ) -> Result<Self, AnyError> {
    // resolve the config each time
    let cli_options = Arc::new(CliOptions::from_flags(flags)?);
    let ps =
      Self::build_with_sender(cli_options, Some(files_to_watch_sender.clone()))
        .await?;
    ps.init_watcher();
    Ok(ps)
  }

  /// Reset all runtime state to its default. This should be used on file
  /// watcher restarts.
  pub fn reset_for_file_watcher(&self) {
    self.cjs_resolutions.clear();
    self.parsed_source_cache.clear();
    self.graph_container.clear();

    self.init_watcher();
  }

  // Add invariant files like the import map and explicit watch flag list to
  // the watcher. Dedup for build_for_file_watcher and reset_for_file_watcher.
  fn init_watcher(&self) {
    let files_to_watch_sender = match &self.maybe_file_watcher_reporter {
      Some(reporter) => &reporter.sender,
      None => return,
    };
    if let Some(watch_paths) = self.options.watch_paths() {
      files_to_watch_sender.send(watch_paths.clone()).unwrap();
    }
    if let Ok(Some(import_map_path)) = self
      .options
      .resolve_import_map_specifier()
      .map(|ms| ms.and_then(|ref s| s.to_file_path().ok()))
    {
      files_to_watch_sender.send(vec![import_map_path]).unwrap();
    }
  }
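
  /// Constructs all of the services that make up a `ProcState`, wiring
  /// caches, the file fetcher, npm support, and module graph tooling
  /// together; `maybe_sender` is the optional file watcher channel.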
  async fn build_with_sender(
    cli_options: Arc<CliOptions>,
    maybe_sender: Option<tokio::sync::mpsc::UnboundedSender<Vec<PathBuf>>>,
  ) -> Result<Self, AnyError> {
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
let dir = cli_options.resolve_deno_dir()?;
|
2023-04-14 16:22:33 -04:00
|
|
|
let caches = Arc::new(Caches::default());
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
    // Warm up the caches we know we'll likely need based on the CLI mode
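    // (opening the cache DBs eagerly also lets their SQLite connections
    // initialize off the main thread before first use; see #18401)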
    match cli_options.sub_command() {
      DenoSubcommand::Run(_) => {
        _ = caches.dep_analysis_db(&dir);
        _ = caches.node_analysis_db(&dir);
      }
      DenoSubcommand::Check(_) => {
        _ = caches.dep_analysis_db(&dir);
        _ = caches.node_analysis_db(&dir);
        _ = caches.type_checking_cache_db(&dir);
      }
      _ => {}
    }
    let blob_store = BlobStore::default();
    let deps_cache_location = dir.deps_folder_path();
    let http_cache = HttpCache::new(&deps_cache_location);
    let root_cert_store = cli_options.resolve_root_cert_store()?;
    let cache_usage = cli_options.cache_setting();
    let progress_bar = ProgressBar::new(ProgressBarStyle::TextOnly);
    let http_client = HttpClient::new(
      Some(root_cert_store.clone()),
      cli_options.unsafely_ignore_certificate_errors().clone(),
    )?;
    let file_fetcher = FileFetcher::new(
      http_cache,
      cache_usage,
      !cli_options.no_remote(),
      http_client.clone(),
      blob_store.clone(),
      Some(progress_bar.clone()),
    );

    let lockfile = cli_options.maybe_lock_file();
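
    // npm support: registry API, package cache, npm package resolution, and
    // package.json dependency installation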
    let npm_registry_url = CliNpmRegistryApi::default_url().to_owned();
    let npm_cache = Arc::new(NpmCache::new(
      dir.npm_folder_path(),
      cli_options.cache_setting(),
      http_client.clone(),
      progress_bar.clone(),
    ));
    let npm_api = Arc::new(CliNpmRegistryApi::new(
      npm_registry_url.clone(),
      npm_cache.clone(),
      http_client.clone(),
      progress_bar.clone(),
    ));
    let npm_snapshot = cli_options
      .resolve_npm_resolution_snapshot(&npm_api)
      .await?;
    let npm_resolution = Arc::new(NpmResolution::from_serialized(
      npm_api.clone(),
      npm_snapshot,
      lockfile.as_ref().cloned(),
    ));
    let node_fs = Arc::new(deno_node::RealFs);
    let npm_fs_resolver = create_npm_fs_resolver(
      node_fs.clone(),
      npm_cache.clone(),
      &progress_bar,
      npm_registry_url,
      npm_resolution.clone(),
      cli_options.node_modules_dir_path(),
    );
    let npm_resolver = Arc::new(CliNpmResolver::new(
      npm_resolution.clone(),
      npm_fs_resolver,
      lockfile.as_ref().cloned(),
    ));
    let package_json_deps_installer = Arc::new(PackageJsonDepsInstaller::new(
      npm_api.clone(),
      npm_resolution.clone(),
      cli_options.maybe_package_json_deps(),
    ));
    let maybe_import_map = cli_options
      .resolve_import_map(&file_fetcher)
      .await?
      .map(Arc::new);
    let maybe_inspector_server =
      cli_options.resolve_inspector_server().map(Arc::new);

    let resolver = Arc::new(CliGraphResolver::new(
      cli_options.to_maybe_jsx_import_source_config(),
      maybe_import_map.clone(),
      cli_options.no_npm(),
      npm_api.clone(),
      npm_resolution.clone(),
      package_json_deps_installer.clone(),
    ));

    let maybe_file_watcher_reporter =
      maybe_sender.map(|sender| FileWatcherReporter {
        sender,
        file_paths: Arc::new(Mutex::new(vec![])),
      });

    let ts_config_result =
      cli_options.resolve_ts_config_for_emit(TsConfigType::Emit)?;
    if let Some(ignored_options) = ts_config_result.maybe_ignored_options {
      warn!("{}", ignored_options);
    }
    let emit_cache = EmitCache::new(dir.gen_cache.clone());
    let parsed_source_cache =
      Arc::new(ParsedSourceCache::new(caches.dep_analysis_db(&dir)));
    let emit_options: deno_ast::EmitOptions = ts_config_result.ts_config.into();
    let emitter = Arc::new(Emitter::new(
      emit_cache.clone(),
      parsed_source_cache.clone(),
      emit_options,
    ));
    let file_fetcher = Arc::new(file_fetcher);
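
    // Node.js/CJS compatibility: analysis cache, resolver, and CJS-to-ESM
    // code translation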
    let node_analysis_cache =
      NodeAnalysisCache::new(caches.node_analysis_db(&dir));
    let cjs_esm_analyzer = CliCjsEsmCodeAnalyzer::new(node_analysis_cache);
    let node_resolver =
      Arc::new(NodeResolver::new(node_fs.clone(), npm_resolver.clone()));
    let node_code_translator = Arc::new(NodeCodeTranslator::new(
      cjs_esm_analyzer,
      node_fs.clone(),
      node_resolver.clone(),
      npm_resolver.clone(),
    ));
    let type_checker = Arc::new(TypeChecker::new(
      dir.clone(),
      caches.clone(),
      cli_options.clone(),
      node_resolver.clone(),
      npm_resolver.clone(),
    ));
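
    // module graph building, type checking, and module load preparation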
    let module_graph_builder = Arc::new(ModuleGraphBuilder::new(
      cli_options.clone(),
      resolver.clone(),
      npm_resolver.clone(),
      parsed_source_cache.clone(),
      lockfile.clone(),
      emit_cache.clone(),
      file_fetcher.clone(),
      type_checker.clone(),
    ));
    let graph_container: Arc<ModuleGraphContainer> = Default::default();
    let module_load_preparer = Arc::new(ModuleLoadPreparer::new(
      cli_options.clone(),
      graph_container.clone(),
      lockfile.clone(),
      maybe_file_watcher_reporter.clone(),
      module_graph_builder.clone(),
      parsed_source_cache.clone(),
      progress_bar.clone(),
      resolver.clone(),
      type_checker,
    ));

    Ok(ProcState {
      dir,
      caches,
      options: cli_options,
      emit_cache,
      emitter,
      file_fetcher,
      http_client,
      graph_container,
      lockfile,
      maybe_import_map,
      maybe_inspector_server,
      root_cert_store,
      blob_store,
      parsed_source_cache,
      resolver,
      maybe_file_watcher_reporter,
      module_graph_builder,
      node_code_translator,
      node_fs,
      node_resolver,
      npm_api,
      npm_cache,
      npm_resolver,
      npm_resolution,
      package_json_deps_installer,
      cjs_resolutions: Default::default(),
      module_load_preparer,
    })
  }

  // todo(dsherret): this is a transitory method as we separate out
  // ProcState from more code
  pub fn create_cli_main_worker_factory(&self) -> CliMainWorkerFactory {
    CliMainWorkerFactory::new(
      StorageKeyResolver::from_options(&self.options),
      self.npm_resolver.clone(),
      self.node_resolver.clone(),
      self.graph_container.clone(),
      self.blob_store.clone(),
      CliModuleLoaderFactory::new(
        &self.options,
        self.emitter.clone(),
        self.graph_container.clone(),
        self.module_load_preparer.clone(),
        self.parsed_source_cache.clone(),
        self.resolver.clone(),
        NpmModuleLoader::new(
          self.cjs_resolutions.clone(),
          self.node_code_translator.clone(),
          self.node_resolver.clone(),
        ),
      ),
      self.root_cert_store.clone(),
      self.node_fs.clone(),
      self.maybe_inspector_server.clone(),
      CliMainWorkerOptions {
        argv: self.options.argv().clone(),
        debug: self
          .options
          .log_level()
          .map(|l| l == log::Level::Debug)
          .unwrap_or(false),
        coverage_dir: self.options.coverage_dir(),
        enable_testing_features: self.options.enable_testing_features(),
        has_node_modules_dir: self.options.has_node_modules_dir(),
        inspect_brk: self.options.inspect_brk().is_some(),
        inspect_wait: self.options.inspect_wait().is_some(),
        is_inspecting: self.options.is_inspecting(),
        is_npm_main: self.options.is_npm_main(),
        location: self.options.location_flag().clone(),
        maybe_binary_npm_command_name: {
          let mut maybe_binary_command_name = None;
          if let DenoSubcommand::Run(flags) = self.options.sub_command() {
            if let Ok(pkg_ref) = NpmPackageReqReference::from_str(&flags.script)
            {
              // if the user ran a binary command, we'll need to set process.argv[0]
              // to be the name of the binary command instead of deno
              let binary_name = pkg_ref
                .sub_path
                .as_deref()
                .unwrap_or(pkg_ref.req.name.as_str());
              maybe_binary_command_name = Some(binary_name.to_string());
            }
          }
          maybe_binary_command_name
        },
        origin_data_folder_path: self.dir.origin_data_folder_path(),
        seed: self.options.seed(),
        unsafely_ignore_certificate_errors: self
          .options
          .unsafely_ignore_certificate_errors()
          .clone(),
        unstable: self.options.unstable(),
      },
    )
  }
}

/// Keeps track of what module specifiers were resolved as CJS.
#[derive(Default)]
pub struct CjsResolutionStore(Mutex<HashSet<ModuleSpecifier>>);

impl CjsResolutionStore {
  pub fn clear(&self) {
    self.0.lock().clear();
  }

  pub fn contains(&self, specifier: &ModuleSpecifier) -> bool {
    self.0.lock().contains(specifier)
  }

  pub fn insert(&self, specifier: ModuleSpecifier) {
    self.0.lock().insert(specifier);
  }
}
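
/// Reports file paths discovered during a module graph load back to the
/// file watcher.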
#[derive(Clone, Debug)]
pub struct FileWatcherReporter {
  sender: tokio::sync::mpsc::UnboundedSender<Vec<PathBuf>>,
  file_paths: Arc<Mutex<Vec<PathBuf>>>,
}

impl deno_graph::source::Reporter for FileWatcherReporter {
  fn on_load(
    &self,
    specifier: &ModuleSpecifier,
    modules_done: usize,
    modules_total: usize,
  ) {
    let mut file_paths = self.file_paths.lock();
    if specifier.scheme() == "file" {
      file_paths.push(specifier.to_file_path().unwrap());
    }

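    // flush the accumulated batch once the whole graph has been loaded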
    if modules_done == modules_total {
      self.sender.send(file_paths.drain(..).collect()).unwrap();
    }
  }
}