2023-01-02 16:00:42 -05:00
|
|
|
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
|
2020-09-05 20:34:02 -04:00
|
|
|
|
2022-06-29 11:51:11 -04:00
|
|
|
use crate::args::CliOptions;
|
2022-06-28 16:45:55 -04:00
|
|
|
use crate::args::DenoSubcommand;
|
2022-06-27 16:54:09 -04:00
|
|
|
use crate::args::Flags;
|
2022-11-25 17:00:28 -05:00
|
|
|
use crate::args::Lockfile;
|
2022-11-25 18:29:48 -05:00
|
|
|
use crate::args::TsConfigType;
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
use crate::cache::Caches;
|
2022-11-25 19:04:30 -05:00
|
|
|
use crate::cache::DenoDir;
|
2022-07-12 18:58:39 -04:00
|
|
|
use crate::cache::EmitCache;
|
2022-11-28 17:28:54 -05:00
|
|
|
use crate::cache::HttpCache;
|
2022-10-01 06:15:56 -04:00
|
|
|
use crate::cache::NodeAnalysisCache;
|
2022-08-22 12:14:59 -04:00
|
|
|
use crate::cache::ParsedSourceCache;
|
2023-04-13 14:03:07 -04:00
|
|
|
use crate::emit::Emitter;
|
2020-11-05 19:38:21 -05:00
|
|
|
use crate::file_fetcher::FileFetcher;
|
2023-04-14 16:22:33 -04:00
|
|
|
use crate::graph_util::ModuleGraphBuilder;
|
2023-02-24 14:42:45 -05:00
|
|
|
use crate::graph_util::ModuleGraphContainer;
|
2022-11-18 17:28:14 -05:00
|
|
|
use crate::http_util::HttpClient;
|
2023-04-14 16:22:33 -04:00
|
|
|
use crate::module_loader::ModuleLoadPreparer;
|
|
|
|
use crate::node::NodeCodeTranslator;
|
2023-03-12 23:32:59 -04:00
|
|
|
use crate::npm::create_npm_fs_resolver;
|
2023-04-12 08:36:11 -04:00
|
|
|
use crate::npm::CliNpmRegistryApi;
|
2022-09-13 11:59:01 -04:00
|
|
|
use crate::npm::NpmCache;
|
2022-08-20 11:31:33 -04:00
|
|
|
use crate::npm::NpmPackageResolver;
|
2023-03-12 23:32:59 -04:00
|
|
|
use crate::npm::NpmResolution;
|
2023-02-24 19:35:43 -05:00
|
|
|
use crate::npm::PackageJsonDepsInstaller;
|
2023-02-15 11:30:54 -05:00
|
|
|
use crate::resolver::CliGraphResolver;
|
2023-04-14 18:05:46 -04:00
|
|
|
use crate::tools::check::TypeChecker;
|
2022-11-28 17:28:54 -05:00
|
|
|
use crate::util::progress_bar::ProgressBar;
|
2022-12-12 20:52:10 -05:00
|
|
|
use crate::util::progress_bar::ProgressBarStyle;
|
2020-10-22 20:50:15 -04:00
|
|
|
|
2020-09-14 12:48:57 -04:00
|
|
|
use deno_core::error::AnyError;
|
2021-07-06 23:48:01 -04:00
|
|
|
use deno_core::parking_lot::Mutex;
|
2021-09-29 04:47:24 -04:00
|
|
|
use deno_core::CompiledWasmModuleStore;
|
2020-01-05 11:56:18 -05:00
|
|
|
use deno_core::ModuleSpecifier;
|
2021-09-10 21:38:24 -04:00
|
|
|
use deno_core::SharedArrayBufferStore;
|
2023-04-14 16:22:33 -04:00
|
|
|
|
2021-09-10 21:38:24 -04:00
|
|
|
use deno_runtime::deno_broadcast_channel::InMemoryBroadcastChannel;
|
2021-12-01 11:13:11 -05:00
|
|
|
use deno_runtime::deno_tls::rustls::RootCertStore;
|
2021-09-10 21:38:24 -04:00
|
|
|
use deno_runtime::deno_web::BlobStore;
|
|
|
|
use deno_runtime::inspector_server::InspectorServer;
|
|
|
|
use import_map::ImportMap;
|
2022-01-13 18:17:56 -05:00
|
|
|
use log::warn;
|
2021-06-19 10:14:43 -04:00
|
|
|
use std::collections::HashSet;
|
2021-09-24 11:10:42 -04:00
|
|
|
use std::ops::Deref;
|
2022-06-08 06:07:25 -04:00
|
|
|
use std::path::PathBuf;
|
2019-11-04 10:38:52 -05:00
|
|
|
use std::sync::Arc;
|
|
|
|
|
|
|
|
/// This structure represents state of single "deno" program.
|
|
|
|
///
|
|
|
|
/// It is shared by all created workers (thus V8 isolates).
|
2021-09-24 11:10:42 -04:00
|
|
|
#[derive(Clone)]
|
|
|
|
pub struct ProcState(Arc<Inner>);
|
|
|
|
|
|
|
|
/// The shared state behind [`ProcState`]; constructed once in
/// `build_with_sender` and handed out to all workers via the `Arc`.
pub struct Inner {
  // Layout of the Deno cache directory on disk.
  pub dir: DenoDir,
  // Cache databases; `build_with_sender` pre-warms a subset of them
  // depending on the subcommand.
  pub caches: Arc<Caches>,
  pub file_fetcher: Arc<FileFetcher>,
  pub http_client: HttpClient,
  // CLI flags/config resolved into usable options.
  pub options: Arc<CliOptions>,
  pub emit_cache: EmitCache,
  pub emitter: Arc<Emitter>,
  // Currently-loaded module graph; cleared on file watcher restarts
  // (see `reset_for_file_watcher`).
  pub graph_container: Arc<ModuleGraphContainer>,
  pub lockfile: Option<Arc<Mutex<Lockfile>>>,
  pub maybe_import_map: Option<Arc<ImportMap>>,
  pub maybe_inspector_server: Option<Arc<InspectorServer>>,
  pub root_cert_store: RootCertStore,
  // Web-platform stores shared across workers/isolates.
  pub blob_store: BlobStore,
  pub broadcast_channel: InMemoryBroadcastChannel,
  pub shared_array_buffer_store: SharedArrayBufferStore,
  pub compiled_wasm_module_store: CompiledWasmModuleStore,
  pub parsed_source_cache: Arc<ParsedSourceCache>,
  pub resolver: Arc<CliGraphResolver>,
  // Present only when running under the file watcher.
  maybe_file_watcher_reporter: Option<FileWatcherReporter>,
  pub module_graph_builder: Arc<ModuleGraphBuilder>,
  pub module_load_preparer: Arc<ModuleLoadPreparer>,
  pub node_code_translator: Arc<NodeCodeTranslator>,
  // npm registry / cache / resolution plumbing.
  pub npm_api: Arc<CliNpmRegistryApi>,
  pub npm_cache: Arc<NpmCache>,
  pub npm_resolver: Arc<NpmPackageResolver>,
  pub npm_resolution: Arc<NpmResolution>,
  pub package_json_deps_installer: Arc<PackageJsonDepsInstaller>,
  // Specifiers that were resolved as CommonJS (see `CjsResolutionStore`).
  pub cjs_resolutions: Arc<CjsResolutionStore>,
  progress_bar: ProgressBar,
}
|
|
|
|
|
2021-09-24 11:10:42 -04:00
|
|
|
impl Deref for ProcState {
|
|
|
|
type Target = Arc<Inner>;
|
|
|
|
fn deref(&self) -> &Self::Target {
|
|
|
|
&self.0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl ProcState {
|
2023-04-12 14:54:28 -04:00
|
|
|
pub async fn from_cli_options(
|
2022-06-29 11:51:11 -04:00
|
|
|
options: Arc<CliOptions>,
|
2022-06-28 16:45:55 -04:00
|
|
|
) -> Result<Self, AnyError> {
|
2022-06-29 11:51:11 -04:00
|
|
|
Self::build_with_sender(options, None).await
|
2022-06-08 06:07:25 -04:00
|
|
|
}
|
|
|
|
|
2023-04-12 14:54:28 -04:00
|
|
|
pub async fn from_flags(flags: Flags) -> Result<Self, AnyError> {
|
|
|
|
Self::from_cli_options(Arc::new(CliOptions::from_flags(flags)?)).await
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn from_flags_for_file_watcher(
|
2022-06-28 16:45:55 -04:00
|
|
|
flags: Flags,
|
2022-06-08 06:07:25 -04:00
|
|
|
files_to_watch_sender: tokio::sync::mpsc::UnboundedSender<Vec<PathBuf>>,
|
|
|
|
) -> Result<Self, AnyError> {
|
2022-06-28 16:45:55 -04:00
|
|
|
// resolve the config each time
|
2022-06-29 11:51:11 -04:00
|
|
|
let cli_options = Arc::new(CliOptions::from_flags(flags)?);
|
2022-06-28 16:45:55 -04:00
|
|
|
let ps =
|
2022-06-29 11:51:11 -04:00
|
|
|
Self::build_with_sender(cli_options, Some(files_to_watch_sender.clone()))
|
2022-06-28 16:45:55 -04:00
|
|
|
.await?;
|
2023-02-03 14:15:16 -05:00
|
|
|
ps.init_watcher();
|
|
|
|
Ok(ps)
|
|
|
|
}
|
2022-06-08 06:07:25 -04:00
|
|
|
|
2023-02-03 14:15:16 -05:00
|
|
|
  /// Reset all runtime state to its default. This should be used on file
  /// watcher restarts.
  pub fn reset_for_file_watcher(&mut self) {
    // Clear the stores that accumulate per-run state; their `Arc`s are
    // then carried over into the new `Inner` below, so the cleared
    // (but same) instances keep being shared.
    self.cjs_resolutions.clear();
    self.parsed_source_cache.clear();
    self.graph_container.clear();

    // Rebuild `Inner`, cloning everything except the three isolate-level
    // stores (broadcast channel, SAB store, compiled-wasm store), which
    // are re-created fresh via `Default::default()`.
    self.0 = Arc::new(Inner {
      dir: self.dir.clone(),
      caches: self.caches.clone(),
      options: self.options.clone(),
      emit_cache: self.emit_cache.clone(),
      emitter: self.emitter.clone(),
      file_fetcher: self.file_fetcher.clone(),
      http_client: self.http_client.clone(),
      graph_container: self.graph_container.clone(),
      lockfile: self.lockfile.clone(),
      maybe_import_map: self.maybe_import_map.clone(),
      maybe_inspector_server: self.maybe_inspector_server.clone(),
      root_cert_store: self.root_cert_store.clone(),
      blob_store: self.blob_store.clone(),
      broadcast_channel: Default::default(),
      shared_array_buffer_store: Default::default(),
      compiled_wasm_module_store: Default::default(),
      parsed_source_cache: self.parsed_source_cache.clone(),
      resolver: self.resolver.clone(),
      maybe_file_watcher_reporter: self.maybe_file_watcher_reporter.clone(),
      module_graph_builder: self.module_graph_builder.clone(),
      module_load_preparer: self.module_load_preparer.clone(),
      node_code_translator: self.node_code_translator.clone(),
      npm_api: self.npm_api.clone(),
      npm_cache: self.npm_cache.clone(),
      npm_resolver: self.npm_resolver.clone(),
      npm_resolution: self.npm_resolution.clone(),
      package_json_deps_installer: self.package_json_deps_installer.clone(),
      cjs_resolutions: self.cjs_resolutions.clone(),
      progress_bar: self.progress_bar.clone(),
    });
    // Re-register the invariant watch paths for the fresh run.
    self.init_watcher();
  }
|
2022-06-08 06:07:25 -04:00
|
|
|
|
2023-02-03 14:15:16 -05:00
|
|
|
// Add invariant files like the import map and explicit watch flag list to
|
|
|
|
// the watcher. Dedup for build_for_file_watcher and reset_for_file_watcher.
|
|
|
|
fn init_watcher(&self) {
|
|
|
|
let files_to_watch_sender = match &self.0.maybe_file_watcher_reporter {
|
|
|
|
Some(reporter) => &reporter.sender,
|
|
|
|
None => return,
|
|
|
|
};
|
|
|
|
if let Some(watch_paths) = self.options.watch_paths() {
|
|
|
|
files_to_watch_sender.send(watch_paths.clone()).unwrap();
|
|
|
|
}
|
|
|
|
if let Ok(Some(import_map_path)) = self
|
2022-06-29 11:51:11 -04:00
|
|
|
.options
|
2022-06-29 20:41:48 -04:00
|
|
|
.resolve_import_map_specifier()
|
2022-06-28 16:45:55 -04:00
|
|
|
.map(|ms| ms.and_then(|ref s| s.to_file_path().ok()))
|
2022-06-08 06:07:25 -04:00
|
|
|
{
|
2023-02-03 14:15:16 -05:00
|
|
|
files_to_watch_sender.send(vec![import_map_path]).unwrap();
|
2022-06-08 06:07:25 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
  /// Constructs the entire service graph for one CLI run.
  ///
  /// `maybe_sender` is `Some` only in watcher mode; it receives the file
  /// paths discovered while loading modules so the watcher can track them.
  async fn build_with_sender(
    cli_options: Arc<CliOptions>,
    maybe_sender: Option<tokio::sync::mpsc::UnboundedSender<Vec<PathBuf>>>,
  ) -> Result<Self, AnyError> {
    let dir = cli_options.resolve_deno_dir()?;
    let caches = Arc::new(Caches::default());
    // Warm up the caches we know we'll likely need based on the CLI mode
    // (the `_ =` discards the handle; calling the accessor is what kicks
    // off initialization — presumably off-thread, per the cache design).
    match cli_options.sub_command() {
      DenoSubcommand::Run(_) => {
        _ = caches.dep_analysis_db(&dir);
        _ = caches.node_analysis_db(&dir);
      }
      DenoSubcommand::Check(_) => {
        _ = caches.dep_analysis_db(&dir);
        _ = caches.node_analysis_db(&dir);
        _ = caches.type_checking_cache_db(&dir);
      }
      _ => {}
    }
    // Isolate-level web-platform stores, all starting empty.
    let blob_store = BlobStore::default();
    let broadcast_channel = InMemoryBroadcastChannel::default();
    let shared_array_buffer_store = SharedArrayBufferStore::default();
    let compiled_wasm_module_store = CompiledWasmModuleStore::default();
    let deps_cache_location = dir.deps_folder_path();
    let http_cache = HttpCache::new(&deps_cache_location);
    let root_cert_store = cli_options.resolve_root_cert_store()?;
    let cache_usage = cli_options.cache_setting();
    let progress_bar = ProgressBar::new(ProgressBarStyle::TextOnly);
    let http_client = HttpClient::new(
      Some(root_cert_store.clone()),
      cli_options.unsafely_ignore_certificate_errors().clone(),
    )?;
    let file_fetcher = FileFetcher::new(
      http_cache,
      cache_usage,
      // `allow_remote` is the inverse of the `--no-remote` flag.
      !cli_options.no_remote(),
      http_client.clone(),
      blob_store.clone(),
      Some(progress_bar.clone()),
    );

    let lockfile = cli_options.maybe_lock_file();

    // npm registry / cache / resolution plumbing.
    let npm_registry_url = CliNpmRegistryApi::default_url().to_owned();
    let npm_cache = Arc::new(NpmCache::from_deno_dir(
      &dir,
      cli_options.cache_setting(),
      http_client.clone(),
      progress_bar.clone(),
    ));
    let npm_api = Arc::new(CliNpmRegistryApi::new(
      npm_registry_url.clone(),
      npm_cache.clone(),
      http_client.clone(),
      progress_bar.clone(),
    ));
    // Snapshot may come from a lockfile or prior resolution state.
    let npm_snapshot = cli_options
      .resolve_npm_resolution_snapshot(&npm_api)
      .await?;
    let npm_resolution = Arc::new(NpmResolution::from_serialized(
      npm_api.clone(),
      npm_snapshot,
      lockfile.as_ref().cloned(),
    ));
    // Note: this consumes (moves) `npm_cache`.
    let npm_fs_resolver = create_npm_fs_resolver(
      npm_cache,
      &progress_bar,
      npm_registry_url,
      npm_resolution.clone(),
      cli_options.node_modules_dir_path(),
    );
    let npm_resolver = Arc::new(NpmPackageResolver::new(
      npm_resolution.clone(),
      npm_fs_resolver,
      lockfile.as_ref().cloned(),
    ));
    let package_json_deps_installer = Arc::new(PackageJsonDepsInstaller::new(
      npm_api.clone(),
      npm_resolution.clone(),
      cli_options.maybe_package_json_deps(),
    ));
    let maybe_import_map = cli_options
      .resolve_import_map(&file_fetcher)
      .await?
      .map(Arc::new);
    let maybe_inspector_server =
      cli_options.resolve_inspector_server().map(Arc::new);

    let resolver = Arc::new(CliGraphResolver::new(
      cli_options.to_maybe_jsx_import_source_config(),
      maybe_import_map.clone(),
      cli_options.no_npm(),
      npm_api.clone(),
      npm_resolution.clone(),
      package_json_deps_installer.clone(),
    ));

    // Only present in watcher mode; batches discovered file paths and
    // sends them over the channel (see `FileWatcherReporter`).
    let maybe_file_watcher_reporter =
      maybe_sender.map(|sender| FileWatcherReporter {
        sender,
        file_paths: Arc::new(Mutex::new(vec![])),
      });

    let ts_config_result =
      cli_options.resolve_ts_config_for_emit(TsConfigType::Emit)?;
    // Surface (but don't fail on) tsconfig options we don't honor.
    if let Some(ignored_options) = ts_config_result.maybe_ignored_options {
      warn!("{}", ignored_options);
    }
    let emit_cache = EmitCache::new(dir.gen_cache.clone());
    let parsed_source_cache =
      Arc::new(ParsedSourceCache::new(caches.dep_analysis_db(&dir)));
    let emit_options: deno_ast::EmitOptions = ts_config_result.ts_config.into();
    let emitter = Arc::new(Emitter::new(
      emit_cache.clone(),
      parsed_source_cache.clone(),
      emit_options,
    ));
    // NOTE(review): `npm_cache` is constructed a second time here (shadowing
    // the first, which was moved into `create_npm_fs_resolver` above).
    // Consider passing a clone of the first `Arc` to the fs resolver instead
    // of building the cache twice — TODO confirm `NpmCache::from_deno_dir`
    // with identical args yields an equivalent instance.
    let npm_cache = Arc::new(NpmCache::from_deno_dir(
      &dir,
      cli_options.cache_setting(),
      http_client.clone(),
      progress_bar.clone(),
    ));
    let file_fetcher = Arc::new(file_fetcher);
    let node_analysis_cache =
      NodeAnalysisCache::new(caches.node_analysis_db(&dir));
    let node_code_translator = Arc::new(NodeCodeTranslator::new(
      node_analysis_cache,
      file_fetcher.clone(),
      npm_resolver.clone(),
    ));
    let type_checker = Arc::new(TypeChecker::new(
      dir.clone(),
      caches.clone(),
      cli_options.clone(),
      npm_resolver.clone(),
    ));
    let module_graph_builder = Arc::new(ModuleGraphBuilder::new(
      cli_options.clone(),
      resolver.clone(),
      npm_resolver.clone(),
      parsed_source_cache.clone(),
      lockfile.clone(),
      emit_cache.clone(),
      file_fetcher.clone(),
      type_checker.clone(),
    ));
    let graph_container: Arc<ModuleGraphContainer> = Default::default();
    let module_load_preparer = Arc::new(ModuleLoadPreparer::new(
      cli_options.clone(),
      graph_container.clone(),
      lockfile.clone(),
      maybe_file_watcher_reporter.clone(),
      module_graph_builder.clone(),
      parsed_source_cache.clone(),
      progress_bar.clone(),
      resolver.clone(),
      type_checker,
    ));

    Ok(ProcState(Arc::new(Inner {
      dir,
      caches,
      options: cli_options,
      emit_cache,
      emitter,
      file_fetcher,
      http_client,
      graph_container,
      lockfile,
      maybe_import_map,
      maybe_inspector_server,
      root_cert_store,
      blob_store,
      broadcast_channel,
      shared_array_buffer_store,
      compiled_wasm_module_store,
      parsed_source_cache,
      resolver,
      maybe_file_watcher_reporter,
      module_graph_builder,
      node_code_translator,
      npm_api,
      npm_cache,
      npm_resolver,
      npm_resolution,
      package_json_deps_installer,
      // Starts empty; filled in as CJS specifiers are resolved.
      cjs_resolutions: Default::default(),
      module_load_preparer,
      progress_bar,
    })))
  }
|
2023-04-14 16:22:33 -04:00
|
|
|
}
|
2019-11-04 10:38:52 -05:00
|
|
|
|
2023-04-14 16:22:33 -04:00
|
|
|
/// Keeps track of what module specifiers were resolved as CJS.
// Interior mutability via a mutex-guarded set so the store can be shared
// behind an `Arc` and mutated through `&self`.
#[derive(Default)]
pub struct CjsResolutionStore(Mutex<HashSet<ModuleSpecifier>>);
|
2022-10-20 13:23:21 -04:00
|
|
|
|
2023-04-14 16:22:33 -04:00
|
|
|
impl CjsResolutionStore {
|
|
|
|
pub fn clear(&self) {
|
|
|
|
self.0.lock().clear();
|
2022-10-20 13:23:21 -04:00
|
|
|
}
|
|
|
|
|
2023-04-14 16:22:33 -04:00
|
|
|
pub fn contains(&self, specifier: &ModuleSpecifier) -> bool {
|
|
|
|
self.0.lock().contains(specifier)
|
2022-07-01 11:50:16 -04:00
|
|
|
}
|
2023-02-09 22:00:23 -05:00
|
|
|
|
2023-04-14 16:22:33 -04:00
|
|
|
pub fn insert(&self, specifier: ModuleSpecifier) {
|
|
|
|
self.0.lock().insert(specifier);
|
2023-02-15 13:44:52 -05:00
|
|
|
}
|
2019-11-04 10:38:52 -05:00
|
|
|
}
|
|
|
|
|
2023-01-10 10:28:10 -05:00
|
|
|
// Forwards file paths discovered during module graph loading to the file
// watcher over an unbounded channel (see the `Reporter` impl below).
#[derive(Clone, Debug)]
pub struct FileWatcherReporter {
  // Channel to the watcher task.
  sender: tokio::sync::mpsc::UnboundedSender<Vec<PathBuf>>,
  // Paths accumulated during the current load; drained when the load
  // finishes.
  file_paths: Arc<Mutex<Vec<PathBuf>>>,
}
|
|
|
|
|
|
|
|
impl deno_graph::source::Reporter for FileWatcherReporter {
|
|
|
|
fn on_load(
|
|
|
|
&self,
|
|
|
|
specifier: &ModuleSpecifier,
|
|
|
|
modules_done: usize,
|
|
|
|
modules_total: usize,
|
|
|
|
) {
|
|
|
|
let mut file_paths = self.file_paths.lock();
|
|
|
|
if specifier.scheme() == "file" {
|
|
|
|
file_paths.push(specifier.to_file_path().unwrap());
|
|
|
|
}
|
|
|
|
|
|
|
|
if modules_done == modules_total {
|
|
|
|
self.sender.send(file_paths.drain(..).collect()).unwrap();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|