// Mirror of https://github.com/denoland/deno.git (synced 2024-12-11 10:07:54 -05:00, commit 1b355d8a87).
//
// Introduces a `SyncReadAsyncWriteLock` to make it harder to write to the npm
// resolution without first waiting async in a queue. For the npm resolution,
// reading synchronously is fine, but when updating, someone should wait async,
// clone the data, then write the data at the end back.
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
|
|
|
|
use std::collections::HashMap;
|
|
use std::collections::HashSet;
|
|
use std::sync::Arc;
|
|
|
|
use deno_core::error::AnyError;
|
|
use deno_core::parking_lot::Mutex;
|
|
use deno_lockfile::NpmPackageDependencyLockfileInfo;
|
|
use deno_lockfile::NpmPackageLockfileInfo;
|
|
use deno_npm::registry::NpmPackageInfo;
|
|
use deno_npm::registry::NpmRegistryApi;
|
|
use deno_npm::resolution::NpmPackageVersionResolutionError;
|
|
use deno_npm::resolution::NpmPackagesPartitioned;
|
|
use deno_npm::resolution::NpmResolutionError;
|
|
use deno_npm::resolution::NpmResolutionSnapshot;
|
|
use deno_npm::resolution::NpmResolutionSnapshotPendingResolver;
|
|
use deno_npm::resolution::NpmResolutionSnapshotPendingResolverOptions;
|
|
use deno_npm::resolution::PackageCacheFolderIdNotFoundError;
|
|
use deno_npm::resolution::PackageNotFoundFromReferrerError;
|
|
use deno_npm::resolution::PackageNvNotFoundError;
|
|
use deno_npm::resolution::PackageReqNotFoundError;
|
|
use deno_npm::resolution::ValidSerializedNpmResolutionSnapshot;
|
|
use deno_npm::NpmPackageCacheFolderId;
|
|
use deno_npm::NpmPackageId;
|
|
use deno_npm::NpmResolutionPackage;
|
|
use deno_npm::NpmSystemInfo;
|
|
use deno_semver::package::PackageNv;
|
|
use deno_semver::package::PackageReq;
|
|
use deno_semver::VersionReq;
|
|
|
|
use crate::args::Lockfile;
|
|
use crate::util::sync::SyncReadAsyncWriteLock;
|
|
|
|
use super::CliNpmRegistryApi;
|
|
|
|
/// Handles updating and storing npm resolution in memory where the underlying
|
|
/// snapshot can be updated concurrently. Additionally handles updating the lockfile
|
|
/// based on changes to the resolution.
|
|
///
|
|
/// This does not interact with the file system.
|
|
pub struct NpmResolution {
|
|
api: Arc<CliNpmRegistryApi>,
|
|
snapshot: SyncReadAsyncWriteLock<NpmResolutionSnapshot>,
|
|
maybe_lockfile: Option<Arc<Mutex<Lockfile>>>,
|
|
}
|
|
|
|
impl std::fmt::Debug for NpmResolution {
|
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
let snapshot = self.snapshot.read();
|
|
f.debug_struct("NpmResolution")
|
|
.field("snapshot", &snapshot.as_valid_serialized().as_serialized())
|
|
.finish()
|
|
}
|
|
}
|
|
|
|
impl NpmResolution {
|
|
pub fn from_serialized(
|
|
api: Arc<CliNpmRegistryApi>,
|
|
initial_snapshot: Option<ValidSerializedNpmResolutionSnapshot>,
|
|
maybe_lockfile: Option<Arc<Mutex<Lockfile>>>,
|
|
) -> Self {
|
|
let snapshot =
|
|
NpmResolutionSnapshot::new(initial_snapshot.unwrap_or_default());
|
|
Self::new(api, snapshot, maybe_lockfile)
|
|
}
|
|
|
|
pub fn new(
|
|
api: Arc<CliNpmRegistryApi>,
|
|
initial_snapshot: NpmResolutionSnapshot,
|
|
maybe_lockfile: Option<Arc<Mutex<Lockfile>>>,
|
|
) -> Self {
|
|
Self {
|
|
api,
|
|
snapshot: SyncReadAsyncWriteLock::new(initial_snapshot),
|
|
maybe_lockfile,
|
|
}
|
|
}
|
|
|
|
pub async fn add_package_reqs(
|
|
&self,
|
|
package_reqs: &[PackageReq],
|
|
) -> Result<(), AnyError> {
|
|
// only allow one thread in here at a time
|
|
let snapshot_lock = self.snapshot.acquire().await;
|
|
let snapshot = add_package_reqs_to_snapshot(
|
|
&self.api,
|
|
package_reqs,
|
|
self.maybe_lockfile.clone(),
|
|
|| snapshot_lock.read().clone(),
|
|
)
|
|
.await?;
|
|
|
|
*snapshot_lock.write() = snapshot;
|
|
Ok(())
|
|
}
|
|
|
|
pub async fn set_package_reqs(
|
|
&self,
|
|
package_reqs: &[PackageReq],
|
|
) -> Result<(), AnyError> {
|
|
// only allow one thread in here at a time
|
|
let snapshot_lock = self.snapshot.acquire().await;
|
|
|
|
let reqs_set = package_reqs.iter().collect::<HashSet<_>>();
|
|
let snapshot = add_package_reqs_to_snapshot(
|
|
&self.api,
|
|
package_reqs,
|
|
self.maybe_lockfile.clone(),
|
|
|| {
|
|
let snapshot = snapshot_lock.read().clone();
|
|
let has_removed_package = !snapshot
|
|
.package_reqs()
|
|
.keys()
|
|
.all(|req| reqs_set.contains(req));
|
|
// if any packages were removed, we need to completely recreate the npm resolution snapshot
|
|
if has_removed_package {
|
|
snapshot.into_empty()
|
|
} else {
|
|
snapshot
|
|
}
|
|
},
|
|
)
|
|
.await?;
|
|
|
|
*snapshot_lock.write() = snapshot;
|
|
|
|
Ok(())
|
|
}
|
|
|
|
pub async fn resolve_pending(&self) -> Result<(), AnyError> {
|
|
// only allow one thread in here at a time
|
|
let snapshot_lock = self.snapshot.acquire().await;
|
|
|
|
let snapshot = add_package_reqs_to_snapshot(
|
|
&self.api,
|
|
&Vec::new(),
|
|
self.maybe_lockfile.clone(),
|
|
|| snapshot_lock.read().clone(),
|
|
)
|
|
.await?;
|
|
|
|
*snapshot_lock.write() = snapshot;
|
|
|
|
Ok(())
|
|
}
|
|
|
|
pub fn resolve_pkg_cache_folder_id_from_pkg_id(
|
|
&self,
|
|
id: &NpmPackageId,
|
|
) -> Option<NpmPackageCacheFolderId> {
|
|
self
|
|
.snapshot
|
|
.read()
|
|
.package_from_id(id)
|
|
.map(|p| p.get_package_cache_folder_id())
|
|
}
|
|
|
|
pub fn resolve_pkg_id_from_pkg_cache_folder_id(
|
|
&self,
|
|
id: &NpmPackageCacheFolderId,
|
|
) -> Result<NpmPackageId, PackageCacheFolderIdNotFoundError> {
|
|
self
|
|
.snapshot
|
|
.read()
|
|
.resolve_pkg_from_pkg_cache_folder_id(id)
|
|
.map(|pkg| pkg.id.clone())
|
|
}
|
|
|
|
pub fn resolve_package_from_package(
|
|
&self,
|
|
name: &str,
|
|
referrer: &NpmPackageCacheFolderId,
|
|
) -> Result<NpmResolutionPackage, Box<PackageNotFoundFromReferrerError>> {
|
|
self
|
|
.snapshot
|
|
.read()
|
|
.resolve_package_from_package(name, referrer)
|
|
.cloned()
|
|
}
|
|
|
|
/// Resolve a node package from a deno module.
|
|
pub fn resolve_pkg_id_from_pkg_req(
|
|
&self,
|
|
req: &PackageReq,
|
|
) -> Result<NpmPackageId, PackageReqNotFoundError> {
|
|
self
|
|
.snapshot
|
|
.read()
|
|
.resolve_pkg_from_pkg_req(req)
|
|
.map(|pkg| pkg.id.clone())
|
|
}
|
|
|
|
pub fn resolve_pkg_reqs_from_pkg_id(
|
|
&self,
|
|
id: &NpmPackageId,
|
|
) -> Vec<PackageReq> {
|
|
let snapshot = self.snapshot.read();
|
|
let mut pkg_reqs = snapshot
|
|
.package_reqs()
|
|
.iter()
|
|
.filter(|(_, nv)| *nv == &id.nv)
|
|
.map(|(req, _)| req.clone())
|
|
.collect::<Vec<_>>();
|
|
pkg_reqs.sort(); // be deterministic
|
|
pkg_reqs
|
|
}
|
|
|
|
pub fn resolve_pkg_id_from_deno_module(
|
|
&self,
|
|
id: &PackageNv,
|
|
) -> Result<NpmPackageId, PackageNvNotFoundError> {
|
|
self
|
|
.snapshot
|
|
.read()
|
|
.resolve_package_from_deno_module(id)
|
|
.map(|pkg| pkg.id.clone())
|
|
}
|
|
|
|
/// Resolves a package requirement for deno graph. This should only be
|
|
/// called by deno_graph's NpmResolver or for resolving packages in
|
|
/// a package.json
|
|
pub async fn resolve_pkg_req_as_pending_with_info(
|
|
&self,
|
|
pkg_req: &PackageReq,
|
|
pkg_info: &NpmPackageInfo,
|
|
) -> Result<PackageNv, NpmPackageVersionResolutionError> {
|
|
debug_assert_eq!(pkg_req.name, pkg_info.name);
|
|
// only allow one thread in here at a time
|
|
let snapshot_lock = self.snapshot.acquire().await;
|
|
|
|
let mut snapshot = snapshot_lock.write();
|
|
let pending_resolver = get_npm_pending_resolver(&self.api);
|
|
let nv = pending_resolver.resolve_package_req_as_pending(
|
|
&mut snapshot,
|
|
pkg_req,
|
|
pkg_info,
|
|
)?;
|
|
Ok(nv)
|
|
}
|
|
|
|
pub async fn resolve_pkg_reqs_as_pending_with_info(
|
|
&self,
|
|
reqs_with_pkg_infos: &[(&PackageReq, Arc<NpmPackageInfo>)],
|
|
) -> Vec<Result<PackageNv, NpmPackageVersionResolutionError>> {
|
|
// only allow one thread in here at a time
|
|
let snapshot_lock = self.snapshot.acquire().await;
|
|
|
|
let mut snapshot = snapshot_lock.write();
|
|
let pending_resolver = get_npm_pending_resolver(&self.api);
|
|
let mut results = Vec::with_capacity(reqs_with_pkg_infos.len());
|
|
for (pkg_req, pkg_info) in reqs_with_pkg_infos {
|
|
debug_assert_eq!(pkg_req.name, pkg_info.name);
|
|
results.push(pending_resolver.resolve_package_req_as_pending(
|
|
&mut snapshot,
|
|
pkg_req,
|
|
pkg_info,
|
|
));
|
|
}
|
|
results
|
|
}
|
|
|
|
pub fn package_reqs(&self) -> HashMap<PackageReq, PackageNv> {
|
|
self.snapshot.read().package_reqs().clone()
|
|
}
|
|
|
|
pub fn all_system_packages(
|
|
&self,
|
|
system_info: &NpmSystemInfo,
|
|
) -> Vec<NpmResolutionPackage> {
|
|
self.snapshot.read().all_system_packages(system_info)
|
|
}
|
|
|
|
pub fn all_system_packages_partitioned(
|
|
&self,
|
|
system_info: &NpmSystemInfo,
|
|
) -> NpmPackagesPartitioned {
|
|
self
|
|
.snapshot
|
|
.read()
|
|
.all_system_packages_partitioned(system_info)
|
|
}
|
|
|
|
pub fn snapshot(&self) -> NpmResolutionSnapshot {
|
|
self.snapshot.read().clone()
|
|
}
|
|
|
|
pub fn serialized_valid_snapshot(
|
|
&self,
|
|
) -> ValidSerializedNpmResolutionSnapshot {
|
|
self.snapshot.read().as_valid_serialized()
|
|
}
|
|
|
|
pub fn serialized_valid_snapshot_for_system(
|
|
&self,
|
|
system_info: &NpmSystemInfo,
|
|
) -> ValidSerializedNpmResolutionSnapshot {
|
|
self
|
|
.snapshot
|
|
.read()
|
|
.as_valid_serialized_for_system(system_info)
|
|
}
|
|
}
|
|
|
|
async fn add_package_reqs_to_snapshot(
|
|
api: &CliNpmRegistryApi,
|
|
package_reqs: &[PackageReq],
|
|
maybe_lockfile: Option<Arc<Mutex<Lockfile>>>,
|
|
get_new_snapshot: impl Fn() -> NpmResolutionSnapshot,
|
|
) -> Result<NpmResolutionSnapshot, AnyError> {
|
|
let snapshot = get_new_snapshot();
|
|
let snapshot = if !snapshot.has_pending()
|
|
&& package_reqs
|
|
.iter()
|
|
.all(|req| snapshot.package_reqs().contains_key(req))
|
|
{
|
|
log::debug!(
|
|
"Snapshot already up to date. Skipping pending npm resolution."
|
|
);
|
|
snapshot
|
|
} else {
|
|
log::debug!(
|
|
/* this string is used in tests!! */
|
|
"Running pending npm resolution."
|
|
);
|
|
let pending_resolver = get_npm_pending_resolver(api);
|
|
let result = pending_resolver
|
|
.resolve_pending(snapshot, package_reqs)
|
|
.await;
|
|
api.clear_memory_cache();
|
|
match result {
|
|
Ok(snapshot) => snapshot,
|
|
Err(NpmResolutionError::Resolution(err)) if api.mark_force_reload() => {
|
|
log::debug!("{err:#}");
|
|
log::debug!("npm resolution failed. Trying again...");
|
|
|
|
// try again
|
|
let snapshot = get_new_snapshot();
|
|
let result = pending_resolver
|
|
.resolve_pending(snapshot, package_reqs)
|
|
.await;
|
|
api.clear_memory_cache();
|
|
// now surface the result after clearing the cache
|
|
result?
|
|
}
|
|
Err(err) => return Err(err.into()),
|
|
}
|
|
};
|
|
|
|
if let Some(lockfile_mutex) = maybe_lockfile {
|
|
let mut lockfile = lockfile_mutex.lock();
|
|
populate_lockfile_from_snapshot(&mut lockfile, &snapshot);
|
|
}
|
|
|
|
Ok(snapshot)
|
|
}
|
|
|
|
fn get_npm_pending_resolver(
|
|
api: &CliNpmRegistryApi,
|
|
) -> NpmResolutionSnapshotPendingResolver<CliNpmRegistryApi> {
|
|
NpmResolutionSnapshotPendingResolver::new(
|
|
NpmResolutionSnapshotPendingResolverOptions {
|
|
api,
|
|
// WARNING: When bumping this version, check if anything needs to be
|
|
// updated in the `setNodeOnlyGlobalNames` call in 99_main_compiler.js
|
|
types_node_version_req: Some(
|
|
VersionReq::parse_from_npm("18.0.0 - 18.16.19").unwrap(),
|
|
),
|
|
},
|
|
)
|
|
}
|
|
|
|
fn populate_lockfile_from_snapshot(
|
|
lockfile: &mut Lockfile,
|
|
snapshot: &NpmResolutionSnapshot,
|
|
) {
|
|
assert!(!snapshot.has_pending());
|
|
for (package_req, nv) in snapshot.package_reqs() {
|
|
lockfile.insert_package_specifier(
|
|
format!("npm:{}", package_req),
|
|
format!(
|
|
"npm:{}",
|
|
snapshot
|
|
.resolve_package_from_deno_module(nv)
|
|
.unwrap()
|
|
.id
|
|
.as_serialized()
|
|
),
|
|
);
|
|
}
|
|
for package in snapshot.all_packages_for_every_system() {
|
|
lockfile.insert_npm_package(npm_package_to_lockfile_info(package));
|
|
}
|
|
}
|
|
|
|
fn npm_package_to_lockfile_info(
|
|
pkg: &NpmResolutionPackage,
|
|
) -> NpmPackageLockfileInfo {
|
|
let dependencies = pkg
|
|
.dependencies
|
|
.iter()
|
|
.map(|(name, id)| NpmPackageDependencyLockfileInfo {
|
|
name: name.clone(),
|
|
id: id.as_serialized(),
|
|
})
|
|
.collect();
|
|
|
|
NpmPackageLockfileInfo {
|
|
serialized_id: pkg.id.as_serialized(),
|
|
integrity: pkg.dist.integrity().for_lockfile(),
|
|
dependencies,
|
|
}
|
|
}
|