
feat(unstable/npm): support peer dependencies (#16561)

This adds support for peer dependencies in npm packages.

1. If a peer dependency is not found higher in the tree (in an ancestor or an
ancestor's siblings), it is resolved like a regular dependency, similar to npm 7.
2. Optional peer dependencies are only resolved if found higher in the
tree.
3. This creates "copy packages", i.e. duplicates of a package, when the same
package resolves differently due to peer dependency resolution (see
https://pnpm.io/how-peers-are-resolved). Unlike pnpm, duplicated packages get
`_1`, `_2`, etc. appended to the package version in their directory name
(see the sketch below) in order to minimize the chance of hitting the maximum
file path length on Windows. This is done for both the local "node_modules"
directory and the global npm cache, and the files of these copies are hard
linked to reduce disk space usage.
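As a rough illustration of that naming scheme (a minimal sketch; the helper name is made up, but it mirrors the logic of `package_folder_for_id` further down in this diff):

// Sketch only: derive the version directory name for a resolved package.
// copy_index 0 is the original package; copies get a "_<index>" suffix.
fn copy_folder_name(version: &str, copy_index: usize) -> String {
  if copy_index == 0 {
    version.to_string() // e.g. "5.0.1", stored at chalk/5.0.1
  } else {
    format!("{}_{}", version, copy_index) // e.g. "5.0.1_1", stored at chalk/5.0.1_1
  }
}

So a second resolution of chalk@5.0.1 with different peer dependencies lives in "chalk/5.0.1_1" rather than in a path that encodes the whole peer dependency chain.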

This is a first pass, and the code is definitely less efficient than it
could be.

Closes #15823
David Sherret 2022-11-08 14:17:24 -05:00 committed by Bartek Iwańczuk
parent d066322bd5
commit 2daba9c0ff
38 changed files with 4331 additions and 1380 deletions

Cargo.lock (generated, 8 lines changed)

@ -1227,9 +1227,9 @@ dependencies = [
[[package]]
name = "deno_task_shell"
version = "0.7.0"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a275d3f78e828b4adddf20a472d9ac1927ac311aac48dca869bb8653d5a4a0b9"
checksum = "e8ad1e1002ecf8bafcb9b968bf19856ba4fe0e6c0c73b3404565bb29b15aae2c"
dependencies = [
"anyhow",
"futures",
@ -2803,9 +2803,9 @@ dependencies = [
[[package]]
name = "monch"
version = "0.2.1"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5e2e282addadb529bb31700f7d184797382fa2eb18384986aad78d117eaf0c4"
checksum = "f13de1c3edc9a5b9dc3a1029f56e9ab3eba34640010aff4fc01044c42ef67afa"
[[package]]
name = "naga"

cli/Cargo.toml

@ -57,7 +57,7 @@ deno_emit = "0.10.0"
deno_graph = "0.37.1"
deno_lint = { version = "0.34.0", features = ["docs"] }
deno_runtime = { version = "0.83.0", path = "../runtime" }
deno_task_shell = "0.7.0"
deno_task_shell = "0.7.2"
napi_sym = { path = "./napi_sym", version = "0.5.0" }
atty = "=0.2.14"
@ -86,7 +86,7 @@ libc = "=0.2.126"
log = { version = "=0.4.17", features = ["serde"] }
lsp-types = "=0.93.2" # used by tower-lsp and "proposed" feature is unstable in patch releases
mitata = "=0.0.7"
monch = "=0.2.1"
monch = "=0.4.0"
notify = "=5.0.0"
once_cell = "=1.14.0"
os_pipe = "=1.0.1"

cli/fs_util.rs

@ -15,6 +15,7 @@ use std::io::ErrorKind;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::time::Duration;
use walkdir::WalkDir;
pub fn atomic_write_file<T: AsRef<[u8]>>(
@ -357,6 +358,84 @@ pub fn copy_dir_recursive(from: &Path, to: &Path) -> Result<(), AnyError> {
Ok(())
}
/// Hardlinks the files in one directory to another directory.
///
/// Note: Does not handle symlinks.
pub fn hard_link_dir_recursive(from: &Path, to: &Path) -> Result<(), AnyError> {
std::fs::create_dir_all(&to)
.with_context(|| format!("Creating {}", to.display()))?;
let read_dir = std::fs::read_dir(&from)
.with_context(|| format!("Reading {}", from.display()))?;
for entry in read_dir {
let entry = entry?;
let file_type = entry.file_type()?;
let new_from = from.join(entry.file_name());
let new_to = to.join(entry.file_name());
if file_type.is_dir() {
hard_link_dir_recursive(&new_from, &new_to).with_context(|| {
format!("Dir {} to {}", new_from.display(), new_to.display())
})?;
} else if file_type.is_file() {
// note: chance for race conditions here between attempting to create,
// then removing, then attempting to create. There doesn't seem to be
// a way to hard link with overwriting in Rust, but maybe there is some
// way with platform specific code. The workaround here is to handle
// scenarios where something else might create or remove files.
if let Err(err) = std::fs::hard_link(&new_from, &new_to) {
if err.kind() == ErrorKind::AlreadyExists {
if let Err(err) = std::fs::remove_file(&new_to) {
if err.kind() == ErrorKind::NotFound {
// Assume another process/thread created this hard link to the file we are wanting
// to remove then sleep a little bit to let the other process/thread move ahead
// faster to reduce contention.
std::thread::sleep(Duration::from_millis(10));
} else {
return Err(err).with_context(|| {
format!(
"Removing file to hard link {} to {}",
new_from.display(),
new_to.display()
)
});
}
}
// Always attempt to recreate the hardlink. In contention scenarios, the other process
// might have been killed or exited after removing the file, but before creating the hardlink
if let Err(err) = std::fs::hard_link(&new_from, &new_to) {
// Assume another process/thread created this hard link to the file we are wanting
// to now create then sleep a little bit to let the other process/thread move ahead
// faster to reduce contention.
if err.kind() == ErrorKind::AlreadyExists {
std::thread::sleep(Duration::from_millis(10));
} else {
return Err(err).with_context(|| {
format!(
"Hard linking {} to {}",
new_from.display(),
new_to.display()
)
});
}
}
} else {
return Err(err).with_context(|| {
format!(
"Hard linking {} to {}",
new_from.display(),
new_to.display()
)
});
}
}
}
}
Ok(())
}
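A minimal usage sketch (hypothetical paths, not part of the diff) of how this helper is driven, e.g. when materializing a copy package from the already-extracted original:

// Sketch only: hard link an extracted package folder into a copy folder.
// The paths are made-up examples; `hard_link_dir_recursive` is defined above.
fn link_copy_example() -> Result<(), AnyError> {
  let original = Path::new("/deno_dir/npm/registry.npmjs.org/chalk/5.0.1");
  let copy = Path::new("/deno_dir/npm/registry.npmjs.org/chalk/5.0.1_1");
  hard_link_dir_recursive(original, copy)
}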
pub fn symlink_dir(oldpath: &Path, newpath: &Path) -> Result<(), AnyError> {
let err_mapper = |err: Error| {
Error::new(

cli/lockfile.rs

@ -16,6 +16,7 @@ use std::rc::Rc;
use std::sync::Arc;
use crate::args::ConfigFile;
use crate::npm::NpmPackageId;
use crate::npm::NpmPackageReq;
use crate::npm::NpmResolutionPackage;
use crate::tools::fmt::format_json;
@ -40,7 +41,7 @@ pub struct NpmPackageInfo {
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct NpmContent {
/// Mapping between requests for npm packages and resolved specifiers, eg.
/// Mapping between requests for npm packages and resolved packages, eg.
/// {
/// "chalk": "chalk@5.0.0"
/// "react@17": "react@17.0.1"
@ -269,7 +270,7 @@ impl Lockfile {
&mut self,
package: &NpmResolutionPackage,
) -> Result<(), LockfileError> {
let specifier = package.id.serialize_for_lock_file();
let specifier = package.id.as_serialized();
if let Some(package_info) = self.content.npm.packages.get(&specifier) {
let integrity = package
.dist
@ -286,7 +287,7 @@ This could be caused by:
* the source itself may be corrupt
Use \"--lock-write\" flag to regenerate the lockfile at \"{}\".",
package.id, self.filename.display()
package.id.display(), self.filename.display()
)));
}
} else {
@ -300,7 +301,7 @@ Use \"--lock-write\" flag to regenerate the lockfile at \"{}\".",
let dependencies = package
.dependencies
.iter()
.map(|(name, id)| (name.to_string(), id.serialize_for_lock_file()))
.map(|(name, id)| (name.to_string(), id.as_serialized()))
.collect::<BTreeMap<String, String>>();
let integrity = package
@ -309,7 +310,7 @@ Use \"--lock-write\" flag to regenerate the lockfile at \"{}\".",
.as_ref()
.unwrap_or(&package.dist.shasum);
self.content.npm.packages.insert(
package.id.serialize_for_lock_file(),
package.id.as_serialized(),
NpmPackageInfo {
integrity: integrity.to_string(),
dependencies,
@ -321,12 +322,13 @@ Use \"--lock-write\" flag to regenerate the lockfile at \"{}\".",
pub fn insert_npm_specifier(
&mut self,
package_req: &NpmPackageReq,
version: String,
package_id: &NpmPackageId,
) {
self.content.npm.specifiers.insert(
package_req.to_string(),
format!("{}@{}", package_req.name, version),
);
self
.content
.npm
.specifiers
.insert(package_req.to_string(), package_id.as_serialized());
self.has_content_changed = true;
}
}
@ -559,10 +561,12 @@ mod tests {
id: NpmPackageId {
name: "nanoid".to_string(),
version: NpmVersion::parse("3.3.4").unwrap(),
peer_dependencies: Vec::new(),
},
copy_index: 0,
dist: NpmPackageVersionDistInfo {
tarball: "foo".to_string(),
shasum: "foo".to_string(),
tarball: "foo".to_string(),
shasum: "foo".to_string(),
integrity: Some("sha512-MqBkQh/OHTS2egovRtLk45wEyNXwF+cokD+1YPf9u5VfJiRdAiRwB2froX5Co9Rh20xs4siNPm8naNotSD6RBw==".to_string())
},
dependencies: HashMap::new(),
@ -574,10 +578,12 @@ mod tests {
id: NpmPackageId {
name: "picocolors".to_string(),
version: NpmVersion::parse("1.0.0").unwrap(),
peer_dependencies: Vec::new(),
},
copy_index: 0,
dist: NpmPackageVersionDistInfo {
tarball: "foo".to_string(),
shasum: "foo".to_string(),
tarball: "foo".to_string(),
shasum: "foo".to_string(),
integrity: Some("sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==".to_string())
},
dependencies: HashMap::new(),
@ -590,10 +596,12 @@ mod tests {
id: NpmPackageId {
name: "source-map-js".to_string(),
version: NpmVersion::parse("1.0.2").unwrap(),
peer_dependencies: Vec::new(),
},
copy_index: 0,
dist: NpmPackageVersionDistInfo {
tarball: "foo".to_string(),
shasum: "foo".to_string(),
tarball: "foo".to_string(),
shasum: "foo".to_string(),
integrity: Some("sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==".to_string())
},
dependencies: HashMap::new(),
@ -606,7 +614,9 @@ mod tests {
id: NpmPackageId {
name: "source-map-js".to_string(),
version: NpmVersion::parse("1.0.2").unwrap(),
peer_dependencies: Vec::new(),
},
copy_index: 0,
dist: NpmPackageVersionDistInfo {
tarball: "foo".to_string(),
shasum: "foo".to_string(),

cli/lsp/language_server.rs

@ -71,7 +71,7 @@ use crate::fs_util;
use crate::graph_util::graph_valid;
use crate::npm::NpmCache;
use crate::npm::NpmPackageResolver;
use crate::npm::NpmRegistryApi;
use crate::npm::RealNpmRegistryApi;
use crate::proc_state::import_map_from_text;
use crate::proc_state::ProcState;
use crate::progress_bar::ProgressBar;
@ -258,7 +258,7 @@ impl Inner {
ts_server.clone(),
);
let assets = Assets::new(ts_server.clone());
let registry_url = NpmRegistryApi::default_url();
let registry_url = RealNpmRegistryApi::default_url();
// Use an "only" cache setting in order to make the
// user do an explicit "cache" command and prevent
// the cache from being filled with lots of packages while
@ -270,7 +270,7 @@ impl Inner {
cache_setting.clone(),
progress_bar.clone(),
);
let api = NpmRegistryApi::new(
let api = RealNpmRegistryApi::new(
registry_url,
npm_cache.clone(),
cache_setting,

cli/npm/cache.rs

@ -21,7 +21,6 @@ use crate::progress_bar::ProgressBar;
use super::registry::NpmPackageVersionDistInfo;
use super::semver::NpmVersion;
use super::tarball::verify_and_extract_tarball;
use super::NpmPackageId;
/// For some of the tests, we want downloading of packages
/// to be deterministic so that the output is always the same
@ -29,7 +28,107 @@ pub fn should_sync_download() -> bool {
std::env::var("DENO_UNSTABLE_NPM_SYNC_DOWNLOAD") == Ok("1".to_string())
}
pub const NPM_PACKAGE_SYNC_LOCK_FILENAME: &str = ".deno_sync_lock";
const NPM_PACKAGE_SYNC_LOCK_FILENAME: &str = ".deno_sync_lock";
pub fn with_folder_sync_lock(
package: (&str, &NpmVersion),
output_folder: &Path,
action: impl FnOnce() -> Result<(), AnyError>,
) -> Result<(), AnyError> {
fn inner(
output_folder: &Path,
action: impl FnOnce() -> Result<(), AnyError>,
) -> Result<(), AnyError> {
fs::create_dir_all(output_folder).with_context(|| {
format!("Error creating '{}'.", output_folder.display())
})?;
// This sync lock file is a way to ensure that partially created
// npm package directories aren't considered valid. This could maybe
// be a bit smarter in the future to not bother extracting here
// if another process has taken the lock in the past X seconds and
// wait for the other process to finish (it could try to create the
// file with `create_new(true)` then if it exists, check the metadata
// then wait until the other process finishes with a timeout), but
// for now this is good enough.
let sync_lock_path = output_folder.join(NPM_PACKAGE_SYNC_LOCK_FILENAME);
match fs::OpenOptions::new()
.write(true)
.create(true)
.open(&sync_lock_path)
{
Ok(_) => {
action()?;
// extraction succeeded, so only now delete this file
let _ignore = std::fs::remove_file(&sync_lock_path);
Ok(())
}
Err(err) => {
bail!(
concat!(
"Error creating package sync lock file at '{}'. ",
"Maybe try manually deleting this folder.\n\n{:#}",
),
output_folder.display(),
err
);
}
}
}
match inner(output_folder, action) {
Ok(()) => Ok(()),
Err(err) => {
if let Err(remove_err) = fs::remove_dir_all(&output_folder) {
if remove_err.kind() != std::io::ErrorKind::NotFound {
bail!(
concat!(
"Failed setting up package cache directory for {}@{}, then ",
"failed cleaning it up.\n\nOriginal error:\n\n{}\n\n",
"Remove error:\n\n{}\n\nPlease manually ",
"delete this folder or you will run into issues using this ",
"package in the future:\n\n{}"
),
package.0,
package.1,
err,
remove_err,
output_folder.display(),
);
}
}
Err(err)
}
}
}
pub struct NpmPackageCacheFolderId {
pub name: String,
pub version: NpmVersion,
/// Peer dependency resolution may require us to have duplicate copies
/// of the same package.
pub copy_index: usize,
}
impl NpmPackageCacheFolderId {
pub fn with_no_count(&self) -> Self {
Self {
name: self.name.clone(),
version: self.version.clone(),
copy_index: 0,
}
}
}
impl std::fmt::Display for NpmPackageCacheFolderId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}@{}", self.name, self.version)?;
if self.copy_index > 0 {
write!(f, "_{}", self.copy_index)?;
}
Ok(())
}
}
#[derive(Clone, Debug)]
pub struct ReadonlyNpmCache {
@ -78,32 +177,49 @@ impl ReadonlyNpmCache {
Self::new(dir.root.join("npm"))
}
pub fn package_folder(
pub fn package_folder_for_id(
&self,
id: &NpmPackageId,
id: &NpmPackageCacheFolderId,
registry_url: &Url,
) -> PathBuf {
if id.copy_index == 0 {
self.package_folder_for_name_and_version(
&id.name,
&id.version,
registry_url,
)
} else {
self
.package_name_folder(&id.name, registry_url)
.join(format!("{}_{}", id.version, id.copy_index))
}
}
pub fn package_folder_for_name_and_version(
&self,
name: &str,
version: &NpmVersion,
registry_url: &Url,
) -> PathBuf {
self
.package_name_folder(&id.name, registry_url)
.join(id.version.to_string())
.package_name_folder(name, registry_url)
.join(version.to_string())
}
pub fn package_name_folder(&self, name: &str, registry_url: &Url) -> PathBuf {
let mut dir = self.registry_folder(registry_url);
let mut parts = name.split('/').map(Cow::Borrowed).collect::<Vec<_>>();
// package names were not always enforced to be lowercase and so we need
// to ensure package names, which are therefore case sensitive, are stored
// on a case insensitive file system to not have conflicts. We do this by
// first putting it in a "_" folder then hashing the package name.
let parts = name.split('/').map(Cow::Borrowed).collect::<Vec<_>>();
if name.to_lowercase() != name {
let last_part = parts.last_mut().unwrap();
*last_part = Cow::Owned(crate::checksum::gen(&[last_part.as_bytes()]));
// We can't just use the hash as part of the directory because it may
// have a collision with an actual package name in case someone wanted
// to name an actual package that. To get around this, put all these
// in a folder called "_" since npm packages can't start with an underscore
// and there is no package currently called just "_".
dir = dir.join("_");
// Lowercase package names introduce complications.
// When implementing this ensure:
// 1. It works on case insensitive filesystems. ex. JSON should not
// conflict with json... yes you read that right, those are separate
// packages.
// 2. We can figure out the package id from the path. This is used
// in resolve_package_id_from_specifier
// Probably use a hash of the package name at `npm/-/<hash>` then create
// a mapping for these package names.
todo!("deno currently doesn't support npm package names that are not all lowercase");
}
// ensure backslashes are used on windows
for part in parts {
@ -118,23 +234,24 @@ impl ReadonlyNpmCache {
.join(fs_util::root_url_to_safe_local_dirname(registry_url))
}
pub fn resolve_package_id_from_specifier(
pub fn resolve_package_folder_id_from_specifier(
&self,
specifier: &ModuleSpecifier,
registry_url: &Url,
) -> Result<NpmPackageId, AnyError> {
match self.maybe_resolve_package_id_from_specifier(specifier, registry_url)
) -> Result<NpmPackageCacheFolderId, AnyError> {
match self
.maybe_resolve_package_folder_id_from_specifier(specifier, registry_url)
{
Some(id) => Ok(id),
None => bail!("could not find npm package for '{}'", specifier),
}
}
fn maybe_resolve_package_id_from_specifier(
fn maybe_resolve_package_folder_id_from_specifier(
&self,
specifier: &ModuleSpecifier,
registry_url: &Url,
) -> Option<NpmPackageId> {
) -> Option<NpmPackageCacheFolderId> {
let registry_root_dir = self
.root_dir_url
.join(&format!(
@ -153,6 +270,7 @@ impl ReadonlyNpmCache {
// examples:
// * chalk/5.0.1/
// * @types/chalk/5.0.1/
// * some-package/5.0.1_1/ -- where the `_1` (/_\d+/) is a copy of the folder for peer deps
let is_scoped_package = relative_url.starts_with('@');
let mut parts = relative_url
.split('/')
@ -163,11 +281,19 @@ impl ReadonlyNpmCache {
if parts.len() < 2 {
return None;
}
let version = parts.pop().unwrap();
let version_part = parts.pop().unwrap();
let name = parts.join("/");
NpmVersion::parse(version)
.ok()
.map(|version| NpmPackageId { name, version })
let (version, copy_index) =
if let Some((version, copy_count)) = version_part.split_once('_') {
(version, copy_count.parse::<usize>().ok()?)
} else {
(version_part, 0)
};
Some(NpmPackageCacheFolderId {
name,
version: NpmVersion::parse(version).ok()?,
copy_index,
})
}
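A worked sketch of the version-directory parsing above (standalone helper with a made-up name, mirroring the `split_once` logic):

// Sketch only: "5.0.1" is the original folder (copy index 0),
// "5.0.1_1" is the first copy created for a differing peer dep resolution.
fn split_copy_index(version_part: &str) -> Option<(&str, usize)> {
  match version_part.split_once('_') {
    Some((version, copy)) => Some((version, copy.parse::<usize>().ok()?)),
    None => Some((version_part, 0)),
  }
}
// split_copy_index("5.0.1")   == Some(("5.0.1", 0))
// split_copy_index("5.0.1_1") == Some(("5.0.1", 1))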
pub fn get_cache_location(&self) -> PathBuf {
@ -202,28 +328,38 @@ impl NpmCache {
pub async fn ensure_package(
&self,
id: &NpmPackageId,
package: (&str, &NpmVersion),
dist: &NpmPackageVersionDistInfo,
registry_url: &Url,
) -> Result<(), AnyError> {
self
.ensure_package_inner(id, dist, registry_url)
.ensure_package_inner(package, dist, registry_url)
.await
.with_context(|| format!("Failed caching npm package '{}'.", id))
.with_context(|| {
format!("Failed caching npm package '{}@{}'.", package.0, package.1)
})
}
pub fn should_use_cache_for_npm_package(&self, package_name: &str) -> bool {
self.cache_setting.should_use_for_npm_package(package_name)
}
async fn ensure_package_inner(
&self,
id: &NpmPackageId,
package: (&str, &NpmVersion),
dist: &NpmPackageVersionDistInfo,
registry_url: &Url,
) -> Result<(), AnyError> {
let package_folder = self.readonly.package_folder(id, registry_url);
let package_folder = self.readonly.package_folder_for_name_and_version(
package.0,
package.1,
registry_url,
);
if package_folder.exists()
// if this file exists, then the package didn't successfully extract
// the first time, or another process is currently extracting the zip file
&& !package_folder.join(NPM_PACKAGE_SYNC_LOCK_FILENAME).exists()
&& self.cache_setting.should_use_for_npm_package(&id.name)
&& self.should_use_cache_for_npm_package(package.0)
{
return Ok(());
} else if self.cache_setting == CacheSetting::Only {
@ -231,7 +367,7 @@ impl NpmCache {
"NotCached",
format!(
"An npm specifier not found in cache: \"{}\", --cached-only is specified.",
id.name
&package.0
)
)
);
@ -256,38 +392,66 @@ impl NpmCache {
} else {
let bytes = response.bytes().await?;
match verify_and_extract_tarball(id, &bytes, dist, &package_folder) {
Ok(()) => Ok(()),
Err(err) => {
if let Err(remove_err) = fs::remove_dir_all(&package_folder) {
if remove_err.kind() != std::io::ErrorKind::NotFound {
bail!(
concat!(
"Failed verifying and extracting npm tarball for {}, then ",
"failed cleaning up package cache folder.\n\nOriginal ",
"error:\n\n{}\n\nRemove error:\n\n{}\n\nPlease manually ",
"delete this folder or you will run into issues using this ",
"package in the future:\n\n{}"
),
id,
err,
remove_err,
package_folder.display(),
);
}
}
Err(err)
}
}
verify_and_extract_tarball(package, &bytes, dist, &package_folder)
}
}
pub fn package_folder(
/// Ensures a copy of the package exists in the global cache.
///
/// This assumes that the original package folder being hard linked
/// from exists before this is called.
pub fn ensure_copy_package(
&self,
id: &NpmPackageId,
id: &NpmPackageCacheFolderId,
registry_url: &Url,
) -> Result<(), AnyError> {
assert_ne!(id.copy_index, 0);
let package_folder = self.readonly.package_folder_for_id(id, registry_url);
if package_folder.exists()
// if this file exists, then the package didn't successfully extract
// the first time, or another process is currently extracting the zip file
&& !package_folder.join(NPM_PACKAGE_SYNC_LOCK_FILENAME).exists()
&& self.cache_setting.should_use_for_npm_package(&id.name)
{
return Ok(());
}
let original_package_folder = self
.readonly
.package_folder_for_name_and_version(&id.name, &id.version, registry_url);
with_folder_sync_lock(
(id.name.as_str(), &id.version),
&package_folder,
|| {
fs_util::hard_link_dir_recursive(
&original_package_folder,
&package_folder,
)
},
)?;
Ok(())
}
pub fn package_folder_for_id(
&self,
id: &NpmPackageCacheFolderId,
registry_url: &Url,
) -> PathBuf {
self.readonly.package_folder(id, registry_url)
self.readonly.package_folder_for_id(id, registry_url)
}
pub fn package_folder_for_name_and_version(
&self,
name: &str,
version: &NpmVersion,
registry_url: &Url,
) -> PathBuf {
self.readonly.package_folder_for_name_and_version(
name,
version,
registry_url,
)
}
pub fn package_name_folder(&self, name: &str, registry_url: &Url) -> PathBuf {
@ -298,14 +462,14 @@ impl NpmCache {
self.readonly.registry_folder(registry_url)
}
pub fn resolve_package_id_from_specifier(
pub fn resolve_package_folder_id_from_specifier(
&self,
specifier: &ModuleSpecifier,
registry_url: &Url,
) -> Result<NpmPackageId, AnyError> {
) -> Result<NpmPackageCacheFolderId, AnyError> {
self
.readonly
.resolve_package_id_from_specifier(specifier, registry_url)
.resolve_package_folder_id_from_specifier(specifier, registry_url)
}
}
@ -314,8 +478,8 @@ mod test {
use deno_core::url::Url;
use super::ReadonlyNpmCache;
use crate::npm::cache::NpmPackageCacheFolderId;
use crate::npm::semver::NpmVersion;
use crate::npm::NpmPackageId;
#[test]
fn should_get_lowercase_package_folder() {
@ -323,12 +487,12 @@ mod test {
let cache = ReadonlyNpmCache::new(root_dir.clone());
let registry_url = Url::parse("https://registry.npmjs.org/").unwrap();
// all lowercase should be as-is
assert_eq!(
cache.package_folder(
&NpmPackageId {
cache.package_folder_for_id(
&NpmPackageCacheFolderId {
name: "json".to_string(),
version: NpmVersion::parse("1.2.5").unwrap(),
copy_index: 0,
},
&registry_url,
),
@ -337,44 +501,20 @@ mod test {
.join("json")
.join("1.2.5"),
);
}
#[test]
fn should_handle_non_all_lowercase_package_names() {
// it was possible at one point for npm packages to not just be lowercase
let root_dir = crate::deno_dir::DenoDir::new(None).unwrap().root;
let cache = ReadonlyNpmCache::new(root_dir.clone());
let registry_url = Url::parse("https://registry.npmjs.org/").unwrap();
let json_uppercase_hash =
"db1a21a0bc2ef8fbe13ac4cf044e8c9116d29137d5ed8b916ab63dcb2d4290df";
assert_eq!(
cache.package_folder(
&NpmPackageId {
name: "JSON".to_string(),
cache.package_folder_for_id(
&NpmPackageCacheFolderId {
name: "json".to_string(),
version: NpmVersion::parse("1.2.5").unwrap(),
copy_index: 1,
},
&registry_url,
),
root_dir
.join("registry.npmjs.org")
.join("_")
.join(json_uppercase_hash)
.join("1.2.5"),
);
assert_eq!(
cache.package_folder(
&NpmPackageId {
name: "@types/JSON".to_string(),
version: NpmVersion::parse("1.2.5").unwrap(),
},
&registry_url,
),
root_dir
.join("registry.npmjs.org")
.join("_")
.join("@types")
.join(json_uppercase_hash)
.join("1.2.5"),
.join("json")
.join("1.2.5_1"),
);
}
}

cli/npm/mod.rs

@ -13,6 +13,7 @@ pub use cache::NpmCache;
#[cfg(test)]
pub use registry::NpmPackageVersionDistInfo;
pub use registry::NpmRegistryApi;
pub use registry::RealNpmRegistryApi;
pub use resolution::NpmPackageId;
pub use resolution::NpmPackageReference;
pub use resolution::NpmPackageReq;

cli/npm/registry.rs

@ -1,5 +1,6 @@
// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
use std::cmp::Ordering;
use std::collections::HashMap;
use std::fs;
use std::io::ErrorKind;
@ -10,6 +11,8 @@ use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::custom_error;
use deno_core::error::AnyError;
use deno_core::futures::future::BoxFuture;
use deno_core::futures::FutureExt;
use deno_core::parking_lot::Mutex;
use deno_core::serde::Deserialize;
use deno_core::serde_json;
@ -24,11 +27,13 @@ use crate::http_cache::CACHE_PERM;
use crate::progress_bar::ProgressBar;
use super::cache::NpmCache;
use super::resolution::NpmVersionMatcher;
use super::semver::NpmVersion;
use super::semver::NpmVersionReq;
// npm registry docs: https://github.com/npm/registry/blob/master/docs/REGISTRY-API.md
#[derive(Debug, Deserialize, Serialize, Clone)]
#[derive(Debug, Default, Deserialize, Serialize, Clone)]
pub struct NpmPackageInfo {
pub name: String,
pub versions: HashMap<String, NpmPackageVersionInfo>,
@ -36,13 +41,59 @@ pub struct NpmPackageInfo {
pub dist_tags: HashMap<String, String>,
}
#[derive(Debug, Eq, PartialEq)]
pub enum NpmDependencyEntryKind {
Dep,
Peer,
OptionalPeer,
}
impl NpmDependencyEntryKind {
pub fn is_optional(&self) -> bool {
matches!(self, NpmDependencyEntryKind::OptionalPeer)
}
}
#[derive(Debug, Eq, PartialEq)]
pub struct NpmDependencyEntry {
pub kind: NpmDependencyEntryKind,
pub bare_specifier: String,
pub name: String,
pub version_req: NpmVersionReq,
/// When the dependency is also marked as a peer dependency,
/// use this entry to resolve the dependency when it can't
/// be resolved as a peer dependency.
pub peer_dep_version_req: Option<NpmVersionReq>,
}
impl PartialOrd for NpmDependencyEntry {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for NpmDependencyEntry {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
// sort the dependencies alphabetically by name then by version descending
match self.name.cmp(&other.name) {
// sort by newest to oldest
Ordering::Equal => other
.version_req
.version_text()
.cmp(&self.version_req.version_text()),
ordering => ordering,
}
}
}
#[derive(Debug, Default, Deserialize, Serialize, Clone)]
pub struct NpmPeerDependencyMeta {
#[serde(default)]
optional: bool,
}
#[derive(Debug, Default, Deserialize, Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct NpmPackageVersionInfo {
pub version: String,
pub dist: NpmPackageVersionDistInfo,
@ -50,14 +101,19 @@ pub struct NpmPackageVersionInfo {
// package and version (ex. `"typescript-3.0.1": "npm:typescript@3.0.1"`).
#[serde(default)]
pub dependencies: HashMap<String, String>,
#[serde(default)]
pub peer_dependencies: HashMap<String, String>,
#[serde(default)]
pub peer_dependencies_meta: HashMap<String, NpmPeerDependencyMeta>,
}
impl NpmPackageVersionInfo {
pub fn dependencies_as_entries(
&self,
) -> Result<Vec<NpmDependencyEntry>, AnyError> {
fn entry_as_bare_specifier_and_reference(
fn parse_dep_entry(
entry: (&String, &String),
kind: NpmDependencyEntryKind,
) -> Result<NpmDependencyEntry, AnyError> {
let bare_specifier = entry.0.clone();
let (name, version_req) =
@ -78,21 +134,46 @@ impl NpmPackageVersionInfo {
)
})?;
Ok(NpmDependencyEntry {
kind,
bare_specifier,
name,
version_req,
peer_dep_version_req: None,
})
}
self
.dependencies
.iter()
.map(entry_as_bare_specifier_and_reference)
.collect::<Result<Vec<_>, AnyError>>()
let mut result = HashMap::with_capacity(
self.dependencies.len() + self.peer_dependencies.len(),
);
for entry in &self.peer_dependencies {
let is_optional = self
.peer_dependencies_meta
.get(entry.0)
.map(|d| d.optional)
.unwrap_or(false);
let kind = match is_optional {
true => NpmDependencyEntryKind::OptionalPeer,
false => NpmDependencyEntryKind::Peer,
};
let entry = parse_dep_entry(entry, kind)?;
result.insert(entry.bare_specifier.clone(), entry);
}
for entry in &self.dependencies {
let entry = parse_dep_entry(entry, NpmDependencyEntryKind::Dep)?;
// people may define a dependency as a peer dependency as well,
// so in those cases, attempt to resolve as a peer dependency,
// but then use this dependency version requirement otherwise
if let Some(peer_dep_entry) = result.get_mut(&entry.bare_specifier) {
peer_dep_entry.peer_dep_version_req = Some(entry.version_req);
} else {
result.insert(entry.bare_specifier.clone(), entry);
}
}
Ok(result.into_values().collect())
}
}
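A small sketch (hypothetical package data, not from the diff) of how a dependency that is also declared as a peer dependency collapses into a single peer entry, with the regular requirement kept as the fallback:

#[cfg(test)]
#[test]
fn peer_and_regular_dep_merge_sketch() {
  // Sketch only (hypothetical data): "react" is declared both as a regular
  // dependency and as a peer dependency of the same package.
  let info = NpmPackageVersionInfo {
    version: "1.0.0".to_string(),
    dependencies: HashMap::from([("react".to_string(), "^16.0.0".to_string())]),
    peer_dependencies: HashMap::from([("react".to_string(), "^16.0.0".to_string())]),
    ..Default::default()
  };
  let entries = info.dependencies_as_entries().unwrap();
  // the two declarations merge into one peer entry; the regular requirement
  // is stored in `peer_dep_version_req` as the fallback
  assert_eq!(entries.len(), 1);
  assert_eq!(entries[0].kind, NpmDependencyEntryKind::Peer);
  assert!(entries[0].peer_dep_version_req.is_some());
}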
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
#[derive(Debug, Default, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct NpmPackageVersionDistInfo {
/// URL to the tarball.
pub tarball: String,
@ -100,16 +181,50 @@ pub struct NpmPackageVersionDistInfo {
pub integrity: Option<String>,
}
#[derive(Clone)]
pub struct NpmRegistryApi {
base_url: Url,
cache: NpmCache,
mem_cache: Arc<Mutex<HashMap<String, Option<NpmPackageInfo>>>>,
cache_setting: CacheSetting,
progress_bar: ProgressBar,
pub trait NpmRegistryApi: Clone + Sync + Send + 'static {
fn maybe_package_info(
&self,
name: &str,
) -> BoxFuture<'static, Result<Option<NpmPackageInfo>, AnyError>>;
fn package_info(
&self,
name: &str,
) -> BoxFuture<'static, Result<NpmPackageInfo, AnyError>> {
let api = self.clone();
let name = name.to_string();
async move {
let maybe_package_info = api.maybe_package_info(&name).await?;
match maybe_package_info {
Some(package_info) => Ok(package_info),
None => bail!("npm package '{}' does not exist", name),
}
}
.boxed()
}
fn package_version_info(
&self,
name: &str,
version: &NpmVersion,
) -> BoxFuture<'static, Result<Option<NpmPackageVersionInfo>, AnyError>> {
let api = self.clone();
let name = name.to_string();
let version = version.to_string();
async move {
// todo(dsherret): this could be optimized to not clone the
// entire package info in the case of the RealNpmRegistryApi
let mut package_info = api.package_info(&name).await?;
Ok(package_info.versions.remove(&version))
}
.boxed()
}
}
impl NpmRegistryApi {
#[derive(Clone)]
pub struct RealNpmRegistryApi(Arc<RealNpmRegistryApiInner>);
impl RealNpmRegistryApi {
pub fn default_url() -> Url {
let env_var_name = "DENO_NPM_REGISTRY";
if let Ok(registry_url) = std::env::var(env_var_name) {
@ -135,30 +250,40 @@ impl NpmRegistryApi {
cache_setting: CacheSetting,
progress_bar: ProgressBar,
) -> Self {
Self {
Self(Arc::new(RealNpmRegistryApiInner {
base_url,
cache,
mem_cache: Default::default(),
cache_setting,
progress_bar,
}
}))
}
pub fn base_url(&self) -> &Url {
&self.base_url
&self.0.base_url
}
}
pub async fn package_info(
impl NpmRegistryApi for RealNpmRegistryApi {
fn maybe_package_info(
&self,
name: &str,
) -> Result<NpmPackageInfo, AnyError> {
let maybe_package_info = self.maybe_package_info(name).await?;
match maybe_package_info {
Some(package_info) => Ok(package_info),
None => bail!("npm package '{}' does not exist", name),
}
) -> BoxFuture<'static, Result<Option<NpmPackageInfo>, AnyError>> {
let api = self.clone();
let name = name.to_string();
async move { api.0.maybe_package_info(&name).await }.boxed()
}
}
struct RealNpmRegistryApiInner {
base_url: Url,
cache: NpmCache,
mem_cache: Mutex<HashMap<String, Option<NpmPackageInfo>>>,
cache_setting: CacheSetting,
progress_bar: ProgressBar,
}
impl RealNpmRegistryApiInner {
pub async fn maybe_package_info(
&self,
name: &str,
@ -331,3 +456,100 @@ impl NpmRegistryApi {
name_folder_path.join("registry.json")
}
}
/// Note: This test struct is not thread safe for setup
/// purposes. Construct everything on the same thread.
#[cfg(test)]
#[derive(Clone, Default)]
pub struct TestNpmRegistryApi {
package_infos: Arc<Mutex<HashMap<String, NpmPackageInfo>>>,
}
#[cfg(test)]
impl TestNpmRegistryApi {
pub fn add_package_info(&self, name: &str, info: NpmPackageInfo) {
let previous = self.package_infos.lock().insert(name.to_string(), info);
assert!(previous.is_none());
}
pub fn ensure_package(&self, name: &str) {
if !self.package_infos.lock().contains_key(name) {
self.add_package_info(
name,
NpmPackageInfo {
name: name.to_string(),
..Default::default()
},
);
}
}
pub fn ensure_package_version(&self, name: &str, version: &str) {
self.ensure_package(name);
let mut infos = self.package_infos.lock();
let info = infos.get_mut(name).unwrap();
if !info.versions.contains_key(version) {
info.versions.insert(
version.to_string(),
NpmPackageVersionInfo {
version: version.to_string(),
..Default::default()
},
);
}
}
pub fn add_dependency(
&self,
package_from: (&str, &str),
package_to: (&str, &str),
) {
let mut infos = self.package_infos.lock();
let info = infos.get_mut(package_from.0).unwrap();
let version = info.versions.get_mut(package_from.1).unwrap();
version
.dependencies
.insert(package_to.0.to_string(), package_to.1.to_string());
}
pub fn add_peer_dependency(
&self,
package_from: (&str, &str),
package_to: (&str, &str),
) {
let mut infos = self.package_infos.lock();
let info = infos.get_mut(package_from.0).unwrap();
let version = info.versions.get_mut(package_from.1).unwrap();
version
.peer_dependencies
.insert(package_to.0.to_string(), package_to.1.to_string());
}
pub fn add_optional_peer_dependency(
&self,
package_from: (&str, &str),
package_to: (&str, &str),
) {
let mut infos = self.package_infos.lock();
let info = infos.get_mut(package_from.0).unwrap();
let version = info.versions.get_mut(package_from.1).unwrap();
version
.peer_dependencies
.insert(package_to.0.to_string(), package_to.1.to_string());
version.peer_dependencies_meta.insert(
package_to.0.to_string(),
NpmPeerDependencyMeta { optional: true },
);
}
}
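A sketch (hypothetical package names) of how these test helpers compose when setting up a peer dependency scenario for the resolver tests:

#[cfg(test)]
#[test]
fn peer_dep_setup_sketch() {
  let api = TestNpmRegistryApi::default();
  api.ensure_package_version("pkg-a", "1.0.0");
  api.ensure_package_version("pkg-b", "2.0.0");
  // pkg-a@1.0.0 now declares a peer dependency on pkg-b@2
  api.add_peer_dependency(("pkg-a", "1.0.0"), ("pkg-b", "2"));
}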
#[cfg(test)]
impl NpmRegistryApi for TestNpmRegistryApi {
fn maybe_package_info(
&self,
name: &str,
) -> BoxFuture<'static, Result<Option<NpmPackageInfo>, AnyError>> {
let result = self.package_infos.lock().get(name).cloned();
Box::pin(deno_core::futures::future::ready(Ok(result)))
}
}

File diff suppressed because it is too large

cli/npm/resolution/graph.rs (new file, 2033 lines)
File diff suppressed because it is too large

cli/npm/resolution/mod.rs (new file, 676 lines)

@ -0,0 +1,676 @@
// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
use std::collections::HashMap;
use std::collections::HashSet;
use deno_ast::ModuleSpecifier;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::futures;
use deno_core::parking_lot::RwLock;
use serde::Deserialize;
use serde::Serialize;
use crate::lockfile::Lockfile;
use self::graph::GraphDependencyResolver;
use self::snapshot::NpmPackagesPartitioned;
use super::cache::should_sync_download;
use super::cache::NpmPackageCacheFolderId;
use super::registry::NpmPackageVersionDistInfo;
use super::registry::RealNpmRegistryApi;
use super::semver::NpmVersion;
use super::semver::SpecifierVersionReq;
use super::NpmRegistryApi;
mod graph;
mod snapshot;
use graph::Graph;
pub use snapshot::NpmResolutionSnapshot;
/// The version matcher used for npm schemed urls is more strict than
/// the one used by npm packages and so we represent either via a trait.
pub trait NpmVersionMatcher {
fn tag(&self) -> Option<&str>;
fn matches(&self, version: &NpmVersion) -> bool;
fn version_text(&self) -> String;
}
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct NpmPackageReference {
pub req: NpmPackageReq,
pub sub_path: Option<String>,
}
impl NpmPackageReference {
pub fn from_specifier(
specifier: &ModuleSpecifier,
) -> Result<NpmPackageReference, AnyError> {
Self::from_str(specifier.as_str())
}
pub fn from_str(specifier: &str) -> Result<NpmPackageReference, AnyError> {
let specifier = match specifier.strip_prefix("npm:") {
Some(s) => s,
None => {
bail!("Not an npm specifier: {}", specifier);
}
};
let parts = specifier.split('/').collect::<Vec<_>>();
let name_part_len = if specifier.starts_with('@') { 2 } else { 1 };
if parts.len() < name_part_len {
return Err(generic_error(format!("Not a valid package: {}", specifier)));
}
let name_parts = &parts[0..name_part_len];
let last_name_part = &name_parts[name_part_len - 1];
let (name, version_req) = if let Some(at_index) = last_name_part.rfind('@')
{
let version = &last_name_part[at_index + 1..];
let last_name_part = &last_name_part[..at_index];
let version_req = SpecifierVersionReq::parse(version)
.with_context(|| "Invalid version requirement.")?;
let name = if name_part_len == 1 {
last_name_part.to_string()
} else {
format!("{}/{}", name_parts[0], last_name_part)
};
(name, Some(version_req))
} else {
(name_parts.join("/"), None)
};
let sub_path = if parts.len() == name_parts.len() {
None
} else {
Some(parts[name_part_len..].join("/"))
};
if let Some(sub_path) = &sub_path {
if let Some(at_index) = sub_path.rfind('@') {
let (new_sub_path, version) = sub_path.split_at(at_index);
let msg = format!(
"Invalid package specifier 'npm:{}/{}'. Did you mean to write 'npm:{}{}/{}'?",
name, sub_path, name, version, new_sub_path
);
return Err(generic_error(msg));
}
}
Ok(NpmPackageReference {
req: NpmPackageReq { name, version_req },
sub_path,
})
}
}
impl std::fmt::Display for NpmPackageReference {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(sub_path) = &self.sub_path {
write!(f, "npm:{}/{}", self.req, sub_path)
} else {
write!(f, "npm:{}", self.req)
}
}
}
#[derive(
Clone, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize,
)]
pub struct NpmPackageReq {
pub name: String,
pub version_req: Option<SpecifierVersionReq>,
}
impl std::fmt::Display for NpmPackageReq {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match &self.version_req {
Some(req) => write!(f, "{}@{}", self.name, req),
None => write!(f, "{}", self.name),
}
}
}
impl NpmPackageReq {
pub fn from_str(text: &str) -> Result<Self, AnyError> {
// probably should do something more targeted in the future
let reference = NpmPackageReference::from_str(&format!("npm:{}", text))?;
Ok(reference.req)
}
}
impl NpmVersionMatcher for NpmPackageReq {
fn tag(&self) -> Option<&str> {
match &self.version_req {
Some(version_req) => version_req.tag(),
None => Some("latest"),
}
}
fn matches(&self, version: &NpmVersion) -> bool {
match self.version_req.as_ref() {
Some(req) => {
assert_eq!(self.tag(), None);
match req.range() {
Some(range) => range.satisfies(version),
None => false,
}
}
None => version.pre.is_empty(),
}
}
fn version_text(&self) -> String {
self
.version_req
.as_ref()
.map(|v| format!("{}", v))
.unwrap_or_else(|| "non-prerelease".to_string())
}
}
#[derive(
Debug, Clone, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize,
)]
pub struct NpmPackageId {
pub name: String,
pub version: NpmVersion,
pub peer_dependencies: Vec<NpmPackageId>,
}
impl NpmPackageId {
#[allow(unused)]
pub fn scope(&self) -> Option<&str> {
if self.name.starts_with('@') && self.name.contains('/') {
self.name.split('/').next()
} else {
None
}
}
pub fn as_serialized(&self) -> String {
self.as_serialized_with_level(0)
}
fn as_serialized_with_level(&self, level: usize) -> String {
// WARNING: This should not change because it's used in the lockfile
let mut result = format!(
"{}@{}",
if level == 0 {
self.name.to_string()
} else {
self.name.replace('/', "+")
},
self.version
);
for peer in &self.peer_dependencies {
// unfortunately we can't do something like `_3` when
// this gets deep because npm package names can start
// with a number
result.push_str(&"_".repeat(level + 1));
result.push_str(&peer.as_serialized_with_level(level + 1));
}
result
}
pub fn from_serialized(id: &str) -> Result<Self, AnyError> {
use monch::*;
fn parse_name(input: &str) -> ParseResult<&str> {
if_not_empty(substring(move |input| {
for (pos, c) in input.char_indices() {
// first character might be a scope, so skip it
if pos > 0 && c == '@' {
return Ok((&input[pos..], ()));
}
}
ParseError::backtrace()
}))(input)
}
fn parse_version(input: &str) -> ParseResult<&str> {
if_not_empty(substring(skip_while(|c| c != '_')))(input)
}
fn parse_name_and_version(
input: &str,
) -> ParseResult<(String, NpmVersion)> {
let (input, name) = parse_name(input)?;
let (input, _) = ch('@')(input)?;
let at_version_input = input;
let (input, version) = parse_version(input)?;
match NpmVersion::parse(version) {
Ok(version) => Ok((input, (name.to_string(), version))),
Err(err) => ParseError::fail(at_version_input, format!("{:#}", err)),
}
}
fn parse_level_at_level<'a>(
level: usize,
) -> impl Fn(&'a str) -> ParseResult<'a, ()> {
fn parse_level(input: &str) -> ParseResult<usize> {
let level = input.chars().take_while(|c| *c == '_').count();
Ok((&input[level..], level))
}
move |input| {
let (input, parsed_level) = parse_level(input)?;
if parsed_level == level {
Ok((input, ()))
} else {
ParseError::backtrace()
}
}
}
fn parse_peers_at_level<'a>(
level: usize,
) -> impl Fn(&'a str) -> ParseResult<'a, Vec<NpmPackageId>> {
move |mut input| {
let mut peers = Vec::new();
while let Ok((level_input, _)) = parse_level_at_level(level)(input) {
input = level_input;
let peer_result = parse_id_at_level(level)(input)?;
input = peer_result.0;
peers.push(peer_result.1);
}
Ok((input, peers))
}
}
fn parse_id_at_level<'a>(
level: usize,
) -> impl Fn(&'a str) -> ParseResult<'a, NpmPackageId> {
move |input| {
let (input, (name, version)) = parse_name_and_version(input)?;
let name = if level > 0 {
name.replace('+', "/")
} else {
name
};
let (input, peer_dependencies) =
parse_peers_at_level(level + 1)(input)?;
Ok((
input,
NpmPackageId {
name,
version,
peer_dependencies,
},
))
}
}
with_failure_handling(parse_id_at_level(0))(id)
.with_context(|| format!("Invalid npm package id '{}'.", id))
}
pub fn display(&self) -> String {
// Don't implement std::fmt::Display because we don't
// want this to be used by accident in certain scenarios.
format!("{}@{}", self.name, self.version)
}
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct NpmResolutionPackage {
pub id: NpmPackageId,
/// The peer dependency resolution can differ for the same
/// package (name and version) depending on where it is in
/// the resolution tree. This copy index indicates which
/// copy of the package this is.
pub copy_index: usize,
pub dist: NpmPackageVersionDistInfo,
/// Key is what the package refers to the other package as,
/// which could be different from the package name.
pub dependencies: HashMap<String, NpmPackageId>,
}
impl NpmResolutionPackage {
pub fn get_package_cache_folder_id(&self) -> NpmPackageCacheFolderId {
NpmPackageCacheFolderId {
name: self.id.name.clone(),
version: self.id.version.clone(),
copy_index: self.copy_index,
}
}
}
pub struct NpmResolution {
api: RealNpmRegistryApi,
snapshot: RwLock<NpmResolutionSnapshot>,
update_sempahore: tokio::sync::Semaphore,
}
impl std::fmt::Debug for NpmResolution {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let snapshot = self.snapshot.read();
f.debug_struct("NpmResolution")
.field("snapshot", &snapshot)
.finish()
}
}
impl NpmResolution {
pub fn new(
api: RealNpmRegistryApi,
initial_snapshot: Option<NpmResolutionSnapshot>,
) -> Self {
Self {
api,
snapshot: RwLock::new(initial_snapshot.unwrap_or_default()),
update_sempahore: tokio::sync::Semaphore::new(1),
}
}
pub async fn add_package_reqs(
&self,
package_reqs: Vec<NpmPackageReq>,
) -> Result<(), AnyError> {
// only allow one thread in here at a time
let _permit = self.update_sempahore.acquire().await.unwrap();
let snapshot = self.snapshot.read().clone();
let snapshot = self
.add_package_reqs_to_snapshot(package_reqs, snapshot)
.await?;
*self.snapshot.write() = snapshot;
Ok(())
}
pub async fn set_package_reqs(
&self,
package_reqs: HashSet<NpmPackageReq>,
) -> Result<(), AnyError> {
// only allow one thread in here at a time
let _permit = self.update_sempahore.acquire().await.unwrap();
let snapshot = self.snapshot.read().clone();
let has_removed_package = !snapshot
.package_reqs
.keys()
.all(|req| package_reqs.contains(req));
// if any packages were removed, we need to completely recreate the npm resolution snapshot
let snapshot = if has_removed_package {
NpmResolutionSnapshot::default()
} else {
snapshot
};
let snapshot = self
.add_package_reqs_to_snapshot(
package_reqs.into_iter().collect(),
snapshot,
)
.await?;
*self.snapshot.write() = snapshot;
Ok(())
}
async fn add_package_reqs_to_snapshot(
&self,
mut package_reqs: Vec<NpmPackageReq>,
snapshot: NpmResolutionSnapshot,
) -> Result<NpmResolutionSnapshot, AnyError> {
// convert the snapshot to a traversable graph
let mut graph = Graph::from_snapshot(snapshot);
// multiple packages are resolved in alphabetical order
package_reqs.sort_by(|a, b| a.name.cmp(&b.name));
// go over the top level packages first, then down the
// tree one level at a time through all the branches
let mut unresolved_tasks = Vec::with_capacity(package_reqs.len());
for package_req in package_reqs {
if graph.has_package_req(&package_req) {
// skip analyzing this package, as there's already a matching top level package
continue;
}
// no existing best version, so resolve the current packages
let api = self.api.clone();
let maybe_info = if should_sync_download() {
// for deterministic test output
Some(api.package_info(&package_req.name).await)
} else {
None
};
unresolved_tasks.push(tokio::task::spawn(async move {
let info = match maybe_info {
Some(info) => info?,
None => api.package_info(&package_req.name).await?,
};
Result::<_, AnyError>::Ok((package_req, info))
}));
}
let mut resolver = GraphDependencyResolver::new(&mut graph, &self.api);
for result in futures::future::join_all(unresolved_tasks).await {
let (package_req, info) = result??;
resolver.add_package_req(&package_req, info)?;
}
resolver.resolve_pending().await?;
graph.into_snapshot(&self.api).await
}
pub fn resolve_package_from_id(
&self,
id: &NpmPackageId,
) -> Option<NpmResolutionPackage> {
self.snapshot.read().package_from_id(id).cloned()
}
pub fn resolve_package_cache_folder_id_from_id(
&self,
id: &NpmPackageId,
) -> Option<NpmPackageCacheFolderId> {
self
.snapshot
.read()
.package_from_id(id)
.map(|p| p.get_package_cache_folder_id())
}
pub fn resolve_package_from_package(
&self,
name: &str,
referrer: &NpmPackageCacheFolderId,
) -> Result<NpmResolutionPackage, AnyError> {
self
.snapshot
.read()
.resolve_package_from_package(name, referrer)
.cloned()
}
/// Resolve a node package from a deno module.
pub fn resolve_package_from_deno_module(
&self,
package: &NpmPackageReq,
) -> Result<NpmResolutionPackage, AnyError> {
self
.snapshot
.read()
.resolve_package_from_deno_module(package)
.cloned()
}
pub fn all_packages(&self) -> Vec<NpmResolutionPackage> {
self.snapshot.read().all_packages()
}
pub fn all_packages_partitioned(&self) -> NpmPackagesPartitioned {
self.snapshot.read().all_packages_partitioned()
}
pub fn has_packages(&self) -> bool {
!self.snapshot.read().packages.is_empty()
}
pub fn snapshot(&self) -> NpmResolutionSnapshot {
self.snapshot.read().clone()
}
pub fn lock(
&self,
lockfile: &mut Lockfile,
snapshot: &NpmResolutionSnapshot,
) -> Result<(), AnyError> {
for (package_req, package_id) in snapshot.package_reqs.iter() {
lockfile.insert_npm_specifier(package_req, package_id);
}
for package in self.all_packages() {
lockfile.check_or_insert_npm_package(&package)?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn parse_npm_package_ref() {
assert_eq!(
NpmPackageReference::from_str("npm:@package/test").unwrap(),
NpmPackageReference {
req: NpmPackageReq {
name: "@package/test".to_string(),
version_req: None,
},
sub_path: None,
}
);
assert_eq!(
NpmPackageReference::from_str("npm:@package/test@1").unwrap(),
NpmPackageReference {
req: NpmPackageReq {
name: "@package/test".to_string(),
version_req: Some(SpecifierVersionReq::parse("1").unwrap()),
},
sub_path: None,
}
);
assert_eq!(
NpmPackageReference::from_str("npm:@package/test@~1.1/sub_path").unwrap(),
NpmPackageReference {
req: NpmPackageReq {
name: "@package/test".to_string(),
version_req: Some(SpecifierVersionReq::parse("~1.1").unwrap()),
},
sub_path: Some("sub_path".to_string()),
}
);
assert_eq!(
NpmPackageReference::from_str("npm:@package/test/sub_path").unwrap(),
NpmPackageReference {
req: NpmPackageReq {
name: "@package/test".to_string(),
version_req: None,
},
sub_path: Some("sub_path".to_string()),
}
);
assert_eq!(
NpmPackageReference::from_str("npm:test").unwrap(),
NpmPackageReference {
req: NpmPackageReq {
name: "test".to_string(),
version_req: None,
},
sub_path: None,
}
);
assert_eq!(
NpmPackageReference::from_str("npm:test@^1.2").unwrap(),
NpmPackageReference {
req: NpmPackageReq {
name: "test".to_string(),
version_req: Some(SpecifierVersionReq::parse("^1.2").unwrap()),
},
sub_path: None,
}
);
assert_eq!(
NpmPackageReference::from_str("npm:test@~1.1/sub_path").unwrap(),
NpmPackageReference {
req: NpmPackageReq {
name: "test".to_string(),
version_req: Some(SpecifierVersionReq::parse("~1.1").unwrap()),
},
sub_path: Some("sub_path".to_string()),
}
);
assert_eq!(
NpmPackageReference::from_str("npm:@package/test/sub_path").unwrap(),
NpmPackageReference {
req: NpmPackageReq {
name: "@package/test".to_string(),
version_req: None,
},
sub_path: Some("sub_path".to_string()),
}
);
assert_eq!(
NpmPackageReference::from_str("npm:@package")
.err()
.unwrap()
.to_string(),
"Not a valid package: @package"
);
}
#[test]
fn serialize_npm_package_id() {
let id = NpmPackageId {
name: "pkg-a".to_string(),
version: NpmVersion::parse("1.2.3").unwrap(),
peer_dependencies: vec![
NpmPackageId {
name: "pkg-b".to_string(),
version: NpmVersion::parse("3.2.1").unwrap(),
peer_dependencies: vec![
NpmPackageId {
name: "pkg-c".to_string(),
version: NpmVersion::parse("1.3.2").unwrap(),
peer_dependencies: vec![],
},
NpmPackageId {
name: "pkg-d".to_string(),
version: NpmVersion::parse("2.3.4").unwrap(),
peer_dependencies: vec![],
},
],
},
NpmPackageId {
name: "pkg-e".to_string(),
version: NpmVersion::parse("2.3.1").unwrap(),
peer_dependencies: vec![NpmPackageId {
name: "pkg-f".to_string(),
version: NpmVersion::parse("2.3.1").unwrap(),
peer_dependencies: vec![],
}],
},
],
};
let serialized = id.as_serialized();
assert_eq!(serialized, "pkg-a@1.2.3_pkg-b@3.2.1__pkg-c@1.3.2__pkg-d@2.3.4_pkg-e@2.3.1__pkg-f@2.3.1");
assert_eq!(NpmPackageId::from_serialized(&serialized).unwrap(), id);
}
}

cli/npm/resolution/snapshot.rs

@ -0,0 +1,470 @@
// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;
use deno_core::anyhow::anyhow;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_core::futures;
use deno_core::parking_lot::Mutex;
use serde::Deserialize;
use serde::Serialize;
use crate::lockfile::Lockfile;
use crate::npm::cache::should_sync_download;
use crate::npm::cache::NpmPackageCacheFolderId;
use crate::npm::registry::NpmPackageVersionDistInfo;
use crate::npm::registry::NpmRegistryApi;
use crate::npm::registry::RealNpmRegistryApi;
use super::NpmPackageId;
use super::NpmPackageReq;
use super::NpmResolutionPackage;
use super::NpmVersionMatcher;
/// Packages partitioned by if they are "copy" packages or not.
pub struct NpmPackagesPartitioned {
pub packages: Vec<NpmResolutionPackage>,
/// Since peer dependency resolution occurs based on ancestors and ancestor
/// siblings, this may sometimes cause the same package (name and version)
/// to have different dependencies based on where it appears in the tree.
/// For these packages, we create a "copy package" or duplicate of the package
/// whose dependencies are that of where in the tree they've resolved to.
pub copy_packages: Vec<NpmResolutionPackage>,
}
impl NpmPackagesPartitioned {
pub fn into_all(self) -> Vec<NpmResolutionPackage> {
let mut packages = self.packages;
packages.extend(self.copy_packages);
packages
}
}
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct NpmResolutionSnapshot {
#[serde(with = "map_to_vec")]
pub(super) package_reqs: HashMap<NpmPackageReq, NpmPackageId>,
pub(super) packages_by_name: HashMap<String, Vec<NpmPackageId>>,
#[serde(with = "map_to_vec")]
pub(super) packages: HashMap<NpmPackageId, NpmResolutionPackage>,
}
// This is done so the maps with non-string keys get serialized and deserialized as vectors.
// Adapted from: https://github.com/serde-rs/serde/issues/936#issuecomment-302281792
mod map_to_vec {
use std::collections::HashMap;
use serde::de::Deserialize;
use serde::de::Deserializer;
use serde::ser::Serializer;
use serde::Serialize;
pub fn serialize<S, K: Serialize, V: Serialize>(
map: &HashMap<K, V>,
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_seq(map.iter())
}
pub fn deserialize<
'de,
D,
K: Deserialize<'de> + Eq + std::hash::Hash,
V: Deserialize<'de>,
>(
deserializer: D,
) -> Result<HashMap<K, V>, D::Error>
where
D: Deserializer<'de>,
{
let mut map = HashMap::new();
for (key, value) in Vec::<(K, V)>::deserialize(deserializer)? {
map.insert(key, value);
}
Ok(map)
}
}
impl NpmResolutionSnapshot {
/// Resolve a node package from a deno module.
pub fn resolve_package_from_deno_module(
&self,
req: &NpmPackageReq,
) -> Result<&NpmResolutionPackage, AnyError> {
match self.package_reqs.get(req) {
Some(id) => Ok(self.packages.get(id).unwrap()),
None => bail!("could not find npm package directory for '{}'", req),
}
}
pub fn top_level_packages(&self) -> Vec<NpmPackageId> {
self
.package_reqs
.values()
.cloned()
.collect::<HashSet<_>>()
.into_iter()
.collect::<Vec<_>>()
}
pub fn package_from_id(
&self,
id: &NpmPackageId,
) -> Option<&NpmResolutionPackage> {
self.packages.get(id)
}
pub fn resolve_package_from_package(
&self,
name: &str,
referrer: &NpmPackageCacheFolderId,
) -> Result<&NpmResolutionPackage, AnyError> {
// todo(dsherret): do we need an additional hashmap to get this quickly?
let referrer_package = self
.packages_by_name
.get(&referrer.name)
.and_then(|packages| {
packages
.iter()
.filter(|p| p.version == referrer.version)
.filter_map(|id| {
let package = self.packages.get(id)?;
if package.copy_index == referrer.copy_index {
Some(package)
} else {
None
}
})
.next()
})
.ok_or_else(|| {
anyhow!("could not find referrer npm package '{}'", referrer)
})?;
let name = name_without_path(name);
if let Some(id) = referrer_package.dependencies.get(name) {
return Ok(self.packages.get(id).unwrap());
}
if referrer_package.id.name == name {
return Ok(referrer_package);
}
// TODO(bartlomieju): this should use a reverse lookup table in the
// snapshot instead of resolving best version again.
let req = NpmPackageReq {
name: name.to_string(),
version_req: None,
};
if let Some(id) = self.resolve_best_package_id(name, &req) {
if let Some(pkg) = self.packages.get(&id) {
return Ok(pkg);
}
}
bail!(
"could not find npm package '{}' referenced by '{}'",
name,
referrer
)
}
pub fn all_packages(&self) -> Vec<NpmResolutionPackage> {
self.packages.values().cloned().collect()
}
pub fn all_packages_partitioned(&self) -> NpmPackagesPartitioned {
let mut packages = self.all_packages();
let mut copy_packages = Vec::with_capacity(packages.len() / 2); // at most 1 copy for every package
// partition out any packages that are "copy" packages
for i in (0..packages.len()).rev() {
if packages[i].copy_index > 0 {
copy_packages.push(packages.swap_remove(i));
}
}
NpmPackagesPartitioned {
packages,
copy_packages,
}
}
pub fn resolve_best_package_id(
&self,
name: &str,
version_matcher: &impl NpmVersionMatcher,
) -> Option<NpmPackageId> {
// todo(dsherret): this is not exactly correct because some ids
// will be better than others due to peer dependencies
let mut maybe_best_id: Option<&NpmPackageId> = None;
if let Some(ids) = self.packages_by_name.get(name) {
for id in ids {
if version_matcher.matches(&id.version) {
let is_best_version = maybe_best_id
.as_ref()
.map(|best_id| best_id.version.cmp(&id.version).is_lt())
.unwrap_or(true);
if is_best_version {
maybe_best_id = Some(id);
}
}
}
}
maybe_best_id.cloned()
}
pub async fn from_lockfile(
lockfile: Arc<Mutex<Lockfile>>,
api: &RealNpmRegistryApi,
) -> Result<Self, AnyError> {
let mut package_reqs: HashMap<NpmPackageReq, NpmPackageId>;
let mut packages_by_name: HashMap<String, Vec<NpmPackageId>>;
let mut packages: HashMap<NpmPackageId, NpmResolutionPackage>;
let mut copy_index_resolver: SnapshotPackageCopyIndexResolver;
{
let lockfile = lockfile.lock();
// pre-allocate collections
package_reqs =
HashMap::with_capacity(lockfile.content.npm.specifiers.len());
let packages_len = lockfile.content.npm.packages.len();
packages = HashMap::with_capacity(packages_len);
packages_by_name = HashMap::with_capacity(packages_len); // close enough
copy_index_resolver =
SnapshotPackageCopyIndexResolver::with_capacity(packages_len);
let mut verify_ids = HashSet::with_capacity(packages_len);
// collect the specifiers to version mappings
for (key, value) in &lockfile.content.npm.specifiers {
let package_req = NpmPackageReq::from_str(key)
.with_context(|| format!("Unable to parse npm specifier: {}", key))?;
let package_id = NpmPackageId::from_serialized(value)?;
package_reqs.insert(package_req, package_id.clone());
verify_ids.insert(package_id.clone());
}
// then the packages
for (key, value) in &lockfile.content.npm.packages {
let package_id = NpmPackageId::from_serialized(key)?;
// collect the dependencies
let mut dependencies = HashMap::default();
packages_by_name
.entry(package_id.name.to_string())
.or_default()
.push(package_id.clone());
for (name, specifier) in &value.dependencies {
let dep_id = NpmPackageId::from_serialized(specifier)?;
dependencies.insert(name.to_string(), dep_id.clone());
verify_ids.insert(dep_id);
}
let package = NpmResolutionPackage {
id: package_id.clone(),
copy_index: copy_index_resolver.resolve(&package_id),
// temporary dummy value
dist: NpmPackageVersionDistInfo {
tarball: "foobar".to_string(),
shasum: "foobar".to_string(),
integrity: Some("foobar".to_string()),
},
dependencies,
};
packages.insert(package_id, package);
}
// verify that all these ids exist in packages
for id in &verify_ids {
if !packages.contains_key(id) {
bail!(
"the lockfile is corrupt. You can recreate it with --lock-write"
);
}
}
}
let mut unresolved_tasks = Vec::with_capacity(packages_by_name.len());
// cache the package names in parallel in the registry api
// unless synchronous download should occur
if should_sync_download() {
let mut package_names = packages_by_name.keys().collect::<Vec<_>>();
package_names.sort();
for package_name in package_names {
api.package_info(package_name).await?;
}
} else {
for package_name in packages_by_name.keys() {
let package_name = package_name.clone();
let api = api.clone();
unresolved_tasks.push(tokio::task::spawn(async move {
api.package_info(&package_name).await?;
Result::<_, AnyError>::Ok(())
}));
}
}
for result in futures::future::join_all(unresolved_tasks).await {
result??;
}
// ensure the dist is set for each package
for package in packages.values_mut() {
// this will read from the memory cache now
let version_info = match api
.package_version_info(&package.id.name, &package.id.version)
.await?
{
Some(version_info) => version_info,
None => {
bail!("could not find '{}' specified in the lockfile. Maybe try again with --reload", package.id.display());
}
};
package.dist = version_info.dist;
}
Ok(Self {
package_reqs,
packages_by_name,
packages,
})
}
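// Illustrative, simplified shape of the lockfile data read above (the values
// are examples taken from the peer-dep test fixtures in this change; only the
// "specifiers", "packages", and "dependencies" fields are used here):
//
// "npm": {
//   "specifiers": {
//     "@denotest/peer-dep-test-child@1": "@denotest/peer-dep-test-child@1.0.0_@denotest+peer-dep-test-peer@1.0.0"
//   },
//   "packages": {
//     "@denotest/peer-dep-test-child@1.0.0_@denotest+peer-dep-test-peer@1.0.0": {
//       "dependencies": {
//         "@denotest/peer-dep-test-peer": "@denotest/peer-dep-test-peer@1.0.0"
//       }
//     }
//   }
// }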
}
pub struct SnapshotPackageCopyIndexResolver {
packages_to_copy_index: HashMap<NpmPackageId, usize>,
package_name_version_to_copy_count: HashMap<(String, String), usize>,
}
impl SnapshotPackageCopyIndexResolver {
pub fn with_capacity(capacity: usize) -> Self {
Self {
packages_to_copy_index: HashMap::with_capacity(capacity),
package_name_version_to_copy_count: HashMap::with_capacity(capacity), // close enough
}
}
pub fn from_map_with_capacity(
mut packages_to_copy_index: HashMap<NpmPackageId, usize>,
capacity: usize,
) -> Self {
let mut package_name_version_to_copy_count =
HashMap::with_capacity(capacity); // close enough
if capacity > packages_to_copy_index.len() {
packages_to_copy_index.reserve(capacity - packages_to_copy_index.len());
}
for (id, index) in &packages_to_copy_index {
let entry = package_name_version_to_copy_count
.entry((id.name.to_string(), id.version.to_string()))
.or_insert(0);
if *entry < *index {
*entry = *index;
}
}
Self {
packages_to_copy_index,
package_name_version_to_copy_count,
}
}
pub fn resolve(&mut self, id: &NpmPackageId) -> usize {
if let Some(index) = self.packages_to_copy_index.get(id) {
*index
} else {
let index = *self
.package_name_version_to_copy_count
.entry((id.name.to_string(), id.version.to_string()))
.and_modify(|count| {
*count += 1;
})
.or_insert(0);
self.packages_to_copy_index.insert(id.clone(), index);
index
}
}
}
fn name_without_path(name: &str) -> &str {
let mut search_start_index = 0;
if name.starts_with('@') {
if let Some(slash_index) = name.find('/') {
search_start_index = slash_index + 1;
}
}
if let Some(slash_index) = &name[search_start_index..].find('/') {
// get the name up until the path slash
&name[0..search_start_index + slash_index]
} else {
name
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_name_without_path() {
assert_eq!(name_without_path("foo"), "foo");
assert_eq!(name_without_path("@foo/bar"), "@foo/bar");
assert_eq!(name_without_path("@foo/bar/baz"), "@foo/bar");
assert_eq!(name_without_path("@hello"), "@hello");
}
#[test]
fn test_copy_index_resolver() {
let mut copy_index_resolver =
SnapshotPackageCopyIndexResolver::with_capacity(10);
assert_eq!(
copy_index_resolver
.resolve(&NpmPackageId::from_serialized("package@1.0.0").unwrap()),
0
);
assert_eq!(
copy_index_resolver
.resolve(&NpmPackageId::from_serialized("package@1.0.0").unwrap()),
0
);
assert_eq!(
copy_index_resolver.resolve(
&NpmPackageId::from_serialized("package@1.0.0_package-b@1.0.0")
.unwrap()
),
1
);
assert_eq!(
copy_index_resolver.resolve(
&NpmPackageId::from_serialized(
"package@1.0.0_package-b@1.0.0__package-c@2.0.0"
)
.unwrap()
),
2
);
assert_eq!(
copy_index_resolver.resolve(
&NpmPackageId::from_serialized("package@1.0.0_package-b@1.0.0")
.unwrap()
),
1
);
assert_eq!(
copy_index_resolver
.resolve(&NpmPackageId::from_serialized("package-b@1.0.0").unwrap()),
0
);
}
}

View file

@ -70,13 +70,19 @@ pub async fn cache_packages(
// and we want the output to be deterministic
packages.sort_by(|a, b| a.id.cmp(&b.id));
}
let mut handles = Vec::with_capacity(packages.len());
for package in packages {
assert_eq!(package.copy_index, 0); // the caller should not provide any of these
let cache = cache.clone();
let registry_url = registry_url.clone();
let handle = tokio::task::spawn(async move {
cache
.ensure_package(&package.id, &package.dist, &registry_url)
.ensure_package(
(package.id.name.as_str(), &package.id.version),
&package.dist,
&registry_url,
)
.await
});
if sync_download {

View file

@ -23,7 +23,7 @@ use crate::npm::resolvers::common::cache_packages;
use crate::npm::NpmCache;
use crate::npm::NpmPackageId;
use crate::npm::NpmPackageReq;
use crate::npm::NpmRegistryApi;
use crate::npm::RealNpmRegistryApi;
use super::common::ensure_registry_read_permission;
use super::common::InnerNpmPackageResolver;
@ -39,7 +39,7 @@ pub struct GlobalNpmPackageResolver {
impl GlobalNpmPackageResolver {
pub fn new(
cache: NpmCache,
api: NpmRegistryApi,
api: RealNpmRegistryApi,
initial_snapshot: Option<NpmResolutionSnapshot>,
) -> Self {
let registry_url = api.base_url().to_owned();
@ -53,7 +53,13 @@ impl GlobalNpmPackageResolver {
}
fn package_folder(&self, id: &NpmPackageId) -> PathBuf {
self.cache.package_folder(id, &self.registry_url)
let folder_id = self
.resolution
.resolve_package_cache_folder_id_from_id(id)
.unwrap();
self
.cache
.package_folder_for_id(&folder_id, &self.registry_url)
}
}
@ -74,7 +80,7 @@ impl InnerNpmPackageResolver for GlobalNpmPackageResolver {
) -> Result<PathBuf, AnyError> {
let referrer_pkg_id = self
.cache
.resolve_package_id_from_specifier(referrer, &self.registry_url)?;
.resolve_package_folder_id_from_specifier(referrer, &self.registry_url)?;
let pkg_result = self
.resolution
.resolve_package_from_package(name, &referrer_pkg_id);
@ -105,10 +111,15 @@ impl InnerNpmPackageResolver for GlobalNpmPackageResolver {
&self,
specifier: &ModuleSpecifier,
) -> Result<PathBuf, AnyError> {
let pkg_id = self
.cache
.resolve_package_id_from_specifier(specifier, &self.registry_url)?;
Ok(self.package_folder(&pkg_id))
let pkg_folder_id = self.cache.resolve_package_folder_id_from_specifier(
specifier,
&self.registry_url,
)?;
Ok(
self
.cache
.package_folder_for_id(&pkg_folder_id, &self.registry_url),
)
}
fn package_size(&self, package_id: &NpmPackageId) -> Result<u64, AnyError> {
@ -162,10 +173,22 @@ impl InnerNpmPackageResolver for GlobalNpmPackageResolver {
async fn cache_packages_in_resolver(
resolver: &GlobalNpmPackageResolver,
) -> Result<(), AnyError> {
let package_partitions = resolver.resolution.all_packages_partitioned();
cache_packages(
resolver.resolution.all_packages(),
package_partitions.packages,
&resolver.cache,
&resolver.registry_url,
)
.await
.await?;
// create the copy package folders
for copy in package_partitions.copy_packages {
resolver.cache.ensure_copy_package(
&copy.get_package_cache_folder_id(),
&resolver.registry_url,
)?;
}
Ok(())
}

View file

@ -24,12 +24,14 @@ use tokio::task::JoinHandle;
use crate::fs_util;
use crate::lockfile::Lockfile;
use crate::npm::cache::should_sync_download;
use crate::npm::cache::NpmPackageCacheFolderId;
use crate::npm::resolution::NpmResolution;
use crate::npm::resolution::NpmResolutionSnapshot;
use crate::npm::NpmCache;
use crate::npm::NpmPackageId;
use crate::npm::NpmPackageReq;
use crate::npm::NpmRegistryApi;
use crate::npm::NpmResolutionPackage;
use crate::npm::RealNpmRegistryApi;
use super::common::ensure_registry_read_permission;
use super::common::InnerNpmPackageResolver;
@ -48,7 +50,7 @@ pub struct LocalNpmPackageResolver {
impl LocalNpmPackageResolver {
pub fn new(
cache: NpmCache,
api: NpmRegistryApi,
api: RealNpmRegistryApi,
node_modules_folder: PathBuf,
initial_snapshot: Option<NpmResolutionSnapshot>,
) -> Self {
@ -101,6 +103,35 @@ impl LocalNpmPackageResolver {
// it's within the directory, so use it
specifier.to_file_path().ok()
}
fn get_package_id_folder(
&self,
package_id: &NpmPackageId,
) -> Result<PathBuf, AnyError> {
match self.resolution.resolve_package_from_id(package_id) {
Some(package) => Ok(self.get_package_id_folder_from_package(&package)),
None => bail!(
"Could not find package information for '{}'",
package_id.as_serialized()
),
}
}
fn get_package_id_folder_from_package(
&self,
package: &NpmResolutionPackage,
) -> PathBuf {
// package is stored at:
// node_modules/.deno/<package_cache_folder_id_folder_name>/node_modules/<package_name>
self
.root_node_modules_path
.join(".deno")
.join(get_package_folder_id_folder_name(
&package.get_package_cache_folder_id(),
))
.join("node_modules")
.join(&package.id.name)
}
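// Illustrative example (not part of the original change): with the layout above,
// the peer-dependency copy of "@denotest/peer-dep-test-grandchild" version 1.0.0
// (copy index 1) resolves to
// node_modules/.deno/@denotest+peer-dep-test-grandchild@1.0.0_1/node_modules/@denotest/peer-dep-test-grandchild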
}
impl InnerNpmPackageResolver for LocalNpmPackageResolver {
@ -108,19 +139,8 @@ impl InnerNpmPackageResolver for LocalNpmPackageResolver {
&self,
pkg_req: &NpmPackageReq,
) -> Result<PathBuf, AnyError> {
let resolved_package =
self.resolution.resolve_package_from_deno_module(pkg_req)?;
// it might be at the full path if there are duplicate names
let fully_resolved_folder_path = join_package_name(
&self.root_node_modules_path,
&resolved_package.id.to_string(),
);
Ok(if fully_resolved_folder_path.exists() {
fully_resolved_folder_path
} else {
join_package_name(&self.root_node_modules_path, &resolved_package.id.name)
})
let package = self.resolution.resolve_package_from_deno_module(pkg_req)?;
Ok(self.get_package_id_folder_from_package(&package))
}
fn resolve_package_folder_from_package(
@ -178,19 +198,9 @@ impl InnerNpmPackageResolver for LocalNpmPackageResolver {
}
fn package_size(&self, package_id: &NpmPackageId) -> Result<u64, AnyError> {
match self.resolution.resolve_package_from_id(package_id) {
Some(package) => Ok(fs_util::dir_size(
// package is stored at:
// node_modules/.deno/<package_id>/node_modules/<package_name>
&self
.root_node_modules_path
.join(".deno")
.join(package.id.to_string())
.join("node_modules")
.join(package.id.name),
)?),
None => bail!("Could not find package folder for '{}'", package_id),
}
let package_folder_path = self.get_package_id_folder(package_id)?;
Ok(fs_util::dir_size(&package_folder_path)?)
}
fn has_packages(&self) -> bool {
@ -255,10 +265,6 @@ async fn sync_resolution_with_fs(
registry_url: &Url,
root_node_modules_dir_path: &Path,
) -> Result<(), AnyError> {
fn get_package_folder_name(package_id: &NpmPackageId) -> String {
package_id.to_string().replace('/', "+")
}
let deno_local_registry_dir = root_node_modules_dir_path.join(".deno");
fs::create_dir_all(&deno_local_registry_dir).with_context(|| {
format!("Creating '{}'", deno_local_registry_dir.display())
@ -267,34 +273,45 @@ async fn sync_resolution_with_fs(
// 1. Write all the packages out the .deno directory.
//
// Copy (hardlink in future) <global_registry_cache>/<package_id>/ to
// node_modules/.deno/<package_id>/node_modules/<package_name>
// node_modules/.deno/<package_folder_id_folder_name>/node_modules/<package_name>
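// Illustrative example (not part of the original change): the globally cached
// "@denotest/peer-dep-test-grandchild" 1.0.0 package would be copied to
// node_modules/.deno/@denotest+peer-dep-test-grandchild@1.0.0/node_modules/@denotest/peer-dep-test-grandchild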
let sync_download = should_sync_download();
let mut all_packages = snapshot.all_packages();
let mut package_partitions = snapshot.all_packages_partitioned();
if sync_download {
// we're running the tests without --quiet
// and we want the output to be deterministic
all_packages.sort_by(|a, b| a.id.cmp(&b.id));
package_partitions.packages.sort_by(|a, b| a.id.cmp(&b.id));
}
let mut handles: Vec<JoinHandle<Result<(), AnyError>>> =
Vec::with_capacity(all_packages.len());
for package in &all_packages {
let folder_name = get_package_folder_name(&package.id);
Vec::with_capacity(package_partitions.packages.len());
for package in &package_partitions.packages {
let folder_name =
get_package_folder_id_folder_name(&package.get_package_cache_folder_id());
let folder_path = deno_local_registry_dir.join(&folder_name);
let initialized_file = folder_path.join("deno_initialized");
if !initialized_file.exists() {
let initialized_file = folder_path.join(".initialized");
if !cache.should_use_cache_for_npm_package(&package.id.name)
|| !initialized_file.exists()
{
let cache = cache.clone();
let registry_url = registry_url.clone();
let package = package.clone();
let handle = tokio::task::spawn(async move {
cache
.ensure_package(&package.id, &package.dist, &registry_url)
.ensure_package(
(&package.id.name, &package.id.version),
&package.dist,
&registry_url,
)
.await?;
let sub_node_modules = folder_path.join("node_modules");
let package_path =
join_package_name(&sub_node_modules, &package.id.name);
fs::create_dir_all(&package_path)
.with_context(|| format!("Creating '{}'", folder_path.display()))?;
let cache_folder = cache.package_folder(&package.id, &registry_url);
let cache_folder = cache.package_folder_for_name_and_version(
&package.id.name,
&package.id.version,
&registry_url,
);
// for now copy, but in the future consider hard linking
fs_util::copy_dir_recursive(&cache_folder, &package_path)?;
// write out a file that indicates this folder has been initialized
@ -314,16 +331,51 @@ async fn sync_resolution_with_fs(
result??; // surface the first error
}
// 2. Symlink all the dependencies into the .deno directory.
// 2. Create any "copy" packages, which are used for peer dependencies
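// Illustrative example (not part of the original change): the peer-dependency
// duplicate of "@denotest/peer-dep-test-grandchild@1.0.0" gets the folder
// node_modules/.deno/@denotest+peer-dep-test-grandchild@1.0.0_1, and its
// contents are hard linked from the copy-index-0 folder created in step 1.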
for package in &package_partitions.copy_packages {
let package_cache_folder_id = package.get_package_cache_folder_id();
let destination_path = deno_local_registry_dir
.join(&get_package_folder_id_folder_name(&package_cache_folder_id));
let initialized_file = destination_path.join(".initialized");
if !initialized_file.exists() {
let sub_node_modules = destination_path.join("node_modules");
let package_path = join_package_name(&sub_node_modules, &package.id.name);
fs::create_dir_all(&package_path).with_context(|| {
format!("Creating '{}'", destination_path.display())
})?;
let source_path = join_package_name(
&deno_local_registry_dir
.join(&get_package_folder_id_folder_name(
&package_cache_folder_id.with_no_count(),
))
.join("node_modules"),
&package.id.name,
);
fs_util::hard_link_dir_recursive(&source_path, &package_path)?;
// write out a file that indicates this folder has been initialized
fs::write(initialized_file, "")?;
}
}
let all_packages = package_partitions.into_all();
// 3. Symlink all the dependencies into the .deno directory.
//
// Symlink node_modules/.deno/<package_id>/node_modules/<dep_name> to
// node_modules/.deno/<dep_id>/node_modules/<dep_package_name>
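// Illustrative example (not part of the original change):
// node_modules/.deno/@denotest+peer-dep-test-child@1.0.0/node_modules/@denotest/peer-dep-test-peer
// would become a symlink to
// node_modules/.deno/@denotest+peer-dep-test-peer@1.0.0/node_modules/@denotest/peer-dep-test-peer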
for package in &all_packages {
let sub_node_modules = deno_local_registry_dir
.join(&get_package_folder_name(&package.id))
.join(&get_package_folder_id_folder_name(
&package.get_package_cache_folder_id(),
))
.join("node_modules");
for (name, dep_id) in &package.dependencies {
let dep_folder_name = get_package_folder_name(dep_id);
let dep_cache_folder_id = snapshot
.package_from_id(dep_id)
.unwrap()
.get_package_cache_folder_id();
let dep_folder_name =
get_package_folder_id_folder_name(&dep_cache_folder_id);
let dep_folder_path = join_package_name(
&deno_local_registry_dir
.join(dep_folder_name)
@ -337,7 +389,7 @@ async fn sync_resolution_with_fs(
}
}
// 3. Create all the packages in the node_modules folder, which are symlinks.
// 4. Create all the packages in the node_modules folder, which are symlinks.
//
// Symlink node_modules/<package_name> to
// node_modules/.deno/<package_id>/node_modules/<package_name>
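// Illustrative example (not part of the original change): the top-level entry
// node_modules/@denotest/peer-dep-test-child would be symlinked to
// node_modules/.deno/@denotest+peer-dep-test-child@1.0.0/node_modules/@denotest/peer-dep-test-child;
// a second top-level package with the same name falls back to
// package_id.display() for its folder name to avoid a collision.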
@ -353,29 +405,41 @@ async fn sync_resolution_with_fs(
let root_folder_name = if found_names.insert(package_id.name.clone()) {
package_id.name.clone()
} else if is_top_level {
package_id.to_string()
package_id.display()
} else {
continue; // skip, already handled
};
let local_registry_package_path = deno_local_registry_dir
.join(&get_package_folder_name(&package_id))
.join("node_modules")
.join(&package_id.name);
let package = snapshot.package_from_id(&package_id).unwrap();
let local_registry_package_path = join_package_name(
&deno_local_registry_dir
.join(&get_package_folder_id_folder_name(
&package.get_package_cache_folder_id(),
))
.join("node_modules"),
&package_id.name,
);
symlink_package_dir(
&local_registry_package_path,
&join_package_name(root_node_modules_dir_path, &root_folder_name),
)?;
if let Some(package) = snapshot.package_from_id(&package_id) {
for id in package.dependencies.values() {
pending_packages.push_back((id.clone(), false));
}
for id in package.dependencies.values() {
pending_packages.push_back((id.clone(), false));
}
}
Ok(())
}
fn get_package_folder_id_folder_name(id: &NpmPackageCacheFolderId) -> String {
let copy_str = if id.copy_index == 0 {
"".to_string()
} else {
format!("_{}", id.copy_index)
};
format!("{}@{}{}", id.name, id.version, copy_str).replace('/', "+")
}
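// Illustrative examples (not part of the original change), matching the
// integration test assertions in this change:
//   ("package", "1.0.0", copy_index 0) -> "package@1.0.0"
//   ("package", "1.0.0", copy_index 1) -> "package@1.0.0_1"
//   ("@denotest/peer-dep-test-grandchild", "1.0.0", copy_index 1)
//     -> "@denotest+peer-dep-test-grandchild@1.0.0_1"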
fn symlink_package_dir(
old_path: &Path,
new_path: &Path,

View file

@ -6,6 +6,7 @@ mod local;
use deno_ast::ModuleSpecifier;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::custom_error;
use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
@ -29,8 +30,8 @@ use self::local::LocalNpmPackageResolver;
use super::NpmCache;
use super::NpmPackageId;
use super::NpmPackageReq;
use super::NpmRegistryApi;
use super::NpmResolutionSnapshot;
use super::RealNpmRegistryApi;
const RESOLUTION_STATE_ENV_VAR_NAME: &str =
"DENO_DONT_USE_INTERNAL_NODE_COMPAT_STATE";
@ -71,7 +72,7 @@ pub struct NpmPackageResolver {
no_npm: bool,
inner: Arc<dyn InnerNpmPackageResolver>,
local_node_modules_path: Option<PathBuf>,
api: NpmRegistryApi,
api: RealNpmRegistryApi,
cache: NpmCache,
maybe_lockfile: Option<Arc<Mutex<Lockfile>>>,
}
@ -90,7 +91,7 @@ impl std::fmt::Debug for NpmPackageResolver {
impl NpmPackageResolver {
pub fn new(
cache: NpmCache,
api: NpmRegistryApi,
api: RealNpmRegistryApi,
unstable: bool,
no_npm: bool,
local_node_modules_path: Option<PathBuf>,
@ -112,7 +113,14 @@ impl NpmPackageResolver {
lockfile: Arc<Mutex<Lockfile>>,
) -> Result<(), AnyError> {
let snapshot =
NpmResolutionSnapshot::from_lockfile(lockfile.clone(), &self.api).await?;
NpmResolutionSnapshot::from_lockfile(lockfile.clone(), &self.api)
.await
.with_context(|| {
format!(
"failed reading lockfile '{}'",
lockfile.lock().filename.display()
)
})?;
self.maybe_lockfile = Some(lockfile);
if let Some(node_modules_folder) = &self.local_node_modules_path {
self.inner = Arc::new(LocalNpmPackageResolver::new(
@ -133,7 +141,7 @@ impl NpmPackageResolver {
fn new_with_maybe_snapshot(
cache: NpmCache,
api: NpmRegistryApi,
api: RealNpmRegistryApi,
unstable: bool,
no_npm: bool,
local_node_modules_path: Option<PathBuf>,

View file

@ -1,38 +0,0 @@
// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use monch::ParseError;
use monch::ParseErrorFailure;
use monch::ParseResult;
pub fn with_failure_handling<'a, T>(
combinator: impl Fn(&'a str) -> ParseResult<T>,
) -> impl Fn(&'a str) -> Result<T, AnyError> {
move |input| match combinator(input) {
Ok((input, result)) => {
if !input.is_empty() {
error_for_failure(fail_for_trailing_input(input))
} else {
Ok(result)
}
}
Err(ParseError::Backtrace) => {
error_for_failure(fail_for_trailing_input(input))
}
Err(ParseError::Failure(e)) => error_for_failure(e),
}
}
fn error_for_failure<T>(e: ParseErrorFailure) -> Result<T, AnyError> {
bail!(
"{}\n {}\n ~",
e.message,
// truncate the output to prevent wrapping in the console
e.input.chars().take(60).collect::<String>()
)
}
fn fail_for_trailing_input(input: &str) -> ParseErrorFailure {
ParseErrorFailure::new(input, "Unexpected character.")
}

View file

@ -11,7 +11,6 @@ use serde::Serialize;
use crate::npm::resolution::NpmVersionMatcher;
use self::errors::with_failure_handling;
use self::range::Partial;
use self::range::VersionBoundKind;
use self::range::VersionRange;
@ -20,7 +19,6 @@ use self::range::VersionRangeSet;
use self::range::XRange;
pub use self::specifier::SpecifierVersionReq;
mod errors;
mod range;
mod specifier;

View file

@ -6,7 +6,6 @@ use monch::*;
use serde::Deserialize;
use serde::Serialize;
use super::errors::with_failure_handling;
use super::range::Partial;
use super::range::VersionRange;
use super::range::XRange;

View file

@ -6,18 +6,17 @@ use std::path::Path;
use std::path::PathBuf;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use flate2::read::GzDecoder;
use tar::Archive;
use tar::EntryType;
use super::cache::NPM_PACKAGE_SYNC_LOCK_FILENAME;
use super::cache::with_folder_sync_lock;
use super::registry::NpmPackageVersionDistInfo;
use super::NpmPackageId;
use super::semver::NpmVersion;
pub fn verify_and_extract_tarball(
package: &NpmPackageId,
package: (&str, &NpmVersion),
data: &[u8],
dist_info: &NpmPackageVersionDistInfo,
output_folder: &Path,
@ -27,50 +26,19 @@ pub fn verify_and_extract_tarball(
} else {
// todo(dsherret): check shasum here
bail!(
"Errored on '{}': npm packages with no integrity are not implemented.",
package
"Errored on '{}@{}': npm packages with no integrity are not implemented.",
package.0,
package.1,
);
}
fs::create_dir_all(output_folder).with_context(|| {
format!("Error creating '{}'.", output_folder.display())
})?;
// This sync lock file is a way to ensure that partially created
// npm package directories aren't considered valid. This could maybe
// be a bit smarter in the future to not bother extracting here
// if another process has taken the lock in the past X seconds and
// wait for the other process to finish (it could try to create the
// file with `create_new(true)` then if it exists, check the metadata
// then wait until the other process finishes with a timeout), but
// for now this is good enough.
let sync_lock_path = output_folder.join(NPM_PACKAGE_SYNC_LOCK_FILENAME);
match fs::OpenOptions::new()
.write(true)
.create(true)
.open(&sync_lock_path)
{
Ok(_) => {
extract_tarball(data, output_folder)?;
// extraction succeeded, so only now delete this file
let _ignore = std::fs::remove_file(&sync_lock_path);
Ok(())
}
Err(err) => {
bail!(
concat!(
"Error creating package sync lock file at '{}'. ",
"Maybe try manually deleting this folder.\n\n{:#}",
),
output_folder.display(),
err
);
}
}
with_folder_sync_lock(package, output_folder, || {
extract_tarball(data, output_folder)
})
}
fn verify_tarball_integrity(
package: &NpmPackageId,
package: (&str, &NpmVersion),
data: &[u8],
npm_integrity: &str,
) -> Result<(), AnyError> {
@ -81,16 +49,18 @@ fn verify_tarball_integrity(
let algo = match hash_kind {
"sha512" => &SHA512,
hash_kind => bail!(
"Not implemented hash function for {}: {}",
package,
"Not implemented hash function for {}@{}: {}",
package.0,
package.1,
hash_kind
),
};
(algo, checksum.to_lowercase())
}
None => bail!(
"Not implemented integrity kind for {}: {}",
package,
"Not implemented integrity kind for {}@{}: {}",
package.0,
package.1,
npm_integrity
),
};
@ -101,8 +71,9 @@ fn verify_tarball_integrity(
let tarball_checksum = base64::encode(digest.as_ref()).to_lowercase();
if tarball_checksum != expected_checksum {
bail!(
"Tarball checksum did not match what was provided by npm registry for {}.\n\nExpected: {}\nActual: {}",
package,
"Tarball checksum did not match what was provided by npm registry for {}@{}.\n\nExpected: {}\nActual: {}",
package.0,
package.1,
expected_checksum,
tarball_checksum,
)
@ -162,32 +133,31 @@ mod test {
#[test]
pub fn test_verify_tarball() {
let package_id = NpmPackageId {
name: "package".to_string(),
version: NpmVersion::parse("1.0.0").unwrap(),
};
let package_name = "package".to_string();
let package_version = NpmVersion::parse("1.0.0").unwrap();
let package = (package_name.as_str(), &package_version);
let actual_checksum =
"z4phnx7vul3xvchq1m2ab9yg5aulvxxcg/spidns6c5h0ne8xyxysp+dgnkhfuwvy7kxvudbeoglodj6+sfapg==";
assert_eq!(
verify_tarball_integrity(&package_id, &Vec::new(), "test")
verify_tarball_integrity(package, &Vec::new(), "test")
.unwrap_err()
.to_string(),
"Not implemented integrity kind for package@1.0.0: test",
);
assert_eq!(
verify_tarball_integrity(&package_id, &Vec::new(), "sha1-test")
verify_tarball_integrity(package, &Vec::new(), "sha1-test")
.unwrap_err()
.to_string(),
"Not implemented hash function for package@1.0.0: sha1",
);
assert_eq!(
verify_tarball_integrity(&package_id, &Vec::new(), "sha512-test")
verify_tarball_integrity(package, &Vec::new(), "sha512-test")
.unwrap_err()
.to_string(),
format!("Tarball checksum did not match what was provided by npm registry for package@1.0.0.\n\nExpected: test\nActual: {}", actual_checksum),
);
assert!(verify_tarball_integrity(
&package_id,
package,
&Vec::new(),
&format!("sha512-{}", actual_checksum)
)

View file

@ -26,7 +26,7 @@ use crate::node::NodeResolution;
use crate::npm::NpmCache;
use crate::npm::NpmPackageReference;
use crate::npm::NpmPackageResolver;
use crate::npm::NpmRegistryApi;
use crate::npm::RealNpmRegistryApi;
use crate::progress_bar::ProgressBar;
use crate::resolver::CliResolver;
use crate::tools::check;
@ -211,13 +211,13 @@ impl ProcState {
let emit_cache = EmitCache::new(dir.gen_cache.clone());
let parsed_source_cache =
ParsedSourceCache::new(Some(dir.dep_analysis_db_file_path()));
let registry_url = NpmRegistryApi::default_url();
let registry_url = RealNpmRegistryApi::default_url();
let npm_cache = NpmCache::from_deno_dir(
&dir,
cli_options.cache_setting(),
progress_bar.clone(),
);
let api = NpmRegistryApi::new(
let api = RealNpmRegistryApi::new(
registry_url,
npm_cache.clone(),
cli_options.cache_setting(),

View file

@ -1002,7 +1002,7 @@ fn lock_file_missing_top_level_package() {
let stderr = String::from_utf8(output.stderr).unwrap();
assert_eq!(
stderr,
"error: the lockfile (deno.lock) is corrupt. You can recreate it with --lock-write\n"
"error: failed reading lockfile 'deno.lock'\n\nCaused by:\n the lockfile is corrupt. You can recreate it with --lock-write\n"
);
}
@ -1054,6 +1054,182 @@ fn auto_discover_lock_file() {
));
}
#[test]
fn peer_deps_with_copied_folders_and_lockfile() {
let _server = http_server();
let deno_dir = util::new_deno_dir();
let temp_dir = util::TempDir::new();
// write empty config file
temp_dir.write("deno.json", "{}");
let test_folder_path = test_util::testdata_path()
.join("npm")
.join("peer_deps_with_copied_folders");
let main_contents =
std::fs::read_to_string(test_folder_path.join("main.ts")).unwrap();
temp_dir.write("./main.ts", main_contents);
let deno = util::deno_cmd_with_deno_dir(&deno_dir)
.current_dir(temp_dir.path())
.arg("run")
.arg("--unstable")
.arg("-A")
.arg("main.ts")
.envs(env_vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
let output = deno.wait_with_output().unwrap();
assert!(output.status.success());
let expected_output =
std::fs::read_to_string(test_folder_path.join("main.out")).unwrap();
assert_eq!(String::from_utf8(output.stderr).unwrap(), expected_output);
assert!(temp_dir.path().join("deno.lock").exists());
let grandchild_path = deno_dir
.path()
.join("npm")
.join("localhost_4545")
.join("npm")
.join("registry")
.join("@denotest")
.join("peer-dep-test-grandchild");
assert!(grandchild_path.join("1.0.0").exists());
assert!(grandchild_path.join("1.0.0_1").exists()); // copy folder, which is hardlinked
// run again
let deno = util::deno_cmd_with_deno_dir(&deno_dir)
.current_dir(temp_dir.path())
.arg("run")
.arg("--unstable")
.arg("-A")
.arg("main.ts")
.envs(env_vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
let output = deno.wait_with_output().unwrap();
assert_eq!(String::from_utf8(output.stderr).unwrap(), "1\n2\n");
assert!(output.status.success());
let deno = util::deno_cmd_with_deno_dir(&deno_dir)
.current_dir(temp_dir.path())
.arg("run")
.arg("--unstable")
.arg("--reload")
.arg("-A")
.arg("main.ts")
.envs(env_vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
let output = deno.wait_with_output().unwrap();
assert_eq!(String::from_utf8(output.stderr).unwrap(), expected_output);
assert!(output.status.success());
// now run with local node modules
let deno = util::deno_cmd_with_deno_dir(&deno_dir)
.current_dir(temp_dir.path())
.arg("run")
.arg("--unstable")
.arg("--node-modules-dir")
.arg("-A")
.arg("main.ts")
.envs(env_vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
let output = deno.wait_with_output().unwrap();
assert_eq!(String::from_utf8(output.stderr).unwrap(), "1\n2\n");
assert!(output.status.success());
let deno_folder = temp_dir.path().join("node_modules").join(".deno");
assert!(deno_folder
.join("@denotest+peer-dep-test-grandchild@1.0.0")
.exists());
assert!(deno_folder
.join("@denotest+peer-dep-test-grandchild@1.0.0_1")
.exists()); // copy folder
// now run again with local node modules
let deno = util::deno_cmd_with_deno_dir(&deno_dir)
.current_dir(temp_dir.path())
.arg("run")
.arg("--unstable")
.arg("--node-modules-dir")
.arg("-A")
.arg("main.ts")
.envs(env_vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
let output = deno.wait_with_output().unwrap();
assert!(output.status.success());
assert_eq!(String::from_utf8(output.stderr).unwrap(), "1\n2\n");
// now ensure it works with reloading
let deno = util::deno_cmd_with_deno_dir(&deno_dir)
.current_dir(temp_dir.path())
.arg("run")
.arg("--unstable")
.arg("--node-modules-dir")
.arg("--reload")
.arg("-A")
.arg("main.ts")
.envs(env_vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
let output = deno.wait_with_output().unwrap();
assert!(output.status.success());
assert_eq!(String::from_utf8(output.stderr).unwrap(), expected_output);
// now ensure it works with reloading and no lockfile
let deno = util::deno_cmd_with_deno_dir(&deno_dir)
.current_dir(temp_dir.path())
.arg("run")
.arg("--unstable")
.arg("--node-modules-dir")
.arg("--no-lock")
.arg("--reload")
.arg("-A")
.arg("main.ts")
.envs(env_vars())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap();
let output = deno.wait_with_output().unwrap();
assert_eq!(String::from_utf8(output.stderr).unwrap(), expected_output);
assert!(output.status.success());
}
itest!(info_peer_deps {
args: "info --quiet --unstable npm/peer_deps_with_copied_folders/main.ts",
output: "npm/peer_deps_with_copied_folders/main_info.out",
exit_code: 0,
envs: env_vars(),
http_server: true,
});
itest!(info_peer_deps_json {
args:
"info --quiet --unstable --json npm/peer_deps_with_copied_folders/main.ts",
output: "npm/peer_deps_with_copied_folders/main_info_json.out",
exit_code: 0,
envs: env_vars(),
http_server: true,
});
fn env_vars_no_sync_download() -> Vec<(String, String)> {
vec![
("DENO_NODE_COMPAT_URL".to_string(), util::std_file_url()),

View file

@ -0,0 +1,10 @@
Download http://localhost:4545/npm/registry/@denotest/peer-dep-test-child
Download http://localhost:4545/npm/registry/@denotest/peer-dep-test-grandchild
Download http://localhost:4545/npm/registry/@denotest/peer-dep-test-peer
Download http://localhost:4545/npm/registry/@denotest/peer-dep-test-child/1.0.0.tgz
Download http://localhost:4545/npm/registry/@denotest/peer-dep-test-child/2.0.0.tgz
Download http://localhost:4545/npm/registry/@denotest/peer-dep-test-grandchild/1.0.0.tgz
Download http://localhost:4545/npm/registry/@denotest/peer-dep-test-peer/1.0.0.tgz
Download http://localhost:4545/npm/registry/@denotest/peer-dep-test-peer/2.0.0.tgz
1
2

View file

@ -0,0 +1,5 @@
import version1 from "npm:@denotest/peer-dep-test-child@1";
import version2 from "npm:@denotest/peer-dep-test-child@2";
console.error(version1);
console.error(version2);

View file

@ -0,0 +1,14 @@
local: [WILDCARD]main.ts
type: TypeScript
dependencies: 6 unique
size: [WILDCARD]
file:///[WILDCARD]/testdata/npm/peer_deps_with_copied_folders/main.ts (171B)
├─┬ npm:@denotest/peer-dep-test-child@1 - 1.0.0 ([WILDCARD])
│ ├─┬ npm:@denotest/peer-dep-test-grandchild@1.0.0_@denotest+peer-dep-test-peer@1.0.0 ([WILDCARD])
│ │ └── npm:@denotest/peer-dep-test-peer@1.0.0 ([WILDCARD])
│ └── npm:@denotest/peer-dep-test-peer@1.0.0 ([WILDCARD])
└─┬ npm:@denotest/peer-dep-test-child@2 - 2.0.0 ([WILDCARD])
├─┬ npm:@denotest/peer-dep-test-grandchild@1.0.0_@denotest+peer-dep-test-peer@2.0.0 ([WILDCARD])
│ └── npm:@denotest/peer-dep-test-peer@2.0.0 ([WILDCARD])
└── npm:@denotest/peer-dep-test-peer@2.0.0 ([WILDCARD])

View file

@ -0,0 +1,95 @@
{
"roots": [
"[WILDCARD]/npm/peer_deps_with_copied_folders/main.ts"
],
"modules": [
{
"dependencies": [
{
"specifier": "npm:@denotest/peer-dep-test-child@1",
"code": {
"specifier": "npm:@denotest/peer-dep-test-child@1",
"span": {
"start": {
"line": 0,
"character": 21
},
"end": {
"line": 0,
"character": 58
}
}
},
"npmPackage": "@denotest/peer-dep-test-child@1.0.0_@denotest+peer-dep-test-peer@1.0.0"
},
{
"specifier": "npm:@denotest/peer-dep-test-child@2",
"code": {
"specifier": "npm:@denotest/peer-dep-test-child@2",
"span": {
"start": {
"line": 1,
"character": 21
},
"end": {
"line": 1,
"character": 58
}
}
},
"npmPackage": "@denotest/peer-dep-test-child@2.0.0_@denotest+peer-dep-test-peer@2.0.0"
}
],
"kind": "esm",
"local": "[WILDCARD]main.ts",
"emit": null,
"map": null,
"size": 171,
"mediaType": "TypeScript",
"specifier": "file://[WILDCARD]/main.ts"
}
],
"redirects": {},
"npmPackages": {
"@denotest/peer-dep-test-child@1.0.0_@denotest+peer-dep-test-peer@1.0.0": {
"name": "@denotest/peer-dep-test-child",
"version": "1.0.0",
"dependencies": [
"@denotest/peer-dep-test-grandchild@1.0.0_@denotest+peer-dep-test-peer@1.0.0",
"@denotest/peer-dep-test-peer@1.0.0"
]
},
"@denotest/peer-dep-test-child@2.0.0_@denotest+peer-dep-test-peer@2.0.0": {
"name": "@denotest/peer-dep-test-child",
"version": "2.0.0",
"dependencies": [
"@denotest/peer-dep-test-grandchild@1.0.0_@denotest+peer-dep-test-peer@2.0.0",
"@denotest/peer-dep-test-peer@2.0.0"
]
},
"@denotest/peer-dep-test-grandchild@1.0.0_@denotest+peer-dep-test-peer@1.0.0": {
"name": "@denotest/peer-dep-test-grandchild",
"version": "1.0.0",
"dependencies": [
"@denotest/peer-dep-test-peer@1.0.0"
]
},
"@denotest/peer-dep-test-grandchild@1.0.0_@denotest+peer-dep-test-peer@2.0.0": {
"name": "@denotest/peer-dep-test-grandchild",
"version": "1.0.0",
"dependencies": [
"@denotest/peer-dep-test-peer@2.0.0"
]
},
"@denotest/peer-dep-test-peer@1.0.0": {
"name": "@denotest/peer-dep-test-peer",
"version": "1.0.0",
"dependencies": []
},
"@denotest/peer-dep-test-peer@2.0.0": {
"name": "@denotest/peer-dep-test-peer",
"version": "2.0.0",
"dependencies": []
}
}
}

View file

@ -0,0 +1 @@
module.exports = require("@denotest/peer-dep-test-grandchild");

View file

@ -0,0 +1,8 @@
{
"name": "@denotest/peer-dep-test-child",
"version": "1.0.0",
"dependencies": {
"@denotest/peer-dep-test-grandchild": "*",
"@denotest/peer-dep-test-peer": "^1"
}
}

View file

@ -0,0 +1 @@
module.exports = require("@denotest/peer-dep-test-grandchild");

View file

@ -0,0 +1,8 @@
{
"name": "@denotest/peer-dep-test-child",
"version": "2.0.0",
"dependencies": {
"@denotest/peer-dep-test-grandchild": "*",
"@denotest/peer-dep-test-peer": "^2"
}
}

View file

@ -0,0 +1 @@
module.exports = require("@denotest/peer-dep-test-peer");

View file

@ -0,0 +1 @@
module.exports = require("./dist/index");

View file

@ -0,0 +1,7 @@
{
"name": "@denotest/peer-dep-test-child-2",
"version": "1.0.0",
"peerDependencies": {
"@denotest/peer-dep-test-peer": "*"
}
}

View file

@ -0,0 +1 @@
module.exports = 1;

View file

@ -0,0 +1,4 @@
{
"name": "@denotest/peer-dep-test-peer",
"version": "1.0.0"
}

View file

@ -0,0 +1 @@
module.exports = 2;

View file

@ -0,0 +1,4 @@
{
"name": "@denotest/peer-dep-test-peer",
"version": "2.0.0"
}

View file

@ -157,7 +157,8 @@ fn add_npm_packages_to_json(
});
if let Some(pkg) = maybe_package {
if let Some(module) = module.as_object_mut() {
module.insert("npmPackage".to_string(), format!("{}", pkg.id).into());
module
.insert("npmPackage".to_string(), pkg.id.as_serialized().into());
// change the "kind" to be "npm"
module.insert("kind".to_string(), "npm".into());
}
@ -190,7 +191,7 @@ fn add_npm_packages_to_json(
{
dep.insert(
"npmPackage".to_string(),
format!("{}", pkg.id).into(),
pkg.id.as_serialized().into(),
);
}
}
@ -212,11 +213,11 @@ fn add_npm_packages_to_json(
deps.sort();
let deps = deps
.into_iter()
.map(|id| serde_json::Value::String(format!("{}", id)))
.map(|id| serde_json::Value::String(id.as_serialized()))
.collect::<Vec<_>>();
kv.insert("dependencies".to_string(), deps.into());
json_packages.insert(format!("{}", &pkg.id), kv.into());
json_packages.insert(pkg.id.as_serialized(), kv.into());
}
json.insert("npmPackages".to_string(), json_packages.into());
@ -504,7 +505,7 @@ impl<'a> GraphDisplayContext<'a> {
None => Specifier(module.specifier.clone()),
};
let was_seen = !self.seen.insert(match &package_or_specifier {
Package(package) => package.id.to_string(),
Package(package) => package.id.as_serialized(),
Specifier(specifier) => specifier.to_string(),
});
let header_text = if was_seen {
@ -572,11 +573,14 @@ impl<'a> GraphDisplayContext<'a> {
for dep_id in deps.into_iter() {
let maybe_size = self.npm_info.package_sizes.get(dep_id).cloned();
let size_str = maybe_size_to_text(maybe_size);
let mut child =
TreeNode::from_text(format!("npm:{} {}", dep_id, size_str));
let mut child = TreeNode::from_text(format!(
"npm:{} {}",
dep_id.as_serialized(),
size_str
));
if let Some(package) = self.npm_info.packages.get(dep_id) {
if !package.dependencies.is_empty() {
if self.seen.contains(&package.id.to_string()) {
if self.seen.contains(&package.id.as_serialized()) {
child.text = format!("{} {}", child.text, colors::gray("*"));
} else {
let package = package.clone();