
fix: bump cache sqlite dbs to v2 for WAL journal mode change (#24030)

In https://github.com/denoland/deno/pull/23955 we changed the sqlite db
journal mode to WAL. This causes issues when someone runs an old version
of Deno (which still opens these databases in TRUNCATE journal mode)
alongside a new version, because the two fight against each other over
the same database files.
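
For readers unfamiliar with the failure mode: journal_mode=WAL is a persistent property of the sqlite file itself rather than of a single connection, and switching out of WAL needs exclusive access, so two Deno versions issuing different journal-mode pragmas against the same cache file keep flipping (or failing to flip) the file's mode. A minimal standalone sketch of that tug-of-war, not part of this commit (the cache.db path and the open_cache helper are invented for illustration):

use rusqlite::Connection;

fn open_cache(path: &str, mode: &str) -> rusqlite::Result<Connection> {
  let conn = Connection::open(path)?;
  // `PRAGMA journal_mode=...` answers with the mode actually in effect;
  // sqlite silently keeps the old mode if it cannot switch (e.g. because
  // another process still holds a connection to the file).
  let effective: String =
    conn.query_row(&format!("PRAGMA journal_mode={mode}"), [], |row| row.get(0))?;
  println!("requested {mode}, got {effective}");
  Ok(conn)
}

fn main() -> rusqlite::Result<()> {
  // An old Deno does the equivalent of the first call, a new Deno the
  // second, both against the same v1 cache file.
  let _old = open_cache("cache.db", "TRUNCATE")?;
  let _new = open_cache("cache.db", "WAL")?;
  Ok(())
}

Renaming the db files to *_v2 sidesteps this entirely: the two versions simply never open the same file.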
David Sherret 2024-05-29 14:38:18 -04:00 committed by GitHub
parent fada25b0dd
commit 94f040ac28
22 changed files with 347 additions and 263 deletions

Cargo.lock

@@ -1278,9 +1278,9 @@ dependencies = [
 
 [[package]]
 name = "deno_config"
-version = "0.16.3"
+version = "0.16.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "971658ccd8dbd7de18f44d2270a6881a78a88f123584fc6497189ee5d20aa307"
+checksum = "3d21c7b688ff6cb411895a93bf1d6734ed654c3a7eb9b502f96098f6659df0c5"
 dependencies = [
  "anyhow",
  "glob",
@@ -1478,9 +1478,9 @@ dependencies = [
 
 [[package]]
 name = "deno_graph"
-version = "0.77.0"
+version = "0.77.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e94a8739bb6087061f8419e4c1719459a144ef477ab649dca597a0603290ec82"
+checksum = "192d6f61d5418c928d29b2666b916df65a3b5677ce454fc6a4b4969983a02abe"
 dependencies = [
  "anyhow",
  "async-trait",


@@ -65,11 +65,11 @@ winres.workspace = true
 [dependencies]
 deno_ast = { workspace = true, features = ["bundler", "cjs", "codegen", "proposal", "react", "sourcemap", "transforms", "typescript", "view", "visit"] }
 deno_cache_dir = { workspace = true }
-deno_config = "=0.16.3"
+deno_config = "=0.16.4"
 deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] }
 deno_doc = { version = "=0.137.0", features = ["html", "syntect"] }
 deno_emit = "=0.41.0"
-deno_graph = { version = "=0.77.0", features = ["tokio_executor"] }
+deno_graph = { version = "=0.77.2", features = ["tokio_executor"] }
 deno_lint = { version = "=0.58.4", features = ["docs"] }
 deno_lockfile.workspace = true
 deno_npm = "=0.21.0"

cli/cache/cache_db.rs

@@ -14,6 +14,48 @@ use std::path::Path;
 use std::path::PathBuf;
 use std::sync::Arc;
 
+use super::FastInsecureHasher;
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+pub struct CacheDBHash(u64);
+
+impl CacheDBHash {
+  pub fn new(hash: u64) -> Self {
+    Self(hash)
+  }
+
+  pub fn from_source(source: impl std::hash::Hash) -> Self {
+    Self::new(
+      // always write in the deno version just in case
+      // the clearing on deno version change doesn't work
+      FastInsecureHasher::new_deno_versioned()
+        .write_hashable(source)
+        .finish(),
+    )
+  }
+}
+
+impl rusqlite::types::ToSql for CacheDBHash {
+  fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
+    Ok(rusqlite::types::ToSqlOutput::Owned(
+      // sqlite doesn't support u64, but it does support i64 so store
+      // this value "incorrectly" as i64 then convert back to u64 on read
+      rusqlite::types::Value::Integer(self.0 as i64),
+    ))
+  }
+}
+
+impl rusqlite::types::FromSql for CacheDBHash {
+  fn column_result(
+    value: rusqlite::types::ValueRef,
+  ) -> rusqlite::types::FromSqlResult<Self> {
+    match value {
+      rusqlite::types::ValueRef::Integer(i) => Ok(Self::new(i as u64)),
+      _ => Err(rusqlite::types::FromSqlError::InvalidType),
+    }
+  }
+}
+
 /// What should the cache do on failure?
 #[derive(Default)]
 pub enum CacheFailure {
@@ -41,21 +83,16 @@ pub struct CacheDBConfiguration {
 impl CacheDBConfiguration {
   fn create_combined_sql(&self) -> String {
     format!(
-      "
-      PRAGMA journal_mode=WAL;
-      PRAGMA synchronous=NORMAL;
-      PRAGMA temp_store=memory;
-      PRAGMA page_size=4096;
-      PRAGMA mmap_size=6000000;
-      PRAGMA optimize;
-
-      CREATE TABLE IF NOT EXISTS info (
-        key TEXT PRIMARY KEY,
-        value TEXT NOT NULL
-      );
-
-      {}
-      ",
+      concat!(
+        "PRAGMA journal_mode=WAL;",
+        "PRAGMA synchronous=NORMAL;",
+        "PRAGMA temp_store=memory;",
+        "PRAGMA page_size=4096;",
+        "PRAGMA mmap_size=6000000;",
+        "PRAGMA optimize;",
+        "CREATE TABLE IF NOT EXISTS info (key TEXT PRIMARY KEY, value TEXT NOT NULL);",
+        "{}",
+      ),
       self.table_initializer
     )
   }
@@ -520,4 +557,32 @@ mod tests {
     })
     .expect_err("Should have failed");
   }
+
+  #[test]
+  fn cache_db_hash_max_u64_value() {
+    assert_same_serialize_deserialize(CacheDBHash::new(u64::MAX));
+    assert_same_serialize_deserialize(CacheDBHash::new(u64::MAX - 1));
+    assert_same_serialize_deserialize(CacheDBHash::new(u64::MIN));
+    assert_same_serialize_deserialize(CacheDBHash::new(u64::MIN + 1));
+  }
+
+  fn assert_same_serialize_deserialize(original_hash: CacheDBHash) {
+    use rusqlite::types::FromSql;
+    use rusqlite::types::ValueRef;
+    use rusqlite::ToSql;
+
+    let value = original_hash.to_sql().unwrap();
+    match value {
+      rusqlite::types::ToSqlOutput::Owned(rusqlite::types::Value::Integer(
+        value,
+      )) => {
+        let value_ref = ValueRef::Integer(value);
+        assert_eq!(
+          original_hash,
+          CacheDBHash::column_result(value_ref).unwrap()
+        );
+      }
+      _ => unreachable!(),
+    }
+  }
 }
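
The `as` casts in the ToSql/FromSql impls above are the whole storage trick: u64-to-i64 and back is a lossless two's-complement reinterpretation, which is exactly what the new cache_db_hash_max_u64_value test pins down. A self-contained sketch of the invariant (illustrative, not from the commit):

fn main() {
  for h in [u64::MIN, u64::MIN + 1, u64::MAX - 1, u64::MAX] {
    let stored = h as i64; // u64::MAX is stored "incorrectly" as -1
    let read_back = stored as u64; // ...and reads back as u64::MAX
    assert_eq!(h, read_back);
  }
}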

cli/cache/check.rs

@@ -2,6 +2,7 @@
 
 use super::cache_db::CacheDB;
 use super::cache_db::CacheDBConfiguration;
+use super::cache_db::CacheDBHash;
 use super::cache_db::CacheFailure;
 use deno_ast::ModuleSpecifier;
 use deno_core::error::AnyError;
@@ -9,13 +10,13 @@ use deno_runtime::deno_webstorage::rusqlite::params;
 pub static TYPE_CHECK_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
   table_initializer: concat!(
-    "CREATE TABLE IF NOT EXISTS checkcache (
-      check_hash TEXT PRIMARY KEY
-    );",
-    "CREATE TABLE IF NOT EXISTS tsbuildinfo (
-      specifier TEXT PRIMARY KEY,
-      text TEXT NOT NULL
-    );",
+    "CREATE TABLE IF NOT EXISTS checkcache (",
+    "check_hash INT PRIMARY KEY",
+    ");",
+    "CREATE TABLE IF NOT EXISTS tsbuildinfo (",
+    "specifier TEXT PRIMARY KEY,",
+    "text TEXT NOT NULL",
+    ");",
   ),
   on_version_change: concat!(
     "DELETE FROM checkcache;",
@@ -37,7 +38,7 @@ impl TypeCheckCache {
     Self(db)
   }
 
-  pub fn has_check_hash(&self, hash: u64) -> bool {
+  pub fn has_check_hash(&self, hash: CacheDBHash) -> bool {
     match self.hash_check_hash_result(hash) {
       Ok(val) => val,
       Err(err) => {
@@ -52,14 +53,17 @@ impl TypeCheckCache {
     }
   }
 
-  fn hash_check_hash_result(&self, hash: u64) -> Result<bool, AnyError> {
+  fn hash_check_hash_result(
+    &self,
+    hash: CacheDBHash,
+  ) -> Result<bool, AnyError> {
     self.0.exists(
       "SELECT * FROM checkcache WHERE check_hash=?1 LIMIT 1",
-      params![hash.to_string()],
+      params![hash],
     )
   }
 
-  pub fn add_check_hash(&self, check_hash: u64) {
+  pub fn add_check_hash(&self, check_hash: CacheDBHash) {
     if let Err(err) = self.add_check_hash_result(check_hash) {
       if cfg!(debug_assertions) {
         panic!("Error saving check hash: {err}");
@@ -69,13 +73,16 @@ impl TypeCheckCache {
     }
   }
 
-  fn add_check_hash_result(&self, check_hash: u64) -> Result<(), AnyError> {
+  fn add_check_hash_result(
+    &self,
+    check_hash: CacheDBHash,
+  ) -> Result<(), AnyError> {
     let sql = "
     INSERT OR REPLACE INTO
       checkcache (check_hash)
     VALUES
       (?1)";
-    self.0.execute(sql, params![&check_hash.to_string(),])?;
+    self.0.execute(sql, params![check_hash])?;
     Ok(())
   }
@@ -123,10 +130,10 @@ mod test {
     let conn = CacheDB::in_memory(&TYPE_CHECK_CACHE_DB, "1.0.0");
     let cache = TypeCheckCache::new(conn);
 
-    assert!(!cache.has_check_hash(1));
-    cache.add_check_hash(1);
-    assert!(cache.has_check_hash(1));
-    assert!(!cache.has_check_hash(2));
+    assert!(!cache.has_check_hash(CacheDBHash::new(1)));
+    cache.add_check_hash(CacheDBHash::new(1));
+    assert!(cache.has_check_hash(CacheDBHash::new(1)));
+    assert!(!cache.has_check_hash(CacheDBHash::new(2)));
 
     let specifier1 = ModuleSpecifier::parse("file:///test.json").unwrap();
     assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
@@ -137,9 +144,9 @@ mod test {
     let conn = cache.0.recreate_with_version("2.0.0");
     let cache = TypeCheckCache::new(conn);
 
-    assert!(!cache.has_check_hash(1));
-    cache.add_check_hash(1);
-    assert!(cache.has_check_hash(1));
+    assert!(!cache.has_check_hash(CacheDBHash::new(1)));
+    cache.add_check_hash(CacheDBHash::new(1));
+    assert!(cache.has_check_hash(CacheDBHash::new(1)));
     assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
     cache.set_tsbuildinfo(&specifier1, "test");
     assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
@@ -148,13 +155,13 @@ mod test {
     let conn = cache.0.recreate_with_version("2.0.0");
     let cache = TypeCheckCache::new(conn);
 
-    assert!(cache.has_check_hash(1));
-    assert!(!cache.has_check_hash(2));
+    assert!(cache.has_check_hash(CacheDBHash::new(1)));
+    assert!(!cache.has_check_hash(CacheDBHash::new(2)));
     assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
 
     // adding when already exists should not cause issue
-    cache.add_check_hash(1);
-    assert!(cache.has_check_hash(1));
+    cache.add_check_hash(CacheDBHash::new(1));
+    assert!(cache.has_check_hash(CacheDBHash::new(1)));
     cache.set_tsbuildinfo(&specifier1, "other");
     assert_eq!(
       cache.get_tsbuildinfo(&specifier1),


@@ -7,16 +7,19 @@ use deno_runtime::deno_webstorage::rusqlite::params;
 
 use super::cache_db::CacheDB;
 use super::cache_db::CacheDBConfiguration;
+use super::cache_db::CacheDBHash;
 use super::cache_db::CacheFailure;
 
 pub static CODE_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
-  table_initializer: "CREATE TABLE IF NOT EXISTS codecache (
-    specifier TEXT NOT NULL,
-    type TEXT NOT NULL,
-    source_hash TEXT NOT NULL,
-    data BLOB NOT NULL,
-    PRIMARY KEY (specifier, type)
-  );",
+  table_initializer: concat!(
+    "CREATE TABLE IF NOT EXISTS codecache (",
+    "specifier TEXT NOT NULL,",
+    "type INTEGER NOT NULL,",
+    "source_hash INTEGER NOT NULL,",
+    "data BLOB NOT NULL,",
+    "PRIMARY KEY (specifier, type)",
+    ");"
+  ),
   on_version_change: "DELETE FROM codecache;",
   preheat_queries: &[],
   on_failure: CacheFailure::Blackhole,
@@ -59,7 +62,7 @@ impl CodeCache {
     Self::ensure_ok(self.inner.get_sync(
       specifier.as_str(),
       code_cache_type,
-      &source_hash.to_string(),
+      CacheDBHash::new(source_hash),
     ))
   }
@@ -73,7 +76,7 @@ impl CodeCache {
     Self::ensure_ok(self.inner.set_sync(
       specifier.as_str(),
       code_cache_type,
-      &source_hash.to_string(),
+      CacheDBHash::new(source_hash),
       data,
     ));
   }
@@ -113,7 +116,7 @@ impl CodeCacheInner {
     &self,
     specifier: &str,
     code_cache_type: code_cache::CodeCacheType,
-    source_hash: &str,
+    source_hash: CacheDBHash,
   ) -> Result<Option<Vec<u8>>, AnyError> {
     let query = "
       SELECT
@@ -123,7 +126,11 @@ impl CodeCacheInner {
       WHERE
         specifier=?1 AND type=?2 AND source_hash=?3
       LIMIT 1";
-    let params = params![specifier, code_cache_type.as_str(), source_hash,];
+    let params = params![
+      specifier,
+      serialize_code_cache_type(code_cache_type),
+      source_hash,
+    ];
     self.conn.query_row(query, params, |row| {
       let value: Vec<u8> = row.get(0)?;
       Ok(value)
@@ -134,7 +141,7 @@ impl CodeCacheInner {
     &self,
     specifier: &str,
     code_cache_type: code_cache::CodeCacheType,
-    source_hash: &str, // use string because sqlite doesn't have a u64 type
+    source_hash: CacheDBHash,
     data: &[u8],
   ) -> Result<(), AnyError> {
     let sql = "
@@ -142,13 +149,26 @@ impl CodeCacheInner {
         codecache (specifier, type, source_hash, data)
       VALUES
         (?1, ?2, ?3, ?4)";
-    let params =
-      params![specifier, code_cache_type.as_str(), source_hash, data];
+    let params = params![
+      specifier,
+      serialize_code_cache_type(code_cache_type),
+      source_hash,
+      data
+    ];
     self.conn.execute(sql, params)?;
     Ok(())
   }
 }
 
+fn serialize_code_cache_type(
+  code_cache_type: code_cache::CodeCacheType,
+) -> i64 {
+  match code_cache_type {
+    code_cache::CodeCacheType::Script => 0,
+    code_cache::CodeCacheType::EsModule => 1,
+  }
+}
+
 #[cfg(test)]
 mod test {
   use super::*;
@@ -162,7 +182,7 @@ mod test {
       .get_sync(
         "file:///foo/bar.js",
         code_cache::CodeCacheType::EsModule,
-        "hash",
+        CacheDBHash::new(1),
       )
       .unwrap()
       .is_none());
@@ -171,7 +191,7 @@ mod test {
       .set_sync(
         "file:///foo/bar.js",
         code_cache::CodeCacheType::EsModule,
-        "hash",
+        CacheDBHash::new(1),
         &data_esm,
       )
       .unwrap();
@@ -180,7 +200,7 @@ mod test {
       .get_sync(
         "file:///foo/bar.js",
         code_cache::CodeCacheType::EsModule,
-        "hash",
+        CacheDBHash::new(1),
       )
       .unwrap()
       .unwrap(),
@@ -191,7 +211,7 @@ mod test {
      .get_sync(
        "file:///foo/bar.js",
        code_cache::CodeCacheType::Script,
-        "hash",
+        CacheDBHash::new(1),
      )
      .unwrap()
      .is_none());
@@ -200,7 +220,7 @@ mod test {
      .set_sync(
        "file:///foo/bar.js",
        code_cache::CodeCacheType::Script,
-        "hash",
+        CacheDBHash::new(1),
        &data_script,
      )
      .unwrap();
@@ -209,7 +229,7 @@ mod test {
      .get_sync(
        "file:///foo/bar.js",
        code_cache::CodeCacheType::Script,
-        "hash",
+        CacheDBHash::new(1),
      )
      .unwrap()
      .unwrap(),
@@ -220,7 +240,7 @@ mod test {
      .get_sync(
        "file:///foo/bar.js",
        code_cache::CodeCacheType::EsModule,
-        "hash",
+        CacheDBHash::new(1),
      )
      .unwrap()
      .unwrap(),

cli/cache/common.rs

@@ -3,16 +3,17 @@
 
 use std::hash::Hasher;
 
 /// A very fast insecure hasher that uses the xxHash algorithm.
+#[derive(Default)]
 pub struct FastInsecureHasher(twox_hash::XxHash64);
 
 impl FastInsecureHasher {
-  pub fn new() -> Self {
-    Self::default()
+  pub fn new_without_deno_version() -> Self {
+    Self(Default::default())
   }
 
-  pub fn hash(hashable: impl std::hash::Hash) -> u64 {
-    Self::new().write_hashable(hashable).finish()
+  pub fn new_deno_versioned() -> Self {
+    let mut hasher = Self::new_without_deno_version();
+    hasher.write_str(crate::version::deno());
+    hasher
   }
 
   pub fn write_str(&mut self, text: &str) -> &mut Self {
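
Illustrative use of the two constructors (the input string is made up): a deno-versioned hash changes on every Deno upgrade, so cache entries keyed by it invalidate automatically, while the version-free constructor is for hashes that account for the version some other way, as compute_emit_hash in emit.rs below does via its cli_version parameter.

let versioned = FastInsecureHasher::new_deno_versioned()
  .write_str("export const a = 1;")
  .finish();
let unversioned = FastInsecureHasher::new_without_deno_version()
  .write_str("export const a = 1;")
  .finish();
// the two differ because the first also mixed in crate::version::deno()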

cli/cache/deno_dir.rs

@@ -79,40 +79,46 @@ impl DenoDir {
     self.root.display()
   }
 
+  /// Path for the V8 code cache.
+  pub fn code_cache_db_file_path(&self) -> PathBuf {
+    // bump this version name to invalidate the entire cache
+    self.root.join("v8_code_cache_v2")
+  }
+
   /// Path for the incremental cache used for formatting.
   pub fn fmt_incremental_cache_db_file_path(&self) -> PathBuf {
     // bump this version name to invalidate the entire cache
-    self.root.join("fmt_incremental_cache_v1")
+    self.root.join("fmt_incremental_cache_v2")
   }
 
   /// Path for the incremental cache used for linting.
   pub fn lint_incremental_cache_db_file_path(&self) -> PathBuf {
     // bump this version name to invalidate the entire cache
-    self.root.join("lint_incremental_cache_v1")
+    self.root.join("lint_incremental_cache_v2")
   }
 
   /// Path for caching swc dependency analysis.
   pub fn dep_analysis_db_file_path(&self) -> PathBuf {
     // bump this version name to invalidate the entire cache
-    self.root.join("dep_analysis_cache_v1")
+    self.root.join("dep_analysis_cache_v2")
   }
 
   /// Path for the cache used for fast check.
   pub fn fast_check_cache_db_file_path(&self) -> PathBuf {
     // bump this version name to invalidate the entire cache
-    self.root.join("fast_check_cache_v1")
+    self.root.join("fast_check_cache_v2")
   }
 
   /// Path for caching node analysis.
   pub fn node_analysis_db_file_path(&self) -> PathBuf {
     // bump this version name to invalidate the entire cache
-    self.root.join("node_analysis_cache_v1")
+    self.root.join("node_analysis_cache_v2")
   }
 
   /// Path for the cache used for type checking.
   pub fn type_checking_cache_db_file_path(&self) -> PathBuf {
     // bump this version name to invalidate the entire cache
-    self.root.join("check_cache_v1")
+    self.root.join("check_cache_v2")
   }
 
   /// Path to the registries cache, used for the lsp.
@@ -141,12 +147,6 @@ impl DenoDir {
     self.root.join("npm")
   }
 
-  /// Path for the V8 code cache.
-  pub fn code_cache_db_file_path(&self) -> PathBuf {
-    // bump this version name to invalidate the entire cache
-    self.root.join("v8_code_cache_v1")
-  }
-
   /// Path used for the REPL history file.
   /// Can be overridden or disabled by setting `DENO_REPL_HISTORY` environment variable.
   pub fn repl_history_file_path(&self) -> Option<PathBuf> {

cli/cache/emit.rs

@@ -14,8 +14,8 @@ use super::FastInsecureHasher;
 
 #[derive(Debug, Deserialize, Serialize)]
 struct EmitMetadata {
-  pub source_hash: String,
-  pub emit_hash: String,
+  pub source_hash: u64,
+  pub emit_hash: u64,
 }
 
 /// The cache that stores previously emitted files.
@@ -52,7 +52,7 @@ impl EmitCache {
     // load and verify the meta data file is for this source and CLI version
     let bytes = self.disk_cache.get(&meta_filename).ok()?;
     let meta: EmitMetadata = serde_json::from_slice(&bytes).ok()?;
-    if meta.source_hash != expected_source_hash.to_string() {
+    if meta.source_hash != expected_source_hash {
       return None;
     }
@@ -112,7 +112,7 @@ impl EmitCache {
     // save the metadata
     let metadata = EmitMetadata {
-      source_hash: source_hash.to_string(),
+      source_hash,
       emit_hash: compute_emit_hash(code.as_bytes(), self.cli_version),
     };
     self
@@ -138,16 +138,15 @@ impl EmitCache {
   }
 }
 
-fn compute_emit_hash(bytes: &[u8], cli_version: &str) -> String {
+fn compute_emit_hash(bytes: &[u8], cli_version: &str) -> u64 {
   // it's ok to use an insecure hash here because
   // if someone can change the emit source then they
   // can also change the version hash
-  FastInsecureHasher::new()
+  FastInsecureHasher::new_without_deno_version() // use cli_version param instead
     .write(bytes)
     // emit should not be re-used between cli versions
-    .write(cli_version.as_bytes())
+    .write_str(cli_version)
     .finish()
-    .to_string()
 }
 
 #[cfg(test)]


@@ -7,13 +7,16 @@ use deno_runtime::deno_webstorage::rusqlite::params;
 
 use super::cache_db::CacheDB;
 use super::cache_db::CacheDBConfiguration;
+use super::cache_db::CacheDBHash;
 use super::cache_db::CacheFailure;
 
 pub static FAST_CHECK_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
-  table_initializer: "CREATE TABLE IF NOT EXISTS fastcheckcache (
-    hash TEXT PRIMARY KEY,
-    data TEXT NOT NULL
-  );",
+  table_initializer: concat!(
+    "CREATE TABLE IF NOT EXISTS fastcheckcache (",
+    "hash INTEGER PRIMARY KEY,",
+    "data TEXT NOT NULL",
+    ");"
+  ),
   on_version_change: "DELETE FROM fastcheckcache;",
   preheat_queries: &[],
   on_failure: CacheFailure::Blackhole,
@@ -81,13 +84,14 @@ impl FastCheckCacheInner {
       WHERE
         hash=?1
       LIMIT 1";
-    let res = self
-      .conn
-      // key is a string because SQLite can't handle u64
-      .query_row(query, params![key.as_u64().to_string()], |row| {
+    let res = self.conn.query_row(
+      query,
+      params![CacheDBHash::new(key.as_u64())],
+      |row| {
         let value: Vec<u8> = row.get(0)?;
         Ok(bincode::deserialize::<FastCheckCacheItem>(&value)?)
-      })?;
+      },
+    )?;
     Ok(res)
   }
@@ -103,7 +107,7 @@ impl FastCheckCacheInner {
         (?1, ?2)";
     self.conn.execute(
       sql,
-      params![key.as_u64().to_string(), &bincode::serialize(data)?],
+      params![CacheDBHash::new(key.as_u64()), &bincode::serialize(data)?],
     )?;
     Ok(())
   }
@@ -114,6 +118,7 @@ mod test {
   use std::collections::BTreeSet;
 
   use deno_ast::ModuleSpecifier;
+  use deno_graph::FastCheckCache as _;
   use deno_graph::FastCheckCacheModuleItem;
   use deno_graph::FastCheckCacheModuleItemDiagnostic;
   use deno_semver::package::PackageNv;
@@ -123,12 +128,14 @@ mod test {
   #[test]
   pub fn cache_general_use() {
     let conn = CacheDB::in_memory(&FAST_CHECK_CACHE_DB, "1.0.0");
-    let cache = FastCheckCacheInner::new(conn);
+    let cache = FastCheckCache::new(conn);
 
     let key = FastCheckCacheKey::build(
+      cache.hash_seed(),
       &PackageNv::from_str("@scope/a@1.0.0").unwrap(),
       &Default::default(),
     );
+    let cache = cache.inner;
     assert!(cache.get(key).unwrap().is_none());
     let value = FastCheckCacheItem {
       dependencies: BTreeSet::from([


@@ -6,23 +6,23 @@ use std::path::PathBuf;
 
 use deno_core::error::AnyError;
 use deno_core::parking_lot::Mutex;
-use deno_core::serde_json;
 use deno_core::unsync::spawn;
 use deno_core::unsync::JoinHandle;
 use deno_runtime::deno_webstorage::rusqlite::params;
-use serde::Serialize;
 
 use super::cache_db::CacheDB;
 use super::cache_db::CacheDBConfiguration;
+use super::cache_db::CacheDBHash;
 use super::cache_db::CacheFailure;
-use super::common::FastInsecureHasher;
 
 pub static INCREMENTAL_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
-  table_initializer: "CREATE TABLE IF NOT EXISTS incrementalcache (
-    file_path TEXT PRIMARY KEY,
-    state_hash TEXT NOT NULL,
-    source_hash TEXT NOT NULL
-  );",
+  table_initializer: concat!(
+    "CREATE TABLE IF NOT EXISTS incrementalcache (",
+    "file_path TEXT PRIMARY KEY,",
+    "state_hash INTEGER NOT NULL,",
+    "source_hash INTEGER NOT NULL",
+    ");"
+  ),
   on_version_change: "DELETE FROM incrementalcache;",
   preheat_queries: &[],
   // If the cache fails, just ignore all caching attempts
@@ -34,7 +34,7 @@ pub static INCREMENTAL_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
 pub struct IncrementalCache(IncrementalCacheInner);
 
 impl IncrementalCache {
-  pub fn new<TState: Serialize>(
+  pub fn new<TState: std::hash::Hash>(
     db: CacheDB,
     state: &TState,
     initial_file_paths: &[PathBuf],
@@ -56,24 +56,23 @@ impl IncrementalCache {
   }
 }
 
 enum ReceiverMessage {
-  Update(PathBuf, u64),
+  Update(PathBuf, CacheDBHash),
   Exit,
 }
 
 struct IncrementalCacheInner {
-  previous_hashes: HashMap<PathBuf, u64>,
+  previous_hashes: HashMap<PathBuf, CacheDBHash>,
   sender: tokio::sync::mpsc::UnboundedSender<ReceiverMessage>,
   handle: Mutex<Option<JoinHandle<()>>>,
 }
 
 impl IncrementalCacheInner {
-  pub fn new<TState: Serialize>(
+  pub fn new<TState: std::hash::Hash>(
     db: CacheDB,
     state: &TState,
     initial_file_paths: &[PathBuf],
   ) -> Self {
-    let state_hash =
-      FastInsecureHasher::hash(serde_json::to_string(state).unwrap());
+    let state_hash = CacheDBHash::from_source(state);
     let sql_cache = SqlIncrementalCache::new(db, state_hash);
     Self::from_sql_incremental_cache(sql_cache, initial_file_paths)
   }
@@ -113,13 +112,13 @@ impl IncrementalCacheInner {
   pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
     match self.previous_hashes.get(file_path) {
-      Some(hash) => *hash == FastInsecureHasher::hash(file_text),
+      Some(hash) => *hash == CacheDBHash::from_source(file_text),
       None => false,
     }
   }
 
   pub fn update_file(&self, file_path: &Path, file_text: &str) {
-    let hash = FastInsecureHasher::hash(file_text);
+    let hash = CacheDBHash::from_source(file_text);
     if let Some(previous_hash) = self.previous_hashes.get(file_path) {
       if *previous_hash == hash {
         return; // do not bother updating the db file because nothing has changed
@@ -146,15 +145,15 @@ struct SqlIncrementalCache {
   /// A hash of the state used to produce the formatting/linting other than
   /// the CLI version. This state is a hash of the configuration and ensures
   /// we format/lint a file when the configuration changes.
-  state_hash: u64,
+  state_hash: CacheDBHash,
 }
 
 impl SqlIncrementalCache {
-  pub fn new(conn: CacheDB, state_hash: u64) -> Self {
+  pub fn new(conn: CacheDB, state_hash: CacheDBHash) -> Self {
     Self { conn, state_hash }
   }
 
-  pub fn get_source_hash(&self, path: &Path) -> Option<u64> {
+  pub fn get_source_hash(&self, path: &Path) -> Option<CacheDBHash> {
     match self.get_source_hash_result(path) {
       Ok(option) => option,
       Err(err) => {
@@ -171,7 +170,7 @@ impl SqlIncrementalCache {
   fn get_source_hash_result(
     &self,
     path: &Path,
-  ) -> Result<Option<u64>, AnyError> {
+  ) -> Result<Option<CacheDBHash>, AnyError> {
     let query = "
       SELECT
         source_hash
@@ -183,10 +182,10 @@ impl SqlIncrementalCache {
       LIMIT 1";
     let res = self.conn.query_row(
       query,
-      params![path.to_string_lossy(), self.state_hash.to_string()],
+      params![path.to_string_lossy(), self.state_hash],
       |row| {
-        let hash: String = row.get(0)?;
-        Ok(hash.parse::<u64>()?)
+        let hash: CacheDBHash = row.get(0)?;
+        Ok(hash)
       },
     )?;
     Ok(res)
@@ -195,7 +194,7 @@ impl SqlIncrementalCache {
   pub fn set_source_hash(
     &self,
     path: &Path,
-    source_hash: u64,
+    source_hash: CacheDBHash,
   ) -> Result<(), AnyError> {
     let sql = "
       INSERT OR REPLACE INTO
@@ -204,11 +203,7 @@ impl SqlIncrementalCache {
         (?1, ?2, ?3)";
     self.conn.execute(
       sql,
-      params![
-        path.to_string_lossy(),
-        &self.state_hash.to_string(),
-        &source_hash,
-      ],
+      params![path.to_string_lossy(), self.state_hash, source_hash],
     )?;
     Ok(())
   }
@@ -223,51 +218,51 @@ mod test {
   #[test]
   pub fn sql_cache_general_use() {
     let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
-    let cache = SqlIncrementalCache::new(conn, 1);
+    let cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
     let path = PathBuf::from("/mod.ts");
 
     assert_eq!(cache.get_source_hash(&path), None);
-    cache.set_source_hash(&path, 2).unwrap();
-    assert_eq!(cache.get_source_hash(&path), Some(2));
+    cache.set_source_hash(&path, CacheDBHash::new(2)).unwrap();
+    assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
 
     // try changing the cli version (should clear)
     let conn = cache.conn.recreate_with_version("2.0.0");
-    let mut cache = SqlIncrementalCache::new(conn, 1);
+    let mut cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
     assert_eq!(cache.get_source_hash(&path), None);
 
     // add back the file to the cache
-    cache.set_source_hash(&path, 2).unwrap();
-    assert_eq!(cache.get_source_hash(&path), Some(2));
+    cache.set_source_hash(&path, CacheDBHash::new(2)).unwrap();
+    assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
 
     // try changing the state hash
-    cache.state_hash = 2;
+    cache.state_hash = CacheDBHash::new(2);
     assert_eq!(cache.get_source_hash(&path), None);
-    cache.state_hash = 1;
+    cache.state_hash = CacheDBHash::new(1);
 
     // should return now that everything is back
-    assert_eq!(cache.get_source_hash(&path), Some(2));
+    assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
 
     // recreating the cache should not remove the data because the CLI version and state hash is the same
     let conn = cache.conn.recreate_with_version("2.0.0");
-    let cache = SqlIncrementalCache::new(conn, 1);
-    assert_eq!(cache.get_source_hash(&path), Some(2));
+    let cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
+    assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
 
     // now try replacing and using another path
-    cache.set_source_hash(&path, 3).unwrap();
-    cache.set_source_hash(&path, 4).unwrap();
+    cache.set_source_hash(&path, CacheDBHash::new(3)).unwrap();
+    cache.set_source_hash(&path, CacheDBHash::new(4)).unwrap();
     let path2 = PathBuf::from("/mod2.ts");
-    cache.set_source_hash(&path2, 5).unwrap();
-    assert_eq!(cache.get_source_hash(&path), Some(4));
-    assert_eq!(cache.get_source_hash(&path2), Some(5));
+    cache.set_source_hash(&path2, CacheDBHash::new(5)).unwrap();
+    assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(4)));
+    assert_eq!(cache.get_source_hash(&path2), Some(CacheDBHash::new(5)));
   }
 
   #[tokio::test]
   pub async fn incremental_cache_general_use() {
     let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
-    let sql_cache = SqlIncrementalCache::new(conn, 1);
+    let sql_cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
     let file_path = PathBuf::from("/mod.ts");
     let file_text = "test";
-    let file_hash = FastInsecureHasher::hash(file_text);
+    let file_hash = CacheDBHash::from_source(file_text);
     sql_cache.set_source_hash(&file_path, file_hash).unwrap();
     let cache = IncrementalCacheInner::from_sql_incremental_cache(
       sql_cache,
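
Since TState is now only required to implement std::hash::Hash, the state no longer takes a serde_json round trip before hashing. A hedged sketch of what a caller can pass (the FmtState type is invented for illustration):

// Any #[derive(Hash)] struct can key the incremental cache now that the
// old FastInsecureHasher::hash(serde_json::to_string(state)) dance is gone.
#[derive(Hash)]
struct FmtState {
  indent_width: u8,
  single_quote: bool,
}

let state = FmtState { indent_width: 2, single_quote: true };
let state_hash = CacheDBHash::from_source(&state);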

cli/cache/mod.rs

@@ -38,6 +38,7 @@ mod module_info;
 mod node;
 mod parsed_source;
 
+pub use cache_db::CacheDBHash;
 pub use caches::Caches;
 pub use check::TypeCheckCache;
 pub use code_cache::CodeCache;
@@ -101,8 +102,6 @@ pub type LocalLspHttpCache =
   deno_cache_dir::LocalLspHttpCache<RealDenoCacheEnv>;
 pub use deno_cache_dir::HttpCache;
 
-use self::module_info::ModuleInfoCacheSourceHash;
-
 /// A "wrapper" for the FileFetcher and DiskCache for the Deno CLI that provides
 /// a concise interface to the DENO_DIR when building module graphs.
 pub struct FetchCacher {
@@ -297,11 +296,11 @@ impl Loader for FetchCacher {
     module_info: &deno_graph::ModuleInfo,
   ) {
     log::debug!("Caching module info for {}", specifier);
-    let source_hash = ModuleInfoCacheSourceHash::from_source(source);
+    let source_hash = CacheDBHash::from_source(source);
     let result = self.module_info_cache.set_module_info(
       specifier,
       MediaType::from_specifier(specifier),
-      &source_hash,
+      source_hash,
       module_info,
     );
     if let Err(err) = result {


@@ -12,8 +12,8 @@ use deno_runtime::deno_webstorage::rusqlite::params;
 
 use super::cache_db::CacheDB;
 use super::cache_db::CacheDBConfiguration;
+use super::cache_db::CacheDBHash;
 use super::cache_db::CacheFailure;
-use super::FastInsecureHasher;
 use super::ParsedSourceCache;
 
 const SELECT_MODULE_INFO: &str = "
@@ -28,40 +28,19 @@
   LIMIT 1";
 
 pub static MODULE_INFO_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
-  table_initializer: "CREATE TABLE IF NOT EXISTS moduleinfocache (
-    specifier TEXT PRIMARY KEY,
-    media_type TEXT NOT NULL,
-    source_hash TEXT NOT NULL,
-    module_info TEXT NOT NULL
-  );",
+  table_initializer: concat!(
+    "CREATE TABLE IF NOT EXISTS moduleinfocache (",
+    "specifier TEXT PRIMARY KEY,",
+    "media_type INTEGER NOT NULL,",
+    "source_hash INTEGER NOT NULL,",
+    "module_info TEXT NOT NULL",
+    ");"
+  ),
   on_version_change: "DELETE FROM moduleinfocache;",
   preheat_queries: &[SELECT_MODULE_INFO],
   on_failure: CacheFailure::InMemory,
 };
 
-#[derive(Debug)]
-pub struct ModuleInfoCacheSourceHash(String);
-
-impl ModuleInfoCacheSourceHash {
-  pub fn new(hash: u64) -> Self {
-    Self(hash.to_string())
-  }
-
-  pub fn from_source(source: &[u8]) -> Self {
-    Self::new(FastInsecureHasher::hash(source))
-  }
-
-  pub fn as_str(&self) -> &str {
-    &self.0
-  }
-}
-
-impl From<ModuleInfoCacheSourceHash> for String {
-  fn from(source_hash: ModuleInfoCacheSourceHash) -> String {
-    source_hash.0
-  }
-}
-
 /// A cache of `deno_graph::ModuleInfo` objects. Using this leads to a considerable
 /// performance improvement because when it exists we can skip parsing a module for
 /// deno_graph.
@@ -91,7 +70,7 @@ impl ModuleInfoCache {
     &self,
     specifier: &ModuleSpecifier,
     media_type: MediaType,
-    expected_source_hash: &ModuleInfoCacheSourceHash,
+    expected_source_hash: CacheDBHash,
   ) -> Result<Option<ModuleInfo>, AnyError> {
     let query = SELECT_MODULE_INFO;
     let res = self.conn.query_row(
@@ -99,7 +78,7 @@ impl ModuleInfoCache {
       params![
         &specifier.as_str(),
         serialize_media_type(media_type),
-        expected_source_hash.as_str(),
+        expected_source_hash,
       ],
       |row| {
         let module_info: String = row.get(0)?;
@@ -114,7 +93,7 @@ impl ModuleInfoCache {
     &self,
     specifier: &ModuleSpecifier,
     media_type: MediaType,
-    source_hash: &ModuleInfoCacheSourceHash,
+    source_hash: CacheDBHash,
     module_info: &ModuleInfo,
   ) -> Result<(), AnyError> {
     let sql = "
@@ -127,7 +106,7 @@ impl ModuleInfoCache {
       params![
         specifier.as_str(),
         serialize_media_type(media_type),
-        source_hash.as_str(),
+        source_hash,
         &serde_json::to_string(&module_info)?,
       ],
     )?;
@@ -159,11 +138,11 @@ impl<'a> deno_graph::ModuleAnalyzer for ModuleInfoCacheModuleAnalyzer<'a> {
     media_type: MediaType,
   ) -> Result<ModuleInfo, deno_ast::ParseDiagnostic> {
     // attempt to load from the cache
-    let source_hash = ModuleInfoCacheSourceHash::from_source(source.as_bytes());
+    let source_hash = CacheDBHash::from_source(&source);
     match self.module_info_cache.get_module_info(
       specifier,
       media_type,
-      &source_hash,
+      source_hash,
     ) {
       Ok(Some(info)) => return Ok(info),
       Ok(None) => {}
@@ -193,7 +172,7 @@ impl<'a> deno_graph::ModuleAnalyzer for ModuleInfoCacheModuleAnalyzer<'a> {
     if let Err(err) = self.module_info_cache.set_module_info(
       specifier,
       media_type,
-      &source_hash,
+      source_hash,
       &module_info,
     ) {
       log::debug!(
@@ -207,27 +186,25 @@ impl<'a> deno_graph::ModuleAnalyzer for ModuleInfoCacheModuleAnalyzer<'a> {
   }
 }
 
-// todo(dsherret): change this to be stored as an integer next time
-// the cache version is bumped
-fn serialize_media_type(media_type: MediaType) -> &'static str {
+fn serialize_media_type(media_type: MediaType) -> i64 {
   use MediaType::*;
   match media_type {
-    JavaScript => "1",
-    Jsx => "2",
-    Mjs => "3",
-    Cjs => "4",
-    TypeScript => "5",
-    Mts => "6",
-    Cts => "7",
-    Dts => "8",
-    Dmts => "9",
-    Dcts => "10",
-    Tsx => "11",
-    Json => "12",
-    Wasm => "13",
-    TsBuildInfo => "14",
-    SourceMap => "15",
-    Unknown => "16",
+    JavaScript => 1,
+    Jsx => 2,
+    Mjs => 3,
+    Cjs => 4,
+    TypeScript => 5,
+    Mts => 6,
+    Cts => 7,
+    Dts => 8,
+    Dmts => 9,
+    Dcts => 10,
+    Tsx => 11,
+    Json => 12,
+    Wasm => 13,
+    TsBuildInfo => 14,
+    SourceMap => 15,
+    Unknown => 16,
   }
 }
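
The deleted todo ("change this to be stored as an integer next time the cache version is bumped") is cashed in here: the db file names move to v2 anyway, so switching the string codes to integers costs nothing and needs no migration. Illustrative check, not in the commit:

assert_eq!(serialize_media_type(MediaType::TypeScript), 5); // a v1 row stored the string "5"
assert_eq!(serialize_media_type(MediaType::Unknown), 16);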
@@ -250,7 +227,7 @@ mod test {
       .get_module_info(
         &specifier1,
         MediaType::JavaScript,
-        &ModuleInfoCacheSourceHash::new(1)
+        CacheDBHash::new(1)
       )
       .unwrap(),
     None
@@ -274,7 +251,7 @@ mod test {
       .set_module_info(
         &specifier1,
         MediaType::JavaScript,
-        &ModuleInfoCacheSourceHash::new(1),
+        CacheDBHash::new(1),
         &module_info,
       )
       .unwrap();
@@ -283,7 +260,7 @@ mod test {
       .get_module_info(
         &specifier1,
         MediaType::JavaScript,
-        &ModuleInfoCacheSourceHash::new(1)
+        CacheDBHash::new(1)
       )
       .unwrap(),
     Some(module_info.clone())
@@ -293,7 +270,7 @@ mod test {
      .get_module_info(
        &specifier2,
        MediaType::JavaScript,
-        &ModuleInfoCacheSourceHash::new(1)
+        CacheDBHash::new(1)
      )
      .unwrap(),
    None,
@@ -304,7 +281,7 @@ mod test {
      .get_module_info(
        &specifier1,
        MediaType::TypeScript,
-        &ModuleInfoCacheSourceHash::new(1)
+        CacheDBHash::new(1)
      )
      .unwrap(),
    None,
@@ -315,7 +292,7 @@ mod test {
      .get_module_info(
        &specifier1,
        MediaType::JavaScript,
-        &ModuleInfoCacheSourceHash::new(2)
+        CacheDBHash::new(2)
      )
      .unwrap(),
    None,
@@ -330,7 +307,7 @@ mod test {
      .get_module_info(
        &specifier1,
        MediaType::JavaScript,
-        &ModuleInfoCacheSourceHash::new(1)
+        CacheDBHash::new(1)
      )
      .unwrap(),
    Some(module_info)
@@ -345,7 +322,7 @@ mod test {
      .get_module_info(
        &specifier1,
        MediaType::JavaScript,
-        &ModuleInfoCacheSourceHash::new(1)
+        CacheDBHash::new(1)
      )
      .unwrap(),
    None,

cli/cache/node.rs

@@ -9,15 +9,17 @@ use crate::node::CliCjsAnalysis;
 
 use super::cache_db::CacheDB;
 use super::cache_db::CacheDBConfiguration;
 use super::cache_db::CacheFailure;
-use super::FastInsecureHasher;
+use super::CacheDBHash;
 
 pub static NODE_ANALYSIS_CACHE_DB: CacheDBConfiguration =
   CacheDBConfiguration {
-    table_initializer: "CREATE TABLE IF NOT EXISTS cjsanalysiscache (
-      specifier TEXT PRIMARY KEY,
-      source_hash TEXT NOT NULL,
-      data TEXT NOT NULL
-    );",
+    table_initializer: concat!(
+      "CREATE TABLE IF NOT EXISTS cjsanalysiscache (",
+      "specifier TEXT PRIMARY KEY,",
+      "source_hash INTEGER NOT NULL,",
+      "data TEXT NOT NULL",
+      ");"
+    ),
     on_version_change: "DELETE FROM cjsanalysiscache;",
     preheat_queries: &[],
     on_failure: CacheFailure::InMemory,
@@ -35,10 +37,6 @@ impl NodeAnalysisCache {
     }
   }
 
-  pub fn compute_source_hash(text: &str) -> String {
-    FastInsecureHasher::hash(text).to_string()
-  }
-
   fn ensure_ok<T: Default>(res: Result<T, AnyError>) -> T {
     match res {
       Ok(x) => x,
@@ -59,7 +57,7 @@ impl NodeAnalysisCache {
   pub fn get_cjs_analysis(
     &self,
     specifier: &str,
-    expected_source_hash: &str,
+    expected_source_hash: CacheDBHash,
   ) -> Option<CliCjsAnalysis> {
     Self::ensure_ok(
       self.inner.get_cjs_analysis(specifier, expected_source_hash),
@@ -69,7 +67,7 @@ impl NodeAnalysisCache {
   pub fn set_cjs_analysis(
     &self,
     specifier: &str,
-    source_hash: &str,
+    source_hash: CacheDBHash,
     cjs_analysis: &CliCjsAnalysis,
   ) {
     Self::ensure_ok(self.inner.set_cjs_analysis(
@@ -93,7 +91,7 @@ impl NodeAnalysisCacheInner {
   pub fn get_cjs_analysis(
     &self,
     specifier: &str,
-    expected_source_hash: &str,
+    expected_source_hash: CacheDBHash,
   ) -> Result<Option<CliCjsAnalysis>, AnyError> {
     let query = "
       SELECT
@@ -106,7 +104,7 @@ impl NodeAnalysisCacheInner {
       LIMIT 1";
     let res = self.conn.query_row(
       query,
-      params![specifier, &expected_source_hash],
+      params![specifier, expected_source_hash],
       |row| {
         let analysis_info: String = row.get(0)?;
         Ok(serde_json::from_str(&analysis_info)?)
@@ -118,7 +116,7 @@ impl NodeAnalysisCacheInner {
   pub fn set_cjs_analysis(
     &self,
     specifier: &str,
-    source_hash: &str,
+    source_hash: CacheDBHash,
     cjs_analysis: &CliCjsAnalysis,
   ) -> Result<(), AnyError> {
     let sql = "
@@ -130,7 +128,7 @@ impl NodeAnalysisCacheInner {
       sql,
       params![
         specifier,
-        &source_hash.to_string(),
+        source_hash,
         &serde_json::to_string(&cjs_analysis)?,
       ],
     )?;
@@ -147,34 +145,47 @@ mod test {
     let conn = CacheDB::in_memory(&NODE_ANALYSIS_CACHE_DB, "1.0.0");
     let cache = NodeAnalysisCacheInner::new(conn);
 
-    assert!(cache.get_cjs_analysis("file.js", "2").unwrap().is_none());
+    assert!(cache
+      .get_cjs_analysis("file.js", CacheDBHash::new(2))
+      .unwrap()
+      .is_none());
     let cjs_analysis = CliCjsAnalysis::Cjs {
       exports: vec!["export1".to_string()],
       reexports: vec!["re-export1".to_string()],
     };
     cache
-      .set_cjs_analysis("file.js", "2", &cjs_analysis)
+      .set_cjs_analysis("file.js", CacheDBHash::new(2), &cjs_analysis)
       .unwrap();
-    assert!(cache.get_cjs_analysis("file.js", "3").unwrap().is_none()); // different hash
-    let actual_cjs_analysis =
-      cache.get_cjs_analysis("file.js", "2").unwrap().unwrap();
+    assert!(cache
+      .get_cjs_analysis("file.js", CacheDBHash::new(3))
+      .unwrap()
+      .is_none()); // different hash
+    let actual_cjs_analysis = cache
+      .get_cjs_analysis("file.js", CacheDBHash::new(2))
+      .unwrap()
+      .unwrap();
     assert_eq!(actual_cjs_analysis, cjs_analysis);
 
     // adding when already exists should not cause issue
     cache
-      .set_cjs_analysis("file.js", "2", &cjs_analysis)
+      .set_cjs_analysis("file.js", CacheDBHash::new(2), &cjs_analysis)
      .unwrap();
 
     // recreating with same cli version should still have it
     let conn = cache.conn.recreate_with_version("1.0.0");
     let cache = NodeAnalysisCacheInner::new(conn);
-    let actual_analysis =
-      cache.get_cjs_analysis("file.js", "2").unwrap().unwrap();
+    let actual_analysis = cache
+      .get_cjs_analysis("file.js", CacheDBHash::new(2))
+      .unwrap()
+      .unwrap();
     assert_eq!(actual_analysis, cjs_analysis);
 
     // now changing the cli version should clear it
     let conn = cache.conn.recreate_with_version("2.0.0");
     let cache = NodeAnalysisCacheInner::new(conn);
-    assert!(cache.get_cjs_analysis("file.js", "2").unwrap().is_none());
+    assert!(cache
+      .get_cjs_analysis("file.js", CacheDBHash::new(2))
+      .unwrap()
+      .is_none());
   }
 }


@@ -34,7 +34,7 @@ impl Emitter {
     emit_options: deno_ast::EmitOptions,
   ) -> Self {
     let transpile_and_emit_options_hash = {
-      let mut hasher = FastInsecureHasher::default();
+      let mut hasher = FastInsecureHasher::new_without_deno_version();
       hasher.write_hashable(&transpile_options);
       hasher.write_hashable(&emit_options);
       hasher.finish()
@@ -188,7 +188,7 @@ impl Emitter {
   /// options then generates a string hash which can be stored to
   /// determine if the cached emit is valid or not.
   fn get_source_hash(&self, source_text: &str) -> u64 {
-    FastInsecureHasher::new()
+    FastInsecureHasher::new_without_deno_version() // stored in the transpile_and_emit_options_hash
       .write_str(source_text)
       .write_u64(self.transpile_and_emit_options_hash)
       .finish()


@@ -807,7 +807,7 @@ impl Settings {
   }
 
   pub fn enable_settings_hash(&self) -> u64 {
-    let mut hasher = FastInsecureHasher::default();
+    let mut hasher = FastInsecureHasher::new_without_deno_version();
     let unscoped = self.get_unscoped();
     hasher.write_hashable(unscoped.enable);
     hasher.write_hashable(&unscoped.enable_paths);


@@ -382,7 +382,9 @@ impl<TGraphContainer: ModuleGraphContainer>
     let code_cache = if module_type == ModuleType::JavaScript {
       self.shared.code_cache.as_ref().map(|cache| {
-        let code_hash = FastInsecureHasher::hash(&code);
+        let code_hash = FastInsecureHasher::new_deno_versioned()
+          .write_hashable(&code)
+          .finish();
         let data = cache
           .get_sync(specifier, code_cache::CodeCacheType::EsModule, code_hash)
           .map(Cow::from)


@@ -13,6 +13,7 @@ use deno_runtime::deno_node::analyze::NodeCodeTranslator;
 use serde::Deserialize;
 use serde::Serialize;
 
+use crate::cache::CacheDBHash;
 use crate::cache::NodeAnalysisCache;
 use crate::util::fs::canonicalize_path_maybe_not_exists;
@@ -63,10 +64,9 @@ impl CliCjsCodeAnalyzer {
     specifier: &ModuleSpecifier,
     source: &str,
   ) -> Result<CliCjsAnalysis, AnyError> {
-    let source_hash = NodeAnalysisCache::compute_source_hash(source);
-    if let Some(analysis) = self
-      .cache
-      .get_cjs_analysis(specifier.as_str(), &source_hash)
+    let source_hash = CacheDBHash::from_source(source);
+    if let Some(analysis) =
+      self.cache.get_cjs_analysis(specifier.as_str(), source_hash)
     {
       return Ok(analysis);
     }
@@ -107,7 +107,7 @@ impl CliCjsCodeAnalyzer {
     self
       .cache
-      .set_cjs_analysis(specifier.as_str(), &source_hash, &analysis);
+      .set_cjs_analysis(specifier.as_str(), source_hash, &analysis);
 
     Ok(analysis)
   }


@@ -594,7 +594,7 @@ impl CliNpmResolver for ManagedCliNpmResolver {
       .into_iter()
       .collect::<Vec<_>>();
     package_reqs.sort_by(|a, b| a.0.cmp(&b.0)); // determinism
-    let mut hasher = FastInsecureHasher::new();
+    let mut hasher = FastInsecureHasher::new_without_deno_version();
     // ensure the cache gets busted when turning nodeModulesDir on or off
     // as this could cause changes in resolution
     hasher.write_hashable(self.fs_resolver.node_modules_path().is_some());


@@ -19,6 +19,7 @@ use crate::args::TsConfig;
 use crate::args::TsConfigType;
 use crate::args::TsTypeLib;
 use crate::args::TypeCheckMode;
+use crate::cache::CacheDBHash;
 use crate::cache::Caches;
 use crate::cache::FastInsecureHasher;
 use crate::cache::TypeCheckCache;
@@ -28,7 +29,6 @@ use crate::npm::CliNpmResolver;
 use crate::tsc;
 use crate::tsc::Diagnostics;
 use crate::util::path::to_percent_decoded_str;
-use crate::version;
 
 /// Options for performing a check of a module graph. Note that the decision to
 /// emit or not is determined by the `ts_config` settings.
@@ -174,9 +174,8 @@ impl TypeChecker {
     // to make tsc build info work, we need to consistently hash modules, so that
     // tsc can better determine if an emit is still valid or not, so we provide
     // that data here.
-    let hash_data = FastInsecureHasher::new()
+    let hash_data = FastInsecureHasher::new_deno_versioned()
       .write(&ts_config.as_bytes())
-      .write_str(version::deno())
      .finish();
 
     // add fast check to the graph before getting the roots
@@ -246,7 +245,7 @@ impl TypeChecker {
 }
 
 enum CheckHashResult {
-  Hash(u64),
+  Hash(CacheDBHash),
   NoFiles,
 }
@@ -258,7 +257,7 @@ fn get_check_hash(
   type_check_mode: TypeCheckMode,
   ts_config: &TsConfig,
 ) -> CheckHashResult {
-  let mut hasher = FastInsecureHasher::new();
+  let mut hasher = FastInsecureHasher::new_deno_versioned();
   hasher.write_u8(match type_check_mode {
     TypeCheckMode::All => 0,
     TypeCheckMode::Local => 1,
@@ -340,7 +339,7 @@ fn get_check_hash(
     // no files to type check
     CheckHashResult::NoFiles
   } else {
-    CheckHashResult::Hash(hasher.finish())
+    CheckHashResult::Hash(CacheDBHash::new(hasher.finish()))
  }
 }


@@ -248,7 +248,7 @@ fn get_maybe_hash(
 }
 
 fn get_hash(source: &str, hash_data: u64) -> String {
-  FastInsecureHasher::new()
+  FastInsecureHasher::new_without_deno_version()
     .write_str(source)
     .write_u64(hash_data)
     .finish()


@@ -14,7 +14,7 @@ fn fast_check_cache() {
   let test_context = TestContextBuilder::for_jsr().use_temp_cwd().build();
   let deno_dir = test_context.deno_dir();
   let temp_dir = test_context.temp_dir();
-  let type_check_cache_path = deno_dir.path().join("check_cache_v1");
+  let type_check_cache_path = deno_dir.path().join("check_cache_v2");
   temp_dir.write(
     "main.ts",


@@ -29,6 +29,8 @@ use util::PathRef;
 use util::TestContext;
 use util::TestContextBuilder;
 
+const CODE_CACHE_DB_FILE_NAME: &str = "v8_code_cache_v2";
+
 itest!(stdout_write_all {
   args: "run --quiet run/stdout_write_all.ts",
   output: "run/stdout_write_all.out",
@@ -5066,7 +5068,7 @@ fn code_cache_test() {
     assert!(!output.stderr().contains("V8 code cache hit"));
 
     // Check that the code cache database exists.
-    let code_cache_path = deno_dir.path().join("v8_code_cache_v1");
+    let code_cache_path = deno_dir.path().join(CODE_CACHE_DB_FILE_NAME);
     assert!(code_cache_path.exists());
   }
@@ -5157,7 +5159,7 @@ fn code_cache_npm_test() {
     assert!(!output.stderr().contains("V8 code cache hit"));
 
     // Check that the code cache database exists.
-    let code_cache_path = deno_dir.path().join("v8_code_cache_v1");
+    let code_cache_path = deno_dir.path().join(CODE_CACHE_DB_FILE_NAME);
     assert!(code_cache_path.exists());
   }
@@ -5217,7 +5219,7 @@ fn code_cache_npm_with_require_test() {
    assert!(!output.stderr().contains("V8 code cache hit"));
 
    // Check that the code cache database exists.
-    let code_cache_path = deno_dir.path().join("v8_code_cache_v1");
+    let code_cache_path = deno_dir.path().join(CODE_CACHE_DB_FILE_NAME);
    assert!(code_cache_path.exists());
} }