
fix: bump cache sqlite dbs to v2 for WAL journal mode change (#24030)

In https://github.com/denoland/deno/pull/23955 we changed the sqlite db
journal mode to WAL. This causes issues when someone runs an old version
of Deno (which still uses the TRUNCATE journal mode) alongside a new
version, because the two fight against each other over the same database
files. Bumping the cache db file names to v2 gives each journal mode its
own set of files.
David Sherret 2024-05-29 14:38:18 -04:00 committed by GitHub
parent fada25b0dd
commit 94f040ac28
22 changed files with 347 additions and 263 deletions
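
A minimal sketch (not part of the commit) of why the two journal modes fight, assuming rusqlite as the Deno CLI uses; the db file name below is one of the real v1 cache files, but the scenario is illustrative:

use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    // An old and a new Deno process open the same cache db file.
    let old_deno = Connection::open("dep_analysis_cache_v1")?;
    let new_deno = Connection::open("dep_analysis_cache_v1")?;

    // New Deno (after #23955) switches the file into WAL mode. WAL is a
    // persistent property of the database file, not of the connection.
    let mode: String =
        new_deno.query_row("PRAGMA journal_mode=WAL", [], |r| r.get(0))?;
    assert_eq!(mode, "wal");

    // Old Deno still asks for TRUNCATE, which forces the file back out of
    // WAL mode. Leaving WAL requires an exclusive lock, so with both
    // versions live the mode ping-pongs and pragmas can fail with
    // SQLITE_BUSY; bumping the file names to v2 gives each mode its own
    // files instead.
    let mode: String =
        old_deno.query_row("PRAGMA journal_mode=TRUNCATE", [], |r| r.get(0))?;
    assert_eq!(mode, "truncate");
    Ok(())
}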

Cargo.lock generated

@ -1278,9 +1278,9 @@ dependencies = [
[[package]]
name = "deno_config"
version = "0.16.3"
version = "0.16.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "971658ccd8dbd7de18f44d2270a6881a78a88f123584fc6497189ee5d20aa307"
checksum = "3d21c7b688ff6cb411895a93bf1d6734ed654c3a7eb9b502f96098f6659df0c5"
dependencies = [
"anyhow",
"glob",
@ -1478,9 +1478,9 @@ dependencies = [
[[package]]
name = "deno_graph"
version = "0.77.0"
version = "0.77.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e94a8739bb6087061f8419e4c1719459a144ef477ab649dca597a0603290ec82"
checksum = "192d6f61d5418c928d29b2666b916df65a3b5677ce454fc6a4b4969983a02abe"
dependencies = [
"anyhow",
"async-trait",


@ -65,11 +65,11 @@ winres.workspace = true
[dependencies]
deno_ast = { workspace = true, features = ["bundler", "cjs", "codegen", "proposal", "react", "sourcemap", "transforms", "typescript", "view", "visit"] }
deno_cache_dir = { workspace = true }
deno_config = "=0.16.3"
deno_config = "=0.16.4"
deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] }
deno_doc = { version = "=0.137.0", features = ["html", "syntect"] }
deno_emit = "=0.41.0"
deno_graph = { version = "=0.77.0", features = ["tokio_executor"] }
deno_graph = { version = "=0.77.2", features = ["tokio_executor"] }
deno_lint = { version = "=0.58.4", features = ["docs"] }
deno_lockfile.workspace = true
deno_npm = "=0.21.0"

cli/cache/cache_db.rs vendored

@ -14,6 +14,48 @@ use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use super::FastInsecureHasher;
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CacheDBHash(u64);
impl CacheDBHash {
pub fn new(hash: u64) -> Self {
Self(hash)
}
pub fn from_source(source: impl std::hash::Hash) -> Self {
Self::new(
// always write in the deno version just in case
// the clearing on deno version change doesn't work
FastInsecureHasher::new_deno_versioned()
.write_hashable(source)
.finish(),
)
}
}
impl rusqlite::types::ToSql for CacheDBHash {
fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
Ok(rusqlite::types::ToSqlOutput::Owned(
// sqlite doesn't support u64, but it does support i64 so store
// this value "incorrectly" as i64 then convert back to u64 on read
rusqlite::types::Value::Integer(self.0 as i64),
))
}
}
impl rusqlite::types::FromSql for CacheDBHash {
fn column_result(
value: rusqlite::types::ValueRef,
) -> rusqlite::types::FromSqlResult<Self> {
match value {
rusqlite::types::ValueRef::Integer(i) => Ok(Self::new(i as u64)),
_ => Err(rusqlite::types::FromSqlError::InvalidType),
}
}
}
/// What should the cache do on failure?
#[derive(Default)]
pub enum CacheFailure {
@ -41,21 +83,16 @@ pub struct CacheDBConfiguration {
impl CacheDBConfiguration {
fn create_combined_sql(&self) -> String {
format!(
"
PRAGMA journal_mode=WAL;
PRAGMA synchronous=NORMAL;
PRAGMA temp_store=memory;
PRAGMA page_size=4096;
PRAGMA mmap_size=6000000;
PRAGMA optimize;
CREATE TABLE IF NOT EXISTS info (
key TEXT PRIMARY KEY,
value TEXT NOT NULL
);
{}
",
concat!(
"PRAGMA journal_mode=WAL;",
"PRAGMA synchronous=NORMAL;",
"PRAGMA temp_store=memory;",
"PRAGMA page_size=4096;",
"PRAGMA mmap_size=6000000;",
"PRAGMA optimize;",
"CREATE TABLE IF NOT EXISTS info (key TEXT PRIMARY KEY, value TEXT NOT NULL);",
"{}",
),
self.table_initializer
)
}
@ -520,4 +557,32 @@ mod tests {
})
.expect_err("Should have failed");
}
#[test]
fn cache_db_hash_max_u64_value() {
assert_same_serialize_deserialize(CacheDBHash::new(u64::MAX));
assert_same_serialize_deserialize(CacheDBHash::new(u64::MAX - 1));
assert_same_serialize_deserialize(CacheDBHash::new(u64::MIN));
assert_same_serialize_deserialize(CacheDBHash::new(u64::MIN + 1));
}
fn assert_same_serialize_deserialize(original_hash: CacheDBHash) {
use rusqlite::types::FromSql;
use rusqlite::types::ValueRef;
use rusqlite::ToSql;
let value = original_hash.to_sql().unwrap();
match value {
rusqlite::types::ToSqlOutput::Owned(rusqlite::types::Value::Integer(
value,
)) => {
let value_ref = ValueRef::Integer(value);
assert_eq!(
original_hash,
CacheDBHash::column_result(value_ref).unwrap()
);
}
_ => unreachable!(),
}
}
}
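
The `as i64` / `as u64` casts above are lossless two's-complement reinterpretations, which is the invariant the new cache_db_hash_max_u64_value test exercises through rusqlite. A standalone illustration:

fn main() {
    // SQLite's INTEGER is a signed 64-bit value, so the u64 hash is stored
    // reinterpreted as i64 and converted back on read. The cast round-trips
    // every bit pattern; hashes above i64::MAX are simply stored as
    // negative integers.
    for hash in [u64::MIN, 1, i64::MAX as u64, i64::MAX as u64 + 1, u64::MAX] {
        let stored = hash as i64; // may be negative inside sqlite
        let restored = stored as u64; // identical bit pattern
        assert_eq!(hash, restored);
    }
}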

cli/cache/check.rs vendored

@ -2,6 +2,7 @@
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
use deno_ast::ModuleSpecifier;
use deno_core::error::AnyError;
@ -9,13 +10,13 @@ use deno_runtime::deno_webstorage::rusqlite::params;
pub static TYPE_CHECK_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS checkcache (
check_hash TEXT PRIMARY KEY
);",
"CREATE TABLE IF NOT EXISTS tsbuildinfo (
specifier TEXT PRIMARY KEY,
text TEXT NOT NULL
);",
"CREATE TABLE IF NOT EXISTS checkcache (",
"check_hash INT PRIMARY KEY",
");",
"CREATE TABLE IF NOT EXISTS tsbuildinfo (",
"specifier TEXT PRIMARY KEY,",
"text TEXT NOT NULL",
");",
),
on_version_change: concat!(
"DELETE FROM checkcache;",
@ -37,7 +38,7 @@ impl TypeCheckCache {
Self(db)
}
pub fn has_check_hash(&self, hash: u64) -> bool {
pub fn has_check_hash(&self, hash: CacheDBHash) -> bool {
match self.hash_check_hash_result(hash) {
Ok(val) => val,
Err(err) => {
@ -52,14 +53,17 @@ impl TypeCheckCache {
}
}
fn hash_check_hash_result(&self, hash: u64) -> Result<bool, AnyError> {
fn hash_check_hash_result(
&self,
hash: CacheDBHash,
) -> Result<bool, AnyError> {
self.0.exists(
"SELECT * FROM checkcache WHERE check_hash=?1 LIMIT 1",
params![hash.to_string()],
params![hash],
)
}
pub fn add_check_hash(&self, check_hash: u64) {
pub fn add_check_hash(&self, check_hash: CacheDBHash) {
if let Err(err) = self.add_check_hash_result(check_hash) {
if cfg!(debug_assertions) {
panic!("Error saving check hash: {err}");
@ -69,13 +73,16 @@ impl TypeCheckCache {
}
}
fn add_check_hash_result(&self, check_hash: u64) -> Result<(), AnyError> {
fn add_check_hash_result(
&self,
check_hash: CacheDBHash,
) -> Result<(), AnyError> {
let sql = "
INSERT OR REPLACE INTO
checkcache (check_hash)
VALUES
(?1)";
self.0.execute(sql, params![&check_hash.to_string(),])?;
self.0.execute(sql, params![check_hash])?;
Ok(())
}
@ -123,10 +130,10 @@ mod test {
let conn = CacheDB::in_memory(&TYPE_CHECK_CACHE_DB, "1.0.0");
let cache = TypeCheckCache::new(conn);
assert!(!cache.has_check_hash(1));
cache.add_check_hash(1);
assert!(cache.has_check_hash(1));
assert!(!cache.has_check_hash(2));
assert!(!cache.has_check_hash(CacheDBHash::new(1)));
cache.add_check_hash(CacheDBHash::new(1));
assert!(cache.has_check_hash(CacheDBHash::new(1)));
assert!(!cache.has_check_hash(CacheDBHash::new(2)));
let specifier1 = ModuleSpecifier::parse("file:///test.json").unwrap();
assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
@ -137,9 +144,9 @@ mod test {
let conn = cache.0.recreate_with_version("2.0.0");
let cache = TypeCheckCache::new(conn);
assert!(!cache.has_check_hash(1));
cache.add_check_hash(1);
assert!(cache.has_check_hash(1));
assert!(!cache.has_check_hash(CacheDBHash::new(1)));
cache.add_check_hash(CacheDBHash::new(1));
assert!(cache.has_check_hash(CacheDBHash::new(1)));
assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
cache.set_tsbuildinfo(&specifier1, "test");
assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
@ -148,13 +155,13 @@ mod test {
let conn = cache.0.recreate_with_version("2.0.0");
let cache = TypeCheckCache::new(conn);
assert!(cache.has_check_hash(1));
assert!(!cache.has_check_hash(2));
assert!(cache.has_check_hash(CacheDBHash::new(1)));
assert!(!cache.has_check_hash(CacheDBHash::new(2)));
assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
// adding when already exists should not cause issue
cache.add_check_hash(1);
assert!(cache.has_check_hash(1));
cache.add_check_hash(CacheDBHash::new(1));
assert!(cache.has_check_hash(CacheDBHash::new(1)));
cache.set_tsbuildinfo(&specifier1, "other");
assert_eq!(
cache.get_tsbuildinfo(&specifier1),


@ -7,16 +7,19 @@ use deno_runtime::deno_webstorage::rusqlite::params;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
pub static CODE_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: "CREATE TABLE IF NOT EXISTS codecache (
specifier TEXT NOT NULL,
type TEXT NOT NULL,
source_hash TEXT NOT NULL,
data BLOB NOT NULL,
PRIMARY KEY (specifier, type)
);",
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS codecache (",
"specifier TEXT NOT NULL,",
"type INTEGER NOT NULL,",
"source_hash INTEGER NOT NULL,",
"data BLOB NOT NULL,",
"PRIMARY KEY (specifier, type)",
");"
),
on_version_change: "DELETE FROM codecache;",
preheat_queries: &[],
on_failure: CacheFailure::Blackhole,
@ -59,7 +62,7 @@ impl CodeCache {
Self::ensure_ok(self.inner.get_sync(
specifier.as_str(),
code_cache_type,
&source_hash.to_string(),
CacheDBHash::new(source_hash),
))
}
@ -73,7 +76,7 @@ impl CodeCache {
Self::ensure_ok(self.inner.set_sync(
specifier.as_str(),
code_cache_type,
&source_hash.to_string(),
CacheDBHash::new(source_hash),
data,
));
}
@ -113,7 +116,7 @@ impl CodeCacheInner {
&self,
specifier: &str,
code_cache_type: code_cache::CodeCacheType,
source_hash: &str,
source_hash: CacheDBHash,
) -> Result<Option<Vec<u8>>, AnyError> {
let query = "
SELECT
@ -123,7 +126,11 @@ impl CodeCacheInner {
WHERE
specifier=?1 AND type=?2 AND source_hash=?3
LIMIT 1";
let params = params![specifier, code_cache_type.as_str(), source_hash,];
let params = params![
specifier,
serialize_code_cache_type(code_cache_type),
source_hash,
];
self.conn.query_row(query, params, |row| {
let value: Vec<u8> = row.get(0)?;
Ok(value)
@ -134,7 +141,7 @@ impl CodeCacheInner {
&self,
specifier: &str,
code_cache_type: code_cache::CodeCacheType,
source_hash: &str, // use string because sqlite doesn't have a u64 type
source_hash: CacheDBHash,
data: &[u8],
) -> Result<(), AnyError> {
let sql = "
@ -142,13 +149,26 @@ impl CodeCacheInner {
codecache (specifier, type, source_hash, data)
VALUES
(?1, ?2, ?3, ?4)";
let params =
params![specifier, code_cache_type.as_str(), source_hash, data];
let params = params![
specifier,
serialize_code_cache_type(code_cache_type),
source_hash,
data
];
self.conn.execute(sql, params)?;
Ok(())
}
}
fn serialize_code_cache_type(
code_cache_type: code_cache::CodeCacheType,
) -> i64 {
match code_cache_type {
code_cache::CodeCacheType::Script => 0,
code_cache::CodeCacheType::EsModule => 1,
}
}
#[cfg(test)]
mod test {
use super::*;
@ -162,7 +182,7 @@ mod test {
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
"hash",
CacheDBHash::new(1),
)
.unwrap()
.is_none());
@ -171,7 +191,7 @@ mod test {
.set_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
"hash",
CacheDBHash::new(1),
&data_esm,
)
.unwrap();
@ -180,7 +200,7 @@ mod test {
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
"hash",
CacheDBHash::new(1),
)
.unwrap()
.unwrap(),
@ -191,7 +211,7 @@ mod test {
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::Script,
"hash",
CacheDBHash::new(1),
)
.unwrap()
.is_none());
@ -200,7 +220,7 @@ mod test {
.set_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::Script,
"hash",
CacheDBHash::new(1),
&data_script,
)
.unwrap();
@ -209,7 +229,7 @@ mod test {
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::Script,
"hash",
CacheDBHash::new(1),
)
.unwrap()
.unwrap(),
@ -220,7 +240,7 @@ mod test {
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
"hash",
CacheDBHash::new(1),
)
.unwrap()
.unwrap(),

cli/cache/common.rs vendored

@ -3,16 +3,17 @@
use std::hash::Hasher;
/// A very fast insecure hasher that uses the xxHash algorithm.
#[derive(Default)]
pub struct FastInsecureHasher(twox_hash::XxHash64);
impl FastInsecureHasher {
pub fn new() -> Self {
Self::default()
pub fn new_without_deno_version() -> Self {
Self(Default::default())
}
pub fn hash(hashable: impl std::hash::Hash) -> u64 {
Self::new().write_hashable(hashable).finish()
pub fn new_deno_versioned() -> Self {
let mut hasher = Self::new_without_deno_version();
hasher.write_str(crate::version::deno());
hasher
}
pub fn write_str(&mut self, text: &str) -> &mut Self {

cli/cache/deno_dir.rs vendored

@ -79,40 +79,46 @@ impl DenoDir {
self.root.display()
}
/// Path for the V8 code cache.
pub fn code_cache_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache
self.root.join("v8_code_cache_v2")
}
/// Path for the incremental cache used for formatting.
pub fn fmt_incremental_cache_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache
self.root.join("fmt_incremental_cache_v1")
self.root.join("fmt_incremental_cache_v2")
}
/// Path for the incremental cache used for linting.
pub fn lint_incremental_cache_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache
self.root.join("lint_incremental_cache_v1")
self.root.join("lint_incremental_cache_v2")
}
/// Path for caching swc dependency analysis.
pub fn dep_analysis_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache
self.root.join("dep_analysis_cache_v1")
self.root.join("dep_analysis_cache_v2")
}
/// Path for the cache used for fast check.
pub fn fast_check_cache_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache
self.root.join("fast_check_cache_v1")
self.root.join("fast_check_cache_v2")
}
/// Path for caching node analysis.
pub fn node_analysis_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache
self.root.join("node_analysis_cache_v1")
self.root.join("node_analysis_cache_v2")
}
/// Path for the cache used for type checking.
pub fn type_checking_cache_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache
self.root.join("check_cache_v1")
self.root.join("check_cache_v2")
}
/// Path to the registries cache, used for the LSP.
@ -141,12 +147,6 @@ impl DenoDir {
self.root.join("npm")
}
/// Path for the V8 code cache.
pub fn code_cache_db_file_path(&self) -> PathBuf {
// bump this version name to invalidate the entire cache
self.root.join("v8_code_cache_v1")
}
/// Path used for the REPL history file.
/// Can be overridden or disabled by setting `DENO_REPL_HISTORY` environment variable.
pub fn repl_history_file_path(&self) -> Option<PathBuf> {

cli/cache/emit.rs vendored

@ -14,8 +14,8 @@ use super::FastInsecureHasher;
#[derive(Debug, Deserialize, Serialize)]
struct EmitMetadata {
pub source_hash: String,
pub emit_hash: String,
pub source_hash: u64,
pub emit_hash: u64,
}
/// The cache that stores previously emitted files.
@ -52,7 +52,7 @@ impl EmitCache {
// load and verify the meta data file is for this source and CLI version
let bytes = self.disk_cache.get(&meta_filename).ok()?;
let meta: EmitMetadata = serde_json::from_slice(&bytes).ok()?;
if meta.source_hash != expected_source_hash.to_string() {
if meta.source_hash != expected_source_hash {
return None;
}
@ -112,7 +112,7 @@ impl EmitCache {
// save the metadata
let metadata = EmitMetadata {
source_hash: source_hash.to_string(),
source_hash,
emit_hash: compute_emit_hash(code.as_bytes(), self.cli_version),
};
self
@ -138,16 +138,15 @@ impl EmitCache {
}
}
fn compute_emit_hash(bytes: &[u8], cli_version: &str) -> String {
fn compute_emit_hash(bytes: &[u8], cli_version: &str) -> u64 {
// it's ok to use an insecure hash here because
// if someone can change the emit source then they
// can also change the version hash
FastInsecureHasher::new()
FastInsecureHasher::new_without_deno_version() // use cli_version param instead
.write(bytes)
// emit should not be re-used between cli versions
.write(cli_version.as_bytes())
.write_str(cli_version)
.finish()
.to_string()
}
#[cfg(test)]


@ -7,13 +7,16 @@ use deno_runtime::deno_webstorage::rusqlite::params;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
pub static FAST_CHECK_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: "CREATE TABLE IF NOT EXISTS fastcheckcache (
hash TEXT PRIMARY KEY,
data TEXT NOT NULL
);",
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS fastcheckcache (",
"hash INTEGER PRIMARY KEY,",
"data TEXT NOT NULL",
");"
),
on_version_change: "DELETE FROM fastcheckcache;",
preheat_queries: &[],
on_failure: CacheFailure::Blackhole,
@ -81,13 +84,14 @@ impl FastCheckCacheInner {
WHERE
hash=?1
LIMIT 1";
let res = self
.conn
// key is a string because SQLite can't handle u64
.query_row(query, params![key.as_u64().to_string()], |row| {
let res = self.conn.query_row(
query,
params![CacheDBHash::new(key.as_u64())],
|row| {
let value: Vec<u8> = row.get(0)?;
Ok(bincode::deserialize::<FastCheckCacheItem>(&value)?)
})?;
},
)?;
Ok(res)
}
@ -103,7 +107,7 @@ impl FastCheckCacheInner {
(?1, ?2)";
self.conn.execute(
sql,
params![key.as_u64().to_string(), &bincode::serialize(data)?],
params![CacheDBHash::new(key.as_u64()), &bincode::serialize(data)?],
)?;
Ok(())
}
@ -114,6 +118,7 @@ mod test {
use std::collections::BTreeSet;
use deno_ast::ModuleSpecifier;
use deno_graph::FastCheckCache as _;
use deno_graph::FastCheckCacheModuleItem;
use deno_graph::FastCheckCacheModuleItemDiagnostic;
use deno_semver::package::PackageNv;
@ -123,12 +128,14 @@ mod test {
#[test]
pub fn cache_general_use() {
let conn = CacheDB::in_memory(&FAST_CHECK_CACHE_DB, "1.0.0");
let cache = FastCheckCacheInner::new(conn);
let cache = FastCheckCache::new(conn);
let key = FastCheckCacheKey::build(
cache.hash_seed(),
&PackageNv::from_str("@scope/a@1.0.0").unwrap(),
&Default::default(),
);
let cache = cache.inner;
assert!(cache.get(key).unwrap().is_none());
let value = FastCheckCacheItem {
dependencies: BTreeSet::from([


@ -6,23 +6,23 @@ use std::path::PathBuf;
use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
use deno_core::serde_json;
use deno_core::unsync::spawn;
use deno_core::unsync::JoinHandle;
use deno_runtime::deno_webstorage::rusqlite::params;
use serde::Serialize;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
use super::common::FastInsecureHasher;
pub static INCREMENTAL_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: "CREATE TABLE IF NOT EXISTS incrementalcache (
file_path TEXT PRIMARY KEY,
state_hash TEXT NOT NULL,
source_hash TEXT NOT NULL
);",
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS incrementalcache (",
"file_path TEXT PRIMARY KEY,",
"state_hash INTEGER NOT NULL,",
"source_hash INTEGER NOT NULL",
");"
),
on_version_change: "DELETE FROM incrementalcache;",
preheat_queries: &[],
// If the cache fails, just ignore all caching attempts
@ -34,7 +34,7 @@ pub static INCREMENTAL_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
pub struct IncrementalCache(IncrementalCacheInner);
impl IncrementalCache {
pub fn new<TState: Serialize>(
pub fn new<TState: std::hash::Hash>(
db: CacheDB,
state: &TState,
initial_file_paths: &[PathBuf],
@ -56,24 +56,23 @@ impl IncrementalCache {
}
enum ReceiverMessage {
Update(PathBuf, u64),
Update(PathBuf, CacheDBHash),
Exit,
}
struct IncrementalCacheInner {
previous_hashes: HashMap<PathBuf, u64>,
previous_hashes: HashMap<PathBuf, CacheDBHash>,
sender: tokio::sync::mpsc::UnboundedSender<ReceiverMessage>,
handle: Mutex<Option<JoinHandle<()>>>,
}
impl IncrementalCacheInner {
pub fn new<TState: Serialize>(
pub fn new<TState: std::hash::Hash>(
db: CacheDB,
state: &TState,
initial_file_paths: &[PathBuf],
) -> Self {
let state_hash =
FastInsecureHasher::hash(serde_json::to_string(state).unwrap());
let state_hash = CacheDBHash::from_source(state);
let sql_cache = SqlIncrementalCache::new(db, state_hash);
Self::from_sql_incremental_cache(sql_cache, initial_file_paths)
}
@ -113,13 +112,13 @@ impl IncrementalCacheInner {
pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
match self.previous_hashes.get(file_path) {
Some(hash) => *hash == FastInsecureHasher::hash(file_text),
Some(hash) => *hash == CacheDBHash::from_source(file_text),
None => false,
}
}
pub fn update_file(&self, file_path: &Path, file_text: &str) {
let hash = FastInsecureHasher::hash(file_text);
let hash = CacheDBHash::from_source(file_text);
if let Some(previous_hash) = self.previous_hashes.get(file_path) {
if *previous_hash == hash {
return; // do not bother updating the db file because nothing has changed
@ -146,15 +145,15 @@ struct SqlIncrementalCache {
/// A hash of the state used to produce the formatting/linting other than
/// the CLI version. This state is a hash of the configuration and ensures
/// we format/lint a file when the configuration changes.
state_hash: u64,
state_hash: CacheDBHash,
}
impl SqlIncrementalCache {
pub fn new(conn: CacheDB, state_hash: u64) -> Self {
pub fn new(conn: CacheDB, state_hash: CacheDBHash) -> Self {
Self { conn, state_hash }
}
pub fn get_source_hash(&self, path: &Path) -> Option<u64> {
pub fn get_source_hash(&self, path: &Path) -> Option<CacheDBHash> {
match self.get_source_hash_result(path) {
Ok(option) => option,
Err(err) => {
@ -171,7 +170,7 @@ impl SqlIncrementalCache {
fn get_source_hash_result(
&self,
path: &Path,
) -> Result<Option<u64>, AnyError> {
) -> Result<Option<CacheDBHash>, AnyError> {
let query = "
SELECT
source_hash
@ -183,10 +182,10 @@ impl SqlIncrementalCache {
LIMIT 1";
let res = self.conn.query_row(
query,
params![path.to_string_lossy(), self.state_hash.to_string()],
params![path.to_string_lossy(), self.state_hash],
|row| {
let hash: String = row.get(0)?;
Ok(hash.parse::<u64>()?)
let hash: CacheDBHash = row.get(0)?;
Ok(hash)
},
)?;
Ok(res)
@ -195,7 +194,7 @@ impl SqlIncrementalCache {
pub fn set_source_hash(
&self,
path: &Path,
source_hash: u64,
source_hash: CacheDBHash,
) -> Result<(), AnyError> {
let sql = "
INSERT OR REPLACE INTO
@ -204,11 +203,7 @@ impl SqlIncrementalCache {
(?1, ?2, ?3)";
self.conn.execute(
sql,
params![
path.to_string_lossy(),
&self.state_hash.to_string(),
&source_hash,
],
params![path.to_string_lossy(), self.state_hash, source_hash],
)?;
Ok(())
}
@ -223,51 +218,51 @@ mod test {
#[test]
pub fn sql_cache_general_use() {
let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
let cache = SqlIncrementalCache::new(conn, 1);
let cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
let path = PathBuf::from("/mod.ts");
assert_eq!(cache.get_source_hash(&path), None);
cache.set_source_hash(&path, 2).unwrap();
assert_eq!(cache.get_source_hash(&path), Some(2));
cache.set_source_hash(&path, CacheDBHash::new(2)).unwrap();
assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
// try changing the cli version (should clear)
let conn = cache.conn.recreate_with_version("2.0.0");
let mut cache = SqlIncrementalCache::new(conn, 1);
let mut cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
assert_eq!(cache.get_source_hash(&path), None);
// add back the file to the cache
cache.set_source_hash(&path, 2).unwrap();
assert_eq!(cache.get_source_hash(&path), Some(2));
cache.set_source_hash(&path, CacheDBHash::new(2)).unwrap();
assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
// try changing the state hash
cache.state_hash = 2;
cache.state_hash = CacheDBHash::new(2);
assert_eq!(cache.get_source_hash(&path), None);
cache.state_hash = 1;
cache.state_hash = CacheDBHash::new(1);
// should return now that everything is back
assert_eq!(cache.get_source_hash(&path), Some(2));
assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
// recreating the cache should not remove the data because the CLI version and state hash are the same
let conn = cache.conn.recreate_with_version("2.0.0");
let cache = SqlIncrementalCache::new(conn, 1);
assert_eq!(cache.get_source_hash(&path), Some(2));
let cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));
// now try replacing and using another path
cache.set_source_hash(&path, 3).unwrap();
cache.set_source_hash(&path, 4).unwrap();
cache.set_source_hash(&path, CacheDBHash::new(3)).unwrap();
cache.set_source_hash(&path, CacheDBHash::new(4)).unwrap();
let path2 = PathBuf::from("/mod2.ts");
cache.set_source_hash(&path2, 5).unwrap();
assert_eq!(cache.get_source_hash(&path), Some(4));
assert_eq!(cache.get_source_hash(&path2), Some(5));
cache.set_source_hash(&path2, CacheDBHash::new(5)).unwrap();
assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(4)));
assert_eq!(cache.get_source_hash(&path2), Some(CacheDBHash::new(5)));
}
#[tokio::test]
pub async fn incremental_cache_general_use() {
let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
let sql_cache = SqlIncrementalCache::new(conn, 1);
let sql_cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
let file_path = PathBuf::from("/mod.ts");
let file_text = "test";
let file_hash = FastInsecureHasher::hash(file_text);
let file_hash = CacheDBHash::from_source(file_text);
sql_cache.set_source_hash(&file_path, file_hash).unwrap();
let cache = IncrementalCacheInner::from_sql_incremental_cache(
sql_cache,

cli/cache/mod.rs vendored

@ -38,6 +38,7 @@ mod module_info;
mod node;
mod parsed_source;
pub use cache_db::CacheDBHash;
pub use caches::Caches;
pub use check::TypeCheckCache;
pub use code_cache::CodeCache;
@ -101,8 +102,6 @@ pub type LocalLspHttpCache =
deno_cache_dir::LocalLspHttpCache<RealDenoCacheEnv>;
pub use deno_cache_dir::HttpCache;
use self::module_info::ModuleInfoCacheSourceHash;
/// A "wrapper" for the FileFetcher and DiskCache for the Deno CLI that provides
/// a concise interface to the DENO_DIR when building module graphs.
pub struct FetchCacher {
@ -297,11 +296,11 @@ impl Loader for FetchCacher {
module_info: &deno_graph::ModuleInfo,
) {
log::debug!("Caching module info for {}", specifier);
let source_hash = ModuleInfoCacheSourceHash::from_source(source);
let source_hash = CacheDBHash::from_source(source);
let result = self.module_info_cache.set_module_info(
specifier,
MediaType::from_specifier(specifier),
&source_hash,
source_hash,
module_info,
);
if let Err(err) = result {


@ -12,8 +12,8 @@ use deno_runtime::deno_webstorage::rusqlite::params;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
use super::FastInsecureHasher;
use super::ParsedSourceCache;
const SELECT_MODULE_INFO: &str = "
@ -28,40 +28,19 @@ WHERE
LIMIT 1";
pub static MODULE_INFO_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: "CREATE TABLE IF NOT EXISTS moduleinfocache (
specifier TEXT PRIMARY KEY,
media_type TEXT NOT NULL,
source_hash TEXT NOT NULL,
module_info TEXT NOT NULL
);",
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS moduleinfocache (",
"specifier TEXT PRIMARY KEY,",
"media_type INTEGER NOT NULL,",
"source_hash INTEGER NOT NULL,",
"module_info TEXT NOT NULL",
");"
),
on_version_change: "DELETE FROM moduleinfocache;",
preheat_queries: &[SELECT_MODULE_INFO],
on_failure: CacheFailure::InMemory,
};
#[derive(Debug)]
pub struct ModuleInfoCacheSourceHash(String);
impl ModuleInfoCacheSourceHash {
pub fn new(hash: u64) -> Self {
Self(hash.to_string())
}
pub fn from_source(source: &[u8]) -> Self {
Self::new(FastInsecureHasher::hash(source))
}
pub fn as_str(&self) -> &str {
&self.0
}
}
impl From<ModuleInfoCacheSourceHash> for String {
fn from(source_hash: ModuleInfoCacheSourceHash) -> String {
source_hash.0
}
}
/// A cache of `deno_graph::ModuleInfo` objects. Using this leads to a considerable
/// performance improvement because when it exists we can skip parsing a module for
/// deno_graph.
@ -91,7 +70,7 @@ impl ModuleInfoCache {
&self,
specifier: &ModuleSpecifier,
media_type: MediaType,
expected_source_hash: &ModuleInfoCacheSourceHash,
expected_source_hash: CacheDBHash,
) -> Result<Option<ModuleInfo>, AnyError> {
let query = SELECT_MODULE_INFO;
let res = self.conn.query_row(
@ -99,7 +78,7 @@ impl ModuleInfoCache {
params![
&specifier.as_str(),
serialize_media_type(media_type),
expected_source_hash.as_str(),
expected_source_hash,
],
|row| {
let module_info: String = row.get(0)?;
@ -114,7 +93,7 @@ impl ModuleInfoCache {
&self,
specifier: &ModuleSpecifier,
media_type: MediaType,
source_hash: &ModuleInfoCacheSourceHash,
source_hash: CacheDBHash,
module_info: &ModuleInfo,
) -> Result<(), AnyError> {
let sql = "
@ -127,7 +106,7 @@ impl ModuleInfoCache {
params![
specifier.as_str(),
serialize_media_type(media_type),
source_hash.as_str(),
source_hash,
&serde_json::to_string(&module_info)?,
],
)?;
@ -159,11 +138,11 @@ impl<'a> deno_graph::ModuleAnalyzer for ModuleInfoCacheModuleAnalyzer<'a> {
media_type: MediaType,
) -> Result<ModuleInfo, deno_ast::ParseDiagnostic> {
// attempt to load from the cache
let source_hash = ModuleInfoCacheSourceHash::from_source(source.as_bytes());
let source_hash = CacheDBHash::from_source(&source);
match self.module_info_cache.get_module_info(
specifier,
media_type,
&source_hash,
source_hash,
) {
Ok(Some(info)) => return Ok(info),
Ok(None) => {}
@ -193,7 +172,7 @@ impl<'a> deno_graph::ModuleAnalyzer for ModuleInfoCacheModuleAnalyzer<'a> {
if let Err(err) = self.module_info_cache.set_module_info(
specifier,
media_type,
&source_hash,
source_hash,
&module_info,
) {
log::debug!(
@ -207,27 +186,25 @@ impl<'a> deno_graph::ModuleAnalyzer for ModuleInfoCacheModuleAnalyzer<'a> {
}
}
// todo(dsherret): change this to be stored as an integer next time
// the cache version is bumped
fn serialize_media_type(media_type: MediaType) -> &'static str {
fn serialize_media_type(media_type: MediaType) -> i64 {
use MediaType::*;
match media_type {
JavaScript => "1",
Jsx => "2",
Mjs => "3",
Cjs => "4",
TypeScript => "5",
Mts => "6",
Cts => "7",
Dts => "8",
Dmts => "9",
Dcts => "10",
Tsx => "11",
Json => "12",
Wasm => "13",
TsBuildInfo => "14",
SourceMap => "15",
Unknown => "16",
JavaScript => 1,
Jsx => 2,
Mjs => 3,
Cjs => 4,
TypeScript => 5,
Mts => 6,
Cts => 7,
Dts => 8,
Dmts => 9,
Dcts => 10,
Tsx => 11,
Json => 12,
Wasm => 13,
TsBuildInfo => 14,
SourceMap => 15,
Unknown => 16,
}
}
@ -250,7 +227,7 @@ mod test {
.get_module_info(
&specifier1,
MediaType::JavaScript,
&ModuleInfoCacheSourceHash::new(1)
CacheDBHash::new(1)
)
.unwrap(),
None
@ -274,7 +251,7 @@ mod test {
.set_module_info(
&specifier1,
MediaType::JavaScript,
&ModuleInfoCacheSourceHash::new(1),
CacheDBHash::new(1),
&module_info,
)
.unwrap();
@ -283,7 +260,7 @@ mod test {
.get_module_info(
&specifier1,
MediaType::JavaScript,
&ModuleInfoCacheSourceHash::new(1)
CacheDBHash::new(1)
)
.unwrap(),
Some(module_info.clone())
@ -293,7 +270,7 @@ mod test {
.get_module_info(
&specifier2,
MediaType::JavaScript,
&ModuleInfoCacheSourceHash::new(1)
CacheDBHash::new(1)
)
.unwrap(),
None,
@ -304,7 +281,7 @@ mod test {
.get_module_info(
&specifier1,
MediaType::TypeScript,
&ModuleInfoCacheSourceHash::new(1)
CacheDBHash::new(1)
)
.unwrap(),
None,
@ -315,7 +292,7 @@ mod test {
.get_module_info(
&specifier1,
MediaType::JavaScript,
&ModuleInfoCacheSourceHash::new(2)
CacheDBHash::new(2)
)
.unwrap(),
None,
@ -330,7 +307,7 @@ mod test {
.get_module_info(
&specifier1,
MediaType::JavaScript,
&ModuleInfoCacheSourceHash::new(1)
CacheDBHash::new(1)
)
.unwrap(),
Some(module_info)
@ -345,7 +322,7 @@ mod test {
.get_module_info(
&specifier1,
MediaType::JavaScript,
&ModuleInfoCacheSourceHash::new(1)
CacheDBHash::new(1)
)
.unwrap(),
None,

cli/cache/node.rs vendored

@ -9,15 +9,17 @@ use crate::node::CliCjsAnalysis;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheFailure;
use super::FastInsecureHasher;
use super::CacheDBHash;
pub static NODE_ANALYSIS_CACHE_DB: CacheDBConfiguration =
CacheDBConfiguration {
table_initializer: "CREATE TABLE IF NOT EXISTS cjsanalysiscache (
specifier TEXT PRIMARY KEY,
source_hash TEXT NOT NULL,
data TEXT NOT NULL
);",
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS cjsanalysiscache (",
"specifier TEXT PRIMARY KEY,",
"source_hash INTEGER NOT NULL,",
"data TEXT NOT NULL",
");"
),
on_version_change: "DELETE FROM cjsanalysiscache;",
preheat_queries: &[],
on_failure: CacheFailure::InMemory,
@ -35,10 +37,6 @@ impl NodeAnalysisCache {
}
}
pub fn compute_source_hash(text: &str) -> String {
FastInsecureHasher::hash(text).to_string()
}
fn ensure_ok<T: Default>(res: Result<T, AnyError>) -> T {
match res {
Ok(x) => x,
@ -59,7 +57,7 @@ impl NodeAnalysisCache {
pub fn get_cjs_analysis(
&self,
specifier: &str,
expected_source_hash: &str,
expected_source_hash: CacheDBHash,
) -> Option<CliCjsAnalysis> {
Self::ensure_ok(
self.inner.get_cjs_analysis(specifier, expected_source_hash),
@ -69,7 +67,7 @@ impl NodeAnalysisCache {
pub fn set_cjs_analysis(
&self,
specifier: &str,
source_hash: &str,
source_hash: CacheDBHash,
cjs_analysis: &CliCjsAnalysis,
) {
Self::ensure_ok(self.inner.set_cjs_analysis(
@ -93,7 +91,7 @@ impl NodeAnalysisCacheInner {
pub fn get_cjs_analysis(
&self,
specifier: &str,
expected_source_hash: &str,
expected_source_hash: CacheDBHash,
) -> Result<Option<CliCjsAnalysis>, AnyError> {
let query = "
SELECT
@ -106,7 +104,7 @@ impl NodeAnalysisCacheInner {
LIMIT 1";
let res = self.conn.query_row(
query,
params![specifier, &expected_source_hash],
params![specifier, expected_source_hash],
|row| {
let analysis_info: String = row.get(0)?;
Ok(serde_json::from_str(&analysis_info)?)
@ -118,7 +116,7 @@ impl NodeAnalysisCacheInner {
pub fn set_cjs_analysis(
&self,
specifier: &str,
source_hash: &str,
source_hash: CacheDBHash,
cjs_analysis: &CliCjsAnalysis,
) -> Result<(), AnyError> {
let sql = "
@ -130,7 +128,7 @@ impl NodeAnalysisCacheInner {
sql,
params![
specifier,
&source_hash.to_string(),
source_hash,
&serde_json::to_string(&cjs_analysis)?,
],
)?;
@ -147,34 +145,47 @@ mod test {
let conn = CacheDB::in_memory(&NODE_ANALYSIS_CACHE_DB, "1.0.0");
let cache = NodeAnalysisCacheInner::new(conn);
assert!(cache.get_cjs_analysis("file.js", "2").unwrap().is_none());
assert!(cache
.get_cjs_analysis("file.js", CacheDBHash::new(2))
.unwrap()
.is_none());
let cjs_analysis = CliCjsAnalysis::Cjs {
exports: vec!["export1".to_string()],
reexports: vec!["re-export1".to_string()],
};
cache
.set_cjs_analysis("file.js", "2", &cjs_analysis)
.set_cjs_analysis("file.js", CacheDBHash::new(2), &cjs_analysis)
.unwrap();
assert!(cache
.get_cjs_analysis("file.js", CacheDBHash::new(3))
.unwrap()
.is_none()); // different hash
let actual_cjs_analysis = cache
.get_cjs_analysis("file.js", CacheDBHash::new(2))
.unwrap()
.unwrap();
assert!(cache.get_cjs_analysis("file.js", "3").unwrap().is_none()); // different hash
let actual_cjs_analysis =
cache.get_cjs_analysis("file.js", "2").unwrap().unwrap();
assert_eq!(actual_cjs_analysis, cjs_analysis);
// adding when already exists should not cause issue
cache
.set_cjs_analysis("file.js", "2", &cjs_analysis)
.set_cjs_analysis("file.js", CacheDBHash::new(2), &cjs_analysis)
.unwrap();
// recreating with same cli version should still have it
let conn = cache.conn.recreate_with_version("1.0.0");
let cache = NodeAnalysisCacheInner::new(conn);
let actual_analysis =
cache.get_cjs_analysis("file.js", "2").unwrap().unwrap();
let actual_analysis = cache
.get_cjs_analysis("file.js", CacheDBHash::new(2))
.unwrap()
.unwrap();
assert_eq!(actual_analysis, cjs_analysis);
// now changing the cli version should clear it
let conn = cache.conn.recreate_with_version("2.0.0");
let cache = NodeAnalysisCacheInner::new(conn);
assert!(cache.get_cjs_analysis("file.js", "2").unwrap().is_none());
assert!(cache
.get_cjs_analysis("file.js", CacheDBHash::new(2))
.unwrap()
.is_none());
}
}


@ -34,7 +34,7 @@ impl Emitter {
emit_options: deno_ast::EmitOptions,
) -> Self {
let transpile_and_emit_options_hash = {
let mut hasher = FastInsecureHasher::default();
let mut hasher = FastInsecureHasher::new_without_deno_version();
hasher.write_hashable(&transpile_options);
hasher.write_hashable(&emit_options);
hasher.finish()
@ -188,7 +188,7 @@ impl Emitter {
/// options then generates a string hash which can be stored to
/// determine if the cached emit is valid or not.
fn get_source_hash(&self, source_text: &str) -> u64 {
FastInsecureHasher::new()
FastInsecureHasher::new_without_deno_version() // stored in the transpile_and_emit_options_hash
.write_str(source_text)
.write_u64(self.transpile_and_emit_options_hash)
.finish()


@ -807,7 +807,7 @@ impl Settings {
}
pub fn enable_settings_hash(&self) -> u64 {
let mut hasher = FastInsecureHasher::default();
let mut hasher = FastInsecureHasher::new_without_deno_version();
let unscoped = self.get_unscoped();
hasher.write_hashable(unscoped.enable);
hasher.write_hashable(&unscoped.enable_paths);


@ -382,7 +382,9 @@ impl<TGraphContainer: ModuleGraphContainer>
let code_cache = if module_type == ModuleType::JavaScript {
self.shared.code_cache.as_ref().map(|cache| {
let code_hash = FastInsecureHasher::hash(&code);
let code_hash = FastInsecureHasher::new_deno_versioned()
.write_hashable(&code)
.finish();
let data = cache
.get_sync(specifier, code_cache::CodeCacheType::EsModule, code_hash)
.map(Cow::from)


@ -13,6 +13,7 @@ use deno_runtime::deno_node::analyze::NodeCodeTranslator;
use serde::Deserialize;
use serde::Serialize;
use crate::cache::CacheDBHash;
use crate::cache::NodeAnalysisCache;
use crate::util::fs::canonicalize_path_maybe_not_exists;
@ -63,10 +64,9 @@ impl CliCjsCodeAnalyzer {
specifier: &ModuleSpecifier,
source: &str,
) -> Result<CliCjsAnalysis, AnyError> {
let source_hash = NodeAnalysisCache::compute_source_hash(source);
if let Some(analysis) = self
.cache
.get_cjs_analysis(specifier.as_str(), &source_hash)
let source_hash = CacheDBHash::from_source(source);
if let Some(analysis) =
self.cache.get_cjs_analysis(specifier.as_str(), source_hash)
{
return Ok(analysis);
}
@ -107,7 +107,7 @@ impl CliCjsCodeAnalyzer {
self
.cache
.set_cjs_analysis(specifier.as_str(), &source_hash, &analysis);
.set_cjs_analysis(specifier.as_str(), source_hash, &analysis);
Ok(analysis)
}


@ -594,7 +594,7 @@ impl CliNpmResolver for ManagedCliNpmResolver {
.into_iter()
.collect::<Vec<_>>();
package_reqs.sort_by(|a, b| a.0.cmp(&b.0)); // determinism
let mut hasher = FastInsecureHasher::new();
let mut hasher = FastInsecureHasher::new_without_deno_version();
// ensure the cache gets busted when turning nodeModulesDir on or off
// as this could cause changes in resolution
hasher.write_hashable(self.fs_resolver.node_modules_path().is_some());


@ -19,6 +19,7 @@ use crate::args::TsConfig;
use crate::args::TsConfigType;
use crate::args::TsTypeLib;
use crate::args::TypeCheckMode;
use crate::cache::CacheDBHash;
use crate::cache::Caches;
use crate::cache::FastInsecureHasher;
use crate::cache::TypeCheckCache;
@ -28,7 +29,6 @@ use crate::npm::CliNpmResolver;
use crate::tsc;
use crate::tsc::Diagnostics;
use crate::util::path::to_percent_decoded_str;
use crate::version;
/// Options for performing a check of a module graph. Note that the decision to
/// emit or not is determined by the `ts_config` settings.
@ -174,9 +174,8 @@ impl TypeChecker {
// to make tsc build info work, we need to consistently hash modules, so that
// tsc can better determine if an emit is still valid or not, so we provide
// that data here.
let hash_data = FastInsecureHasher::new()
let hash_data = FastInsecureHasher::new_deno_versioned()
.write(&ts_config.as_bytes())
.write_str(version::deno())
.finish();
// add fast check to the graph before getting the roots
@ -246,7 +245,7 @@ impl TypeChecker {
}
enum CheckHashResult {
Hash(u64),
Hash(CacheDBHash),
NoFiles,
}
@ -258,7 +257,7 @@ fn get_check_hash(
type_check_mode: TypeCheckMode,
ts_config: &TsConfig,
) -> CheckHashResult {
let mut hasher = FastInsecureHasher::new();
let mut hasher = FastInsecureHasher::new_deno_versioned();
hasher.write_u8(match type_check_mode {
TypeCheckMode::All => 0,
TypeCheckMode::Local => 1,
@ -340,7 +339,7 @@ fn get_check_hash(
// no files to type check
CheckHashResult::NoFiles
} else {
CheckHashResult::Hash(hasher.finish())
CheckHashResult::Hash(CacheDBHash::new(hasher.finish()))
}
}


@ -248,7 +248,7 @@ fn get_maybe_hash(
}
fn get_hash(source: &str, hash_data: u64) -> String {
FastInsecureHasher::new()
FastInsecureHasher::new_without_deno_version()
.write_str(source)
.write_u64(hash_data)
.finish()


@ -14,7 +14,7 @@ fn fast_check_cache() {
let test_context = TestContextBuilder::for_jsr().use_temp_cwd().build();
let deno_dir = test_context.deno_dir();
let temp_dir = test_context.temp_dir();
let type_check_cache_path = deno_dir.path().join("check_cache_v1");
let type_check_cache_path = deno_dir.path().join("check_cache_v2");
temp_dir.write(
"main.ts",


@ -29,6 +29,8 @@ use util::PathRef;
use util::TestContext;
use util::TestContextBuilder;
const CODE_CACHE_DB_FILE_NAME: &str = "v8_code_cache_v2";
itest!(stdout_write_all {
args: "run --quiet run/stdout_write_all.ts",
output: "run/stdout_write_all.out",
@ -5066,7 +5068,7 @@ fn code_cache_test() {
assert!(!output.stderr().contains("V8 code cache hit"));
// Check that the code cache database exists.
let code_cache_path = deno_dir.path().join("v8_code_cache_v1");
let code_cache_path = deno_dir.path().join(CODE_CACHE_DB_FILE_NAME);
assert!(code_cache_path.exists());
}
@ -5157,7 +5159,7 @@ fn code_cache_npm_test() {
assert!(!output.stderr().contains("V8 code cache hit"));
// Check that the code cache database exists.
let code_cache_path = deno_dir.path().join("v8_code_cache_v1");
let code_cache_path = deno_dir.path().join(CODE_CACHE_DB_FILE_NAME);
assert!(code_cache_path.exists());
}
@ -5217,7 +5219,7 @@ fn code_cache_npm_with_require_test() {
assert!(!output.stderr().contains("V8 code cache hit"));
// Check that the code cache database exists.
let code_cache_path = deno_dir.path().join("v8_code_cache_v1");
let code_cache_path = deno_dir.path().join(CODE_CACHE_DB_FILE_NAME);
assert!(code_cache_path.exists());
}