// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.

use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
use deno_ast::ModuleSpecifier;
use deno_core::error::AnyError;
use deno_runtime::deno_webstorage::rusqlite::params;

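/// SQLite configuration for the type check cache: a `checkcache` table of
/// successful check-input hashes and a `tsbuildinfo` table of TypeScript
/// build info keyed by module specifier, both cleared when the CLI version
/// changes.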
pub static TYPE_CHECK_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
  table_initializer: concat!(
    "CREATE TABLE IF NOT EXISTS checkcache (",
      "check_hash INT PRIMARY KEY",
    ");",
    "CREATE TABLE IF NOT EXISTS tsbuildinfo (",
      "specifier TEXT PRIMARY KEY,",
      "text TEXT NOT NULL",
    ");",
  ),
  on_version_change: concat!(
    "DELETE FROM checkcache;",
    "DELETE FROM tsbuildinfo;"
  ),
  preheat_queries: &[],
  // If the cache fails, just ignore all caching attempts
  on_failure: CacheFailure::Blackhole,
};

/// The cache used to tell whether type checking should occur again.
///
/// This simply stores a hash of the inputs of each successful type check
/// and only clears them out when changing CLI versions.
pub struct TypeCheckCache(CacheDB);

impl TypeCheckCache {
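  /// Wraps an already-opened `CacheDB` connection for use as the type check
  /// cache.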
  pub fn new(db: CacheDB) -> Self {
    Self(db)
  }
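
  /// Returns whether the given hash of type check inputs has already been
  /// recorded as a successful check. Lookup errors panic in debug builds; in
  /// release builds they are logged and treated as a cache miss.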
  pub fn has_check_hash(&self, hash: CacheDBHash) -> bool {
    match self.hash_check_hash_result(hash) {
      Ok(val) => val,
      Err(err) => {
        if cfg!(debug_assertions) {
          panic!("Error retrieving hash: {err}");
        } else {
          log::debug!("Error retrieving hash: {}", err);
          // fail silently when not debugging
          false
        }
      }
    }
  }
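
  /// Queries the `checkcache` table for the given hash, propagating any
  /// database error to the caller.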
  fn hash_check_hash_result(
    &self,
    hash: CacheDBHash,
  ) -> Result<bool, AnyError> {
    self.0.exists(
      "SELECT * FROM checkcache WHERE check_hash=?1 LIMIT 1",
      params![hash],
    )
  }
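
  /// Records the hash of a successful type check. Failures panic in debug
  /// builds and are only logged in release builds.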
  pub fn add_check_hash(&self, check_hash: CacheDBHash) {
    if let Err(err) = self.add_check_hash_result(check_hash) {
      if cfg!(debug_assertions) {
        panic!("Error saving check hash: {err}");
      } else {
        log::debug!("Error saving check hash: {}", err);
      }
    }
  }
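
  /// Inserts (or replaces) the hash in the `checkcache` table.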
  fn add_check_hash_result(
    &self,
    check_hash: CacheDBHash,
  ) -> Result<(), AnyError> {
    let sql = "
    INSERT OR REPLACE INTO
      checkcache (check_hash)
    VALUES
      (?1)";
    self.0.execute(sql, params![check_hash])?;
    Ok(())
  }
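
  /// Looks up the stored TypeScript build info for a module specifier,
  /// returning `None` when there is no entry or the lookup fails.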
  pub fn get_tsbuildinfo(&self, specifier: &ModuleSpecifier) -> Option<String> {
    self
      .0
      .query_row(
        "SELECT text FROM tsbuildinfo WHERE specifier=?1 LIMIT 1",
        params![specifier.to_string()],
        |row| Ok(row.get::<_, String>(0)?),
      )
      .ok()?
  }
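
  /// Stores the TypeScript build info for a module specifier. Like the other
  /// write paths, errors panic in debug builds and are only logged in release
  /// builds.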
  pub fn set_tsbuildinfo(&self, specifier: &ModuleSpecifier, text: &str) {
    if let Err(err) = self.set_tsbuildinfo_result(specifier, text) {
      // should never error here, but if it ever does don't fail
      if cfg!(debug_assertions) {
        panic!("Error saving tsbuildinfo: {err}");
      } else {
        log::debug!("Error saving tsbuildinfo: {}", err);
      }
    }
  }
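
  /// Upserts the build info text keyed by specifier in the `tsbuildinfo`
  /// table.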
  fn set_tsbuildinfo_result(
    &self,
    specifier: &ModuleSpecifier,
    text: &str,
  ) -> Result<(), AnyError> {
    self.0.execute(
      "INSERT OR REPLACE INTO tsbuildinfo (specifier, text) VALUES (?1, ?2)",
      params![specifier.to_string(), text],
    )?;
    Ok(())
  }
}

#[cfg(test)]
mod test {
  use super::*;

  #[test]
  pub fn check_cache_general_use() {
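    // exercises the full lifecycle against an in-memory cache: recording and
    // querying check hashes and tsbuildinfo, clearing on a CLI version
    // change, and re-adding entries that already exist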
    let conn = CacheDB::in_memory(&TYPE_CHECK_CACHE_DB, "1.0.0");
    let cache = TypeCheckCache::new(conn);

    assert!(!cache.has_check_hash(CacheDBHash::new(1)));
    cache.add_check_hash(CacheDBHash::new(1));
    assert!(cache.has_check_hash(CacheDBHash::new(1)));
    assert!(!cache.has_check_hash(CacheDBHash::new(2)));

    let specifier1 = ModuleSpecifier::parse("file:///test.json").unwrap();
    assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
    cache.set_tsbuildinfo(&specifier1, "test");
    assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));

    // changing the CLI version should clear the cache
    let conn = cache.0.recreate_with_version("2.0.0");
    let cache = TypeCheckCache::new(conn);
    assert!(!cache.has_check_hash(CacheDBHash::new(1)));
    cache.add_check_hash(CacheDBHash::new(1));
    assert!(cache.has_check_hash(CacheDBHash::new(1)));
    assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
    cache.set_tsbuildinfo(&specifier1, "test");
    assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));

    // recreating the cache with the same CLI version should not remove the data
    let conn = cache.0.recreate_with_version("2.0.0");
    let cache = TypeCheckCache::new(conn);
    assert!(cache.has_check_hash(CacheDBHash::new(1)));
    assert!(!cache.has_check_hash(CacheDBHash::new(2)));
    assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));

    // adding a hash that already exists should not cause an issue
    cache.add_check_hash(CacheDBHash::new(1));
    assert!(cache.has_check_hash(CacheDBHash::new(1)));
    cache.set_tsbuildinfo(&specifier1, "other");
    assert_eq!(
      cache.get_tsbuildinfo(&specifier1),
      Some("other".to_string())
    );
  }
}