// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.

use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;

use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
use deno_core::serde_json;
use deno_core::task::spawn;
use deno_core::task::JoinHandle;
use deno_runtime::deno_webstorage::rusqlite::params;
use serde::Serialize;

use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheFailure;
use super::common::FastInsecureHasher;

pub static INCREMENTAL_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
|
|
|
|
table_initializer: "CREATE TABLE IF NOT EXISTS incrementalcache (
|
|
|
|
file_path TEXT PRIMARY KEY,
|
|
|
|
state_hash TEXT NOT NULL,
|
|
|
|
source_hash TEXT NOT NULL
|
|
|
|
);",
|
|
|
|
on_version_change: "DELETE FROM incrementalcache;",
|
|
|
|
preheat_queries: &[],
|
|
|
|
// If the cache fails, just ignore all caching attempts
|
|
|
|
on_failure: CacheFailure::Blackhole,
|
|
|
|
};
|
2022-07-12 18:58:39 -04:00
|
|
|
|
2022-04-19 22:14:00 -04:00
|
|
|
/// Cache used to skip formatting/linting a file again when we
|
|
|
|
/// know it is already formatted or has no lint diagnostics.
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
pub struct IncrementalCache(IncrementalCacheInner);
|
2022-04-19 22:14:00 -04:00
|
|
|
|
|
|
|
impl IncrementalCache {
|
|
|
|
pub fn new<TState: Serialize>(
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
db: CacheDB,
|
2022-04-19 22:14:00 -04:00
|
|
|
state: &TState,
|
|
|
|
initial_file_paths: &[PathBuf],
|
|
|
|
) -> Self {
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
IncrementalCache(IncrementalCacheInner::new(db, state, initial_file_paths))
|
2022-04-19 22:14:00 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
self.0.is_file_same(file_path, file_text)
|
2022-04-19 22:14:00 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn update_file(&self, file_path: &Path, file_text: &str) {
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
self.0.update_file(file_path, file_text)
|
2022-04-19 22:14:00 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn wait_completion(&self) {
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
self.0.wait_completion().await;
|
2022-04-19 22:14:00 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Messages sent to the dedicated task that owns the database connection.
enum ReceiverMessage {
  /// Persist the given source hash for the given file path.
  Update(PathBuf, u64),
  /// Stop processing so the task can exit.
  Exit,
}

struct IncrementalCacheInner {
|
|
|
|
previous_hashes: HashMap<PathBuf, u64>,
|
|
|
|
sender: tokio::sync::mpsc::UnboundedSender<ReceiverMessage>,
|
|
|
|
handle: Mutex<Option<JoinHandle<()>>>,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl IncrementalCacheInner {
|
|
|
|
pub fn new<TState: Serialize>(
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
db: CacheDB,
|
2022-04-19 22:14:00 -04:00
|
|
|
state: &TState,
|
|
|
|
initial_file_paths: &[PathBuf],
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
) -> Self {
|
2023-07-10 17:45:09 -04:00
|
|
|
let state_hash =
|
|
|
|
FastInsecureHasher::hash(serde_json::to_string(state).unwrap());
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
let sql_cache = SqlIncrementalCache::new(db, state_hash);
|
|
|
|
Self::from_sql_incremental_cache(sql_cache, initial_file_paths)
|
2022-04-19 22:14:00 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
fn from_sql_incremental_cache(
|
|
|
|
cache: SqlIncrementalCache,
|
|
|
|
initial_file_paths: &[PathBuf],
|
|
|
|
) -> Self {
|
|
|
|
let mut previous_hashes = HashMap::new();
|
|
|
|
for path in initial_file_paths {
|
|
|
|
if let Some(hash) = cache.get_source_hash(path) {
|
|
|
|
previous_hashes.insert(path.to_path_buf(), hash);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
let (sender, mut receiver) =
|
|
|
|
tokio::sync::mpsc::unbounded_channel::<ReceiverMessage>();
|
|
|
|
|
|
|
|
// sqlite isn't `Sync`, so we do all the updating on a dedicated task
|
2023-05-14 17:40:01 -04:00
|
|
|
let handle = spawn(async move {
|
2022-04-19 22:14:00 -04:00
|
|
|
while let Some(message) = receiver.recv().await {
|
|
|
|
match message {
|
|
|
|
ReceiverMessage::Update(path, hash) => {
|
|
|
|
let _ = cache.set_source_hash(&path, hash);
|
|
|
|
}
|
|
|
|
ReceiverMessage::Exit => break,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
|
|
|
IncrementalCacheInner {
|
|
|
|
previous_hashes,
|
|
|
|
sender,
|
|
|
|
handle: Mutex::new(Some(handle)),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
|
|
|
|
match self.previous_hashes.get(file_path) {
|
2023-07-10 17:45:09 -04:00
|
|
|
Some(hash) => *hash == FastInsecureHasher::hash(file_text),
|
2022-04-19 22:14:00 -04:00
|
|
|
None => false,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn update_file(&self, file_path: &Path, file_text: &str) {
|
2023-07-10 17:45:09 -04:00
|
|
|
let hash = FastInsecureHasher::hash(file_text);
|
2022-04-19 22:14:00 -04:00
|
|
|
if let Some(previous_hash) = self.previous_hashes.get(file_path) {
|
|
|
|
if *previous_hash == hash {
|
|
|
|
return; // do not bother updating the db file because nothing has changed
|
|
|
|
}
|
|
|
|
}
|
|
|
|
let _ = self
|
|
|
|
.sender
|
|
|
|
.send(ReceiverMessage::Update(file_path.to_path_buf(), hash));
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn wait_completion(&self) {
|
|
|
|
if self.sender.send(ReceiverMessage::Exit).is_err() {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
let handle = self.handle.lock().take();
|
|
|
|
if let Some(handle) = handle {
|
|
|
|
handle.await.unwrap();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
struct SqlIncrementalCache {
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
conn: CacheDB,
|
2022-04-19 22:14:00 -04:00
|
|
|
/// A hash of the state used to produce the formatting/linting other than
|
|
|
|
/// the CLI version. This state is a hash of the configuration and ensures
|
|
|
|
/// we format/lint a file when the configuration changes.
|
|
|
|
state_hash: u64,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl SqlIncrementalCache {
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
pub fn new(conn: CacheDB, state_hash: u64) -> Self {
|
|
|
|
Self { conn, state_hash }
|
2022-04-19 22:14:00 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn get_source_hash(&self, path: &Path) -> Option<u64> {
|
|
|
|
match self.get_source_hash_result(path) {
|
|
|
|
Ok(option) => option,
|
|
|
|
Err(err) => {
|
|
|
|
if cfg!(debug_assertions) {
|
2023-01-27 10:43:16 -05:00
|
|
|
panic!("Error retrieving hash: {err}");
|
2022-04-19 22:14:00 -04:00
|
|
|
} else {
|
|
|
|
// fail silently when not debugging
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_source_hash_result(
|
|
|
|
&self,
|
|
|
|
path: &Path,
|
|
|
|
) -> Result<Option<u64>, AnyError> {
|
|
|
|
let query = "
|
|
|
|
SELECT
|
|
|
|
source_hash
|
|
|
|
FROM
|
|
|
|
incrementalcache
|
|
|
|
WHERE
|
|
|
|
file_path=?1
|
|
|
|
AND state_hash=?2
|
|
|
|
LIMIT 1";
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
let res = self.conn.query_row(
|
|
|
|
query,
|
|
|
|
params![path.to_string_lossy(), self.state_hash.to_string()],
|
|
|
|
|row| {
|
|
|
|
let hash: String = row.get(0)?;
|
|
|
|
Ok(hash.parse::<u64>()?)
|
|
|
|
},
|
|
|
|
)?;
|
|
|
|
Ok(res)
|
2022-04-19 22:14:00 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn set_source_hash(
|
|
|
|
&self,
|
|
|
|
path: &Path,
|
|
|
|
source_hash: u64,
|
|
|
|
) -> Result<(), AnyError> {
|
|
|
|
let sql = "
|
|
|
|
INSERT OR REPLACE INTO
|
|
|
|
incrementalcache (file_path, state_hash, source_hash)
|
|
|
|
VALUES
|
|
|
|
(?1, ?2, ?3)";
|
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by
somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity
to move all the cache management code to a single place and reduce
duplication. While the PR has a net gain of lines, much of that is just
being a bit more deliberate with how we're recovering from errors.
The existing caches had various policies for dealing with cache
corruption, so I've unified them and tried to isolate the decisions we
make for recovery in a single place (see `open_connection` in
`CacheDB`). The policy I chose was:
1. Retry twice to open on-disk caches
2. If that fails, try to delete the file and recreate it on-disk
3. If we fail to delete the file or re-create a new cache, use a
fallback strategy that can be chosen per-cache: InMemory (temporary
cache for the process run), BlackHole (ignore writes, return empty
reads), or Error (fail on every operation).
The caches all use the same general code now, and share the cache
failure recovery policy.
In addition, it cleans up a TODO in the `NodeAnalysisCache`.
2023-03-27 18:01:52 -04:00
|
|
|
self.conn.execute(
|
|
|
|
sql,
|
|
|
|
params![
|
|
|
|
path.to_string_lossy(),
|
|
|
|
&self.state_hash.to_string(),
|
|
|
|
&source_hash,
|
|
|
|
],
|
|
|
|
)?;
|
2022-04-19 22:14:00 -04:00
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[cfg(test)]
mod test {
  use std::path::PathBuf;

  use super::*;

  #[test]
  pub fn sql_cache_general_use() {
    let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
    let cache = SqlIncrementalCache::new(conn, 1);
    let path = PathBuf::from("/mod.ts");

    assert_eq!(cache.get_source_hash(&path), None);
    cache.set_source_hash(&path, 2).unwrap();
    assert_eq!(cache.get_source_hash(&path), Some(2));

    // try changing the cli version (should clear)
    let conn = cache.conn.recreate_with_version("2.0.0");
    let mut cache = SqlIncrementalCache::new(conn, 1);
    assert_eq!(cache.get_source_hash(&path), None);

    // add back the file to the cache
    cache.set_source_hash(&path, 2).unwrap();
    assert_eq!(cache.get_source_hash(&path), Some(2));

    // try changing the state hash
    cache.state_hash = 2;
    assert_eq!(cache.get_source_hash(&path), None);
    cache.state_hash = 1;

    // should return now that everything is back
    assert_eq!(cache.get_source_hash(&path), Some(2));

    // recreating the cache should not remove the data because the CLI version and state hash is the same
    let conn = cache.conn.recreate_with_version("2.0.0");
    let cache = SqlIncrementalCache::new(conn, 1);
    assert_eq!(cache.get_source_hash(&path), Some(2));

    // now try replacing and using another path
    cache.set_source_hash(&path, 3).unwrap();
    cache.set_source_hash(&path, 4).unwrap();
    let path2 = PathBuf::from("/mod2.ts");
    cache.set_source_hash(&path2, 5).unwrap();
    assert_eq!(cache.get_source_hash(&path), Some(4));
    assert_eq!(cache.get_source_hash(&path2), Some(5));
  }

  #[tokio::test]
  pub async fn incremental_cache_general_use() {
    let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
    let sql_cache = SqlIncrementalCache::new(conn, 1);
    let file_path = PathBuf::from("/mod.ts");
    let file_text = "test";
    let file_hash = FastInsecureHasher::hash(file_text);
    sql_cache.set_source_hash(&file_path, file_hash).unwrap();
    let cache = IncrementalCacheInner::from_sql_incremental_cache(
      sql_cache,
      &[file_path.clone()],
    );

    assert!(cache.is_file_same(&file_path, "test"));
    assert!(!cache.is_file_same(&file_path, "other"));

    // just ensure this doesn't panic
    cache.update_file(&file_path, "other");
  }
}