mirror of
https://github.com/denoland/deno.git
synced 2024-12-24 08:09:08 -05:00
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity to move all the cache management code to a single place and reduce duplication. While the PR has a net gain of lines, much of that is just being a bit more deliberate with how we're recovering from errors. The existing caches had various policies for dealing with cache corruption, so I've unified them and tried to isolate the decisions we make for recovery in a single place (see `open_connection` in `CacheDB`). The policy I chose was: 1. Retry twice to open on-disk caches 2. If that fails, try to delete the file and recreate it on-disk 3. If we fail to delete the file or re-create a new cache, use a fallback strategy that can be chosen per-cache: InMemory (temporary cache for the process run), BlackHole (ignore writes, return empty reads), or Error (fail on every operation). The caches all use the same general code now, and share the cache failure recovery policy. In addition, it cleans up a TODO in the `NodeAnalysisCache`.
This commit is contained in:
parent
8c051dbd1a
commit
86c3c4f343
17 changed files with 999 additions and 622 deletions
486
cli/cache/cache_db.rs
vendored
Normal file
486
cli/cache/cache_db.rs
vendored
Normal file
|
@ -0,0 +1,486 @@
|
|||
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use deno_core::error::AnyError;
|
||||
use deno_core::parking_lot::Mutex;
|
||||
use deno_core::parking_lot::MutexGuard;
|
||||
use deno_runtime::deno_webstorage::rusqlite;
|
||||
use deno_runtime::deno_webstorage::rusqlite::Connection;
|
||||
use deno_runtime::deno_webstorage::rusqlite::OptionalExtension;
|
||||
use deno_runtime::deno_webstorage::rusqlite::Params;
|
||||
use once_cell::sync::OnceCell;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// What should the cache should do on failure?
|
||||
#[derive(Default)]
|
||||
pub enum CacheFailure {
|
||||
/// Return errors if failure mode otherwise unspecified.
|
||||
#[default]
|
||||
Error,
|
||||
/// Create an in-memory cache that is not persistent.
|
||||
InMemory,
|
||||
/// Create a blackhole cache that ignores writes and returns empty reads.
|
||||
Blackhole,
|
||||
}
|
||||
|
||||
/// Configuration SQL and other parameters for a [`CacheDB`].
|
||||
pub struct CacheDBConfiguration {
|
||||
/// SQL to run for a new database.
|
||||
pub table_initializer: &'static str,
|
||||
/// SQL to run when the version from [`crate::version::deno()`] changes.
|
||||
pub on_version_change: &'static str,
|
||||
/// Prepared statements to pre-heat while initializing the database.
|
||||
pub preheat_queries: &'static [&'static str],
|
||||
/// What the cache should do on failure.
|
||||
pub on_failure: CacheFailure,
|
||||
}
|
||||
|
||||
impl CacheDBConfiguration {
|
||||
fn create_combined_sql(&self) -> String {
|
||||
format!(
|
||||
"
|
||||
PRAGMA journal_mode=OFF;
|
||||
PRAGMA synchronous=NORMAL;
|
||||
PRAGMA temp_store=memory;
|
||||
PRAGMA page_size=4096;
|
||||
PRAGMA mmap_size=6000000;
|
||||
PRAGMA optimize;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS info (
|
||||
key TEXT PRIMARY KEY,
|
||||
value TEXT NOT NULL
|
||||
);
|
||||
|
||||
{}
|
||||
",
|
||||
self.table_initializer
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
enum ConnectionState {
|
||||
Connected(Connection),
|
||||
Blackhole,
|
||||
Error(Arc<AnyError>),
|
||||
}
|
||||
|
||||
/// A cache database that eagerly initializes itself off-thread, preventing initialization operations
|
||||
/// from blocking the main thread.
|
||||
#[derive(Clone)]
|
||||
pub struct CacheDB {
|
||||
// TODO(mmastrac): We can probably simplify our thread-safe implementation here
|
||||
conn: Arc<Mutex<OnceCell<ConnectionState>>>,
|
||||
path: Option<PathBuf>,
|
||||
config: &'static CacheDBConfiguration,
|
||||
version: &'static str,
|
||||
}
|
||||
|
||||
impl Drop for CacheDB {
|
||||
fn drop(&mut self) {
|
||||
// No need to clean up an in-memory cache in an way -- just drop and go.
|
||||
let path = match self.path.take() {
|
||||
Some(path) => path,
|
||||
_ => return,
|
||||
};
|
||||
|
||||
// TODO(mmastrac): we should ensure tokio runtimes are consistently available or not
|
||||
if tokio::runtime::Handle::try_current().is_err() {
|
||||
return;
|
||||
}
|
||||
|
||||
// For on-disk caches, see if we're the last holder of the Arc.
|
||||
let arc = std::mem::take(&mut self.conn);
|
||||
if let Ok(inner) = Arc::try_unwrap(arc) {
|
||||
// Hand off SQLite connection to another thread to do the surprisingly expensive cleanup
|
||||
let inner = inner.into_inner().into_inner();
|
||||
if let Some(conn) = inner {
|
||||
tokio::task::spawn_blocking(move || {
|
||||
drop(conn);
|
||||
log::trace!(
|
||||
"Cleaned up SQLite connection at {}",
|
||||
path.to_string_lossy()
|
||||
);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CacheDB {
|
||||
#[cfg(test)]
|
||||
pub fn in_memory(
|
||||
config: &'static CacheDBConfiguration,
|
||||
version: &'static str,
|
||||
) -> Self {
|
||||
CacheDB {
|
||||
conn: Arc::new(Mutex::new(OnceCell::new())),
|
||||
path: None,
|
||||
config,
|
||||
version,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_path(
|
||||
config: &'static CacheDBConfiguration,
|
||||
path: PathBuf,
|
||||
version: &'static str,
|
||||
) -> Self {
|
||||
log::debug!("Opening cache {}...", path.to_string_lossy());
|
||||
let new = Self {
|
||||
conn: Arc::new(Mutex::new(OnceCell::new())),
|
||||
path: Some(path),
|
||||
config,
|
||||
version,
|
||||
};
|
||||
|
||||
new.spawn_eager_init_thread();
|
||||
new
|
||||
}
|
||||
|
||||
/// Useful for testing: re-create this cache DB with a different current version.
|
||||
#[cfg(test)]
|
||||
pub(crate) fn recreate_with_version(mut self, version: &'static str) -> Self {
|
||||
// By taking the lock, we know there are no initialization threads alive
|
||||
drop(self.conn.lock());
|
||||
|
||||
let arc = std::mem::take(&mut self.conn);
|
||||
let conn = match Arc::try_unwrap(arc) {
|
||||
Err(_) => panic!("Failed to unwrap connection"),
|
||||
Ok(conn) => match conn.into_inner().into_inner() {
|
||||
Some(ConnectionState::Connected(conn)) => conn,
|
||||
_ => panic!("Connection had failed and cannot be unwrapped"),
|
||||
},
|
||||
};
|
||||
|
||||
Self::initialize_connection(self.config, &conn, version).unwrap();
|
||||
|
||||
let cell = OnceCell::new();
|
||||
_ = cell.set(ConnectionState::Connected(conn));
|
||||
Self {
|
||||
conn: Arc::new(Mutex::new(cell)),
|
||||
path: self.path.clone(),
|
||||
config: self.config,
|
||||
version,
|
||||
}
|
||||
}
|
||||
|
||||
fn spawn_eager_init_thread(&self) {
|
||||
let clone = self.clone();
|
||||
// TODO(mmastrac): we should ensure tokio runtimes are consistently available or not
|
||||
if tokio::runtime::Handle::try_current().is_ok() {
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let lock = clone.conn.lock();
|
||||
clone.initialize(&lock);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/// Open the connection in memory or on disk.
|
||||
fn actually_open_connection(
|
||||
&self,
|
||||
path: &Option<PathBuf>,
|
||||
) -> Result<Connection, rusqlite::Error> {
|
||||
match path {
|
||||
// This should never fail unless something is very wrong
|
||||
None => Connection::open_in_memory(),
|
||||
Some(path) => Connection::open(path),
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempt to initialize that connection.
|
||||
fn initialize_connection(
|
||||
config: &CacheDBConfiguration,
|
||||
conn: &Connection,
|
||||
version: &str,
|
||||
) -> Result<(), AnyError> {
|
||||
let sql = config.create_combined_sql();
|
||||
conn.execute_batch(&sql)?;
|
||||
|
||||
// Check the version
|
||||
let existing_version = conn
|
||||
.query_row(
|
||||
"SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1",
|
||||
[],
|
||||
|row| row.get::<_, String>(0),
|
||||
)
|
||||
.optional()?
|
||||
.unwrap_or_default();
|
||||
|
||||
// If Deno has been upgraded, run the SQL to update the version
|
||||
if existing_version != version {
|
||||
conn.execute_batch(config.on_version_change)?;
|
||||
let mut stmt = conn
|
||||
.prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?;
|
||||
stmt.execute(["CLI_VERSION", version])?;
|
||||
}
|
||||
|
||||
// Preheat any prepared queries
|
||||
for preheat in config.preheat_queries {
|
||||
drop(conn.prepare_cached(preheat)?);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Open and initialize a connection.
|
||||
fn open_connection_and_init(
|
||||
&self,
|
||||
path: &Option<PathBuf>,
|
||||
) -> Result<Connection, AnyError> {
|
||||
let conn = self.actually_open_connection(path)?;
|
||||
Self::initialize_connection(self.config, &conn, self.version)?;
|
||||
Ok(conn)
|
||||
}
|
||||
|
||||
/// This function represents the policy for dealing with corrupted cache files. We try fairly aggressively
|
||||
/// to repair the situation, and if we can't, we prefer to log noisily and continue with in-memory caches.
|
||||
fn open_connection(&self) -> Result<ConnectionState, AnyError> {
|
||||
// Success on first try? We hope that this is the case.
|
||||
let err = match self.open_connection_and_init(&self.path) {
|
||||
Ok(conn) => return Ok(ConnectionState::Connected(conn)),
|
||||
Err(err) => err,
|
||||
};
|
||||
|
||||
if self.path.is_none() {
|
||||
// If an in-memory DB fails, that's game over
|
||||
log::error!("Failed to initialize in-memory cache database.");
|
||||
return Err(err);
|
||||
}
|
||||
|
||||
let path = self.path.as_ref().unwrap();
|
||||
|
||||
// There are rare times in the tests when we can't initialize a cache DB the first time, but it succeeds the second time, so
|
||||
// we don't log these at a debug level.
|
||||
log::trace!(
|
||||
"Could not initialize cache database '{}', retrying... ({err:?})",
|
||||
path.to_string_lossy(),
|
||||
);
|
||||
|
||||
// Try a second time
|
||||
let err = match self.open_connection_and_init(&self.path) {
|
||||
Ok(conn) => return Ok(ConnectionState::Connected(conn)),
|
||||
Err(err) => err,
|
||||
};
|
||||
|
||||
// Failed, try deleting it
|
||||
log::warn!(
|
||||
"Could not initialize cache database '{}', deleting and retrying... ({err:?})",
|
||||
path.to_string_lossy()
|
||||
);
|
||||
if std::fs::remove_file(path).is_ok() {
|
||||
// Try a third time if we successfully deleted it
|
||||
let res = self.open_connection_and_init(&self.path);
|
||||
if let Ok(conn) = res {
|
||||
return Ok(ConnectionState::Connected(conn));
|
||||
};
|
||||
}
|
||||
|
||||
match self.config.on_failure {
|
||||
CacheFailure::InMemory => {
|
||||
log::error!(
|
||||
"Failed to open cache file '{}', opening in-memory cache.",
|
||||
path.to_string_lossy()
|
||||
);
|
||||
Ok(ConnectionState::Connected(
|
||||
self.open_connection_and_init(&None)?,
|
||||
))
|
||||
}
|
||||
CacheFailure::Blackhole => {
|
||||
log::error!(
|
||||
"Failed to open cache file '{}', performance may be degraded.",
|
||||
path.to_string_lossy()
|
||||
);
|
||||
Ok(ConnectionState::Blackhole)
|
||||
}
|
||||
CacheFailure::Error => {
|
||||
log::error!(
|
||||
"Failed to open cache file '{}', expect further errors.",
|
||||
path.to_string_lossy()
|
||||
);
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn initialize<'a>(
|
||||
&self,
|
||||
lock: &'a MutexGuard<OnceCell<ConnectionState>>,
|
||||
) -> &'a ConnectionState {
|
||||
lock.get_or_init(|| match self.open_connection() {
|
||||
Ok(conn) => conn,
|
||||
Err(e) => ConnectionState::Error(e.into()),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn with_connection<T: Default>(
|
||||
&self,
|
||||
f: impl FnOnce(&Connection) -> Result<T, AnyError>,
|
||||
) -> Result<T, AnyError> {
|
||||
let lock = self.conn.lock();
|
||||
let conn = self.initialize(&lock);
|
||||
|
||||
match conn {
|
||||
ConnectionState::Blackhole => {
|
||||
// Cache is a blackhole - nothing in or out.
|
||||
Ok(T::default())
|
||||
}
|
||||
ConnectionState::Error(e) => {
|
||||
// This isn't ideal because we lose the original underlying error
|
||||
let err = AnyError::msg(e.clone().to_string());
|
||||
Err(err)
|
||||
}
|
||||
ConnectionState::Connected(conn) => f(conn),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn ensure_connected(&self) -> Result<(), AnyError> {
|
||||
self.with_connection(|_| Ok(()))
|
||||
}
|
||||
|
||||
pub fn execute(
|
||||
&self,
|
||||
sql: &'static str,
|
||||
params: impl Params,
|
||||
) -> Result<usize, AnyError> {
|
||||
self.with_connection(|conn| {
|
||||
let mut stmt = conn.prepare_cached(sql)?;
|
||||
let res = stmt.execute(params)?;
|
||||
Ok(res)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn exists(
|
||||
&self,
|
||||
sql: &'static str,
|
||||
params: impl Params,
|
||||
) -> Result<bool, AnyError> {
|
||||
self.with_connection(|conn| {
|
||||
let mut stmt = conn.prepare_cached(sql)?;
|
||||
let res = stmt.exists(params)?;
|
||||
Ok(res)
|
||||
})
|
||||
}
|
||||
|
||||
/// Query a row from the database with a mapping function.
|
||||
pub fn query_row<T, F>(
|
||||
&self,
|
||||
sql: &'static str,
|
||||
params: impl Params,
|
||||
f: F,
|
||||
) -> Result<Option<T>, AnyError>
|
||||
where
|
||||
F: FnOnce(&rusqlite::Row<'_>) -> Result<T, AnyError>,
|
||||
{
|
||||
let res = self.with_connection(|conn| {
|
||||
let mut stmt = conn.prepare_cached(sql)?;
|
||||
let mut rows = stmt.query(params)?;
|
||||
if let Some(row) = rows.next()? {
|
||||
let res = f(row)?;
|
||||
Ok(Some(res))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
})?;
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
static TEST_DB: CacheDBConfiguration = CacheDBConfiguration {
|
||||
table_initializer: "create table if not exists test(value TEXT);",
|
||||
on_version_change: "delete from test;",
|
||||
preheat_queries: &[],
|
||||
on_failure: CacheFailure::InMemory,
|
||||
};
|
||||
|
||||
static TEST_DB_BLACKHOLE: CacheDBConfiguration = CacheDBConfiguration {
|
||||
table_initializer: "create table if not exists test(value TEXT);",
|
||||
on_version_change: "delete from test;",
|
||||
preheat_queries: &[],
|
||||
on_failure: CacheFailure::Blackhole,
|
||||
};
|
||||
|
||||
static TEST_DB_ERROR: CacheDBConfiguration = CacheDBConfiguration {
|
||||
table_initializer: "create table if not exists test(value TEXT);",
|
||||
on_version_change: "delete from test;",
|
||||
preheat_queries: &[],
|
||||
on_failure: CacheFailure::Error,
|
||||
};
|
||||
|
||||
static BAD_SQL_TEST_DB: CacheDBConfiguration = CacheDBConfiguration {
|
||||
table_initializer: "bad sql;",
|
||||
on_version_change: "delete from test;",
|
||||
preheat_queries: &[],
|
||||
on_failure: CacheFailure::InMemory,
|
||||
};
|
||||
|
||||
static FAILURE_PATH: &str = "/tmp/this/doesnt/exist/so/will/always/fail";
|
||||
|
||||
#[tokio::test]
|
||||
async fn simple_database() {
|
||||
let db = CacheDB::in_memory(&TEST_DB, "1.0");
|
||||
db.ensure_connected()
|
||||
.expect("Failed to initialize in-memory database");
|
||||
|
||||
db.execute("insert into test values (?1)", [1]).unwrap();
|
||||
let res = db
|
||||
.query_row("select * from test", [], |row| {
|
||||
Ok(row.get::<_, String>(0).unwrap())
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(Some("1".into()), res);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn bad_sql() {
|
||||
let db = CacheDB::in_memory(&BAD_SQL_TEST_DB, "1.0");
|
||||
db.ensure_connected()
|
||||
.expect_err("Expected to fail, but succeeded");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn failure_mode_in_memory() {
|
||||
let db = CacheDB::from_path(&TEST_DB, FAILURE_PATH.into(), "1.0");
|
||||
db.ensure_connected()
|
||||
.expect("Should have created a database");
|
||||
|
||||
db.execute("insert into test values (?1)", [1]).unwrap();
|
||||
let res = db
|
||||
.query_row("select * from test", [], |row| {
|
||||
Ok(row.get::<_, String>(0).unwrap())
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(Some("1".into()), res);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn failure_mode_blackhole() {
|
||||
let db = CacheDB::from_path(&TEST_DB_BLACKHOLE, FAILURE_PATH.into(), "1.0");
|
||||
db.ensure_connected()
|
||||
.expect("Should have created a database");
|
||||
|
||||
db.execute("insert into test values (?1)", [1]).unwrap();
|
||||
let res = db
|
||||
.query_row("select * from test", [], |row| {
|
||||
Ok(row.get::<_, String>(0).unwrap())
|
||||
})
|
||||
.unwrap();
|
||||
assert_eq!(None, res);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn failure_mode_error() {
|
||||
let db = CacheDB::from_path(&TEST_DB_ERROR, FAILURE_PATH.into(), "1.0");
|
||||
db.ensure_connected().expect_err("Should have failed");
|
||||
|
||||
db.execute("insert into test values (?1)", [1])
|
||||
.expect_err("Should have failed");
|
||||
db.query_row("select * from test", [], |row| {
|
||||
Ok(row.get::<_, String>(0).unwrap())
|
||||
})
|
||||
.expect_err("Should have failed");
|
||||
}
|
||||
}
|
75
cli/cache/caches.rs
vendored
Normal file
75
cli/cache/caches.rs
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use once_cell::sync::OnceCell;
|
||||
|
||||
use super::cache_db::CacheDB;
|
||||
use super::cache_db::CacheDBConfiguration;
|
||||
use super::check::TYPE_CHECK_CACHE_DB;
|
||||
use super::incremental::INCREMENTAL_CACHE_DB;
|
||||
use super::node::NODE_ANALYSIS_CACHE_DB;
|
||||
use super::parsed_source::PARSED_SOURCE_CACHE_DB;
|
||||
use super::DenoDir;
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub struct Caches {
|
||||
fmt_incremental_cache_db: Arc<OnceCell<CacheDB>>,
|
||||
lint_incremental_cache_db: Arc<OnceCell<CacheDB>>,
|
||||
dep_analysis_db: Arc<OnceCell<CacheDB>>,
|
||||
node_analysis_db: Arc<OnceCell<CacheDB>>,
|
||||
type_checking_cache_db: Arc<OnceCell<CacheDB>>,
|
||||
}
|
||||
|
||||
impl Caches {
|
||||
fn make_db(
|
||||
cell: &Arc<OnceCell<CacheDB>>,
|
||||
config: &'static CacheDBConfiguration,
|
||||
path: PathBuf,
|
||||
) -> CacheDB {
|
||||
cell
|
||||
.get_or_init(|| CacheDB::from_path(config, path, crate::version::deno()))
|
||||
.clone()
|
||||
}
|
||||
|
||||
pub fn fmt_incremental_cache_db(&self, dir: &DenoDir) -> CacheDB {
|
||||
Self::make_db(
|
||||
&self.fmt_incremental_cache_db,
|
||||
&INCREMENTAL_CACHE_DB,
|
||||
dir.fmt_incremental_cache_db_file_path(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn lint_incremental_cache_db(&self, dir: &DenoDir) -> CacheDB {
|
||||
Self::make_db(
|
||||
&self.lint_incremental_cache_db,
|
||||
&INCREMENTAL_CACHE_DB,
|
||||
dir.lint_incremental_cache_db_file_path(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn dep_analysis_db(&self, dir: &DenoDir) -> CacheDB {
|
||||
Self::make_db(
|
||||
&self.dep_analysis_db,
|
||||
&PARSED_SOURCE_CACHE_DB,
|
||||
dir.dep_analysis_db_file_path(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn node_analysis_db(&self, dir: &DenoDir) -> CacheDB {
|
||||
Self::make_db(
|
||||
&self.node_analysis_db,
|
||||
&NODE_ANALYSIS_CACHE_DB,
|
||||
dir.node_analysis_db_file_path(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn type_checking_cache_db(&self, dir: &DenoDir) -> CacheDB {
|
||||
Self::make_db(
|
||||
&self.type_checking_cache_db,
|
||||
&TYPE_CHECK_CACHE_DB,
|
||||
dir.type_checking_cache_db_file_path(),
|
||||
)
|
||||
}
|
||||
}
|
176
cli/cache/check.rs
vendored
176
cli/cache/check.rs
vendored
|
@ -1,68 +1,40 @@
|
|||
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use super::cache_db::CacheDB;
|
||||
use super::cache_db::CacheDBConfiguration;
|
||||
use super::cache_db::CacheFailure;
|
||||
use deno_ast::ModuleSpecifier;
|
||||
use deno_core::error::AnyError;
|
||||
use deno_runtime::deno_webstorage::rusqlite::params;
|
||||
use deno_runtime::deno_webstorage::rusqlite::Connection;
|
||||
|
||||
use super::common::INITIAL_PRAGMAS;
|
||||
pub static TYPE_CHECK_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
|
||||
table_initializer: concat!(
|
||||
"CREATE TABLE IF NOT EXISTS checkcache (
|
||||
check_hash TEXT PRIMARY KEY
|
||||
);",
|
||||
"CREATE TABLE IF NOT EXISTS tsbuildinfo (
|
||||
specifier TEXT PRIMARY KEY,
|
||||
text TEXT NOT NULL
|
||||
);",
|
||||
),
|
||||
on_version_change: concat!(
|
||||
"DELETE FROM checkcache;",
|
||||
"DELETE FROM tsbuildinfo;"
|
||||
),
|
||||
preheat_queries: &[],
|
||||
// If the cache fails, just ignore all caching attempts
|
||||
on_failure: CacheFailure::Blackhole,
|
||||
};
|
||||
|
||||
/// The cache used to tell whether type checking should occur again.
|
||||
///
|
||||
/// This simply stores a hash of the inputs of each successful type check
|
||||
/// and only clears them out when changing CLI versions.
|
||||
pub struct TypeCheckCache(Option<Connection>);
|
||||
pub struct TypeCheckCache(CacheDB);
|
||||
|
||||
impl TypeCheckCache {
|
||||
pub fn new(db_file_path: &Path) -> Self {
|
||||
log::debug!("Loading type check cache.");
|
||||
match Self::try_new(db_file_path) {
|
||||
Ok(cache) => cache,
|
||||
Err(err) => {
|
||||
log::debug!(
|
||||
concat!(
|
||||
"Failed loading internal type checking cache. ",
|
||||
"Recreating...\n\nError details:\n{:#}",
|
||||
),
|
||||
err
|
||||
);
|
||||
// Maybe the cache file is corrupt. Attempt to remove the cache file
|
||||
// then attempt to recreate again. Otherwise, use null object pattern.
|
||||
match std::fs::remove_file(db_file_path) {
|
||||
Ok(_) => match Self::try_new(db_file_path) {
|
||||
Ok(cache) => cache,
|
||||
Err(err) => {
|
||||
log::debug!(
|
||||
concat!(
|
||||
"Unable to load internal cache for type checking. ",
|
||||
"This will reduce the performance of type checking.\n\n",
|
||||
"Error details:\n{:#}",
|
||||
),
|
||||
err
|
||||
);
|
||||
Self(None)
|
||||
}
|
||||
},
|
||||
Err(_) => Self(None),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn try_new(db_file_path: &Path) -> Result<Self, AnyError> {
|
||||
let conn = Connection::open(db_file_path)?;
|
||||
Self::from_connection(conn, crate::version::deno())
|
||||
}
|
||||
|
||||
fn from_connection(
|
||||
conn: Connection,
|
||||
cli_version: &'static str,
|
||||
) -> Result<Self, AnyError> {
|
||||
initialize(&conn, cli_version)?;
|
||||
|
||||
Ok(Self(Some(conn)))
|
||||
pub fn new(db: CacheDB) -> Self {
|
||||
Self(db)
|
||||
}
|
||||
|
||||
pub fn has_check_hash(&self, hash: u64) -> bool {
|
||||
|
@ -81,13 +53,10 @@ impl TypeCheckCache {
|
|||
}
|
||||
|
||||
fn hash_check_hash_result(&self, hash: u64) -> Result<bool, AnyError> {
|
||||
let conn = match &self.0 {
|
||||
Some(conn) => conn,
|
||||
None => return Ok(false),
|
||||
};
|
||||
let query = "SELECT * FROM checkcache WHERE check_hash=?1 LIMIT 1";
|
||||
let mut stmt = conn.prepare_cached(query)?;
|
||||
Ok(stmt.exists(params![hash.to_string()])?)
|
||||
self.0.exists(
|
||||
"SELECT * FROM checkcache WHERE check_hash=?1 LIMIT 1",
|
||||
params![hash.to_string()],
|
||||
)
|
||||
}
|
||||
|
||||
pub fn add_check_hash(&self, check_hash: u64) {
|
||||
|
@ -101,32 +70,24 @@ impl TypeCheckCache {
|
|||
}
|
||||
|
||||
fn add_check_hash_result(&self, check_hash: u64) -> Result<(), AnyError> {
|
||||
let conn = match &self.0 {
|
||||
Some(conn) => conn,
|
||||
None => return Ok(()),
|
||||
};
|
||||
let sql = "
|
||||
INSERT OR REPLACE INTO
|
||||
checkcache (check_hash)
|
||||
VALUES
|
||||
(?1)";
|
||||
let mut stmt = conn.prepare_cached(sql)?;
|
||||
stmt.execute(params![&check_hash.to_string(),])?;
|
||||
self.0.execute(sql, params![&check_hash.to_string(),])?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_tsbuildinfo(&self, specifier: &ModuleSpecifier) -> Option<String> {
|
||||
let conn = match &self.0 {
|
||||
Some(conn) => conn,
|
||||
None => return None,
|
||||
};
|
||||
let mut stmt = conn
|
||||
.prepare_cached("SELECT text FROM tsbuildinfo WHERE specifier=?1 LIMIT 1")
|
||||
.ok()?;
|
||||
let mut rows = stmt.query(params![specifier.to_string()]).ok()?;
|
||||
let row = rows.next().ok().flatten()?;
|
||||
|
||||
row.get(0).ok()
|
||||
self
|
||||
.0
|
||||
.query_row(
|
||||
"SELECT text FROM tsbuildinfo WHERE specifier=?1 LIMIT 1",
|
||||
params![specifier.to_string()],
|
||||
|row| Ok(row.get::<_, String>(0)?),
|
||||
)
|
||||
.ok()?
|
||||
}
|
||||
|
||||
pub fn set_tsbuildinfo(&self, specifier: &ModuleSpecifier, text: &str) {
|
||||
|
@ -145,67 +106,22 @@ impl TypeCheckCache {
|
|||
specifier: &ModuleSpecifier,
|
||||
text: &str,
|
||||
) -> Result<(), AnyError> {
|
||||
let conn = match &self.0 {
|
||||
Some(conn) => conn,
|
||||
None => return Ok(()),
|
||||
};
|
||||
let mut stmt = conn.prepare_cached(
|
||||
self.0.execute(
|
||||
"INSERT OR REPLACE INTO tsbuildinfo (specifier, text) VALUES (?1, ?2)",
|
||||
params![specifier.to_string(), text],
|
||||
)?;
|
||||
stmt.execute(params![specifier.to_string(), text])?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn initialize(
|
||||
conn: &Connection,
|
||||
cli_version: &'static str,
|
||||
) -> Result<(), AnyError> {
|
||||
// INT doesn't store up to u64, so use TEXT for check_hash
|
||||
let query = format!(
|
||||
"{INITIAL_PRAGMAS}
|
||||
CREATE TABLE IF NOT EXISTS checkcache (
|
||||
check_hash TEXT PRIMARY KEY
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS tsbuildinfo (
|
||||
specifier TEXT PRIMARY KEY,
|
||||
text TEXT NOT NULL
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS info (
|
||||
key TEXT PRIMARY KEY,
|
||||
value TEXT NOT NULL
|
||||
);
|
||||
",
|
||||
);
|
||||
conn.execute_batch(&query)?;
|
||||
|
||||
// delete the cache when the CLI version changes
|
||||
let data_cli_version: Option<String> = conn
|
||||
.query_row(
|
||||
"SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1",
|
||||
[],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.ok();
|
||||
if data_cli_version.as_deref() != Some(cli_version) {
|
||||
conn.execute("DELETE FROM checkcache", params![])?;
|
||||
conn.execute("DELETE FROM tsbuildinfo", params![])?;
|
||||
let mut stmt = conn
|
||||
.prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?;
|
||||
stmt.execute(params!["CLI_VERSION", cli_version])?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
pub fn check_cache_general_use() {
|
||||
let conn = Connection::open_in_memory().unwrap();
|
||||
let cache = TypeCheckCache::from_connection(conn, "1.0.0").unwrap();
|
||||
let conn = CacheDB::in_memory(&TYPE_CHECK_CACHE_DB, "1.0.0");
|
||||
let cache = TypeCheckCache::new(conn);
|
||||
|
||||
assert!(!cache.has_check_hash(1));
|
||||
cache.add_check_hash(1);
|
||||
|
@ -218,8 +134,9 @@ mod test {
|
|||
assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
|
||||
|
||||
// try changing the cli version (should clear)
|
||||
let conn = cache.0.unwrap();
|
||||
let cache = TypeCheckCache::from_connection(conn, "2.0.0").unwrap();
|
||||
let conn = cache.0.recreate_with_version("2.0.0");
|
||||
let cache = TypeCheckCache::new(conn);
|
||||
|
||||
assert!(!cache.has_check_hash(1));
|
||||
cache.add_check_hash(1);
|
||||
assert!(cache.has_check_hash(1));
|
||||
|
@ -228,8 +145,9 @@ mod test {
|
|||
assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
|
||||
|
||||
// recreating the cache should not remove the data because the CLI version is the same
|
||||
let conn = cache.0.unwrap();
|
||||
let cache = TypeCheckCache::from_connection(conn, "2.0.0").unwrap();
|
||||
let conn = cache.0.recreate_with_version("2.0.0");
|
||||
let cache = TypeCheckCache::new(conn);
|
||||
|
||||
assert!(cache.has_check_hash(1));
|
||||
assert!(!cache.has_check_hash(2));
|
||||
assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
|
||||
|
|
12
cli/cache/common.rs
vendored
12
cli/cache/common.rs
vendored
|
@ -43,15 +43,3 @@ impl FastInsecureHasher {
|
|||
self.0.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// Disable write-ahead-logging and tweak some other stuff.
|
||||
/// We want to favor startup time over cache performance and
|
||||
/// creating a WAL is expensive on startup.
|
||||
pub static INITIAL_PRAGMAS: &str = "
|
||||
PRAGMA journal_mode=OFF;
|
||||
PRAGMA synchronous=NORMAL;
|
||||
PRAGMA temp_store=memory;
|
||||
PRAGMA page_size=4096;
|
||||
PRAGMA mmap_size=6000000;
|
||||
PRAGMA optimize;
|
||||
";
|
||||
|
|
166
cli/cache/incremental.rs
vendored
166
cli/cache/incremental.rs
vendored
|
@ -8,57 +8,49 @@ use deno_core::error::AnyError;
|
|||
use deno_core::parking_lot::Mutex;
|
||||
use deno_core::serde_json;
|
||||
use deno_runtime::deno_webstorage::rusqlite::params;
|
||||
use deno_runtime::deno_webstorage::rusqlite::Connection;
|
||||
use serde::Serialize;
|
||||
use tokio::task::JoinHandle;
|
||||
|
||||
use super::cache_db::CacheDB;
|
||||
use super::cache_db::CacheDBConfiguration;
|
||||
use super::cache_db::CacheFailure;
|
||||
use super::common::FastInsecureHasher;
|
||||
use super::common::INITIAL_PRAGMAS;
|
||||
|
||||
pub static INCREMENTAL_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
|
||||
table_initializer: "CREATE TABLE IF NOT EXISTS incrementalcache (
|
||||
file_path TEXT PRIMARY KEY,
|
||||
state_hash TEXT NOT NULL,
|
||||
source_hash TEXT NOT NULL
|
||||
);",
|
||||
on_version_change: "DELETE FROM incrementalcache;",
|
||||
preheat_queries: &[],
|
||||
// If the cache fails, just ignore all caching attempts
|
||||
on_failure: CacheFailure::Blackhole,
|
||||
};
|
||||
|
||||
/// Cache used to skip formatting/linting a file again when we
|
||||
/// know it is already formatted or has no lint diagnostics.
|
||||
pub struct IncrementalCache(Option<IncrementalCacheInner>);
|
||||
pub struct IncrementalCache(IncrementalCacheInner);
|
||||
|
||||
impl IncrementalCache {
|
||||
pub fn new<TState: Serialize>(
|
||||
db_file_path: &Path,
|
||||
db: CacheDB,
|
||||
state: &TState,
|
||||
initial_file_paths: &[PathBuf],
|
||||
) -> Self {
|
||||
// if creating the incremental cache fails, then we
|
||||
// treat it as not having a cache
|
||||
let result =
|
||||
IncrementalCacheInner::new(db_file_path, state, initial_file_paths);
|
||||
IncrementalCache(match result {
|
||||
Ok(inner) => Some(inner),
|
||||
Err(err) => {
|
||||
log::debug!("Creating the incremental cache failed.\n{:#}", err);
|
||||
// Maybe the cache file is corrupt. Attempt to remove
|
||||
// the cache file for next time
|
||||
let _ = std::fs::remove_file(db_file_path);
|
||||
None
|
||||
}
|
||||
})
|
||||
IncrementalCache(IncrementalCacheInner::new(db, state, initial_file_paths))
|
||||
}
|
||||
|
||||
pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
|
||||
if let Some(inner) = &self.0 {
|
||||
inner.is_file_same(file_path, file_text)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
self.0.is_file_same(file_path, file_text)
|
||||
}
|
||||
|
||||
pub fn update_file(&self, file_path: &Path, file_text: &str) {
|
||||
if let Some(inner) = &self.0 {
|
||||
inner.update_file(file_path, file_text)
|
||||
}
|
||||
self.0.update_file(file_path, file_text)
|
||||
}
|
||||
|
||||
pub async fn wait_completion(&self) {
|
||||
if let Some(inner) = &self.0 {
|
||||
inner.wait_completion().await;
|
||||
}
|
||||
self.0.wait_completion().await;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -75,18 +67,15 @@ struct IncrementalCacheInner {
|
|||
|
||||
impl IncrementalCacheInner {
|
||||
pub fn new<TState: Serialize>(
|
||||
db_file_path: &Path,
|
||||
db: CacheDB,
|
||||
state: &TState,
|
||||
initial_file_paths: &[PathBuf],
|
||||
) -> Result<Self, AnyError> {
|
||||
) -> Self {
|
||||
let state_hash = FastInsecureHasher::new()
|
||||
.write_str(&serde_json::to_string(state).unwrap())
|
||||
.finish();
|
||||
let sql_cache = SqlIncrementalCache::new(db_file_path, state_hash)?;
|
||||
Ok(Self::from_sql_incremental_cache(
|
||||
sql_cache,
|
||||
initial_file_paths,
|
||||
))
|
||||
let sql_cache = SqlIncrementalCache::new(db, state_hash);
|
||||
Self::from_sql_incremental_cache(sql_cache, initial_file_paths)
|
||||
}
|
||||
|
||||
fn from_sql_incremental_cache(
|
||||
|
@ -155,7 +144,7 @@ impl IncrementalCacheInner {
|
|||
}
|
||||
|
||||
struct SqlIncrementalCache {
|
||||
conn: Connection,
|
||||
conn: CacheDB,
|
||||
/// A hash of the state used to produce the formatting/linting other than
|
||||
/// the CLI version. This state is a hash of the configuration and ensures
|
||||
/// we format/lint a file when the configuration changes.
|
||||
|
@ -163,20 +152,8 @@ struct SqlIncrementalCache {
|
|||
}
|
||||
|
||||
impl SqlIncrementalCache {
|
||||
pub fn new(db_file_path: &Path, state_hash: u64) -> Result<Self, AnyError> {
|
||||
log::debug!("Loading incremental cache.");
|
||||
let conn = Connection::open(db_file_path)?;
|
||||
Self::from_connection(conn, state_hash, crate::version::deno())
|
||||
}
|
||||
|
||||
fn from_connection(
|
||||
conn: Connection,
|
||||
state_hash: u64,
|
||||
cli_version: &'static str,
|
||||
) -> Result<Self, AnyError> {
|
||||
initialize(&conn, cli_version)?;
|
||||
|
||||
Ok(Self { conn, state_hash })
|
||||
pub fn new(conn: CacheDB, state_hash: u64) -> Self {
|
||||
Self { conn, state_hash }
|
||||
}
|
||||
|
||||
pub fn get_source_hash(&self, path: &Path) -> Option<u64> {
|
||||
|
@ -206,15 +183,15 @@ impl SqlIncrementalCache {
|
|||
file_path=?1
|
||||
AND state_hash=?2
|
||||
LIMIT 1";
|
||||
let mut stmt = self.conn.prepare_cached(query)?;
|
||||
let mut rows = stmt
|
||||
.query(params![path.to_string_lossy(), self.state_hash.to_string()])?;
|
||||
if let Some(row) = rows.next()? {
|
||||
let hash: String = row.get(0)?;
|
||||
Ok(Some(hash.parse::<u64>()?))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
let res = self.conn.query_row(
|
||||
query,
|
||||
params![path.to_string_lossy(), self.state_hash.to_string()],
|
||||
|row| {
|
||||
let hash: String = row.get(0)?;
|
||||
Ok(hash.parse::<u64>()?)
|
||||
},
|
||||
)?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
pub fn set_source_hash(
|
||||
|
@ -227,53 +204,18 @@ impl SqlIncrementalCache {
|
|||
incrementalcache (file_path, state_hash, source_hash)
|
||||
VALUES
|
||||
(?1, ?2, ?3)";
|
||||
let mut stmt = self.conn.prepare_cached(sql)?;
|
||||
stmt.execute(params![
|
||||
path.to_string_lossy(),
|
||||
&self.state_hash.to_string(),
|
||||
&source_hash,
|
||||
])?;
|
||||
self.conn.execute(
|
||||
sql,
|
||||
params![
|
||||
path.to_string_lossy(),
|
||||
&self.state_hash.to_string(),
|
||||
&source_hash,
|
||||
],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn initialize(
|
||||
conn: &Connection,
|
||||
cli_version: &'static str,
|
||||
) -> Result<(), AnyError> {
|
||||
// INT doesn't store up to u64, so use TEXT for source_hash
|
||||
let query = format!(
|
||||
"{INITIAL_PRAGMAS}
|
||||
CREATE TABLE IF NOT EXISTS incrementalcache (
|
||||
file_path TEXT PRIMARY KEY,
|
||||
state_hash TEXT NOT NULL,
|
||||
source_hash TEXT NOT NULL
|
||||
);
|
||||
CREATE TABLE IF NOT EXISTS info (
|
||||
key TEXT PRIMARY KEY,
|
||||
value TEXT NOT NULL
|
||||
);"
|
||||
);
|
||||
conn.execute_batch(&query)?;
|
||||
|
||||
// delete the cache when the CLI version changes
|
||||
let data_cli_version: Option<String> = conn
|
||||
.query_row(
|
||||
"SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1",
|
||||
[],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.ok();
|
||||
if data_cli_version.as_deref() != Some(cli_version) {
|
||||
conn.execute("DELETE FROM incrementalcache", params![])?;
|
||||
let mut stmt = conn
|
||||
.prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?;
|
||||
stmt.execute(params!["CLI_VERSION", cli_version])?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::path::PathBuf;
|
||||
|
@ -282,8 +224,8 @@ mod test {
|
|||
|
||||
#[test]
|
||||
pub fn sql_cache_general_use() {
|
||||
let conn = Connection::open_in_memory().unwrap();
|
||||
let cache = SqlIncrementalCache::from_connection(conn, 1, "1.0.0").unwrap();
|
||||
let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
|
||||
let cache = SqlIncrementalCache::new(conn, 1);
|
||||
let path = PathBuf::from("/mod.ts");
|
||||
|
||||
assert_eq!(cache.get_source_hash(&path), None);
|
||||
|
@ -291,9 +233,8 @@ mod test {
|
|||
assert_eq!(cache.get_source_hash(&path), Some(2));
|
||||
|
||||
// try changing the cli version (should clear)
|
||||
let conn = cache.conn;
|
||||
let mut cache =
|
||||
SqlIncrementalCache::from_connection(conn, 1, "2.0.0").unwrap();
|
||||
let conn = cache.conn.recreate_with_version("2.0.0");
|
||||
let mut cache = SqlIncrementalCache::new(conn, 1);
|
||||
assert_eq!(cache.get_source_hash(&path), None);
|
||||
|
||||
// add back the file to the cache
|
||||
|
@ -309,8 +250,8 @@ mod test {
|
|||
assert_eq!(cache.get_source_hash(&path), Some(2));
|
||||
|
||||
// recreating the cache should not remove the data because the CLI version and state hash is the same
|
||||
let conn = cache.conn;
|
||||
let cache = SqlIncrementalCache::from_connection(conn, 1, "2.0.0").unwrap();
|
||||
let conn = cache.conn.recreate_with_version("2.0.0");
|
||||
let cache = SqlIncrementalCache::new(conn, 1);
|
||||
assert_eq!(cache.get_source_hash(&path), Some(2));
|
||||
|
||||
// now try replacing and using another path
|
||||
|
@ -324,9 +265,8 @@ mod test {
|
|||
|
||||
#[tokio::test]
|
||||
pub async fn incremental_cache_general_use() {
|
||||
let conn = Connection::open_in_memory().unwrap();
|
||||
let sql_cache =
|
||||
SqlIncrementalCache::from_connection(conn, 1, "1.0.0").unwrap();
|
||||
let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
|
||||
let sql_cache = SqlIncrementalCache::new(conn, 1);
|
||||
let file_path = PathBuf::from("/mod.ts");
|
||||
let file_text = "test";
|
||||
let file_hash = FastInsecureHasher::new().write_str(file_text).finish();
|
||||
|
|
3
cli/cache/mod.rs
vendored
3
cli/cache/mod.rs
vendored
|
@ -14,6 +14,8 @@ use deno_runtime::permissions::PermissionsContainer;
|
|||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
mod cache_db;
|
||||
mod caches;
|
||||
mod check;
|
||||
mod common;
|
||||
mod deno_dir;
|
||||
|
@ -24,6 +26,7 @@ mod incremental;
|
|||
mod node;
|
||||
mod parsed_source;
|
||||
|
||||
pub use caches::Caches;
|
||||
pub use check::TypeCheckCache;
|
||||
pub use common::FastInsecureHasher;
|
||||
pub use deno_dir::DenoDir;
|
||||
|
|
312
cli/cache/node.rs
vendored
312
cli/cache/node.rs
vendored
|
@ -1,40 +1,58 @@
|
|||
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use deno_ast::CjsAnalysis;
|
||||
use deno_core::error::AnyError;
|
||||
use deno_core::parking_lot::Mutex;
|
||||
use deno_core::serde_json;
|
||||
use deno_runtime::deno_webstorage::rusqlite::params;
|
||||
use deno_runtime::deno_webstorage::rusqlite::Connection;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::common::INITIAL_PRAGMAS;
|
||||
use super::cache_db::CacheDB;
|
||||
use super::cache_db::CacheDBConfiguration;
|
||||
use super::cache_db::CacheFailure;
|
||||
use super::FastInsecureHasher;
|
||||
|
||||
// todo(dsherret): use deno_ast::CjsAnalysisData directly when upgrading deno_ast
|
||||
// See https://github.com/denoland/deno_ast/pull/117
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct CjsAnalysisData {
|
||||
pub exports: Vec<String>,
|
||||
pub reexports: Vec<String>,
|
||||
}
|
||||
pub static NODE_ANALYSIS_CACHE_DB: CacheDBConfiguration =
|
||||
CacheDBConfiguration {
|
||||
table_initializer: concat!(
|
||||
"CREATE TABLE IF NOT EXISTS cjsanalysiscache (
|
||||
specifier TEXT PRIMARY KEY,
|
||||
source_hash TEXT NOT NULL,
|
||||
data TEXT NOT NULL
|
||||
);",
|
||||
"CREATE UNIQUE INDEX IF NOT EXISTS cjsanalysiscacheidx
|
||||
ON cjsanalysiscache(specifier);",
|
||||
"CREATE TABLE IF NOT EXISTS esmglobalscache (
|
||||
specifier TEXT PRIMARY KEY,
|
||||
source_hash TEXT NOT NULL,
|
||||
data TEXT NOT NULL
|
||||
);",
|
||||
"CREATE UNIQUE INDEX IF NOT EXISTS esmglobalscacheidx
|
||||
ON esmglobalscache(specifier);",
|
||||
),
|
||||
on_version_change: concat!(
|
||||
"DELETE FROM cjsanalysiscache;",
|
||||
"DELETE FROM esmglobalscache;",
|
||||
),
|
||||
preheat_queries: &[],
|
||||
on_failure: CacheFailure::InMemory,
|
||||
};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct NodeAnalysisCache {
|
||||
db_file_path: Option<PathBuf>,
|
||||
inner: Arc<Mutex<Option<Option<NodeAnalysisCacheInner>>>>,
|
||||
inner: NodeAnalysisCacheInner,
|
||||
}
|
||||
|
||||
impl NodeAnalysisCache {
|
||||
pub fn new(db_file_path: Option<PathBuf>) -> Self {
|
||||
#[cfg(test)]
|
||||
pub fn new_in_memory() -> Self {
|
||||
Self::new(CacheDB::in_memory(
|
||||
&NODE_ANALYSIS_CACHE_DB,
|
||||
crate::version::deno(),
|
||||
))
|
||||
}
|
||||
|
||||
pub fn new(db: CacheDB) -> Self {
|
||||
Self {
|
||||
db_file_path,
|
||||
inner: Default::default(),
|
||||
inner: NodeAnalysisCacheInner::new(db),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -45,16 +63,31 @@ impl NodeAnalysisCache {
|
|||
.to_string()
|
||||
}
|
||||
|
||||
fn ensure_ok<T: Default>(res: Result<T, AnyError>) -> T {
|
||||
match res {
|
||||
Ok(x) => x,
|
||||
Err(err) => {
|
||||
// TODO(mmastrac): This behavior was inherited from before the refactoring but it probably makes sense to move it into the cache
|
||||
// at some point.
|
||||
// should never error here, but if it ever does don't fail
|
||||
if cfg!(debug_assertions) {
|
||||
panic!("Error using esm analysis: {err:#}");
|
||||
} else {
|
||||
log::debug!("Error using esm analysis: {:#}", err);
|
||||
}
|
||||
T::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_cjs_analysis(
|
||||
&self,
|
||||
specifier: &str,
|
||||
expected_source_hash: &str,
|
||||
) -> Option<CjsAnalysis> {
|
||||
self
|
||||
.with_inner(|inner| {
|
||||
inner.get_cjs_analysis(specifier, expected_source_hash)
|
||||
})
|
||||
.flatten()
|
||||
Self::ensure_ok(
|
||||
self.inner.get_cjs_analysis(specifier, expected_source_hash),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn set_cjs_analysis(
|
||||
|
@ -63,9 +96,11 @@ impl NodeAnalysisCache {
|
|||
source_hash: &str,
|
||||
cjs_analysis: &CjsAnalysis,
|
||||
) {
|
||||
self.with_inner(|inner| {
|
||||
inner.set_cjs_analysis(specifier, source_hash, cjs_analysis)
|
||||
});
|
||||
Self::ensure_ok(self.inner.set_cjs_analysis(
|
||||
specifier,
|
||||
source_hash,
|
||||
cjs_analysis,
|
||||
));
|
||||
}
|
||||
|
||||
pub fn get_esm_analysis(
|
||||
|
@ -73,11 +108,9 @@ impl NodeAnalysisCache {
|
|||
specifier: &str,
|
||||
expected_source_hash: &str,
|
||||
) -> Option<Vec<String>> {
|
||||
self
|
||||
.with_inner(|inner| {
|
||||
inner.get_esm_analysis(specifier, expected_source_hash)
|
||||
})
|
||||
.flatten()
|
||||
Self::ensure_ok(
|
||||
self.inner.get_esm_analysis(specifier, expected_source_hash),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn set_esm_analysis(
|
||||
|
@ -86,78 +119,22 @@ impl NodeAnalysisCache {
|
|||
source_hash: &str,
|
||||
top_level_decls: &Vec<String>,
|
||||
) {
|
||||
self.with_inner(|inner| {
|
||||
inner.set_esm_analysis(specifier, source_hash, top_level_decls)
|
||||
});
|
||||
}
|
||||
|
||||
fn with_inner<TResult>(
|
||||
&self,
|
||||
action: impl FnOnce(&NodeAnalysisCacheInner) -> Result<TResult, AnyError>,
|
||||
) -> Option<TResult> {
|
||||
// lazily create the cache in order to not
|
||||
let mut maybe_created = self.inner.lock();
|
||||
let inner = match maybe_created.as_ref() {
|
||||
Some(maybe_inner) => maybe_inner.as_ref(),
|
||||
None => {
|
||||
let maybe_inner = match NodeAnalysisCacheInner::new(
|
||||
self.db_file_path.as_deref(),
|
||||
crate::version::deno().to_string(),
|
||||
) {
|
||||
Ok(cache) => Some(cache),
|
||||
Err(err) => {
|
||||
// should never error here, but if it ever does don't fail
|
||||
if cfg!(debug_assertions) {
|
||||
panic!("Error creating node analysis cache: {err:#}");
|
||||
} else {
|
||||
log::debug!("Error creating node analysis cache: {:#}", err);
|
||||
None
|
||||
}
|
||||
}
|
||||
};
|
||||
*maybe_created = Some(maybe_inner);
|
||||
maybe_created.as_ref().and_then(|p| p.as_ref())
|
||||
}
|
||||
}?;
|
||||
match action(inner) {
|
||||
Ok(result) => Some(result),
|
||||
Err(err) => {
|
||||
// should never error here, but if it ever does don't fail
|
||||
if cfg!(debug_assertions) {
|
||||
panic!("Error using esm analysis: {err:#}");
|
||||
} else {
|
||||
log::debug!("Error using esm analysis: {:#}", err);
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
Self::ensure_ok(self.inner.set_esm_analysis(
|
||||
specifier,
|
||||
source_hash,
|
||||
top_level_decls,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct NodeAnalysisCacheInner {
|
||||
conn: Connection,
|
||||
conn: CacheDB,
|
||||
}
|
||||
|
||||
impl NodeAnalysisCacheInner {
|
||||
pub fn new(
|
||||
db_file_path: Option<&Path>,
|
||||
version: String,
|
||||
) -> Result<Self, AnyError> {
|
||||
log::debug!("Opening node analysis cache.");
|
||||
let conn = match db_file_path {
|
||||
Some(path) => Connection::open(path)?,
|
||||
None => Connection::open_in_memory()?,
|
||||
};
|
||||
Self::from_connection(conn, version)
|
||||
}
|
||||
|
||||
fn from_connection(
|
||||
conn: Connection,
|
||||
version: String,
|
||||
) -> Result<Self, AnyError> {
|
||||
initialize(&conn, &version)?;
|
||||
|
||||
Ok(Self { conn })
|
||||
pub fn new(conn: CacheDB) -> Self {
|
||||
Self { conn }
|
||||
}
|
||||
|
||||
pub fn get_cjs_analysis(
|
||||
|
@ -174,19 +151,15 @@ impl NodeAnalysisCacheInner {
|
|||
specifier=?1
|
||||
AND source_hash=?2
|
||||
LIMIT 1";
|
||||
let mut stmt = self.conn.prepare_cached(query)?;
|
||||
let mut rows = stmt.query(params![specifier, &expected_source_hash])?;
|
||||
if let Some(row) = rows.next()? {
|
||||
let analysis_info: String = row.get(0)?;
|
||||
let analysis_info: CjsAnalysisData =
|
||||
serde_json::from_str(&analysis_info)?;
|
||||
Ok(Some(CjsAnalysis {
|
||||
exports: analysis_info.exports,
|
||||
reexports: analysis_info.reexports,
|
||||
}))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
let res = self.conn.query_row(
|
||||
query,
|
||||
params![specifier, &expected_source_hash],
|
||||
|row| {
|
||||
let analysis_info: String = row.get(0)?;
|
||||
Ok(serde_json::from_str(&analysis_info)?)
|
||||
},
|
||||
)?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
pub fn set_cjs_analysis(
|
||||
|
@ -200,16 +173,14 @@ impl NodeAnalysisCacheInner {
|
|||
cjsanalysiscache (specifier, source_hash, data)
|
||||
VALUES
|
||||
(?1, ?2, ?3)";
|
||||
let mut stmt = self.conn.prepare_cached(sql)?;
|
||||
stmt.execute(params![
|
||||
specifier,
|
||||
&source_hash.to_string(),
|
||||
&serde_json::to_string(&CjsAnalysisData {
|
||||
// temporary clones until upgrading deno_ast
|
||||
exports: cjs_analysis.exports.clone(),
|
||||
reexports: cjs_analysis.reexports.clone(),
|
||||
})?,
|
||||
])?;
|
||||
self.conn.execute(
|
||||
sql,
|
||||
params![
|
||||
specifier,
|
||||
&source_hash.to_string(),
|
||||
&serde_json::to_string(&cjs_analysis)?,
|
||||
],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -227,15 +198,16 @@ impl NodeAnalysisCacheInner {
|
|||
specifier=?1
|
||||
AND source_hash=?2
|
||||
LIMIT 1";
|
||||
let mut stmt = self.conn.prepare_cached(query)?;
|
||||
let mut rows = stmt.query(params![specifier, &expected_source_hash])?;
|
||||
if let Some(row) = rows.next()? {
|
||||
let top_level_decls: String = row.get(0)?;
|
||||
let decls: Vec<String> = serde_json::from_str(&top_level_decls)?;
|
||||
Ok(Some(decls))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
let res = self.conn.query_row(
|
||||
query,
|
||||
params![specifier, &expected_source_hash],
|
||||
|row| {
|
||||
let top_level_decls: String = row.get(0)?;
|
||||
let decls: Vec<String> = serde_json::from_str(&top_level_decls)?;
|
||||
Ok(decls)
|
||||
},
|
||||
)?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
pub fn set_esm_analysis(
|
||||
|
@ -249,72 +221,26 @@ impl NodeAnalysisCacheInner {
|
|||
esmglobalscache (specifier, source_hash, data)
|
||||
VALUES
|
||||
(?1, ?2, ?3)";
|
||||
let mut stmt = self.conn.prepare_cached(sql)?;
|
||||
stmt.execute(params![
|
||||
specifier,
|
||||
&source_hash,
|
||||
&serde_json::to_string(top_level_decls)?,
|
||||
])?;
|
||||
self.conn.execute(
|
||||
sql,
|
||||
params![
|
||||
specifier,
|
||||
&source_hash,
|
||||
&serde_json::to_string(top_level_decls)?,
|
||||
],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn initialize(conn: &Connection, cli_version: &str) -> Result<(), AnyError> {
|
||||
// INT doesn't store up to u64, so use TEXT for source_hash
|
||||
let query = format!(
|
||||
"{INITIAL_PRAGMAS}
|
||||
CREATE TABLE IF NOT EXISTS cjsanalysiscache (
|
||||
specifier TEXT PRIMARY KEY,
|
||||
source_hash TEXT NOT NULL,
|
||||
data TEXT NOT NULL
|
||||
);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS cjsanalysiscacheidx
|
||||
ON cjsanalysiscache(specifier);
|
||||
CREATE TABLE IF NOT EXISTS esmglobalscache (
|
||||
specifier TEXT PRIMARY KEY,
|
||||
source_hash TEXT NOT NULL,
|
||||
data TEXT NOT NULL
|
||||
);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS esmglobalscacheidx
|
||||
ON esmglobalscache(specifier);
|
||||
CREATE TABLE IF NOT EXISTS info (
|
||||
key TEXT PRIMARY KEY,
|
||||
value TEXT NOT NULL
|
||||
);
|
||||
"
|
||||
);
|
||||
|
||||
conn.execute_batch(&query)?;
|
||||
|
||||
// delete the cache when the CLI version changes
|
||||
let data_cli_version: Option<String> = conn
|
||||
.query_row(
|
||||
"SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1",
|
||||
[],
|
||||
|row| row.get(0),
|
||||
)
|
||||
.ok();
|
||||
if data_cli_version.as_deref() != Some(cli_version) {
|
||||
conn.execute("DELETE FROM cjsanalysiscache", params![])?;
|
||||
conn.execute("DELETE FROM esmglobalscache", params![])?;
|
||||
let mut stmt = conn
|
||||
.prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?;
|
||||
stmt.execute(params!["CLI_VERSION", &cli_version])?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
pub fn node_analysis_cache_general_use() {
|
||||
let conn = Connection::open_in_memory().unwrap();
|
||||
let cache =
|
||||
NodeAnalysisCacheInner::from_connection(conn, "1.0.0".to_string())
|
||||
.unwrap();
|
||||
let conn = CacheDB::in_memory(&NODE_ANALYSIS_CACHE_DB, "1.0.0");
|
||||
let cache = NodeAnalysisCacheInner::new(conn);
|
||||
|
||||
assert!(cache.get_cjs_analysis("file.js", "2").unwrap().is_none());
|
||||
let cjs_analysis = CjsAnalysis {
|
||||
|
@ -349,10 +275,8 @@ mod test {
|
|||
.unwrap();
|
||||
|
||||
// recreating with same cli version should still have it
|
||||
let conn = cache.conn;
|
||||
let cache =
|
||||
NodeAnalysisCacheInner::from_connection(conn, "1.0.0".to_string())
|
||||
.unwrap();
|
||||
let conn = cache.conn.recreate_with_version("1.0.0");
|
||||
let cache = NodeAnalysisCacheInner::new(conn);
|
||||
let actual_analysis =
|
||||
cache.get_cjs_analysis("file.js", "2").unwrap().unwrap();
|
||||
assert_eq!(actual_analysis.exports, cjs_analysis.exports);
|
||||
|
@ -362,10 +286,8 @@ mod test {
|
|||
assert_eq!(actual_esm_analysis, esm_analysis);
|
||||
|
||||
// now changing the cli version should clear it
|
||||
let conn = cache.conn;
|
||||
let cache =
|
||||
NodeAnalysisCacheInner::from_connection(conn, "2.0.0".to_string())
|
||||
.unwrap();
|
||||
let conn = cache.conn.recreate_with_version("2.0.0");
|
||||
let cache = NodeAnalysisCacheInner::new(conn);
|
||||
assert!(cache.get_cjs_analysis("file.js", "2").unwrap().is_none());
|
||||
assert!(cache.get_esm_analysis("file.js", "2").unwrap().is_none());
|
||||
}
|
||||
|
|
223
cli/cache/parsed_source.rs
vendored
223
cli/cache/parsed_source.rs
vendored
|
@ -1,8 +1,6 @@
|
|||
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use deno_ast::MediaType;
|
||||
|
@ -15,13 +13,37 @@ use deno_graph::CapturingModuleParser;
|
|||
use deno_graph::DefaultModuleAnalyzer;
|
||||
use deno_graph::ModuleInfo;
|
||||
use deno_graph::ModuleParser;
|
||||
use deno_graph::ParsedSourceStore;
|
||||
use deno_runtime::deno_webstorage::rusqlite::params;
|
||||
use deno_runtime::deno_webstorage::rusqlite::Connection;
|
||||
|
||||
use super::common::INITIAL_PRAGMAS;
|
||||
use super::cache_db::CacheDB;
|
||||
use super::cache_db::CacheDBConfiguration;
|
||||
use super::cache_db::CacheFailure;
|
||||
use super::FastInsecureHasher;
|
||||
|
||||
const SELECT_MODULE_INFO: &str = "
|
||||
SELECT
|
||||
module_info
|
||||
FROM
|
||||
moduleinfocache
|
||||
WHERE
|
||||
specifier=?1
|
||||
AND media_type=?2
|
||||
AND source_hash=?3
|
||||
LIMIT 1";
|
||||
|
||||
pub static PARSED_SOURCE_CACHE_DB: CacheDBConfiguration =
|
||||
CacheDBConfiguration {
|
||||
table_initializer: "CREATE TABLE IF NOT EXISTS moduleinfocache (
|
||||
specifier TEXT PRIMARY KEY,
|
||||
media_type TEXT NOT NULL,
|
||||
source_hash TEXT NOT NULL,
|
||||
module_info TEXT NOT NULL
|
||||
);",
|
||||
on_version_change: "DELETE FROM moduleinfocache;",
|
||||
preheat_queries: &[SELECT_MODULE_INFO],
|
||||
on_failure: CacheFailure::InMemory,
|
||||
};
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
struct ParsedSourceCacheSources(
|
||||
Arc<Mutex<HashMap<ModuleSpecifier, ParsedSource>>>,
|
||||
|
@ -53,24 +75,29 @@ impl deno_graph::ParsedSourceStore for ParsedSourceCacheSources {
|
|||
/// for cached dependency analysis.
|
||||
#[derive(Clone)]
|
||||
pub struct ParsedSourceCache {
|
||||
db_cache_path: Option<PathBuf>,
|
||||
cli_version: &'static str,
|
||||
db: CacheDB,
|
||||
sources: ParsedSourceCacheSources,
|
||||
}
|
||||
|
||||
impl ParsedSourceCache {
|
||||
pub fn new(sql_cache_path: Option<PathBuf>) -> Self {
|
||||
#[cfg(test)]
|
||||
pub fn new_in_memory() -> Self {
|
||||
Self {
|
||||
db_cache_path: sql_cache_path,
|
||||
cli_version: crate::version::deno(),
|
||||
db: CacheDB::in_memory(&PARSED_SOURCE_CACHE_DB, crate::version::deno()),
|
||||
sources: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(db: CacheDB) -> Self {
|
||||
Self {
|
||||
db,
|
||||
sources: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reset_for_file_watcher(&self) -> Self {
|
||||
Self {
|
||||
db_cache_path: self.db_cache_path.clone(),
|
||||
cli_version: self.cli_version,
|
||||
db: self.db.clone(),
|
||||
sources: Default::default(),
|
||||
}
|
||||
}
|
||||
|
@ -104,31 +131,11 @@ impl ParsedSourceCache {
|
|||
self.sources.0.lock().remove(specifier);
|
||||
}
|
||||
|
||||
/// Gets this cache as a `deno_graph::ParsedSourceStore`.
|
||||
pub fn as_store(&self) -> Box<dyn ParsedSourceStore> {
|
||||
// This trait is not implemented directly on ParsedSourceCache
|
||||
// in order to prevent its methods from being accidentally used.
|
||||
// Generally, people should prefer the methods found that will
|
||||
// lazily parse if necessary.
|
||||
Box::new(self.sources.clone())
|
||||
}
|
||||
|
||||
pub fn as_analyzer(&self) -> Box<dyn deno_graph::ModuleAnalyzer> {
|
||||
match ParsedSourceCacheModuleAnalyzer::new(
|
||||
self.db_cache_path.as_deref(),
|
||||
self.cli_version,
|
||||
Box::new(ParsedSourceCacheModuleAnalyzer::new(
|
||||
self.db.clone(),
|
||||
self.sources.clone(),
|
||||
) {
|
||||
Ok(analyzer) => Box::new(analyzer),
|
||||
Err(err) => {
|
||||
log::debug!("Could not create cached module analyzer. {:#}", err);
|
||||
// fallback to not caching if it can't be created
|
||||
Box::new(deno_graph::CapturingModuleAnalyzer::new(
|
||||
None,
|
||||
Some(self.as_store()),
|
||||
))
|
||||
}
|
||||
}
|
||||
))
|
||||
}
|
||||
|
||||
/// Creates a parser that will reuse a ParsedSource from the store
|
||||
|
@@ -139,32 +146,13 @@ impl ParsedSourceCache {
 }
 
 struct ParsedSourceCacheModuleAnalyzer {
-  conn: Connection,
+  conn: CacheDB,
   sources: ParsedSourceCacheSources,
 }
 
 impl ParsedSourceCacheModuleAnalyzer {
-  pub fn new(
-    db_file_path: Option<&Path>,
-    cli_version: &'static str,
-    sources: ParsedSourceCacheSources,
-  ) -> Result<Self, AnyError> {
-    log::debug!("Loading cached module analyzer.");
-    let conn = match db_file_path {
-      Some(path) => Connection::open(path)?,
-      None => Connection::open_in_memory()?,
-    };
-    Self::from_connection(conn, cli_version, sources)
-  }
-
-  fn from_connection(
-    conn: Connection,
-    cli_version: &'static str,
-    sources: ParsedSourceCacheSources,
-  ) -> Result<Self, AnyError> {
-    initialize(&conn, cli_version)?;
-
-    Ok(Self { conn, sources })
+  pub fn new(conn: CacheDB, sources: ParsedSourceCacheSources) -> Self {
+    Self { conn, sources }
   }
 
   pub fn get_module_info(
@@ -173,29 +161,21 @@ impl ParsedSourceCacheModuleAnalyzer {
     media_type: MediaType,
     expected_source_hash: &str,
   ) -> Result<Option<ModuleInfo>, AnyError> {
-    let query = "
-      SELECT
-        module_info
-      FROM
-        moduleinfocache
-      WHERE
-        specifier=?1
-        AND media_type=?2
-        AND source_hash=?3
-      LIMIT 1";
-    let mut stmt = self.conn.prepare_cached(query)?;
-    let mut rows = stmt.query(params![
-      &specifier.as_str(),
-      serialize_media_type(media_type),
-      &expected_source_hash,
-    ])?;
-    if let Some(row) = rows.next()? {
-      let module_info: String = row.get(0)?;
-      let module_info = serde_json::from_str(&module_info)?;
-      Ok(Some(module_info))
-    } else {
-      Ok(None)
-    }
+    let query = SELECT_MODULE_INFO;
+    let res = self.conn.query_row(
+      query,
+      params![
+        &specifier.as_str(),
+        serialize_media_type(media_type),
+        &expected_source_hash,
+      ],
+      |row| {
+        let module_info: String = row.get(0)?;
+        let module_info = serde_json::from_str(&module_info)?;
+        Ok(module_info)
+      },
+    )?;
+    Ok(res)
   }
 
   pub fn set_module_info(
@@ -210,13 +190,15 @@ impl ParsedSourceCacheModuleAnalyzer {
         moduleinfocache (specifier, media_type, source_hash, module_info)
       VALUES
         (?1, ?2, ?3, ?4)";
-    let mut stmt = self.conn.prepare_cached(sql)?;
-    stmt.execute(params![
-      specifier.as_str(),
-      serialize_media_type(media_type),
-      &source_hash,
-      &serde_json::to_string(&module_info)?,
-    ])?;
+    self.conn.execute(
+      sql,
+      params![
+        specifier.as_str(),
+        serialize_media_type(media_type),
+        &source_hash,
+        &serde_json::to_string(&module_info)?,
+      ],
+    )?;
     Ok(())
   }
 }
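Both the read and write paths above now go through the `CacheDB` wrapper instead of holding rusqlite statements directly. Below is a minimal sketch of the same pattern for a hypothetical lookup, using the method shapes exactly as they appear in this hunk: `query_row` takes the SQL, the params, and a row-mapping closure and appears to yield `Ok(None)` when no row matches, while `execute` takes the SQL and params. The `counters` table and `bump_counter` function are illustrative only, and the sketch assumes the surrounding file's imports (`params!`, `AnyError`, `CacheDB`).

// Sketch only: reading and writing through a CacheDB handle named `conn`.
fn bump_counter(conn: &CacheDB, key: &str) -> Result<i64, AnyError> {
  // Read: the closure maps a matching row to the value we want.
  let current: Option<i64> = conn.query_row(
    "SELECT value FROM counters WHERE key=?1 LIMIT 1",
    params![key],
    |row| {
      let value: i64 = row.get(0)?;
      Ok(value)
    },
  )?;
  let next = current.unwrap_or(0) + 1;

  // Write: a single execute call; statement reuse and retries live inside CacheDB.
  conn.execute(
    "INSERT OR REPLACE INTO counters (key, value) VALUES (?1, ?2)",
    params![key, next],
  )?;
  Ok(next)
}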
@@ -287,46 +269,6 @@ impl deno_graph::ModuleAnalyzer for ParsedSourceCacheModuleAnalyzer {
   }
 }
 
-fn initialize(
-  conn: &Connection,
-  cli_version: &'static str,
-) -> Result<(), AnyError> {
-  let query = format!(
-    "{INITIAL_PRAGMAS}
-    -- INT doesn't store up to u64, so use TEXT for source_hash
-    CREATE TABLE IF NOT EXISTS moduleinfocache (
-      specifier TEXT PRIMARY KEY,
-      media_type TEXT NOT NULL,
-      source_hash TEXT NOT NULL,
-      module_info TEXT NOT NULL
-    );
-    CREATE TABLE IF NOT EXISTS info (
-      key TEXT PRIMARY KEY,
-      value TEXT NOT NULL
-    );
-    "
-  );
-
-  conn.execute_batch(&query)?;
-
-  // delete the cache when the CLI version changes
-  let data_cli_version: Option<String> = conn
-    .query_row(
-      "SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1",
-      [],
-      |row| row.get(0),
-    )
-    .ok();
-  if data_cli_version.as_deref() != Some(cli_version) {
-    conn.execute("DELETE FROM moduleinfocache", params![])?;
-    let mut stmt = conn
-      .prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?;
-    stmt.execute(params!["CLI_VERSION", &cli_version])?;
-  }
-
-  Ok(())
-}
-
 fn compute_source_hash(bytes: &[u8]) -> String {
   FastInsecureHasher::new().write(bytes).finish().to_string()
 }
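The imperative `initialize` removed above is what the declarative configuration at the top of this file replaces: the DDL becomes the table initializer, the hand-rolled `CLI_VERSION` comparison becomes the on-version-change statement, and the pragmas move into `CacheDB` itself. The sketch below shows how those pieces plausibly line up; it is inferred from the removed code and from the `PARSED_SOURCE_CACHE_DB` fragments visible at the top of this diff, so the exact constant in the new file may differ in wording.

// Sketch only; not necessarily the literal constant from the new file.
pub static PARSED_SOURCE_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
  // INT doesn't store up to u64, so use TEXT for source_hash.
  table_initializer: "CREATE TABLE IF NOT EXISTS moduleinfocache (
    specifier TEXT PRIMARY KEY,
    media_type TEXT NOT NULL,
    source_hash TEXT NOT NULL,
    module_info TEXT NOT NULL
  );",
  // Previously: compare CLI_VERSION by hand and clear the table on mismatch.
  on_version_change: "DELETE FROM moduleinfocache;",
  // Keep the hot SELECT statement prepared while the database initializes.
  preheat_queries: &[SELECT_MODULE_INFO],
  // If the on-disk DB cannot be opened or recreated, fall back to an
  // in-memory cache for this process run.
  on_failure: CacheFailure::InMemory,
};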
@@ -340,13 +282,8 @@ mod test {
 
   #[test]
   pub fn parsed_source_cache_module_analyzer_general_use() {
-    let conn = Connection::open_in_memory().unwrap();
-    let cache = ParsedSourceCacheModuleAnalyzer::from_connection(
-      conn,
-      "1.0.0",
-      Default::default(),
-    )
-    .unwrap();
+    let conn = CacheDB::in_memory(&PARSED_SOURCE_CACHE_DB, "1.0.0");
+    let cache = ParsedSourceCacheModuleAnalyzer::new(conn, Default::default());
     let specifier1 =
       ModuleSpecifier::parse("https://localhost/mod.ts").unwrap();
     let specifier2 =
@@ -403,13 +340,8 @@ mod test {
     );
 
     // try recreating with the same version
-    let conn = cache.conn;
-    let cache = ParsedSourceCacheModuleAnalyzer::from_connection(
-      conn,
-      "1.0.0",
-      Default::default(),
-    )
-    .unwrap();
+    let conn = cache.conn.recreate_with_version("1.0.0");
+    let cache = ParsedSourceCacheModuleAnalyzer::new(conn, Default::default());
 
     // should get it
     assert_eq!(
@@ -420,13 +352,8 @@ mod test {
     );
 
     // try recreating with a different version
-    let conn = cache.conn;
-    let cache = ParsedSourceCacheModuleAnalyzer::from_connection(
-      conn,
-      "1.0.1",
-      Default::default(),
-    )
-    .unwrap();
+    let conn = cache.conn.recreate_with_version("1.0.1");
+    let cache = ParsedSourceCacheModuleAnalyzer::new(conn, Default::default());
 
     // should no longer exist
     assert_eq!(
@@ -224,7 +224,7 @@ pub async fn create_graph_and_maybe_check(
     log::warn!("{}", ignored_options);
   }
   let maybe_config_specifier = ps.options.maybe_config_file_specifier();
-  let cache = TypeCheckCache::new(&ps.dir.type_checking_cache_db_file_path());
+  let cache = TypeCheckCache::new(ps.caches.type_checking_cache_db(&ps.dir));
   let check_result = check::check(
     graph.clone(),
     &cache,
@@ -167,7 +167,7 @@ mod tests {
   #[test]
   fn test_esm_code_with_node_globals() {
     let r = esm_code_with_node_globals(
-      &NodeAnalysisCache::new(None),
+      &NodeAnalysisCache::new_in_memory(),
       &ModuleSpecifier::parse("https://example.com/foo/bar.js").unwrap(),
       "export const x = 1;".to_string(),
     )
@@ -183,7 +183,7 @@ mod tests {
   #[test]
   fn test_esm_code_with_node_globals_with_shebang() {
     let r = esm_code_with_node_globals(
-      &NodeAnalysisCache::new(None),
+      &NodeAnalysisCache::new_in_memory(),
       &ModuleSpecifier::parse("https://example.com/foo/bar.js").unwrap(),
       "#!/usr/bin/env node\nexport const x = 1;".to_string(),
     )
@@ -8,6 +8,7 @@ use crate::args::TsConfigType;
 use crate::args::TsTypeLib;
 use crate::args::TypeCheckMode;
 use crate::cache;
+use crate::cache::Caches;
 use crate::cache::DenoDir;
 use crate::cache::EmitCache;
 use crate::cache::FastInsecureHasher;
@@ -74,6 +75,7 @@ pub struct ProcState(Arc<Inner>);
 
 pub struct Inner {
   pub dir: DenoDir,
+  pub caches: Caches,
   pub file_fetcher: Arc<FileFetcher>,
   pub http_client: HttpClient,
   pub options: Arc<CliOptions>,
@@ -139,6 +141,7 @@ impl ProcState {
     self.blob_store.clear();
     self.0 = Arc::new(Inner {
       dir: self.dir.clone(),
      caches: self.caches.clone(),
       options: self.options.clone(),
       emit_cache: self.emit_cache.clone(),
       emit_options_hash: self.emit_options_hash,
@@ -192,11 +195,25 @@ impl ProcState {
     cli_options: Arc<CliOptions>,
     maybe_sender: Option<tokio::sync::mpsc::UnboundedSender<Vec<PathBuf>>>,
   ) -> Result<Self, AnyError> {
+    let dir = cli_options.resolve_deno_dir()?;
+    let caches = Caches::default();
+    // Warm up the caches we know we'll likely need based on the CLI mode
+    match cli_options.sub_command() {
+      DenoSubcommand::Run(_) => {
+        _ = caches.dep_analysis_db(&dir);
+        _ = caches.node_analysis_db(&dir);
+      }
+      DenoSubcommand::Check(_) => {
+        _ = caches.dep_analysis_db(&dir);
+        _ = caches.node_analysis_db(&dir);
+        _ = caches.type_checking_cache_db(&dir);
+      }
+      _ => {}
+    }
     let blob_store = BlobStore::default();
     let broadcast_channel = InMemoryBroadcastChannel::default();
     let shared_array_buffer_store = SharedArrayBufferStore::default();
     let compiled_wasm_module_store = CompiledWasmModuleStore::default();
-    let dir = cli_options.resolve_deno_dir()?;
     let deps_cache_location = dir.deps_folder_path();
     let http_cache = HttpCache::new(&deps_cache_location);
     let root_cert_store = cli_options.resolve_root_cert_store()?;
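The warm-up calls above only make sense if the `Caches` accessors are cheap to call early and hand back shared handles. The internals of `Caches` are not part of this excerpt; the following is a rough sketch of one plausible shape for such a lazily initialized accessor, with `open_dep_analysis_db` standing in for whatever constructor the real code uses (hypothetical here; only `CacheDB::in_memory` appears in this diff).

// Sketch only, not the actual Caches implementation.
use once_cell::sync::OnceCell;
use std::sync::Arc;

#[derive(Clone, Default)]
pub struct Caches {
  dep_analysis_db: Arc<OnceCell<CacheDB>>,
}

// Hypothetical helper standing in for however the real code opens the on-disk database.
fn open_dep_analysis_db(_dir: &DenoDir) -> CacheDB {
  unimplemented!("stand-in for the real constructor")
}

impl Caches {
  pub fn dep_analysis_db(&self, dir: &DenoDir) -> CacheDB {
    // The first caller pays the initialization cost; later callers (and the
    // warm-up `_ = caches.dep_analysis_db(&dir)` calls above) just clone the
    // same handle.
    self
      .dep_analysis_db
      .get_or_init(|| open_dep_analysis_db(dir))
      .clone()
  }
}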
@@ -284,7 +301,7 @@ impl ProcState {
     }
     let emit_cache = EmitCache::new(dir.gen_cache.clone());
     let parsed_source_cache =
-      ParsedSourceCache::new(Some(dir.dep_analysis_db_file_path()));
+      ParsedSourceCache::new(caches.dep_analysis_db(&dir));
     let npm_cache = NpmCache::from_deno_dir(
       &dir,
       cli_options.cache_setting(),
@@ -292,11 +309,12 @@ impl ProcState {
       progress_bar.clone(),
     );
     let node_analysis_cache =
-      NodeAnalysisCache::new(Some(dir.node_analysis_db_file_path()));
+      NodeAnalysisCache::new(caches.node_analysis_db(&dir));
 
     let emit_options: deno_ast::EmitOptions = ts_config_result.ts_config.into();
     Ok(ProcState(Arc::new(Inner {
       dir,
+      caches,
       options: cli_options,
       emit_cache,
       emit_options_hash: FastInsecureHasher::new()
@@ -430,7 +448,7 @@ impl ProcState {
         && !roots.iter().all(|r| reload_exclusions.contains(r)),
     };
     let check_cache =
-      TypeCheckCache::new(&self.dir.type_checking_cache_db_file_path());
+      TypeCheckCache::new(self.caches.type_checking_cache_db(&self.dir));
     let check_result =
       check::check(graph, &check_cache, &self.npm_resolver, options)?;
     self.graph_container.set_type_checked(&roots, lib);
@@ -11,11 +11,11 @@ use deno_runtime::deno_fetch::reqwest;
 use deno_runtime::deno_websocket::tokio_tungstenite;
 use deno_runtime::deno_websocket::tokio_tungstenite::tungstenite;
 use std::io::BufRead;
-use std::process::Child;
 use test_util as util;
 use test_util::TempDir;
 use tokio::net::TcpStream;
 use util::http_server;
+use util::DenoChild;
 
 struct InspectorTester {
   socket_tx: SplitSink<
@@ -30,7 +30,7 @@ struct InspectorTester {
     >,
   >,
   notification_filter: Box<dyn FnMut(&str) -> bool + 'static>,
-  child: Child,
+  child: DenoChild,
   stderr_lines: Box<dyn Iterator<Item = String>>,
   stdout_lines: Box<dyn Iterator<Item = String>>,
 }
@@ -40,7 +40,7 @@ fn ignore_script_parsed(msg: &str) -> bool {
 }
 
 impl InspectorTester {
-  async fn create<F>(mut child: Child, notification_filter: F) -> Self
+  async fn create<F>(mut child: DenoChild, notification_filter: F) -> Self
   where
     F: FnMut(&str) -> bool + 'static,
   {
@@ -6,6 +6,7 @@ use std::io::BufRead;
 use test_util as util;
 use test_util::assert_contains;
 use test_util::TempDir;
+use util::DenoChild;
 
 use util::assert_not_contains;
@@ -80,7 +81,7 @@ fn read_line(s: &str, lines: &mut impl Iterator<Item = String>) -> String {
   lines.find(|m| m.contains(s)).unwrap()
 }
 
-fn check_alive_then_kill(mut child: std::process::Child) {
+fn check_alive_then_kill(mut child: DenoChild) {
   assert!(child.try_wait().unwrap().is_none());
   child.kill().unwrap();
 }
@@ -1244,8 +1245,8 @@ fn run_watch_dynamic_imports() {
     .spawn()
     .unwrap();
   let (mut stdout_lines, mut stderr_lines) = child_lines(&mut child);
-  assert_contains!(stderr_lines.next().unwrap(), "No package.json file found");
-  assert_contains!(stderr_lines.next().unwrap(), "Process started");
+  wait_contains("No package.json file found", &mut stderr_lines);
+  wait_contains("Process started", &mut stderr_lines);
 
   wait_contains(
     "Hopefully dynamic import will be watched...",
@@ -12,6 +12,7 @@ use crate::args::FilesConfig;
 use crate::args::FmtOptions;
 use crate::args::FmtOptionsConfig;
 use crate::args::ProseWrap;
+use crate::cache::Caches;
 use crate::colors;
 use crate::util::diff::diff;
 use crate::util::file_watcher;
@@ -101,9 +102,10 @@ pub async fn format(
     }
   };
   let deno_dir = &cli_options.resolve_deno_dir()?;
-  let operation = |(paths, fmt_options): (Vec<PathBuf>, FmtOptionsConfig)| async move {
+  let caches = Caches::default();
+  let operation = |(paths, fmt_options): (Vec<PathBuf>, FmtOptionsConfig)| async {
     let incremental_cache = Arc::new(IncrementalCache::new(
-      &deno_dir.fmt_incremental_cache_db_file_path(),
+      caches.fmt_incremental_cache_db(deno_dir),
       &fmt_options,
       &paths,
     ));
@@ -11,6 +11,7 @@ use crate::args::FilesConfig;
 use crate::args::LintOptions;
 use crate::args::LintReporterKind;
 use crate::args::LintRulesConfig;
+use crate::cache::Caches;
 use crate::colors;
 use crate::tools::fmt::run_parallelized;
 use crate::util::file_watcher;
@@ -98,9 +99,10 @@ pub async fn lint(
 
   let has_error = Arc::new(AtomicBool::new(false));
   let deno_dir = cli_options.resolve_deno_dir()?;
+  let caches = Caches::default();
   let operation = |paths: Vec<PathBuf>| async {
     let incremental_cache = Arc::new(IncrementalCache::new(
-      &deno_dir.lint_incremental_cache_db_file_path(),
+      caches.lint_incremental_cache_db(&deno_dir),
       // use a hash of the rule names in order to bust the cache
       &{
        // ensure this is stable by sorting it
2
cli/tools/vendor/test.rs
vendored
@@ -218,7 +218,7 @@ impl VendorTestBuilder {
     let output_dir = make_path("/vendor");
     let roots = self.entry_points.clone();
     let loader = self.loader.clone();
-    let parsed_source_cache = ParsedSourceCache::new(None);
+    let parsed_source_cache = ParsedSourceCache::new_in_memory();
     let analyzer = parsed_source_cache.as_analyzer();
     let graph = build_test_graph(
       roots,
@@ -1897,22 +1897,117 @@ pub fn new_deno_dir() -> TempDir {
   TempDir::new()
 }
 
+/// Because we need to keep the [`TempDir`] alive for the entire run of this command,
+/// we have to effectively reproduce the entire builder-pattern object for [`Command`].
 pub struct DenoCmd {
   // keep the deno dir directory alive for the duration of the command
   _deno_dir: TempDir,
   cmd: Command,
 }
 
-impl Deref for DenoCmd {
-  type Target = Command;
-  fn deref(&self) -> &Command {
-    &self.cmd
-  }
-}
+impl DenoCmd {
+  pub fn args<I, S>(&mut self, args: I) -> &mut Self
+  where
+    I: IntoIterator<Item = S>,
+    S: AsRef<std::ffi::OsStr>,
+  {
+    self.cmd.args(args);
+    self
+  }
+
+  pub fn arg<S>(&mut self, arg: S) -> &mut Self
+  where
+    S: AsRef<std::ffi::OsStr>,
+  {
+    self.cmd.arg(arg);
+    self
+  }
+
+  pub fn envs<I, K, V>(&mut self, vars: I) -> &mut Self
+  where
+    I: IntoIterator<Item = (K, V)>,
+    K: AsRef<std::ffi::OsStr>,
+    V: AsRef<std::ffi::OsStr>,
+  {
+    self.cmd.envs(vars);
+    self
+  }
+
+  pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Self
+  where
+    K: AsRef<std::ffi::OsStr>,
+    V: AsRef<std::ffi::OsStr>,
+  {
+    self.cmd.env(key, val);
+    self
+  }
+
+  pub fn env_remove<K>(&mut self, key: K) -> &mut Self
+  where
+    K: AsRef<std::ffi::OsStr>,
+  {
+    self.cmd.env_remove(key);
+    self
+  }
+
+  pub fn stdin<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Self {
+    self.cmd.stdin(cfg);
+    self
+  }
+
+  pub fn stdout<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Self {
+    self.cmd.stdout(cfg);
+    self
+  }
+
+  pub fn stderr<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Self {
+    self.cmd.stderr(cfg);
+    self
+  }
+
+  pub fn current_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Self {
+    self.cmd.current_dir(dir);
+    self
+  }
+
+  pub fn output(&mut self) -> Result<std::process::Output, std::io::Error> {
+    self.cmd.output()
+  }
+
+  pub fn status(&mut self) -> Result<std::process::ExitStatus, std::io::Error> {
+    self.cmd.status()
+  }
+
+  pub fn spawn(&mut self) -> Result<DenoChild, std::io::Error> {
+    Ok(DenoChild {
+      _deno_dir: self._deno_dir.clone(),
+      child: self.cmd.spawn()?,
+    })
+  }
+}
 
-impl DerefMut for DenoCmd {
-  fn deref_mut(&mut self) -> &mut Command {
-    &mut self.cmd
-  }
-}
+/// We need to keep the [`TempDir`] around until the child has finished executing, so
+/// this acts as a RAII guard.
+pub struct DenoChild {
+  _deno_dir: TempDir,
+  child: Child,
+}
+
+impl Deref for DenoChild {
+  type Target = Child;
+  fn deref(&self) -> &Child {
+    &self.child
+  }
+}
+
+impl DerefMut for DenoChild {
+  fn deref_mut(&mut self) -> &mut Child {
+    &mut self.child
+  }
+}
+
+impl DenoChild {
+  pub fn wait_with_output(self) -> Result<Output, std::io::Error> {
+    self.child.wait_with_output()
+  }
+}
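A short sketch of how this pair is meant to be used from a test, assuming a helper like `util::deno_cmd()` (not shown in this excerpt) that builds a `DenoCmd` with its own temp deno dir; because `DenoChild` holds a clone of that `TempDir` and derefs to `Child`, existing call sites such as `child.kill()` keep working while the directory stays alive until the child is dropped.

// Sketch only; `deno_cmd()` is assumed, everything else mirrors the API above.
fn smoke_test() -> std::io::Result<()> {
  // Builder methods are now explicit on DenoCmd instead of Deref<Target = Command>.
  let output = deno_cmd()
    .arg("eval")
    .arg("console.log('hello')")
    .stdout(std::process::Stdio::piped())
    .output()?;
  assert!(output.status.success());

  // spawn() hands back a DenoChild, which keeps the TempDir alive (RAII) and
  // still behaves like a std Child via Deref/DerefMut.
  let mut child = deno_cmd().arg("repl").spawn()?;
  assert!(child.try_wait()?.is_none());
  child.kill()?;
  Ok(())
}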