1
0
Fork 0
mirror of https://github.com/denoland/deno.git synced 2024-12-22 07:14:47 -05:00

fix(ext/kv): retry transaction on SQLITE_BUSY errors (#20189)

Properly handle the `SQLITE_BUSY` error code by retrying the
transaction.

Also wraps database initialization logic in a transaction to protect
against incomplete/concurrent initializations.

Fixes https://github.com/denoland/deno/issues/20116.
This commit is contained in:
Heyang Zhou 2023-08-17 18:53:55 +08:00 committed by GitHub
parent ec63b36994
commit 0960e895da
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 144 additions and 36 deletions

1
Cargo.lock generated
View file

@ -1220,6 +1220,7 @@ dependencies = [
"base64 0.13.1", "base64 0.13.1",
"deno_core", "deno_core",
"hex", "hex",
"log",
"num-bigint", "num-bigint",
"rand", "rand",
"rusqlite", "rusqlite",

View file

@ -1756,3 +1756,53 @@ dbTest("atomic operation is exposed", (db) => {
const ao = db.atomic(); const ao = db.atomic();
assert(ao instanceof Deno.AtomicOperation); assert(ao instanceof Deno.AtomicOperation);
}); });
Deno.test({
  name: "racy open",
  async fn() {
    // Open the same database file from three concurrent callers, many times
    // over, to exercise the initialization path under contention.
    const attempts = 100;
    for (let attempt = 0; attempt < attempts; attempt++) {
      const path = await Deno.makeTempFile({ prefix: "racy_open_db" });
      try {
        const handles = await Promise.all(
          [0, 1, 2].map(() => Deno.openKv(path)),
        );
        for (const handle of handles) {
          handle.close();
        }
      } finally {
        await Deno.remove(path);
      }
    }
  },
});
Deno.test({
  name: "racy write",
  async fn() {
    // Hammer a single counter key from many connections at once; the final
    // value must equal the total number of sum mutations committed.
    const path = await Deno.makeTempFile({ prefix: "racy_write_db" });
    const writers = 20;
    const rounds = 5;
    try {
      const connections = await Promise.all(
        Array.from({ length: writers }, () => Deno.openKv(path)),
      );
      try {
        for (let round = 0; round < rounds; round++) {
          await Promise.all(
            connections.map((kv) => kv.atomic().sum(["counter"], 1n).commit()),
          );
        }
        const entry = await connections[0].get(["counter"]);
        assertEquals(
          (entry.value as Deno.KvU64).value,
          BigInt(writers * rounds),
        );
      } finally {
        for (const kv of connections) {
          kv.close();
        }
      }
    } finally {
      await Deno.remove(path);
    }
  },
});

View file

@ -19,6 +19,7 @@ async-trait.workspace = true
base64.workspace = true base64.workspace = true
deno_core.workspace = true deno_core.workspace = true
hex.workspace = true hex.workspace = true
log.workspace = true
num-bigint.workspace = true num-bigint.workspace = true
rand.workspace = true rand.workspace = true
rusqlite.workspace = true rusqlite.workspace = true

View file

@ -3,6 +3,7 @@
use std::borrow::Cow; use std::borrow::Cow;
use std::cell::Cell; use std::cell::Cell;
use std::cell::RefCell; use std::cell::RefCell;
use std::future::Future;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::path::Path; use std::path::Path;
use std::path::PathBuf; use std::path::PathBuf;
@ -21,6 +22,7 @@ use deno_core::task::spawn;
use deno_core::task::spawn_blocking; use deno_core::task::spawn_blocking;
use deno_core::AsyncRefCell; use deno_core::AsyncRefCell;
use deno_core::OpState; use deno_core::OpState;
use rand::Rng;
use rusqlite::params; use rusqlite::params;
use rusqlite::OpenFlags; use rusqlite::OpenFlags;
use rusqlite::OptionalExtension; use rusqlite::OptionalExtension;
@ -165,28 +167,41 @@ impl<P: SqliteDbHandlerPermissions> DatabaseHandler for SqliteDbHandler<P> {
} }
} }
let default_storage_dir = self.default_storage_dir.clone(); let conn = sqlite_retry_loop(|| {
let conn = spawn_blocking(move || { let path = path.clone();
let conn = match (path.as_deref(), &default_storage_dir) { let default_storage_dir = self.default_storage_dir.clone();
(Some(":memory:"), _) | (None, None) => { async move {
rusqlite::Connection::open_in_memory()? spawn_blocking(move || {
} let conn = match (path.as_deref(), &default_storage_dir) {
(Some(path), _) => { (Some(":memory:"), _) | (None, None) => {
let flags = rusqlite::Connection::open_in_memory()?
OpenFlags::default().difference(OpenFlags::SQLITE_OPEN_URI); }
rusqlite::Connection::open_with_flags(path, flags)? (Some(path), _) => {
} let flags =
(None, Some(path)) => { OpenFlags::default().difference(OpenFlags::SQLITE_OPEN_URI);
std::fs::create_dir_all(path)?; rusqlite::Connection::open_with_flags(path, flags)?
let path = path.join("kv.sqlite3"); }
rusqlite::Connection::open(&path)? (None, Some(path)) => {
} std::fs::create_dir_all(path)?;
}; let path = path.join("kv.sqlite3");
rusqlite::Connection::open(path)?
}
};
conn.pragma_update(None, "journal_mode", "wal")?; conn.pragma_update(None, "journal_mode", "wal")?;
conn.execute(STATEMENT_CREATE_MIGRATION_TABLE, [])?;
let current_version: usize = conn Ok::<_, AnyError>(conn)
})
.await
.unwrap()
}
})
.await?;
let conn = Rc::new(AsyncRefCell::new(Cell::new(Some(conn))));
SqliteDb::run_tx(conn.clone(), |tx| {
tx.execute(STATEMENT_CREATE_MIGRATION_TABLE, [])?;
let current_version: usize = tx
.query_row( .query_row(
"select version from migration_state where k = 0", "select version from migration_state where k = 0",
[], [],
@ -198,21 +213,22 @@ impl<P: SqliteDbHandlerPermissions> DatabaseHandler for SqliteDbHandler<P> {
for (i, migration) in MIGRATIONS.iter().enumerate() { for (i, migration) in MIGRATIONS.iter().enumerate() {
let version = i + 1; let version = i + 1;
if version > current_version { if version > current_version {
conn.execute_batch(migration)?; tx.execute_batch(migration)?;
conn.execute( tx.execute(
"replace into migration_state (k, version) values(?, ?)", "replace into migration_state (k, version) values(?, ?)",
[&0, &version], [&0, &version],
)?; )?;
} }
} }
Ok::<_, AnyError>(conn) tx.commit()?;
Ok(())
}) })
.await .await?;
.unwrap()?;
Ok(SqliteDb { Ok(SqliteDb {
conn: Rc::new(AsyncRefCell::new(Cell::new(Some(conn)))), conn,
queue: OnceCell::new(), queue: OnceCell::new(),
}) })
} }
@ -223,11 +239,48 @@ pub struct SqliteDb {
queue: OnceCell<SqliteQueue>, queue: OnceCell<SqliteQueue>,
} }
async fn sqlite_retry_loop<R, Fut: Future<Output = Result<R, AnyError>>>(
mut f: impl FnMut() -> Fut,
) -> Result<R, AnyError> {
loop {
match f().await {
Ok(x) => return Ok(x),
Err(e) => {
if let Some(x) = e.downcast_ref::<rusqlite::Error>() {
if x.sqlite_error_code() == Some(rusqlite::ErrorCode::DatabaseBusy) {
log::debug!("kv: Database is busy, retrying");
tokio::time::sleep(Duration::from_millis(
rand::thread_rng().gen_range(5..20),
))
.await;
continue;
}
}
return Err(e);
}
}
}
}
impl SqliteDb { impl SqliteDb {
async fn run_tx<F, R>( async fn run_tx<F, R>(
conn: Rc<AsyncRefCell<Cell<Option<rusqlite::Connection>>>>, conn: Rc<AsyncRefCell<Cell<Option<rusqlite::Connection>>>>,
f: F, f: F,
) -> Result<R, AnyError> ) -> Result<R, AnyError>
where
F: (FnOnce(rusqlite::Transaction<'_>) -> Result<R, AnyError>)
+ Clone
+ Send
+ 'static,
R: Send + 'static,
{
sqlite_retry_loop(|| Self::run_tx_inner(conn.clone(), f.clone())).await
}
async fn run_tx_inner<F, R>(
conn: Rc<AsyncRefCell<Cell<Option<rusqlite::Connection>>>>,
f: F,
) -> Result<R, AnyError>
where where
F: (FnOnce(rusqlite::Transaction<'_>) -> Result<R, AnyError>) F: (FnOnce(rusqlite::Transaction<'_>) -> Result<R, AnyError>)
+ Send + Send
@ -579,9 +632,10 @@ impl Database for SqliteDb {
requests: Vec<ReadRange>, requests: Vec<ReadRange>,
_options: SnapshotReadOptions, _options: SnapshotReadOptions,
) -> Result<Vec<ReadRangeOutput>, AnyError> { ) -> Result<Vec<ReadRangeOutput>, AnyError> {
let requests = Arc::new(requests);
Self::run_tx(self.conn.clone(), move |tx| { Self::run_tx(self.conn.clone(), move |tx| {
let mut responses = Vec::with_capacity(requests.len()); let mut responses = Vec::with_capacity(requests.len());
for request in requests { for request in &*requests {
let mut stmt = tx.prepare_cached(if request.reverse { let mut stmt = tx.prepare_cached(if request.reverse {
STATEMENT_KV_RANGE_SCAN_REVERSE STATEMENT_KV_RANGE_SCAN_REVERSE
} else { } else {
@ -622,9 +676,10 @@ impl Database for SqliteDb {
&self, &self,
write: AtomicWrite, write: AtomicWrite,
) -> Result<Option<CommitResult>, AnyError> { ) -> Result<Option<CommitResult>, AnyError> {
let write = Arc::new(write);
let (has_enqueues, commit_result) = let (has_enqueues, commit_result) =
Self::run_tx(self.conn.clone(), move |tx| { Self::run_tx(self.conn.clone(), move |tx| {
for check in write.checks { for check in &write.checks {
let real_versionstamp = tx let real_versionstamp = tx
.prepare_cached(STATEMENT_KV_POINT_GET_VERSION_ONLY)? .prepare_cached(STATEMENT_KV_POINT_GET_VERSION_ONLY)?
.query_row([check.key.as_slice()], |row| row.get(0)) .query_row([check.key.as_slice()], |row| row.get(0))
@ -639,10 +694,10 @@ impl Database for SqliteDb {
.prepare_cached(STATEMENT_INC_AND_GET_DATA_VERSION)? .prepare_cached(STATEMENT_INC_AND_GET_DATA_VERSION)?
.query_row([], |row| row.get(0))?; .query_row([], |row| row.get(0))?;
for mutation in write.mutations { for mutation in &write.mutations {
match mutation.kind { match &mutation.kind {
MutationKind::Set(value) => { MutationKind::Set(value) => {
let (value, encoding) = encode_value(&value); let (value, encoding) = encode_value(value);
let changed = tx let changed = tx
.prepare_cached(STATEMENT_KV_POINT_SET)? .prepare_cached(STATEMENT_KV_POINT_SET)?
.execute(params![mutation.key, &value, &encoding, &version])?; .execute(params![mutation.key, &value, &encoding, &version])?;
@ -659,7 +714,7 @@ impl Database for SqliteDb {
&tx, &tx,
&mutation.key, &mutation.key,
"sum", "sum",
&operand, operand,
version, version,
|a, b| a.wrapping_add(b), |a, b| a.wrapping_add(b),
)?; )?;
@ -669,7 +724,7 @@ impl Database for SqliteDb {
&tx, &tx,
&mutation.key, &mutation.key,
"min", "min",
&operand, operand,
version, version,
|a, b| a.min(b), |a, b| a.min(b),
)?; )?;
@ -679,7 +734,7 @@ impl Database for SqliteDb {
&tx, &tx,
&mutation.key, &mutation.key,
"max", "max",
&operand, operand,
version, version,
|a, b| a.max(b), |a, b| a.max(b),
)?; )?;
@ -693,12 +748,13 @@ impl Database for SqliteDb {
.as_millis() as u64; .as_millis() as u64;
let has_enqueues = !write.enqueues.is_empty(); let has_enqueues = !write.enqueues.is_empty();
for enqueue in write.enqueues { for enqueue in &write.enqueues {
let id = Uuid::new_v4().to_string(); let id = Uuid::new_v4().to_string();
let backoff_schedule = serde_json::to_string( let backoff_schedule = serde_json::to_string(
&enqueue &enqueue
.backoff_schedule .backoff_schedule
.or_else(|| Some(DEFAULT_BACKOFF_SCHEDULE.to_vec())), .as_deref()
.or_else(|| Some(&DEFAULT_BACKOFF_SCHEDULE[..])),
)?; )?;
let keys_if_undelivered = let keys_if_undelivered =
serde_json::to_string(&enqueue.keys_if_undelivered)?; serde_json::to_string(&enqueue.keys_if_undelivered)?;