1
0
Fork 0
mirror of https://github.com/denoland/deno.git synced 2024-11-24 15:19:26 -05:00

feat(ext/kv): configurable limit params (#25174)

This commit makes various limit parameters in `deno_kv` configurable.

Currently these values are declared as constants and thus can't be
modified from outside. However, there may be situations where we want to
change them. This commit makes this possible by introducing a new struct
`KvConfig` that needs to be given as the 2nd param in `init_ops`.
This commit is contained in:
Yusuke Tanaka 2024-08-27 16:30:19 +09:00 committed by GitHub
parent c89a20b428
commit 9b4026563c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 203 additions and 48 deletions

136
ext/kv/config.rs Normal file
View file

@ -0,0 +1,136 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
/// Limit parameters enforced by the `deno_kv` extension.
///
/// All limits were previously hard-coded constants in `lib.rs`; this struct
/// makes them configurable. Construct one via [`KvConfig::builder`]; any
/// limit left unset on the builder falls back to a built-in default when
/// `build` is called.
#[derive(Debug, Clone)]
pub struct KvConfig {
  /// Maximum size in bytes of a key used in a write operation.
  pub(crate) max_write_key_size_bytes: usize,
  /// Maximum size in bytes of a key used in a read operation. Derived from
  /// the write limit plus one byte (range selectors can carry a 0x00 or
  /// 0xff suffix), so the builder exposes no setter for it.
  pub(crate) max_read_key_size_bytes: usize,
  /// Maximum size in bytes of a single stored value.
  pub(crate) max_value_size_bytes: usize,
  /// Maximum number of ranges accepted by a single snapshot read.
  pub(crate) max_read_ranges: usize,
  /// Maximum total number of entries a single snapshot read may request.
  pub(crate) max_read_entries: usize,
  /// Maximum number of checks per atomic write.
  pub(crate) max_checks: usize,
  /// Maximum number of mutations plus enqueues per atomic write.
  pub(crate) max_mutations: usize,
  /// Maximum number of keys a single watch may observe.
  pub(crate) max_watched_keys: usize,
  /// Maximum combined payload size in bytes of a single atomic write.
  pub(crate) max_total_mutation_size_bytes: usize,
  /// Maximum combined key size in bytes of a single atomic write.
  pub(crate) max_total_key_size_bytes: usize,
}
impl KvConfig {
  /// Returns a fresh [`KvConfigBuilder`] for assembling a config with
  /// custom limits; unset limits receive defaults at `build` time.
  pub fn builder() -> KvConfigBuilder {
    KvConfigBuilder::new()
  }
}
/// Builder for [`KvConfig`].
///
/// Every field is optional; a `None` at `build` time is replaced by the
/// corresponding built-in default. Note that there is intentionally no
/// `max_read_key_size_bytes` field: the read-key limit is always derived
/// from the write-key limit (plus one suffix byte) in `build`.
#[derive(Default, Debug, Clone)]
pub struct KvConfigBuilder {
  max_write_key_size_bytes: Option<usize>,
  max_value_size_bytes: Option<usize>,
  max_read_ranges: Option<usize>,
  max_read_entries: Option<usize>,
  max_checks: Option<usize>,
  max_mutations: Option<usize>,
  max_watched_keys: Option<usize>,
  max_total_mutation_size_bytes: Option<usize>,
  max_total_key_size_bytes: Option<usize>,
}
impl KvConfigBuilder {
  /// Creates a builder with no limits set; `build` supplies defaults.
  pub fn new() -> Self {
    Self::default()
  }

  /// Sets the maximum key size (bytes) accepted for writes. The read-key
  /// limit is derived from this value at `build` time.
  pub fn max_write_key_size_bytes(&mut self, limit: usize) -> &mut Self {
    self.max_write_key_size_bytes = Some(limit);
    self
  }

  /// Sets the maximum size (bytes) of a stored value (also applied to
  /// enqueue payloads).
  pub fn max_value_size_bytes(&mut self, limit: usize) -> &mut Self {
    self.max_value_size_bytes = Some(limit);
    self
  }

  /// Sets the maximum number of ranges per snapshot read.
  pub fn max_read_ranges(&mut self, limit: usize) -> &mut Self {
    self.max_read_ranges = Some(limit);
    self
  }

  /// Sets the maximum total number of entries per snapshot read.
  pub fn max_read_entries(&mut self, limit: usize) -> &mut Self {
    self.max_read_entries = Some(limit);
    self
  }

  /// Sets the maximum number of checks per atomic write.
  pub fn max_checks(&mut self, limit: usize) -> &mut Self {
    self.max_checks = Some(limit);
    self
  }

  /// Sets the maximum number of mutations plus enqueues per atomic write.
  pub fn max_mutations(&mut self, limit: usize) -> &mut Self {
    self.max_mutations = Some(limit);
    self
  }

  /// Sets the maximum number of keys a single watch may observe.
  pub fn max_watched_keys(&mut self, limit: usize) -> &mut Self {
    self.max_watched_keys = Some(limit);
    self
  }

  /// Sets the maximum combined payload size (bytes) of an atomic write.
  pub fn max_total_mutation_size_bytes(&mut self, limit: usize) -> &mut Self {
    self.max_total_mutation_size_bytes = Some(limit);
    self
  }

  /// Sets the maximum combined key size (bytes) of an atomic write.
  pub fn max_total_key_size_bytes(&mut self, limit: usize) -> &mut Self {
    self.max_total_key_size_bytes = Some(limit);
    self
  }

  /// Assembles the final `KvConfig`, substituting a built-in default for
  /// every limit that was not explicitly set.
  pub fn build(&self) -> KvConfig {
    const DEFAULT_WRITE_KEY_SIZE: usize = 2048;
    // Range selectors may append a 0x00 or 0xff suffix byte, so reads are
    // allowed one byte more than writes.
    const DEFAULT_READ_KEY_SIZE: usize = DEFAULT_WRITE_KEY_SIZE + 1;
    const DEFAULT_VALUE_SIZE: usize = 65536;
    const DEFAULT_READ_RANGES: usize = 10;
    const DEFAULT_READ_ENTRIES: usize = 1000;
    const DEFAULT_CHECKS: usize = 100;
    const DEFAULT_MUTATIONS: usize = 1000;
    const DEFAULT_WATCHED_KEYS: usize = 10;
    const DEFAULT_TOTAL_MUTATION_SIZE: usize = 800 * 1024;
    const DEFAULT_TOTAL_KEY_SIZE: usize = 80 * 1024;

    // The read-key limit is never set directly: it always tracks the
    // write-key limit plus one suffix byte.
    let max_read_key_size_bytes = match self.max_write_key_size_bytes {
      Some(write_limit) => write_limit + 1,
      None => DEFAULT_READ_KEY_SIZE,
    };

    KvConfig {
      max_write_key_size_bytes: self
        .max_write_key_size_bytes
        .unwrap_or(DEFAULT_WRITE_KEY_SIZE),
      max_read_key_size_bytes,
      max_value_size_bytes: self
        .max_value_size_bytes
        .unwrap_or(DEFAULT_VALUE_SIZE),
      max_read_ranges: self.max_read_ranges.unwrap_or(DEFAULT_READ_RANGES),
      max_read_entries: self.max_read_entries.unwrap_or(DEFAULT_READ_ENTRIES),
      max_checks: self.max_checks.unwrap_or(DEFAULT_CHECKS),
      max_mutations: self.max_mutations.unwrap_or(DEFAULT_MUTATIONS),
      max_watched_keys: self.max_watched_keys.unwrap_or(DEFAULT_WATCHED_KEYS),
      max_total_mutation_size_bytes: self
        .max_total_mutation_size_bytes
        .unwrap_or(DEFAULT_TOTAL_MUTATION_SIZE),
      max_total_key_size_bytes: self
        .max_total_key_size_bytes
        .unwrap_or(DEFAULT_TOTAL_KEY_SIZE),
    }
  }
}

View file

@ -1,5 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
pub mod config;
pub mod dynamic;
mod interface;
pub mod remote;
@ -56,22 +57,11 @@ use log::debug;
use serde::Deserialize;
use serde::Serialize;
pub use crate::config::*;
pub use crate::interface::*;
pub const UNSTABLE_FEATURE_NAME: &str = "kv";
const MAX_WRITE_KEY_SIZE_BYTES: usize = 2048;
// range selectors can contain 0x00 or 0xff suffixes
const MAX_READ_KEY_SIZE_BYTES: usize = MAX_WRITE_KEY_SIZE_BYTES + 1;
const MAX_VALUE_SIZE_BYTES: usize = 65536;
const MAX_READ_RANGES: usize = 10;
const MAX_READ_ENTRIES: usize = 1000;
const MAX_CHECKS: usize = 100;
const MAX_MUTATIONS: usize = 1000;
const MAX_WATCHED_KEYS: usize = 10;
const MAX_TOTAL_MUTATION_SIZE_BYTES: usize = 800 * 1024;
const MAX_TOTAL_KEY_SIZE_BYTES: usize = 80 * 1024;
deno_core::extension!(deno_kv,
deps = [ deno_console, deno_web ],
parameters = [ DBH: DatabaseHandler ],
@ -88,8 +78,10 @@ deno_core::extension!(deno_kv,
esm = [ "01_db.ts" ],
options = {
handler: DBH,
config: KvConfig,
},
state = |state, options| {
state.put(Rc::new(options.config));
state.put(Rc::new(options.handler));
}
);
@ -282,10 +274,15 @@ where
resource.db.clone()
};
if ranges.len() > MAX_READ_RANGES {
let config = {
let state = state.borrow();
state.borrow::<Rc<KvConfig>>().clone()
};
if ranges.len() > config.max_read_ranges {
return Err(type_error(format!(
"too many ranges (max {})",
MAX_READ_RANGES
config.max_read_ranges
)));
}
@ -298,8 +295,8 @@ where
let (start, end) =
decode_selector_and_cursor(&selector, reverse, cursor.as_ref())?;
check_read_key_size(&start)?;
check_read_key_size(&end)?;
check_read_key_size(&start, &config)?;
check_read_key_size(&end, &config)?;
total_entries += limit as usize;
Ok(ReadRange {
@ -312,10 +309,10 @@ where
})
.collect::<Result<Vec<_>, AnyError>>()?;
if total_entries > MAX_READ_ENTRIES {
if total_entries > config.max_read_entries {
return Err(type_error(format!(
"too many entries (max {})",
MAX_READ_ENTRIES
config.max_read_entries
)));
}
@ -392,11 +389,12 @@ where
DBH: DatabaseHandler + 'static,
{
let resource = state.resource_table.get::<DatabaseResource<DBH::DB>>(rid)?;
let config = state.borrow::<Rc<KvConfig>>().clone();
if keys.len() > MAX_WATCHED_KEYS {
if keys.len() > config.max_watched_keys {
return Err(type_error(format!(
"too many keys (max {})",
MAX_WATCHED_KEYS
config.max_watched_keys
)));
}
@ -406,7 +404,7 @@ where
.collect::<std::io::Result<_>>()?;
for k in &keys {
check_read_key_size(k)?;
check_read_key_size(k, &config)?;
}
let stream = resource.db.watch(keys);
@ -783,14 +781,22 @@ where
resource.db.clone()
};
if checks.len() > MAX_CHECKS {
return Err(type_error(format!("too many checks (max {})", MAX_CHECKS)));
let config = {
let state = state.borrow();
state.borrow::<Rc<KvConfig>>().clone()
};
if checks.len() > config.max_checks {
return Err(type_error(format!(
"too many checks (max {})",
config.max_checks
)));
}
if mutations.len() + enqueues.len() > MAX_MUTATIONS {
if mutations.len() + enqueues.len() > config.max_mutations {
return Err(type_error(format!(
"too many mutations (max {})",
MAX_MUTATIONS
config.max_mutations
)));
}
@ -822,36 +828,37 @@ where
return Err(type_error("key cannot be empty"));
}
total_payload_size += check_write_key_size(key)?;
total_payload_size += check_write_key_size(key, &config)?;
}
for (key, value) in mutations
.iter()
.flat_map(|m| m.kind.value().map(|x| (&m.key, x)))
{
let key_size = check_write_key_size(key)?;
total_payload_size += check_value_size(value)? + key_size;
let key_size = check_write_key_size(key, &config)?;
total_payload_size += check_value_size(value, &config)? + key_size;
total_key_size += key_size;
}
for enqueue in &enqueues {
total_payload_size += check_enqueue_payload_size(&enqueue.payload)?;
total_payload_size +=
check_enqueue_payload_size(&enqueue.payload, &config)?;
if let Some(schedule) = enqueue.backoff_schedule.as_ref() {
total_payload_size += 4 * schedule.len();
}
}
if total_payload_size > MAX_TOTAL_MUTATION_SIZE_BYTES {
if total_payload_size > config.max_total_mutation_size_bytes {
return Err(type_error(format!(
"total mutation size too large (max {} bytes)",
MAX_TOTAL_MUTATION_SIZE_BYTES
config.max_total_mutation_size_bytes
)));
}
if total_key_size > MAX_TOTAL_KEY_SIZE_BYTES {
if total_key_size > config.max_total_key_size_bytes {
return Err(type_error(format!(
"total key size too large (max {} bytes)",
MAX_TOTAL_KEY_SIZE_BYTES
config.max_total_key_size_bytes
)));
}
@ -881,50 +888,59 @@ fn op_kv_encode_cursor(
Ok(cursor)
}
fn check_read_key_size(key: &[u8]) -> Result<(), AnyError> {
if key.len() > MAX_READ_KEY_SIZE_BYTES {
/// Validates that `key` does not exceed the configured read-key size limit.
///
/// Returns a `TypeError` naming the limit when the key is too large.
fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), AnyError> {
  if key.len() > config.max_read_key_size_bytes {
    Err(type_error(format!(
      "key too large for read (max {} bytes)",
      config.max_read_key_size_bytes
    )))
  } else {
    Ok(())
  }
}
fn check_write_key_size(key: &[u8]) -> Result<usize, AnyError> {
if key.len() > MAX_WRITE_KEY_SIZE_BYTES {
/// Validates that `key` does not exceed the configured write-key size limit.
///
/// On success returns the key's length in bytes so callers can accumulate
/// total payload/key sizes; on failure returns a `TypeError` naming the
/// limit.
fn check_write_key_size(
  key: &[u8],
  config: &KvConfig,
) -> Result<usize, AnyError> {
  if key.len() > config.max_write_key_size_bytes {
    Err(type_error(format!(
      "key too large for write (max {} bytes)",
      config.max_write_key_size_bytes
    )))
  } else {
    Ok(key.len())
  }
}
fn check_value_size(value: &KvValue) -> Result<usize, AnyError> {
/// Validates the payload size of `value` against the configured value-size
/// limit.
///
/// `U64` values are fixed-width and always pass, reporting a size of 8
/// bytes. On success returns the payload length in bytes; on failure
/// returns a `TypeError` naming the limit.
fn check_value_size(
  value: &KvValue,
  config: &KvConfig,
) -> Result<usize, AnyError> {
  let payload = match value {
    KvValue::Bytes(x) => x,
    KvValue::V8(x) => x,
    KvValue::U64(_) => return Ok(8),
  };
  if payload.len() > config.max_value_size_bytes {
    Err(type_error(format!(
      "value too large (max {} bytes)",
      config.max_value_size_bytes
    )))
  } else {
    Ok(payload.len())
  }
}
fn check_enqueue_payload_size(payload: &[u8]) -> Result<usize, AnyError> {
if payload.len() > MAX_VALUE_SIZE_BYTES {
fn check_enqueue_payload_size(
payload: &[u8],
config: &KvConfig,
) -> Result<usize, AnyError> {
if payload.len() > config.max_value_size_bytes {
Err(type_error(format!(
"enqueue payload too large (max {} bytes)",
MAX_VALUE_SIZE_BYTES
config.max_value_size_bytes
)))
} else {
Ok(payload.len())

View file

@ -244,9 +244,10 @@ pub fn create_runtime_snapshot(
deno_ffi::deno_ffi::init_ops_and_esm::<Permissions>(),
deno_net::deno_net::init_ops_and_esm::<Permissions>(None, None),
deno_tls::deno_tls::init_ops_and_esm(),
deno_kv::deno_kv::init_ops_and_esm(deno_kv::sqlite::SqliteDbHandler::<
Permissions,
>::new(None, None)),
deno_kv::deno_kv::init_ops_and_esm(
deno_kv::sqlite::SqliteDbHandler::<Permissions>::new(None, None),
deno_kv::KvConfig::builder().build(),
),
deno_cron::deno_cron::init_ops_and_esm(
deno_cron::local::LocalCronHandler::new(),
),

View file

@ -480,6 +480,7 @@ impl WebWorker {
proxy: None,
},
),
deno_kv::KvConfig::builder().build(),
),
deno_cron::deno_cron::init_ops_and_esm(LocalCronHandler::new()),
deno_napi::deno_napi::init_ops_and_esm::<PermissionsContainer>(),

View file

@ -404,6 +404,7 @@ impl MainWorker {
proxy: None,
},
),
deno_kv::KvConfig::builder().build(),
),
deno_cron::deno_cron::init_ops_and_esm(LocalCronHandler::new()),
deno_napi::deno_napi::init_ops_and_esm::<PermissionsContainer>(),