// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.

use crate::ops::*;
use crate::OpResult;
use crate::PromiseId;
use anyhow::Error;
use futures::future::Either;
use futures::future::Future;
use futures::future::FutureExt;
use futures::task::noop_waker_ref;
use std::cell::RefCell;
use std::future::ready;
use std::option::Option;
use std::task::Context;
use std::task::Poll;

/// Maps the op's fallible result into an `OpResult` and spawns it on the
/// current context's pending-op set, tracking it as an in-flight async op.
#[inline]
pub fn queue_fast_async_op<R: serde::Serialize + 'static>(
  ctx: &OpCtx,
  promise_id: PromiseId,
  op: impl Future<Output = Result<R, Error>> + 'static,
) {
  let get_class = {
    let state = RefCell::borrow(&ctx.state);
    state.tracker.track_async(ctx.id);
    state.get_error_class_fn
  };
  let fut = op.map(|result| crate::_ops::to_op_result(get_class, result));
  // SAFETY: this is guaranteed to be running on a current-thread executor
  ctx.context_state.borrow_mut().pending_ops.spawn(unsafe {
    crate::task::MaskFutureAsSend::new(OpCall::new(ctx, promise_id, fut))
  });
}

/// Maps a future of `Result<R, Error>` into a future of `OpResult`,
/// classifying errors via the registered error-class function.
#[inline]
pub fn map_async_op1<R: serde::Serialize + 'static>(
  ctx: &OpCtx,
  op: impl Future<Output = Result<R, Error>> + 'static,
) -> impl Future<Output = OpResult> {
  let get_class = {
    let state = RefCell::borrow(&ctx.state);
    state.tracker.track_async(ctx.id);
    state.get_error_class_fn
  };

  op.map(|res| crate::_ops::to_op_result(get_class, res))
}

/// Maps an infallible future of `R` into a future of `OpResult`.
#[inline]
pub fn map_async_op2<R: serde::Serialize + 'static>(
  ctx: &OpCtx,
  op: impl Future<Output = R> + 'static,
) -> impl Future<Output = OpResult> {
  let state = RefCell::borrow(&ctx.state);
  state.tracker.track_async(ctx.id);

  op.map(|res| OpResult::Ok(res.into()))
}

/// Like `map_async_op1`, but the op may also fail synchronously before a
/// future is ever produced; that error is surfaced as an already-resolved
/// `OpResult::Err`.
#[inline]
pub fn map_async_op3<R: serde::Serialize + 'static>(
  ctx: &OpCtx,
  op: Result<impl Future<Output = Result<R, Error>> + 'static, Error>,
) -> impl Future<Output = OpResult> {
  let get_class = {
    let state = RefCell::borrow(&ctx.state);
    state.tracker.track_async(ctx.id);
    state.get_error_class_fn
  };

  match op {
    Err(err) => {
      Either::Left(ready(OpResult::Err(OpError::new(get_class, err))))
    }
    Ok(fut) => {
      Either::Right(fut.map(|res| crate::_ops::to_op_result(get_class, res)))
    }
  }
}

/// Like `map_async_op2`, but the op may fail synchronously before a future
/// is ever produced.
#[inline]
pub fn map_async_op4<R: serde::Serialize + 'static>(
  ctx: &OpCtx,
  op: Result<impl Future<Output = R> + 'static, Error>,
) -> impl Future<Output = OpResult> {
  let get_class = {
    let state = RefCell::borrow(&ctx.state);
    state.tracker.track_async(ctx.id);
    state.get_error_class_fn
  };

  match op {
    Err(err) => {
      Either::Left(ready(OpResult::Err(OpError::new(get_class, err))))
    }
    Ok(fut) => Either::Right(fut.map(|r| OpResult::Ok(r.into()))),
  }
}

pub fn queue_async_op<'s>(
  ctx: &OpCtx,
  scope: &'s mut v8::HandleScope,
  deferred: bool,
  promise_id: PromiseId,
  op: impl Future<Output = OpResult> + 'static,
) -> Option<v8::Local<'s, v8::Value>> {
  // An op's realm (as given by `OpCtx::realm_idx`) must match the realm in
  // which it is invoked. Otherwise, we might have cross-realm object exposure.
  // deno_core doesn't currently support such exposure, even though embedders
  // can cause it, so we panic in debug mode (since the check is expensive).
  // TODO(mmastrac): Restore this
  // debug_assert_eq!(
  //   runtime_state.borrow().context(ctx.realm_idx as usize, scope),
  //   Some(scope.get_current_context())
  // );

  let id = ctx.id;

  // TODO(mmastrac): We have to poll every future here because that assumption
  // is baked into a large number of ops. If we can figure out a way around
  // this, we can remove this call to boxed_local and save a malloc per future.
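  // Below, the op future is eagerly polled exactly once with a no-op waker.
  // If it completes on that first poll (common for ops that only touch
  // in-memory state), its result is returned to the caller synchronously, or
  // re-queued as an already-resolved future when `deferred` is set, without
  // ever parking in the pending-op set. Only a future that is still pending
  // after this first poll is handed to the executor.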
  let mut pinned = op.map(move |res| (promise_id, id, res)).boxed_local();

  match pinned.poll_unpin(&mut Context::from_waker(noop_waker_ref())) {
    Poll::Pending => {}
    Poll::Ready(mut res) => {
      if deferred {
        ctx
          .context_state
          .borrow_mut()
          .pending_ops
          // SAFETY: this is guaranteed to be running on a current-thread executor
          .spawn(unsafe { crate::task::MaskFutureAsSend::new(ready(res)) });
        return None;
      } else {
        ctx.state.borrow_mut().tracker.track_async_completed(ctx.id);
        return Some(res.2.to_v8(scope).unwrap());
      }
    }
  }

  ctx
    .context_state
    .borrow_mut()
    .pending_ops
    // SAFETY: this is guaranteed to be running on a current-thread executor
    .spawn(unsafe { crate::task::MaskFutureAsSend::new(pinned) });
  None
}
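
#[cfg(test)]
mod eager_poll_sketch {
  // Illustrative sketch only: these tests exercise the generic "poll once
  // with a no-op waker" pattern that `queue_async_op` relies on above. They
  // deliberately avoid deno_core's op machinery (no `OpCtx`, no V8 scope)
  // and use only `std` and the `futures` crate; the module and test names
  // are placeholders, not part of the public API.
  use futures::future::FutureExt;
  use futures::task::noop_waker_ref;
  use std::future::ready;
  use std::task::Context;
  use std::task::Poll;

  #[test]
  fn ready_future_resolves_on_first_poll() {
    // A future that is already complete resolves on the very first poll,
    // mirroring the fast path where `queue_async_op` returns a value to the
    // caller without spawning anything.
    let mut fut = ready(42).boxed_local();
    let poll = fut.poll_unpin(&mut Context::from_waker(noop_waker_ref()));
    assert_eq!(poll, Poll::Ready(42));
  }

  #[test]
  fn pending_future_must_be_spawned() {
    // `std::future::pending` never completes, so the first poll reports
    // `Poll::Pending` and the caller has to hand the future to an executor
    // (in `queue_async_op`, the per-context pending-op set).
    let mut fut = std::future::pending::<i32>().boxed_local();
    let poll = fut.poll_unpin(&mut Context::from_waker(noop_waker_ref()));
    assert!(poll.is_pending());
  }
}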