Merge pull request #1774 from bugadani/executor

Allow custom executors without indirection
Authored by Dario Nieuwenhuis on 2023-08-14 20:43:51 +00:00, committed by GitHub (commit bd58b5002a)
8 changed files with 124 additions and 153 deletions
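
The headline change: `raw::Executor::new` now takes an opaque `*mut ()` context instead of a `Pender` value, and whenever an executor has pending work it calls an externally defined `__pender` function with that context. The sketch below (not part of this PR) shows what a custom integration could look like under the new scheme. `WORK_PENDING` and `run_custom_executor` are illustrative names, a `std` target is assumed so `Box::leak` can produce the required `'static` executor, and the `arch-*` features must stay disabled, since those define `__pender` themselves (per the updated docs further down).

```rust
use core::sync::atomic::{AtomicBool, Ordering};

use embassy_executor::raw;

// Work flag set by the pender and drained by the poll loop
// (the same pattern the riscv32/xtensa arches in this diff use).
static WORK_PENDING: AtomicBool = AtomicBool::new(false);

#[export_name = "__pender"]
fn pender(_context: *mut ()) {
    // Never call `poll()` from here; only record that there is work.
    WORK_PENDING.store(true, Ordering::SeqCst);
}

fn run_custom_executor() -> ! {
    // The context pointer passed here is handed back to `__pender`; unused in this sketch.
    let executor: &'static raw::Executor =
        Box::leak(Box::new(raw::Executor::new(core::ptr::null_mut())));
    // The executor's spawner would be used here to spawn the initial tasks.
    loop {
        if WORK_PENDING.swap(false, Ordering::SeqCst) {
            // Safety: this is the only call site, so `poll` is never entered reentrantly.
            unsafe { executor.poll() };
        }
        // A real integration would sleep/park/WFI here until `__pender` fires again.
    }
}
```

The per-architecture thread executors in the diff below follow exactly this shape, swapping in each platform's wake-up primitive: SEV on Cortex-M, an atomic flag on RISC-V and Xtensa, a `Signaler` on std, and the JS event loop on WASM.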

View File

@@ -5,6 +5,10 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Unreleased
- Replaced Pender. Implementations now must define an extern function called `__pender`.
## 0.2.1 - 2023-08-10
- Avoid calling `pend()` when waking expired timers
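
Concretely, the "Replaced Pender" entry in the Unreleased section means that code which previously constructed a `Pender` (for example through the now-removed `pender-callback` feature) instead exports its callback under the fixed symbol `__pender` and passes the context pointer straight to `raw::Executor::new`. A hedged before/after sketch, with `my_pend_fn` and `my_context` as placeholders:

```rust
// Before (0.2.x, with the removed `pender-callback` feature):
// let pender = Pender::new_from_callback(my_pend_fn, my_context);
// let executor = raw::Executor::new(pender);

// After this change: export the callback under the fixed name `__pender`...
#[export_name = "__pender"]
fn pender(_context: *mut ()) {
    // ...and arrange here for `Executor::poll()` to be called soon (never reentrantly).
}

// ...then hand the context pointer directly to the executor:
// let executor = raw::Executor::new(my_context);
```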

View File

@@ -14,7 +14,7 @@ categories = [
[package.metadata.embassy_docs]
src_base = "https://github.com/embassy-rs/embassy/blob/embassy-executor-v$VERSION/embassy-executor/src/"
src_base_git = "https://github.com/embassy-rs/embassy/blob/$COMMIT/embassy-executor/src/"
features = ["nightly", "defmt", "pender-callback"]
features = ["nightly", "defmt"]
flavors = [
{ name = "std", target = "x86_64-unknown-linux-gnu", features = ["arch-std", "executor-thread"] },
{ name = "wasm", target = "wasm32-unknown-unknown", features = ["arch-wasm", "executor-thread"] },
@@ -25,7 +25,7 @@ flavors = [
[package.metadata.docs.rs]
default-target = "thumbv7em-none-eabi"
targets = ["thumbv7em-none-eabi"]
features = ["nightly", "defmt", "pender-callback", "arch-cortex-m", "executor-thread", "executor-interrupt"]
features = ["nightly", "defmt", "arch-cortex-m", "executor-thread", "executor-interrupt"]
[features]
@@ -37,9 +37,6 @@ arch-xtensa = ["_arch"]
arch-riscv32 = ["_arch"]
arch-wasm = ["_arch", "dep:wasm-bindgen", "dep:js-sys"]
# Enable creating a `Pender` from an arbitrary function pointer callback.
pender-callback = []
# Enable the thread-mode executor (using WFE/SEV in Cortex-M, WFI in other embedded archs)
executor-thread = []
# Enable the interrupt-mode executor (available in Cortex-M only)

View File

@@ -1,3 +1,49 @@
const THREAD_PENDER: usize = usize::MAX;
#[export_name = "__pender"]
#[cfg(any(feature = "executor-thread", feature = "executor-interrupt"))]
fn __pender(context: *mut ()) {
unsafe {
// Safety: `context` is either `usize::MAX` created by `Executor::run`, or a valid interrupt
// request number given to `InterruptExecutor::start`.
let context = context as usize;
#[cfg(feature = "executor-thread")]
// Try to make Rust optimize the branching away if we only use thread mode.
if !cfg!(feature = "executor-interrupt") || context == THREAD_PENDER {
core::arch::asm!("sev");
return;
}
#[cfg(feature = "executor-interrupt")]
{
use cortex_m::interrupt::InterruptNumber;
use cortex_m::peripheral::NVIC;
#[derive(Clone, Copy)]
struct Irq(u16);
unsafe impl InterruptNumber for Irq {
fn number(self) -> u16 {
self.0
}
}
let irq = Irq(context as u16);
// STIR is faster, but is only available in v7 and higher.
#[cfg(not(armv6m))]
{
let mut nvic: NVIC = core::mem::transmute(());
nvic.request(irq);
}
#[cfg(armv6m)]
NVIC::pend(irq);
}
}
}
#[cfg(feature = "executor-thread")]
pub use thread::*;
#[cfg(feature = "executor-thread")]
@@ -8,18 +54,9 @@ mod thread {
#[cfg(feature = "nightly")]
pub use embassy_macros::main_cortex_m as main;
use crate::raw::{Pender, PenderInner};
use crate::arch::THREAD_PENDER;
use crate::{raw, Spawner};
#[derive(Copy, Clone)]
pub(crate) struct ThreadPender;
impl ThreadPender {
pub(crate) fn pend(self) {
unsafe { core::arch::asm!("sev") }
}
}
/// Thread mode executor, using WFE/SEV.
///
/// This is the simplest and most common kind of executor. It runs on
@@ -39,7 +76,7 @@ mod thread {
/// Create a new Executor.
pub fn new() -> Self {
Self {
inner: raw::Executor::new(Pender(PenderInner::Thread(ThreadPender))),
inner: raw::Executor::new(THREAD_PENDER as *mut ()),
not_send: PhantomData,
}
}
@@ -86,30 +123,7 @@ mod interrupt {
use cortex_m::interrupt::InterruptNumber;
use cortex_m::peripheral::NVIC;
use crate::raw::{self, Pender, PenderInner};
#[derive(Clone, Copy)]
pub(crate) struct InterruptPender(u16);
impl InterruptPender {
pub(crate) fn pend(self) {
// STIR is faster, but is only available in v7 and higher.
#[cfg(not(armv6m))]
{
let mut nvic: cortex_m::peripheral::NVIC = unsafe { core::mem::transmute(()) };
nvic.request(self);
}
#[cfg(armv6m)]
cortex_m::peripheral::NVIC::pend(self);
}
}
unsafe impl cortex_m::interrupt::InterruptNumber for InterruptPender {
fn number(self) -> u16 {
self.0
}
}
use crate::raw;
/// Interrupt mode executor.
///
@@ -194,9 +208,7 @@ mod interrupt {
unsafe {
(&mut *self.executor.get())
.as_mut_ptr()
.write(raw::Executor::new(Pender(PenderInner::Interrupt(InterruptPender(
irq.number(),
)))))
.write(raw::Executor::new(irq.number() as *mut ()))
}
let executor = unsafe { (&*self.executor.get()).assume_init_ref() };
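
For context, the public `InterruptExecutor` surface is unchanged by this rewrite; only the internals now smuggle the IRQ number through the context pointer and pend that IRQ from `__pender`. A usage sketch for orientation (not from this PR; `pac`, `Interrupt`, and `SWI1_EGU1` are placeholders for whatever PAC crate and software interrupt your chip provides):

```rust
use embassy_executor::InterruptExecutor;
// Placeholder PAC imports; substitute your chip's crate.
use pac::{interrupt, Interrupt};

static EXECUTOR_HIGH: InterruptExecutor = InterruptExecutor::new();

// The handler of the interrupt given to `start()` must forward to `on_interrupt()`.
#[interrupt]
unsafe fn SWI1_EGU1() {
    EXECUTOR_HIGH.on_interrupt()
}

fn init() {
    // `start()` hands the executor its IRQ; from then on, task wakes pend that IRQ
    // (via the NVIC path shown in `__pender` above) and the handler polls the executor.
    let _spawner = EXECUTOR_HIGH.start(Interrupt::SWI1_EGU1);
    // _spawner.spawn(my_task()).unwrap();
}
```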

View File

@@ -11,22 +11,16 @@ mod thread {
#[cfg(feature = "nightly")]
pub use embassy_macros::main_riscv as main;
use crate::raw::{Pender, PenderInner};
use crate::{raw, Spawner};
#[derive(Copy, Clone)]
pub(crate) struct ThreadPender;
impl ThreadPender {
#[allow(unused)]
pub(crate) fn pend(self) {
SIGNAL_WORK_THREAD_MODE.store(true, core::sync::atomic::Ordering::SeqCst);
}
}
/// global atomic used to keep track of whether there is work to do since sev() is not available on RISCV
static SIGNAL_WORK_THREAD_MODE: AtomicBool = AtomicBool::new(false);
#[export_name = "__pender"]
fn __pender(_context: *mut ()) {
SIGNAL_WORK_THREAD_MODE.store(true, Ordering::SeqCst);
}
/// RISCV32 Executor
pub struct Executor {
inner: raw::Executor,
@@ -37,7 +31,7 @@ mod thread {
/// Create a new Executor.
pub fn new() -> Self {
Self {
inner: raw::Executor::new(Pender(PenderInner::Thread(ThreadPender))),
inner: raw::Executor::new(core::ptr::null_mut()),
not_send: PhantomData,
}
}

View File

@@ -11,17 +11,12 @@ mod thread {
#[cfg(feature = "nightly")]
pub use embassy_macros::main_std as main;
use crate::raw::{Pender, PenderInner};
use crate::{raw, Spawner};
#[derive(Copy, Clone)]
pub(crate) struct ThreadPender(&'static Signaler);
impl ThreadPender {
#[allow(unused)]
pub(crate) fn pend(self) {
self.0.signal()
}
#[export_name = "__pender"]
fn __pender(context: *mut ()) {
let signaler: &'static Signaler = unsafe { std::mem::transmute(context) };
signaler.signal()
}
/// Single-threaded std-based executor.
@@ -34,9 +29,9 @@ mod thread {
impl Executor {
/// Create a new Executor.
pub fn new() -> Self {
let signaler = &*Box::leak(Box::new(Signaler::new()));
let signaler = Box::leak(Box::new(Signaler::new()));
Self {
inner: raw::Executor::new(Pender(PenderInner::Thread(ThreadPender(signaler)))),
inner: raw::Executor::new(signaler as *mut Signaler as *mut ()),
not_send: PhantomData,
signaler,
}
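
The std arch above illustrates the general contract for `context`: whatever pointer is given to `raw::Executor::new` comes back verbatim as the argument of `__pender`, so per-executor state (here a leaked `Signaler`) can be threaded through it. A minimal sketch of that round trip under the same `std` assumption, with `MyState` as an illustrative stand-in:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

use embassy_executor::raw;

struct MyState {
    work: AtomicBool,
}

#[export_name = "__pender"]
fn pender(context: *mut ()) {
    // Safety: `context` is the leaked `&'static MyState` handed to `raw::Executor::new` below.
    let state: &'static MyState = unsafe { &*(context as *const MyState) };
    state.work.store(true, Ordering::SeqCst);
}

fn new_executor() -> &'static raw::Executor {
    let state = Box::leak(Box::new(MyState { work: AtomicBool::new(false) }));
    // The leaked state pointer becomes the executor's context, just like `signaler` above.
    Box::leak(Box::new(raw::Executor::new(state as *mut MyState as *mut ())))
}
```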

View File

@@ -14,14 +14,12 @@ mod thread {
use wasm_bindgen::prelude::*;
use crate::raw::util::UninitCell;
use crate::raw::{Pender, PenderInner};
use crate::{raw, Spawner};
/// WASM executor, wasm_bindgen to schedule tasks on the JS event loop.
pub struct Executor {
inner: raw::Executor,
ctx: &'static WasmContext,
not_send: PhantomData<*mut ()>,
#[export_name = "__pender"]
fn __pender(context: *mut ()) {
let signaler: &'static WasmContext = unsafe { std::mem::transmute(context) };
let _ = signaler.promise.then(unsafe { signaler.closure.as_mut() });
}
pub(crate) struct WasmContext {
@@ -29,16 +27,6 @@ mod thread {
closure: UninitCell<Closure<dyn FnMut(JsValue)>>,
}
#[derive(Copy, Clone)]
pub(crate) struct ThreadPender(&'static WasmContext);
impl ThreadPender {
#[allow(unused)]
pub(crate) fn pend(self) {
let _ = self.0.promise.then(unsafe { self.0.closure.as_mut() });
}
}
impl WasmContext {
pub fn new() -> Self {
Self {
@@ -48,14 +36,21 @@ mod thread {
}
}
/// WASM executor, wasm_bindgen to schedule tasks on the JS event loop.
pub struct Executor {
inner: raw::Executor,
ctx: &'static WasmContext,
not_send: PhantomData<*mut ()>,
}
impl Executor {
/// Create a new Executor.
pub fn new() -> Self {
let ctx = &*Box::leak(Box::new(WasmContext::new()));
let ctx = Box::leak(Box::new(WasmContext::new()));
Self {
inner: raw::Executor::new(Pender(PenderInner::Thread(ThreadPender(ctx)))),
not_send: PhantomData,
inner: raw::Executor::new(ctx as *mut WasmContext as *mut ()),
ctx,
not_send: PhantomData,
}
}

View File

@@ -8,22 +8,16 @@ mod thread {
use core::marker::PhantomData;
use core::sync::atomic::{AtomicBool, Ordering};
use crate::raw::{Pender, PenderInner};
use crate::{raw, Spawner};
#[derive(Copy, Clone)]
pub(crate) struct ThreadPender;
impl ThreadPender {
#[allow(unused)]
pub(crate) fn pend(self) {
SIGNAL_WORK_THREAD_MODE.store(true, core::sync::atomic::Ordering::SeqCst);
}
}
/// global atomic used to keep track of whether there is work to do since sev() is not available on Xtensa
static SIGNAL_WORK_THREAD_MODE: AtomicBool = AtomicBool::new(false);
#[export_name = "__pender"]
fn __pender(_context: *mut ()) {
SIGNAL_WORK_THREAD_MODE.store(true, Ordering::SeqCst);
}
/// Xtensa Executor
pub struct Executor {
inner: raw::Executor,
@@ -34,7 +28,7 @@ mod thread {
/// Create a new Executor.
pub fn new() -> Self {
Self {
inner: raw::Executor::new(Pender(PenderInner::Thread(ThreadPender))),
inner: raw::Executor::new(core::ptr::null_mut()),
not_send: PhantomData,
}
}

View File

@@ -292,53 +292,17 @@ impl<F: Future + 'static, const N: usize> TaskPool<F, N> {
}
#[derive(Clone, Copy)]
pub(crate) enum PenderInner {
#[cfg(feature = "executor-thread")]
Thread(crate::arch::ThreadPender),
#[cfg(feature = "executor-interrupt")]
Interrupt(crate::arch::InterruptPender),
#[cfg(feature = "pender-callback")]
Callback { func: fn(*mut ()), context: *mut () },
}
pub(crate) struct Pender(*mut ());
unsafe impl Send for PenderInner {}
unsafe impl Sync for PenderInner {}
/// Platform/architecture-specific action executed when an executor has pending work.
///
/// When a task within an executor is woken, the `Pender` is called. This does a
/// platform/architecture-specific action to signal there is pending work in the executor.
/// When this happens, you must arrange for [`Executor::poll`] to be called.
///
/// You can think of it as a waker, but for the whole executor.
pub struct Pender(pub(crate) PenderInner);
unsafe impl Send for Pender {}
unsafe impl Sync for Pender {}
impl Pender {
/// Create a `Pender` that will call an arbitrary function pointer.
///
/// # Arguments
///
/// - `func`: The function pointer to call.
/// - `context`: Opaque context pointer, that will be passed to the function pointer.
#[cfg(feature = "pender-callback")]
pub fn new_from_callback(func: fn(*mut ()), context: *mut ()) -> Self {
Self(PenderInner::Callback {
func,
context: context.into(),
})
}
}
impl Pender {
pub(crate) fn pend(&self) {
match self.0 {
#[cfg(feature = "executor-thread")]
PenderInner::Thread(x) => x.pend(),
#[cfg(feature = "executor-interrupt")]
PenderInner::Interrupt(x) => x.pend(),
#[cfg(feature = "pender-callback")]
PenderInner::Callback { func, context } => func(context),
pub(crate) fn pend(self) {
extern "Rust" {
fn __pender(context: *mut ());
}
unsafe { __pender(self.0) };
}
}
@@ -472,15 +436,31 @@ impl SyncExecutor {
///
/// - To get the executor to do work, call `poll()`. This will poll all queued tasks (all tasks
/// that "want to run").
/// - You must supply a [`Pender`]. The executor will call it to notify you it has work
/// to do. You must arrange for `poll()` to be called as soon as possible.
/// - You must supply a pender function, as shown below. The executor will call it to notify you
/// it has work to do. You must arrange for `poll()` to be called as soon as possible.
/// - Enabling `arch-xx` features will define a pender function for you. This means that you
/// are limited to using the executors provided to you by the architecture/platform
/// implementation. If you need a different executor, you must not enable `arch-xx` features.
///
/// The [`Pender`] can be called from *any* context: any thread, any interrupt priority
/// The pender can be called from *any* context: any thread, any interrupt priority
/// level, etc. It may be called synchronously from any `Executor` method call as well.
/// You must deal with this correctly.
///
/// In particular, you must NOT call `poll` directly from the pender callback, as this violates
/// the requirement for `poll` to not be called reentrantly.
///
/// The pender function must be exported with the name `__pender` and have the following signature:
///
/// ```rust
/// #[export_name = "__pender"]
/// fn pender(context: *mut ()) {
/// // schedule `poll()` to be called
/// }
/// ```
///
/// The `context` argument is a piece of arbitrary data the executor will pass to the pender.
/// You can set the `context` when calling [`Executor::new()`]. You can use it to, for example,
/// differentiate between executors, or to pass a pointer to a callback that should be called.
#[repr(transparent)]
pub struct Executor {
pub(crate) inner: SyncExecutor,
@@ -495,12 +475,12 @@ impl Executor {
/// Create a new executor.
///
/// When the executor has work to do, it will call the [`Pender`].
/// When the executor has work to do, it will call the pender function and pass `context` to it.
///
/// See [`Executor`] docs for details on `Pender`.
pub fn new(pender: Pender) -> Self {
/// See [`Executor`] docs for details on the pender.
pub fn new(context: *mut ()) -> Self {
Self {
inner: SyncExecutor::new(pender),
inner: SyncExecutor::new(Pender(context)),
_not_sync: PhantomData,
}
}
@@ -523,16 +503,16 @@ impl Executor {
/// This loops over all tasks that are queued to be polled (i.e. they're
/// freshly spawned or they've been woken). Other tasks are not polled.
///
/// You must call `poll` after receiving a call to the [`Pender`]. It is OK
/// to call `poll` even when not requested by the `Pender`, but it wastes
/// You must call `poll` after receiving a call to the pender. It is OK
/// to call `poll` even when not requested by the pender, but it wastes
/// energy.
///
/// # Safety
///
/// You must NOT call `poll` reentrantly on the same executor.
///
/// In particular, note that `poll` may call the `Pender` synchronously. Therefore, you
/// must NOT directly call `poll()` from the `Pender` callback. Instead, the callback has to
/// In particular, note that `poll` may call the pender synchronously. Therefore, you
/// must NOT directly call `poll()` from the pender callback. Instead, the callback has to
/// somehow schedule for `poll()` to be called later, at a time you know for sure there's
/// no `poll()` already running.
pub unsafe fn poll(&'static self) {