Merge branch 'master' of https://github.com/akiles/embassy into implement-uart

xoviat 2021-01-12 14:00:12 -06:00
commit 914abdeb15
8 changed files with 289 additions and 140 deletions

View File

@@ -8,6 +8,7 @@ use example_common::*;
use cortex_m_rt::entry;
use defmt::panic;
use nrf52840_hal as hal;
use nrf52840_hal::gpio;
use embassy::executor::{task, Executor};
@@ -35,9 +36,14 @@ async fn run() {
rts: None,
};
let ppi = hal::ppi::Parts::new(p.PPI);
let irq = interrupt::take!(UARTE0_UART0);
let mut u = buffered_uarte::BufferedUarte::new(
p.UARTE0,
p.TIMER0,
ppi.ppi0,
ppi.ppi1,
irq,
unsafe { &mut RX_BUFFER },
unsafe { &mut TX_BUFFER },

View File

@@ -14,24 +14,25 @@ use embassy::io::{AsyncBufRead, AsyncWrite, Result};
use embassy::util::WakerRegistration;
use embedded_hal::digital::v2::OutputPin;
use crate::fmt::{panic, todo, *};
use crate::hal::gpio::Port as GpioPort;
use crate::hal::ppi::ConfigurablePpi;
use crate::interrupt::{self, OwnedInterrupt};
use crate::pac;
use crate::pac::uarte0;
use crate::util::peripheral::{PeripheralMutex, PeripheralState};
use crate::util::ring_buffer::RingBuffer;
use crate::{
fmt::{panic, todo, *},
util::low_power_wait_until,
};
// Re-export SVD variants to allow the user to set values directly
pub use crate::hal::uarte::Pins;
pub use uarte0::{baudrate::BAUDRATE_A as Baudrate, config::PARITY_A as Parity};
pub use pac::uarte0::{baudrate::BAUDRATE_A as Baudrate, config::PARITY_A as Parity};
#[derive(Copy, Clone, Debug, PartialEq)]
enum RxState {
Idle,
Receiving,
ReceivingReady,
Stopping,
}
#[derive(Copy, Clone, Debug, PartialEq)]
@@ -40,8 +41,11 @@ enum TxState {
Transmitting(usize),
}
struct State<'a, T: Instance> {
inner: T,
struct State<'a, U: Instance, T: TimerInstance, P1: ConfigurablePpi, P2: ConfigurablePpi> {
uarte: U,
timer: T,
ppi_channel_1: P1,
ppi_channel_2: P2,
rx: RingBuffer<'a>,
rx_state: RxState,
@@ -60,12 +64,16 @@ struct State<'a, T: Instance> {
/// are disabled before using `Uarte`. See product specification:
/// - nrf52832: Section 15.2
/// - nrf52840: Section 6.1.2
pub struct BufferedUarte<'a, T: Instance> {
inner: PeripheralMutex<T::Interrupt, State<'a, T>>,
pub struct BufferedUarte<
'a,
U: Instance,
T: TimerInstance,
P1: ConfigurablePpi,
P2: ConfigurablePpi,
> {
inner: PeripheralMutex<State<'a, U, T, P1, P2>>,
}
impl<'a, T: Instance> Unpin for BufferedUarte<'a, T> {}
#[cfg(any(feature = "52833", feature = "52840"))]
fn port_bit(port: GpioPort) -> bool {
match port {
@@ -74,10 +82,15 @@ fn port_bit(port: GpioPort) -> bool {
}
}
impl<'a, T: Instance> BufferedUarte<'a, T> {
impl<'a, U: Instance, T: TimerInstance, P1: ConfigurablePpi, P2: ConfigurablePpi>
BufferedUarte<'a, U, T, P1, P2>
{
pub fn new(
uarte: T,
irq: T::Interrupt,
uarte: U,
timer: T,
mut ppi_channel_1: P1,
mut ppi_channel_2: P2,
irq: U::Interrupt,
rx_buffer: &'a mut [u8],
tx_buffer: &'a mut [u8],
mut pins: Pins,
@@ -141,11 +154,41 @@ impl<'a, T: Instance> BufferedUarte<'a, T> {
irq.disable();
irq.pend();
// BAUDRATE register values are `baudrate * 2^32 / 16000000`
// source: https://devzone.nordicsemi.com/f/nordic-q-a/391/uart-baudrate-register-values
//
// We want to stop RX if the line is idle for 2 bytes' worth of time.
// That is 20 bits (each byte is 1 start bit + 8 data bits + 1 stop bit).
// This gives us the number of 16 MHz timer ticks for 20 bits.
let timeout = 0x8000_0000 / (baudrate as u32 / 40);
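// Worked example (illustrative, assuming 115200 baud): the BAUDRATE register
// value for 115200 is 0x01D7_E000 ≈ 115200 * 2^32 / 16000000, so
// timeout = 0x8000_0000 / (0x01D7_E000 / 40) ≈ 2777 ticks, which matches
// 20 bits * 16000000 / 115200 ≈ 2778 ticks of the 16 MHz timer.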
timer.tasks_stop.write(|w| unsafe { w.bits(1) });
timer.bitmode.write(|w| w.bitmode()._32bit());
timer.prescaler.write(|w| unsafe { w.prescaler().bits(0) });
timer.cc[0].write(|w| unsafe { w.bits(timeout) });
timer.mode.write(|w| w.mode().timer());
timer.shorts.write(|w| {
w.compare0_clear().set_bit();
w.compare0_stop().set_bit();
w
});
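// PPI wiring (set up below): channel 1 connects the UARTE RXDRDY event to the
// timer's CLEAR task, with START as the fork task, so every received byte
// restarts the idle timer from zero. Channel 2 connects the timer's
// COMPARE[0] event (the 20-bit timeout above) to the UARTE STOPRX task,
// ending the RX transfer once the line has been idle long enough.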
ppi_channel_1.set_event_endpoint(&uarte.events_rxdrdy);
ppi_channel_1.set_task_endpoint(&timer.tasks_clear);
ppi_channel_1.set_fork_task_endpoint(&timer.tasks_start);
ppi_channel_1.enable();
ppi_channel_2.set_event_endpoint(&timer.events_compare[0]);
ppi_channel_2.set_task_endpoint(&uarte.tasks_stoprx);
ppi_channel_2.enable();
BufferedUarte {
inner: PeripheralMutex::new(
irq,
State {
inner: uarte,
uarte,
timer,
ppi_channel_1,
ppi_channel_2,
rx: RingBuffer::new(rx_buffer),
rx_state: RxState::Idle,
@@ -155,25 +198,57 @@ impl<'a, T: Instance> BufferedUarte<'a, T> {
tx_state: TxState::Idle,
tx_waker: WakerRegistration::new(),
},
irq,
),
}
}
fn inner(self: Pin<&mut Self>) -> Pin<&mut PeripheralMutex<T::Interrupt, State<'a, T>>> {
pub fn set_baudrate(self: Pin<&mut Self>, baudrate: Baudrate) {
self.inner().with(|state, _irq| {
let timeout = 0x8000_0000 / (baudrate as u32 / 40);
state.timer.cc[0].write(|w| unsafe { w.bits(timeout) });
state.timer.tasks_clear.write(|w| unsafe { w.bits(1) });
state
.uarte
.baudrate
.write(|w| w.baudrate().variant(baudrate));
});
}
fn inner(self: Pin<&mut Self>) -> Pin<&mut PeripheralMutex<State<'a, U, T, P1, P2>>> {
unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().inner) }
}
}
impl<'a, T: Instance> Drop for BufferedUarte<'a, T> {
fn drop(&mut self) {
// stop DMA before dropping, because DMA is using the buffer in `self`.
todo!()
pub fn free(self: Pin<&mut Self>) -> (U, T, P1, P2, U::Interrupt) {
let (mut state, irq) = self.inner().free();
state.stop();
(
state.uarte,
state.timer,
state.ppi_channel_1,
state.ppi_channel_2,
irq,
)
}
}
impl<'a, T: Instance> AsyncBufRead for BufferedUarte<'a, T> {
impl<'a, U: Instance, T: TimerInstance, P1: ConfigurablePpi, P2: ConfigurablePpi> Drop
for BufferedUarte<'a, U, T, P1, P2>
{
fn drop(&mut self) {
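// The UARTE DMA is still reading from / writing to the RX/TX buffers
// borrowed by `self`, so the transfers must be stopped before those
// borrows end.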
let inner = unsafe { Pin::new_unchecked(&mut self.inner) };
if let Some((mut state, _irq)) = inner.try_free() {
state.stop();
}
}
}
impl<'a, U: Instance, T: TimerInstance, P1: ConfigurablePpi, P2: ConfigurablePpi> AsyncBufRead
for BufferedUarte<'a, U, T, P1, P2>
{
fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<&[u8]>> {
self.inner().with(|_irq, state| {
self.inner().with(|state, _irq| {
// Conservative compiler fence to prevent optimizations that do not
// take into account actions by DMA. The fence has been placed here,
// before any DMA action has started
@@ -190,20 +265,13 @@ impl<'a, T: Instance> AsyncBufRead for BufferedUarte<'a, T> {
}
trace!(" empty");
if state.rx_state == RxState::ReceivingReady {
trace!(" stopping");
state.rx_state = RxState::Stopping;
state.inner.tasks_stoprx.write(|w| unsafe { w.bits(1) });
}
state.rx_waker.register(cx.waker());
Poll::<Result<&[u8]>>::Pending
})
}
fn consume(self: Pin<&mut Self>, amt: usize) {
self.inner().with(|irq, state| {
self.inner().with(|state, irq| {
trace!("consume {:?}", amt);
state.rx.pop(amt);
irq.pend();
@@ -211,9 +279,11 @@ impl<'a, T: Instance> AsyncBufRead for BufferedUarte<'a, T> {
}
}
impl<'a, T: Instance> AsyncWrite for BufferedUarte<'a, T> {
impl<'a, U: Instance, T: TimerInstance, P1: ConfigurablePpi, P2: ConfigurablePpi> AsyncWrite
for BufferedUarte<'a, U, T, P1, P2>
{
fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<Result<usize>> {
self.inner().with(|irq, state| {
self.inner().with(|state, irq| {
trace!("poll_write: {:?}", buf.len());
let tx_buf = state.tx.push_buf();
@@ -241,36 +311,48 @@ impl<'a, T: Instance> AsyncWrite for BufferedUarte<'a, T> {
}
}
impl<'a, T: Instance> PeripheralState for State<'a, T> {
impl<'a, U: Instance, T: TimerInstance, P1: ConfigurablePpi, P2: ConfigurablePpi>
State<'a, U, T, P1, P2>
{
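/// Stop the idle timer and any in-flight RX/TX transfer, then wait (in low
/// power) until the corresponding ENDRX/ENDTX event confirms the DMA is no
/// longer using the buffers.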
fn stop(&mut self) {
self.timer.tasks_stop.write(|w| unsafe { w.bits(1) });
if let RxState::Receiving = self.rx_state {
self.uarte.tasks_stoprx.write(|w| unsafe { w.bits(1) });
}
if let TxState::Transmitting(_) = self.tx_state {
self.uarte.tasks_stoptx.write(|w| unsafe { w.bits(1) });
}
if let RxState::Receiving = self.rx_state {
low_power_wait_until(|| self.uarte.events_endrx.read().bits() == 1);
}
if let TxState::Transmitting(_) = self.tx_state {
low_power_wait_until(|| self.uarte.events_endtx.read().bits() == 1);
}
}
}
impl<'a, U: Instance, T: TimerInstance, P1: ConfigurablePpi, P2: ConfigurablePpi> PeripheralState
for State<'a, U, T, P1, P2>
{
type Interrupt = U::Interrupt;
fn on_interrupt(&mut self) {
trace!("irq: start");
let mut more_work = true;
while more_work {
more_work = false;
loop {
match self.rx_state {
RxState::Idle => {
trace!(" irq_rx: in state idle");
if self.inner.events_rxdrdy.read().bits() != 0 {
trace!(" irq_rx: rxdrdy?????");
self.inner.events_rxdrdy.reset();
}
if self.inner.events_endrx.read().bits() != 0 {
panic!("unexpected endrx");
}
let buf = self.rx.push_buf();
if buf.len() != 0 {
trace!(" irq_rx: starting {:?}", buf.len());
self.rx_state = RxState::Receiving;
// Set up the DMA read
self.inner.rxd.ptr.write(|w|
self.uarte.rxd.ptr.write(|w|
// The PTR field is a full 32 bits wide and accepts the full range
// of values.
unsafe { w.ptr().bits(buf.as_ptr() as u32) });
self.inner.rxd.maxcnt.write(|w|
self.uarte.rxd.maxcnt.write(|w|
// We're giving it the length of the buffer, so no danger of
// accessing invalid memory. We have verified that the length of the
// buffer fits in a `u8`, so the cast to `u8` is also fine.
@@ -280,60 +362,34 @@ impl<'a, T: Instance> PeripheralState for State<'a, T> {
unsafe { w.maxcnt().bits(buf.len() as _) });
trace!(" irq_rx: buf {:?} {:?}", buf.as_ptr() as u32, buf.len());
// Enable RXRDY interrupt.
self.inner.events_rxdrdy.reset();
self.inner.intenset.write(|w| w.rxdrdy().set());
// Start UARTE Receive transaction
self.inner.tasks_startrx.write(|w|
self.uarte.tasks_startrx.write(|w|
// `1` is a valid value to write to task registers.
unsafe { w.bits(1) });
}
break;
}
RxState::Receiving => {
trace!(" irq_rx: in state receiving");
if self.inner.events_rxdrdy.read().bits() != 0 {
trace!(" irq_rx: rxdrdy");
if self.uarte.events_endrx.read().bits() != 0 {
self.timer.tasks_stop.write(|w| unsafe { w.bits(1) });
// Disable the RXRDY event interrupt
// RXRDY is triggered for every byte, but we only care about whether we have
// some bytes or not. So as soon as we have at least one, disable it, to avoid
// wasting CPU cycles in interrupts.
self.inner.intenclr.write(|w| w.rxdrdy().clear());
self.inner.events_rxdrdy.reset();
self.rx_waker.wake();
self.rx_state = RxState::ReceivingReady;
more_work = true; // in case we also have endrx pending
}
}
RxState::ReceivingReady | RxState::Stopping => {
trace!(" irq_rx: in state ReceivingReady");
if self.inner.events_rxdrdy.read().bits() != 0 {
trace!(" irq_rx: rxdrdy");
self.inner.events_rxdrdy.reset();
}
if self.inner.events_endrx.read().bits() != 0 {
let n: usize = self.inner.rxd.amount.read().amount().bits() as usize;
let n: usize = self.uarte.rxd.amount.read().amount().bits() as usize;
trace!(" irq_rx: endrx {:?}", n);
self.rx.push(n);
self.inner.events_endrx.reset();
self.uarte.events_endrx.reset();
self.rx_waker.wake();
self.rx_state = RxState::Idle;
more_work = true; // start another rx if possible
} else {
break;
}
}
}
}
more_work = true;
while more_work {
more_work = false;
loop {
match self.tx_state {
TxState::Idle => {
trace!(" irq_tx: in state Idle");
@@ -343,11 +399,11 @@ impl<'a, T: Instance> PeripheralState for State<'a, T> {
self.tx_state = TxState::Transmitting(buf.len());
// Set up the DMA write
self.inner.txd.ptr.write(|w|
self.uarte.txd.ptr.write(|w|
// The PTR field is a full 32 bits wide and accepts the full range
// of values.
unsafe { w.ptr().bits(buf.as_ptr() as u32) });
self.inner.txd.maxcnt.write(|w|
self.uarte.txd.maxcnt.write(|w|
// We're giving it the length of the buffer, so no danger of
// accessing invalid memory. We have verified that the length of the
// buffer fits in a `u8`, so the cast to `u8` is also fine.
@@ -357,21 +413,23 @@ impl<'a, T: Instance> PeripheralState for State<'a, T> {
unsafe { w.maxcnt().bits(buf.len() as _) });
// Start UARTE Transmit transaction
self.inner.tasks_starttx.write(|w|
self.uarte.tasks_starttx.write(|w|
// `1` is a valid value to write to task registers.
unsafe { w.bits(1) });
}
break;
}
TxState::Transmitting(n) => {
trace!(" irq_tx: in state Transmitting");
if self.inner.events_endtx.read().bits() != 0 {
self.inner.events_endtx.reset();
if self.uarte.events_endtx.read().bits() != 0 {
self.uarte.events_endtx.reset();
trace!(" irq_tx: endtx {:?}", n);
self.tx.pop(n);
self.tx_waker.wake();
self.tx_state = TxState::Idle;
more_work = true; // start another tx if possible
} else {
break;
}
}
}
@@ -380,15 +438,20 @@ impl<'a, T: Instance> PeripheralState for State<'a, T> {
}
}
mod private {
pub trait Sealed {}
mod sealed {
pub trait Instance {}
impl Sealed for crate::pac::UARTE0 {}
impl Instance for crate::pac::UARTE0 {}
#[cfg(any(feature = "52833", feature = "52840", feature = "9160"))]
impl Sealed for crate::pac::UARTE1 {}
impl Instance for crate::pac::UARTE1 {}
pub trait TimerInstance {}
impl TimerInstance for crate::pac::TIMER0 {}
impl TimerInstance for crate::pac::TIMER1 {}
impl TimerInstance for crate::pac::TIMER2 {}
}
pub trait Instance: Deref<Target = uarte0::RegisterBlock> + private::Sealed {
pub trait Instance: Deref<Target = pac::uarte0::RegisterBlock> + sealed::Instance {
type Interrupt: OwnedInterrupt;
}
@@ -400,3 +463,11 @@ impl Instance for pac::UARTE0 {
impl Instance for pac::UARTE1 {
type Interrupt = interrupt::UARTE1Interrupt;
}
pub trait TimerInstance:
Deref<Target = pac::timer0::RegisterBlock> + sealed::TimerInstance
{
}
impl TimerInstance for crate::pac::TIMER0 {}
impl TimerInstance for crate::pac::TIMER1 {}
impl TimerInstance for crate::pac::TIMER2 {}

View File

@@ -6,25 +6,26 @@ use crate::fmt::*;
use crate::interrupt::OwnedInterrupt;
pub trait PeripheralState {
type Interrupt: OwnedInterrupt;
fn on_interrupt(&mut self);
}
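// A peripheral's state `S` is shared between thread-mode code and the
// peripheral's interrupt handler; `PeripheralMutex::with()` below hands out
// `&mut S` only while the interrupt is disabled, so `on_interrupt` and the
// closure never run concurrently.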
pub struct PeripheralMutex<I: OwnedInterrupt, S: PeripheralState> {
inner: Option<(I, UnsafeCell<S>)>,
pub struct PeripheralMutex<S: PeripheralState> {
inner: Option<(UnsafeCell<S>, S::Interrupt)>,
not_send: PhantomData<*mut ()>,
}
impl<I: OwnedInterrupt, S: PeripheralState> PeripheralMutex<I, S> {
pub fn new(irq: I, state: S) -> Self {
impl<S: PeripheralState> PeripheralMutex<S> {
pub fn new(state: S, irq: S::Interrupt) -> Self {
Self {
inner: Some((irq, UnsafeCell::new(state))),
inner: Some((UnsafeCell::new(state), irq)),
not_send: PhantomData,
}
}
pub fn with<R>(self: Pin<&mut Self>, f: impl FnOnce(&mut I, &mut S) -> R) -> R {
pub fn with<R>(self: Pin<&mut Self>, f: impl FnOnce(&mut S, &mut S::Interrupt) -> R) -> R {
let this = unsafe { self.get_unchecked_mut() };
let (irq, state) = unwrap!(this.inner.as_mut());
let (state, irq) = unwrap!(this.inner.as_mut());
irq.disable();
compiler_fence(Ordering::SeqCst);
@@ -43,7 +44,7 @@ impl<I: OwnedInterrupt, S: PeripheralState> PeripheralMutex<I, S> {
// Safety: it's OK to get a &mut to the state, since the irq is disabled.
let state = unsafe { &mut *state.get() };
let r = f(irq, state);
let r = f(state, irq);
compiler_fence(Ordering::SeqCst);
irq.enable();
@@ -51,18 +52,23 @@ impl<I: OwnedInterrupt, S: PeripheralState> PeripheralMutex<I, S> {
r
}
pub fn free(self: Pin<&mut Self>) -> (I, S) {
pub fn try_free(self: Pin<&mut Self>) -> Option<(S, S::Interrupt)> {
let this = unsafe { self.get_unchecked_mut() };
let (irq, state) = unwrap!(this.inner.take());
irq.disable();
irq.remove_handler();
(irq, state.into_inner())
this.inner.take().map(|(state, irq)| {
irq.disable();
irq.remove_handler();
(state.into_inner(), irq)
})
}
pub fn free(self: Pin<&mut Self>) -> (S, S::Interrupt) {
unwrap!(self.try_free())
}
}
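// Usage sketch (illustrative, not part of this change): a driver wraps its
// state together with the owned interrupt and touches the state only with
// the interrupt masked:
//
//     let mut mutex = PeripheralMutex::new(MyDriverState { /* ... */ }, irq);
//     // ...once pinned:
//     pinned.as_mut().with(|state, irq| {
//         state.start_something();
//         irq.pend(); // have on_interrupt() run once the closure returns
//     });
//
// `BufferedUarte` above follows exactly this pattern.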
impl<I: OwnedInterrupt, S: PeripheralState> Drop for PeripheralMutex<I, S> {
impl<S: PeripheralState> Drop for PeripheralMutex<S> {
fn drop(&mut self) {
if let Some((irq, state)) = &mut self.inner {
if let Some((state, irq)) = &mut self.inner {
irq.disable();
irq.remove_handler();
}

View File

@@ -2,12 +2,11 @@ use core::future::Future;
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
Failed,
AddressMisaligned,
BufferMisaligned,
_NonExhaustive,
}
pub trait Flash {

View File

@@ -1,9 +1,7 @@
/// Categories of errors that can occur.
///
/// This list is intended to grow over time and it is not recommended to
/// exhaustively match against it.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub enum Error {
/// An entity was not found, often a file.
NotFound,
@@ -142,22 +140,3 @@ impl core::fmt::Display for Error {
#[cfg(feature = "std")]
impl std::error::Error for Error {}
/*
impl From<smoltcp::Error> for Error {
fn from(err: smoltcp::Error) -> Error {
match err {
smoltcp::Error::Exhausted => Error::Exhausted,
smoltcp::Error::Illegal => Error::Illegal,
smoltcp::Error::Unaddressable => Error::Unaddressable,
smoltcp::Error::Truncated => Error::Truncated,
smoltcp::Error::Checksum => Error::Checksum,
smoltcp::Error::Unrecognized => Error::Unrecognized,
smoltcp::Error::Fragmented => Error::Fragmented,
smoltcp::Error::Malformed => Error::Malformed,
smoltcp::Error::Dropped => Error::Dropped,
_ => Error::Other,
}
}
}
*/

View File

@@ -1,11 +1,13 @@
mod drop_bomb;
mod forever;
mod mutex;
mod portal;
mod signal;
mod waker;
pub use drop_bomb::*;
pub use forever::*;
pub use mutex::*;
pub use portal::*;
pub use signal::*;
pub use waker::*;

embassy/src/util/mutex.rs (new file, 75 lines added)
View File

@@ -0,0 +1,75 @@
use core::cell::UnsafeCell;
use cortex_m::interrupt::CriticalSection;
use crate::fmt::{assert, panic, *};
/// A "mutex" based on critical sections
///
/// # Safety
///
/// **This Mutex is only safe on single-core systems.**
///
/// On multi-core systems, a `CriticalSection` **is not sufficient** to ensure exclusive access.
pub struct CriticalSectionMutex<T> {
inner: UnsafeCell<T>,
}
unsafe impl<T> Sync for CriticalSectionMutex<T> {}
unsafe impl<T> Send for CriticalSectionMutex<T> {}
impl<T> CriticalSectionMutex<T> {
/// Creates a new mutex
pub const fn new(value: T) -> Self {
CriticalSectionMutex {
inner: UnsafeCell::new(value),
}
}
}
impl<T> CriticalSectionMutex<T> {
/// Borrows the data for the duration of the critical section
pub fn borrow<'cs>(&'cs self, _cs: &'cs CriticalSection) -> &'cs T {
unsafe { &*self.inner.get() }
}
}
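// Example use (illustrative): the shared value is only reachable inside a
// critical section, which masks interrupts on a single core:
//
//     use core::cell::Cell;
//     static SHARED: CriticalSectionMutex<Cell<u32>> =
//         CriticalSectionMutex::new(Cell::new(0));
//     cortex_m::interrupt::free(|cs| SHARED.borrow(cs).set(42));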
/// A "mutex" that only allows borrowing from thread mode.
///
/// # Safety
///
/// **This Mutex is only safe on single-core systems.**
///
/// On multi-core systems, a `ThreadModeMutex` **is not sufficient** to ensure exclusive access.
pub struct ThreadModeMutex<T> {
inner: UnsafeCell<T>,
}
unsafe impl<T> Sync for ThreadModeMutex<T> {}
unsafe impl<T> Send for ThreadModeMutex<T> {}
impl<T> ThreadModeMutex<T> {
/// Creates a new mutex
pub const fn new(value: T) -> Self {
ThreadModeMutex {
inner: UnsafeCell::new(value),
}
}
}
impl<T> ThreadModeMutex<T> {
/// Borrows the data
pub fn borrow(&self) -> &T {
assert!(
in_thread_mode(),
"ThreadModeMutex can only be borrowed from thread mode."
);
unsafe { &*self.inner.get() }
}
}
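/// Whether the CPU is currently in thread mode, i.e. not servicing any
/// exception or interrupt. The `std` branch approximates this for host-side
/// builds by checking for the main thread.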
pub fn in_thread_mode() -> bool {
#[cfg(feature = "std")]
return Some("main") == std::thread::current().name();
#[cfg(not(feature = "std"))]
return cortex_m::peripheral::SCB::vect_active()
== cortex_m::peripheral::scb::VectActive::ThreadMode;
}

View File

@@ -1,3 +1,4 @@
use core::mem;
use core::task::Context;
use core::task::Waker;
@@ -19,11 +20,21 @@ impl WakerRegistration {
// keep the old waker, skipping the clone. (In most executor implementations,
// cloning a waker is somewhat expensive, comparable to cloning an Arc).
Some(ref w2) if (w2.will_wake(w)) => {}
// In all other cases
// - we have no waker registered
// - we have a waker registered but it's for a different task.
// then clone the new waker and store it
_ => self.waker = Some(w.clone()),
_ => {
// clone the new waker and store it
if let Some(old_waker) = mem::replace(&mut self.waker, Some(w.clone())) {
// We had a waker registered for another task. Wake it, so the other task can
// reregister itself if it's still interested.
//
// If two tasks are waiting on the same thing concurrently, this will cause them
// to wake each other in a loop fighting over this WakerRegistration. This wastes
// CPU but things will still work.
//
// If the user wants to have two tasks waiting on the same thing they should use
// a more appropriate primitive that can store multiple wakers.
old_waker.wake()
}
}
}
}
@@ -35,4 +46,4 @@ impl WakerRegistration {
pub fn context(&self) -> Option<Context<'_>> {
self.waker.as_ref().map(|w| Context::from_waker(w))
}
}
}