Compare commits

...

33 Commits

SHA1 Message Date
ba9cc102e2 ci: cache lockfiles
- Removes the slow "updating crates.io index, updating git" step at the start of the job.
- Avoids CI being randomly slower when a widely-used dep (like serde) gets bumped, which would otherwise cause everything to be rebuilt.
2023-11-21 17:33:07 +01:00
32b59148a8 Merge pull request #2210 from embassy-rs/lulf-patch-1
fix: faq.adoc syntax
2023-11-21 15:04:43 +00:00
bd2e6b0422 Update faq.adoc 2023-11-21 16:01:46 +01:00
85059f693b Merge pull request #2209 from bugadani/faq
Direct the ESP32 users towards esp-hal
2023-11-21 15:54:04 +01:00
c211d51c06 Add note for Xtensa 2023-11-21 15:53:30 +01:00
7f1fd199bb Merge pull request #2207 from mickvangelderen/split-can-bus-2
Split can bus 2
2023-11-21 14:45:10 +00:00
5ddd2cf9a6 Merge pull request #2208 from jamesmunns/add-faq
Add FAQ with one question from chat
2023-11-21 14:42:28 +00:00
88f893da45 Format 2023-11-21 15:40:07 +01:00
aedd41eac4 Add FAQ with one question from chat 2023-11-21 15:38:33 +01:00
19ba7da3fd Rename _flush* methods 2023-11-21 15:37:38 +01:00
06a83c0f89 Refactor bxcan split. 2023-11-21 15:34:34 +01:00
766ec77ec5 Merge pull request #2198 from adamgreig/stm32-opamp
STM32: Don't enable opamps in new(), wait until configured
2023-11-20 22:47:19 +00:00
d1af696605 STM32 opamp: use impl Peripheral instead of directly taking pins 2023-11-20 21:35:05 +00:00
2386619f1f STM32: Disable opamp when OpAmpOutput is dropped, not when the parent OpAmp is dropped 2023-11-20 21:17:09 +00:00
382949e1ff Merge pull request #2197 from MabezDev/priority-channel
Priority channel
2023-11-20 11:34:33 +00:00
454828accb revert module changes, reexport heapless relevant items 2023-11-20 11:28:31 +00:00
cf82fa687c Merge pull request #2192 from RobertTDowling/stm32h7-adc-clock
stm32h7 ADC: Fix stalled clock in default h7 config
2023-11-20 00:00:25 +00:00
7f258cd3c4 PR feedback 2023-11-19 15:56:34 -08:00
3a939fb511 Merge pull request #2204 from Slowki/fix/igmp-compile-error
populate `medium` in DriverAdapter struct in IGMP code
2023-11-19 22:43:55 +00:00
b8e9e00b44 add build with igmp enabled to ci.sh 2023-11-19 17:26:16 -05:00
ff2f1049b1 Merge pull request #2139 from jamesmunns/patch-1
Update RP2040 memory.x
2023-11-19 22:17:27 +00:00
be17e1b363 populate medium in DriverAdapter struct in IGMP code 2023-11-19 17:11:56 -05:00
f3c77e59c4 Add docs, touch all linker fragments 2023-11-19 23:10:11 +01:00
30424d83ff Update RP2040 memory.x
The RP2040 has 264KiB of memory, not 256KiB.
2023-11-19 23:10:11 +01:00
e3be0b957a Merge pull request #2203 from embassy-rs/h7-sai4only-fix
stm32/sai: fix build on chips with only SAI4 (like stm32h725re), improve sync config.
2023-11-19 21:13:01 +00:00
814e096d22 STM32: Don't enable opamps in new(), wait until configured. 2023-11-18 19:37:56 +00:00
5a60024af7 docs 2023-11-18 15:09:41 +00:00
f482a105b8 more clean up, refactor channel into module to share code 2023-11-18 15:01:12 +00:00
7589b5e13e reduce duplication further by sharing Dynamic sender/receiver 2023-11-18 14:56:29 +00:00
2efa73f431 docs and simple test for priority 2023-11-18 14:37:15 +00:00
270ec324b0 Reduce duplication, fix tests 2023-11-18 14:31:09 +00:00
ca0d02933b Priority channel using binary heap 2023-11-18 14:21:43 +00:00
4947b13615 stm32h7 ADC: Fix stalled clock in default h7 config 2023-11-15 17:11:16 -08:00
17 changed files with 797 additions and 79 deletions

View File

@ -12,9 +12,19 @@ export CARGO_TARGET_DIR=/ci/cache/target
# used when pointing stm32-metapac to a CI-built one.
export CARGO_NET_GIT_FETCH_WITH_CLI=true
# Restore lockfiles
if [ -f /ci/cache/lockfiles.tar ]; then
echo Restoring lockfiles...
tar xf /ci/cache/lockfiles.tar
fi
hashtime restore /ci/cache/filetime.json || true
hashtime save /ci/cache/filetime.json
sed -i 's/channel.*/channel = "stable"/g' rust-toolchain.toml
./ci_stable.sh
# Save lockfiles
echo Saving lockfiles...
find . -type f -name Cargo.lock -exec tar -cf /ci/cache/lockfiles.tar '{}' \+

.github/ci/build.sh vendored
View File

@ -18,7 +18,17 @@ fi
# used when pointing stm32-metapac to a CI-built one.
export CARGO_NET_GIT_FETCH_WITH_CLI=true
# Restore lockfiles
if [ -f /ci/cache/lockfiles.tar ]; then
echo Restoring lockfiles...
tar xf /ci/cache/lockfiles.tar
fi
hashtime restore /ci/cache/filetime.json || true
hashtime save /ci/cache/filetime.json
./ci.sh
# Save lockfiles
echo Saving lockfiles...
find . -type f -name Cargo.lock -exec tar -cf /ci/cache/lockfiles.tar '{}' \+

ci.sh
View File

@ -38,6 +38,7 @@ cargo batch \
--- build --release --manifest-path embassy-sync/Cargo.toml --target thumbv6m-none-eabi --features nightly,defmt \
--- build --release --manifest-path embassy-time/Cargo.toml --target thumbv6m-none-eabi --features nightly,defmt,defmt-timestamp-uptime,tick-hz-32_768,generic-queue-8 \
--- build --release --manifest-path embassy-net/Cargo.toml --target thumbv7em-none-eabi --features defmt,tcp,udp,dns,proto-ipv4,medium-ethernet \
--- build --release --manifest-path embassy-net/Cargo.toml --target thumbv7em-none-eabi --features defmt,tcp,udp,dns,proto-ipv4,igmp,medium-ethernet \
--- build --release --manifest-path embassy-net/Cargo.toml --target thumbv7em-none-eabi --features defmt,tcp,udp,dns,dhcpv4,medium-ethernet \
--- build --release --manifest-path embassy-net/Cargo.toml --target thumbv7em-none-eabi --features defmt,tcp,udp,dns,dhcpv4,medium-ethernet,nightly,dhcpv4-hostname \
--- build --release --manifest-path embassy-net/Cargo.toml --target thumbv7em-none-eabi --features defmt,tcp,udp,dns,proto-ipv6,medium-ethernet \

View File

@ -10,3 +10,4 @@
* xref:examples.adoc[Examples]
* xref:developer.adoc[Developer]
** xref:developer_stm32.adoc[Developer: STM32]
* xref:faq.adoc[Frequently Asked Questions]

View File

@ -0,0 +1,25 @@
= Frequently Asked Questions
This is an unsorted list of commonly asked questions and answers.
Please feel free to add items to link:https://github.com/embassy-rs/embassy/edit/main/docs/modules/ROOT/pages/faq.adoc[this page], especially if someone in the chat answered a question for you!
== Missing main macro
If you see an error like this:
[source,rust]
----
#[embassy_executor::main]
| ^^^^ could not find `main` in `embassy_executor`
----
You are likely missing some features of the `embassy-executor` crate.
For Cortex-M targets, consider making sure that ALL of the following features are active in your `Cargo.toml` for the `embassy-executor` crate:
* `arch-cortex-m`
* `executor-thread`
* `nightly`
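As a minimal sketch, the corresponding `Cargo.toml` entry could look like the following (the version number is illustrative; use whichever release or git revision your project actually targets):
[source,toml]
----
[dependencies.embassy-executor]
# Illustrative version only.
version = "0.3"
features = ["arch-cortex-m", "executor-thread", "nightly"]
----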
For Xtensa ESP32, consider using the executors and `#[main]` macro provided by your appropriate link:https://crates.io/crates/esp-hal-common[HAL crate].

View File

@ -616,9 +616,11 @@ impl<D: Driver> Stack<D> {
let addr = addr.into();
self.with_mut(|s, i| {
let (_hardware_addr, medium) = to_smoltcp_hardware_address(i.device.hardware_address());
let mut smoldev = DriverAdapter {
cx: Some(cx),
inner: &mut i.device,
medium,
};
match s
@ -653,9 +655,11 @@ impl<D: Driver> Stack<D> {
let addr = addr.into();
self.with_mut(|s, i| {
let (_hardware_addr, medium) = to_smoltcp_hardware_address(i.device.hardware_address());
let mut smoldev = DriverAdapter {
cx: Some(cx),
inner: &mut i.device,
medium,
};
match s

View File

@ -1,4 +1,3 @@
use core::cell::{RefCell, RefMut};
use core::future::poll_fn;
use core::marker::PhantomData;
use core::ops::{Deref, DerefMut};
@ -84,7 +83,7 @@ impl<T: Instance> interrupt::typelevel::Handler<T::SCEInterrupt> for SceInterrup
}
pub struct Can<'d, T: Instance> {
pub can: RefCell<bxcan::Can<BxcanInstance<'d, T>>>,
pub can: bxcan::Can<BxcanInstance<'d, T>>,
}
#[derive(Debug)]
@ -175,17 +174,12 @@ impl<'d, T: Instance> Can<'d, T> {
tx.set_as_af(tx.af_num(), AFType::OutputPushPull);
let can = bxcan::Can::builder(BxcanInstance(peri)).leave_disabled();
let can_ref_cell = RefCell::new(can);
Self { can: can_ref_cell }
Self { can }
}
pub fn set_bitrate(&mut self, bitrate: u32) {
let bit_timing = Self::calc_bxcan_timings(T::frequency(), bitrate).unwrap();
self.can
.borrow_mut()
.modify_config()
.set_bit_timing(bit_timing)
.leave_disabled();
self.can.modify_config().set_bit_timing(bit_timing).leave_disabled();
}
/// Enables the peripheral and synchronizes with the bus.
@ -193,7 +187,7 @@ impl<'d, T: Instance> Can<'d, T> {
/// This will wait for 11 consecutive recessive bits (bus idle state).
/// Contrary to the `enable` method from the bxcan library, this will not block the executor while waiting.
pub async fn enable(&mut self) {
while self.borrow_mut().enable_non_blocking().is_err() {
while self.enable_non_blocking().is_err() {
// SCE interrupt is only generated for entering sleep mode, but not leaving.
// Yield to allow other tasks to execute while can bus is initializing.
embassy_futures::yield_now().await;
@ -202,46 +196,46 @@ impl<'d, T: Instance> Can<'d, T> {
/// Queues the message to be sent but exerts backpressure
pub async fn write(&mut self, frame: &Frame) -> bxcan::TransmitStatus {
CanTx { can: &self.can }.write(frame).await
self.split().0.write(frame).await
}
/// Attempts to transmit a frame without blocking.
///
/// Returns [Err(TryWriteError::Full)] if all transmit mailboxes are full.
pub fn try_write(&mut self, frame: &Frame) -> Result<bxcan::TransmitStatus, TryWriteError> {
CanTx { can: &self.can }.try_write(frame)
self.split().0.try_write(frame)
}
/// Waits for a specific transmit mailbox to become empty
pub async fn flush(&self, mb: bxcan::Mailbox) {
CanTx { can: &self.can }.flush(mb).await
CanTx::<T>::flush_inner(mb).await
}
/// Waits until any of the transmit mailboxes become empty
pub async fn flush_any(&self) {
CanTx { can: &self.can }.flush_any().await
CanTx::<T>::flush_any_inner().await
}
/// Waits until all of the transmit mailboxes become empty
pub async fn flush_all(&self) {
CanTx { can: &self.can }.flush_all().await
CanTx::<T>::flush_all_inner().await
}
/// Returns a tuple of the time the message was received and the message frame
pub async fn read(&mut self) -> Result<Envelope, BusError> {
CanRx { can: &self.can }.read().await
self.split().1.read().await
}
/// Attempts to read a can frame without blocking.
///
/// Returns [Err(TryReadError::Empty)] if there are no frames in the rx queue.
pub fn try_read(&mut self) -> Result<Envelope, TryReadError> {
CanRx { can: &self.can }.try_read()
self.split().1.try_read()
}
/// Waits while receive queue is empty.
pub async fn wait_not_empty(&mut self) {
CanRx { can: &self.can }.wait_not_empty().await
self.split().1.wait_not_empty().await
}
unsafe fn receive_fifo(fifo: RxFifo) {
@ -385,24 +379,25 @@ impl<'d, T: Instance> Can<'d, T> {
Some((sjw - 1) << 24 | (bs1 as u32 - 1) << 16 | (bs2 as u32 - 1) << 20 | (prescaler - 1))
}
pub fn split<'c>(&'c self) -> (CanTx<'c, 'd, T>, CanRx<'c, 'd, T>) {
(CanTx { can: &self.can }, CanRx { can: &self.can })
pub fn split<'c>(&'c mut self) -> (CanTx<'c, 'd, T>, CanRx<'c, 'd, T>) {
let (tx, rx0, rx1) = self.can.split_by_ref();
(CanTx { tx }, CanRx { rx0, rx1 })
}
pub fn as_mut(&self) -> RefMut<'_, bxcan::Can<BxcanInstance<'d, T>>> {
self.can.borrow_mut()
pub fn as_mut(&mut self) -> &mut bxcan::Can<BxcanInstance<'d, T>> {
&mut self.can
}
}
pub struct CanTx<'c, 'd, T: Instance> {
can: &'c RefCell<bxcan::Can<BxcanInstance<'d, T>>>,
tx: &'c mut bxcan::Tx<BxcanInstance<'d, T>>,
}
impl<'c, 'd, T: Instance> CanTx<'c, 'd, T> {
pub async fn write(&mut self, frame: &Frame) -> bxcan::TransmitStatus {
poll_fn(|cx| {
T::state().tx_waker.register(cx.waker());
if let Ok(status) = self.can.borrow_mut().transmit(frame) {
if let Ok(status) = self.tx.transmit(frame) {
return Poll::Ready(status);
}
@ -415,11 +410,10 @@ impl<'c, 'd, T: Instance> CanTx<'c, 'd, T> {
///
/// Returns [Err(TryWriteError::Full)] if all transmit mailboxes are full.
pub fn try_write(&mut self, frame: &Frame) -> Result<bxcan::TransmitStatus, TryWriteError> {
self.can.borrow_mut().transmit(frame).map_err(|_| TryWriteError::Full)
self.tx.transmit(frame).map_err(|_| TryWriteError::Full)
}
/// Waits for a specific transmit mailbox to become empty
pub async fn flush(&self, mb: bxcan::Mailbox) {
async fn flush_inner(mb: bxcan::Mailbox) {
poll_fn(|cx| {
T::state().tx_waker.register(cx.waker());
if T::regs().tsr().read().tme(mb.index()) {
@ -431,8 +425,12 @@ impl<'c, 'd, T: Instance> CanTx<'c, 'd, T> {
.await;
}
/// Waits until any of the transmit mailboxes become empty
pub async fn flush_any(&self) {
/// Waits for a specific transmit mailbox to become empty
pub async fn flush(&self, mb: bxcan::Mailbox) {
Self::flush_inner(mb).await
}
async fn flush_any_inner() {
poll_fn(|cx| {
T::state().tx_waker.register(cx.waker());
@ -449,8 +447,12 @@ impl<'c, 'd, T: Instance> CanTx<'c, 'd, T> {
.await;
}
/// Waits until all of the transmit mailboxes become empty
pub async fn flush_all(&self) {
/// Waits until any of the transmit mailboxes become empty
pub async fn flush_any(&self) {
Self::flush_any_inner().await
}
async fn flush_all_inner() {
poll_fn(|cx| {
T::state().tx_waker.register(cx.waker());
@ -466,11 +468,17 @@ impl<'c, 'd, T: Instance> CanTx<'c, 'd, T> {
})
.await;
}
/// Waits until all of the transmit mailboxes become empty
pub async fn flush_all(&self) {
Self::flush_all_inner().await
}
}
#[allow(dead_code)]
pub struct CanRx<'c, 'd, T: Instance> {
can: &'c RefCell<bxcan::Can<BxcanInstance<'d, T>>>,
rx0: &'c mut bxcan::Rx0<BxcanInstance<'d, T>>,
rx1: &'c mut bxcan::Rx1<BxcanInstance<'d, T>>,
}
impl<'c, 'd, T: Instance> CanRx<'c, 'd, T> {
@ -538,7 +546,7 @@ impl<'d, T: Instance> Drop for Can<'d, T> {
}
impl<'d, T: Instance> Deref for Can<'d, T> {
type Target = RefCell<bxcan::Can<BxcanInstance<'d, T>>>;
type Target = bxcan::Can<BxcanInstance<'d, T>>;
fn deref(&self) -> &Self::Target {
&self.can

View File

@ -31,12 +31,9 @@ impl From<OpAmpSpeed> for crate::pac::opamp::vals::OpampCsrOpahsm {
/// OpAmp external outputs, wired to a GPIO pad.
///
/// The GPIO output pad is held by this struct to ensure it cannot be used elsewhere.
///
/// This struct can also be used as an ADC input.
pub struct OpAmpOutput<'d, 'p, T: Instance, P: OutputPin<T>> {
pub struct OpAmpOutput<'d, T: Instance> {
_inner: &'d OpAmp<'d, T>,
_output: &'p mut P,
}
/// OpAmp internal outputs, wired directly to ADC inputs.
@ -54,19 +51,12 @@ pub struct OpAmp<'d, T: Instance> {
impl<'d, T: Instance> OpAmp<'d, T> {
/// Create a new driver instance.
///
/// Enables the OpAmp and configures the speed, but
/// does not set any other configuration.
/// Does not enable the opamp, but does set the speed mode on some families.
pub fn new(opamp: impl Peripheral<P = T> + 'd, #[cfg(opamp_g4)] speed: OpAmpSpeed) -> Self {
into_ref!(opamp);
#[cfg(opamp_f3)]
T::regs().opampcsr().modify(|w| {
w.set_opampen(true);
});
#[cfg(opamp_g4)]
T::regs().opamp_csr().modify(|w| {
w.set_opaen(true);
w.set_opahsm(speed.into());
});
@ -74,24 +64,23 @@ impl<'d, T: Instance> OpAmp<'d, T> {
}
/// Configure the OpAmp as a buffer for the provided input pin,
/// outputting to the provided output pin.
/// outputting to the provided output pin, and enable the opamp.
///
/// The input pin is configured for analogue mode but not consumed,
/// so it may subsequently be used for ADC or comparator inputs.
///
/// The output pin is held within the returned [`OpAmpOutput`] struct,
/// preventing it being used elsewhere. The `OpAmpOutput` can then be
/// directly used as an ADC input.
pub fn buffer_ext<'a, 'b, IP, OP>(
&'a mut self,
in_pin: &IP,
out_pin: &'b mut OP,
/// directly used as an ADC input. The opamp will be disabled when the
/// [`OpAmpOutput`] is dropped.
pub fn buffer_ext(
&'d mut self,
in_pin: impl Peripheral<P = impl NonInvertingPin<T> + crate::gpio::sealed::Pin>,
out_pin: impl Peripheral<P = impl OutputPin<T> + crate::gpio::sealed::Pin> + 'd,
gain: OpAmpGain,
) -> OpAmpOutput<'a, 'b, T, OP>
where
IP: NonInvertingPin<T> + crate::gpio::sealed::Pin,
OP: OutputPin<T> + crate::gpio::sealed::Pin,
{
) -> OpAmpOutput<'d, T> {
into_ref!(in_pin);
into_ref!(out_pin);
in_pin.set_as_analog();
out_pin.set_as_analog();
@ -122,24 +111,24 @@ impl<'d, T: Instance> OpAmp<'d, T> {
w.set_opaen(true);
});
OpAmpOutput {
_inner: self,
_output: out_pin,
}
OpAmpOutput { _inner: self }
}
/// Configure the OpAmp as a buffer for the provided input pin,
/// with the output only used internally.
/// with the output only used internally, and enable the opamp.
///
/// The input pin is configured for analogue mode but not consumed,
/// so it may be subsequently used for ADC or comparator inputs.
///
/// The returned `OpAmpInternalOutput` struct may be used as an ADC input.
/// The opamp output will be disabled when it is dropped.
#[cfg(opamp_g4)]
pub fn buffer_int<'a, P>(&'a mut self, pin: &P, gain: OpAmpGain) -> OpAmpInternalOutput<'a, T>
where
P: NonInvertingPin<T> + crate::gpio::sealed::Pin,
{
pub fn buffer_int(
&'d mut self,
pin: impl Peripheral<P = impl NonInvertingPin<T> + crate::gpio::sealed::Pin>,
gain: OpAmpGain,
) -> OpAmpInternalOutput<'d, T> {
into_ref!(pin);
pin.set_as_analog();
let (vm_sel, pga_gain) = match gain {
@ -163,7 +152,21 @@ impl<'d, T: Instance> OpAmp<'d, T> {
}
}
impl<'d, T: Instance> Drop for OpAmp<'d, T> {
impl<'d, T: Instance> Drop for OpAmpOutput<'d, T> {
fn drop(&mut self) {
#[cfg(opamp_f3)]
T::regs().opampcsr().modify(|w| {
w.set_opampen(false);
});
#[cfg(opamp_g4)]
T::regs().opamp_csr().modify(|w| {
w.set_opaen(false);
});
}
}
impl<'d, T: Instance> Drop for OpAmpInternalOutput<'d, T> {
fn drop(&mut self) {
#[cfg(opamp_f3)]
T::regs().opampcsr().modify(|w| {
@ -203,16 +206,16 @@ macro_rules! impl_opamp_external_output {
($inst:ident, $adc:ident, $ch:expr) => {
foreach_adc!(
($adc, $common_inst:ident, $adc_clock:ident) => {
impl<'d, 'p, P: OutputPin<crate::peripherals::$inst>> crate::adc::sealed::AdcPin<crate::peripherals::$adc>
for OpAmpOutput<'d, 'p, crate::peripherals::$inst, P>
impl<'d> crate::adc::sealed::AdcPin<crate::peripherals::$adc>
for OpAmpOutput<'d, crate::peripherals::$inst>
{
fn channel(&self) -> u8 {
$ch
}
}
impl<'d, 'p, P: OutputPin<crate::peripherals::$inst>> crate::adc::AdcPin<crate::peripherals::$adc>
for OpAmpOutput<'d, 'p, crate::peripherals::$inst, P>
impl<'d> crate::adc::AdcPin<crate::peripherals::$adc>
for OpAmpOutput<'d, crate::peripherals::$inst>
{
}
};

View File

@ -168,7 +168,12 @@ impl Default for Config {
apb4_pre: APBPrescaler::DIV1,
per_clock_source: PerClockSource::HSI,
adc_clock_source: AdcClockSource::from_bits(0), // PLL2_P on H7, HCLK on H5
#[cfg(stm32h5)]
adc_clock_source: AdcClockSource::HCLK1,
#[cfg(stm32h7)]
adc_clock_source: AdcClockSource::PER,
timer_prescaler: TimerPrescaler::DefaultX2,
voltage_scale: VoltageScale::Scale0,
ls: Default::default(),

View File

@ -5,6 +5,7 @@ An [Embassy](https://embassy.dev) project.
Synchronization primitives and data structures with async support:
- [`Channel`](channel::Channel) - A Multiple Producer Multiple Consumer (MPMC) channel. Each message is only received by a single consumer.
- [`PriorityChannel`](priority_channel::PriorityChannel) - A Multiple Producer Multiple Consumer (MPMC) channel. Each message is only received by a single consumer. Higher priority items are sifted to the front of the channel.
- [`PubSubChannel`](pubsub::PubSubChannel) - A broadcast (publish-subscribe) channel. Each message is received by all consumers.
- [`Signal`](signal::Signal) - Signalling latest value to a single consumer.
- [`Mutex`](mutex::Mutex) - Mutex for synchronizing state between asynchronous tasks.
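For illustration, a minimal sketch of the ordering behaviour described by the new `PriorityChannel` entry above, mirroring the doc example and tests added later in this diff (with the `Max` kind, the largest item is received first):

```rust
use embassy_sync::blocking_mutex::raw::NoopRawMutex;
use embassy_sync::priority_channel::{Max, PriorityChannel};

fn main() {
    // Bounded channel of three u32s; `Max` delivers the largest value first.
    let channel = PriorityChannel::<NoopRawMutex, u32, Max, 3>::new();
    assert!(channel.try_send(1).is_ok());
    assert!(channel.try_send(3).is_ok());
    assert!(channel.try_send(2).is_ok());
    assert_eq!(channel.try_receive().unwrap(), 3);
    assert_eq!(channel.try_receive().unwrap(), 2);
    assert_eq!(channel.try_receive().unwrap(), 1);
}
```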

View File

@ -76,7 +76,7 @@ where
/// Send-only access to a [`Channel`] without knowing channel size.
pub struct DynamicSender<'ch, T> {
channel: &'ch dyn DynamicChannel<T>,
pub(crate) channel: &'ch dyn DynamicChannel<T>,
}
impl<'ch, T> Clone for DynamicSender<'ch, T> {
@ -176,7 +176,7 @@ where
/// Receive-only access to a [`Channel`] without knowing channel size.
pub struct DynamicReceiver<'ch, T> {
channel: &'ch dyn DynamicChannel<T>,
pub(crate) channel: &'ch dyn DynamicChannel<T>,
}
impl<'ch, T> Clone for DynamicReceiver<'ch, T> {
@ -321,7 +321,7 @@ impl<'ch, T> Future for DynamicSendFuture<'ch, T> {
impl<'ch, T> Unpin for DynamicSendFuture<'ch, T> {}
trait DynamicChannel<T> {
pub(crate) trait DynamicChannel<T> {
fn try_send_with_context(&self, message: T, cx: Option<&mut Context<'_>>) -> Result<(), TrySendError<T>>;
fn try_receive_with_context(&self, cx: Option<&mut Context<'_>>) -> Result<T, TryReceiveError>;

View File

@ -15,6 +15,7 @@ pub mod blocking_mutex;
pub mod channel;
pub mod mutex;
pub mod pipe;
pub mod priority_channel;
pub mod pubsub;
pub mod signal;
pub mod waitqueue;

View File

@ -0,0 +1,613 @@
//! A queue for sending values between asynchronous tasks.
//!
//! Similar to a [`Channel`](crate::channel::Channel); however, a [`PriorityChannel`] sifts higher-priority items to the front of the queue.
//! Priority is determined by the `Ord` trait. Priority behavior is determined by the [`Kind`](heapless::binary_heap::Kind) parameter of the channel.
use core::cell::RefCell;
use core::future::Future;
use core::pin::Pin;
use core::task::{Context, Poll};
pub use heapless::binary_heap::{Kind, Max, Min};
use heapless::BinaryHeap;
use crate::blocking_mutex::raw::RawMutex;
use crate::blocking_mutex::Mutex;
use crate::channel::{DynamicChannel, DynamicReceiver, DynamicSender, TryReceiveError, TrySendError};
use crate::waitqueue::WakerRegistration;
/// Send-only access to a [`PriorityChannel`].
pub struct Sender<'ch, M, T, K, const N: usize>
where
T: Ord,
K: Kind,
M: RawMutex,
{
channel: &'ch PriorityChannel<M, T, K, N>,
}
impl<'ch, M, T, K, const N: usize> Clone for Sender<'ch, M, T, K, N>
where
T: Ord,
K: Kind,
M: RawMutex,
{
fn clone(&self) -> Self {
Sender { channel: self.channel }
}
}
impl<'ch, M, T, K, const N: usize> Copy for Sender<'ch, M, T, K, N>
where
T: Ord,
K: Kind,
M: RawMutex,
{
}
impl<'ch, M, T, K, const N: usize> Sender<'ch, M, T, K, N>
where
T: Ord,
K: Kind,
M: RawMutex,
{
/// Sends a value.
///
/// See [`PriorityChannel::send()`]
pub fn send(&self, message: T) -> SendFuture<'ch, M, T, K, N> {
self.channel.send(message)
}
/// Attempt to immediately send a message.
///
/// See [`PriorityChannel::send()`]
pub fn try_send(&self, message: T) -> Result<(), TrySendError<T>> {
self.channel.try_send(message)
}
/// Allows a poll_fn to poll until the channel is ready to send
///
/// See [`PriorityChannel::poll_ready_to_send()`]
pub fn poll_ready_to_send(&self, cx: &mut Context<'_>) -> Poll<()> {
self.channel.poll_ready_to_send(cx)
}
}
impl<'ch, M, T, K, const N: usize> From<Sender<'ch, M, T, K, N>> for DynamicSender<'ch, T>
where
T: Ord,
K: Kind,
M: RawMutex,
{
fn from(s: Sender<'ch, M, T, K, N>) -> Self {
Self { channel: s.channel }
}
}
/// Receive-only access to a [`PriorityChannel`].
pub struct Receiver<'ch, M, T, K, const N: usize>
where
T: Ord,
K: Kind,
M: RawMutex,
{
channel: &'ch PriorityChannel<M, T, K, N>,
}
impl<'ch, M, T, K, const N: usize> Clone for Receiver<'ch, M, T, K, N>
where
T: Ord,
K: Kind,
M: RawMutex,
{
fn clone(&self) -> Self {
Receiver { channel: self.channel }
}
}
impl<'ch, M, T, K, const N: usize> Copy for Receiver<'ch, M, T, K, N>
where
T: Ord,
K: Kind,
M: RawMutex,
{
}
impl<'ch, M, T, K, const N: usize> Receiver<'ch, M, T, K, N>
where
T: Ord,
K: Kind,
M: RawMutex,
{
/// Receive the next value.
///
/// See [`PriorityChannel::receive()`].
pub fn receive(&self) -> ReceiveFuture<'_, M, T, K, N> {
self.channel.receive()
}
/// Attempt to immediately receive the next value.
///
/// See [`PriorityChannel::try_receive()`]
pub fn try_receive(&self) -> Result<T, TryReceiveError> {
self.channel.try_receive()
}
/// Allows a poll_fn to poll until the channel is ready to receive
///
/// See [`PriorityChannel::poll_ready_to_receive()`]
pub fn poll_ready_to_receive(&self, cx: &mut Context<'_>) -> Poll<()> {
self.channel.poll_ready_to_receive(cx)
}
/// Poll the channel for the next item
///
/// See [`PriorityChannel::poll_receive()`]
pub fn poll_receive(&self, cx: &mut Context<'_>) -> Poll<T> {
self.channel.poll_receive(cx)
}
}
impl<'ch, M, T, K, const N: usize> From<Receiver<'ch, M, T, K, N>> for DynamicReceiver<'ch, T>
where
T: Ord,
K: Kind,
M: RawMutex,
{
fn from(s: Receiver<'ch, M, T, K, N>) -> Self {
Self { channel: s.channel }
}
}
/// Future returned by [`PriorityChannel::receive`] and [`Receiver::receive`].
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct ReceiveFuture<'ch, M, T, K, const N: usize>
where
T: Ord,
K: Kind,
M: RawMutex,
{
channel: &'ch PriorityChannel<M, T, K, N>,
}
impl<'ch, M, T, K, const N: usize> Future for ReceiveFuture<'ch, M, T, K, N>
where
T: Ord,
K: Kind,
M: RawMutex,
{
type Output = T;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
self.channel.poll_receive(cx)
}
}
/// Future returned by [`PriorityChannel::send`] and [`Sender::send`].
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct SendFuture<'ch, M, T, K, const N: usize>
where
T: Ord,
K: Kind,
M: RawMutex,
{
channel: &'ch PriorityChannel<M, T, K, N>,
message: Option<T>,
}
impl<'ch, M, T, K, const N: usize> Future for SendFuture<'ch, M, T, K, N>
where
T: Ord,
K: Kind,
M: RawMutex,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.message.take() {
Some(m) => match self.channel.try_send_with_context(m, Some(cx)) {
Ok(..) => Poll::Ready(()),
Err(TrySendError::Full(m)) => {
self.message = Some(m);
Poll::Pending
}
},
None => panic!("Message cannot be None"),
}
}
}
impl<'ch, M, T, K, const N: usize> Unpin for SendFuture<'ch, M, T, K, N>
where
T: Ord,
K: Kind,
M: RawMutex,
{
}
struct ChannelState<T, K, const N: usize> {
queue: BinaryHeap<T, K, N>,
receiver_waker: WakerRegistration,
senders_waker: WakerRegistration,
}
impl<T, K, const N: usize> ChannelState<T, K, N>
where
T: Ord,
K: Kind,
{
const fn new() -> Self {
ChannelState {
queue: BinaryHeap::new(),
receiver_waker: WakerRegistration::new(),
senders_waker: WakerRegistration::new(),
}
}
fn try_receive(&mut self) -> Result<T, TryReceiveError> {
self.try_receive_with_context(None)
}
fn try_receive_with_context(&mut self, cx: Option<&mut Context<'_>>) -> Result<T, TryReceiveError> {
if self.queue.len() == self.queue.capacity() {
self.senders_waker.wake();
}
if let Some(message) = self.queue.pop() {
Ok(message)
} else {
if let Some(cx) = cx {
self.receiver_waker.register(cx.waker());
}
Err(TryReceiveError::Empty)
}
}
fn poll_receive(&mut self, cx: &mut Context<'_>) -> Poll<T> {
if self.queue.len() == self.queue.capacity() {
self.senders_waker.wake();
}
if let Some(message) = self.queue.pop() {
Poll::Ready(message)
} else {
self.receiver_waker.register(cx.waker());
Poll::Pending
}
}
fn poll_ready_to_receive(&mut self, cx: &mut Context<'_>) -> Poll<()> {
self.receiver_waker.register(cx.waker());
if !self.queue.is_empty() {
Poll::Ready(())
} else {
Poll::Pending
}
}
fn try_send(&mut self, message: T) -> Result<(), TrySendError<T>> {
self.try_send_with_context(message, None)
}
fn try_send_with_context(&mut self, message: T, cx: Option<&mut Context<'_>>) -> Result<(), TrySendError<T>> {
match self.queue.push(message) {
Ok(()) => {
self.receiver_waker.wake();
Ok(())
}
Err(message) => {
if let Some(cx) = cx {
self.senders_waker.register(cx.waker());
}
Err(TrySendError::Full(message))
}
}
}
fn poll_ready_to_send(&mut self, cx: &mut Context<'_>) -> Poll<()> {
self.senders_waker.register(cx.waker());
if self.queue.len() < self.queue.capacity() {
Poll::Ready(())
} else {
Poll::Pending
}
}
}
/// A bounded channel for communicating between asynchronous tasks
/// with backpressure.
///
/// The channel will buffer up to the provided number of messages. Once the
/// buffer is full, attempts to `send` new messages will wait until a message is
/// received from the channel.
///
/// Sent data may be reordered based on its priority within the channel.
/// For example, in a [`Max`](heapless::binary_heap::Max) [`PriorityChannel`]
/// containing `u32`s, data sent in the order `[1, 2, 3]` will be received as `[3, 2, 1]`.
pub struct PriorityChannel<M, T, K, const N: usize>
where
T: Ord,
K: Kind,
M: RawMutex,
{
inner: Mutex<M, RefCell<ChannelState<T, K, N>>>,
}
impl<M, T, K, const N: usize> PriorityChannel<M, T, K, N>
where
T: Ord,
K: Kind,
M: RawMutex,
{
/// Establish a new bounded channel. For example, to create one with a NoopMutex:
///
/// ```
/// use embassy_sync::priority_channel::{PriorityChannel, Max};
/// use embassy_sync::blocking_mutex::raw::NoopRawMutex;
///
/// // Declare a bounded channel of 3 u32s.
/// let mut channel = PriorityChannel::<NoopRawMutex, u32, Max, 3>::new();
/// ```
pub const fn new() -> Self {
Self {
inner: Mutex::new(RefCell::new(ChannelState::new())),
}
}
fn lock<R>(&self, f: impl FnOnce(&mut ChannelState<T, K, N>) -> R) -> R {
self.inner.lock(|rc| f(&mut *unwrap!(rc.try_borrow_mut())))
}
fn try_receive_with_context(&self, cx: Option<&mut Context<'_>>) -> Result<T, TryReceiveError> {
self.lock(|c| c.try_receive_with_context(cx))
}
/// Poll the channel for the next message
pub fn poll_receive(&self, cx: &mut Context<'_>) -> Poll<T> {
self.lock(|c| c.poll_receive(cx))
}
fn try_send_with_context(&self, m: T, cx: Option<&mut Context<'_>>) -> Result<(), TrySendError<T>> {
self.lock(|c| c.try_send_with_context(m, cx))
}
/// Allows a poll_fn to poll until the channel is ready to receive
pub fn poll_ready_to_receive(&self, cx: &mut Context<'_>) -> Poll<()> {
self.lock(|c| c.poll_ready_to_receive(cx))
}
/// Allows a poll_fn to poll until the channel is ready to send
pub fn poll_ready_to_send(&self, cx: &mut Context<'_>) -> Poll<()> {
self.lock(|c| c.poll_ready_to_send(cx))
}
/// Get a sender for this channel.
pub fn sender(&self) -> Sender<'_, M, T, K, N> {
Sender { channel: self }
}
/// Get a receiver for this channel.
pub fn receiver(&self) -> Receiver<'_, M, T, K, N> {
Receiver { channel: self }
}
/// Send a value, waiting until there is capacity.
///
/// Sending completes when the value has been pushed to the channel's queue.
/// This doesn't mean the value has been received yet.
pub fn send(&self, message: T) -> SendFuture<'_, M, T, K, N> {
SendFuture {
channel: self,
message: Some(message),
}
}
/// Attempt to immediately send a message.
///
/// This method differs from [`send`](PriorityChannel::send) by returning immediately if the channel's
/// buffer is full, instead of waiting.
///
/// # Errors
///
/// If the channel capacity has been reached, i.e., the channel has `n`
/// buffered values where `n` is the argument passed to [`PriorityChannel`], then an
/// error is returned.
pub fn try_send(&self, message: T) -> Result<(), TrySendError<T>> {
self.lock(|c| c.try_send(message))
}
/// Receive the next value.
///
/// If there are no messages in the channel's buffer, this method will
/// wait until a message is sent.
pub fn receive(&self) -> ReceiveFuture<'_, M, T, K, N> {
ReceiveFuture { channel: self }
}
/// Attempt to immediately receive a message.
///
/// This method will either receive a message from the channel immediately or return an error
/// if the channel is empty.
pub fn try_receive(&self) -> Result<T, TryReceiveError> {
self.lock(|c| c.try_receive())
}
}
/// Implements the DynamicChannel to allow creating types that are unaware of the queue size with the
/// tradeoff cost of dynamic dispatch.
impl<M, T, K, const N: usize> DynamicChannel<T> for PriorityChannel<M, T, K, N>
where
T: Ord,
K: Kind,
M: RawMutex,
{
fn try_send_with_context(&self, m: T, cx: Option<&mut Context<'_>>) -> Result<(), TrySendError<T>> {
PriorityChannel::try_send_with_context(self, m, cx)
}
fn try_receive_with_context(&self, cx: Option<&mut Context<'_>>) -> Result<T, TryReceiveError> {
PriorityChannel::try_receive_with_context(self, cx)
}
fn poll_ready_to_send(&self, cx: &mut Context<'_>) -> Poll<()> {
PriorityChannel::poll_ready_to_send(self, cx)
}
fn poll_ready_to_receive(&self, cx: &mut Context<'_>) -> Poll<()> {
PriorityChannel::poll_ready_to_receive(self, cx)
}
fn poll_receive(&self, cx: &mut Context<'_>) -> Poll<T> {
PriorityChannel::poll_receive(self, cx)
}
}
#[cfg(test)]
mod tests {
use core::time::Duration;
use futures_executor::ThreadPool;
use futures_timer::Delay;
use futures_util::task::SpawnExt;
use heapless::binary_heap::{Kind, Max};
use static_cell::StaticCell;
use super::*;
use crate::blocking_mutex::raw::{CriticalSectionRawMutex, NoopRawMutex};
fn capacity<T, K, const N: usize>(c: &ChannelState<T, K, N>) -> usize
where
T: Ord,
K: Kind,
{
c.queue.capacity() - c.queue.len()
}
#[test]
fn sending_once() {
let mut c = ChannelState::<u32, Max, 3>::new();
assert!(c.try_send(1).is_ok());
assert_eq!(capacity(&c), 2);
}
#[test]
fn sending_when_full() {
let mut c = ChannelState::<u32, Max, 3>::new();
let _ = c.try_send(1);
let _ = c.try_send(1);
let _ = c.try_send(1);
match c.try_send(2) {
Err(TrySendError::Full(2)) => assert!(true),
_ => assert!(false),
}
assert_eq!(capacity(&c), 0);
}
#[test]
fn send_priority() {
// Prio channel with kind `Max` sifts larger numbers to the front of the queue
let mut c = ChannelState::<u32, Max, 3>::new();
assert!(c.try_send(1).is_ok());
assert!(c.try_send(2).is_ok());
assert!(c.try_send(3).is_ok());
assert_eq!(c.try_receive().unwrap(), 3);
assert_eq!(c.try_receive().unwrap(), 2);
assert_eq!(c.try_receive().unwrap(), 1);
}
#[test]
fn receiving_once_with_one_send() {
let mut c = ChannelState::<u32, Max, 3>::new();
assert!(c.try_send(1).is_ok());
assert_eq!(c.try_receive().unwrap(), 1);
assert_eq!(capacity(&c), 3);
}
#[test]
fn receiving_when_empty() {
let mut c = ChannelState::<u32, Max, 3>::new();
match c.try_receive() {
Err(TryReceiveError::Empty) => assert!(true),
_ => assert!(false),
}
assert_eq!(capacity(&c), 3);
}
#[test]
fn simple_send_and_receive() {
let c = PriorityChannel::<NoopRawMutex, u32, Max, 3>::new();
assert!(c.try_send(1).is_ok());
assert_eq!(c.try_receive().unwrap(), 1);
}
#[test]
fn cloning() {
let c = PriorityChannel::<NoopRawMutex, u32, Max, 3>::new();
let r1 = c.receiver();
let s1 = c.sender();
let _ = r1.clone();
let _ = s1.clone();
}
#[test]
fn dynamic_dispatch() {
let c = PriorityChannel::<NoopRawMutex, u32, Max, 3>::new();
let s: DynamicSender<'_, u32> = c.sender().into();
let r: DynamicReceiver<'_, u32> = c.receiver().into();
assert!(s.try_send(1).is_ok());
assert_eq!(r.try_receive().unwrap(), 1);
}
#[futures_test::test]
async fn receiver_receives_given_try_send_async() {
let executor = ThreadPool::new().unwrap();
static CHANNEL: StaticCell<PriorityChannel<CriticalSectionRawMutex, u32, Max, 3>> = StaticCell::new();
let c = &*CHANNEL.init(PriorityChannel::new());
let c2 = c;
assert!(executor
.spawn(async move {
assert!(c2.try_send(1).is_ok());
})
.is_ok());
assert_eq!(c.receive().await, 1);
}
#[futures_test::test]
async fn sender_send_completes_if_capacity() {
let c = PriorityChannel::<CriticalSectionRawMutex, u32, Max, 1>::new();
c.send(1).await;
assert_eq!(c.receive().await, 1);
}
#[futures_test::test]
async fn senders_sends_wait_until_capacity() {
let executor = ThreadPool::new().unwrap();
static CHANNEL: StaticCell<PriorityChannel<CriticalSectionRawMutex, u32, Max, 1>> = StaticCell::new();
let c = &*CHANNEL.init(PriorityChannel::new());
assert!(c.try_send(1).is_ok());
let c2 = c;
let send_task_1 = executor.spawn_with_handle(async move { c2.send(2).await });
let c2 = c;
let send_task_2 = executor.spawn_with_handle(async move { c2.send(3).await });
// Wish I could think of a means of determining that the async send is waiting instead.
// However, I've used the debugger to observe that the send does indeed wait.
Delay::new(Duration::from_millis(500)).await;
assert_eq!(c.receive().await, 1);
assert!(executor
.spawn(async move {
loop {
c.receive().await;
}
})
.is_ok());
send_task_1.unwrap().await;
send_task_2.unwrap().await;
}
}

View File

@ -5,7 +5,19 @@ MEMORY
BOOTLOADER_STATE : ORIGIN = 0x10006000, LENGTH = 4K
FLASH : ORIGIN = 0x10007000, LENGTH = 512K
DFU : ORIGIN = 0x10087000, LENGTH = 516K
RAM : ORIGIN = 0x20000000, LENGTH = 256K
/* Pick one of the two options for RAM layout */
/* OPTION A: Use all RAM banks as one big block */
/* Reasonable, unless you are doing something */
/* really particular with DMA or other concurrent */
/* access that would benefit from striping */
RAM : ORIGIN = 0x20000000, LENGTH = 264K
/* OPTION B: Keep the unstriped sections separate */
/* RAM: ORIGIN = 0x20000000, LENGTH = 256K */
/* SCRATCH_A: ORIGIN = 0x20040000, LENGTH = 4K */
/* SCRATCH_B: ORIGIN = 0x20041000, LENGTH = 4K */
}
__bootloader_state_start = ORIGIN(BOOTLOADER_STATE) - ORIGIN(BOOT2);

View File

@ -6,7 +6,19 @@ MEMORY
BOOTLOADER_STATE : ORIGIN = 0x10006000, LENGTH = 4K
ACTIVE : ORIGIN = 0x10007000, LENGTH = 512K
DFU : ORIGIN = 0x10087000, LENGTH = 516K
RAM : ORIGIN = 0x20000000, LENGTH = 256K
/* Pick one of the two options for RAM layout */
/* OPTION A: Use all RAM banks as one big block */
/* Reasonable, unless you are doing something */
/* really particular with DMA or other concurrent */
/* access that would benefit from striping */
RAM : ORIGIN = 0x20000000, LENGTH = 264K
/* OPTION B: Keep the unstriped sections separate */
/* RAM: ORIGIN = 0x20000000, LENGTH = 256K */
/* SCRATCH_A: ORIGIN = 0x20040000, LENGTH = 4K */
/* SCRATCH_B: ORIGIN = 0x20041000, LENGTH = 4K */
}
__bootloader_state_start = ORIGIN(BOOTLOADER_STATE) - ORIGIN(BOOT2);

View File

@ -1,5 +1,17 @@
MEMORY {
BOOT2 : ORIGIN = 0x10000000, LENGTH = 0x100
FLASH : ORIGIN = 0x10000100, LENGTH = 2048K - 0x100
RAM : ORIGIN = 0x20000000, LENGTH = 256K
}
/* Pick one of the two options for RAM layout */
/* OPTION A: Use all RAM banks as one big block */
/* Reasonable, unless you are doing something */
/* really particular with DMA or other concurrent */
/* access that would benefit from striping */
RAM : ORIGIN = 0x20000000, LENGTH = 264K
/* OPTION B: Keep the unstriped sections separate */
/* RAM: ORIGIN = 0x20000000, LENGTH = 256K */
/* SCRATCH_A: ORIGIN = 0x20040000, LENGTH = 4K */
/* SCRATCH_B: ORIGIN = 0x20041000, LENGTH = 4K */
}

View File

@ -39,7 +39,7 @@ async fn main(_spawner: Spawner) -> ! {
let mut vrefint = adc.enable_vref(&mut Delay);
let mut temperature = adc.enable_temperature();
let mut buffer = opamp.buffer_ext(&p.PA7, &mut p.PA6, OpAmpGain::Mul1);
let mut buffer = opamp.buffer_ext(&mut p.PA7, &mut p.PA6, OpAmpGain::Mul1);
loop {
let vref = adc.read(&mut vrefint).await;