embassy/embassy-stm32/src/dma/bdma.rs

//! Basic Direct Memory Access (BDMA)

use core::future::Future;
use core::pin::Pin;
use core::sync::atomic::{fence, AtomicUsize, Ordering};
use core::task::{Context, Poll, Waker};

use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
use embassy_sync::waitqueue::AtomicWaker;

use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer};
use super::word::{Word, WordSize};
use super::Dir;
use crate::_generated::BDMA_CHANNEL_COUNT;
use crate::interrupt::typelevel::Interrupt;
use crate::interrupt::Priority;
use crate::pac;
use crate::pac::bdma::{regs, vals};

/// BDMA transfer options.
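///
/// A minimal usage sketch: the struct is `#[non_exhaustive]`, so start from `Default`
/// and set the fields you need.
///
/// ```rust,ignore
/// let mut opts = TransferOptions::default();
/// opts.circular = true;         // run the channel in circular mode
/// opts.half_transfer_ir = true; // also wake on the half-transfer interrupt
/// ```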
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
    /// Enable circular DMA
    pub circular: bool,
    /// Enable half transfer interrupt
    pub half_transfer_ir: bool,
    /// Enable transfer complete interrupt
    pub complete_transfer_ir: bool,
}

impl Default for TransferOptions {
    fn default() -> Self {
        Self {
            circular: false,
            half_transfer_ir: false,
            complete_transfer_ir: true,
        }
    }
}

impl From<WordSize> for vals::Size {
    fn from(raw: WordSize) -> Self {
        match raw {
            WordSize::OneByte => Self::BITS8,
            WordSize::TwoBytes => Self::BITS16,
            WordSize::FourBytes => Self::BITS32,
        }
    }
}

impl From<Dir> for vals::Dir {
    fn from(raw: Dir) -> Self {
        match raw {
            Dir::MemoryToPeripheral => Self::FROMMEMORY,
            Dir::PeripheralToMemory => Self::FROMPERIPHERAL,
        }
    }
}

struct State {
    ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT],
    complete_count: [AtomicUsize; BDMA_CHANNEL_COUNT],
}

impl State {
    const fn new() -> Self {
        const ZERO: AtomicUsize = AtomicUsize::new(0);
        const AW: AtomicWaker = AtomicWaker::new();
        Self {
            ch_wakers: [AW; BDMA_CHANNEL_COUNT],
            complete_count: [ZERO; BDMA_CHANNEL_COUNT],
        }
    }
}

static STATE: State = State::new();

/// safety: must be called only once
pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) {
    foreach_interrupt! {
        ($peri:ident, bdma, $block:ident, $signal_name:ident, $irq:ident) => {
            crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
            crate::interrupt::typelevel::$irq::enable();
        };
    }
    crate::_generated::init_bdma();
}

foreach_dma_channel! {
    ($channel_peri:ident, BDMA1, bdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
        // BDMA1 in H7 doesn't use DMAMUX, which breaks
    };
    ($channel_peri:ident, $dma_peri:ident, bdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
        impl sealed::Channel for crate::peripherals::$channel_peri {
            fn regs(&self) -> pac::bdma::Dma {
                pac::$dma_peri
            }
            fn num(&self) -> usize {
                $channel_num
            }
            fn index(&self) -> usize {
                $index
            }
            fn on_irq() {
                unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
            }
        }

        impl Channel for crate::peripherals::$channel_peri {}
    };
}

/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index: usize) {
    let isr = dma.isr().read();
    let cr = dma.ch(channel_num).cr();

    if isr.teif(channel_num) {
        panic!("DMA: error on BDMA@{:08x} channel {}", dma.as_ptr() as u32, channel_num);
    }

    if isr.htif(channel_num) && cr.read().htie() {
        // Acknowledge half transfer complete interrupt
        dma.ifcr().write(|w| w.set_htif(channel_num, true));
    } else if isr.tcif(channel_num) && cr.read().tcie() {
        // Acknowledge transfer complete interrupt
        dma.ifcr().write(|w| w.set_tcif(channel_num, true));

        #[cfg(not(armv6m))]
        STATE.complete_count[index].fetch_add(1, Ordering::Release);
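        // armv6m (Cortex-M0/M0+) has no atomic read-modify-write instructions, so the
        // increment is emulated inside a critical section instead of using `fetch_add`.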
        #[cfg(armv6m)]
        critical_section::with(|_| {
            let x = STATE.complete_count[index].load(Ordering::Relaxed);
            STATE.complete_count[index].store(x + 1, Ordering::Release);
        })
    } else {
        return;
    }

    STATE.ch_wakers[index].wake();
}

/// DMA request type alias.
#[cfg(any(bdma_v2, dmamux))]
pub type Request = u8;

/// DMA request type alias.
#[cfg(not(any(bdma_v2, dmamux)))]
pub type Request = ();

/// DMA channel.
#[cfg(dmamux)]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {}

/// DMA channel.
#[cfg(not(dmamux))]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}

pub(crate) mod sealed {
    use super::*;

    pub trait Channel {
        fn regs(&self) -> pac::bdma::Dma;
        fn num(&self) -> usize;
        fn index(&self) -> usize;
        fn on_irq();
    }
}

/// DMA transfer.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a, C: Channel> {
    channel: PeripheralRef<'a, C>,
}

impl<'a, C: Channel> Transfer<'a, C> {
    /// Create a new read DMA transfer (peripheral to memory).
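    ///
    /// A minimal usage sketch, assuming a peripheral-to-memory request; the channel
    /// (`p.DMA1_CH1`), request number and `rx_dr_ptr` data-register pointer are
    /// illustrative placeholders:
    ///
    /// ```rust,ignore
    /// let mut buf = [0u8; 64];
    /// let transfer = unsafe {
    ///     Transfer::new_read(p.DMA1_CH1, rx_request, rx_dr_ptr, &mut buf, TransferOptions::default())
    /// };
    /// transfer.await; // completes once the buffer has been filled
    /// ```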
    pub unsafe fn new_read<W: Word>(
        channel: impl Peripheral<P = C> + 'a,
        request: Request,
        peri_addr: *mut W,
        buf: &'a mut [W],
        options: TransferOptions,
    ) -> Self {
        Self::new_read_raw(channel, request, peri_addr, buf, options)
    }

    /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
    pub unsafe fn new_read_raw<W: Word>(
        channel: impl Peripheral<P = C> + 'a,
        request: Request,
        peri_addr: *mut W,
        buf: *mut [W],
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        let (ptr, len) = super::slice_ptr_parts_mut(buf);
        assert!(len > 0 && len <= 0xFFFF);

        Self::new_inner(
            channel,
            request,
            Dir::PeripheralToMemory,
            peri_addr as *const u32,
            ptr as *mut u32,
            len,
            true,
            W::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral).
    pub unsafe fn new_write<W: Word>(
        channel: impl Peripheral<P = C> + 'a,
        request: Request,
        buf: &'a [W],
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        Self::new_write_raw(channel, request, buf, peri_addr, options)
    }

    /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
    pub unsafe fn new_write_raw<W: Word>(
        channel: impl Peripheral<P = C> + 'a,
        request: Request,
        buf: *const [W],
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        let (ptr, len) = super::slice_ptr_parts(buf);
        assert!(len > 0 && len <= 0xFFFF);

        Self::new_inner(
            channel,
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            ptr as *mut u32,
            len,
            true,
            W::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
    pub unsafe fn new_write_repeated<W: Word>(
        channel: impl Peripheral<P = C> + 'a,
        request: Request,
        repeated: &'a W,
        count: usize,
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel,
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            repeated as *const W as *mut u32,
            count,
            false,
            W::size(),
            options,
        )
    }

    unsafe fn new_inner(
        channel: PeripheralRef<'a, C>,
        _request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        options: TransferOptions,
    ) -> Self {
        let ch = channel.regs().ch(channel.num());

        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::SeqCst);

        #[cfg(bdma_v2)]
        critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));

        let mut this = Self { channel };
        this.clear_irqs();
        STATE.complete_count[this.channel.index()].store(0, Ordering::Release);

        #[cfg(dmamux)]
        super::dmamux::configure_dmamux(&mut *this.channel, _request);

        ch.par().write_value(peri_addr as u32);
        ch.mar().write_value(mem_addr as u32);
        ch.ndtr().write(|w| w.set_ndt(mem_len as u16));
        ch.cr().write(|w| {
            w.set_psize(data_size.into());
            w.set_msize(data_size.into());
            w.set_minc(incr_mem);
            w.set_dir(dir.into());
            w.set_teie(true);
            w.set_tcie(options.complete_transfer_ir);
            w.set_htie(options.half_transfer_ir);
            w.set_circ(options.circular);
            if options.circular {
                debug!("Setting circular mode");
            }
            w.set_pl(vals::Pl::VERYHIGH);
            w.set_en(true);
        });

        this
    }

    fn clear_irqs(&mut self) {
        self.channel.regs().ifcr().write(|w| {
            w.set_tcif(self.channel.num(), true);
            w.set_teif(self.channel.num(), true);
        });
    }

    /// Request the transfer to stop.
    ///
    /// This doesn't immediately stop the transfer; you must wait until [`is_running`](Self::is_running) returns `false`.
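    ///
    /// A minimal sketch of the stop-and-wait pattern:
    ///
    /// ```rust,ignore
    /// transfer.request_stop();
    /// while transfer.is_running() {}
    /// // the channel is now disabled
    /// ```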
    pub fn request_stop(&mut self) {
        let ch = self.channel.regs().ch(self.channel.num());

        // Disable the channel. Keep the IEs enabled so the irqs still fire.
        ch.cr().write(|w| {
            w.set_teie(true);
            w.set_tcie(true);
        });
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        let ch = self.channel.regs().ch(self.channel.num());
        let en = ch.cr().read().en();
        let circular = ch.cr().read().circ();
        let tcif = STATE.complete_count[self.channel.index()].load(Ordering::Acquire) != 0;
        en && (circular || !tcif)
    }

    /// Get the total remaining transfers for the channel.
    ///
    /// This will be zero for transfers that completed instead of being canceled with [`request_stop`](Self::request_stop).
    pub fn get_remaining_transfers(&self) -> u16 {
        let ch = self.channel.regs().ch(self.channel.num());
        ch.ndtr().read().ndt()
    }

    /// Blocking wait until the transfer finishes.
    pub fn blocking_wait(mut self) {
        while self.is_running() {}
        self.request_stop();

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);

        core::mem::forget(self);
    }
}

impl<'a, C: Channel> Drop for Transfer<'a, C> {
    fn drop(&mut self) {
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}

impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
impl<'a, C: Channel> Future for Transfer<'a, C> {
    type Output = ();
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        STATE.ch_wakers[self.channel.index()].register(cx.waker());

        if self.is_running() {
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}

// ==============================

struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);

impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
    fn get_remaining_transfers(&self) -> usize {
        let ch = self.0.regs().ch(self.0.num());
        ch.ndtr().read().ndt() as usize
    }

    fn get_complete_count(&self) -> usize {
        STATE.complete_count[self.0.index()].load(Ordering::Acquire)
    }

    fn reset_complete_count(&mut self) -> usize {
        #[cfg(not(armv6m))]
        return STATE.complete_count[self.0.index()].swap(0, Ordering::AcqRel);
        #[cfg(armv6m)]
        return critical_section::with(|_| {
            let x = STATE.complete_count[self.0.index()].load(Ordering::Acquire);
            STATE.complete_count[self.0.index()].store(0, Ordering::Release);
            x
        });
    }

    fn set_waker(&mut self, waker: &Waker) {
        STATE.ch_wakers[self.0.index()].register(waker);
    }
}

/// Ringbuffer for reading data using DMA circular mode.
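///
/// A minimal lifecycle sketch, assuming a peripheral-to-memory request; the channel,
/// request number and `rx_dr_ptr` register pointer are illustrative placeholders:
///
/// ```rust,ignore
/// let mut dma_buf = [0u16; 128];
/// let mut ring = unsafe {
///     ReadableRingBuffer::new(p.DMA1_CH1, rx_request, rx_dr_ptr, &mut dma_buf, Default::default())
/// };
/// ring.start();
/// // ... drain it with `read` / `read_exact` while the DMA keeps filling the buffer ...
/// ```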
pub struct ReadableRingBuffer<'a, C: Channel, W: Word> {
    cr: regs::Cr,
    channel: PeripheralRef<'a, C>,
    ringbuf: ReadableDmaRingBuffer<'a, W>,
}

impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> {
    /// Create a new ring buffer.
    pub unsafe fn new(
        channel: impl Peripheral<P = C> + 'a,
        _request: Request,
        peri_addr: *mut W,
        buffer: &'a mut [W],
        _options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        let len = buffer.len();
        assert!(len > 0 && len <= 0xFFFF);

        let dir = Dir::PeripheralToMemory;
        let data_size = W::size();

        let channel_number = channel.num();
        let dma = channel.regs();

        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::SeqCst);

        #[cfg(bdma_v2)]
        critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));

        let mut w = regs::Cr(0);
        w.set_psize(data_size.into());
        w.set_msize(data_size.into());
        w.set_minc(true);
        w.set_dir(dir.into());
        w.set_teie(true);
        w.set_htie(true);
        w.set_tcie(true);
        w.set_circ(true);
        w.set_pl(vals::Pl::VERYHIGH);
        w.set_en(true);

        let buffer_ptr = buffer.as_mut_ptr();
        let mut this = Self {
            channel,
            cr: w,
            ringbuf: ReadableDmaRingBuffer::new(buffer),
        };
        this.clear_irqs();

        #[cfg(dmamux)]
        super::dmamux::configure_dmamux(&mut *this.channel, _request);

        let ch = dma.ch(channel_number);
        ch.par().write_value(peri_addr as u32);
        ch.mar().write_value(buffer_ptr as u32);
        ch.ndtr().write(|w| w.set_ndt(len as u16));

        this
    }

    /// Start the ring buffer operation.
    ///
    /// You must call this after creating it for it to work.
    pub fn start(&mut self) {
        let ch = self.channel.regs().ch(self.channel.num());
        ch.cr().write_value(self.cr)
    }

    /// Clear all data in the ring buffer.
    pub fn clear(&mut self) {
        self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
    }

    /// Read elements from the ring buffer.
    ///
    /// Returns a tuple of the length read and the length remaining in the buffer.
    /// If not all of the elements were read, then there will be some elements in the buffer remaining.
    /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read.
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
        self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
    }

    /// Read an exact number of elements from the ringbuffer.
    ///
    /// Returns the remaining number of elements available for immediate reading.
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    ///
    /// Async/Wake Behavior:
    /// The underlying DMA peripheral can only wake us when its buffer pointer has reached the halfway point,
    /// and when it wraps around. This means that when called with a buffer of length 'M', when this
    /// ring buffer was created with a buffer of size 'N':
    /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
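    ///
    /// A minimal usage sketch; `ring` and `process` are illustrative placeholders:
    ///
    /// ```rust,ignore
    /// let mut chunk = [0u16; 32];
    /// loop {
    ///     match ring.read_exact(&mut chunk).await {
    ///         Ok(_remaining) => process(&chunk),
    ///         Err(_) => ring.clear(), // fell behind the DMA; drop stale data and resync
    ///     }
    /// }
    /// ```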
    pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, OverrunError> {
        self.ringbuf
            .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
            .await
    }

    /// The capacity of the ringbuffer.
    pub const fn capacity(&self) -> usize {
        self.ringbuf.cap()
    }

    /// Set a waker to be woken when at least one byte is received.
    pub fn set_waker(&mut self, waker: &Waker) {
        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
    }

    fn clear_irqs(&mut self) {
        let dma = self.channel.regs();
        dma.ifcr().write(|w| {
            w.set_htif(self.channel.num(), true);
            w.set_tcif(self.channel.num(), true);
            w.set_teif(self.channel.num(), true);
        });
    }

    /// Request DMA to stop.
    ///
    /// This doesn't immediately stop the transfer; you must wait until [`is_running`](Self::is_running) returns `false`.
    pub fn request_stop(&mut self) {
        let ch = self.channel.regs().ch(self.channel.num());

        // Disable the channel. Keep the IEs enabled so the irqs still fire.
        // If the channel is enabled and the transfer is not completed, we need to perform
        // two separate write accesses to the CR register to disable the channel.
        ch.cr().write(|w| {
            w.set_teie(true);
            w.set_htie(true);
            w.set_tcie(true);
        });
    }

    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        let ch = self.channel.regs().ch(self.channel.num());
        ch.cr().read().en()
    }
}

impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> {
    fn drop(&mut self) {
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}

/// Ringbuffer for writing data using DMA circular mode.
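///
/// A minimal lifecycle sketch, assuming a memory-to-peripheral request; the channel,
/// request number, `tx_dr_ptr` register pointer and `samples` buffer are illustrative placeholders:
///
/// ```rust,ignore
/// let mut dma_buf = [0u16; 128];
/// let mut ring = unsafe {
///     WritableRingBuffer::new(p.DMA1_CH2, tx_request, tx_dr_ptr, &mut dma_buf, Default::default())
/// };
/// ring.start();
/// ring.write_exact(&samples).await?; // waits until everything has been queued into the ring
/// ```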
pub struct WritableRingBuffer<'a, C: Channel, W: Word> {
    cr: regs::Cr,
    channel: PeripheralRef<'a, C>,
    ringbuf: WritableDmaRingBuffer<'a, W>,
}

impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> {
    /// Create a new ring buffer.
    pub unsafe fn new(
        channel: impl Peripheral<P = C> + 'a,
        _request: Request,
        peri_addr: *mut W,
        buffer: &'a mut [W],
        _options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        let len = buffer.len();
        assert!(len > 0 && len <= 0xFFFF);

        let dir = Dir::MemoryToPeripheral;
        let data_size = W::size();

        let channel_number = channel.num();
        let dma = channel.regs();

        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::SeqCst);

        #[cfg(bdma_v2)]
        critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));

        let mut w = regs::Cr(0);
        w.set_psize(data_size.into());
        w.set_msize(data_size.into());
        w.set_minc(true);
        w.set_dir(dir.into());
        w.set_teie(true);
        w.set_htie(true);
        w.set_tcie(true);
        w.set_circ(true);
        w.set_pl(vals::Pl::VERYHIGH);
        w.set_en(true);

        let buffer_ptr = buffer.as_mut_ptr();
        let mut this = Self {
            channel,
            cr: w,
            ringbuf: WritableDmaRingBuffer::new(buffer),
        };
        this.clear_irqs();

        #[cfg(dmamux)]
        super::dmamux::configure_dmamux(&mut *this.channel, _request);

        let ch = dma.ch(channel_number);
        ch.par().write_value(peri_addr as u32);
        ch.mar().write_value(buffer_ptr as u32);
        ch.ndtr().write(|w| w.set_ndt(len as u16));

        this
    }

    /// Start the ring buffer operation.
    ///
    /// You must call this after creating it for it to work.
    pub fn start(&mut self) {
        let ch = self.channel.regs().ch(self.channel.num());
        ch.cr().write_value(self.cr)
    }

    /// Clear all data in the ring buffer.
    pub fn clear(&mut self) {
        self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
    }

    /// Write elements to the ring buffer.
    ///
    /// Returns a tuple of the length written and the length remaining in the buffer.
    pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
        self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
    }

    /// Write an exact number of elements to the ringbuffer.
    pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
        self.ringbuf
            .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
            .await
    }

    /// The capacity of the ringbuffer.
    pub const fn capacity(&self) -> usize {
        self.ringbuf.cap()
    }

    /// Set a waker to be woken when at least one byte is sent.
    pub fn set_waker(&mut self, waker: &Waker) {
        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
    }

    fn clear_irqs(&mut self) {
        let dma = self.channel.regs();
        dma.ifcr().write(|w| {
            w.set_htif(self.channel.num(), true);
            w.set_tcif(self.channel.num(), true);
            w.set_teif(self.channel.num(), true);
        });
    }

    /// Request DMA to stop.
    ///
    /// This doesn't immediately stop the transfer; you must wait until [`is_running`](Self::is_running) returns `false`.
    pub fn request_stop(&mut self) {
        let ch = self.channel.regs().ch(self.channel.num());

        // Disable the channel. Keep the IEs enabled so the irqs still fire.
        // If the channel is enabled and the transfer is not completed, we need to perform
        // two separate write accesses to the CR register to disable the channel.
        ch.cr().write(|w| {
            w.set_teie(true);
            w.set_htie(true);
            w.set_tcie(true);
        });
    }

    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        let ch = self.channel.regs().ch(self.channel.num());
        ch.cr().read().en()
    }
}

impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> {
    fn drop(&mut self) {
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}