diff --git a/embassy-stm32/src/dma/bdma.rs b/embassy-stm32/src/dma/bdma.rs
index 7b5008f0..2905338d 100644
--- a/embassy-stm32/src/dma/bdma.rs
+++ b/embassy-stm32/src/dma/bdma.rs
@@ -9,7 +9,7 @@ use atomic_polyfill::AtomicUsize;
 use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
 use embassy_sync::waitqueue::AtomicWaker;
 
-use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer};
+use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer};
 use super::word::{Word, WordSize};
 use super::Dir;
 use crate::_generated::BDMA_CHANNEL_COUNT;
@@ -559,3 +559,155 @@ impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> {
         fence(Ordering::SeqCst);
     }
 }
+
+pub struct WritableRingBuffer<'a, C: Channel, W: Word> {
+    cr: regs::Cr,
+    channel: PeripheralRef<'a, C>,
+    ringbuf: WritableDmaRingBuffer<'a, W>,
+}
+
+impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> {
+    pub unsafe fn new_write(
+        channel: impl Peripheral<P = C> + 'a,
+        _request: Request,
+        peri_addr: *mut W,
+        buffer: &'a mut [W],
+        _options: TransferOptions,
+    ) -> Self {
+        into_ref!(channel);
+
+        let len = buffer.len();
+        assert!(len > 0 && len <= 0xFFFF);
+
+        let dir = Dir::MemoryToPeripheral;
+        let data_size = W::size();
+
+        let channel_number = channel.num();
+        let dma = channel.regs();
+
+        // "Preceding reads and writes cannot be moved past subsequent writes."
+        fence(Ordering::SeqCst);
+
+        #[cfg(bdma_v2)]
+        critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
+
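+        // Configure the channel for circular memory-to-peripheral operation.
+        // The half-transfer and transfer-complete interrupts fire as the DMA
+        // wraps around the buffer, which is how the ring buffer tracks the
+        // hardware's position. EN is included so `start()` can kick off the
+        // transfer with a single register write.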
+        let mut w = regs::Cr(0);
+        w.set_psize(data_size.into());
+        w.set_msize(data_size.into());
+        w.set_minc(vals::Inc::ENABLED);
+        w.set_dir(dir.into());
+        w.set_teie(true);
+        w.set_htie(true);
+        w.set_tcie(true);
+        w.set_circ(vals::Circ::ENABLED);
+        w.set_pl(vals::Pl::VERYHIGH);
+        w.set_en(true);
+
+        let buffer_ptr = buffer.as_mut_ptr();
+        let mut this = Self {
+            channel,
+            cr: w,
+            ringbuf: WritableDmaRingBuffer::new(buffer),
+        };
+        this.clear_irqs();
+
+        #[cfg(dmamux)]
+        super::dmamux::configure_dmamux(&mut *this.channel, _request);
+
+        let ch = dma.ch(channel_number);
+        ch.par().write_value(peri_addr as u32);
+        ch.mar().write_value(buffer_ptr as u32);
+        ch.ndtr().write(|w| w.set_ndt(len as u16));
+
+        this
+    }
+
+    pub fn start(&mut self) {
+        let ch = self.channel.regs().ch(self.channel.num());
+        ch.cr().write_value(self.cr)
+    }
+
+    pub fn clear(&mut self) {
+        self.ringbuf.clear(DmaCtrlImpl(self.channel.reborrow()));
+    }
+
+    /// Write elements to the ring buffer
+    /// Return a tuple of the length written and the length remaining in the buffer
+    pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
+        self.ringbuf.write(DmaCtrlImpl(self.channel.reborrow()), buf)
+    }
+
+    /// Write an exact number of elements to the ringbuffer.
+    pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
+        use core::future::poll_fn;
+        use core::sync::atomic::compiler_fence;
+
+        let mut written_data = 0;
+        let buffer_len = buffer.len();
+
+        poll_fn(|cx| {
+            self.set_waker(cx.waker());
+
+            compiler_fence(Ordering::SeqCst);
+
+            match self.write(&buffer[written_data..buffer_len]) {
+                Ok((len, remaining)) => {
+                    written_data += len;
+                    if written_data == buffer_len {
+                        Poll::Ready(Ok(remaining))
+                    } else {
+                        Poll::Pending
+                    }
+                }
+                Err(e) => Poll::Ready(Err(e)),
+            }
+        })
+        .await
+    }
+
+    /// The capacity of the ringbuffer.
+    pub fn cap(&self) -> usize {
+        self.ringbuf.cap()
+    }
+
+    pub fn set_waker(&mut self, waker: &Waker) {
+        STATE.ch_wakers[self.channel.index()].register(waker);
+    }
+
+    fn clear_irqs(&mut self) {
+        let dma = self.channel.regs();
+        dma.ifcr().write(|w| {
+            w.set_htif(self.channel.num(), true);
+            w.set_tcif(self.channel.num(), true);
+            w.set_teif(self.channel.num(), true);
+        });
+    }
+
+    pub fn request_stop(&mut self) {
+        let ch = self.channel.regs().ch(self.channel.num());
+
+        // Disable the channel. Keep the IEs enabled so the irqs still fire.
+        // If the channel is enabled and the transfer is not completed, we need to
+        // perform two separate write accesses to the CR register to disable the channel.
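+        // `write` zeroes every field not explicitly set in the closure, so
+        // this access clears EN (stopping the channel) while keeping
+        // TEIE/HTIE/TCIE asserted.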
+        ch.cr().write(|w| {
+            w.set_teie(true);
+            w.set_htie(true);
+            w.set_tcie(true);
+        });
+    }
+
+    pub fn is_running(&mut self) -> bool {
+        let ch = self.channel.regs().ch(self.channel.num());
+        ch.cr().read().en()
+    }
+}
+
+impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> {
+    fn drop(&mut self) {
+        self.request_stop();
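+        // Busy-wait until the channel actually reports disabled; the DMA must
+        // not still be reading from the buffer when its memory is reclaimed.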
+        while self.is_running() {}
+
+        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
+        fence(Ordering::SeqCst);
+    }
+}
diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs
index 3c5c79fd..9cd7aa8d 100644
--- a/embassy-stm32/src/dma/dma.rs
+++ b/embassy-stm32/src/dma/dma.rs
@@ -814,7 +814,7 @@ pub struct WritableRingBuffer<'a, C: Channel, W: Word> {
 }
 
 impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> {
-    pub unsafe fn new_read(
+    pub unsafe fn new_write(
         channel: impl Peripheral<P = C> + 'a,
         _request: Request,
         peri_addr: *mut W,
@@ -899,7 +899,7 @@ impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> {
         self.ringbuf.write(DmaCtrlImpl(self.channel.reborrow()), buf)
     }
 
-    /// Write an exact number of elements from the ringbuffer.
+    /// Write an exact number of elements to the ringbuffer.
     pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
         use core::future::poll_fn;
         use core::sync::atomic::compiler_fence;
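
Usage note: a minimal sketch (not part of the patch) of how the new bdma WritableRingBuffer is meant to be driven, mirroring the existing dma.rs API. The channel, request, register address, buffer, and samples below are hypothetical placeholders, and `TransferOptions::default()` assumes the type's usual `Default` impl:

    // Sketch only: `dma_ch`, `request`, `dr_addr`, `DMA_BUF`, and `samples`
    // stand in for a concrete BDMA channel, DMA request line, peripheral data
    // register, 'static buffer, and data to send.
    let mut ring = unsafe {
        WritableRingBuffer::new_write(dma_ch, request, dr_addr, DMA_BUF, TransferOptions::default())
    };
    ring.start();

    // Queue samples into the circular transfer; completes once every element
    // has been accepted by the ring buffer.
    ring.write_exact(&samples).await?;

    // Stop feeding the peripheral when done.
    ring.request_stop();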