issue-2059 gpdma ringbuffers commit before rebase

Tyler Gilbert 2023-11-26 15:46:21 -06:00
parent a306c43633
commit 342bc3aa20
3 changed files with 394 additions and 7 deletions


@@ -58,7 +58,8 @@ rand_core = "0.6.3"
sdio-host = "0.5.0"
embedded-sdmmc = { git = "https://github.com/embassy-rs/embedded-sdmmc-rs", rev = "a4f293d3a6f72158385f79c98634cb8a14d0d2fc", optional = true }
critical-section = "1.1"
stm32-metapac = { git = "https://github.com/embassy-rs/stm32-data-generated", tag = "stm32-data-7117ad49c06fa00c388130a34977e029910083bd" }
atomic-polyfill = "1.0.1"
stm32-metapac = { path = "../../stm32-data/build/stm32-metapac" }
vcell = "0.1.3"
bxcan = "0.7.0"
nb = "1.0.0"
@@ -76,8 +77,8 @@ critical-section = { version = "1.1", features = ["std"] }
[build-dependencies]
proc-macro2 = "1.0.36"
quote = "1.0.15"
stm32-metapac = { git = "https://github.com/embassy-rs/stm32-data-generated", tag = "stm32-data-7117ad49c06fa00c388130a34977e029910083bd", default-features = false, features = ["metadata"]}
stm32-metapac = { path = "../../stm32-data/build/stm32-metapac", default-features = false, features = ["metadata"]}
#stm32-metapac = { git = "https://github.com/embassy-rs/stm32-data-generated", tag = "stm32-data-e6e51db6cdd7d533e52ca7a3237f7816a0486cd4", default-features = false, features = ["metadata"]}
[features]
default = ["rt"]


@@ -2,12 +2,13 @@
use core::future::Future;
use core::pin::Pin;
use core::sync::atomic::{fence, Ordering};
use core::task::{Context, Poll};
use core::sync::atomic::{fence, AtomicUsize, Ordering};
use core::task::{Context, Poll, Waker};
use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
use embassy_sync::waitqueue::AtomicWaker;
use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer};
use super::word::{Word, WordSize};
use super::Dir;
use crate::_generated::GPDMA_CHANNEL_COUNT;
@@ -39,13 +40,18 @@ impl From<WordSize> for vals::ChTr1Dw {
struct State {
ch_wakers: [AtomicWaker; GPDMA_CHANNEL_COUNT],
circular_address: [AtomicUsize; GPDMA_CHANNEL_COUNT],
complete_count: [AtomicUsize; GPDMA_CHANNEL_COUNT],
}
impl State {
const fn new() -> Self {
const ZERO: AtomicUsize = AtomicUsize::new(0);
const AW: AtomicWaker = AtomicWaker::new();
Self {
ch_wakers: [AW; GPDMA_CHANNEL_COUNT],
circular_address: [ZERO; GPDMA_CHANNEL_COUNT],
complete_count: [ZERO; GPDMA_CHANNEL_COUNT],
}
}
}
@@ -89,6 +95,8 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::gpdma::Gpdma, channel_num: usize, in
let ch = dma.ch(channel_num);
let sr = ch.sr().read();
defmt::info!("DMA IRQ");
if sr.dtef() {
panic!(
"DMA: data transfer error on DMA@{:08x} channel {}",
@@ -104,7 +112,25 @@
);
}
if sr.suspf() || sr.tcf() {
if sr.htf() {
// clear the half-transfer-complete flag
defmt::info!("half complete");
ch.fcr().modify(|w| w.set_htf(true));
STATE.ch_wakers[index].wake();
}
if sr.tcf() {
// clear the transfer-complete flag
defmt::info!("complete");
ch.fcr().modify(|w| w.set_tcf(true));
STATE.complete_count[index].fetch_add(1, Ordering::Relaxed);
STATE.ch_wakers[index].wake();
return;
}
if sr.suspf() {
ch.fcr().modify(|w| w.set_suspf(true));
// disable all xxIEs to prevent the irq from firing again.
ch.cr().write(|_| {});
@@ -350,3 +376,350 @@ impl<'a, C: Channel> Future for Transfer<'a, C> {
}
}
}
struct DmaCtrlImpl<'a, C: Channel> {
channel: PeripheralRef<'a, C>,
word_size: WordSize,
}
impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
fn get_remaining_transfers(&self) -> usize {
let ch = self.channel.regs().ch(self.channel.num());
(ch.br1().read().bndt() / self.word_size.bytes() as u16) as usize
}
fn get_complete_count(&self) -> usize {
STATE.complete_count[self.channel.index()].load(Ordering::Acquire)
}
fn reset_complete_count(&mut self) -> usize {
STATE.complete_count[self.channel.index()].swap(0, Ordering::AcqRel)
}
fn set_waker(&mut self, waker: &Waker) {
STATE.ch_wakers[self.channel.index()].register(waker);
}
}
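For orientation, here is a minimal sketch (not part of this commit) of how a ring buffer can combine the two counters exposed by DmaCtrl above into an absolute DMA position; the real bookkeeping lives in super::ringbuffer and may differ in the details. `cap` stands for the buffer length in elements.

fn dma_position(ctrl: &impl DmaCtrl, cap: usize) -> usize {
    // Whole passes over the circular buffer since the counter was last reset...
    let full_passes = ctrl.get_complete_count();
    // ...plus progress within the current pass (BNDT counts down to zero).
    let within_pass = cap - ctrl.get_remaining_transfers();
    full_passes * cap + within_pass
}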
struct RingBuffer {}
impl RingBuffer {
fn configure<'a, W: Word>(
ch: &pac::gpdma::Channel,
channel_index: usize,
request: Request,
dir: Dir,
peri_addr: *mut W,
buffer: &'a mut [W],
_options: TransferOptions,
) {
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
let (mem_addr, mem_len) = super::slice_ptr_parts_mut(buffer);
ch.cr().write(|w| w.set_reset(true));
ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs
if mem_addr & 0b11 != 0 {
panic!("circular address must be 4-byte aligned");
}
STATE.circular_address[channel_index].store(mem_addr, Ordering::Release);
let lli = STATE.circular_address[channel_index].as_ptr() as u32;
ch.llr().write(|w| {
match dir {
Dir::MemoryToPeripheral => w.set_usa(true),
Dir::PeripheralToMemory => w.set_uda(true),
}
// LLR.LA holds bits [15:2] of the linked-list item address (word-aligned)
w.set_la(((lli >> 2usize) & 0x3fff) as u16);
});
ch.lbar().write(|w| {
// LBAR.LBA holds the upper 16 bits of the linked-list item address
w.set_lba((lli >> 16usize) as u16);
});
let data_size = W::size();
ch.tr1().write(|w| {
w.set_sdw(data_size.into());
w.set_ddw(data_size.into());
w.set_sinc(dir == Dir::MemoryToPeripheral);
w.set_dinc(dir == Dir::PeripheralToMemory);
});
ch.tr2().write(|w| {
w.set_dreq(match dir {
Dir::MemoryToPeripheral => vals::ChTr2Dreq::DESTINATIONPERIPHERAL,
Dir::PeripheralToMemory => vals::ChTr2Dreq::SOURCEPERIPHERAL,
});
w.set_reqsel(request);
});
ch.br1().write(|w| {
// BNDT is specified as bytes, not as number of transfers.
w.set_bndt((mem_len * data_size.bytes()) as u16)
});
match dir {
Dir::MemoryToPeripheral => {
defmt::info!("memory to peripheral");
ch.sar().write_value(mem_addr as _);
ch.dar().write_value(peri_addr as _);
}
Dir::PeripheralToMemory => {
ch.sar().write_value(peri_addr as _);
ch.dar().write_value(mem_addr as _);
}
}
ch.cr().write(|w| {
// Enable interrupts
w.set_tcie(true);
w.set_useie(true);
w.set_dteie(true);
w.set_suspie(true);
w.set_htie(true);
});
}
fn start(ch: &pac::gpdma::Channel) {
Self::clear_irqs(ch);
ch.cr().modify(|w| w.set_en(true));
defmt::info!("DMA CR is {}", ch.cr().read().0);
}
fn request_stop(ch: &pac::gpdma::Channel) {
// break the loop - will stop on the next transfer complete
ch.llr().write(|_| 0);
}
fn clear_irqs(ch: &pac::gpdma::Channel) {
ch.fcr().modify(|w| {
w.set_htf(true);
w.set_tcf(true);
w.set_suspf(true);
});
}
fn is_running(ch: &pac::gpdma::Channel) -> bool {
!ch.sr().read().tcf()
}
}
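The circular behaviour relies on a one-word linked-list item: configure() stores the buffer base address in STATE.circular_address and points LBAR/LLR at that word, with USA or UDA selecting which address register is reloaded at the end of each pass. As a worked example (the address 0x2000_1234 is purely illustrative), this is how the 32-bit linked-list-item address is split across the two registers and recombined:

fn split_lli_address(lli: u32) -> (u16, u16) {
    let lba = (lli >> 16) as u16;          // upper 16 bits -> LBAR.LBA
    let la = ((lli >> 2) & 0x3fff) as u16; // bits [15:2]   -> LLR.LA
    (lba, la)
}

fn main() {
    let lli: u32 = 0x2000_1234; // illustrative, 4-byte-aligned SRAM address
    let (lba, la) = split_lli_address(lli);
    // Recombining both fields recovers the original word-aligned address.
    assert_eq!(((lba as u32) << 16) | ((la as u32) << 2), lli);
}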
pub struct ReadableRingBuffer<'a, C: Channel, W: Word> {
channel: PeripheralRef<'a, C>,
ringbuf: ReadableDmaRingBuffer<'a, W>,
}
impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> {
pub unsafe fn new_read(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buffer: &'a mut [W],
options: TransferOptions,
) -> Self {
into_ref!(channel);
#[cfg(dmamux)]
super::dmamux::configure_dmamux(&mut channel, request);
RingBuffer::configure(
&channel.regs().ch(channel.num()),
channel.index(),
request,
Dir::PeripheralToMemory,
peri_addr,
buffer,
options,
);
Self {
channel,
ringbuf: ReadableDmaRingBuffer::new(buffer),
}
}
pub fn start(&mut self) {
RingBuffer::start(&self.channel.regs().ch(self.channel.num()));
}
pub fn clear(&mut self) {
self.ringbuf.clear(&mut DmaCtrlImpl {
channel: self.channel.reborrow(),
word_size: W::size(),
});
}
/// Read elements from the ring buffer.
/// Returns a tuple of the length read and the length remaining in the buffer.
/// If not all of the elements were read, some elements remain in the buffer for a later read.
/// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read.
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
self.ringbuf.read(
&mut DmaCtrlImpl {
channel: self.channel.reborrow(),
word_size: W::size(),
},
buf,
)
}
/// Read an exact number of elements from the ringbuffer.
///
/// Returns the remaining number of elements available for immediate reading.
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
///
/// Async/Wake Behavior:
/// The underlying DMA peripheral can only wake us when its buffer pointer has reached the halfway point,
/// and when it wraps around. This means that when called with a buffer of length 'M', where this
/// ring buffer was created with a buffer of size 'N':
/// - If M equals N/2, or N/2 divides evenly into M, this function will return every N/2 elements read from the DMA source.
/// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, OverrunError> {
self.ringbuf
.read_exact(
&mut DmaCtrlImpl {
channel: self.channel.reborrow(),
word_size: W::size(),
},
buffer,
)
.await
}
/// The capacity of the ring buffer.
pub const fn cap(&self) -> usize {
self.ringbuf.cap()
}
pub fn set_waker(&mut self, waker: &Waker) {
DmaCtrlImpl {
channel: self.channel.reborrow(),
word_size: W::size(),
}
.set_waker(waker);
}
pub fn request_stop(&mut self) {
RingBuffer::request_stop(&self.channel.regs().ch(self.channel.num()));
}
pub fn is_running(&mut self) -> bool {
RingBuffer::is_running(&self.channel.regs().ch(self.channel.num()))
}
}
impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> {
fn drop(&mut self) {
self.request_stop();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
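A hedged usage sketch of the read side (new_read() and start() are assumed to have been called with a real channel, request and peripheral address; process() is a hypothetical application callback): a task that drains the ring buffer in fixed-size chunks, suspending in read_exact() until the half-transfer or transfer-complete interrupt wakes it.

fn process(_chunk: &[u16]) {
    // placeholder for application code
}

async fn drain<C: Channel>(ring: &mut ReadableRingBuffer<'_, C, u16>) -> Result<(), OverrunError> {
    let mut chunk = [0u16; 32];
    loop {
        // Waits until 32 elements have been written by the DMA, then copies
        // them out; fails with OverrunError if the DMA lapped the reader.
        ring.read_exact(&mut chunk).await?;
        process(&chunk);
    }
}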
pub struct WritableRingBuffer<'a, C: Channel, W: Word> {
#[allow(dead_code)] // this is only read by the DMA controller
channel: PeripheralRef<'a, C>,
ringbuf: WritableDmaRingBuffer<'a, W>,
}
impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> {
pub unsafe fn new_write(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buffer: &'a mut [W],
options: TransferOptions,
) -> Self {
into_ref!(channel);
#[cfg(dmamux)]
super::dmamux::configure_dmamux(&mut channel, request);
RingBuffer::configure(
&channel.regs().ch(channel.num()),
channel.index(),
request,
Dir::MemoryToPeripheral,
peri_addr,
buffer,
options,
);
Self {
channel,
ringbuf: WritableDmaRingBuffer::new(buffer),
}
}
pub fn start(&mut self) {
RingBuffer::start(&self.channel.regs().ch(self.channel.num()));
}
pub fn clear(&mut self) {
self.ringbuf.clear(&mut DmaCtrlImpl {
channel: self.channel.reborrow(),
word_size: W::size(),
});
}
/// Write elements to the ring buffer.
/// Returns a tuple of the length written and the length remaining in the buffer.
pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
self.ringbuf.write(
&mut DmaCtrlImpl {
channel: self.channel.reborrow(),
word_size: W::size(),
},
buf,
)
}
/// Write an exact number of elements to the ringbuffer.
pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
self.ringbuf
.write_exact(
&mut DmaCtrlImpl {
channel: self.channel.reborrow(),
word_size: W::size(),
},
buffer,
)
.await
}
/// The capacity of the ring buffer.
pub const fn cap(&self) -> usize {
self.ringbuf.cap()
}
pub fn set_waker(&mut self, waker: &Waker) {
DmaCtrlImpl {
channel: self.channel.reborrow(),
word_size: W::size(),
}
.set_waker(waker);
}
pub fn request_stop(&mut self) {
RingBuffer::request_stop(&self.channel.regs().ch(self.channel.num()));
}
pub fn is_running(&mut self) -> bool {
RingBuffer::is_running(&self.channel.regs().ch(self.channel.num()))
}
}
impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> {
fn drop(&mut self) {
self.request_stop();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
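And the matching hedged sketch for the write side (next_block() is a hypothetical data source): write_exact() suspends until the DMA has drained enough of the ring buffer to accept the whole block.

fn next_block() -> [u16; 32] {
    [0; 32] // placeholder for real data to transmit
}

async fn feed<C: Channel>(ring: &mut WritableRingBuffer<'_, C, u16>) -> Result<(), OverrunError> {
    loop {
        let block = next_block();
        // Copies the block into the ring buffer, waiting for space if the
        // DMA has not yet transmitted enough of the previously queued data.
        ring.write_exact(&block).await?;
    }
}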


@@ -577,7 +577,6 @@ fn get_ring_buffer<'d, T: Instance, C: Channel, W: word::Word>(
tx_rx: TxRx,
) -> RingBuffer<'d, C, W> {
let opts = TransferOptions {
half_transfer_ir: true,
// new_write() and new_read() always use circular mode
..Default::default()
};
@@ -996,6 +995,20 @@ impl<'d, T: Instance, C: Channel, W: word::Word> SubBlock<'d, T, C, W> {
Config::default()
}
pub fn is_running(&mut self) -> bool {
match &mut self.ring_buffer {
RingBuffer::Writable(buffer) => buffer.is_running(),
RingBuffer::Readable(buffer) => buffer.is_running(),
}
}
pub fn request_stop(&mut self) {
match &mut self.ring_buffer {
RingBuffer::Writable(buffer) => buffer.request_stop(),
RingBuffer::Readable(buffer) => buffer.request_stop(),
}
}
pub async fn write(&mut self, data: &[W]) -> Result<(), Error> {
match &mut self.ring_buffer {
RingBuffer::Writable(buffer) => {