stm32/dma: add double buffered mode for DMA, update DCMI.

Dario Nieuwenhuis 2023-04-18 16:16:33 +02:00
parent 173c65b543
commit efc70debb3
2 changed files with 182 additions and 14 deletions
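
The new API exposes the STM32 DMA's hardware double buffer mode: a stream is given two memory addresses, M0AR and M1AR, flips between them at each transfer-complete event, and reports via the CR.CT ("current target") bit which one it is currently filling. The CPU may only touch the buffer CT does not point at. A minimal sketch of that ownership rule, not code from this commit; it is what `is_buffer0_accessible` in the diff below encodes:

    // Which buffer the CPU may safely read or re-point, given the stream's
    // current-target (CT) bit. The hardware owns the other one.
    fn cpu_owned_buffer(ct_points_at_memory1: bool) -> usize {
        if ct_points_at_memory1 {
            0 // DMA busy with M1AR: buffer 0 (M0AR) belongs to the CPU
        } else {
            1 // DMA busy with M0AR: buffer 1 (M1AR) belongs to the CPU
        }
    }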

embassy-stm32/src/dcmi.rs

@@ -434,9 +434,13 @@ where
         result
     }
 
+    #[cfg(not(dma))]
     async fn capture_giant(&mut self, _buffer: &mut [u32]) -> Result<(), Error> {
-        todo!()
-        /*
+        panic!("capturing to buffers larger than 0xffff is only supported on DMA for now, not on BDMA or GPDMA.");
+    }
+
+    #[cfg(dma)]
+    async fn capture_giant(&mut self, buffer: &mut [u32]) -> Result<(), Error> {
         use crate::dma::TransferOptions;
 
         let data_len = buffer.len();
@@ -460,16 +464,24 @@ where
         let r = self.inner.regs();
         let src = r.dr().ptr() as *mut u32;
-        unsafe {
-            channel.start_double_buffered_read(request, src, m0ar, m1ar, chunk_size, TransferOptions::default());
-        }
+        let mut transfer = unsafe {
+            crate::dma::DoubleBuffered::new_read(
+                &mut self.dma,
+                request,
+                src,
+                m0ar,
+                m1ar,
+                chunk_size,
+                TransferOptions::default(),
+            )
+        };
 
         let mut last_chunk_set_for_transfer = false;
         let mut buffer0_last_accessible = false;
 
         let dma_result = poll_fn(|cx| {
-            channel.set_waker(cx.waker());
+            transfer.set_waker(cx.waker());
 
-            let buffer0_currently_accessible = unsafe { channel.is_buffer0_accessible() };
+            let buffer0_currently_accessible = transfer.is_buffer0_accessible();
 
             // check if the accessible buffer changed since last poll
             if buffer0_last_accessible == buffer0_currently_accessible {
@@ -480,21 +492,21 @@ where
             if remaining_chunks != 0 {
                 if remaining_chunks % 2 == 0 && buffer0_currently_accessible {
                     m0ar = unsafe { m0ar.add(2 * chunk_size) };
-                    unsafe { channel.set_buffer0(m0ar) }
+                    unsafe { transfer.set_buffer0(m0ar) }
                     remaining_chunks -= 1;
                 } else if !buffer0_currently_accessible {
                     m1ar = unsafe { m1ar.add(2 * chunk_size) };
-                    unsafe { channel.set_buffer1(m1ar) };
+                    unsafe { transfer.set_buffer1(m1ar) };
                     remaining_chunks -= 1;
                 }
             } else {
                 if buffer0_currently_accessible {
-                    unsafe { channel.set_buffer0(buffer.as_mut_ptr()) }
+                    unsafe { transfer.set_buffer0(buffer.as_mut_ptr()) }
                 } else {
-                    unsafe { channel.set_buffer1(buffer.as_mut_ptr()) }
+                    unsafe { transfer.set_buffer1(buffer.as_mut_ptr()) }
                 }
                 if last_chunk_set_for_transfer {
-                    channel.request_stop();
+                    transfer.request_stop();
                     return Poll::Ready(());
                 }
                 last_chunk_set_for_transfer = true;
@@ -542,7 +554,6 @@ where
 
         unsafe { Self::toggle(false) };
 
         result
-        */
     }
 }
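
The hunks above work around the DMA's 16-bit transfer counter (the 0xffff limit named in the panic message): the frame is split into equally sized chunks of at most 0xFFFF words, the two DMA buffers are pointed at the first two chunks, and each time the hardware switches targets the just-released buffer is leapfrogged two chunks ahead (the `m0ar.add(2 * chunk_size)` / `m1ar.add(2 * chunk_size)` lines), so buffer 0 serves the even chunks and buffer 1 the odd ones. Below is a host-runnable sketch of the even split, consistent with the `chunk_size` and `remaining_chunks` variables above; the helper name is hypothetical, and the driver's own splitting code sits in lines elided between the hunks:

    /// Smallest number of equal chunks such that each chunk's word count
    /// fits the DMA's 16-bit NDTR counter (hypothetical helper).
    fn split_into_chunks(data_len: usize) -> (usize, usize) {
        let mut chunks = data_len / 0xffff + 1;
        while data_len % chunks != 0 {
            chunks += 1; // grow until the chunks divide the frame evenly
        }
        (chunks, data_len / chunks)
    }

    fn main() {
        // e.g. a 640x480 frame, 16 bits per pixel, packed two pixels per u32
        let words = 640 * 480 / 2;
        let (chunks, chunk_size) = split_into_chunks(words);
        assert!(chunk_size <= 0xffff && chunks * chunk_size == words);
        println!("{chunks} chunks x {chunk_size} words"); // 3 chunks x 51200 words
    }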

embassy-stm32/src/dma/dma.rs

@@ -1,7 +1,8 @@
 use core::future::Future;
 use core::marker::PhantomData;
 use core::pin::Pin;
 use core::sync::atomic::{fence, Ordering};
-use core::task::{Context, Poll};
+use core::task::{Context, Poll, Waker};
 
 use embassy_cortex_m::interrupt::Priority;
 use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
@@ -440,3 +441,159 @@ impl<'a, C: Channel> Future for Transfer<'a, C> {
         }
     }
 }
+
+// ==================================
+
+#[must_use = "futures do nothing unless you `.await` or poll them"]
+pub struct DoubleBuffered<'a, C: Channel, W: Word> {
+    channel: PeripheralRef<'a, C>,
+    _phantom: PhantomData<W>,
+}
+
+impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> {
+    pub unsafe fn new_read(
+        channel: impl Peripheral<P = C> + 'a,
+        _request: Request,
+        peri_addr: *mut W,
+        buf0: *mut W,
+        buf1: *mut W,
+        len: usize,
+        options: TransferOptions,
+    ) -> Self {
+        into_ref!(channel);
+        assert!(len > 0 && len <= 0xFFFF);
+
+        let dir = Dir::PeripheralToMemory;
+        let data_size = W::bits();
+
+        let channel_number = channel.num();
+        let dma = channel.regs();
+
+        // "Preceding reads and writes cannot be moved past subsequent writes."
+        fence(Ordering::SeqCst);
+
+        let mut this = Self {
+            channel,
+            _phantom: PhantomData,
+        };
+        this.clear_irqs();
+
+        #[cfg(dmamux)]
+        super::dmamux::configure_dmamux(&mut *this.channel, _request);
+
+        let ch = dma.st(channel_number);
+        ch.par().write_value(peri_addr as u32);
+        ch.m0ar().write_value(buf0 as u32);
+        ch.m1ar().write_value(buf1 as u32);
+        ch.ndtr().write_value(regs::Ndtr(len as _));
+        ch.fcr().write(|w| {
+            if let Some(fth) = options.fifo_threshold {
+                // FIFO mode
+                w.set_dmdis(vals::Dmdis::DISABLED);
+                w.set_fth(fth.into());
+            } else {
+                // Direct mode
+                w.set_dmdis(vals::Dmdis::ENABLED);
+            }
+        });
+        ch.cr().write(|w| {
+            w.set_dir(dir.into());
+            w.set_msize(data_size.into());
+            w.set_psize(data_size.into());
+            w.set_pl(vals::Pl::VERYHIGH);
+            w.set_minc(vals::Inc::INCREMENTED);
+            w.set_pinc(vals::Inc::FIXED);
+            w.set_teie(true);
+            w.set_tcie(true);
+            // Double buffer mode: hardware toggles CT between M0AR and M1AR.
+            w.set_dbm(vals::Dbm::ENABLED);
+            #[cfg(dma_v1)]
+            w.set_trbuff(true);
+            #[cfg(dma_v2)]
+            w.set_chsel(_request);
+            w.set_pburst(options.pburst.into());
+            w.set_mburst(options.mburst.into());
+            w.set_pfctrl(options.flow_ctrl.into());
+            w.set_en(true);
+        });
+
+        this
+    }
+
+    fn clear_irqs(&mut self) {
+        let channel_number = self.channel.num();
+        let dma = self.channel.regs();
+        let isrn = channel_number / 4;
+        let isrbit = channel_number % 4;
+
+        unsafe {
+            dma.ifcr(isrn).write(|w| {
+                w.set_tcif(isrbit, true);
+                w.set_teif(isrbit, true);
+            })
+        }
+    }
+
+    pub unsafe fn set_buffer0(&mut self, buffer: *mut W) {
+        let ch = self.channel.regs().st(self.channel.num());
+        ch.m0ar().write_value(buffer as _);
+    }
+
+    pub unsafe fn set_buffer1(&mut self, buffer: *mut W) {
+        let ch = self.channel.regs().st(self.channel.num());
+        ch.m1ar().write_value(buffer as _);
+    }
+
+    pub fn is_buffer0_accessible(&mut self) -> bool {
+        let ch = self.channel.regs().st(self.channel.num());
+        unsafe { ch.cr().read() }.ct() == vals::Ct::MEMORY1
+    }
+
+    pub fn set_waker(&mut self, waker: &Waker) {
+        STATE.ch_wakers[self.channel.index()].register(waker);
+    }
+
+    pub fn request_stop(&mut self) {
+        let ch = self.channel.regs().st(self.channel.num());
+
+        // Disable the channel. Keep the IEs enabled so the irqs still fire.
+        unsafe {
+            ch.cr().write(|w| {
+                w.set_teie(true);
+                w.set_tcie(true);
+            })
+        }
+    }
+
+    pub fn is_running(&mut self) -> bool {
+        let ch = self.channel.regs().st(self.channel.num());
+        unsafe { ch.cr().read() }.en()
+    }
+
+    /// Gets the total remaining transfers for the channel.
+    /// Note: this will be zero for transfers that completed without cancellation.
+    pub fn get_remaining_transfers(&self) -> u16 {
+        let ch = self.channel.regs().st(self.channel.num());
+        unsafe { ch.ndtr().read() }.ndt()
+    }
+
+    pub fn blocking_wait(mut self) {
+        while self.is_running() {}
+
+        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
+        fence(Ordering::SeqCst);
+
+        core::mem::forget(self);
+    }
+}
+
+impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> {
+    fn drop(&mut self) {
+        self.request_stop();
+        while self.is_running() {}
+
+        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
+        fence(Ordering::SeqCst);
+    }
+}
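
Putting the new API together, here is a hedged usage sketch of the ping-pong pattern the DCMI driver above implements; it is not code from this commit. `dma_ch`, `REQUEST`, `src`, `buf0`, `buf1`, `CHUNK` and `consume` are placeholders for a concrete channel, request line, peripheral data register, chunk pointers and application code, and the `embassy_stm32::dma` import path is assumed from the module layout:

    use core::future::poll_fn;
    use core::task::Poll;
    use embassy_stm32::dma::{DoubleBuffered, TransferOptions};

    let mut xfer = unsafe {
        DoubleBuffered::new_read(
            &mut dma_ch,                // a dma::Channel peripheral
            REQUEST,                    // the peripheral's DMA request line
            src,                        // peripheral data register, *mut u32
            buf0,                       // first chunk, *mut u32
            buf1,                       // second chunk, *mut u32
            CHUNK,                      // words per buffer, must be <= 0xFFFF
            TransferOptions::default(),
        )
    };

    let mut buffer0_was_accessible = false;
    let mut switches = 0;
    poll_fn(|cx| {
        xfer.set_waker(cx.waker());

        // Wakes may be spurious: only act when the hardware really switched.
        let buffer0_accessible = xfer.is_buffer0_accessible();
        if buffer0_was_accessible == buffer0_accessible {
            return Poll::Pending;
        }
        buffer0_was_accessible = buffer0_accessible;

        // The buffer CT does not point at is ours: consume it, then hand it
        // back (or re-point it, as the DCMI leapfrog above does).
        if buffer0_accessible {
            consume(buf0); // DMA is filling buf1 right now
            unsafe { xfer.set_buffer0(buf0) };
        } else {
            consume(buf1);
            unsafe { xfer.set_buffer1(buf1) };
        }

        switches += 1;
        if switches == 8 {
            xfer.request_stop();
            return Poll::Ready(());
        }
        Poll::Pending
    })
    .await;

Cancellation is safe by construction: dropping the transfer requests a stop and spins until EN clears. `blocking_wait` instead waits for the stream to finish on its own, issues the fence, and `forget`s the value so `Drop` does not stop an already-completed transfer.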