stm32/dma: refactor.

Dario Nieuwenhuis 2023-04-17 00:04:54 +02:00
parent 46227bec1e
commit 173c65b543
14 changed files with 1025 additions and 1092 deletions


@@ -260,7 +260,7 @@ fn main() {
     // ========
     // Generate DMA IRQs.
-    let mut dma_irqs: HashMap<&str, Vec<(&str, &str)>> = HashMap::new();
+    let mut dma_irqs: HashMap<&str, Vec<(&str, &str, &str)>> = HashMap::new();
     for p in METADATA.peripherals {
         if let Some(r) = &p.registers {
@@ -270,7 +270,10 @@ fn main() {
                     continue;
                 }
                 for irq in p.interrupts {
-                    dma_irqs.entry(irq.interrupt).or_default().push((p.name, irq.signal));
+                    dma_irqs
+                        .entry(irq.interrupt)
+                        .or_default()
+                        .push((r.kind, p.name, irq.signal));
                 }
             }
         }
@@ -279,13 +282,14 @@ fn main() {
     for (irq, channels) in dma_irqs {
         let irq = format_ident!("{}", irq);
-        let channels = channels.iter().map(|(dma, ch)| format_ident!("{}_{}", dma, ch));
+        let xdma = format_ident!("{}", channels[0].0);
+        let channels = channels.iter().map(|(_, dma, ch)| format_ident!("{}_{}", dma, ch));
         g.extend(quote! {
             #[crate::interrupt]
            unsafe fn #irq () {
                 #(
-                    <crate::peripherals::#channels as crate::dma::sealed::Channel>::on_irq();
+                    <crate::peripherals::#channels as crate::dma::#xdma::sealed::Channel>::on_irq();
                 )*
             }
        });
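For reference, the generated handler now dispatches through the DMA-variant module recorded in `r.kind` (`dma`, `bdma`, or `gpdma`) rather than a single `crate::dma::sealed::Channel` trait. A rough sketch of one expansion, with an illustrative interrupt and channel name:

    // Hypothetical expansion for a BDMA channel; real names come from the chip metadata.
    #[crate::interrupt]
    unsafe fn DMA1_CHANNEL1() {
        <crate::peripherals::DMA1_CH1 as crate::dma::bdma::sealed::Channel>::on_irq();
    }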


@@ -4,6 +4,7 @@ use core::task::Poll;
 use embassy_hal_common::{into_ref, PeripheralRef};
 use embassy_sync::waitqueue::AtomicWaker;
+use crate::dma::Transfer;
 use crate::gpio::sealed::AFType;
 use crate::gpio::Speed;
 use crate::interrupt::{Interrupt, InterruptExt};
@@ -385,14 +386,11 @@ where
             return self.capture_giant(buffer).await;
         }
     }
     async fn capture_small(&mut self, buffer: &mut [u32]) -> Result<(), Error> {
-        let channel = &mut self.dma;
-        let request = channel.request();
         let r = self.inner.regs();
         let src = r.dr().ptr() as *mut u32;
-        let dma_read = crate::dma::read(channel, request, src, buffer);
+        let request = self.dma.request();
+        let dma_read = unsafe { Transfer::new_read(&mut self.dma, request, src, buffer, Default::default()) };
         Self::clear_interrupt_flags();
         Self::enable_irqs();
@@ -436,7 +434,9 @@ where
         result
     }
-    async fn capture_giant(&mut self, buffer: &mut [u32]) -> Result<(), Error> {
+    async fn capture_giant(&mut self, _buffer: &mut [u32]) -> Result<(), Error> {
+        todo!()
+        /*
         use crate::dma::TransferOptions;
         let data_len = buffer.len();
@@ -542,6 +542,7 @@ where
         unsafe { Self::toggle(false) };
         result
+        */
     }
 }
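The `capture_small` change above shows the new calling pattern: construct a `Transfer` up front and await it, instead of calling the old free function `crate::dma::read`. A minimal sketch of the same shape in isolation, assuming a valid channel, request line and peripheral data-register pointer (all placeholders):

    // Sketch only, using the crate-internal paths the DCMI driver uses above.
    async fn read_words<C: crate::dma::Channel>(
        channel: &mut C,
        request: crate::dma::Request,
        periph_dr: *mut u32,
        buf: &mut [u32],
    ) {
        // Safety: periph_dr must be a readable peripheral register and buf must stay
        // valid for the whole transfer (the Transfer borrows both for its lifetime).
        let transfer = unsafe { crate::dma::Transfer::new_read(channel, request, periph_dr, buf, Default::default()) };
        // Resolves when the channel stops running; dropping it early requests a stop
        // and busy-waits until the channel is idle.
        transfer.await;
    }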


@@ -1,18 +1,31 @@
 #![macro_use]
+use core::future::Future;
+use core::pin::Pin;
 use core::sync::atomic::{fence, Ordering};
-use core::task::Waker;
+use core::task::{Context, Poll};
 use embassy_cortex_m::interrupt::Priority;
+use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
 use embassy_sync::waitqueue::AtomicWaker;
-use super::{TransferOptions, Word, WordSize};
+use super::{Dir, Word, WordSize};
 use crate::_generated::BDMA_CHANNEL_COUNT;
-use crate::dma::Request;
 use crate::interrupt::{Interrupt, InterruptExt};
 use crate::pac;
 use crate::pac::bdma::vals;
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[non_exhaustive]
+pub struct TransferOptions {}
+impl Default for TransferOptions {
+    fn default() -> Self {
+        Self {}
+    }
+}
 impl From<WordSize> for vals::Size {
     fn from(raw: WordSize) -> Self {
         match raw {
@@ -23,6 +36,15 @@ impl From<WordSize> for vals::Size {
     }
 }
+impl From<Dir> for vals::Dir {
+    fn from(raw: Dir) -> Self {
+        match raw {
+            Dir::MemoryToPeripheral => Self::FROMMEMORY,
+            Dir::PeripheralToMemory => Self::FROMPERIPHERAL,
+        }
+    }
+}
 struct State {
     ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT],
 }
@ -55,219 +77,27 @@ foreach_dma_channel! {
// BDMA1 in H7 doesn't use DMAMUX, which breaks // BDMA1 in H7 doesn't use DMAMUX, which breaks
}; };
($channel_peri:ident, $dma_peri:ident, bdma, $channel_num:expr, $index:expr, $dmamux:tt) => { ($channel_peri:ident, $dma_peri:ident, bdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
impl crate::dma::sealed::Channel for crate::peripherals::$channel_peri { impl sealed::Channel for crate::peripherals::$channel_peri {
fn regs(&self) -> pac::bdma::Dma {
unsafe fn start_write<W: Word>(&mut self, _request: Request, buf: *const[W], reg_addr: *mut W, options: TransferOptions) { pac::$dma_peri
let (ptr, len) = super::slice_ptr_parts(buf);
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
#[cfg(any(bdma_v2, dmamux))]
_request,
vals::Dir::FROMMEMORY,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
);
} }
fn num(&self) -> usize {
unsafe fn start_write_repeated<W: Word>(&mut self, _request: Request, repeated: *const W, count: usize, reg_addr: *mut W, options: TransferOptions) { $channel_num
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
#[cfg(any(bdma_v2, dmamux))]
_request,
vals::Dir::FROMMEMORY,
reg_addr as *const u32,
repeated as *mut u32,
count,
false,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
)
} }
fn index(&self) -> usize {
unsafe fn start_read<W: Word>(&mut self, _request: Request, reg_addr: *const W, buf: *mut [W], options: TransferOptions) { $index
let (ptr, len) = super::slice_ptr_parts_mut(buf);
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
#[cfg(any(bdma_v2, dmamux))]
_request,
vals::Dir::FROMPERIPHERAL,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
);
} }
unsafe fn start_double_buffered_read<W: super::Word>(
&mut self,
_request: Request,
_reg_addr: *const W,
_buffer0: *mut W,
_buffer1: *mut W,
_buffer_len: usize,
_options: TransferOptions,
) {
panic!("Unsafe double buffered mode is unavailable on BDMA");
}
unsafe fn set_buffer0<W: super::Word>(&mut self, _buffer: *mut W) {
panic!("Unsafe double buffered mode is unavailable on BDMA");
}
unsafe fn set_buffer1<W: super::Word>(&mut self, _buffer: *mut W) {
panic!("Unsafe double buffered mode is unavailable on BDMA");
}
unsafe fn is_buffer0_accessible(&mut self) -> bool {
panic!("Unsafe double buffered mode is unavailable on BDMA");
}
fn request_stop(&mut self){
unsafe {low_level_api::request_stop(pac::$dma_peri, $channel_num);}
}
fn is_running(&self) -> bool {
unsafe {low_level_api::is_running(pac::$dma_peri, $channel_num)}
}
fn remaining_transfers(&mut self) -> u16 {
unsafe {low_level_api::get_remaining_transfers(pac::$dma_peri, $channel_num)}
}
fn set_waker(&mut self, waker: &Waker) {
unsafe { low_level_api::set_waker($index, waker) }
}
fn on_irq() { fn on_irq() {
unsafe { unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
low_level_api::on_irq_inner(pac::$dma_peri, $channel_num, $index);
}
} }
} }
impl crate::dma::Channel for crate::peripherals::$channel_peri {} impl Channel for crate::peripherals::$channel_peri {}
}; };
} }
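Each macro arm now only supplies register-block access and channel indices; the transfer logic lives in the shared `Transfer` type defined later in this file. A sketch of what one expansion might look like, with an illustrative channel (BDMA instance `DMA1`, channel 0, global index 0):

    // Illustrative expansion of the bdma arm for a single channel.
    impl sealed::Channel for crate::peripherals::DMA1_CH1 {
        fn regs(&self) -> pac::bdma::Dma {
            pac::DMA1
        }
        fn num(&self) -> usize {
            0 // channel number within this BDMA instance
        }
        fn index(&self) -> usize {
            0 // index into the global per-channel waker table
        }
        fn on_irq() {
            unsafe { on_irq_inner(pac::DMA1, 0, 0) }
        }
    }
    impl Channel for crate::peripherals::DMA1_CH1 {}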
mod low_level_api { /// Safety: Must be called with a matching set of parameters for a valid dma channel
use super::*; pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index: usize) {
pub unsafe fn start_transfer(
dma: pac::bdma::Dma,
channel_number: u8,
#[cfg(any(bdma_v2, dmamux))] request: Request,
dir: vals::Dir,
peri_addr: *const u32,
mem_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: vals::Size,
options: TransferOptions,
#[cfg(dmamux)] dmamux_regs: pac::dmamux::Dmamux,
#[cfg(dmamux)] dmamux_ch_num: u8,
) {
assert!(options.mburst == crate::dma::Burst::Single, "Burst mode not supported");
assert!(options.pburst == crate::dma::Burst::Single, "Burst mode not supported");
assert!(
options.flow_ctrl == crate::dma::FlowControl::Dma,
"Peripheral flow control not supported"
);
assert!(options.fifo_threshold.is_none(), "FIFO mode not supported");
let ch = dma.ch(channel_number as _);
reset_status(dma, channel_number);
#[cfg(dmamux)]
super::super::dmamux::configure_dmamux(dmamux_regs, dmamux_ch_num, request);
#[cfg(bdma_v2)]
critical_section::with(|_| dma.cselr().modify(|w| w.set_cs(channel_number as _, request)));
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
ch.par().write_value(peri_addr as u32);
ch.mar().write_value(mem_addr as u32);
ch.ndtr().write(|w| w.set_ndt(mem_len as u16));
ch.cr().write(|w| {
w.set_psize(data_size);
w.set_msize(data_size);
if incr_mem {
w.set_minc(vals::Inc::ENABLED);
} else {
w.set_minc(vals::Inc::DISABLED);
}
w.set_dir(dir);
w.set_teie(true);
w.set_tcie(true);
w.set_en(true);
});
}
pub unsafe fn request_stop(dma: pac::bdma::Dma, channel_number: u8) {
reset_status(dma, channel_number);
let ch = dma.ch(channel_number as _);
// Disable the channel and interrupts with the default value.
ch.cr().write(|_| ());
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
pub unsafe fn is_running(dma: pac::bdma::Dma, ch: u8) -> bool {
let ch = dma.ch(ch as _);
ch.cr().read().en()
}
/// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation.
pub unsafe fn get_remaining_transfers(dma: pac::bdma::Dma, ch: u8) -> u16 {
// get a handle on the channel itself
let ch = dma.ch(ch as _);
// read the remaining transfer count. If this is zero, the transfer completed fully.
ch.ndtr().read().ndt() as u16
}
/// Sets the waker for the specified DMA channel
pub unsafe fn set_waker(state_number: usize, waker: &Waker) {
STATE.ch_wakers[state_number].register(waker);
}
pub unsafe fn reset_status(dma: pac::bdma::Dma, channel_number: u8) {
dma.ifcr().write(|w| {
w.set_tcif(channel_number as _, true);
w.set_teif(channel_number as _, true);
});
}
/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: u8, index: u8) {
let channel_num = channel_num as usize;
let index = index as usize;
let isr = dma.isr().read(); let isr = dma.isr().read();
let cr = dma.ch(channel_num).cr(); let cr = dma.ch(channel_num).cr();
@ -278,5 +108,236 @@ mod low_level_api {
cr.write(|_| ()); // Disable channel interrupts with the default value. cr.write(|_| ()); // Disable channel interrupts with the default value.
STATE.ch_wakers[index].wake(); STATE.ch_wakers[index].wake();
} }
}
#[cfg(any(bdma_v2, dmamux))]
pub type Request = u8;
#[cfg(not(any(bdma_v2, dmamux)))]
pub type Request = ();
#[cfg(dmamux)]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {}
#[cfg(not(dmamux))]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
pub(crate) mod sealed {
use super::*;
pub trait Channel {
fn regs(&self) -> pac::bdma::Dma;
fn num(&self) -> usize;
fn index(&self) -> usize;
fn on_irq();
}
}
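The `sealed::Channel` / `Channel` split is the usual sealed-trait pattern: downstream crates can use `Channel` in bounds but cannot implement it, because the supertrait lives in a `pub(crate)` module. Reduced to its essentials (standalone sketch, not crate code):

    mod sealed {
        pub trait Channel {
            fn num(&self) -> usize;
        }
    }
    // Usable in bounds everywhere, implementable only where `sealed` is visible.
    pub trait Channel: sealed::Channel + 'static {}

    struct Ch0;
    impl sealed::Channel for Ch0 {
        fn num(&self) -> usize {
            0
        }
    }
    impl Channel for Ch0 {}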
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a, C: Channel> {
channel: PeripheralRef<'a, C>,
}
impl<'a, C: Channel> Transfer<'a, C> {
pub unsafe fn new_read<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buf: &'a mut [W],
options: TransferOptions,
) -> Self {
Self::new_read_raw(channel, request, peri_addr, buf, options)
}
pub unsafe fn new_read_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buf: *mut [W],
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts_mut(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request,
Dir::PeripheralToMemory,
peri_addr as *const u32,
ptr as *mut u32,
len,
true,
W::bits(),
options,
)
}
pub unsafe fn new_write<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
buf: &'a [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
Self::new_write_raw(channel, request, buf, peri_addr, options)
}
pub unsafe fn new_write_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
buf: *const [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
ptr as *mut u32,
len,
true,
W::bits(),
options,
)
}
pub unsafe fn new_write_repeated<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
repeated: &'a W,
count: usize,
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
Self::new_inner(
channel,
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
repeated as *const W as *mut u32,
count,
false,
W::bits(),
options,
)
}
unsafe fn new_inner(
channel: PeripheralRef<'a, C>,
_request: Request,
dir: Dir,
peri_addr: *const u32,
mem_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: WordSize,
_options: TransferOptions,
) -> Self {
let ch = channel.regs().ch(channel.num());
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
#[cfg(bdma_v2)]
critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
let mut this = Self { channel };
this.clear_irqs();
#[cfg(dmamux)]
super::dmamux::configure_dmamux(&mut *this.channel, _request);
ch.par().write_value(peri_addr as u32);
ch.mar().write_value(mem_addr as u32);
ch.ndtr().write(|w| w.set_ndt(mem_len as u16));
ch.cr().write(|w| {
w.set_psize(data_size.into());
w.set_msize(data_size.into());
if incr_mem {
w.set_minc(vals::Inc::ENABLED);
} else {
w.set_minc(vals::Inc::DISABLED);
}
w.set_dir(dir.into());
w.set_teie(true);
w.set_tcie(true);
w.set_en(true);
});
this
}
fn clear_irqs(&mut self) {
unsafe {
self.channel.regs().ifcr().write(|w| {
w.set_tcif(self.channel.num(), true);
w.set_teif(self.channel.num(), true);
})
}
}
pub fn request_stop(&mut self) {
let ch = self.channel.regs().ch(self.channel.num());
// Disable the channel. Keep the IEs enabled so the irqs still fire.
unsafe {
ch.cr().write(|w| {
w.set_teie(true);
w.set_tcie(true);
})
}
}
pub fn is_running(&mut self) -> bool {
let ch = self.channel.regs().ch(self.channel.num());
unsafe { ch.cr().read() }.en()
}
/// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation.
pub fn get_remaining_transfers(&self) -> u16 {
let ch = self.channel.regs().ch(self.channel.num());
unsafe { ch.ndtr().read() }.ndt()
}
pub fn blocking_wait(mut self) {
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
core::mem::forget(self);
}
}
impl<'a, C: Channel> Drop for Transfer<'a, C> {
fn drop(&mut self) {
self.request_stop();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
impl<'a, C: Channel> Future for Transfer<'a, C> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
STATE.ch_wakers[self.channel.index()].register(cx.waker());
if self.is_running() {
Poll::Pending
} else {
Poll::Ready(())
}
} }
} }
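Both completion styles are available on the new `Transfer`: await it from async code, or spin on `blocking_wait` from blocking code. A sketch of the blocking write path, written as if inside this module (`ch`, `request` and `periph_dr` are placeholders):

    // Sketch: start a memory-to-peripheral write and wait for it synchronously.
    fn write_blocking<C: Channel>(ch: &mut C, request: Request, periph_dr: *mut u8, data: &[u8]) {
        // Safety: periph_dr must be a writable peripheral register valid for the transfer.
        let transfer = unsafe { Transfer::new_write(ch, request, data, periph_dr, Default::default()) };
        // Spins until the channel reports not-running, then forgets the guard so Drop
        // does not issue a second stop request.
        transfer.blocking_wait();
    }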


@@ -1,15 +1,44 @@
+use core::future::Future;
+use core::pin::Pin;
 use core::sync::atomic::{fence, Ordering};
-use core::task::Waker;
+use core::task::{Context, Poll};
 use embassy_cortex_m::interrupt::Priority;
+use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
 use embassy_sync::waitqueue::AtomicWaker;
+use pac::dma::regs;
-use super::{Burst, FifoThreshold, FlowControl, Request, TransferOptions, Word, WordSize};
+use super::{Dir, Word, WordSize};
 use crate::_generated::DMA_CHANNEL_COUNT;
 use crate::interrupt::{Interrupt, InterruptExt};
-use crate::pac::dma::{regs, vals};
+use crate::pac::dma::vals;
 use crate::{interrupt, pac};
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[non_exhaustive]
+pub struct TransferOptions {
+    /// Peripheral burst transfer configuration
+    pub pburst: Burst,
+    /// Memory burst transfer configuration
+    pub mburst: Burst,
+    /// Flow control configuration
+    pub flow_ctrl: FlowControl,
+    /// FIFO threshold for DMA FIFO mode. If none, direct mode is used.
+    pub fifo_threshold: Option<FifoThreshold>,
+}
+impl Default for TransferOptions {
+    fn default() -> Self {
+        Self {
+            pburst: Burst::Single,
+            mburst: Burst::Single,
+            flow_ctrl: FlowControl::Dma,
+            fifo_threshold: None,
+        }
+    }
+}
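Because the struct is `#[non_exhaustive]`, callers start from `Default::default()` and override individual fields. For example, to ask for 4-beat bursts with a half-full FIFO (values chosen purely for illustration):

    let mut opts = TransferOptions::default();
    opts.pburst = Burst::Incr4;
    opts.mburst = Burst::Incr4;
    opts.fifo_threshold = Some(FifoThreshold::Half);
    // flow_ctrl keeps its default, FlowControl::Dma.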
impl From<WordSize> for vals::Size { impl From<WordSize> for vals::Size {
fn from(raw: WordSize) -> Self { fn from(raw: WordSize) -> Self {
match raw { match raw {
@ -20,6 +49,28 @@ impl From<WordSize> for vals::Size {
} }
} }
impl From<Dir> for vals::Dir {
fn from(raw: Dir) -> Self {
match raw {
Dir::MemoryToPeripheral => Self::MEMORYTOPERIPHERAL,
Dir::PeripheralToMemory => Self::PERIPHERALTOMEMORY,
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Burst {
/// Single transfer
Single,
/// Incremental burst of 4 beats
Incr4,
/// Incremental burst of 8 beats
Incr8,
/// Incremental burst of 16 beats
Incr16,
}
impl From<Burst> for vals::Burst { impl From<Burst> for vals::Burst {
fn from(burst: Burst) -> Self { fn from(burst: Burst) -> Self {
match burst { match burst {
@ -31,6 +82,15 @@ impl From<Burst> for vals::Burst {
} }
} }
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum FlowControl {
/// Flow control by DMA
Dma,
/// Flow control by peripheral
Peripheral,
}
impl From<FlowControl> for vals::Pfctrl { impl From<FlowControl> for vals::Pfctrl {
fn from(flow: FlowControl) -> Self { fn from(flow: FlowControl) -> Self {
match flow { match flow {
@ -40,6 +100,19 @@ impl From<FlowControl> for vals::Pfctrl {
} }
} }
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum FifoThreshold {
/// 1/4 full FIFO
Quarter,
/// 1/2 full FIFO
Half,
/// 3/4 full FIFO
ThreeQuarters,
/// Full FIFO
Full,
}
impl From<FifoThreshold> for vals::Fth { impl From<FifoThreshold> for vals::Fth {
fn from(value: FifoThreshold) -> Self { fn from(value: FifoThreshold) -> Self {
match value { match value {
@ -51,27 +124,15 @@ impl From<FifoThreshold> for vals::Fth {
} }
} }
-struct ChannelState {
-    waker: AtomicWaker,
-}
-impl ChannelState {
-    const fn new() -> Self {
-        Self {
-            waker: AtomicWaker::new(),
-        }
-    }
-}
 struct State {
-    channels: [ChannelState; DMA_CHANNEL_COUNT],
+    ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT],
 }
 impl State {
     const fn new() -> Self {
-        const CH: ChannelState = ChannelState::new();
+        const AW: AtomicWaker = AtomicWaker::new();
         Self {
-            channels: [CH; DMA_CHANNEL_COUNT],
+            ch_wakers: [AW; DMA_CHANNEL_COUNT],
         }
     }
 }
@ -92,158 +153,183 @@ pub(crate) unsafe fn init(irq_priority: Priority) {
foreach_dma_channel! { foreach_dma_channel! {
($channel_peri:ident, $dma_peri:ident, dma, $channel_num:expr, $index:expr, $dmamux:tt) => { ($channel_peri:ident, $dma_peri:ident, dma, $channel_num:expr, $index:expr, $dmamux:tt) => {
impl crate::dma::sealed::Channel for crate::peripherals::$channel_peri { impl sealed::Channel for crate::peripherals::$channel_peri {
unsafe fn start_write<W: Word>(&mut self, request: Request, buf: *const [W], reg_addr: *mut W, options: TransferOptions) { fn regs(&self) -> pac::dma::Dma {
let (ptr, len) = super::slice_ptr_parts(buf); pac::$dma_peri
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
request,
vals::Dir::MEMORYTOPERIPHERAL,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
)
} }
fn num(&self) -> usize {
unsafe fn start_write_repeated<W: Word>(&mut self, request: Request, repeated: *const W, count: usize, reg_addr: *mut W, options: TransferOptions) { $channel_num
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
request,
vals::Dir::MEMORYTOPERIPHERAL,
reg_addr as *const u32,
repeated as *mut u32,
count,
false,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
)
} }
fn index(&self) -> usize {
unsafe fn start_read<W: Word>(&mut self, request: Request, reg_addr: *const W, buf: *mut [W], options: TransferOptions) { $index
let (ptr, len) = super::slice_ptr_parts_mut(buf);
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
request,
vals::Dir::PERIPHERALTOMEMORY,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
);
} }
unsafe fn start_double_buffered_read<W: Word>(
&mut self,
request: Request,
reg_addr: *const W,
buffer0: *mut W,
buffer1: *mut W,
buffer_len: usize,
options: TransferOptions,
) {
low_level_api::start_dbm_transfer(
pac::$dma_peri,
$channel_num,
request,
vals::Dir::PERIPHERALTOMEMORY,
reg_addr as *const u32,
buffer0 as *mut u32,
buffer1 as *mut u32,
buffer_len,
true,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
);
}
unsafe fn set_buffer0<W: Word>(&mut self, buffer: *mut W) {
low_level_api::set_dbm_buffer0(pac::$dma_peri, $channel_num, buffer as *mut u32);
}
unsafe fn set_buffer1<W: Word>(&mut self, buffer: *mut W) {
low_level_api::set_dbm_buffer1(pac::$dma_peri, $channel_num, buffer as *mut u32);
}
unsafe fn is_buffer0_accessible(&mut self) -> bool {
low_level_api::is_buffer0_accessible(pac::$dma_peri, $channel_num)
}
fn request_stop(&mut self) {
unsafe {low_level_api::request_stop(pac::$dma_peri, $channel_num);}
}
fn is_running(&self) -> bool {
unsafe {low_level_api::is_running(pac::$dma_peri, $channel_num)}
}
fn remaining_transfers(&mut self) -> u16 {
unsafe {low_level_api::get_remaining_transfers(pac::$dma_peri, $channel_num)}
}
fn set_waker(&mut self, waker: &Waker) {
unsafe {low_level_api::set_waker($index, waker )}
}
fn on_irq() { fn on_irq() {
unsafe { unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
low_level_api::on_irq_inner(pac::$dma_peri, $channel_num, $index);
} }
} }
}
impl crate::dma::Channel for crate::peripherals::$channel_peri { } impl Channel for crate::peripherals::$channel_peri {}
}; };
} }
mod low_level_api { /// Safety: Must be called with a matching set of parameters for a valid dma channel
pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index: usize) {
let cr = dma.st(channel_num).cr();
let isr = dma.isr(channel_num / 4).read();
if isr.teif(channel_num % 4) {
panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num);
}
if isr.tcif(channel_num % 4) && cr.read().tcie() {
/* acknowledge transfer complete interrupt */
dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true));
STATE.ch_wakers[index].wake();
}
}
#[cfg(any(dma_v2, dmamux))]
pub type Request = u8;
#[cfg(not(any(dma_v2, dmamux)))]
pub type Request = ();
#[cfg(dmamux)]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {}
#[cfg(not(dmamux))]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
pub(crate) mod sealed {
use super::*; use super::*;
pub unsafe fn start_transfer( pub trait Channel {
dma: pac::dma::Dma, fn regs(&self) -> pac::dma::Dma;
channel_number: u8, fn num(&self) -> usize;
fn index(&self) -> usize;
fn on_irq();
}
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a, C: Channel> {
channel: PeripheralRef<'a, C>,
}
impl<'a, C: Channel> Transfer<'a, C> {
pub unsafe fn new_read<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request, request: Request,
dir: vals::Dir, peri_addr: *mut W,
buf: &'a mut [W],
options: TransferOptions,
) -> Self {
Self::new_read_raw(channel, request, peri_addr, buf, options)
}
pub unsafe fn new_read_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buf: *mut [W],
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts_mut(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request,
Dir::PeripheralToMemory,
peri_addr as *const u32,
ptr as *mut u32,
len,
true,
W::bits(),
options,
)
}
pub unsafe fn new_write<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
buf: &'a [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
Self::new_write_raw(channel, request, buf, peri_addr, options)
}
pub unsafe fn new_write_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
buf: *const [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
ptr as *mut u32,
len,
true,
W::bits(),
options,
)
}
pub unsafe fn new_write_repeated<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
repeated: &'a W,
count: usize,
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
Self::new_inner(
channel,
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
repeated as *const W as *mut u32,
count,
false,
W::bits(),
options,
)
}
unsafe fn new_inner(
channel: PeripheralRef<'a, C>,
_request: Request,
dir: Dir,
peri_addr: *const u32, peri_addr: *const u32,
mem_addr: *mut u32, mem_addr: *mut u32,
mem_len: usize, mem_len: usize,
incr_mem: bool, incr_mem: bool,
data_size: vals::Size, data_size: WordSize,
options: TransferOptions, options: TransferOptions,
#[cfg(dmamux)] dmamux_regs: pac::dmamux::Dmamux, ) -> Self {
#[cfg(dmamux)] dmamux_ch_num: u8, let ch = channel.regs().st(channel.num());
) {
#[cfg(dmamux)]
super::super::dmamux::configure_dmamux(dmamux_regs, dmamux_ch_num, request);
// "Preceding reads and writes cannot be moved past subsequent writes." // "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst); fence(Ordering::SeqCst);
reset_status(dma, channel_number); let mut this = Self { channel };
this.clear_irqs();
#[cfg(dmamux)]
super::dmamux::configure_dmamux(&mut *this.channel, _request);
let ch = dma.st(channel_number as _);
ch.par().write_value(peri_addr as u32); ch.par().write_value(peri_addr as u32);
ch.m0ar().write_value(mem_addr as u32); ch.m0ar().write_value(mem_addr as u32);
ch.ndtr().write_value(regs::Ndtr(mem_len as _)); ch.ndtr().write_value(regs::Ndtr(mem_len as _));
@ -258,15 +344,14 @@ mod low_level_api {
} }
}); });
ch.cr().write(|w| { ch.cr().write(|w| {
w.set_dir(dir); w.set_dir(dir.into());
w.set_msize(data_size); w.set_msize(data_size.into());
w.set_psize(data_size); w.set_psize(data_size.into());
w.set_pl(vals::Pl::VERYHIGH); w.set_pl(vals::Pl::VERYHIGH);
if incr_mem { w.set_minc(match incr_mem {
w.set_minc(vals::Inc::INCREMENTED); true => vals::Inc::INCREMENTED,
} else { false => vals::Inc::FIXED,
w.set_minc(vals::Inc::FIXED); });
}
w.set_pinc(vals::Inc::FIXED); w.set_pinc(vals::Inc::FIXED);
w.set_teie(true); w.set_teie(true);
w.set_tcie(true); w.set_tcie(true);
@ -274,7 +359,7 @@ mod low_level_api {
w.set_trbuff(true); w.set_trbuff(true);
#[cfg(dma_v2)] #[cfg(dma_v2)]
w.set_chsel(request); w.set_chsel(_request);
w.set_pburst(options.pburst.into()); w.set_pburst(options.pburst.into());
w.set_mburst(options.mburst.into()); w.set_mburst(options.mburst.into());
@ -282,159 +367,76 @@ mod low_level_api {
w.set_en(true); w.set_en(true);
}); });
this
} }
pub unsafe fn start_dbm_transfer( fn clear_irqs(&mut self) {
dma: pac::dma::Dma, let isrn = self.channel.num() / 4;
channel_number: u8, let isrbit = self.channel.num() % 4;
request: Request,
dir: vals::Dir,
peri_addr: *const u32,
mem0_addr: *mut u32,
mem1_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: vals::Size,
options: TransferOptions,
#[cfg(dmamux)] dmamux_regs: pac::dmamux::Dmamux,
#[cfg(dmamux)] dmamux_ch_num: u8,
) {
#[cfg(dmamux)]
super::super::dmamux::configure_dmamux(dmamux_regs, dmamux_ch_num, request);
trace!( unsafe {
"Starting DBM transfer with 0: 0x{:x}, 1: 0x{:x}, len: 0x{:x}", self.channel.regs().ifcr(isrn).write(|w| {
mem0_addr as u32, w.set_tcif(isrbit, true);
mem1_addr as u32, w.set_teif(isrbit, true);
mem_len })
);
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
reset_status(dma, channel_number);
let ch = dma.st(channel_number as _);
ch.par().write_value(peri_addr as u32);
ch.m0ar().write_value(mem0_addr as u32);
// configures the second buffer for DBM
ch.m1ar().write_value(mem1_addr as u32);
ch.ndtr().write_value(regs::Ndtr(mem_len as _));
ch.cr().write(|w| {
w.set_dir(dir);
w.set_msize(data_size);
w.set_psize(data_size);
w.set_pl(vals::Pl::VERYHIGH);
if incr_mem {
w.set_minc(vals::Inc::INCREMENTED);
} else {
w.set_minc(vals::Inc::FIXED);
} }
w.set_pinc(vals::Inc::FIXED);
w.set_teie(true);
w.set_tcie(true);
#[cfg(dma_v1)]
w.set_trbuff(true);
#[cfg(dma_v2)]
w.set_chsel(request);
// enable double buffered mode
w.set_dbm(vals::Dbm::ENABLED);
w.set_pburst(options.pburst.into());
w.set_mburst(options.mburst.into());
w.set_pfctrl(options.flow_ctrl.into());
w.set_en(true);
});
} }
pub unsafe fn set_dbm_buffer0(dma: pac::dma::Dma, channel_number: u8, mem_addr: *mut u32) { pub fn request_stop(&mut self) {
// get a handle on the channel itself let ch = self.channel.regs().st(self.channel.num());
let ch = dma.st(channel_number as _);
// change M0AR to the new address
ch.m0ar().write_value(mem_addr as _);
}
pub unsafe fn set_dbm_buffer1(dma: pac::dma::Dma, channel_number: u8, mem_addr: *mut u32) {
// get a handle on the channel itself
let ch = dma.st(channel_number as _);
// change M1AR to the new address
ch.m1ar().write_value(mem_addr as _);
}
pub unsafe fn is_buffer0_accessible(dma: pac::dma::Dma, channel_number: u8) -> bool {
// get a handle on the channel itself
let ch = dma.st(channel_number as _);
// check the current target register value
ch.cr().read().ct() == vals::Ct::MEMORY1
}
/// Stops the DMA channel.
pub unsafe fn request_stop(dma: pac::dma::Dma, channel_number: u8) {
// get a handle on the channel itself
let ch = dma.st(channel_number as _);
// Disable the channel. Keep the IEs enabled so the irqs still fire. // Disable the channel. Keep the IEs enabled so the irqs still fire.
unsafe {
ch.cr().write(|w| { ch.cr().write(|w| {
w.set_teie(true); w.set_teie(true);
w.set_tcie(true); w.set_tcie(true);
}); })
}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
} }
/// Gets the running status of the channel pub fn is_running(&mut self) -> bool {
pub unsafe fn is_running(dma: pac::dma::Dma, ch: u8) -> bool { let ch = self.channel.regs().st(self.channel.num());
// get a handle on the channel itself unsafe { ch.cr().read() }.en()
let ch = dma.st(ch as _);
// Get whether it's enabled (running)
ch.cr().read().en()
} }
/// Gets the total remaining transfers for the channel /// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation. /// Note: this will be zero for transfers that completed without cancellation.
pub unsafe fn get_remaining_transfers(dma: pac::dma::Dma, ch: u8) -> u16 { pub fn get_remaining_transfers(&self) -> u16 {
// get a handle on the channel itself let ch = self.channel.regs().st(self.channel.num());
let ch = dma.st(ch as _); unsafe { ch.ndtr().read() }.ndt()
// read the remaining transfer count. If this is zero, the transfer completed fully.
ch.ndtr().read().ndt()
} }
/// Sets the waker for the specified DMA channel pub fn blocking_wait(mut self) {
pub unsafe fn set_waker(state_number: usize, waker: &Waker) { while self.is_running() {}
STATE.channels[state_number].waker.register(waker);
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
core::mem::forget(self);
} }
}
pub unsafe fn reset_status(dma: pac::dma::Dma, channel_number: u8) { impl<'a, C: Channel> Drop for Transfer<'a, C> {
let isrn = channel_number as usize / 4; fn drop(&mut self) {
let isrbit = channel_number as usize % 4; self.request_stop();
while self.is_running() {}
dma.ifcr(isrn).write(|w| { // "Subsequent reads and writes cannot be moved ahead of preceding reads."
w.set_tcif(isrbit, true); fence(Ordering::SeqCst);
w.set_teif(isrbit, true);
});
} }
}
/// Safety: Must be called with a matching set of parameters for a valid dma channel impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
pub unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: u8, state_index: u8) { impl<'a, C: Channel> Future for Transfer<'a, C> {
let channel_num = channel_num as usize; type Output = ();
let state_index = state_index as usize; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
STATE.ch_wakers[self.channel.index()].register(cx.waker());
let cr = dma.st(channel_num).cr(); if self.is_running() {
let isr = dma.isr(channel_num / 4).read(); Poll::Pending
} else {
if isr.teif(channel_num % 4) { Poll::Ready(())
panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num);
}
if isr.tcif(channel_num % 4) && cr.read().tcie() {
/* acknowledge transfer complete interrupt */
dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true));
STATE.channels[state_index].waker.wake();
} }
} }
} }
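`new_write_repeated` keeps the memory address fixed (memory increment disabled), which suits feeding one constant word to a peripheral many times. A placeholder sketch, written as if inside this module:

    // Sketch: push the same word to a peripheral register `count` times.
    async fn fill<C: Channel>(ch: &mut C, request: Request, periph_dr: *mut u16, word: &u16, count: usize) {
        // Safety: periph_dr must be a writable peripheral register valid for the transfer.
        let transfer = unsafe {
            Transfer::new_write_repeated(ch, request, word, count, periph_dr, Default::default())
        };
        transfer.await;
    }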


@@ -2,8 +2,8 @@
 use crate::{pac, peripherals};
-pub(crate) unsafe fn configure_dmamux(dmamux_regs: pac::dmamux::Dmamux, dmamux_ch_num: u8, request: u8) {
-    let ch_mux_regs = dmamux_regs.ccr(dmamux_ch_num as _);
+pub(crate) unsafe fn configure_dmamux<M: MuxChannel>(channel: &mut M, request: u8) {
+    let ch_mux_regs = channel.mux_regs().ccr(channel.mux_num());
     ch_mux_regs.write(|reg| {
         reg.set_nbreq(0);
         reg.set_dmareq_id(request);
@@ -14,11 +14,11 @@ pub(crate) unsafe fn configure_dmamux(dmamux_regs: pac::dmamux::Dmamux, dmamux_c
     });
 }
-pub(crate) mod sealed {
+pub(crate) mod dmamux_sealed {
     use super::*;
     pub trait MuxChannel {
-        const DMAMUX_CH_NUM: u8;
-        const DMAMUX_REGS: pac::dmamux::Dmamux;
+        fn mux_regs(&self) -> pac::dmamux::Dmamux;
+        fn mux_num(&self) -> usize;
     }
 }
@@ -26,15 +26,19 @@ pub struct DMAMUX1;
 #[cfg(stm32h7)]
 pub struct DMAMUX2;
-pub trait MuxChannel: sealed::MuxChannel + super::Channel {
+pub trait MuxChannel: dmamux_sealed::MuxChannel {
     type Mux;
 }
 foreach_dma_channel! {
     ($channel_peri:ident, $dma_peri:ident, $version:ident, $channel_num:expr, $index:expr, {dmamux: $dmamux:ident, dmamux_channel: $dmamux_channel:expr}) => {
-        impl sealed::MuxChannel for peripherals::$channel_peri {
-            const DMAMUX_CH_NUM: u8 = $dmamux_channel;
-            const DMAMUX_REGS: pac::dmamux::Dmamux = pac::$dmamux;
+        impl dmamux_sealed::MuxChannel for peripherals::$channel_peri {
+            fn mux_regs(&self) -> pac::dmamux::Dmamux {
+                pac::$dmamux
+            }
+            fn mux_num(&self) -> usize {
+                $dmamux_channel
+            }
         }
         impl MuxChannel for peripherals::$channel_peri {
             type Mux = $dmamux;
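Moving the mux registers and channel number from associated constants to methods lets `configure_dmamux` accept any `MuxChannel` by reference, which is how the new `Transfer` constructors call it (`configure_dmamux(&mut *this.channel, request)`). An illustrative expansion for a channel routed through DMAMUX1 as mux channel 5 (names and numbers are placeholders):

    impl dmamux_sealed::MuxChannel for peripherals::DMA1_CH5 {
        fn mux_regs(&self) -> pac::dmamux::Dmamux {
            pac::DMAMUX1
        }
        fn mux_num(&self) -> usize {
            5
        }
    }
    impl MuxChannel for peripherals::DMA1_CH5 {
        type Mux = DMAMUX1;
    }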


@@ -1,13 +1,30 @@
+#![macro_use]
+use core::future::Future;
+use core::pin::Pin;
 use core::sync::atomic::{fence, Ordering};
-use core::task::Waker;
+use core::task::{Context, Poll};
+use embassy_cortex_m::interrupt::Priority;
+use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
 use embassy_sync::waitqueue::AtomicWaker;
-use super::{Request, TransferOptions, Word, WordSize};
+use super::{Dir, Word, WordSize};
 use crate::_generated::GPDMA_CHANNEL_COUNT;
 use crate::interrupt::{Interrupt, InterruptExt};
-use crate::pac::gpdma::{vals, Gpdma};
-use crate::{interrupt, pac};
+use crate::pac;
+use crate::pac::gpdma::vals;
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+#[non_exhaustive]
+pub struct TransferOptions {}
+impl Default for TransferOptions {
+    fn default() -> Self {
+        Self {}
+    }
+}
impl From<WordSize> for vals::ChTr1Dw { impl From<WordSize> for vals::ChTr1Dw {
fn from(raw: WordSize) -> Self { fn from(raw: WordSize) -> Self {
@ -19,27 +36,15 @@ impl From<WordSize> for vals::ChTr1Dw {
} }
} }
struct ChannelState {
waker: AtomicWaker,
}
impl ChannelState {
const fn new() -> Self {
Self {
waker: AtomicWaker::new(),
}
}
}
struct State { struct State {
channels: [ChannelState; GPDMA_CHANNEL_COUNT], ch_wakers: [AtomicWaker; GPDMA_CHANNEL_COUNT],
} }
impl State { impl State {
const fn new() -> Self { const fn new() -> Self {
const CH: ChannelState = ChannelState::new(); const AW: AtomicWaker = AtomicWaker::new();
Self { Self {
channels: [CH; GPDMA_CHANNEL_COUNT], ch_wakers: [AW; GPDMA_CHANNEL_COUNT],
} }
} }
} }
@@ -47,10 +52,12 @@ impl State {
 static STATE: State = State::new();
 /// safety: must be called only once
-pub(crate) unsafe fn init() {
+pub(crate) unsafe fn init(irq_priority: Priority) {
     foreach_interrupt! {
         ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => {
-            interrupt::$irq::steal().enable();
+            let irq = crate::interrupt::$irq::steal();
+            irq.set_priority(irq_priority);
+            irq.enable();
         };
     }
     crate::_generated::init_gpdma();
@ -58,15 +65,103 @@ pub(crate) unsafe fn init() {
foreach_dma_channel! { foreach_dma_channel! {
($channel_peri:ident, $dma_peri:ident, gpdma, $channel_num:expr, $index:expr, $dmamux:tt) => { ($channel_peri:ident, $dma_peri:ident, gpdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
impl crate::dma::sealed::Channel for crate::peripherals::$channel_peri { impl sealed::Channel for crate::peripherals::$channel_peri {
unsafe fn start_write<W: Word>(&mut self, request: Request, buf: *const [W], reg_addr: *mut W, options: TransferOptions) { fn regs(&self) -> pac::gpdma::Gpdma {
let (ptr, len) = super::slice_ptr_parts(buf); pac::$dma_peri
low_level_api::start_transfer( }
pac::$dma_peri, fn num(&self) -> usize {
$channel_num, $channel_num
}
fn index(&self) -> usize {
$index
}
fn on_irq() {
unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
}
}
impl Channel for crate::peripherals::$channel_peri {}
};
}
/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub(crate) unsafe fn on_irq_inner(dma: pac::gpdma::Gpdma, channel_num: usize, index: usize) {
let ch = dma.ch(channel_num);
let sr = ch.sr().read();
if sr.dtef() {
panic!(
"DMA: data transfer error on DMA@{:08x} channel {}",
dma.0 as u32, channel_num
);
}
if sr.usef() {
panic!(
"DMA: user settings error on DMA@{:08x} channel {}",
dma.0 as u32, channel_num
);
}
if sr.suspf() || sr.tcf() {
// disable all xxIEs to prevent the irq from firing again.
ch.cr().write(|_| {});
// Wake the future. It'll look at tcf and see it's set.
STATE.ch_wakers[index].wake();
}
}
pub type Request = u8;
#[cfg(dmamux)]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {}
#[cfg(not(dmamux))]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
pub(crate) mod sealed {
use super::*;
pub trait Channel {
fn regs(&self) -> pac::gpdma::Gpdma;
fn num(&self) -> usize;
fn index(&self) -> usize;
fn on_irq();
}
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a, C: Channel> {
channel: PeripheralRef<'a, C>,
}
impl<'a, C: Channel> Transfer<'a, C> {
pub unsafe fn new_read<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buf: &'a mut [W],
options: TransferOptions,
) -> Self {
Self::new_read_raw(channel, request, peri_addr, buf, options)
}
pub unsafe fn new_read_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buf: *mut [W],
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts_mut(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request, request,
low_level_api::Dir::MemoryToPeripheral, Dir::PeripheralToMemory,
reg_addr as *const u32, peri_addr as *const u32,
ptr as *mut u32, ptr as *mut u32,
len, len,
true, true,
@ -75,14 +170,57 @@ foreach_dma_channel! {
) )
} }
unsafe fn start_write_repeated<W: Word>(&mut self, request: Request, repeated: *const W, count: usize, reg_addr: *mut W, options: TransferOptions) { pub unsafe fn new_write<W: Word>(
low_level_api::start_transfer( channel: impl Peripheral<P = C> + 'a,
pac::$dma_peri, request: Request,
$channel_num, buf: &'a [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
Self::new_write_raw(channel, request, buf, peri_addr, options)
}
pub unsafe fn new_write_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
buf: *const [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request, request,
low_level_api::Dir::MemoryToPeripheral, Dir::MemoryToPeripheral,
reg_addr as *const u32, peri_addr as *const u32,
repeated as *mut u32, ptr as *mut u32,
len,
true,
W::bits(),
options,
)
}
pub unsafe fn new_write_repeated<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
repeated: &'a W,
count: usize,
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
Self::new_inner(
channel,
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
repeated as *const W as *mut u32,
count, count,
false, false,
W::bits(), W::bits(),
@ -90,85 +228,8 @@ foreach_dma_channel! {
) )
} }
unsafe fn start_read<W: Word>(&mut self, request: Request, reg_addr: *const W, buf: *mut [W], options: TransferOptions) { unsafe fn new_inner(
let (ptr, len) = super::slice_ptr_parts_mut(buf); channel: PeripheralRef<'a, C>,
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
request,
low_level_api::Dir::PeripheralToMemory,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
W::bits(),
options,
);
}
unsafe fn start_double_buffered_read<W: Word>(
&mut self,
_request: Request,
_reg_addr: *const W,
_buffer0: *mut W,
_buffer1: *mut W,
_buffer_len: usize,
_options: TransferOptions,
) {
panic!("Unsafe double buffered mode is unavailable on GPBDMA");
}
unsafe fn set_buffer0<W: Word>(&mut self, _buffer: *mut W) {
panic!("Unsafe double buffered mode is unavailable on GPBDMA");
}
unsafe fn set_buffer1<W: Word>(&mut self, _buffer: *mut W) {
panic!("Unsafe double buffered mode is unavailable on GPBDMA");
}
unsafe fn is_buffer0_accessible(&mut self) -> bool {
panic!("Unsafe double buffered mode is unavailable on GPBDMA");
}
fn request_stop(&mut self) {
unsafe {low_level_api::request_stop(pac::$dma_peri, $channel_num);}
}
fn is_running(&self) -> bool {
unsafe {low_level_api::is_running(pac::$dma_peri, $channel_num)}
}
fn remaining_transfers(&mut self) -> u16 {
unsafe {low_level_api::get_remaining_transfers(pac::$dma_peri, $channel_num)}
}
fn set_waker(&mut self, waker: &Waker) {
unsafe {low_level_api::set_waker($index, waker )}
}
fn on_irq() {
unsafe {
low_level_api::on_irq_inner(pac::$dma_peri, $channel_num, $index);
}
}
}
impl crate::dma::Channel for crate::peripherals::$channel_peri { }
};
}
mod low_level_api {
use super::*;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Dir {
MemoryToPeripheral,
PeripheralToMemory,
}
pub unsafe fn start_transfer(
dma: Gpdma,
channel_number: u8,
request: Request, request: Request,
dir: Dir, dir: Dir,
peri_addr: *const u32, peri_addr: *const u32,
@ -176,24 +237,19 @@ mod low_level_api {
mem_len: usize, mem_len: usize,
incr_mem: bool, incr_mem: bool,
data_size: WordSize, data_size: WordSize,
options: TransferOptions, _options: TransferOptions,
) { ) -> Self {
assert!(options.mburst == crate::dma::Burst::Single, "Burst mode not supported"); let ch = channel.regs().ch(channel.num());
assert!(options.pburst == crate::dma::Burst::Single, "Burst mode not supported");
assert!(
options.flow_ctrl == crate::dma::FlowControl::Dma,
"Peripheral flow control not supported"
);
assert!(options.fifo_threshold.is_none(), "FIFO mode not supported");
// "Preceding reads and writes cannot be moved past subsequent writes." // "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst); fence(Ordering::SeqCst);
let ch = dma.ch(channel_number as _); let this = Self { channel };
#[cfg(dmamux)]
super::dmamux::configure_dmamux(&mut *this.channel, request);
// Reset ch
ch.cr().write(|w| w.set_reset(true)); ch.cr().write(|w| w.set_reset(true));
ch.llr().write(|_| {}); // no linked list ch.llr().write(|_| {}); // no linked list
ch.tr1().write(|w| { ch.tr1().write(|w| {
w.set_sdw(data_size.into()); w.set_sdw(data_size.into());
@ -234,72 +290,66 @@ mod low_level_api {
// Start it // Start it
w.set_en(true); w.set_en(true);
}); });
this
} }
/// Stops the DMA channel. pub fn request_stop(&mut self) {
pub unsafe fn request_stop(dma: Gpdma, channel_number: u8) { let ch = self.channel.regs().ch(self.channel.num());
// get a handle on the channel itself
let ch = dma.ch(channel_number as _);
// Disable the channel. Keep the IEs enabled so the irqs still fire. // Disable the channel. Keep the IEs enabled so the irqs still fire.
unsafe {
ch.cr().write(|w| { ch.cr().write(|w| {
w.set_tcie(true); w.set_tcie(true);
w.set_useie(true); w.set_useie(true);
w.set_dteie(true); w.set_dteie(true);
w.set_suspie(true); w.set_suspie(true);
}); })
}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
} }
/// Gets the running status of the channel pub fn is_running(&mut self) -> bool {
pub unsafe fn is_running(dma: Gpdma, ch: u8) -> bool { let ch = self.channel.regs().ch(self.channel.num());
let ch = dma.ch(ch as _); !unsafe { ch.sr().read() }.tcf()
!ch.sr().read().tcf()
} }
/// Gets the total remaining transfers for the channel /// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation. /// Note: this will be zero for transfers that completed without cancellation.
pub unsafe fn get_remaining_transfers(dma: Gpdma, ch: u8) -> u16 { pub fn get_remaining_transfers(&self) -> u16 {
// get a handle on the channel itself let ch = self.channel.regs().ch(self.channel.num());
let ch = dma.ch(ch as _); unsafe { ch.br1().read() }.bndt()
// read the remaining transfer count. If this is zero, the transfer completed fully.
ch.br1().read().bndt()
} }
/// Sets the waker for the specified DMA channel pub fn blocking_wait(mut self) {
pub unsafe fn set_waker(state_number: usize, waker: &Waker) { while self.is_running() {}
STATE.channels[state_number].waker.register(waker);
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
core::mem::forget(self);
} }
}
/// Safety: Must be called with a matching set of parameters for a valid dma channel impl<'a, C: Channel> Drop for Transfer<'a, C> {
pub unsafe fn on_irq_inner(dma: Gpdma, channel_num: u8, state_index: u8) { fn drop(&mut self) {
let channel_num = channel_num as usize; self.request_stop();
let state_index = state_index as usize; while self.is_running() {}
let ch = dma.ch(channel_num); // "Subsequent reads and writes cannot be moved ahead of preceding reads."
let sr = ch.sr().read(); fence(Ordering::SeqCst);
if sr.dtef() {
panic!(
"DMA: data transfer error on DMA@{:08x} channel {}",
dma.0 as u32, channel_num
);
}
if sr.usef() {
panic!(
"DMA: user settings error on DMA@{:08x} channel {}",
dma.0 as u32, channel_num
);
} }
}
if sr.suspf() || sr.tcf() { impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
// disable all xxIEs to prevent the irq from firing again. impl<'a, C: Channel> Future for Transfer<'a, C> {
ch.cr().write(|_| {}); type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
STATE.ch_wakers[self.channel.index()].register(cx.waker());
// Wake the future. It'll look at tcf and see it's set. if self.is_running() {
STATE.channels[state_index].waker.wake(); Poll::Pending
} else {
Poll::Ready(())
} }
} }
} }


@@ -1,124 +1,39 @@
-#[cfg(bdma)]
-pub(crate) mod bdma;
 #[cfg(dma)]
 pub(crate) mod dma;
+#[cfg(dma)]
+pub use dma::*;
+// stm32h7 has both dma and bdma. In that case, we export dma as "main" dma,
+// and bdma as "secondary", under `embassy_stm32::dma::bdma`.
+#[cfg(all(bdma, dma))]
+pub mod bdma;
+#[cfg(all(bdma, not(dma)))]
+pub(crate) mod bdma;
+#[cfg(all(bdma, not(dma)))]
+pub use bdma::*;
+#[cfg(gpdma)]
+pub(crate) mod gpdma;
+#[cfg(gpdma)]
+pub use gpdma::*;
 #[cfg(dmamux)]
 mod dmamux;
-#[cfg(gpdma)]
-mod gpdma;
-use core::future::Future;
 use core::mem;
-use core::pin::Pin;
-use core::task::{Context, Poll, Waker};
+#[cfg(any(dma, bdma))]
 use embassy_cortex_m::interrupt::Priority;
-use embassy_hal_common::{impl_peripheral, into_ref};
+use embassy_hal_common::impl_peripheral;
 #[cfg(dmamux)]
 pub use self::dmamux::*;
-use crate::Peripheral;
-#[cfg(feature = "unstable-pac")]
-pub mod low_level {
-    pub use super::transfers::*;
-}
-pub(crate) use transfers::*;
-#[cfg(any(bdma_v2, dma_v2, dmamux, gpdma))]
-pub type Request = u8;
-#[cfg(not(any(bdma_v2, dma_v2, dmamux, gpdma)))]
-pub type Request = ();
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+enum Dir {
+    MemoryToPeripheral,
+    PeripheralToMemory,
+}
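Which `Transfer` a user gets from `embassy_stm32::dma` therefore depends on the chip family, per the comment above. A sketch of the resulting paths:

    // Parts with a single DMA flavor re-export that flavor's items at the top level:
    //     use embassy_stm32::dma::{Transfer, TransferOptions};
    // stm32h7 parts (DMA + BDMA) export DMA as the "main" flavor and nest BDMA:
    //     use embassy_stm32::dma::Transfer;        // DMA streams
    //     use embassy_stm32::dma::bdma::Transfer;  // BDMA channels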
pub(crate) mod sealed {
use super::*;
pub trait Word {}
pub trait Channel {
/// Starts this channel for writing a stream of words.
///
/// Safety:
/// - `buf` must point to a valid buffer for DMA reading.
/// - `buf` must be alive for the entire duration of the DMA transfer.
/// - `reg_addr` must be a valid peripheral register address to write to.
unsafe fn start_write<W: super::Word>(
&mut self,
request: Request,
buf: *const [W],
reg_addr: *mut W,
options: TransferOptions,
);
/// Starts this channel for writing a word repeatedly.
///
/// Safety:
/// - `reg_addr` must be a valid peripheral register address to write to.
unsafe fn start_write_repeated<W: super::Word>(
&mut self,
request: Request,
repeated: *const W,
count: usize,
reg_addr: *mut W,
options: TransferOptions,
);
/// Starts this channel for reading a stream of words.
///
/// Safety:
/// - `buf` must point to a valid buffer for DMA writing.
/// - `buf` must be alive for the entire duration of the DMA transfer.
/// - `reg_addr` must be a valid peripheral register address to read from.
unsafe fn start_read<W: super::Word>(
&mut self,
request: Request,
reg_addr: *const W,
buf: *mut [W],
options: TransferOptions,
);
/// DMA double-buffered mode is unsafe as UB can happen when the hardware writes to a buffer currently owned by the software
/// more information can be found here: https://github.com/embassy-rs/embassy/issues/702
/// This feature is now used solely for the purposes of implementing giant DMA transfers required for DCMI
unsafe fn start_double_buffered_read<W: super::Word>(
&mut self,
request: Request,
reg_addr: *const W,
buffer0: *mut W,
buffer1: *mut W,
buffer_len: usize,
options: TransferOptions,
);
unsafe fn set_buffer0<W: super::Word>(&mut self, buffer: *mut W);
unsafe fn set_buffer1<W: super::Word>(&mut self, buffer: *mut W);
unsafe fn is_buffer0_accessible(&mut self) -> bool;
/// Requests the channel to stop.
/// NOTE: The channel does not immediately stop, you have to wait
/// for `is_running() = false`.
fn request_stop(&mut self);
/// Returns whether this channel is running or stopped.
///
/// The channel stops running when it either completes or is manually stopped.
fn is_running(&self) -> bool;
/// Returns the total number of remaining transfers.
fn remaining_transfers(&mut self) -> u16;
/// Sets the waker that is called when this channel stops (either completed or manually stopped)
fn set_waker(&mut self, waker: &Waker);
/// This is called when this channel triggers an interrupt.
/// Note: Because some channels share an interrupt, this function might be
/// called for a channel that didn't trigger an interrupt.
fn on_irq();
}
} }
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
@@ -139,191 +54,39 @@ impl WordSize {
     }
 }
-pub trait Word: sealed::Word {
+mod word_sealed {
+    pub trait Word {}
+}
+pub trait Word: word_sealed::Word {
     fn bits() -> WordSize;
 }
-impl sealed::Word for u8 {}
+impl word_sealed::Word for u8 {}
 impl Word for u8 {
     fn bits() -> WordSize {
         WordSize::OneByte
     }
 }
-impl sealed::Word for u16 {}
+impl word_sealed::Word for u16 {}
 impl Word for u16 {
     fn bits() -> WordSize {
         WordSize::TwoBytes
     }
 }
-impl sealed::Word for u32 {}
+impl word_sealed::Word for u32 {}
 impl Word for u32 {
     fn bits() -> WordSize {
         WordSize::FourBytes
     }
 }
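`Word` stays sealed to the three supported element widths, and generic code derives the hardware transfer size from `W::bits()`. A toy sketch of that idea:

    // Toy sketch: element width in bytes, derived from the sealed Word trait.
    fn element_size<W: Word>() -> usize {
        match W::bits() {
            WordSize::OneByte => 1,
            WordSize::TwoBytes => 2,
            WordSize::FourBytes => 4,
        }
    }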
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Burst {
/// Single transfer
Single,
/// Incremental burst of 4 beats
Incr4,
/// Incremental burst of 8 beats
Incr8,
/// Incremental burst of 16 beats
Incr16,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum FlowControl {
/// Flow control by DMA
Dma,
/// Flow control by peripheral
Peripheral,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum FifoThreshold {
/// 1/4 full FIFO
Quarter,
/// 1/2 full FIFO
Half,
/// 3/4 full FIFO
ThreeQuarters,
/// Full FIFO
Full,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct TransferOptions {
/// Peripheral burst transfer configuration
pub pburst: Burst,
/// Memory burst transfer configuration
pub mburst: Burst,
/// Flow control configuration
pub flow_ctrl: FlowControl,
/// FIFO threshold for DMA FIFO mode. If none, direct mode is used.
pub fifo_threshold: Option<FifoThreshold>,
}
impl Default for TransferOptions {
fn default() -> Self {
Self {
pburst: Burst::Single,
mburst: Burst::Single,
flow_ctrl: FlowControl::Dma,
fifo_threshold: None,
}
}
}
mod transfers {
use embassy_hal_common::PeripheralRef;
use super::*;
#[allow(unused)]
pub fn read<'a, W: Word>(
channel: impl Peripheral<P = impl Channel> + 'a,
request: Request,
reg_addr: *mut W,
buf: &'a mut [W],
) -> impl Future<Output = ()> + 'a {
assert!(buf.len() > 0 && buf.len() <= 0xFFFF);
into_ref!(channel);
unsafe { channel.start_read::<W>(request, reg_addr, buf, Default::default()) };
Transfer::new(channel)
}
#[allow(unused)]
pub fn write<'a, W: Word>(
channel: impl Peripheral<P = impl Channel> + 'a,
request: Request,
buf: &'a [W],
reg_addr: *mut W,
) -> impl Future<Output = ()> + 'a {
assert!(buf.len() > 0 && buf.len() <= 0xFFFF);
into_ref!(channel);
unsafe { channel.start_write::<W>(request, buf, reg_addr, Default::default()) };
Transfer::new(channel)
}
#[allow(unused)]
pub fn write_repeated<'a, W: Word>(
channel: impl Peripheral<P = impl Channel> + 'a,
request: Request,
repeated: *const W,
count: usize,
reg_addr: *mut W,
) -> impl Future<Output = ()> + 'a {
into_ref!(channel);
unsafe { channel.start_write_repeated::<W>(request, repeated, count, reg_addr, Default::default()) };
Transfer::new(channel)
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub(crate) struct Transfer<'a, C: Channel> {
channel: PeripheralRef<'a, C>,
}
impl<'a, C: Channel> Transfer<'a, C> {
pub(crate) fn new(channel: impl Peripheral<P = C> + 'a) -> Self {
into_ref!(channel);
Self { channel }
}
}
impl<'a, C: Channel> Drop for Transfer<'a, C> {
fn drop(&mut self) {
self.channel.request_stop();
while self.channel.is_running() {}
}
}
impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
impl<'a, C: Channel> Future for Transfer<'a, C> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.channel.set_waker(cx.waker());
if self.channel.is_running() {
Poll::Pending
} else {
Poll::Ready(())
}
}
}
}
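
These free-function helpers are removed by this commit. Drivers now construct a `Transfer` directly and either `.await` it or call `blocking_wait()`, as the driver hunks below show; a minimal sketch of the new pattern, where `ch`, `request`, `src_reg` and `buf` are placeholders:

let transfer = unsafe { Transfer::new_read(&mut ch, request, src_reg, buf, Default::default()) };
transfer.await; // or `transfer.blocking_wait()` in blocking code paths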
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
pub struct NoDma; pub struct NoDma;
impl_peripheral!(NoDma); impl_peripheral!(NoDma);
// safety: must be called only once at startup
pub(crate) unsafe fn init(#[cfg(bdma)] bdma_priority: Priority, #[cfg(dma)] dma_priority: Priority) {
#[cfg(bdma)]
bdma::init(bdma_priority);
#[cfg(dma)]
dma::init(dma_priority);
#[cfg(dmamux)]
dmamux::init();
#[cfg(gpdma)]
gpdma::init();
}
// TODO: replace transmutes with core::ptr::metadata once it's stable // TODO: replace transmutes with core::ptr::metadata once it's stable
#[allow(unused)] #[allow(unused)]
pub(crate) fn slice_ptr_parts<T>(slice: *const [T]) -> (usize, usize) { pub(crate) fn slice_ptr_parts<T>(slice: *const [T]) -> (usize, usize) {
@ -334,3 +97,19 @@ pub(crate) fn slice_ptr_parts<T>(slice: *const [T]) -> (usize, usize) {
pub(crate) fn slice_ptr_parts_mut<T>(slice: *mut [T]) -> (usize, usize) { pub(crate) fn slice_ptr_parts_mut<T>(slice: *mut [T]) -> (usize, usize) {
unsafe { mem::transmute(slice) } unsafe { mem::transmute(slice) }
} }
// safety: must be called only once at startup
pub(crate) unsafe fn init(
#[cfg(bdma)] bdma_priority: Priority,
#[cfg(dma)] dma_priority: Priority,
#[cfg(gpdma)] gpdma_priority: Priority,
) {
#[cfg(bdma)]
bdma::init(bdma_priority);
#[cfg(dma)]
dma::init(dma_priority);
#[cfg(gpdma)]
gpdma::init(gpdma_priority);
#[cfg(dmamux)]
dmamux::init();
}


@ -8,7 +8,7 @@ use embassy_hal_common::drop::OnDrop;
use embassy_hal_common::{into_ref, PeripheralRef}; use embassy_hal_common::{into_ref, PeripheralRef};
use embassy_sync::waitqueue::AtomicWaker; use embassy_sync::waitqueue::AtomicWaker;
use crate::dma::NoDma; use crate::dma::{NoDma, Transfer};
use crate::gpio::sealed::AFType; use crate::gpio::sealed::AFType;
use crate::gpio::Pull; use crate::gpio::Pull;
use crate::i2c::{Error, Instance, SclPin, SdaPin}; use crate::i2c::{Error, Instance, SclPin, SdaPin};
@ -476,7 +476,7 @@ impl<'d, T: Instance, TXDMA, RXDMA> I2c<'d, T, TXDMA, RXDMA> {
let ch = &mut self.tx_dma; let ch = &mut self.tx_dma;
let request = ch.request(); let request = ch.request();
crate::dma::write(ch, request, write, dst) Transfer::new_write(ch, request, write, dst, Default::default())
}; };
let state = T::state(); let state = T::state();
@ -576,7 +576,7 @@ impl<'d, T: Instance, TXDMA, RXDMA> I2c<'d, T, TXDMA, RXDMA> {
let ch = &mut self.rx_dma; let ch = &mut self.rx_dma;
let request = ch.request(); let request = ch.request();
crate::dma::read(ch, request, src, buffer) Transfer::new_read(ch, request, src, buffer, Default::default())
}; };
let state = T::state(); let state = T::state();


@ -78,7 +78,6 @@ pub(crate) mod _generated {
// Reexports // Reexports
pub use _generated::{peripherals, Peripherals}; pub use _generated::{peripherals, Peripherals};
pub use embassy_cortex_m::executor; pub use embassy_cortex_m::executor;
#[cfg(any(dma, bdma))]
use embassy_cortex_m::interrupt::Priority; use embassy_cortex_m::interrupt::Priority;
pub use embassy_cortex_m::interrupt::_export::interrupt; pub use embassy_cortex_m::interrupt::_export::interrupt;
pub use embassy_hal_common::{into_ref, Peripheral, PeripheralRef}; pub use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
@ -96,6 +95,8 @@ pub struct Config {
pub bdma_interrupt_priority: Priority, pub bdma_interrupt_priority: Priority,
#[cfg(dma)] #[cfg(dma)]
pub dma_interrupt_priority: Priority, pub dma_interrupt_priority: Priority,
#[cfg(gpdma)]
pub gpdma_interrupt_priority: Priority,
} }
impl Default for Config { impl Default for Config {
@ -108,6 +109,8 @@ impl Default for Config {
bdma_interrupt_priority: Priority::P0, bdma_interrupt_priority: Priority::P0,
#[cfg(dma)] #[cfg(dma)]
dma_interrupt_priority: Priority::P0, dma_interrupt_priority: Priority::P0,
#[cfg(gpdma)]
gpdma_interrupt_priority: Priority::P0,
} }
} }
} }
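
An illustrative user-side sketch of the new field on a GPDMA-based part (the exact re-export path of `Priority` is assumed; `Config` and `init` are as shown in this hunk):

let mut config = embassy_stm32::Config::default();
config.gpdma_interrupt_priority = embassy_stm32::interrupt::Priority::P1;
let p = embassy_stm32::init(config);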
@ -151,6 +154,8 @@ pub fn init(config: Config) -> Peripherals {
config.bdma_interrupt_priority, config.bdma_interrupt_priority,
#[cfg(dma)] #[cfg(dma)]
config.dma_interrupt_priority, config.dma_interrupt_priority,
#[cfg(gpdma)]
config.gpdma_interrupt_priority,
); );
#[cfg(feature = "exti")] #[cfg(feature = "exti")]
exti::init(); exti::init();


@ -5,7 +5,7 @@ pub mod enums;
use embassy_hal_common::{into_ref, PeripheralRef}; use embassy_hal_common::{into_ref, PeripheralRef};
use enums::*; use enums::*;
use crate::dma::TransferOptions; use crate::dma::Transfer;
use crate::gpio::sealed::AFType; use crate::gpio::sealed::AFType;
use crate::gpio::AnyPin; use crate::gpio::AnyPin;
use crate::pac::quadspi::Quadspi as Regs; use crate::pac::quadspi::Quadspi as Regs;
@ -230,9 +230,6 @@ impl<'d, T: Instance, Dma> Qspi<'d, T, Dma> {
unsafe { unsafe {
self.setup_transaction(QspiMode::IndirectWrite, &transaction); self.setup_transaction(QspiMode::IndirectWrite, &transaction);
let request = self.dma.request();
let options = TransferOptions::default();
T::REGS.ccr().modify(|v| { T::REGS.ccr().modify(|v| {
v.set_fmode(QspiMode::IndirectRead.into()); v.set_fmode(QspiMode::IndirectRead.into());
}); });
@ -241,12 +238,18 @@ impl<'d, T: Instance, Dma> Qspi<'d, T, Dma> {
v.set_address(current_ar); v.set_address(current_ar);
}); });
self.dma let request = self.dma.request();
.start_read(request, T::REGS.dr().ptr() as *mut u8, buf, options); let transfer = Transfer::new_read(
&mut self.dma,
request,
T::REGS.dr().ptr() as *mut u8,
buf,
Default::default(),
);
T::REGS.cr().modify(|v| v.set_dmaen(true)); T::REGS.cr().modify(|v| v.set_dmaen(true));
while self.dma.is_running() {} transfer.blocking_wait();
} }
} }
@ -257,19 +260,22 @@ impl<'d, T: Instance, Dma> Qspi<'d, T, Dma> {
unsafe { unsafe {
self.setup_transaction(QspiMode::IndirectWrite, &transaction); self.setup_transaction(QspiMode::IndirectWrite, &transaction);
let request = self.dma.request();
let options = TransferOptions::default();
T::REGS.ccr().modify(|v| { T::REGS.ccr().modify(|v| {
v.set_fmode(QspiMode::IndirectWrite.into()); v.set_fmode(QspiMode::IndirectWrite.into());
}); });
self.dma let request = self.dma.request();
.start_write(request, buf, T::REGS.dr().ptr() as *mut u8, options); let transfer = Transfer::new_write(
&mut self.dma,
request,
buf,
T::REGS.dr().ptr() as *mut u8,
Default::default(),
);
T::REGS.cr().modify(|v| v.set_dmaen(true)); T::REGS.cr().modify(|v| v.set_dmaen(true));
while self.dma.is_running() {} transfer.blocking_wait();
} }
} }


@ -185,6 +185,21 @@ fn clk_div(ker_ck: Hertz, sdmmc_ck: u32) -> Result<(bool, u16, Hertz), Error> {
} }
} }
#[cfg(sdmmc_v1)]
type Transfer<'a, C> = crate::dma::Transfer<'a, C>;
#[cfg(sdmmc_v2)]
type Transfer<'a, C> = core::marker::PhantomData<&'a mut C>;
#[cfg(all(sdmmc_v1, dma))]
const DMA_TRANSFER_OPTIONS: crate::dma::TransferOptions = crate::dma::TransferOptions {
pburst: crate::dma::Burst::Incr4,
mburst: crate::dma::Burst::Incr4,
flow_ctrl: crate::dma::FlowControl::Peripheral,
fifo_threshold: Some(crate::dma::FifoThreshold::Full),
};
#[cfg(all(sdmmc_v1, not(dma)))]
const DMA_TRANSFER_OPTIONS: crate::dma::TransferOptions = crate::dma::TransferOptions {};
/// SDMMC configuration /// SDMMC configuration
/// ///
/// Default values: /// Default values:
@ -490,7 +505,12 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
/// # Safety /// # Safety
/// ///
/// `buffer` must be valid for the whole transfer and word aligned /// `buffer` must be valid for the whole transfer and word aligned
unsafe fn prepare_datapath_read(&mut self, buffer: *mut [u32], length_bytes: u32, block_size: u8) { fn prepare_datapath_read<'a>(
&'a mut self,
buffer: &'a mut [u32],
length_bytes: u32,
block_size: u8,
) -> Transfer<'a, Dma> {
assert!(block_size <= 14, "Block size up to 2^14 bytes"); assert!(block_size <= 14, "Block size up to 2^14 bytes");
let regs = T::regs(); let regs = T::regs();
@ -499,32 +519,28 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
Self::clear_interrupt_flags(); Self::clear_interrupt_flags();
// NOTE(unsafe) We have exclusive access to the registers // NOTE(unsafe) We have exclusive access to the registers
unsafe {
regs.dtimer() regs.dtimer()
.write(|w| w.set_datatime(self.config.data_transfer_timeout)); .write(|w| w.set_datatime(self.config.data_transfer_timeout));
regs.dlenr().write(|w| w.set_datalength(length_bytes)); regs.dlenr().write(|w| w.set_datalength(length_bytes));
#[cfg(sdmmc_v1)] #[cfg(sdmmc_v1)]
{ let transfer = {
let request = self.dma.request(); let request = self.dma.request();
self.dma.start_read( Transfer::new_read(
&mut self.dma,
request, request,
regs.fifor().ptr() as *const u32, regs.fifor().ptr() as *mut u32,
buffer, buffer,
crate::dma::TransferOptions { DMA_TRANSFER_OPTIONS,
pburst: crate::dma::Burst::Incr4, )
mburst: crate::dma::Burst::Incr4, };
flow_ctrl: crate::dma::FlowControl::Peripheral,
fifo_threshold: Some(crate::dma::FifoThreshold::Full),
..Default::default()
},
);
}
#[cfg(sdmmc_v2)] #[cfg(sdmmc_v2)]
{ let transfer = {
regs.idmabase0r().write(|w| w.set_idmabase0(buffer as *mut u32 as u32)); regs.idmabase0r().write(|w| w.set_idmabase0(buffer.as_mut_ptr() as u32));
regs.idmactrlr().modify(|w| w.set_idmaen(true)); regs.idmactrlr().modify(|w| w.set_idmaen(true));
} core::marker::PhantomData
};
regs.dctrl().modify(|w| { regs.dctrl().modify(|w| {
w.set_dblocksize(block_size); w.set_dblocksize(block_size);
@ -535,12 +551,20 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
w.set_dten(true); w.set_dten(true);
} }
}); });
transfer
}
} }
/// # Safety /// # Safety
/// ///
/// `buffer` must be valid for the whole transfer and word aligned /// `buffer` must be valid for the whole transfer and word aligned
unsafe fn prepare_datapath_write(&mut self, buffer: *const [u32], length_bytes: u32, block_size: u8) { fn prepare_datapath_write<'a>(
&'a mut self,
buffer: &'a [u32],
length_bytes: u32,
block_size: u8,
) -> Transfer<'a, Dma> {
assert!(block_size <= 14, "Block size up to 2^14 bytes"); assert!(block_size <= 14, "Block size up to 2^14 bytes");
let regs = T::regs(); let regs = T::regs();
@ -549,33 +573,28 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
Self::clear_interrupt_flags(); Self::clear_interrupt_flags();
// NOTE(unsafe) We have exclusive access to the registers // NOTE(unsafe) We have exclusive access to the registers
unsafe {
regs.dtimer() regs.dtimer()
.write(|w| w.set_datatime(self.config.data_transfer_timeout)); .write(|w| w.set_datatime(self.config.data_transfer_timeout));
regs.dlenr().write(|w| w.set_datalength(length_bytes)); regs.dlenr().write(|w| w.set_datalength(length_bytes));
#[cfg(sdmmc_v1)] #[cfg(sdmmc_v1)]
{ let transfer = {
let request = self.dma.request(); let request = self.dma.request();
self.dma.start_write( Transfer::new_write(
&mut self.dma,
request, request,
buffer, buffer,
regs.fifor().ptr() as *mut u32, regs.fifor().ptr() as *mut u32,
crate::dma::TransferOptions { DMA_TRANSFER_OPTIONS,
pburst: crate::dma::Burst::Incr4, )
mburst: crate::dma::Burst::Incr4, };
flow_ctrl: crate::dma::FlowControl::Peripheral,
fifo_threshold: Some(crate::dma::FifoThreshold::Full),
..Default::default()
},
);
}
#[cfg(sdmmc_v2)] #[cfg(sdmmc_v2)]
{ let transfer = {
regs.idmabase0r() regs.idmabase0r().write(|w| w.set_idmabase0(buffer.as_ptr() as u32));
.write(|w| w.set_idmabase0(buffer as *const u32 as u32));
regs.idmactrlr().modify(|w| w.set_idmaen(true)); regs.idmactrlr().modify(|w| w.set_idmaen(true));
} core::marker::PhantomData
};
regs.dctrl().modify(|w| { regs.dctrl().modify(|w| {
w.set_dblocksize(block_size); w.set_dblocksize(block_size);
@ -586,6 +605,9 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
w.set_dten(true); w.set_dten(true);
} }
}); });
transfer
}
} }
/// Stops the DMA datapath /// Stops the DMA datapath
@ -662,11 +684,9 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let regs = T::regs(); let regs = T::regs();
let on_drop = OnDrop::new(|| unsafe { Self::on_drop() }); let on_drop = OnDrop::new(|| unsafe { Self::on_drop() });
unsafe { let transfer = self.prepare_datapath_read(&mut status, 64, 6);
self.prepare_datapath_read(&mut status, 64, 6);
Self::data_interrupts(true); Self::data_interrupts(true);
} Self::cmd(Cmd::cmd6(set_function), true)?; // CMD6
self.cmd(Cmd::cmd6(set_function), true)?; // CMD6
let res = poll_fn(|cx| { let res = poll_fn(|cx| {
T::state().register(cx.waker()); T::state().register(cx.waker());
@ -696,6 +716,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
Ok(_) => { Ok(_) => {
on_drop.defuse(); on_drop.defuse();
Self::stop_datapath(); Self::stop_datapath();
drop(transfer);
// Function Selection of Function Group 1 // Function Selection of Function Group 1
let selection = (u32::from_be(status[4]) >> 24) & 0xF; let selection = (u32::from_be(status[4]) >> 24) & 0xF;
@ -718,7 +739,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let regs = T::regs(); let regs = T::regs();
let rca = card.rca; let rca = card.rca;
self.cmd(Cmd::card_status(rca << 16), false)?; // CMD13 Self::cmd(Cmd::card_status(rca << 16), false)?; // CMD13
// NOTE(unsafe) Atomic read with no side-effects // NOTE(unsafe) Atomic read with no side-effects
let r1 = unsafe { regs.respr(0).read().cardstatus() }; let r1 = unsafe { regs.respr(0).read().cardstatus() };
@ -730,8 +751,8 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let card = self.card.as_mut().ok_or(Error::NoCard)?; let card = self.card.as_mut().ok_or(Error::NoCard)?;
let rca = card.rca; let rca = card.rca;
self.cmd(Cmd::set_block_length(64), false)?; // CMD16 Self::cmd(Cmd::set_block_length(64), false)?; // CMD16
self.cmd(Cmd::app_cmd(rca << 16), false)?; // APP Self::cmd(Cmd::app_cmd(rca << 16), false)?; // APP
let mut status = [0u32; 16]; let mut status = [0u32; 16];
@ -739,11 +760,9 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let regs = T::regs(); let regs = T::regs();
let on_drop = OnDrop::new(|| unsafe { Self::on_drop() }); let on_drop = OnDrop::new(|| unsafe { Self::on_drop() });
unsafe { let transfer = self.prepare_datapath_read(&mut status, 64, 6);
self.prepare_datapath_read(&mut status, 64, 6);
Self::data_interrupts(true); Self::data_interrupts(true);
} Self::cmd(Cmd::card_status(0), true)?;
self.cmd(Cmd::card_status(0), true)?;
let res = poll_fn(|cx| { let res = poll_fn(|cx| {
T::state().register(cx.waker()); T::state().register(cx.waker());
@ -764,6 +783,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
if res.is_ok() { if res.is_ok() {
on_drop.defuse(); on_drop.defuse();
Self::stop_datapath(); Self::stop_datapath();
drop(transfer);
for byte in status.iter_mut() { for byte in status.iter_mut() {
*byte = u32::from_be(*byte); *byte = u32::from_be(*byte);
@ -781,7 +801,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
// Determine Relative Card Address (RCA) of given card // Determine Relative Card Address (RCA) of given card
let rca = card.map(|c| c.rca << 16).unwrap_or(0); let rca = card.map(|c| c.rca << 16).unwrap_or(0);
let r = self.cmd(Cmd::sel_desel_card(rca), false); let r = Self::cmd(Cmd::sel_desel_card(rca), false);
match (r, rca) { match (r, rca) {
(Err(Error::Timeout), 0) => Ok(()), (Err(Error::Timeout), 0) => Ok(()),
_ => r, _ => r,
@ -842,8 +862,8 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
async fn get_scr(&mut self, card: &mut Card) -> Result<(), Error> { async fn get_scr(&mut self, card: &mut Card) -> Result<(), Error> {
// Read the 64-bit SCR register // Read the 64-bit SCR register
self.cmd(Cmd::set_block_length(8), false)?; // CMD16 Self::cmd(Cmd::set_block_length(8), false)?; // CMD16
self.cmd(Cmd::app_cmd(card.rca << 16), false)?; Self::cmd(Cmd::app_cmd(card.rca << 16), false)?;
let mut scr = [0u32; 2]; let mut scr = [0u32; 2];
@ -851,11 +871,9 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let regs = T::regs(); let regs = T::regs();
let on_drop = OnDrop::new(|| unsafe { Self::on_drop() }); let on_drop = OnDrop::new(|| unsafe { Self::on_drop() });
unsafe { let transfer = self.prepare_datapath_read(&mut scr[..], 8, 3);
self.prepare_datapath_read(&mut scr[..], 8, 3);
Self::data_interrupts(true); Self::data_interrupts(true);
} Self::cmd(Cmd::cmd51(), true)?;
self.cmd(Cmd::cmd51(), true)?;
let res = poll_fn(|cx| { let res = poll_fn(|cx| {
T::state().register(cx.waker()); T::state().register(cx.waker());
@ -876,6 +894,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
if res.is_ok() { if res.is_ok() {
on_drop.defuse(); on_drop.defuse();
Self::stop_datapath(); Self::stop_datapath();
drop(transfer);
unsafe { unsafe {
let scr_bytes = &*(&scr as *const [u32; 2] as *const [u8; 8]); let scr_bytes = &*(&scr as *const [u32; 2] as *const [u8; 8]);
@ -887,7 +906,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
/// Send command to card /// Send command to card
#[allow(unused_variables)] #[allow(unused_variables)]
fn cmd(&self, cmd: Cmd, data: bool) -> Result<(), Error> { fn cmd(cmd: Cmd, data: bool) -> Result<(), Error> {
let regs = T::regs(); let regs = T::regs();
Self::clear_interrupt_flags(); Self::clear_interrupt_flags();
@ -1005,10 +1024,10 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
}); });
regs.power().modify(|w| w.set_pwrctrl(PowerCtrl::On as u8)); regs.power().modify(|w| w.set_pwrctrl(PowerCtrl::On as u8));
self.cmd(Cmd::idle(), false)?; Self::cmd(Cmd::idle(), false)?;
// Check if cards supports CMD8 (with pattern) // Check if cards supports CMD8 (with pattern)
self.cmd(Cmd::hs_send_ext_csd(0x1AA), false)?; Self::cmd(Cmd::hs_send_ext_csd(0x1AA), false)?;
let r1 = regs.respr(0).read().cardstatus(); let r1 = regs.respr(0).read().cardstatus();
let mut card = if r1 == 0x1AA { let mut card = if r1 == 0x1AA {
@ -1020,14 +1039,14 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let ocr = loop { let ocr = loop {
// Signal that next command is a app command // Signal that next command is a app command
self.cmd(Cmd::app_cmd(0), false)?; // CMD55 Self::cmd(Cmd::app_cmd(0), false)?; // CMD55
let arg = CmdAppOper::VOLTAGE_WINDOW_SD as u32 let arg = CmdAppOper::VOLTAGE_WINDOW_SD as u32
| CmdAppOper::HIGH_CAPACITY as u32 | CmdAppOper::HIGH_CAPACITY as u32
| CmdAppOper::SD_SWITCH_1_8V_CAPACITY as u32; | CmdAppOper::SD_SWITCH_1_8V_CAPACITY as u32;
// Initialize card // Initialize card
match self.cmd(Cmd::app_op_cmd(arg), false) { match Self::cmd(Cmd::app_op_cmd(arg), false) {
// ACMD41 // ACMD41
Ok(_) => (), Ok(_) => (),
Err(Error::Crc) => (), Err(Error::Crc) => (),
@ -1048,7 +1067,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
} }
card.ocr = ocr; card.ocr = ocr;
self.cmd(Cmd::all_send_cid(), false)?; // CMD2 Self::cmd(Cmd::all_send_cid(), false)?; // CMD2
let cid0 = regs.respr(0).read().cardstatus() as u128; let cid0 = regs.respr(0).read().cardstatus() as u128;
let cid1 = regs.respr(1).read().cardstatus() as u128; let cid1 = regs.respr(1).read().cardstatus() as u128;
let cid2 = regs.respr(2).read().cardstatus() as u128; let cid2 = regs.respr(2).read().cardstatus() as u128;
@ -1056,10 +1075,10 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let cid = (cid0 << 96) | (cid1 << 64) | (cid2 << 32) | (cid3); let cid = (cid0 << 96) | (cid1 << 64) | (cid2 << 32) | (cid3);
card.cid = cid.into(); card.cid = cid.into();
self.cmd(Cmd::send_rel_addr(), false)?; Self::cmd(Cmd::send_rel_addr(), false)?;
card.rca = regs.respr(0).read().cardstatus() >> 16; card.rca = regs.respr(0).read().cardstatus() >> 16;
self.cmd(Cmd::send_csd(card.rca << 16), false)?; Self::cmd(Cmd::send_csd(card.rca << 16), false)?;
let csd0 = regs.respr(0).read().cardstatus() as u128; let csd0 = regs.respr(0).read().cardstatus() as u128;
let csd1 = regs.respr(1).read().cardstatus() as u128; let csd1 = regs.respr(1).read().cardstatus() as u128;
let csd2 = regs.respr(2).read().cardstatus() as u128; let csd2 = regs.respr(2).read().cardstatus() as u128;
@ -1077,8 +1096,8 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
BusWidth::Four if card.scr.bus_width_four() => (BusWidth::Four, 2), BusWidth::Four if card.scr.bus_width_four() => (BusWidth::Four, 2),
_ => (BusWidth::One, 0), _ => (BusWidth::One, 0),
}; };
self.cmd(Cmd::app_cmd(card.rca << 16), false)?; Self::cmd(Cmd::app_cmd(card.rca << 16), false)?;
self.cmd(Cmd::cmd6(acmd_arg), false)?; Self::cmd(Cmd::cmd6(acmd_arg), false)?;
// CPSMACT and DPSMACT must be 0 to set WIDBUS // CPSMACT and DPSMACT must be 0 to set WIDBUS
Self::wait_idle(); Self::wait_idle();
@ -1139,16 +1158,14 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
CardCapacity::SDSC => block_idx * 512, CardCapacity::SDSC => block_idx * 512,
_ => block_idx, _ => block_idx,
}; };
self.cmd(Cmd::set_block_length(512), false)?; // CMD16 Self::cmd(Cmd::set_block_length(512), false)?; // CMD16
let regs = T::regs(); let regs = T::regs();
let on_drop = OnDrop::new(|| unsafe { Self::on_drop() }); let on_drop = OnDrop::new(|| unsafe { Self::on_drop() });
unsafe { let transfer = self.prepare_datapath_read(buffer, 512, 9);
self.prepare_datapath_read(buffer, 512, 9);
Self::data_interrupts(true); Self::data_interrupts(true);
} Self::cmd(Cmd::read_single_block(address), true)?;
self.cmd(Cmd::read_single_block(address), true)?;
let res = poll_fn(|cx| { let res = poll_fn(|cx| {
T::state().register(cx.waker()); T::state().register(cx.waker());
@ -1169,6 +1186,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
if res.is_ok() { if res.is_ok() {
on_drop.defuse(); on_drop.defuse();
Self::stop_datapath(); Self::stop_datapath();
drop(transfer);
} }
res res
} }
@ -1185,22 +1203,20 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
CardCapacity::SDSC => block_idx * 512, CardCapacity::SDSC => block_idx * 512,
_ => block_idx, _ => block_idx,
}; };
self.cmd(Cmd::set_block_length(512), false)?; // CMD16 Self::cmd(Cmd::set_block_length(512), false)?; // CMD16
let regs = T::regs(); let regs = T::regs();
let on_drop = OnDrop::new(|| unsafe { Self::on_drop() }); let on_drop = OnDrop::new(|| unsafe { Self::on_drop() });
// sdmmc_v1 uses a different cmd/dma order than v2, but only for writes // sdmmc_v1 uses a different cmd/dma order than v2, but only for writes
#[cfg(sdmmc_v1)] #[cfg(sdmmc_v1)]
self.cmd(Cmd::write_single_block(address), true)?; Self::cmd(Cmd::write_single_block(address), true)?;
unsafe { let transfer = self.prepare_datapath_write(buffer, 512, 9);
self.prepare_datapath_write(buffer as *const [u32; 128], 512, 9);
Self::data_interrupts(true); Self::data_interrupts(true);
}
#[cfg(sdmmc_v2)] #[cfg(sdmmc_v2)]
self.cmd(Cmd::write_single_block(address), true)?; Self::cmd(Cmd::write_single_block(address), true)?;
let res = poll_fn(|cx| { let res = poll_fn(|cx| {
T::state().register(cx.waker()); T::state().register(cx.waker());
@ -1222,6 +1238,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
Ok(_) => { Ok(_) => {
on_drop.defuse(); on_drop.defuse();
Self::stop_datapath(); Self::stop_datapath();
drop(transfer);
// TODO: Make this configurable // TODO: Make this configurable
let mut timeout: u32 = 0x00FF_FFFF; let mut timeout: u32 = 0x00FF_FFFF;


@ -421,8 +421,7 @@ impl<'d, T: Instance, Tx, Rx> Spi<'d, T, Tx, Rx> {
let tx_request = self.txdma.request(); let tx_request = self.txdma.request();
let tx_dst = T::REGS.tx_ptr(); let tx_dst = T::REGS.tx_ptr();
unsafe { self.txdma.start_write(tx_request, data, tx_dst, Default::default()) } let tx_f = unsafe { Transfer::new_write(&mut self.txdma, tx_request, data, tx_dst, Default::default()) };
let tx_f = Transfer::new(&mut self.txdma);
unsafe { unsafe {
set_txdmaen(T::REGS, true); set_txdmaen(T::REGS, true);
@ -468,13 +467,21 @@ impl<'d, T: Instance, Tx, Rx> Spi<'d, T, Tx, Rx> {
let rx_request = self.rxdma.request(); let rx_request = self.rxdma.request();
let rx_src = T::REGS.rx_ptr(); let rx_src = T::REGS.rx_ptr();
unsafe { self.rxdma.start_read(rx_request, rx_src, data, Default::default()) }; let rx_f = unsafe { Transfer::new_read(&mut self.rxdma, rx_request, rx_src, data, Default::default()) };
let rx_f = Transfer::new(&mut self.rxdma);
let tx_request = self.txdma.request(); let tx_request = self.txdma.request();
let tx_dst = T::REGS.tx_ptr(); let tx_dst = T::REGS.tx_ptr();
let clock_byte = 0x00u8; let clock_byte = 0x00u8;
let tx_f = crate::dma::write_repeated(&mut self.txdma, tx_request, &clock_byte, clock_byte_count, tx_dst); let tx_f = unsafe {
Transfer::new_write_repeated(
&mut self.txdma,
tx_request,
&clock_byte,
clock_byte_count,
tx_dst,
Default::default(),
)
};
unsafe { unsafe {
set_txdmaen(T::REGS, true); set_txdmaen(T::REGS, true);
@ -521,13 +528,11 @@ impl<'d, T: Instance, Tx, Rx> Spi<'d, T, Tx, Rx> {
let rx_request = self.rxdma.request(); let rx_request = self.rxdma.request();
let rx_src = T::REGS.rx_ptr(); let rx_src = T::REGS.rx_ptr();
unsafe { self.rxdma.start_read(rx_request, rx_src, read, Default::default()) }; let rx_f = unsafe { Transfer::new_read_raw(&mut self.rxdma, rx_request, rx_src, read, Default::default()) };
let rx_f = Transfer::new(&mut self.rxdma);
let tx_request = self.txdma.request(); let tx_request = self.txdma.request();
let tx_dst = T::REGS.tx_ptr(); let tx_dst = T::REGS.tx_ptr();
unsafe { self.txdma.start_write(tx_request, write, tx_dst, Default::default()) } let tx_f = unsafe { Transfer::new_write_raw(&mut self.txdma, tx_request, write, tx_dst, Default::default()) };
let tx_f = Transfer::new(&mut self.txdma);
unsafe { unsafe {
set_txdmaen(T::REGS, true); set_txdmaen(T::REGS, true);


@ -34,7 +34,7 @@ macro_rules! dma_trait_impl {
(crate::$mod:ident::$trait:ident, $instance:ident, {dmamux: $dmamux:ident}, $request:expr) => { (crate::$mod:ident::$trait:ident, $instance:ident, {dmamux: $dmamux:ident}, $request:expr) => {
impl<T> crate::$mod::$trait<crate::peripherals::$instance> for T impl<T> crate::$mod::$trait<crate::peripherals::$instance> for T
where where
T: crate::dma::MuxChannel<Mux = crate::dma::$dmamux>, T: crate::dma::Channel + crate::dma::MuxChannel<Mux = crate::dma::$dmamux>,
{ {
fn request(&self) -> crate::dma::Request { fn request(&self) -> crate::dma::Request {
$request $request


@ -6,11 +6,11 @@ use core::sync::atomic::{compiler_fence, Ordering};
use core::task::Poll; use core::task::Poll;
use embassy_cortex_m::interrupt::InterruptExt; use embassy_cortex_m::interrupt::InterruptExt;
use embassy_futures::select::{select, Either};
use embassy_hal_common::drop::OnDrop; use embassy_hal_common::drop::OnDrop;
use embassy_hal_common::{into_ref, PeripheralRef}; use embassy_hal_common::{into_ref, PeripheralRef};
use futures::future::{select, Either};
use crate::dma::NoDma; use crate::dma::{NoDma, Transfer};
use crate::gpio::sealed::AFType; use crate::gpio::sealed::AFType;
#[cfg(any(lpuart_v1, lpuart_v2))] #[cfg(any(lpuart_v1, lpuart_v2))]
use crate::pac::lpuart::{regs, vals, Lpuart as Regs}; use crate::pac::lpuart::{regs, vals, Lpuart as Regs};
@ -91,7 +91,7 @@ enum ReadCompletionEvent {
// DMA Read transfer completed first // DMA Read transfer completed first
DmaCompleted, DmaCompleted,
// Idle line detected first // Idle line detected first
Idle, Idle(usize),
} }
pub struct Uart<'d, T: BasicInstance, TxDma = NoDma, RxDma = NoDma> { pub struct Uart<'d, T: BasicInstance, TxDma = NoDma, RxDma = NoDma> {
@ -183,7 +183,7 @@ impl<'d, T: BasicInstance, TxDma> UartTx<'d, T, TxDma> {
} }
// If we don't assign future to a variable, the data register pointer // If we don't assign future to a variable, the data register pointer
// is held across an await and makes the future non-Send. // is held across an await and makes the future non-Send.
let transfer = crate::dma::write(ch, request, buffer, tdr(T::regs())); let transfer = unsafe { Transfer::new_write(ch, request, buffer, tdr(T::regs()), Default::default()) };
transfer.await; transfer.await;
Ok(()) Ok(())
} }
@ -430,10 +430,12 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> {
let ch = &mut self.rx_dma; let ch = &mut self.rx_dma;
let request = ch.request(); let request = ch.request();
let buffer_len = buffer.len();
// Start USART DMA // Start USART DMA
// will not do anything yet because DMAR is not yet set // will not do anything yet because DMAR is not yet set
// future which will complete when DMA Read request completes // future which will complete when DMA Read request completes
let transfer = crate::dma::read(ch, request, rdr(T::regs()), buffer); let transfer = unsafe { Transfer::new_read(ch, request, rdr(T::regs()), buffer, Default::default()) };
// SAFETY: The only way we might have a problem is using split rx and tx // SAFETY: The only way we might have a problem is using split rx and tx
// here we only modify or read Rx related flags, interrupts and DMA channel // here we only modify or read Rx related flags, interrupts and DMA channel
@ -565,13 +567,15 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> {
// when transfer is dropped, it will stop the DMA request // when transfer is dropped, it will stop the DMA request
let r = match select(transfer, idle).await { let r = match select(transfer, idle).await {
// DMA transfer completed first // DMA transfer completed first
Either::First(()) => Ok(ReadCompletionEvent::DmaCompleted), Either::Left(((), _)) => Ok(ReadCompletionEvent::DmaCompleted),
// Idle line detected first // Idle line detected first
Either::Second(Ok(())) => Ok(ReadCompletionEvent::Idle), Either::Right((Ok(()), transfer)) => Ok(ReadCompletionEvent::Idle(
buffer_len - transfer.get_remaining_transfers() as usize,
)),
// error occurred // error occurred
Either::Second(Err(e)) => Err(e), Either::Right((Err(e), _)) => Err(e),
}; };
drop(on_drop); drop(on_drop);
@ -594,14 +598,9 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> {
// wait for DMA to complete or IDLE line detection if requested // wait for DMA to complete or IDLE line detection if requested
let res = self.inner_read_run(buffer, enable_idle_line_detection).await; let res = self.inner_read_run(buffer, enable_idle_line_detection).await;
let ch = &mut self.rx_dma;
match res { match res {
Ok(ReadCompletionEvent::DmaCompleted) => Ok(buffer_len), Ok(ReadCompletionEvent::DmaCompleted) => Ok(buffer_len),
Ok(ReadCompletionEvent::Idle) => { Ok(ReadCompletionEvent::Idle(n)) => Ok(n),
let n = buffer_len - (ch.remaining_transfers() as usize);
Ok(n)
}
Err(e) => Err(e), Err(e) => Err(e),
} }
} }
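
The idle-line arm can now report the received length because `futures::future::select` hands back the losing future, so `get_remaining_transfers()` is read while the DMA transfer is still alive. A self-contained sketch of that shape (dummy futures stand in for the DMA transfer and the idle-line wait; 128 stands in for `buffer_len`):

use futures::future::{select, Either};

async fn demo() -> usize {
    let dma = Box::pin(async {});           // stands in for the DMA read transfer
    let idle = Box::pin(async { 42usize }); // stands in for idle-line detection
    match select(dma, idle).await {
        // DMA completed first: the whole buffer was filled
        Either::Left(((), _idle)) => 128,
        // idle line first: the loser (`_dma`) is still available for inspection,
        // like `buffer_len - transfer.get_remaining_transfers()` in the driver
        Either::Right((n, _dma)) => n,
    }
}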