Merge pull request #1348 from embassy-rs/h5-spi

stm32/dma: refactor
Dario Nieuwenhuis 2023-04-18 17:03:24 +02:00 committed by GitHub
commit fbd6eeb748
14 changed files with 1202 additions and 1101 deletions

embassy-stm32/build.rs

@ -268,7 +268,7 @@ fn main() {
// ========
// Generate DMA IRQs.
let mut dma_irqs: HashMap<&str, Vec<(&str, &str)>> = HashMap::new();
let mut dma_irqs: HashMap<&str, Vec<(&str, &str, &str)>> = HashMap::new();
for p in METADATA.peripherals {
if let Some(r) = &p.registers {
@ -278,7 +278,10 @@ fn main() {
continue;
}
for irq in p.interrupts {
dma_irqs.entry(irq.interrupt).or_default().push((p.name, irq.signal));
dma_irqs
.entry(irq.interrupt)
.or_default()
.push((r.kind, p.name, irq.signal));
}
}
}
@ -287,13 +290,14 @@ fn main() {
for (irq, channels) in dma_irqs {
let irq = format_ident!("{}", irq);
let channels = channels.iter().map(|(dma, ch)| format_ident!("{}_{}", dma, ch));
let xdma = format_ident!("{}", channels[0].0);
let channels = channels.iter().map(|(_, dma, ch)| format_ident!("{}_{}", dma, ch));
g.extend(quote! {
#[crate::interrupt]
unsafe fn #irq () {
#(
<crate::peripherals::#channels as crate::dma::sealed::Channel>::on_irq();
<crate::peripherals::#channels as crate::dma::#xdma::sealed::Channel>::on_irq();
)*
}
});
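Example (not part of this diff): with channels now tagged by their register kind, the generated handler dispatches through the kind-specific module. For a shared BDMA interrupt the macro output roughly expands to the following; the interrupt and channel names are illustrative only.

#[crate::interrupt]
unsafe fn DMA1_CHANNEL2_3() {
    // One on_irq() call per channel sharing this interrupt, resolved through the
    // per-kind sealed trait (crate::dma::bdma here, because r.kind is "bdma").
    <crate::peripherals::DMA1_CH2 as crate::dma::bdma::sealed::Channel>::on_irq();
    <crate::peripherals::DMA1_CH3 as crate::dma::bdma::sealed::Channel>::on_irq();
}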

embassy-stm32/src/dcmi.rs

@ -4,6 +4,7 @@ use core::task::Poll;
use embassy_hal_common::{into_ref, PeripheralRef};
use embassy_sync::waitqueue::AtomicWaker;
use crate::dma::Transfer;
use crate::gpio::sealed::AFType;
use crate::gpio::Speed;
use crate::interrupt::{Interrupt, InterruptExt};
@ -385,14 +386,11 @@ where
return self.capture_giant(buffer).await;
}
}
async fn capture_small(&mut self, buffer: &mut [u32]) -> Result<(), Error> {
let channel = &mut self.dma;
let request = channel.request();
let r = self.inner.regs();
let src = r.dr().ptr() as *mut u32;
let dma_read = crate::dma::read(channel, request, src, buffer);
let request = self.dma.request();
let dma_read = unsafe { Transfer::new_read(&mut self.dma, request, src, buffer, Default::default()) };
Self::clear_interrupt_flags();
Self::enable_irqs();
@ -436,6 +434,12 @@ where
result
}
#[cfg(not(dma))]
async fn capture_giant(&mut self, _buffer: &mut [u32]) -> Result<(), Error> {
panic!("capturing to buffers larger than 0xffff is only supported on DMA for now, not on BDMA or GPDMA.");
}
#[cfg(dma)]
async fn capture_giant(&mut self, buffer: &mut [u32]) -> Result<(), Error> {
use crate::dma::TransferOptions;
@ -460,16 +464,24 @@ where
let r = self.inner.regs();
let src = r.dr().ptr() as *mut u32;
unsafe {
channel.start_double_buffered_read(request, src, m0ar, m1ar, chunk_size, TransferOptions::default());
}
let mut transfer = unsafe {
crate::dma::DoubleBuffered::new_read(
&mut self.dma,
request,
src,
m0ar,
m1ar,
chunk_size,
TransferOptions::default(),
)
};
let mut last_chunk_set_for_transfer = false;
let mut buffer0_last_accessible = false;
let dma_result = poll_fn(|cx| {
channel.set_waker(cx.waker());
transfer.set_waker(cx.waker());
let buffer0_currently_accessible = unsafe { channel.is_buffer0_accessible() };
let buffer0_currently_accessible = transfer.is_buffer0_accessible();
// check if the accessible buffer changed since last poll
if buffer0_last_accessible == buffer0_currently_accessible {
@ -480,21 +492,21 @@ where
if remaining_chunks != 0 {
if remaining_chunks % 2 == 0 && buffer0_currently_accessible {
m0ar = unsafe { m0ar.add(2 * chunk_size) };
unsafe { channel.set_buffer0(m0ar) }
unsafe { transfer.set_buffer0(m0ar) }
remaining_chunks -= 1;
} else if !buffer0_currently_accessible {
m1ar = unsafe { m1ar.add(2 * chunk_size) };
unsafe { channel.set_buffer1(m1ar) };
unsafe { transfer.set_buffer1(m1ar) };
remaining_chunks -= 1;
}
} else {
if buffer0_currently_accessible {
unsafe { channel.set_buffer0(buffer.as_mut_ptr()) }
unsafe { transfer.set_buffer0(buffer.as_mut_ptr()) }
} else {
unsafe { channel.set_buffer1(buffer.as_mut_ptr()) }
unsafe { transfer.set_buffer1(buffer.as_mut_ptr()) }
}
if last_chunk_set_for_transfer {
channel.request_stop();
transfer.request_stop();
return Poll::Ready(());
}
last_chunk_set_for_transfer = true;

embassy-stm32/src/dma/bdma.rs

@ -1,18 +1,31 @@
#![macro_use]
use core::future::Future;
use core::pin::Pin;
use core::sync::atomic::{fence, Ordering};
use core::task::Waker;
use core::task::{Context, Poll};
use embassy_cortex_m::interrupt::Priority;
use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
use embassy_sync::waitqueue::AtomicWaker;
use super::{TransferOptions, Word, WordSize};
use super::{Dir, Word, WordSize};
use crate::_generated::BDMA_CHANNEL_COUNT;
use crate::dma::Request;
use crate::interrupt::{Interrupt, InterruptExt};
use crate::pac;
use crate::pac::bdma::vals;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {}
impl Default for TransferOptions {
fn default() -> Self {
Self {}
}
}
impl From<WordSize> for vals::Size {
fn from(raw: WordSize) -> Self {
match raw {
@ -23,6 +36,15 @@ impl From<WordSize> for vals::Size {
}
}
impl From<Dir> for vals::Dir {
fn from(raw: Dir) -> Self {
match raw {
Dir::MemoryToPeripheral => Self::FROMMEMORY,
Dir::PeripheralToMemory => Self::FROMPERIPHERAL,
}
}
}
struct State {
ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT],
}
@ -55,219 +77,27 @@ foreach_dma_channel! {
// BDMA1 in H7 doesn't use DMAMUX, which breaks
};
($channel_peri:ident, $dma_peri:ident, bdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
impl crate::dma::sealed::Channel for crate::peripherals::$channel_peri {
unsafe fn start_write<W: Word>(&mut self, _request: Request, buf: *const[W], reg_addr: *mut W, options: TransferOptions) {
let (ptr, len) = super::slice_ptr_parts(buf);
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
#[cfg(any(bdma_v2, dmamux))]
_request,
vals::Dir::FROMMEMORY,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
);
impl sealed::Channel for crate::peripherals::$channel_peri {
fn regs(&self) -> pac::bdma::Dma {
pac::$dma_peri
}
unsafe fn start_write_repeated<W: Word>(&mut self, _request: Request, repeated: *const W, count: usize, reg_addr: *mut W, options: TransferOptions) {
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
#[cfg(any(bdma_v2, dmamux))]
_request,
vals::Dir::FROMMEMORY,
reg_addr as *const u32,
repeated as *mut u32,
count,
false,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
)
fn num(&self) -> usize {
$channel_num
}
unsafe fn start_read<W: Word>(&mut self, _request: Request, reg_addr: *const W, buf: *mut [W], options: TransferOptions) {
let (ptr, len) = super::slice_ptr_parts_mut(buf);
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
#[cfg(any(bdma_v2, dmamux))]
_request,
vals::Dir::FROMPERIPHERAL,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
);
fn index(&self) -> usize {
$index
}
unsafe fn start_double_buffered_read<W: super::Word>(
&mut self,
_request: Request,
_reg_addr: *const W,
_buffer0: *mut W,
_buffer1: *mut W,
_buffer_len: usize,
_options: TransferOptions,
) {
panic!("Unsafe double buffered mode is unavailable on BDMA");
}
unsafe fn set_buffer0<W: super::Word>(&mut self, _buffer: *mut W) {
panic!("Unsafe double buffered mode is unavailable on BDMA");
}
unsafe fn set_buffer1<W: super::Word>(&mut self, _buffer: *mut W) {
panic!("Unsafe double buffered mode is unavailable on BDMA");
}
unsafe fn is_buffer0_accessible(&mut self) -> bool {
panic!("Unsafe double buffered mode is unavailable on BDMA");
}
fn request_stop(&mut self){
unsafe {low_level_api::request_stop(pac::$dma_peri, $channel_num);}
}
fn is_running(&self) -> bool {
unsafe {low_level_api::is_running(pac::$dma_peri, $channel_num)}
}
fn remaining_transfers(&mut self) -> u16 {
unsafe {low_level_api::get_remaining_transfers(pac::$dma_peri, $channel_num)}
}
fn set_waker(&mut self, waker: &Waker) {
unsafe { low_level_api::set_waker($index, waker) }
}
fn on_irq() {
unsafe {
low_level_api::on_irq_inner(pac::$dma_peri, $channel_num, $index);
}
unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
}
}
impl crate::dma::Channel for crate::peripherals::$channel_peri {}
impl Channel for crate::peripherals::$channel_peri {}
};
}
mod low_level_api {
use super::*;
pub unsafe fn start_transfer(
dma: pac::bdma::Dma,
channel_number: u8,
#[cfg(any(bdma_v2, dmamux))] request: Request,
dir: vals::Dir,
peri_addr: *const u32,
mem_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: vals::Size,
options: TransferOptions,
#[cfg(dmamux)] dmamux_regs: pac::dmamux::Dmamux,
#[cfg(dmamux)] dmamux_ch_num: u8,
) {
assert!(options.mburst == crate::dma::Burst::Single, "Burst mode not supported");
assert!(options.pburst == crate::dma::Burst::Single, "Burst mode not supported");
assert!(
options.flow_ctrl == crate::dma::FlowControl::Dma,
"Peripheral flow control not supported"
);
assert!(options.fifo_threshold.is_none(), "FIFO mode not supported");
let ch = dma.ch(channel_number as _);
reset_status(dma, channel_number);
#[cfg(dmamux)]
super::super::dmamux::configure_dmamux(dmamux_regs, dmamux_ch_num, request);
#[cfg(bdma_v2)]
critical_section::with(|_| dma.cselr().modify(|w| w.set_cs(channel_number as _, request)));
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
ch.par().write_value(peri_addr as u32);
ch.mar().write_value(mem_addr as u32);
ch.ndtr().write(|w| w.set_ndt(mem_len as u16));
ch.cr().write(|w| {
w.set_psize(data_size);
w.set_msize(data_size);
if incr_mem {
w.set_minc(vals::Inc::ENABLED);
} else {
w.set_minc(vals::Inc::DISABLED);
}
w.set_dir(dir);
w.set_teie(true);
w.set_tcie(true);
w.set_en(true);
});
}
pub unsafe fn request_stop(dma: pac::bdma::Dma, channel_number: u8) {
reset_status(dma, channel_number);
let ch = dma.ch(channel_number as _);
// Disable the channel and interrupts with the default value.
ch.cr().write(|_| ());
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
pub unsafe fn is_running(dma: pac::bdma::Dma, ch: u8) -> bool {
let ch = dma.ch(ch as _);
ch.cr().read().en()
}
/// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation.
pub unsafe fn get_remaining_transfers(dma: pac::bdma::Dma, ch: u8) -> u16 {
// get a handle on the channel itself
let ch = dma.ch(ch as _);
// read the remaining transfer count. If this is zero, the transfer completed fully.
ch.ndtr().read().ndt() as u16
}
/// Sets the waker for the specified DMA channel
pub unsafe fn set_waker(state_number: usize, waker: &Waker) {
STATE.ch_wakers[state_number].register(waker);
}
pub unsafe fn reset_status(dma: pac::bdma::Dma, channel_number: u8) {
dma.ifcr().write(|w| {
w.set_tcif(channel_number as _, true);
w.set_teif(channel_number as _, true);
});
}
/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: u8, index: u8) {
let channel_num = channel_num as usize;
let index = index as usize;
/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index: usize) {
let isr = dma.isr().read();
let cr = dma.ch(channel_num).cr();
@ -278,5 +108,236 @@ mod low_level_api {
cr.write(|_| ()); // Disable channel interrupts with the default value.
STATE.ch_wakers[index].wake();
}
}
#[cfg(any(bdma_v2, dmamux))]
pub type Request = u8;
#[cfg(not(any(bdma_v2, dmamux)))]
pub type Request = ();
#[cfg(dmamux)]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {}
#[cfg(not(dmamux))]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
pub(crate) mod sealed {
use super::*;
pub trait Channel {
fn regs(&self) -> pac::bdma::Dma;
fn num(&self) -> usize;
fn index(&self) -> usize;
fn on_irq();
}
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a, C: Channel> {
channel: PeripheralRef<'a, C>,
}
impl<'a, C: Channel> Transfer<'a, C> {
pub unsafe fn new_read<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buf: &'a mut [W],
options: TransferOptions,
) -> Self {
Self::new_read_raw(channel, request, peri_addr, buf, options)
}
pub unsafe fn new_read_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buf: *mut [W],
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts_mut(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request,
Dir::PeripheralToMemory,
peri_addr as *const u32,
ptr as *mut u32,
len,
true,
W::bits(),
options,
)
}
pub unsafe fn new_write<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
buf: &'a [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
Self::new_write_raw(channel, request, buf, peri_addr, options)
}
pub unsafe fn new_write_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
buf: *const [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
ptr as *mut u32,
len,
true,
W::bits(),
options,
)
}
pub unsafe fn new_write_repeated<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
repeated: &'a W,
count: usize,
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
Self::new_inner(
channel,
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
repeated as *const W as *mut u32,
count,
false,
W::bits(),
options,
)
}
unsafe fn new_inner(
channel: PeripheralRef<'a, C>,
_request: Request,
dir: Dir,
peri_addr: *const u32,
mem_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: WordSize,
_options: TransferOptions,
) -> Self {
let ch = channel.regs().ch(channel.num());
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
#[cfg(bdma_v2)]
critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
let mut this = Self { channel };
this.clear_irqs();
#[cfg(dmamux)]
super::dmamux::configure_dmamux(&mut *this.channel, _request);
ch.par().write_value(peri_addr as u32);
ch.mar().write_value(mem_addr as u32);
ch.ndtr().write(|w| w.set_ndt(mem_len as u16));
ch.cr().write(|w| {
w.set_psize(data_size.into());
w.set_msize(data_size.into());
if incr_mem {
w.set_minc(vals::Inc::ENABLED);
} else {
w.set_minc(vals::Inc::DISABLED);
}
w.set_dir(dir.into());
w.set_teie(true);
w.set_tcie(true);
w.set_en(true);
});
this
}
fn clear_irqs(&mut self) {
unsafe {
self.channel.regs().ifcr().write(|w| {
w.set_tcif(self.channel.num(), true);
w.set_teif(self.channel.num(), true);
})
}
}
pub fn request_stop(&mut self) {
let ch = self.channel.regs().ch(self.channel.num());
// Disable the channel. Keep the IEs enabled so the irqs still fire.
unsafe {
ch.cr().write(|w| {
w.set_teie(true);
w.set_tcie(true);
})
}
}
pub fn is_running(&mut self) -> bool {
let ch = self.channel.regs().ch(self.channel.num());
unsafe { ch.cr().read() }.en()
}
/// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation.
pub fn get_remaining_transfers(&self) -> u16 {
let ch = self.channel.regs().ch(self.channel.num());
unsafe { ch.ndtr().read() }.ndt()
}
pub fn blocking_wait(mut self) {
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
core::mem::forget(self);
}
}
impl<'a, C: Channel> Drop for Transfer<'a, C> {
fn drop(&mut self) {
self.request_stop();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
impl<'a, C: Channel> Future for Transfer<'a, C> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
STATE.ch_wakers[self.channel.index()].register(cx.waker());
if self.is_running() {
Poll::Pending
} else {
Poll::Ready(())
}
}
}
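Usage sketch (not part of this diff): the owned Transfer replaces the old start_read/start_write channel methods and the crate::dma::read/write helpers. A minimal, hypothetical caller looks like this; the peripheral register, request and buffer are placeholders, and the real call sites are in the dcmi and i2c hunks of this commit.

// Hypothetical helper: DMA-read `buf.len()` bytes from a peripheral data register.
// Safety contract is the same as Transfer::new_read: `dr` must be a valid peripheral
// register address and `buf` must stay alive for the whole transfer.
async fn dma_read_bytes<C: Channel>(ch: &mut C, request: Request, dr: *mut u8, buf: &mut [u8]) {
    let transfer = unsafe { Transfer::new_read(ch, request, dr, buf, Default::default()) };
    // Awaiting resolves once the channel stops; dropping the Transfer early instead
    // requests a stop and busy-waits until the channel is disabled (see Drop above).
    transfer.await;
}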

embassy-stm32/src/dma/dma.rs

@ -1,15 +1,45 @@
use core::future::Future;
use core::marker::PhantomData;
use core::pin::Pin;
use core::sync::atomic::{fence, Ordering};
use core::task::Waker;
use core::task::{Context, Poll, Waker};
use embassy_cortex_m::interrupt::Priority;
use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
use embassy_sync::waitqueue::AtomicWaker;
use pac::dma::regs;
use super::{Burst, FifoThreshold, FlowControl, Request, TransferOptions, Word, WordSize};
use super::{Dir, Word, WordSize};
use crate::_generated::DMA_CHANNEL_COUNT;
use crate::interrupt::{Interrupt, InterruptExt};
use crate::pac::dma::{regs, vals};
use crate::pac::dma::vals;
use crate::{interrupt, pac};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
/// Peripheral burst transfer configuration
pub pburst: Burst,
/// Memory burst transfer configuration
pub mburst: Burst,
/// Flow control configuration
pub flow_ctrl: FlowControl,
/// FIFO threshold for DMA FIFO mode. If none, direct mode is used.
pub fifo_threshold: Option<FifoThreshold>,
}
impl Default for TransferOptions {
fn default() -> Self {
Self {
pburst: Burst::Single,
mburst: Burst::Single,
flow_ctrl: FlowControl::Dma,
fifo_threshold: None,
}
}
}
impl From<WordSize> for vals::Size {
fn from(raw: WordSize) -> Self {
match raw {
@ -20,6 +50,28 @@ impl From<WordSize> for vals::Size {
}
}
impl From<Dir> for vals::Dir {
fn from(raw: Dir) -> Self {
match raw {
Dir::MemoryToPeripheral => Self::MEMORYTOPERIPHERAL,
Dir::PeripheralToMemory => Self::PERIPHERALTOMEMORY,
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Burst {
/// Single transfer
Single,
/// Incremental burst of 4 beats
Incr4,
/// Incremental burst of 8 beats
Incr8,
/// Incremental burst of 16 beats
Incr16,
}
impl From<Burst> for vals::Burst {
fn from(burst: Burst) -> Self {
match burst {
@ -31,6 +83,15 @@ impl From<Burst> for vals::Burst {
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum FlowControl {
/// Flow control by DMA
Dma,
/// Flow control by peripheral
Peripheral,
}
impl From<FlowControl> for vals::Pfctrl {
fn from(flow: FlowControl) -> Self {
match flow {
@ -40,6 +101,19 @@ impl From<FlowControl> for vals::Pfctrl {
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum FifoThreshold {
/// 1/4 full FIFO
Quarter,
/// 1/2 full FIFO
Half,
/// 3/4 full FIFO
ThreeQuarters,
/// Full FIFO
Full,
}
impl From<FifoThreshold> for vals::Fth {
fn from(value: FifoThreshold) -> Self {
match value {
@ -51,27 +125,15 @@ impl From<FifoThreshold> for vals::Fth {
}
}
struct ChannelState {
waker: AtomicWaker,
}
impl ChannelState {
const fn new() -> Self {
Self {
waker: AtomicWaker::new(),
}
}
}
struct State {
channels: [ChannelState; DMA_CHANNEL_COUNT],
ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT],
}
impl State {
const fn new() -> Self {
const CH: ChannelState = ChannelState::new();
const AW: AtomicWaker = AtomicWaker::new();
Self {
channels: [CH; DMA_CHANNEL_COUNT],
ch_wakers: [AW; DMA_CHANNEL_COUNT],
}
}
}
@ -92,158 +154,183 @@ pub(crate) unsafe fn init(irq_priority: Priority) {
foreach_dma_channel! {
($channel_peri:ident, $dma_peri:ident, dma, $channel_num:expr, $index:expr, $dmamux:tt) => {
impl crate::dma::sealed::Channel for crate::peripherals::$channel_peri {
unsafe fn start_write<W: Word>(&mut self, request: Request, buf: *const [W], reg_addr: *mut W, options: TransferOptions) {
let (ptr, len) = super::slice_ptr_parts(buf);
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
request,
vals::Dir::MEMORYTOPERIPHERAL,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
)
impl sealed::Channel for crate::peripherals::$channel_peri {
fn regs(&self) -> pac::dma::Dma {
pac::$dma_peri
}
unsafe fn start_write_repeated<W: Word>(&mut self, request: Request, repeated: *const W, count: usize, reg_addr: *mut W, options: TransferOptions) {
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
request,
vals::Dir::MEMORYTOPERIPHERAL,
reg_addr as *const u32,
repeated as *mut u32,
count,
false,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
)
fn num(&self) -> usize {
$channel_num
}
unsafe fn start_read<W: Word>(&mut self, request: Request, reg_addr: *const W, buf: *mut [W], options: TransferOptions) {
let (ptr, len) = super::slice_ptr_parts_mut(buf);
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
request,
vals::Dir::PERIPHERALTOMEMORY,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
);
fn index(&self) -> usize {
$index
}
unsafe fn start_double_buffered_read<W: Word>(
&mut self,
request: Request,
reg_addr: *const W,
buffer0: *mut W,
buffer1: *mut W,
buffer_len: usize,
options: TransferOptions,
) {
low_level_api::start_dbm_transfer(
pac::$dma_peri,
$channel_num,
request,
vals::Dir::PERIPHERALTOMEMORY,
reg_addr as *const u32,
buffer0 as *mut u32,
buffer1 as *mut u32,
buffer_len,
true,
vals::Size::from(W::bits()),
options,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_REGS,
#[cfg(dmamux)]
<Self as super::dmamux::sealed::MuxChannel>::DMAMUX_CH_NUM,
);
}
unsafe fn set_buffer0<W: Word>(&mut self, buffer: *mut W) {
low_level_api::set_dbm_buffer0(pac::$dma_peri, $channel_num, buffer as *mut u32);
}
unsafe fn set_buffer1<W: Word>(&mut self, buffer: *mut W) {
low_level_api::set_dbm_buffer1(pac::$dma_peri, $channel_num, buffer as *mut u32);
}
unsafe fn is_buffer0_accessible(&mut self) -> bool {
low_level_api::is_buffer0_accessible(pac::$dma_peri, $channel_num)
}
fn request_stop(&mut self) {
unsafe {low_level_api::request_stop(pac::$dma_peri, $channel_num);}
}
fn is_running(&self) -> bool {
unsafe {low_level_api::is_running(pac::$dma_peri, $channel_num)}
}
fn remaining_transfers(&mut self) -> u16 {
unsafe {low_level_api::get_remaining_transfers(pac::$dma_peri, $channel_num)}
}
fn set_waker(&mut self, waker: &Waker) {
unsafe {low_level_api::set_waker($index, waker )}
}
fn on_irq() {
unsafe {
low_level_api::on_irq_inner(pac::$dma_peri, $channel_num, $index);
unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
}
}
}
impl crate::dma::Channel for crate::peripherals::$channel_peri { }
impl Channel for crate::peripherals::$channel_peri {}
};
}
mod low_level_api {
/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index: usize) {
let cr = dma.st(channel_num).cr();
let isr = dma.isr(channel_num / 4).read();
if isr.teif(channel_num % 4) {
panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num);
}
if isr.tcif(channel_num % 4) && cr.read().tcie() {
/* acknowledge transfer complete interrupt */
dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true));
STATE.ch_wakers[index].wake();
}
}
#[cfg(any(dma_v2, dmamux))]
pub type Request = u8;
#[cfg(not(any(dma_v2, dmamux)))]
pub type Request = ();
#[cfg(dmamux)]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {}
#[cfg(not(dmamux))]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
pub(crate) mod sealed {
use super::*;
pub unsafe fn start_transfer(
dma: pac::dma::Dma,
channel_number: u8,
pub trait Channel {
fn regs(&self) -> pac::dma::Dma;
fn num(&self) -> usize;
fn index(&self) -> usize;
fn on_irq();
}
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a, C: Channel> {
channel: PeripheralRef<'a, C>,
}
impl<'a, C: Channel> Transfer<'a, C> {
pub unsafe fn new_read<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
dir: vals::Dir,
peri_addr: *mut W,
buf: &'a mut [W],
options: TransferOptions,
) -> Self {
Self::new_read_raw(channel, request, peri_addr, buf, options)
}
pub unsafe fn new_read_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buf: *mut [W],
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts_mut(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request,
Dir::PeripheralToMemory,
peri_addr as *const u32,
ptr as *mut u32,
len,
true,
W::bits(),
options,
)
}
pub unsafe fn new_write<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
buf: &'a [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
Self::new_write_raw(channel, request, buf, peri_addr, options)
}
pub unsafe fn new_write_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
buf: *const [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
ptr as *mut u32,
len,
true,
W::bits(),
options,
)
}
pub unsafe fn new_write_repeated<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
repeated: &'a W,
count: usize,
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
Self::new_inner(
channel,
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
repeated as *const W as *mut u32,
count,
false,
W::bits(),
options,
)
}
unsafe fn new_inner(
channel: PeripheralRef<'a, C>,
_request: Request,
dir: Dir,
peri_addr: *const u32,
mem_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: vals::Size,
data_size: WordSize,
options: TransferOptions,
#[cfg(dmamux)] dmamux_regs: pac::dmamux::Dmamux,
#[cfg(dmamux)] dmamux_ch_num: u8,
) {
#[cfg(dmamux)]
super::super::dmamux::configure_dmamux(dmamux_regs, dmamux_ch_num, request);
) -> Self {
let ch = channel.regs().st(channel.num());
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
reset_status(dma, channel_number);
let mut this = Self { channel };
this.clear_irqs();
#[cfg(dmamux)]
super::dmamux::configure_dmamux(&mut *this.channel, _request);
let ch = dma.st(channel_number as _);
ch.par().write_value(peri_addr as u32);
ch.m0ar().write_value(mem_addr as u32);
ch.ndtr().write_value(regs::Ndtr(mem_len as _));
@ -258,15 +345,14 @@ mod low_level_api {
}
});
ch.cr().write(|w| {
w.set_dir(dir);
w.set_msize(data_size);
w.set_psize(data_size);
w.set_dir(dir.into());
w.set_msize(data_size.into());
w.set_psize(data_size.into());
w.set_pl(vals::Pl::VERYHIGH);
if incr_mem {
w.set_minc(vals::Inc::INCREMENTED);
} else {
w.set_minc(vals::Inc::FIXED);
}
w.set_minc(match incr_mem {
true => vals::Inc::INCREMENTED,
false => vals::Inc::FIXED,
});
w.set_pinc(vals::Inc::FIXED);
w.set_teie(true);
w.set_tcie(true);
@ -274,7 +360,7 @@ mod low_level_api {
w.set_trbuff(true);
#[cfg(dma_v2)]
w.set_chsel(request);
w.set_chsel(_request);
w.set_pburst(options.pburst.into());
w.set_mburst(options.mburst.into());
@ -282,159 +368,232 @@ mod low_level_api {
w.set_en(true);
});
this
}
pub unsafe fn start_dbm_transfer(
dma: pac::dma::Dma,
channel_number: u8,
request: Request,
dir: vals::Dir,
peri_addr: *const u32,
mem0_addr: *mut u32,
mem1_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: vals::Size,
options: TransferOptions,
#[cfg(dmamux)] dmamux_regs: pac::dmamux::Dmamux,
#[cfg(dmamux)] dmamux_ch_num: u8,
) {
#[cfg(dmamux)]
super::super::dmamux::configure_dmamux(dmamux_regs, dmamux_ch_num, request);
fn clear_irqs(&mut self) {
let isrn = self.channel.num() / 4;
let isrbit = self.channel.num() % 4;
trace!(
"Starting DBM transfer with 0: 0x{:x}, 1: 0x{:x}, len: 0x{:x}",
mem0_addr as u32,
mem1_addr as u32,
mem_len
);
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
reset_status(dma, channel_number);
let ch = dma.st(channel_number as _);
ch.par().write_value(peri_addr as u32);
ch.m0ar().write_value(mem0_addr as u32);
// configures the second buffer for DBM
ch.m1ar().write_value(mem1_addr as u32);
ch.ndtr().write_value(regs::Ndtr(mem_len as _));
ch.cr().write(|w| {
w.set_dir(dir);
w.set_msize(data_size);
w.set_psize(data_size);
w.set_pl(vals::Pl::VERYHIGH);
if incr_mem {
w.set_minc(vals::Inc::INCREMENTED);
} else {
w.set_minc(vals::Inc::FIXED);
unsafe {
self.channel.regs().ifcr(isrn).write(|w| {
w.set_tcif(isrbit, true);
w.set_teif(isrbit, true);
})
}
w.set_pinc(vals::Inc::FIXED);
w.set_teie(true);
w.set_tcie(true);
#[cfg(dma_v1)]
w.set_trbuff(true);
#[cfg(dma_v2)]
w.set_chsel(request);
// enable double buffered mode
w.set_dbm(vals::Dbm::ENABLED);
w.set_pburst(options.pburst.into());
w.set_mburst(options.mburst.into());
w.set_pfctrl(options.flow_ctrl.into());
w.set_en(true);
});
}
pub unsafe fn set_dbm_buffer0(dma: pac::dma::Dma, channel_number: u8, mem_addr: *mut u32) {
// get a handle on the channel itself
let ch = dma.st(channel_number as _);
// change M0AR to the new address
ch.m0ar().write_value(mem_addr as _);
}
pub unsafe fn set_dbm_buffer1(dma: pac::dma::Dma, channel_number: u8, mem_addr: *mut u32) {
// get a handle on the channel itself
let ch = dma.st(channel_number as _);
// change M1AR to the new address
ch.m1ar().write_value(mem_addr as _);
}
pub unsafe fn is_buffer0_accessible(dma: pac::dma::Dma, channel_number: u8) -> bool {
// get a handle on the channel itself
let ch = dma.st(channel_number as _);
// check the current target register value
ch.cr().read().ct() == vals::Ct::MEMORY1
}
/// Stops the DMA channel.
pub unsafe fn request_stop(dma: pac::dma::Dma, channel_number: u8) {
// get a handle on the channel itself
let ch = dma.st(channel_number as _);
pub fn request_stop(&mut self) {
let ch = self.channel.regs().st(self.channel.num());
// Disable the channel. Keep the IEs enabled so the irqs still fire.
unsafe {
ch.cr().write(|w| {
w.set_teie(true);
w.set_tcie(true);
});
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
})
}
}
/// Gets the running status of the channel
pub unsafe fn is_running(dma: pac::dma::Dma, ch: u8) -> bool {
// get a handle on the channel itself
let ch = dma.st(ch as _);
// Get whether it's enabled (running)
ch.cr().read().en()
pub fn is_running(&mut self) -> bool {
let ch = self.channel.regs().st(self.channel.num());
unsafe { ch.cr().read() }.en()
}
/// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation.
pub unsafe fn get_remaining_transfers(dma: pac::dma::Dma, ch: u8) -> u16 {
// get a handle on the channel itself
let ch = dma.st(ch as _);
// read the remaining transfer count. If this is zero, the transfer completed fully.
ch.ndtr().read().ndt()
pub fn get_remaining_transfers(&self) -> u16 {
let ch = self.channel.regs().st(self.channel.num());
unsafe { ch.ndtr().read() }.ndt()
}
/// Sets the waker for the specified DMA channel
pub unsafe fn set_waker(state_number: usize, waker: &Waker) {
STATE.channels[state_number].waker.register(waker);
pub fn blocking_wait(mut self) {
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
core::mem::forget(self);
}
}
pub unsafe fn reset_status(dma: pac::dma::Dma, channel_number: u8) {
let isrn = channel_number as usize / 4;
let isrbit = channel_number as usize % 4;
impl<'a, C: Channel> Drop for Transfer<'a, C> {
fn drop(&mut self) {
self.request_stop();
while self.is_running() {}
dma.ifcr(isrn).write(|w| {
w.set_tcif(isrbit, true);
w.set_teif(isrbit, true);
});
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: u8, state_index: u8) {
let channel_num = channel_num as usize;
let state_index = state_index as usize;
impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
impl<'a, C: Channel> Future for Transfer<'a, C> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
STATE.ch_wakers[self.channel.index()].register(cx.waker());
let cr = dma.st(channel_num).cr();
let isr = dma.isr(channel_num / 4).read();
if isr.teif(channel_num % 4) {
panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num);
}
if isr.tcif(channel_num % 4) && cr.read().tcie() {
/* acknowledge transfer complete interrupt */
dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true));
STATE.channels[state_index].waker.wake();
if self.is_running() {
Poll::Pending
} else {
Poll::Ready(())
}
}
}
// ==================================
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct DoubleBuffered<'a, C: Channel, W: Word> {
channel: PeripheralRef<'a, C>,
_phantom: PhantomData<W>,
}
impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> {
pub unsafe fn new_read(
channel: impl Peripheral<P = C> + 'a,
_request: Request,
peri_addr: *mut W,
buf0: *mut W,
buf1: *mut W,
len: usize,
options: TransferOptions,
) -> Self {
into_ref!(channel);
assert!(len > 0 && len <= 0xFFFF);
let dir = Dir::PeripheralToMemory;
let data_size = W::bits();
let channel_number = channel.num();
let dma = channel.regs();
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
let mut this = Self {
channel,
_phantom: PhantomData,
};
this.clear_irqs();
#[cfg(dmamux)]
super::dmamux::configure_dmamux(&mut *this.channel, _request);
let ch = dma.st(channel_number);
ch.par().write_value(peri_addr as u32);
ch.m0ar().write_value(buf0 as u32);
ch.m1ar().write_value(buf1 as u32);
ch.ndtr().write_value(regs::Ndtr(len as _));
ch.fcr().write(|w| {
if let Some(fth) = options.fifo_threshold {
// FIFO mode
w.set_dmdis(vals::Dmdis::DISABLED);
w.set_fth(fth.into());
} else {
// Direct mode
w.set_dmdis(vals::Dmdis::ENABLED);
}
});
ch.cr().write(|w| {
w.set_dir(dir.into());
w.set_msize(data_size.into());
w.set_psize(data_size.into());
w.set_pl(vals::Pl::VERYHIGH);
w.set_minc(vals::Inc::INCREMENTED);
w.set_pinc(vals::Inc::FIXED);
w.set_teie(true);
w.set_tcie(true);
#[cfg(dma_v1)]
w.set_trbuff(true);
#[cfg(dma_v2)]
w.set_chsel(_request);
w.set_pburst(options.pburst.into());
w.set_mburst(options.mburst.into());
w.set_pfctrl(options.flow_ctrl.into());
w.set_en(true);
});
this
}
fn clear_irqs(&mut self) {
let channel_number = self.channel.num();
let dma = self.channel.regs();
let isrn = channel_number / 4;
let isrbit = channel_number % 4;
unsafe {
dma.ifcr(isrn).write(|w| {
w.set_tcif(isrbit, true);
w.set_teif(isrbit, true);
})
}
}
pub unsafe fn set_buffer0(&mut self, buffer: *mut W) {
let ch = self.channel.regs().st(self.channel.num());
ch.m0ar().write_value(buffer as _);
}
pub unsafe fn set_buffer1(&mut self, buffer: *mut W) {
let ch = self.channel.regs().st(self.channel.num());
ch.m1ar().write_value(buffer as _);
}
pub fn is_buffer0_accessible(&mut self) -> bool {
let ch = self.channel.regs().st(self.channel.num());
unsafe { ch.cr().read() }.ct() == vals::Ct::MEMORY1
}
pub fn set_waker(&mut self, waker: &Waker) {
STATE.ch_wakers[self.channel.index()].register(waker);
}
pub fn request_stop(&mut self) {
let ch = self.channel.regs().st(self.channel.num());
// Disable the channel. Keep the IEs enabled so the irqs still fire.
unsafe {
ch.cr().write(|w| {
w.set_teie(true);
w.set_tcie(true);
})
}
}
pub fn is_running(&mut self) -> bool {
let ch = self.channel.regs().st(self.channel.num());
unsafe { ch.cr().read() }.en()
}
/// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation.
pub fn get_remaining_transfers(&self) -> u16 {
let ch = self.channel.regs().st(self.channel.num());
unsafe { ch.ndtr().read() }.ndt()
}
pub fn blocking_wait(mut self) {
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
core::mem::forget(self);
}
}
impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> {
fn drop(&mut self) {
self.request_stop();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
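Usage sketch (not part of this diff): unlike the BDMA/GPDMA flavours, this module keeps the burst, flow-control and FIFO knobs on its own TransferOptions. Because the struct is #[non_exhaustive], callers outside the crate build it from Default and override fields; the values below are only an example.

let mut options = TransferOptions::default();
options.mburst = Burst::Incr4;                      // 4-beat memory bursts
options.fifo_threshold = Some(FifoThreshold::Half); // FIFO mode; None means direct mode
// options.pburst and options.flow_ctrl keep their defaults (Single, FlowControl::Dma).
// let transfer = unsafe { Transfer::new_read(ch, request, dr, buf, options) };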

embassy-stm32/src/dma/dmamux.rs

@ -2,8 +2,8 @@
use crate::{pac, peripherals};
pub(crate) unsafe fn configure_dmamux(dmamux_regs: pac::dmamux::Dmamux, dmamux_ch_num: u8, request: u8) {
let ch_mux_regs = dmamux_regs.ccr(dmamux_ch_num as _);
pub(crate) unsafe fn configure_dmamux<M: MuxChannel>(channel: &mut M, request: u8) {
let ch_mux_regs = channel.mux_regs().ccr(channel.mux_num());
ch_mux_regs.write(|reg| {
reg.set_nbreq(0);
reg.set_dmareq_id(request);
@ -14,11 +14,11 @@ pub(crate) unsafe fn configure_dmamux(dmamux_regs: pac::dmamux::Dmamux, dmamux_c
});
}
pub(crate) mod sealed {
pub(crate) mod dmamux_sealed {
use super::*;
pub trait MuxChannel {
const DMAMUX_CH_NUM: u8;
const DMAMUX_REGS: pac::dmamux::Dmamux;
fn mux_regs(&self) -> pac::dmamux::Dmamux;
fn mux_num(&self) -> usize;
}
}
@ -26,15 +26,19 @@ pub struct DMAMUX1;
#[cfg(stm32h7)]
pub struct DMAMUX2;
pub trait MuxChannel: sealed::MuxChannel + super::Channel {
pub trait MuxChannel: dmamux_sealed::MuxChannel {
type Mux;
}
foreach_dma_channel! {
($channel_peri:ident, $dma_peri:ident, $version:ident, $channel_num:expr, $index:expr, {dmamux: $dmamux:ident, dmamux_channel: $dmamux_channel:expr}) => {
impl sealed::MuxChannel for peripherals::$channel_peri {
const DMAMUX_CH_NUM: u8 = $dmamux_channel;
const DMAMUX_REGS: pac::dmamux::Dmamux = pac::$dmamux;
impl dmamux_sealed::MuxChannel for peripherals::$channel_peri {
fn mux_regs(&self) -> pac::dmamux::Dmamux {
pac::$dmamux
}
fn mux_num(&self) -> usize {
$dmamux_channel
}
}
impl MuxChannel for peripherals::$channel_peri {
type Mux = $dmamux;

embassy-stm32/src/dma/gpdma.rs

@ -1,13 +1,30 @@
use core::sync::atomic::{fence, Ordering};
use core::task::Waker;
#![macro_use]
use core::future::Future;
use core::pin::Pin;
use core::sync::atomic::{fence, Ordering};
use core::task::{Context, Poll};
use embassy_cortex_m::interrupt::Priority;
use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
use embassy_sync::waitqueue::AtomicWaker;
use super::{Request, TransferOptions, Word, WordSize};
use super::{Dir, Word, WordSize};
use crate::_generated::GPDMA_CHANNEL_COUNT;
use crate::interrupt::{Interrupt, InterruptExt};
use crate::pac::gpdma::{vals, Gpdma};
use crate::{interrupt, pac};
use crate::pac;
use crate::pac::gpdma::vals;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {}
impl Default for TransferOptions {
fn default() -> Self {
Self {}
}
}
impl From<WordSize> for vals::ChTr1Dw {
fn from(raw: WordSize) -> Self {
@ -19,27 +36,15 @@ impl From<WordSize> for vals::ChTr1Dw {
}
}
struct ChannelState {
waker: AtomicWaker,
}
impl ChannelState {
const fn new() -> Self {
Self {
waker: AtomicWaker::new(),
}
}
}
struct State {
channels: [ChannelState; GPDMA_CHANNEL_COUNT],
ch_wakers: [AtomicWaker; GPDMA_CHANNEL_COUNT],
}
impl State {
const fn new() -> Self {
const CH: ChannelState = ChannelState::new();
const AW: AtomicWaker = AtomicWaker::new();
Self {
channels: [CH; GPDMA_CHANNEL_COUNT],
ch_wakers: [AW; GPDMA_CHANNEL_COUNT],
}
}
}
@ -47,10 +52,12 @@ impl State {
static STATE: State = State::new();
/// safety: must be called only once
pub(crate) unsafe fn init() {
pub(crate) unsafe fn init(irq_priority: Priority) {
foreach_interrupt! {
($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => {
interrupt::$irq::steal().enable();
let irq = crate::interrupt::$irq::steal();
irq.set_priority(irq_priority);
irq.enable();
};
}
crate::_generated::init_gpdma();
@ -58,15 +65,103 @@ pub(crate) unsafe fn init() {
foreach_dma_channel! {
($channel_peri:ident, $dma_peri:ident, gpdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
impl crate::dma::sealed::Channel for crate::peripherals::$channel_peri {
unsafe fn start_write<W: Word>(&mut self, request: Request, buf: *const [W], reg_addr: *mut W, options: TransferOptions) {
let (ptr, len) = super::slice_ptr_parts(buf);
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
impl sealed::Channel for crate::peripherals::$channel_peri {
fn regs(&self) -> pac::gpdma::Gpdma {
pac::$dma_peri
}
fn num(&self) -> usize {
$channel_num
}
fn index(&self) -> usize {
$index
}
fn on_irq() {
unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
}
}
impl Channel for crate::peripherals::$channel_peri {}
};
}
/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub(crate) unsafe fn on_irq_inner(dma: pac::gpdma::Gpdma, channel_num: usize, index: usize) {
let ch = dma.ch(channel_num);
let sr = ch.sr().read();
if sr.dtef() {
panic!(
"DMA: data transfer error on DMA@{:08x} channel {}",
dma.0 as u32, channel_num
);
}
if sr.usef() {
panic!(
"DMA: user settings error on DMA@{:08x} channel {}",
dma.0 as u32, channel_num
);
}
if sr.suspf() || sr.tcf() {
// disable all xxIEs to prevent the irq from firing again.
ch.cr().write(|_| {});
// Wake the future. It'll look at tcf and see it's set.
STATE.ch_wakers[index].wake();
}
}
pub type Request = u8;
#[cfg(dmamux)]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {}
#[cfg(not(dmamux))]
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
pub(crate) mod sealed {
use super::*;
pub trait Channel {
fn regs(&self) -> pac::gpdma::Gpdma;
fn num(&self) -> usize;
fn index(&self) -> usize;
fn on_irq();
}
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a, C: Channel> {
channel: PeripheralRef<'a, C>,
}
impl<'a, C: Channel> Transfer<'a, C> {
pub unsafe fn new_read<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buf: &'a mut [W],
options: TransferOptions,
) -> Self {
Self::new_read_raw(channel, request, peri_addr, buf, options)
}
pub unsafe fn new_read_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
peri_addr: *mut W,
buf: *mut [W],
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts_mut(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request,
low_level_api::Dir::MemoryToPeripheral,
reg_addr as *const u32,
Dir::PeripheralToMemory,
peri_addr as *const u32,
ptr as *mut u32,
len,
true,
@ -75,14 +170,57 @@ foreach_dma_channel! {
)
}
unsafe fn start_write_repeated<W: Word>(&mut self, request: Request, repeated: *const W, count: usize, reg_addr: *mut W, options: TransferOptions) {
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
pub unsafe fn new_write<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
buf: &'a [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
Self::new_write_raw(channel, request, buf, peri_addr, options)
}
pub unsafe fn new_write_raw<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
buf: *const [W],
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
let (ptr, len) = super::slice_ptr_parts(buf);
assert!(len > 0 && len <= 0xFFFF);
Self::new_inner(
channel,
request,
low_level_api::Dir::MemoryToPeripheral,
reg_addr as *const u32,
repeated as *mut u32,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
ptr as *mut u32,
len,
true,
W::bits(),
options,
)
}
pub unsafe fn new_write_repeated<W: Word>(
channel: impl Peripheral<P = C> + 'a,
request: Request,
repeated: &'a W,
count: usize,
peri_addr: *mut W,
options: TransferOptions,
) -> Self {
into_ref!(channel);
Self::new_inner(
channel,
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
repeated as *const W as *mut u32,
count,
false,
W::bits(),
@ -90,85 +228,8 @@ foreach_dma_channel! {
)
}
unsafe fn start_read<W: Word>(&mut self, request: Request, reg_addr: *const W, buf: *mut [W], options: TransferOptions) {
let (ptr, len) = super::slice_ptr_parts_mut(buf);
low_level_api::start_transfer(
pac::$dma_peri,
$channel_num,
request,
low_level_api::Dir::PeripheralToMemory,
reg_addr as *const u32,
ptr as *mut u32,
len,
true,
W::bits(),
options,
);
}
unsafe fn start_double_buffered_read<W: Word>(
&mut self,
_request: Request,
_reg_addr: *const W,
_buffer0: *mut W,
_buffer1: *mut W,
_buffer_len: usize,
_options: TransferOptions,
) {
panic!("Unsafe double buffered mode is unavailable on GPBDMA");
}
unsafe fn set_buffer0<W: Word>(&mut self, _buffer: *mut W) {
panic!("Unsafe double buffered mode is unavailable on GPBDMA");
}
unsafe fn set_buffer1<W: Word>(&mut self, _buffer: *mut W) {
panic!("Unsafe double buffered mode is unavailable on GPBDMA");
}
unsafe fn is_buffer0_accessible(&mut self) -> bool {
panic!("Unsafe double buffered mode is unavailable on GPBDMA");
}
fn request_stop(&mut self) {
unsafe {low_level_api::request_stop(pac::$dma_peri, $channel_num);}
}
fn is_running(&self) -> bool {
unsafe {low_level_api::is_running(pac::$dma_peri, $channel_num)}
}
fn remaining_transfers(&mut self) -> u16 {
unsafe {low_level_api::get_remaining_transfers(pac::$dma_peri, $channel_num)}
}
fn set_waker(&mut self, waker: &Waker) {
unsafe {low_level_api::set_waker($index, waker )}
}
fn on_irq() {
unsafe {
low_level_api::on_irq_inner(pac::$dma_peri, $channel_num, $index);
}
}
}
impl crate::dma::Channel for crate::peripherals::$channel_peri { }
};
}
mod low_level_api {
use super::*;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Dir {
MemoryToPeripheral,
PeripheralToMemory,
}
pub unsafe fn start_transfer(
dma: Gpdma,
channel_number: u8,
unsafe fn new_inner(
channel: PeripheralRef<'a, C>,
request: Request,
dir: Dir,
peri_addr: *const u32,
@ -176,24 +237,19 @@ mod low_level_api {
mem_len: usize,
incr_mem: bool,
data_size: WordSize,
options: TransferOptions,
) {
assert!(options.mburst == crate::dma::Burst::Single, "Burst mode not supported");
assert!(options.pburst == crate::dma::Burst::Single, "Burst mode not supported");
assert!(
options.flow_ctrl == crate::dma::FlowControl::Dma,
"Peripheral flow control not supported"
);
assert!(options.fifo_threshold.is_none(), "FIFO mode not supported");
_options: TransferOptions,
) -> Self {
let ch = channel.regs().ch(channel.num());
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
let ch = dma.ch(channel_number as _);
let this = Self { channel };
#[cfg(dmamux)]
super::dmamux::configure_dmamux(&mut *this.channel, request);
// Reset ch
ch.cr().write(|w| w.set_reset(true));
ch.llr().write(|_| {}); // no linked list
ch.tr1().write(|w| {
w.set_sdw(data_size.into());
@ -234,72 +290,66 @@ mod low_level_api {
// Start it
w.set_en(true);
});
this
}
/// Stops the DMA channel.
pub unsafe fn request_stop(dma: Gpdma, channel_number: u8) {
// get a handle on the channel itself
let ch = dma.ch(channel_number as _);
pub fn request_stop(&mut self) {
let ch = self.channel.regs().ch(self.channel.num());
// Disable the channel. Keep the IEs enabled so the irqs still fire.
unsafe {
ch.cr().write(|w| {
w.set_tcie(true);
w.set_useie(true);
w.set_dteie(true);
w.set_suspie(true);
});
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
})
}
}
/// Gets the running status of the channel
pub unsafe fn is_running(dma: Gpdma, ch: u8) -> bool {
let ch = dma.ch(ch as _);
!ch.sr().read().tcf()
pub fn is_running(&mut self) -> bool {
let ch = self.channel.regs().ch(self.channel.num());
!unsafe { ch.sr().read() }.tcf()
}
/// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation.
pub unsafe fn get_remaining_transfers(dma: Gpdma, ch: u8) -> u16 {
// get a handle on the channel itself
let ch = dma.ch(ch as _);
// read the remaining transfer count. If this is zero, the transfer completed fully.
ch.br1().read().bndt()
pub fn get_remaining_transfers(&self) -> u16 {
let ch = self.channel.regs().ch(self.channel.num());
unsafe { ch.br1().read() }.bndt()
}
/// Sets the waker for the specified DMA channel
pub unsafe fn set_waker(state_number: usize, waker: &Waker) {
STATE.channels[state_number].waker.register(waker);
pub fn blocking_wait(mut self) {
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
core::mem::forget(self);
}
}
/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub unsafe fn on_irq_inner(dma: Gpdma, channel_num: u8, state_index: u8) {
let channel_num = channel_num as usize;
let state_index = state_index as usize;
impl<'a, C: Channel> Drop for Transfer<'a, C> {
fn drop(&mut self) {
self.request_stop();
while self.is_running() {}
let ch = dma.ch(channel_num);
let sr = ch.sr().read();
if sr.dtef() {
panic!(
"DMA: data transfer error on DMA@{:08x} channel {}",
dma.0 as u32, channel_num
);
}
if sr.usef() {
panic!(
"DMA: user settings error on DMA@{:08x} channel {}",
dma.0 as u32, channel_num
);
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
if sr.suspf() || sr.tcf() {
// disable all xxIEs to prevent the irq from firing again.
ch.cr().write(|_| {});
impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
impl<'a, C: Channel> Future for Transfer<'a, C> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
STATE.ch_wakers[self.channel.index()].register(cx.waker());
// Wake the future. It'll look at tcf and see it's set.
STATE.channels[state_index].waker.wake();
if self.is_running() {
Poll::Pending
} else {
Poll::Ready(())
}
}
}
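Usage sketch (not part of this diff): new_write_repeated keeps the memory address fixed (incr_mem = false), which is useful for clocking out a single dummy/fill word many times. The channel, request and register below are placeholders.

// Hypothetical helper: push `count` copies of one fill byte to a peripheral data register.
async fn dma_write_fill<C: Channel>(ch: &mut C, request: Request, dr: *mut u8, fill: &u8, count: usize) {
    let transfer = unsafe {
        Transfer::new_write_repeated(ch, request, fill, count, dr, Default::default())
    };
    transfer.await;
}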

embassy-stm32/src/dma/mod.rs

@ -1,124 +1,39 @@
#[cfg(bdma)]
pub(crate) mod bdma;
#[cfg(dma)]
pub(crate) mod dma;
#[cfg(dma)]
pub use dma::*;
// stm32h7 has both dma and bdma. In that case, we export dma as "main" dma,
// and bdma as "secondary", under `embassy_stm32::dma::bdma`.
#[cfg(all(bdma, dma))]
pub mod bdma;
#[cfg(all(bdma, not(dma)))]
pub(crate) mod bdma;
#[cfg(all(bdma, not(dma)))]
pub use bdma::*;
#[cfg(gpdma)]
pub(crate) mod gpdma;
#[cfg(gpdma)]
pub use gpdma::*;
#[cfg(dmamux)]
mod dmamux;
#[cfg(gpdma)]
mod gpdma;
use core::future::Future;
use core::mem;
use core::pin::Pin;
use core::task::{Context, Poll, Waker};
#[cfg(any(dma, bdma))]
use embassy_cortex_m::interrupt::Priority;
use embassy_hal_common::{impl_peripheral, into_ref};
use embassy_hal_common::impl_peripheral;
#[cfg(dmamux)]
pub use self::dmamux::*;
use crate::Peripheral;
#[cfg(feature = "unstable-pac")]
pub mod low_level {
pub use super::transfers::*;
}
pub(crate) use transfers::*;
#[cfg(any(bdma_v2, dma_v2, dmamux, gpdma))]
pub type Request = u8;
#[cfg(not(any(bdma_v2, dma_v2, dmamux, gpdma)))]
pub type Request = ();
pub(crate) mod sealed {
use super::*;
pub trait Word {}
pub trait Channel {
/// Starts this channel for writing a stream of words.
///
/// Safety:
/// - `buf` must point to a valid buffer for DMA reading.
/// - `buf` must be alive for the entire duration of the DMA transfer.
/// - `reg_addr` must be a valid peripheral register address to write to.
unsafe fn start_write<W: super::Word>(
&mut self,
request: Request,
buf: *const [W],
reg_addr: *mut W,
options: TransferOptions,
);
/// Starts this channel for writing a word repeatedly.
///
/// Safety:
/// - `reg_addr` must be a valid peripheral register address to write to.
unsafe fn start_write_repeated<W: super::Word>(
&mut self,
request: Request,
repeated: *const W,
count: usize,
reg_addr: *mut W,
options: TransferOptions,
);
/// Starts this channel for reading a stream of words.
///
/// Safety:
/// - `buf` must point to a valid buffer for DMA writing.
/// - `buf` must be alive for the entire duration of the DMA transfer.
/// - `reg_addr` must be a valid peripheral register address to read from.
unsafe fn start_read<W: super::Word>(
&mut self,
request: Request,
reg_addr: *const W,
buf: *mut [W],
options: TransferOptions,
);
/// DMA double-buffered mode is unsafe as UB can happen when the hardware writes to a buffer currently owned by the software
/// more information can be found here: https://github.com/embassy-rs/embassy/issues/702
/// This feature is now used solely for the purposes of implementing giant DMA transfers required for DCMI
unsafe fn start_double_buffered_read<W: super::Word>(
&mut self,
request: Request,
reg_addr: *const W,
buffer0: *mut W,
buffer1: *mut W,
buffer_len: usize,
options: TransferOptions,
);
unsafe fn set_buffer0<W: super::Word>(&mut self, buffer: *mut W);
unsafe fn set_buffer1<W: super::Word>(&mut self, buffer: *mut W);
unsafe fn is_buffer0_accessible(&mut self) -> bool;
/// Requests the channel to stop.
/// NOTE: The channel does not immediately stop, you have to wait
/// for `is_running() = false`.
fn request_stop(&mut self);
/// Returns whether this channel is running or stopped.
///
/// The channel stops running when it either completes or is manually stopped.
fn is_running(&self) -> bool;
/// Returns the total number of remaining transfers.
fn remaining_transfers(&mut self) -> u16;
/// Sets the waker that is called when this channel stops (either completed or manually stopped)
fn set_waker(&mut self, waker: &Waker);
/// This is called when this channel triggers an interrupt.
/// Note: Because some channels share an interrupt, this function might be
/// called for a channel that didn't trigger an interrupt.
fn on_irq();
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
enum Dir {
MemoryToPeripheral,
PeripheralToMemory,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
@ -139,191 +54,39 @@ impl WordSize {
}
}
pub trait Word: sealed::Word {
mod word_sealed {
pub trait Word {}
}
pub trait Word: word_sealed::Word {
fn bits() -> WordSize;
}
impl sealed::Word for u8 {}
impl word_sealed::Word for u8 {}
impl Word for u8 {
fn bits() -> WordSize {
WordSize::OneByte
}
}
impl sealed::Word for u16 {}
impl word_sealed::Word for u16 {}
impl Word for u16 {
fn bits() -> WordSize {
WordSize::TwoBytes
}
}
impl sealed::Word for u32 {}
impl word_sealed::Word for u32 {}
impl Word for u32 {
fn bits() -> WordSize {
WordSize::FourBytes
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Burst {
/// Single transfer
Single,
/// Incremental burst of 4 beats
Incr4,
/// Incremental burst of 8 beats
Incr8,
/// Incremental burst of 16 beats
Incr16,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum FlowControl {
/// Flow control by DMA
Dma,
/// Flow control by peripheral
Peripheral,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum FifoThreshold {
/// 1/4 full FIFO
Quarter,
/// 1/2 full FIFO
Half,
/// 3/4 full FIFO
ThreeQuarters,
/// Full FIFO
Full,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct TransferOptions {
/// Peripheral burst transfer configuration
pub pburst: Burst,
/// Memory burst transfer configuration
pub mburst: Burst,
/// Flow control configuration
pub flow_ctrl: FlowControl,
/// FIFO threshold for DMA FIFO mode. If none, direct mode is used.
pub fifo_threshold: Option<FifoThreshold>,
}
impl Default for TransferOptions {
fn default() -> Self {
Self {
pburst: Burst::Single,
mburst: Burst::Single,
flow_ctrl: FlowControl::Dma,
fifo_threshold: None,
}
}
}
mod transfers {
use embassy_hal_common::PeripheralRef;
use super::*;
#[allow(unused)]
pub fn read<'a, W: Word>(
channel: impl Peripheral<P = impl Channel> + 'a,
request: Request,
reg_addr: *mut W,
buf: &'a mut [W],
) -> impl Future<Output = ()> + 'a {
assert!(buf.len() > 0 && buf.len() <= 0xFFFF);
into_ref!(channel);
unsafe { channel.start_read::<W>(request, reg_addr, buf, Default::default()) };
Transfer::new(channel)
}
#[allow(unused)]
pub fn write<'a, W: Word>(
channel: impl Peripheral<P = impl Channel> + 'a,
request: Request,
buf: &'a [W],
reg_addr: *mut W,
) -> impl Future<Output = ()> + 'a {
assert!(buf.len() > 0 && buf.len() <= 0xFFFF);
into_ref!(channel);
unsafe { channel.start_write::<W>(request, buf, reg_addr, Default::default()) };
Transfer::new(channel)
}
#[allow(unused)]
pub fn write_repeated<'a, W: Word>(
channel: impl Peripheral<P = impl Channel> + 'a,
request: Request,
repeated: *const W,
count: usize,
reg_addr: *mut W,
) -> impl Future<Output = ()> + 'a {
into_ref!(channel);
unsafe { channel.start_write_repeated::<W>(request, repeated, count, reg_addr, Default::default()) };
Transfer::new(channel)
}
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub(crate) struct Transfer<'a, C: Channel> {
channel: PeripheralRef<'a, C>,
}
impl<'a, C: Channel> Transfer<'a, C> {
pub(crate) fn new(channel: impl Peripheral<P = C> + 'a) -> Self {
into_ref!(channel);
Self { channel }
}
}
impl<'a, C: Channel> Drop for Transfer<'a, C> {
fn drop(&mut self) {
self.channel.request_stop();
while self.channel.is_running() {}
}
}
impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
impl<'a, C: Channel> Future for Transfer<'a, C> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.channel.set_waker(cx.waker());
if self.channel.is_running() {
Poll::Pending
} else {
Poll::Ready(())
}
}
}
}
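Elsewhere in this diff these free functions are replaced by constructor-style transfers; a minimal before/after sketch of a read at a call site (argument names are placeholders):

// before: crate::dma::read(ch, request, src, buf).await;
// after:
let transfer = unsafe { Transfer::new_read(ch, request, src, buf, Default::default()) };
transfer.await;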
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
pub struct NoDma;
impl_peripheral!(NoDma);
// safety: must be called only once at startup
pub(crate) unsafe fn init(#[cfg(bdma)] bdma_priority: Priority, #[cfg(dma)] dma_priority: Priority) {
#[cfg(bdma)]
bdma::init(bdma_priority);
#[cfg(dma)]
dma::init(dma_priority);
#[cfg(dmamux)]
dmamux::init();
#[cfg(gpdma)]
gpdma::init();
}
// TODO: replace transmutes with core::ptr::metadata once it's stable
#[allow(unused)]
pub(crate) fn slice_ptr_parts<T>(slice: *const [T]) -> (usize, usize) {
@ -334,3 +97,19 @@ pub(crate) fn slice_ptr_parts<T>(slice: *const [T]) -> (usize, usize) {
pub(crate) fn slice_ptr_parts_mut<T>(slice: *mut [T]) -> (usize, usize) {
unsafe { mem::transmute(slice) }
}
// safety: must be called only once at startup
pub(crate) unsafe fn init(
#[cfg(bdma)] bdma_priority: Priority,
#[cfg(dma)] dma_priority: Priority,
#[cfg(gpdma)] gpdma_priority: Priority,
) {
#[cfg(bdma)]
bdma::init(bdma_priority);
#[cfg(dma)]
dma::init(dma_priority);
#[cfg(gpdma)]
gpdma::init(gpdma_priority);
#[cfg(dmamux)]
dmamux::init();
}

View File

@ -8,7 +8,7 @@ use embassy_hal_common::drop::OnDrop;
use embassy_hal_common::{into_ref, PeripheralRef};
use embassy_sync::waitqueue::AtomicWaker;
use crate::dma::NoDma;
use crate::dma::{NoDma, Transfer};
use crate::gpio::sealed::AFType;
use crate::gpio::Pull;
use crate::i2c::{Error, Instance, SclPin, SdaPin};
@ -476,7 +476,7 @@ impl<'d, T: Instance, TXDMA, RXDMA> I2c<'d, T, TXDMA, RXDMA> {
let ch = &mut self.tx_dma;
let request = ch.request();
crate::dma::write(ch, request, write, dst)
Transfer::new_write(ch, request, write, dst, Default::default())
};
let state = T::state();
@ -576,7 +576,7 @@ impl<'d, T: Instance, TXDMA, RXDMA> I2c<'d, T, TXDMA, RXDMA> {
let ch = &mut self.rx_dma;
let request = ch.request();
crate::dma::read(ch, request, src, buffer)
Transfer::new_read(ch, request, src, buffer, Default::default())
};
let state = T::state();

View File

@ -78,7 +78,6 @@ pub(crate) mod _generated {
// Reexports
pub use _generated::{peripherals, Peripherals};
pub use embassy_cortex_m::executor;
#[cfg(any(dma, bdma))]
use embassy_cortex_m::interrupt::Priority;
pub use embassy_cortex_m::interrupt::_export::interrupt;
pub use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
@ -96,6 +95,8 @@ pub struct Config {
pub bdma_interrupt_priority: Priority,
#[cfg(dma)]
pub dma_interrupt_priority: Priority,
#[cfg(gpdma)]
pub gpdma_interrupt_priority: Priority,
}
impl Default for Config {
@ -108,6 +109,8 @@ impl Default for Config {
bdma_interrupt_priority: Priority::P0,
#[cfg(dma)]
dma_interrupt_priority: Priority::P0,
#[cfg(gpdma)]
gpdma_interrupt_priority: Priority::P0,
}
}
}
@ -151,6 +154,8 @@ pub fn init(config: Config) -> Peripherals {
config.bdma_interrupt_priority,
#[cfg(dma)]
config.dma_interrupt_priority,
#[cfg(gpdma)]
config.gpdma_interrupt_priority,
);
#[cfg(feature = "exti")]
exti::init();
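A minimal sketch of picking these priorities from application code (the exact `Priority` variants available depend on the chip's NVIC priority bits, and each field exists only when the matching DMA flavour is present on the chip):

use embassy_cortex_m::interrupt::Priority;

let mut config = embassy_stm32::Config::default();
config.dma_interrupt_priority = Priority::P1; // illustrative value
let p = embassy_stm32::init(config);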

View File

@ -5,7 +5,7 @@ pub mod enums;
use embassy_hal_common::{into_ref, PeripheralRef};
use enums::*;
use crate::dma::TransferOptions;
use crate::dma::Transfer;
use crate::gpio::sealed::AFType;
use crate::gpio::AnyPin;
use crate::pac::quadspi::Quadspi as Regs;
@ -230,9 +230,6 @@ impl<'d, T: Instance, Dma> Qspi<'d, T, Dma> {
unsafe {
self.setup_transaction(QspiMode::IndirectWrite, &transaction);
let request = self.dma.request();
let options = TransferOptions::default();
T::REGS.ccr().modify(|v| {
v.set_fmode(QspiMode::IndirectRead.into());
});
@ -241,12 +238,18 @@ impl<'d, T: Instance, Dma> Qspi<'d, T, Dma> {
v.set_address(current_ar);
});
self.dma
.start_read(request, T::REGS.dr().ptr() as *mut u8, buf, options);
let request = self.dma.request();
let transfer = Transfer::new_read(
&mut self.dma,
request,
T::REGS.dr().ptr() as *mut u8,
buf,
Default::default(),
);
T::REGS.cr().modify(|v| v.set_dmaen(true));
while self.dma.is_running() {}
transfer.blocking_wait();
}
}
@ -257,19 +260,22 @@ impl<'d, T: Instance, Dma> Qspi<'d, T, Dma> {
unsafe {
self.setup_transaction(QspiMode::IndirectWrite, &transaction);
let request = self.dma.request();
let options = TransferOptions::default();
T::REGS.ccr().modify(|v| {
v.set_fmode(QspiMode::IndirectWrite.into());
});
self.dma
.start_write(request, buf, T::REGS.dr().ptr() as *mut u8, options);
let request = self.dma.request();
let transfer = Transfer::new_write(
&mut self.dma,
request,
buf,
T::REGS.dr().ptr() as *mut u8,
Default::default(),
);
T::REGS.cr().modify(|v| v.set_dmaen(true));
while self.dma.is_running() {}
transfer.blocking_wait();
}
}
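Both completion styles of the new transfer API appear in this diff; a minimal sketch of the difference (placeholder names, assuming a read transfer like the one above):

let transfer = unsafe { Transfer::new_read(&mut self.dma, request, src, buf, Default::default()) };
transfer.blocking_wait(); // busy-wait until the hardware finishes, as QSPI does here
// In async drivers (usart, spi below) the transfer is simply awaited instead:
// transfer.await;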

View File

@ -185,6 +185,21 @@ fn clk_div(ker_ck: Hertz, sdmmc_ck: u32) -> Result<(bool, u16, Hertz), Error> {
}
}
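// sdmmc_v1 moves data through an external DMA channel, so it keeps a real
// `crate::dma::Transfer`; sdmmc_v2 drives its own internal DMA
// (IDMABASE0R/IDMACTRLR below), so no channel transfer object is needed.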
#[cfg(sdmmc_v1)]
type Transfer<'a, C> = crate::dma::Transfer<'a, C>;
#[cfg(sdmmc_v2)]
type Transfer<'a, C> = core::marker::PhantomData<&'a mut C>;
#[cfg(all(sdmmc_v1, dma))]
const DMA_TRANSFER_OPTIONS: crate::dma::TransferOptions = crate::dma::TransferOptions {
pburst: crate::dma::Burst::Incr4,
mburst: crate::dma::Burst::Incr4,
flow_ctrl: crate::dma::FlowControl::Peripheral,
fifo_threshold: Some(crate::dma::FifoThreshold::Full),
};
#[cfg(all(sdmmc_v1, not(dma)))]
const DMA_TRANSFER_OPTIONS: crate::dma::TransferOptions = crate::dma::TransferOptions {};
/// SDMMC configuration
///
/// Default values:
@ -490,7 +505,12 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
/// # Safety
///
/// `buffer` must be valid for the whole transfer and word aligned
unsafe fn prepare_datapath_read(&mut self, buffer: *mut [u32], length_bytes: u32, block_size: u8) {
fn prepare_datapath_read<'a>(
&'a mut self,
buffer: &'a mut [u32],
length_bytes: u32,
block_size: u8,
) -> Transfer<'a, Dma> {
assert!(block_size <= 14, "Block size up to 2^14 bytes");
let regs = T::regs();
@ -499,32 +519,28 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
Self::clear_interrupt_flags();
// NOTE(unsafe) We have exclusive access to the registers
unsafe {
regs.dtimer()
.write(|w| w.set_datatime(self.config.data_transfer_timeout));
regs.dlenr().write(|w| w.set_datalength(length_bytes));
#[cfg(sdmmc_v1)]
{
let transfer = {
let request = self.dma.request();
self.dma.start_read(
Transfer::new_read(
&mut self.dma,
request,
regs.fifor().ptr() as *const u32,
regs.fifor().ptr() as *mut u32,
buffer,
crate::dma::TransferOptions {
pburst: crate::dma::Burst::Incr4,
mburst: crate::dma::Burst::Incr4,
flow_ctrl: crate::dma::FlowControl::Peripheral,
fifo_threshold: Some(crate::dma::FifoThreshold::Full),
..Default::default()
},
);
}
DMA_TRANSFER_OPTIONS,
)
};
#[cfg(sdmmc_v2)]
{
regs.idmabase0r().write(|w| w.set_idmabase0(buffer as *mut u32 as u32));
let transfer = {
regs.idmabase0r().write(|w| w.set_idmabase0(buffer.as_mut_ptr() as u32));
regs.idmactrlr().modify(|w| w.set_idmaen(true));
}
core::marker::PhantomData
};
regs.dctrl().modify(|w| {
w.set_dblocksize(block_size);
@ -535,12 +551,20 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
w.set_dten(true);
}
});
transfer
}
}
/// # Safety
///
/// `buffer` must be valid for the whole transfer and word aligned
unsafe fn prepare_datapath_write(&mut self, buffer: *const [u32], length_bytes: u32, block_size: u8) {
fn prepare_datapath_write<'a>(
&'a mut self,
buffer: &'a [u32],
length_bytes: u32,
block_size: u8,
) -> Transfer<'a, Dma> {
assert!(block_size <= 14, "Block size up to 2^14 bytes");
let regs = T::regs();
@ -549,33 +573,28 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
Self::clear_interrupt_flags();
// NOTE(unsafe) We have exclusive access to the registers
unsafe {
regs.dtimer()
.write(|w| w.set_datatime(self.config.data_transfer_timeout));
regs.dlenr().write(|w| w.set_datalength(length_bytes));
#[cfg(sdmmc_v1)]
{
let transfer = {
let request = self.dma.request();
self.dma.start_write(
Transfer::new_write(
&mut self.dma,
request,
buffer,
regs.fifor().ptr() as *mut u32,
crate::dma::TransferOptions {
pburst: crate::dma::Burst::Incr4,
mburst: crate::dma::Burst::Incr4,
flow_ctrl: crate::dma::FlowControl::Peripheral,
fifo_threshold: Some(crate::dma::FifoThreshold::Full),
..Default::default()
},
);
}
DMA_TRANSFER_OPTIONS,
)
};
#[cfg(sdmmc_v2)]
{
regs.idmabase0r()
.write(|w| w.set_idmabase0(buffer as *const u32 as u32));
let transfer = {
regs.idmabase0r().write(|w| w.set_idmabase0(buffer.as_ptr() as u32));
regs.idmactrlr().modify(|w| w.set_idmaen(true));
}
core::marker::PhantomData
};
regs.dctrl().modify(|w| {
w.set_dblocksize(block_size);
@ -586,6 +605,9 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
w.set_dten(true);
}
});
transfer
}
}
/// Stops the DMA datapath
@ -662,11 +684,9 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let regs = T::regs();
let on_drop = OnDrop::new(|| unsafe { Self::on_drop() });
unsafe {
self.prepare_datapath_read(&mut status, 64, 6);
let transfer = self.prepare_datapath_read(&mut status, 64, 6);
Self::data_interrupts(true);
}
self.cmd(Cmd::cmd6(set_function), true)?; // CMD6
Self::cmd(Cmd::cmd6(set_function), true)?; // CMD6
let res = poll_fn(|cx| {
T::state().register(cx.waker());
@ -696,6 +716,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
Ok(_) => {
on_drop.defuse();
Self::stop_datapath();
drop(transfer);
// Function Selection of Function Group 1
let selection = (u32::from_be(status[4]) >> 24) & 0xF;
@ -718,7 +739,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let regs = T::regs();
let rca = card.rca;
self.cmd(Cmd::card_status(rca << 16), false)?; // CMD13
Self::cmd(Cmd::card_status(rca << 16), false)?; // CMD13
// NOTE(unsafe) Atomic read with no side-effects
let r1 = unsafe { regs.respr(0).read().cardstatus() };
@ -730,8 +751,8 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let card = self.card.as_mut().ok_or(Error::NoCard)?;
let rca = card.rca;
self.cmd(Cmd::set_block_length(64), false)?; // CMD16
self.cmd(Cmd::app_cmd(rca << 16), false)?; // APP
Self::cmd(Cmd::set_block_length(64), false)?; // CMD16
Self::cmd(Cmd::app_cmd(rca << 16), false)?; // APP
let mut status = [0u32; 16];
@ -739,11 +760,9 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let regs = T::regs();
let on_drop = OnDrop::new(|| unsafe { Self::on_drop() });
unsafe {
self.prepare_datapath_read(&mut status, 64, 6);
let transfer = self.prepare_datapath_read(&mut status, 64, 6);
Self::data_interrupts(true);
}
self.cmd(Cmd::card_status(0), true)?;
Self::cmd(Cmd::card_status(0), true)?;
let res = poll_fn(|cx| {
T::state().register(cx.waker());
@ -764,6 +783,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
if res.is_ok() {
on_drop.defuse();
Self::stop_datapath();
drop(transfer);
for byte in status.iter_mut() {
*byte = u32::from_be(*byte);
@ -781,7 +801,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
// Determine Relative Card Address (RCA) of given card
let rca = card.map(|c| c.rca << 16).unwrap_or(0);
let r = self.cmd(Cmd::sel_desel_card(rca), false);
let r = Self::cmd(Cmd::sel_desel_card(rca), false);
match (r, rca) {
(Err(Error::Timeout), 0) => Ok(()),
_ => r,
@ -842,8 +862,8 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
async fn get_scr(&mut self, card: &mut Card) -> Result<(), Error> {
// Read the 64-bit SCR register
self.cmd(Cmd::set_block_length(8), false)?; // CMD16
self.cmd(Cmd::app_cmd(card.rca << 16), false)?;
Self::cmd(Cmd::set_block_length(8), false)?; // CMD16
Self::cmd(Cmd::app_cmd(card.rca << 16), false)?;
let mut scr = [0u32; 2];
@ -851,11 +871,9 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let regs = T::regs();
let on_drop = OnDrop::new(|| unsafe { Self::on_drop() });
unsafe {
self.prepare_datapath_read(&mut scr[..], 8, 3);
let transfer = self.prepare_datapath_read(&mut scr[..], 8, 3);
Self::data_interrupts(true);
}
self.cmd(Cmd::cmd51(), true)?;
Self::cmd(Cmd::cmd51(), true)?;
let res = poll_fn(|cx| {
T::state().register(cx.waker());
@ -876,6 +894,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
if res.is_ok() {
on_drop.defuse();
Self::stop_datapath();
drop(transfer);
unsafe {
let scr_bytes = &*(&scr as *const [u32; 2] as *const [u8; 8]);
@ -887,7 +906,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
/// Send command to card
#[allow(unused_variables)]
fn cmd(&self, cmd: Cmd, data: bool) -> Result<(), Error> {
fn cmd(cmd: Cmd, data: bool) -> Result<(), Error> {
let regs = T::regs();
Self::clear_interrupt_flags();
@ -1005,10 +1024,10 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
});
regs.power().modify(|w| w.set_pwrctrl(PowerCtrl::On as u8));
self.cmd(Cmd::idle(), false)?;
Self::cmd(Cmd::idle(), false)?;
// Check if cards supports CMD8 (with pattern)
self.cmd(Cmd::hs_send_ext_csd(0x1AA), false)?;
Self::cmd(Cmd::hs_send_ext_csd(0x1AA), false)?;
let r1 = regs.respr(0).read().cardstatus();
let mut card = if r1 == 0x1AA {
@ -1020,14 +1039,14 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let ocr = loop {
// Signal that next command is a app command
self.cmd(Cmd::app_cmd(0), false)?; // CMD55
Self::cmd(Cmd::app_cmd(0), false)?; // CMD55
let arg = CmdAppOper::VOLTAGE_WINDOW_SD as u32
| CmdAppOper::HIGH_CAPACITY as u32
| CmdAppOper::SD_SWITCH_1_8V_CAPACITY as u32;
// Initialize card
match self.cmd(Cmd::app_op_cmd(arg), false) {
match Self::cmd(Cmd::app_op_cmd(arg), false) {
// ACMD41
Ok(_) => (),
Err(Error::Crc) => (),
@ -1048,7 +1067,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
}
card.ocr = ocr;
self.cmd(Cmd::all_send_cid(), false)?; // CMD2
Self::cmd(Cmd::all_send_cid(), false)?; // CMD2
let cid0 = regs.respr(0).read().cardstatus() as u128;
let cid1 = regs.respr(1).read().cardstatus() as u128;
let cid2 = regs.respr(2).read().cardstatus() as u128;
@ -1056,10 +1075,10 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
let cid = (cid0 << 96) | (cid1 << 64) | (cid2 << 32) | (cid3);
card.cid = cid.into();
self.cmd(Cmd::send_rel_addr(), false)?;
Self::cmd(Cmd::send_rel_addr(), false)?;
card.rca = regs.respr(0).read().cardstatus() >> 16;
self.cmd(Cmd::send_csd(card.rca << 16), false)?;
Self::cmd(Cmd::send_csd(card.rca << 16), false)?;
let csd0 = regs.respr(0).read().cardstatus() as u128;
let csd1 = regs.respr(1).read().cardstatus() as u128;
let csd2 = regs.respr(2).read().cardstatus() as u128;
@ -1077,8 +1096,8 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
BusWidth::Four if card.scr.bus_width_four() => (BusWidth::Four, 2),
_ => (BusWidth::One, 0),
};
self.cmd(Cmd::app_cmd(card.rca << 16), false)?;
self.cmd(Cmd::cmd6(acmd_arg), false)?;
Self::cmd(Cmd::app_cmd(card.rca << 16), false)?;
Self::cmd(Cmd::cmd6(acmd_arg), false)?;
// CPSMACT and DPSMACT must be 0 to set WIDBUS
Self::wait_idle();
@ -1139,16 +1158,14 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
CardCapacity::SDSC => block_idx * 512,
_ => block_idx,
};
self.cmd(Cmd::set_block_length(512), false)?; // CMD16
Self::cmd(Cmd::set_block_length(512), false)?; // CMD16
let regs = T::regs();
let on_drop = OnDrop::new(|| unsafe { Self::on_drop() });
unsafe {
self.prepare_datapath_read(buffer, 512, 9);
let transfer = self.prepare_datapath_read(buffer, 512, 9);
Self::data_interrupts(true);
}
self.cmd(Cmd::read_single_block(address), true)?;
Self::cmd(Cmd::read_single_block(address), true)?;
let res = poll_fn(|cx| {
T::state().register(cx.waker());
@ -1169,6 +1186,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
if res.is_ok() {
on_drop.defuse();
Self::stop_datapath();
drop(transfer);
}
res
}
@ -1185,22 +1203,20 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
CardCapacity::SDSC => block_idx * 512,
_ => block_idx,
};
self.cmd(Cmd::set_block_length(512), false)?; // CMD16
Self::cmd(Cmd::set_block_length(512), false)?; // CMD16
let regs = T::regs();
let on_drop = OnDrop::new(|| unsafe { Self::on_drop() });
// sdmmc_v1 uses a different cmd/dma order than v2, but only for writes
#[cfg(sdmmc_v1)]
self.cmd(Cmd::write_single_block(address), true)?;
Self::cmd(Cmd::write_single_block(address), true)?;
unsafe {
self.prepare_datapath_write(buffer as *const [u32; 128], 512, 9);
let transfer = self.prepare_datapath_write(buffer, 512, 9);
Self::data_interrupts(true);
}
#[cfg(sdmmc_v2)]
self.cmd(Cmd::write_single_block(address), true)?;
Self::cmd(Cmd::write_single_block(address), true)?;
let res = poll_fn(|cx| {
T::state().register(cx.waker());
@ -1222,6 +1238,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
Ok(_) => {
on_drop.defuse();
Self::stop_datapath();
drop(transfer);
// TODO: Make this configurable
let mut timeout: u32 = 0x00FF_FFFF;

View File

@ -421,8 +421,7 @@ impl<'d, T: Instance, Tx, Rx> Spi<'d, T, Tx, Rx> {
let tx_request = self.txdma.request();
let tx_dst = T::REGS.tx_ptr();
unsafe { self.txdma.start_write(tx_request, data, tx_dst, Default::default()) }
let tx_f = Transfer::new(&mut self.txdma);
let tx_f = unsafe { Transfer::new_write(&mut self.txdma, tx_request, data, tx_dst, Default::default()) };
unsafe {
set_txdmaen(T::REGS, true);
@ -468,13 +467,21 @@ impl<'d, T: Instance, Tx, Rx> Spi<'d, T, Tx, Rx> {
let rx_request = self.rxdma.request();
let rx_src = T::REGS.rx_ptr();
unsafe { self.rxdma.start_read(rx_request, rx_src, data, Default::default()) };
let rx_f = Transfer::new(&mut self.rxdma);
let rx_f = unsafe { Transfer::new_read(&mut self.rxdma, rx_request, rx_src, data, Default::default()) };
let tx_request = self.txdma.request();
let tx_dst = T::REGS.tx_ptr();
let clock_byte = 0x00u8;
let tx_f = crate::dma::write_repeated(&mut self.txdma, tx_request, &clock_byte, clock_byte_count, tx_dst);
let tx_f = unsafe {
Transfer::new_write_repeated(
&mut self.txdma,
tx_request,
&clock_byte,
clock_byte_count,
tx_dst,
Default::default(),
)
};
unsafe {
set_txdmaen(T::REGS, true);
@ -521,13 +528,11 @@ impl<'d, T: Instance, Tx, Rx> Spi<'d, T, Tx, Rx> {
let rx_request = self.rxdma.request();
let rx_src = T::REGS.rx_ptr();
unsafe { self.rxdma.start_read(rx_request, rx_src, read, Default::default()) };
let rx_f = Transfer::new(&mut self.rxdma);
let rx_f = unsafe { Transfer::new_read_raw(&mut self.rxdma, rx_request, rx_src, read, Default::default()) };
let tx_request = self.txdma.request();
let tx_dst = T::REGS.tx_ptr();
unsafe { self.txdma.start_write(tx_request, write, tx_dst, Default::default()) }
let tx_f = Transfer::new(&mut self.txdma);
let tx_f = unsafe { Transfer::new_write_raw(&mut self.txdma, tx_request, write, tx_dst, Default::default()) };
unsafe {
set_txdmaen(T::REGS, true);

View File

@ -34,7 +34,7 @@ macro_rules! dma_trait_impl {
(crate::$mod:ident::$trait:ident, $instance:ident, {dmamux: $dmamux:ident}, $request:expr) => {
impl<T> crate::$mod::$trait<crate::peripherals::$instance> for T
where
T: crate::dma::MuxChannel<Mux = crate::dma::$dmamux>,
T: crate::dma::Channel + crate::dma::MuxChannel<Mux = crate::dma::$dmamux>,
{
fn request(&self) -> crate::dma::Request {
$request

View File

@ -6,11 +6,11 @@ use core::sync::atomic::{compiler_fence, Ordering};
use core::task::Poll;
use embassy_cortex_m::interrupt::InterruptExt;
use embassy_futures::select::{select, Either};
use embassy_hal_common::drop::OnDrop;
use embassy_hal_common::{into_ref, PeripheralRef};
use futures::future::{select, Either};
use crate::dma::NoDma;
use crate::dma::{NoDma, Transfer};
use crate::gpio::sealed::AFType;
#[cfg(any(lpuart_v1, lpuart_v2))]
use crate::pac::lpuart::{regs, vals, Lpuart as Regs};
@ -91,7 +91,7 @@ enum ReadCompletionEvent {
// DMA Read transfer completed first
DmaCompleted,
// Idle line detected first
Idle,
Idle(usize),
}
pub struct Uart<'d, T: BasicInstance, TxDma = NoDma, RxDma = NoDma> {
@ -183,7 +183,7 @@ impl<'d, T: BasicInstance, TxDma> UartTx<'d, T, TxDma> {
}
// If we don't assign the future to a variable, the data register pointer
// is held across an await, which makes the future non-Send.
let transfer = crate::dma::write(ch, request, buffer, tdr(T::regs()));
let transfer = unsafe { Transfer::new_write(ch, request, buffer, tdr(T::regs()), Default::default()) };
transfer.await;
Ok(())
}
@ -430,10 +430,12 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> {
let ch = &mut self.rx_dma;
let request = ch.request();
let buffer_len = buffer.len();
// Start USART DMA.
// It will not do anything yet because DMAR is not yet set.
// The transfer future below completes when the DMA read request completes.
let transfer = crate::dma::read(ch, request, rdr(T::regs()), buffer);
let transfer = unsafe { Transfer::new_read(ch, request, rdr(T::regs()), buffer, Default::default()) };
// SAFETY: the only potential conflict is with a split rx/tx pair;
// here we only modify or read Rx-related flags, interrupts and the DMA channel
@ -565,13 +567,15 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> {
// when the transfer is dropped, it will stop the DMA request
let r = match select(transfer, idle).await {
// DMA transfer completed first
Either::First(()) => Ok(ReadCompletionEvent::DmaCompleted),
Either::Left(((), _)) => Ok(ReadCompletionEvent::DmaCompleted),
// Idle line detected first
Either::Second(Ok(())) => Ok(ReadCompletionEvent::Idle),
Either::Right((Ok(()), transfer)) => Ok(ReadCompletionEvent::Idle(
buffer_len - transfer.get_remaining_transfers() as usize,
)),
// error occurred
Either::Second(Err(e)) => Err(e),
Either::Right((Err(e), _)) => Err(e),
};
drop(on_drop);
@ -594,14 +598,9 @@ impl<'d, T: BasicInstance, RxDma> UartRx<'d, T, RxDma> {
// wait for DMA to complete or IDLE line detection if requested
let res = self.inner_read_run(buffer, enable_idle_line_detection).await;
let ch = &mut self.rx_dma;
match res {
Ok(ReadCompletionEvent::DmaCompleted) => Ok(buffer_len),
Ok(ReadCompletionEvent::Idle) => {
let n = buffer_len - (ch.remaining_transfers() as usize);
Ok(n)
}
Ok(ReadCompletionEvent::Idle(n)) => Ok(n),
Err(e) => Err(e),
}
}