Support overflow detection for more than one ring-period

Rasmus Melchior Jacobsen 2023-04-27 10:48:38 +02:00 committed by Dario Nieuwenhuis
parent 4ea6662e55
commit fc268df6f5
5 changed files with 216 additions and 181 deletions
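The crux of the change: the previous ring buffer tracked DMA wraps with the single TCIF flag, which can witness at most one completed ring period (see the doc comment removed from ringbuffer.rs below), so an overrun further than one period behind could go unnoticed. This commit counts completed periods per channel instead. A standalone sketch of the resulting decision rule, using illustrative names that are not part of the embassy-stm32 API:

// Host-side model of the wrap-counter idea (illustrative only).
struct RingModel {
    cap: usize,            // capacity of the DMA ring in words
    first: usize,          // next unread index held by the reader
    write_pos: usize,      // current DMA write index
    complete_count: usize, // full ring periods completed since the counter was last reset
}

#[derive(Debug, PartialEq)]
struct OverrunError;

impl RingModel {
    /// How many words can still be read, or an overrun if the writer lapped the reader.
    fn readable(&self) -> Result<usize, OverrunError> {
        match self.complete_count {
            // The writer has not wrapped: the unread region is first..write_pos.
            0 => Ok(self.write_pos.saturating_sub(self.first)),
            // Wrapped exactly once: still intact while the writer has not passed `first` again.
            1 if self.write_pos <= self.first => Ok(self.cap - self.first + self.write_pos),
            // Wrapped once and already past `first`, or wrapped two or more times.
            _ => Err(OverrunError),
        }
    }
}

fn main() {
    // 16-word ring, reader at 10, writer wrapped once and is now at 4: 10 words are readable.
    let ok = RingModel { cap: 16, first: 10, write_pos: 4, complete_count: 1 };
    assert_eq!(ok.readable(), Ok(10));

    // Two completed periods since the last read always mean unread data was overwritten.
    let lost = RingModel { cap: 16, first: 10, write_pos: 4, complete_count: 2 };
    assert_eq!(lost.readable(), Err(OverrunError));
}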

View File

@@ -4,6 +4,7 @@ use core::pin::Pin;
 use core::sync::atomic::{fence, Ordering};
 use core::task::{Context, Poll, Waker};
+use atomic_polyfill::AtomicUsize;
 use embassy_cortex_m::interrupt::Priority;
 use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
 use embassy_sync::waitqueue::AtomicWaker;
@@ -129,13 +130,16 @@ impl From<FifoThreshold> for vals::Fth {
 struct State {
     ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT],
+    complete_count: [AtomicUsize; DMA_CHANNEL_COUNT],
 }

 impl State {
     const fn new() -> Self {
+        const ZERO: AtomicUsize = AtomicUsize::new(0);
         const AW: AtomicWaker = AtomicWaker::new();
         Self {
             ch_wakers: [AW; DMA_CHANNEL_COUNT],
+            complete_count: [ZERO; DMA_CHANNEL_COUNT],
         }
     }
 }
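The `const ZERO` / `const AW` locals are the usual trick for building an array of non-Copy atomics in a `const fn`: the repeat operand of `[X; N]` may be a named constant even though the element type is not Copy. A small standalone illustration (array size is arbitrary here, not DMA_CHANNEL_COUNT):

use core::sync::atomic::AtomicUsize;

const CHANNELS: usize = 8; // illustrative size only

const fn zeroed_counters() -> [AtomicUsize; CHANNELS] {
    // `[AtomicUsize::new(0); CHANNELS]` would be rejected because AtomicUsize
    // is not Copy; repeating a named constant is allowed.
    const ZERO: AtomicUsize = AtomicUsize::new(0);
    [ZERO; CHANNELS]
}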
@@ -184,13 +188,43 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index:
         panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num);
     }

-    if isr.tcif(channel_num % 4) && cr.read().tcie() {
-        /* acknowledge transfer complete interrupt */
-        dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true));
+    let mut wake = false;
+
+    if isr.htif(channel_num % 4) && cr.read().htie() {
+        // Acknowledge half transfer complete interrupt
+        dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true));
+        wake = true;
+    }
+
+    wake |= process_tcif(dma, channel_num, index);
+
+    if wake {
         STATE.ch_wakers[index].wake();
     }
 }

+unsafe fn process_tcif(dma: pac::dma::Dma, channel_num: usize, index: usize) -> bool {
+    let isr_reg = dma.isr(channel_num / 4);
+    let cr_reg = dma.st(channel_num).cr();
+
+    // First, figure out if tcif is set without a cs.
+    if isr_reg.read().tcif(channel_num % 4) && cr_reg.read().tcie() {
+        // Make tcif test again within a cs to avoid race when incrementing complete_count.
+        critical_section::with(|_| {
+            if isr_reg.read().tcif(channel_num % 4) && cr_reg.read().tcie() {
+                // Acknowledge transfer complete interrupt
+                dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true));
+                STATE.complete_count[index].fetch_add(1, Ordering::Release);
+                true
+            } else {
+                false
+            }
+        })
+    } else {
+        false
+    }
+}
+
 #[cfg(any(dma_v2, dmamux))]
 pub type Request = u8;
 #[cfg(not(any(dma_v2, dmamux)))]
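process_tcif is called both from the interrupt handler above and from DmaCtrl::get_complete_count / reset_complete_count further down, so two contexts can race for the same pending TCIF. The cheap check runs outside the critical section and is repeated inside it so that only one caller clears the flag and bumps the counter. The same shape in isolation, with the ISR/IFCR registers replaced by an AtomicBool (a model for illustration, not the driver code):

use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

static TCIF: AtomicBool = AtomicBool::new(false); // stands in for the DMA_ISR TCIF bit
static COMPLETE_COUNT: AtomicUsize = AtomicUsize::new(0);

fn process_tcif_model() -> bool {
    // Fast path: most calls find no pending flag and never take the critical section.
    if !TCIF.load(Ordering::Relaxed) {
        return false;
    }
    // Re-check under the critical section so a task-level caller and a preempting
    // interrupt cannot both observe the flag and count the same wrap twice.
    critical_section::with(|_| {
        if TCIF.load(Ordering::Relaxed) {
            // On real hardware this is a write to DMA_IFCR; read and clear cannot
            // be combined into one atomic operation, hence the critical section.
            TCIF.store(false, Ordering::Relaxed);
            COMPLETE_COUNT.fetch_add(1, Ordering::Release);
            true
        } else {
            false
        }
    })
}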
@@ -530,6 +564,7 @@ impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> {
         unsafe {
             dma.ifcr(isrn).write(|w| {
+                w.set_htif(isrbit, true);
                 w.set_tcif(isrbit, true);
                 w.set_teif(isrbit, true);
             })
@@ -593,32 +628,28 @@ impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> {
 // ==============================

 impl<C: Channel> DmaCtrl for C {
-    fn tcif(&self) -> bool {
-        let channel_number = self.num();
-        let dma = self.regs();
-        let isrn = channel_number / 4;
-        let isrbit = channel_number % 4;
-        unsafe { dma.isr(isrn).read() }.tcif(isrbit)
-    }
-
-    fn clear_tcif(&mut self) {
-        let channel_number = self.num();
-        let dma = self.regs();
-        let isrn = channel_number / 4;
-        let isrbit = channel_number % 4;
-        unsafe {
-            dma.ifcr(isrn).write(|w| {
-                w.set_tcif(isrbit, true);
-            })
-        }
-    }
-
     fn ndtr(&self) -> usize {
         let ch = self.regs().st(self.num());
         unsafe { ch.ndtr().read() }.ndt() as usize
     }
+
+    fn get_complete_count(&self) -> usize {
+        let dma = self.regs();
+        let channel_num = self.num();
+        let index = self.index();
+        // Manually process tcif in case transfer was completed and we are in a higher priority task.
+        unsafe { process_tcif(dma, channel_num, index) };
+        STATE.complete_count[index].load(Ordering::Acquire)
+    }
+
+    fn reset_complete_count(&mut self) -> usize {
+        let dma = self.regs();
+        let channel_num = self.num();
+        let index = self.index();
+        // Manually process tcif in case transfer was completed and we are in a higher priority task.
+        unsafe { process_tcif(dma, channel_num, index) };
+        STATE.complete_count[index].swap(0, Ordering::AcqRel)
+    }
 }

 pub struct RingBuffer<'a, C: Channel, W: Word> {
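Both accessors fold in a still-pending TCIF before touching the counter, so a caller that preempts the DMA interrupt still sees an up-to-date count. From the consumer side the contract reduces to two questions; a hedged sketch against the DmaCtrl trait declared in ringbuffer.rs (the trait is real, these helpers are not):

/// More than one completed ring period since the last reset means the unread
/// region has certainly been overwritten.
fn lapped_more_than_once(dma: &impl DmaCtrl) -> bool {
    dma.get_complete_count() > 1
}

/// After draining everything up to the current write position the reader starts a
/// new period: zero the counter but keep the old value, so a wrap that raced with
/// the drain is still visible to the caller.
fn start_new_period(dma: &mut impl DmaCtrl) -> usize {
    dma.reset_complete_count()
}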
@@ -657,7 +688,8 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
                 w.set_minc(vals::Inc::INCREMENTED);
                 w.set_pinc(vals::Inc::FIXED);
                 w.set_teie(true);
-                w.set_tcie(false);
+                w.set_htie(true);
+                w.set_tcie(true);
                 w.set_circ(vals::Circ::ENABLED);
                 #[cfg(dma_v1)]
                 w.set_trbuff(true);
@@ -703,7 +735,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
     }

     pub fn clear(&mut self) {
-        self.ringbuf.clear();
+        self.ringbuf.clear(&mut *self.channel);
     }

     /// Read bytes from the ring buffer
@@ -712,6 +744,22 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
         self.ringbuf.read(&mut *self.channel, buf)
     }

+    pub fn is_empty(&self) -> bool {
+        self.ringbuf.is_empty()
+    }
+
+    pub fn len(&self) -> usize {
+        self.ringbuf.len()
+    }
+
+    pub fn capacity(&self) -> usize {
+        self.ringbuf.dma_buf.len()
+    }
+
+    pub fn set_waker(&mut self, waker: &Waker) {
+        STATE.ch_wakers[self.channel.index()].register(waker);
+    }
+
     fn clear_irqs(&mut self) {
         let channel_number = self.channel.num();
         let dma = self.channel.regs();
@@ -720,6 +768,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
         unsafe {
             dma.ifcr(isrn).write(|w| {
+                w.set_htif(isrbit, true);
                 w.set_tcif(isrbit, true);
                 w.set_teif(isrbit, true);
             })
@@ -733,6 +782,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
         unsafe {
             ch.cr().write(|w| {
                 w.set_teie(true);
+                w.set_htie(true);
                 w.set_tcie(true);
             })
         }
@@ -743,15 +793,10 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
         unsafe { ch.cr().read() }.en()
     }

-    /// Gets the total remaining transfers for the channel
-    /// Note: this will be zero for transfers that completed without cancellation.
-    pub fn get_remaining_transfers(&self) -> usize {
+    /// Synchronize the position of the ring buffer to the actual DMA controller position
+    pub fn reload_position(&mut self) {
         let ch = self.channel.regs().st(self.channel.num());
-        unsafe { ch.ndtr().read() }.ndt() as usize
-    }
-
-    pub fn set_ndtr(&mut self, ndtr: usize) {
-        self.ringbuf.ndtr = ndtr;
+        self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize;
     }
 }
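reload_position replaces the get_remaining_transfers / set_ndtr pair: instead of the caller shuttling NDTR values around, the ring buffer samples the register itself. A sketch of the resulting call order (the helper is hypothetical; it assumes read returns Result<usize, OverrunError> as ringbuffer.rs suggests):

// Hypothetical helper showing the intended call order after this change.
fn poll_ring<C: Channel, W: Word>(
    ring: &mut RingBuffer<'_, C, W>,
    out: &mut [W],
) -> Result<usize, crate::dma::ringbuffer::OverrunError> {
    // Pull the DMA controller's current NDTR into the ring buffer's bookkeeping...
    ring.reload_position();
    // ...then copy out whatever became available since the last poll.
    ring.read(out)
}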

View File

@@ -10,14 +10,6 @@ use super::word::Word;
 /// to the current register value. `ndtr` describes the current position of the DMA
 /// write.
 ///
-/// # Safety
-///
-/// The ring buffer controls the TCIF (transfer completed interrupt flag) to
-/// detect buffer overruns, hence this interrupt must be disabled.
-/// The buffer can detect overruns up to one period, that is, for a X byte buffer,
-/// overruns can be detected if they happen from byte X+1 up to 2X. After this
-/// point, overrunds may or may not be detected.
-///
 /// # Buffer layout
 ///
 /// ```text
@@ -39,7 +31,6 @@ pub struct DmaRingBuffer<'a, W: Word> {
     pub(crate) dma_buf: &'a mut [W],
     first: usize,
     pub ndtr: usize,
-    expect_next_read_to_wrap: bool,
 }

 #[derive(Debug, PartialEq)]
@@ -50,13 +41,13 @@ pub trait DmaCtrl {
     /// buffer until the dma writer wraps.
     fn ndtr(&self) -> usize;

-    /// Read the transfer completed interrupt flag
-    /// This flag is set by the dma controller when NDTR is reloaded,
+    /// Get the transfer completed counter.
+    /// This counter is incremented by the dma controller when NDTR is reloaded,
     /// i.e. when the writing wraps.
-    fn tcif(&self) -> bool;
+    fn get_complete_count(&self) -> usize;

-    /// Clear the transfer completed interrupt flag
-    fn clear_tcif(&mut self);
+    /// Reset the transfer completed counter to 0 and return the value just prior to the reset.
+    fn reset_complete_count(&mut self) -> usize;
 }

 impl<'a, W: Word> DmaRingBuffer<'a, W> {
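With the trait exposing a monotonic counter instead of a one-shot flag, a host-side fake is easy to write; the unit tests at the bottom of this file use a similar TestCtrl. The sketch below only illustrates the intended contract: get must not consume the counter, reset returns the pre-reset value.

// Illustrative implementation of the trait above; not part of the commit.
struct FakeDma {
    ndtr: usize,           // remaining transfers, counts down toward zero
    complete_count: usize, // bumped each time the fake writer wraps
}

impl DmaCtrl for FakeDma {
    fn ndtr(&self) -> usize {
        self.ndtr
    }

    fn get_complete_count(&self) -> usize {
        // Observing the counter must not reset it.
        self.complete_count
    }

    fn reset_complete_count(&mut self) -> usize {
        // Hand back the value seen at the instant of the reset.
        core::mem::replace(&mut self.complete_count, 0)
    }
}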
@@ -66,15 +57,14 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> {
             dma_buf,
             first: 0,
             ndtr,
-            expect_next_read_to_wrap: false,
         }
     }

     /// Reset the ring buffer to its initial state
-    pub fn clear(&mut self) {
+    pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
         self.first = 0;
         self.ndtr = self.dma_buf.len();
-        self.expect_next_read_to_wrap = false;
+        dma.reset_complete_count();
     }

     /// The buffer end position
@@ -83,14 +73,12 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> {
     }

     /// Returns whether the buffer is empty
-    #[allow(dead_code)]
     pub fn is_empty(&self) -> bool {
         self.first == self.end()
     }

     /// The current number of bytes in the buffer
     /// This may change at any time if dma is currently active
-    #[allow(dead_code)]
     pub fn len(&self) -> usize {
         // Read out a stable end (the dma periheral can change it at anytime)
         let end = self.end();
@@ -112,27 +100,19 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> {
         if self.first == end {
             // The buffer is currently empty

-            if dma.tcif() {
-                // The dma controller has written such that the ring buffer now wraps
-                // This is the special case where exactly n*dma_buf.len(), n = 1,2,..., bytes was written,
-                // but where additional bytes are now written causing the ring buffer to wrap.
-                // This is only an error if the writing has passed the current unread region.
+            if dma.get_complete_count() > 0 {
+                // The DMA has written such that the ring buffer wraps at least once
                 self.ndtr = dma.ndtr();
-                if self.end() > self.first {
-                    dma.clear_tcif();
+                if self.end() > self.first || dma.get_complete_count() > 1 {
                     return Err(OverrunError);
                 }
             }

-            self.expect_next_read_to_wrap = false;
             Ok(0)
         } else if self.first < end {
             // The available, unread portion in the ring buffer DOES NOT wrap

-            if self.expect_next_read_to_wrap {
-                // The read was expected to wrap but it did not
-                dma.clear_tcif();
+            if dma.get_complete_count() > 1 {
                 return Err(OverrunError);
             }
@@ -141,35 +121,39 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> {
             compiler_fence(Ordering::SeqCst);

-            if dma.tcif() {
-                // The dma controller has written such that the ring buffer now wraps
-                self.ndtr = dma.ndtr();
-                if self.end() > self.first {
-                    // The bytes that we have copied out have overflowed
-                    // as the writer has now both wrapped and is currently writing
-                    // within the region that we have just copied out
-
-                    // Clear transfer completed interrupt flag
-                    dma.clear_tcif();
-
-                    return Err(OverrunError);
+            match dma.get_complete_count() {
+                0 => {
+                    // The DMA writer has not wrapped before nor after the copy
+                }
+                1 => {
+                    // The DMA writer has written such that the ring buffer now wraps
+                    self.ndtr = dma.ndtr();
+                    if self.end() > self.first || dma.get_complete_count() > 1 {
+                        // The bytes that we have copied out have overflowed
+                        // as the writer has now both wrapped and is currently writing
+                        // within the region that we have just copied out
+                        return Err(OverrunError);
+                    }
+                }
+                _ => {
+                    return Err(OverrunError);
                 }
             }

             self.first = (self.first + len) % self.dma_buf.len();
-            self.expect_next_read_to_wrap = false;
             Ok(len)
         } else {
             // The available, unread portion in the ring buffer DOES wrap
-            // The dma controller has wrapped since we last read and is currently
+            // The DMA writer has wrapped since we last read and is currently
             // writing (or the next byte added will be) in the beginning of the ring buffer.

-            // If the unread portion wraps then the writer must also have wrapped,
-            // or it has wrapped and we already cleared the TCIF flag
-            assert!(dma.tcif() || self.expect_next_read_to_wrap);
+            let complete_count = dma.get_complete_count();
+            if complete_count > 1 {
+                return Err(OverrunError);
+            }

-            // Clear transfer completed interrupt flag
-            dma.clear_tcif();
+            // If the unread portion wraps then the writer must also have wrapped
+            assert!(complete_count == 1);

             if self.first + buf.len() < self.dma_buf.len() {
                 // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer.
@@ -182,13 +166,12 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> {
                 // We have now copied out the data from dma_buf
                 // Make sure that the just read part was not overwritten during the copy
                 self.ndtr = dma.ndtr();
-                if self.end() > self.first || dma.tcif() {
+                if self.end() > self.first || dma.get_complete_count() > 1 {
                     // The writer has entered the data that we have just read since we read out `end` in the beginning and until now.
                     return Err(OverrunError);
                 }

                 self.first = (self.first + len) % self.dma_buf.len();
-                self.expect_next_read_to_wrap = true;
                 Ok(len)
             } else {
                 // The provided read buffer is large enough to include all bytes from the tail of the dma buffer,
@@ -201,14 +184,14 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> {
                 compiler_fence(Ordering::SeqCst);

                 // We have now copied out the data from dma_buf
-                // Make sure that the just read part was not overwritten during the copy
+                // Reset complete counter and make sure that the just read part was not overwritten during the copy
                 self.ndtr = dma.ndtr();
-                if self.end() > self.first || dma.tcif() {
+                let complete_count = dma.reset_complete_count();
+                if self.end() > self.first || complete_count > 1 {
                     return Err(OverrunError);
                 }

                 self.first = head;
-                self.expect_next_read_to_wrap = false;
                 Ok(tail + head)
             }
         }
@@ -243,14 +226,14 @@ mod tests {
     struct TestCtrl {
         next_ndtr: RefCell<Option<usize>>,
-        tcif: bool,
+        complete_count: usize,
     }

     impl TestCtrl {
         pub const fn new() -> Self {
             Self {
                 next_ndtr: RefCell::new(None),
-                tcif: false,
+                complete_count: 0,
             }
         }
@@ -264,12 +247,14 @@ mod tests {
             self.next_ndtr.borrow_mut().unwrap()
         }

-        fn tcif(&self) -> bool {
-            self.tcif
+        fn get_complete_count(&self) -> usize {
+            self.complete_count
         }

-        fn clear_tcif(&mut self) {
-            self.tcif = false;
+        fn reset_complete_count(&mut self) -> usize {
+            let old = self.complete_count;
+            self.complete_count = 0;
+            old
         }
     }
@@ -320,7 +305,7 @@ mod tests {
         ringbuf.ndtr = 10;

         // The dma controller has written 4 + 6 bytes and has reloaded NDTR
-        ctrl.tcif = true;
+        ctrl.complete_count = 1;
         ctrl.set_next_ndtr(10);

         assert!(!ringbuf.is_empty());
@@ -346,14 +331,14 @@ mod tests {
         ringbuf.ndtr = 6;

         // The dma controller has written 6 + 2 bytes and has reloaded NDTR
-        ctrl.tcif = true;
+        ctrl.complete_count = 1;
         ctrl.set_next_ndtr(14);

         let mut buf = [0; 2];
         assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
         assert_eq!([2, 3], buf);

-        assert_eq!(true, ctrl.tcif); // The interrupt flag IS NOT cleared
+        assert_eq!(1, ctrl.complete_count); // The interrupt flag IS NOT cleared
     }

     #[test]
#[test] #[test]
@ -365,14 +350,14 @@ mod tests {
ringbuf.ndtr = 10; ringbuf.ndtr = 10;
// The dma controller has written 6 + 2 bytes and has reloaded NDTR // The dma controller has written 6 + 2 bytes and has reloaded NDTR
ctrl.tcif = true; ctrl.complete_count = 1;
ctrl.set_next_ndtr(14); ctrl.set_next_ndtr(14);
let mut buf = [0; 10]; let mut buf = [0; 10];
assert_eq!(10, ringbuf.read(&mut ctrl, &mut buf).unwrap()); assert_eq!(10, ringbuf.read(&mut ctrl, &mut buf).unwrap());
assert_eq!([12, 13, 14, 15, 0, 1, 2, 3, 4, 5], buf); assert_eq!([12, 13, 14, 15, 0, 1, 2, 3, 4, 5], buf);
assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared assert_eq!(0, ctrl.complete_count); // The interrupt flag IS cleared
} }
#[test] #[test]
@ -387,12 +372,12 @@ mod tests {
assert!(ringbuf.is_empty()); // The ring buffer thinks that it is empty assert!(ringbuf.is_empty()); // The ring buffer thinks that it is empty
// The dma controller has written exactly 16 bytes // The dma controller has written exactly 16 bytes
ctrl.tcif = true; ctrl.complete_count = 1;
let mut buf = [0; 2]; let mut buf = [0; 2];
assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
} }
#[test] #[test]
@@ -404,13 +389,13 @@ mod tests {
         ringbuf.ndtr = 6;

         // The dma controller has written 6 + 3 bytes and has reloaded NDTR
-        ctrl.tcif = true;
+        ctrl.complete_count = 1;
         ctrl.set_next_ndtr(13);

         let mut buf = [0; 2];
         assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));

-        assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared
+        assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
     }

     #[test]
#[test] #[test]
@ -422,12 +407,12 @@ mod tests {
ringbuf.ndtr = 10; ringbuf.ndtr = 10;
// The dma controller has written 6 + 13 bytes and has reloaded NDTR // The dma controller has written 6 + 13 bytes and has reloaded NDTR
ctrl.tcif = true; ctrl.complete_count = 1;
ctrl.set_next_ndtr(3); ctrl.set_next_ndtr(3);
let mut buf = [0; 2]; let mut buf = [0; 2];
assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
} }
} }
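The updated tests above all stay within a single wrap (complete_count = 1). A sketch of an additional case exercising the new capability, an overrun spanning more than one full ring period; the ring buffer is built with a struct literal because the constructor the tests above use is not shown in this excerpt:

#[test]
fn read_detects_overrun_across_multiple_ring_periods() {
    let mut storage = [0u8; 16];
    let dma_buf: &mut [u8] = &mut storage;
    let mut ctrl = TestCtrl::new();
    let mut ringbuf = DmaRingBuffer {
        dma_buf,
        first: 0,
        ndtr: 6, // the writer is 10 elements into its current period
    };

    // Since the reader last caught up, the dma controller has completed two
    // full ring periods: whatever was unread has been overwritten.
    ctrl.complete_count = 2;

    let mut buf = [0; 2];
    assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
    assert_eq!(2, ctrl.complete_count); // The complete counter is not reset
}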

View File

@@ -4,8 +4,9 @@ use core::task::Poll;
 use embassy_hal_common::drop::OnDrop;
 use embassy_hal_common::PeripheralRef;
+use futures::future::{select, Either};

-use super::{rdr, sr, BasicInstance, Error, UartRx};
+use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx};
 use crate::dma::ringbuffer::OverrunError;
 use crate::dma::RingBuffer;
@@ -98,7 +99,8 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
     }

     /// Read bytes that are readily available in the ring buffer.
-    /// If no bytes are currently available in the buffer the call waits until data are received.
+    /// If no bytes are currently available in the buffer the call waits until the some
+    /// bytes are available (at least one byte and at most half the buffer size)
     ///
     /// Background receive is started if `start()` has not been previously called.
     ///
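For reference, a sketch of a consumer driving the contract described above; the helper and its generic names are placeholders, only read() and its Result come from this file:

// Hypothetical consumer loop; not part of the commit.
async fn pump<T: BasicInstance, Rx: super::RxDma<T>>(
    rx: &mut RingBufferedUartRx<'_, T, Rx>,
) -> Result<(), Error> {
    let mut chunk = [0u8; 32];
    loop {
        // Resolves once at least one byte (and at most half the DMA buffer) is
        // available, or fails with Error::Overrun if the reader fell more than
        // one ring period behind the DMA writer.
        let n = rx.read(&mut chunk).await?;
        // hand &chunk[..n] to the application here
        let _ = &chunk[..n];
    }
}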
@@ -107,10 +109,9 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
     pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
         let r = T::regs();

-        // Start background receive if it was not already started
         // SAFETY: read only
         let is_started = unsafe { r.cr3().read().dmar() };
-
+        // Start background receive if it was not already started
         if !is_started {
             self.start()?;
         }
@@ -132,8 +133,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
             }
         }

-        let ndtr = self.ring_buf.get_remaining_transfers();
-        self.ring_buf.set_ndtr(ndtr);
+        self.ring_buf.reload_position();
         match self.ring_buf.read(buf) {
             Ok(len) if len == 0 => {}
             Ok(len) => {
@@ -148,28 +148,32 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
             }
         }

-        // Wait for any data since `ndtr`
-        self.wait_for_data(ndtr).await?;
-
-        // ndtr is now different than the value provided to `wait_for_data()`
-        // Re-sample ndtr now when it has changed.
-        self.ring_buf.set_ndtr(self.ring_buf.get_remaining_transfers());
+        loop {
+            self.wait_for_data_or_idle().await?;
+
+            self.ring_buf.reload_position();
+            if !self.ring_buf.is_empty() {
+                break;
+            }
+        }

         let len = self.ring_buf.read(buf).map_err(|_err| Error::Overrun)?;
         assert!(len > 0);

         Ok(len)
     }

-    /// Wait for uart data
-    async fn wait_for_data(&mut self, old_ndtr: usize) -> Result<(), Error> {
+    /// Wait for uart idle or dma half-full or full
+    async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> {
         let r = T::regs();

-        // make sure USART state is restored to neutral state when this future is dropped
-        let _drop = OnDrop::new(move || {
+        // make sure USART state is restored to neutral state
+        let _on_drop = OnDrop::new(move || {
             // SAFETY: only clears Rx related flags
             unsafe {
                 r.cr1().modify(|w| {
-                    // disable RXNE interrupt
-                    w.set_rxneie(false);
+                    // disable idle line interrupt
+                    w.set_idleie(false);
                 });
             }
         });
@@ -177,80 +181,69 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
         // SAFETY: only sets Rx related flags
         unsafe {
             r.cr1().modify(|w| {
-                // enable RXNE interrupt
-                w.set_rxneie(true);
+                // enable idle line interrupt
+                w.set_idleie(true);
             });
         }

-        // future which completes when RX "not empty" is detected,
-        // i.e. when there is data in uart rx register
-        let rxne = poll_fn(|cx| {
-            let s = T::state();
-
-            // Register waker to be awaken when RXNE interrupt is received
+        compiler_fence(Ordering::SeqCst);
+
+        // Future which completes when there is dma is half full or full
+        let dma = poll_fn(|cx| {
+            self.ring_buf.set_waker(cx.waker());
+
+            compiler_fence(Ordering::SeqCst);
+
+            self.ring_buf.reload_position();
+            if !self.ring_buf.is_empty() {
+                // Some data is now available
+                Poll::Ready(())
+            } else {
+                Poll::Pending
+            }
+        });
+
+        // Future which completes when idle line is detected
+        let uart = poll_fn(|cx| {
+            let s = T::state();
             s.rx_waker.register(cx.waker());

             compiler_fence(Ordering::SeqCst);

             // SAFETY: read only and we only use Rx related flags
-            let s = unsafe { sr(r).read() };
+            let sr = unsafe { sr(r).read() };

-            let has_errors = s.pe() || s.fe() || s.ne() || s.ore();
+            let has_errors = sr.pe() || sr.fe() || sr.ne() || sr.ore();
             if has_errors {
-                if s.pe() {
+                if sr.pe() {
                     return Poll::Ready(Err(Error::Parity));
-                } else if s.fe() {
+                } else if sr.fe() {
                     return Poll::Ready(Err(Error::Framing));
-                } else if s.ne() {
+                } else if sr.ne() {
                     return Poll::Ready(Err(Error::Noise));
                 } else {
                     return Poll::Ready(Err(Error::Overrun));
                 }
             }

-            // Re-sample ndtr and determine if it has changed since we started
-            // waiting for data.
-            let new_ndtr = self.ring_buf.get_remaining_transfers();
-            if new_ndtr != old_ndtr {
-                // Some data was received as NDTR has changed
-                Poll::Ready(Ok(()))
-            } else {
-                // It may be that the DMA controller is currently busy consuming the
-                // RX data register. We therefore wait register to become empty.
-                while unsafe { sr(r).read().rxne() } {}
-
-                compiler_fence(Ordering::SeqCst);
-
-                // Re-get again: This time we know that the DMA controller has consumed
-                // the current read register if it was busy doing so
-                let new_ndtr = self.ring_buf.get_remaining_transfers();
-                if new_ndtr != old_ndtr {
-                    // Some data was received as NDTR has changed
-                    Poll::Ready(Ok(()))
-                } else {
-                    Poll::Pending
-                }
-            }
+            if sr.idle() {
+                // Idle line is detected
+                Poll::Ready(Ok(()))
+            } else {
+                Poll::Pending
+            }
         });

-        compiler_fence(Ordering::SeqCst);
-
-        let new_ndtr = self.ring_buf.get_remaining_transfers();
-        if new_ndtr != old_ndtr {
-            // Fast path - NDTR has already changed, no reason to poll
-            Ok(())
-        } else {
-            // NDTR has not changed since we first read from the ring buffer
-            // Wait for RXNE interrupt...
-            match rxne.await {
-                Ok(()) => Ok(()),
-                Err(e) => {
-                    self.teardown_uart();
-                    Err(e)
-                }
-            }
-        }
+        match select(dma, uart).await {
+            Either::Left(((), _)) => Ok(()),
+            Either::Right((Ok(()), _)) => Ok(()),
+            Either::Right((Err(e), _)) => {
+                self.teardown_uart();
+                Err(e)
+            }
+        }
     }
 }

 impl<T: BasicInstance, RxDma: super::RxDma<T>> Drop for RingBufferedUartRx<'_, T, RxDma> {
     fn drop(&mut self) {

View File

@@ -56,6 +56,7 @@ mod board {
 }

 const ONE_BYTE_DURATION_US: u32 = 9_000_000 / 115200;
+const DMA_BUF_SIZE: usize = 64;

 #[embassy_executor::main]
 async fn main(spawner: Spawner) {
@@ -114,7 +115,7 @@ async fn main(spawner: Spawner) {
     let usart = Uart::new(usart, rx, tx, irq, tx_dma, rx_dma, config);
     let (tx, rx) = usart.split();

-    static mut DMA_BUF: [u8; 64] = [0; 64];
+    static mut DMA_BUF: [u8; DMA_BUF_SIZE] = [0; DMA_BUF_SIZE];
     let dma_buf = unsafe { DMA_BUF.as_mut() };
     let rx = rx.into_ring_buffered(dma_buf);
@@ -159,7 +160,14 @@ async fn receive_task(mut rx: RingBufferedUartRx<'static, board::Uart, board::Rx
     loop {
         let mut buf = [0; 100];
         let max_len = 1 + (rng.next_u32() as usize % (buf.len() - 1));
-        let received = rx.read(&mut buf[..max_len]).await.unwrap();
+        let received = match rx.read(&mut buf[..max_len]).await {
+            Ok(r) => r,
+            Err(e) => {
+                error!("Test fail! read error: {:?}", e);
+                cortex_m::asm::bkpt();
+                return;
+            }
+        };

         if expected.is_none() {
             info!("Test started");
@@ -176,8 +184,11 @@ async fn receive_task(mut rx: RingBufferedUartRx<'static, board::Uart, board::Rx
         }

         if received < max_len {
-            let byte_count = rng.next_u32() % 64;
-            Timer::after(Duration::from_micros((byte_count * ONE_BYTE_DURATION_US) as _)).await;
+            let byte_count = rng.next_u32() % (DMA_BUF_SIZE as u32);
+            let random_delay_us = (byte_count * ONE_BYTE_DURATION_US) as u64;
+            if random_delay_us > 200 {
+                Timer::after(Duration::from_micros(random_delay_us - 200)).await;
+            }
         }

         i += 1;
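Rough numbers behind the delay above, using the test's own approximation of nine bit-times per byte at 115200 baud: the random gap tops out just under 5 ms, and 200 us of it is trimmed before sleeping (the motivation for the trim is not stated in the diff). Spelled out as compile-time checks, for illustration only:

const _: () = {
    // 9_000_000 / 115_200 = 78 (integer division), i.e. roughly 78 us per byte.
    assert!(ONE_BYTE_DURATION_US == 78);
    // byte_count is at most DMA_BUF_SIZE - 1 = 63, so the delay is at most 63 * 78 = 4914 us.
    assert!((DMA_BUF_SIZE as u32 - 1) * ONE_BYTE_DURATION_US == 4914);
};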

View File

@@ -1,18 +1,19 @@
 use std::path::Path;
 use std::time::Duration;
-use std::{env, io, thread};
+use std::{env, io, process, thread};

 use rand::random;
 use serial::SerialPort;

 pub fn main() {
     if let Some(port_name) = env::args().nth(1) {
-        let sleep = env::args().position(|x| x == "--sleep").is_some();
+        let idles = env::args().position(|x| x == "--idles").is_some();

         println!("Saturating port {:?} with 115200 8N1", port_name);
-        println!("Sleep: {}", sleep);
+        println!("Idles: {}", idles);
+        println!("Process ID: {}", process::id());

         let mut port = serial::open(&port_name).unwrap();
-        if saturate(&mut port, sleep).is_err() {
+        if saturate(&mut port, idles).is_err() {
             eprintln!("Unable to saturate port");
         }
     } else {
@@ -23,7 +24,7 @@ pub fn main() {
     }
 }

-fn saturate<T: SerialPort>(port: &mut T, sleep: bool) -> io::Result<()> {
+fn saturate<T: SerialPort>(port: &mut T, idles: bool) -> io::Result<()> {
     port.reconfigure(&|settings| {
         settings.set_baud_rate(serial::Baud115200)?;
         settings.set_char_size(serial::Bits8);
@@ -39,7 +40,7 @@ fn saturate<T: SerialPort>(port: &mut T, sleep: bool) -> io::Result<()> {
         port.write_all(&buf)?;

-        if sleep {
+        if idles {
             let micros = (random::<usize>() % 1000) as u64;
             println!("Sleeping {}us", micros);
             port.flush().unwrap();