#![cfg_attr(gpdma, allow(unused))]

use core::future::poll_fn;
use core::ops::Range;
use core::sync::atomic::{compiler_fence, Ordering};
use core::task::{Poll, Waker};

use super::word::Word;

/// A "read-only" ring-buffer to be used together with the DMA controller which
|
|
|
|
/// writes in a circular way, "uncontrolled" to the buffer.
|
|
|
|
///
|
|
|
|
/// A snapshot of the ring buffer state can be attained by setting the `ndtr` field
|
|
|
|
/// to the current register value. `ndtr` describes the current position of the DMA
|
|
|
|
/// write.
|
|
|
|
///
|
|
|
|
/// # Buffer layout
|
|
|
|
///
|
|
|
|
/// ```text
|
|
|
|
/// Without wraparound: With wraparound:
|
|
|
|
///
|
|
|
|
/// + buf +--- NDTR ---+ + buf +---------- NDTR ----------+
|
|
|
|
/// | | | | | |
|
|
|
|
/// v v v v v v
|
|
|
|
/// +-----------------------------------------+ +-----------------------------------------+
|
|
|
|
/// |oooooooooooXXXXXXXXXXXXXXXXoooooooooooooo| |XXXXXXXXXXXXXooooooooooooXXXXXXXXXXXXXXXX|
|
|
|
|
/// +-----------------------------------------+ +-----------------------------------------+
|
|
|
|
/// ^ ^ ^ ^ ^ ^
|
|
|
|
/// | | | | | |
|
2023-05-29 21:49:43 +02:00
|
|
|
/// +- start --+ | +- end ------+ |
|
2023-04-26 10:51:23 +02:00
|
|
|
/// | | | |
|
2023-05-29 21:49:43 +02:00
|
|
|
/// +- end --------------------+ +- start ----------------+
|
2023-04-26 10:51:23 +02:00
|
|
|
/// ```
|
2023-07-30 02:25:18 +02:00
|
|
|
pub struct ReadableDmaRingBuffer<'a, W: Word> {
    pub(crate) dma_buf: &'a mut [W],
    start: usize,
}
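
// Illustrative numbers for the layout above (assumed, not taken from any
// specific driver): with a 16-element buffer and NDTR = 6, the DMA write
// position is 16 - 6 = 10. If `start` is 2, elements 2..10 are readable
// without wrapping; if `start` is 12, the readable span wraps: 12..16
// followed by 0..10.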

/// Error returned when the DMA writer has overwritten data
/// before it could be read.
#[derive(Debug, PartialEq)]
pub struct OverrunError;

pub trait DmaCtrl {
    /// Get the NDTR register value, i.e. the space left in the underlying
    /// buffer until the dma writer wraps.
    fn get_remaining_transfers(&self) -> usize;

    /// Get the transfer completed counter.
    /// This counter is incremented by the dma controller when NDTR is reloaded,
    /// i.e. when the writing wraps.
    fn get_complete_count(&self) -> usize;

    /// Reset the transfer completed counter to 0 and return the value just prior to the reset.
    fn reset_complete_count(&mut self) -> usize;

    /// Set the waker for a running `poll_fn`.
    fn set_waker(&mut self, waker: &Waker);
}
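
// A sketch of how these counters fit together (illustrative, not a real
// register API): for a circular transfer over a buffer of `cap` words, the
// hardware counts NDTR down from `cap` to 0 and then reloads it, so the
// current write position is `cap - get_remaining_transfers()`, which is
// exactly what `pos()` below computes, and `get_complete_count()` reports
// how many times the writer has wrapped since the counter was last reset.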

impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        Self { dma_buf, start: 0 }
    }

    /// Reset the ring buffer to its initial state
    pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
        self.start = 0;
        dma.reset_complete_count();
    }

    /// The capacity of the ringbuffer
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// The current position of the DMA write within the ringbuffer
    fn pos(&self, dma: &mut impl DmaCtrl) -> usize {
        self.cap() - dma.get_remaining_transfers()
    }

    /// Read an exact number of elements from the ringbuffer.
    ///
    /// Returns the remaining number of elements available for immediate reading.
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    ///
    /// Async/Wake Behavior:
    /// The underlying DMA peripheral can only wake us when its buffer pointer has reached the halfway point,
    /// and when it wraps around. This means that when called with a buffer of length 'M', when this
    /// ring buffer was created with a buffer of size 'N':
    /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
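    ///
    /// # Example
    ///
    /// A usage sketch (not compiled here); `dma` stands in for some `DmaCtrl`
    /// implementation:
    ///
    /// ```ignore
    /// let mut storage = [0u8; 32]; // N = 32
    /// let mut ring = ReadableDmaRingBuffer::new(&mut storage);
    /// let mut chunk = [0u8; 16]; // M = N/2, so we wake on every half-transfer
    /// let remaining = ring.read_exact(&mut dma, &mut chunk).await?;
    /// ```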
    pub async fn read_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &mut [W]) -> Result<usize, OverrunError> {
        let mut read_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            compiler_fence(Ordering::SeqCst);

            match self.read(dma, &mut buffer[read_data..buffer_len]) {
                Ok((len, remaining)) => {
                    read_data += len;
                    if read_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    /// Read elements from the ring buffer
    /// Return a tuple of the length read and the length remaining in the buffer
    /// If not all of the elements were read, then there will be some elements in the buffer remaining
    /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    pub fn read(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
        /*
            This algorithm is optimistic: we assume we haven't overrun more than a full buffer and then check
            after we've done our work to see if we have. This is because on stm32, an interrupt is not guaranteed
            to fire in the same clock cycle that a register is read, so checking get_complete_count early does
            not yield relevant information.

            Therefore, the only variable we really need to know is ndtr. If the dma has overrun by more than a full
            buffer, we will do a bit more work than we have to, but algorithms should not be optimized for error
            conditions.

            After we've done our work, we confirm that we haven't overrun more than a full buffer, and also that
            the dma has not overrun within the data we could have copied. We check the data we could have copied
            rather than the data we actually copied because it costs nothing and confirms an error condition
            earlier.
        */
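
        // Worked example of the checks below (numbers assumed for illustration):
        // with cap = 16, start = 2 and a snapshot end = 10, suppose that after
        // copying we observe pos = 5 with complete_count = 1. The writer wrapped
        // and re-entered 2..10, so the data we copied may be torn and we must
        // report OverrunError rather than hand it out.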
        let end = self.pos(dma);
        if self.start == end && dma.get_complete_count() == 0 {
            // No elements are available in the buffer
            Ok((0, self.cap()))
        } else if self.start < end {
            // The available, unread portion in the ring buffer DOES NOT wrap
            // Copy out the elements from the dma buffer
            let len = self.copy_to(buf, self.start..end);

            compiler_fence(Ordering::SeqCst);

            /*
                first, check if the dma has wrapped at all if it's after end
                or more than once if it's before start

                this is in a critical section to try to reduce mushy behavior.
                it's not ideal but it's the best we can do

                then, get the current position of the dma write and check
                if it's inside data we could have copied
            */
            let (pos, complete_count) = critical_section::with(|_| (self.pos(dma), dma.get_complete_count()));
            if (pos >= self.start && pos < end) || (complete_count > 0 && pos >= end) || complete_count > 1 {
                Err(OverrunError)
            } else {
                self.start = (self.start + len) % self.cap();

                Ok((len, self.cap() - self.start))
            }
        } else if self.start + buf.len() < self.cap() {
            // The available, unread portion in the ring buffer DOES wrap
            // The DMA writer has wrapped since we last read and is currently
            // writing (or the next byte added will be) in the beginning of the ring buffer.

            // The provided read buffer is not large enough to include all elements from the tail of the dma buffer.

            // Copy out from the dma buffer
            let len = self.copy_to(buf, self.start..self.cap());

            compiler_fence(Ordering::SeqCst);

            /*
                first, check if the dma has wrapped around more than once

                then, get the current position of the dma write and check
                if it's inside data we could have copied
            */
            let pos = self.pos(dma);
            if pos > self.start || pos < end || dma.get_complete_count() > 1 {
                Err(OverrunError)
            } else {
                self.start = (self.start + len) % self.cap();

                Ok((len, self.start + end))
            }
        } else {
            // The available, unread portion in the ring buffer DOES wrap
            // The DMA writer has wrapped since we last read and is currently
            // writing (or the next byte added will be) in the beginning of the ring buffer.

            // The provided read buffer is large enough to include all elements from the tail of the dma buffer,
            // so the next read will not have any unread tail elements in the ring buffer.

            // Copy out from the dma buffer
            let tail = self.copy_to(buf, self.start..self.cap());
            let head = self.copy_to(&mut buf[tail..], 0..end);

            compiler_fence(Ordering::SeqCst);

            /*
                first, check if the dma has wrapped around more than once

                then, get the current position of the dma write and check
                if it's inside data we could have copied
            */
            let pos = self.pos(dma);
            if pos > self.start || pos < end || dma.reset_complete_count() > 1 {
                Err(OverrunError)
            } else {
                self.start = head;
                Ok((tail + head, self.cap() - self.start))
            }
        }
    }

    /// Copy from the dma buffer at `data_range` into `buf`
    fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize {
        // Limit the number of elements that can be copied
        let length = usize::min(data_range.len(), buf.len());

        // Copy from dma buffer into read buffer
        // We need to do it like this instead of a simple copy_from_slice() because
        // reading from a part of memory that may be simultaneously written to is unsafe
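        // (volatile reads also force a real load of every element, so the
        // compiler cannot elide or coalesce reads of memory the DMA is
        // concurrently changing)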
        unsafe {
            let dma_buf = self.dma_buf.as_ptr();

            for i in 0..length {
                buf[i] = core::ptr::read_volatile(dma_buf.offset((data_range.start + i) as isize));
            }
        }

        length
    }
}

/// A "write-only" ring-buffer to be used together with the DMA controller which
/// reads in a circular way, "uncontrolled", from the buffer.
pub struct WritableDmaRingBuffer<'a, W: Word> {
    pub(crate) dma_buf: &'a mut [W],
    end: usize,
}
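
// Mirror of the readable layout above: here `end` is our write position and
// the DMA read position (derived from NDTR) plays the role of `start`; the
// free span runs circularly from `end` up to the DMA read position.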

impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        Self { dma_buf, end: 0 }
    }

    /// Reset the ring buffer to its initial state
    pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
        self.end = 0;
        dma.reset_complete_count();
    }

    /// The capacity of the ringbuffer
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// The current position of the DMA read within the ringbuffer
    fn pos(&self, dma: &mut impl DmaCtrl) -> usize {
        self.cap() - dma.get_remaining_transfers()
    }

    /// Write an exact number of elements to the ringbuffer.
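    ///
    /// A usage sketch (not compiled here); `dma` stands in for some `DmaCtrl`
    /// implementation:
    ///
    /// ```ignore
    /// let mut storage = [0u8; 32];
    /// let mut ring = WritableDmaRingBuffer::new(&mut storage);
    /// let samples = [1u8, 2, 3, 4];
    /// let remaining = ring.write_exact(&mut dma, &samples).await?;
    /// ```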
    pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, OverrunError> {
        let mut written_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            compiler_fence(Ordering::SeqCst);

            match self.write(dma, &buffer[written_data..buffer_len]) {
                Ok((len, remaining)) => {
                    written_data += len;
                    if written_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    /// Write elements to the ring buffer
    /// Return a tuple of the length written and the capacity remaining to be written in the buffer
    pub fn write(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> {
        let start = self.pos(dma);
        if start > self.end {
            // The occupied portion in the ring buffer DOES wrap
            let len = self.copy_from(buf, self.end..start);

            compiler_fence(Ordering::SeqCst);

            // Confirm that the DMA is not inside data we could have written
            let (pos, complete_count) = critical_section::with(|_| (self.pos(dma), dma.get_complete_count()));
            if (pos >= self.end && pos < start) || (complete_count > 0 && pos >= start) || complete_count > 1 {
                Err(OverrunError)
            } else {
                self.end = (self.end + len) % self.cap();

                Ok((len, self.cap() - (start - self.end)))
            }
        } else if start == self.end && dma.get_complete_count() == 0 {
            Ok((0, 0))
        } else if start <= self.end && self.end + buf.len() < self.cap() {
            // The occupied portion in the ring buffer DOES NOT wrap
            // and copying elements into the buffer WILL NOT cause it to

            // Copy into the dma buffer
            let len = self.copy_from(buf, self.end..self.cap());

            compiler_fence(Ordering::SeqCst);

            // Confirm that the DMA is not inside data we could have written
            let pos = self.pos(dma);
            if pos > self.end || pos < start || dma.get_complete_count() > 1 {
                Err(OverrunError)
            } else {
                self.end = (self.end + len) % self.cap();

                Ok((len, self.cap() - (self.end - start)))
            }
        } else {
            // The occupied portion in the ring buffer DOES NOT wrap
            // and copying elements into the buffer WILL cause it to

            let tail = self.copy_from(buf, self.end..self.cap());
            let head = self.copy_from(&buf[tail..], 0..start);

            compiler_fence(Ordering::SeqCst);

            // Confirm that the DMA is not inside data we could have written
            let pos = self.pos(dma);
            if pos > self.end || pos < start || dma.reset_complete_count() > 1 {
                Err(OverrunError)
            } else {
                self.end = head;

                Ok((tail + head, self.cap() - (start - self.end)))
            }
        }
    }

    /// Copy into the dma buffer at `data_range` from `buf`
    fn copy_from(&mut self, buf: &[W], data_range: Range<usize>) -> usize {
        // Limit the number of elements that can be copied
        let length = usize::min(data_range.len(), buf.len());

        // Copy into dma buffer from the caller's buffer
        // We need to do it like this instead of a simple copy_from_slice() because
        // writing to a part of memory that may be simultaneously read by the DMA is unsafe
        unsafe {
            let dma_buf = self.dma_buf.as_mut_ptr();

            for i in 0..length {
                core::ptr::write_volatile(dma_buf.offset((data_range.start + i) as isize), buf[i]);
            }
        }

        length
    }
}

#[cfg(test)]
mod tests {
    use core::array;
    use std::{cell, vec};

    use super::*;

    #[allow(dead_code)]
    #[derive(PartialEq, Debug)]
    enum TestCircularTransferRequest {
        GetCompleteCount(usize),
        ResetCompleteCount(usize),
        PositionRequest(usize),
    }

    struct TestCircularTransfer {
        len: usize,
        requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>,
    }

    impl DmaCtrl for TestCircularTransfer {
        fn get_remaining_transfers(&self) -> usize {
            match self.requests.borrow_mut().pop().unwrap() {
                TestCircularTransferRequest::PositionRequest(pos) => {
                    let len = self.len;

                    assert!(len >= pos);

                    len - pos
                }
                _ => unreachable!(),
            }
        }

        fn get_complete_count(&self) -> usize {
            match self.requests.borrow_mut().pop().unwrap() {
                TestCircularTransferRequest::GetCompleteCount(complete_count) => complete_count,
                _ => unreachable!(),
            }
        }

        fn reset_complete_count(&mut self) -> usize {
            match self.requests.get_mut().pop().unwrap() {
                TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count,
                _ => unreachable!(),
            }
        }

        fn set_waker(&mut self, _waker: &Waker) {}
    }

    impl TestCircularTransfer {
        pub fn new(len: usize) -> Self {
            Self {
                requests: cell::RefCell::new(vec![]),
                len,
            }
        }

        pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) {
            // Reverse so that pop() in the DmaCtrl impl yields the scripted
            // requests in the order they were queued
            requests.reverse();
            self.requests.replace(requests);
        }
    }
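
    // Each scripted vec below feeds exactly one call into the ring buffer:
    // requests are consumed in FIFO order, with PositionRequest answering the
    // `pos()` snapshots and the complete-count entries answering the wrap
    // checks.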

    #[test]
    fn empty_and_read_not_started() {
        let mut dma_buf = [0u8; 16];
        let ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
    }

    #[test]
    fn can_read() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::PositionRequest(10),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!([0, 1], buf);
        assert_eq!(2, ringbuf.start);

        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(10),
            TestCircularTransferRequest::PositionRequest(12),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!([2, 3], buf);
        assert_eq!(4, ringbuf.start);

        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(12),
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 8];
        assert_eq!(8, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]);
        assert_eq!(12, ringbuf.start);
    }

    #[test]
    fn can_read_with_wrap() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to close to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::PositionRequest(16),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 14];
        assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(14, ringbuf.start);

        /*
            Now, read around the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::ResetCompleteCount(1),
        ]);
        let mut buf = [0; 6];
        assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(4, ringbuf.start);
    }

    #[test]
    fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to close to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::PositionRequest(16),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 14];
        assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(14, ringbuf.start);

        /*
            Now, read to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::ResetCompleteCount(1),
        ]);
        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(0, ringbuf.start);
    }

    #[test]
    fn can_read_when_dma_writer_wraps_once_with_same_ndtr() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to about the middle of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 6];
        assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(6, ringbuf.start);

        /*
            Now, wrap the DMA controller around
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(1),
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(1),
        ]);
        let mut buf = [0; 6];
        assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(12, ringbuf.start);
    }

    #[test]
    fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read a few bytes
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(2),
            TestCircularTransferRequest::PositionRequest(2),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 6];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(2, ringbuf.start);

        /*
            Now, overtake the reader
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(4),
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(1),
        ]);
        let mut buf = [0; 6];
        assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
    }

    #[test]
    fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to close to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::PositionRequest(16),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 14];
        assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(14, ringbuf.start);

        /*
            Now, overtake the reader
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::PositionRequest(10),
            TestCircularTransferRequest::ResetCompleteCount(2),
        ]);
        let mut buf = [0; 6];
        assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
    }
}