2023-05-01 18:15:46 +02:00
|
|
|
#![cfg_attr(gpdma, allow(unused))]
|
|
|
|
|
2023-04-26 10:51:23 +02:00
|
|
|
use core::ops::Range;
|
|
|
|
use core::sync::atomic::{compiler_fence, Ordering};
|
|
|
|
|
|
|
|
use super::word::Word;
|
|
|
|
|
|
|
|
/// A "read-only" ring-buffer to be used together with the DMA controller which
/// writes in a circular way, "uncontrolled" to the buffer.
///
/// A snapshot of the ring buffer state can be attained by setting the `ndtr` field
/// to the current register value. `ndtr` describes the current position of the DMA
/// write.
///
/// # Buffer layout
///
/// ```text
/// Without wraparound:                             With wraparound:
///
///  + buf +--- NDTR ---+                            + buf +---------- NDTR ----------+
///  |     |            |                            |     |                          |
///  v     v            v                            v     v                          v
/// +-----------------------------------------+    +-----------------------------------------+
/// |oooooooooooXXXXXXXXXXXXXXXXoooooooooooooo|    |XXXXXXXXXXXXXooooooooooooXXXXXXXXXXXXXXXX|
/// +-----------------------------------------+    +-----------------------------------------+
///  ^          ^               ^                   ^            ^             ^
///  |          |               |                   |            |             |
///  +- start --+               |                   +- end ------+             |
///  |                          |                   |                          |
///  +- end --------------------+                   +- start -----------------+
/// ```
pub struct DmaRingBuffer<'a, W: Word> {
    /// Backing storage that the DMA controller writes into circularly.
    pub(crate) dma_buf: &'a mut [W],
    // Index of the first unread element; advanced by `read` modulo the capacity.
    start: usize,
}
|
|
|
|
|
|
|
|
/// Error returned by [`DmaRingBuffer::read`] when the DMA writer has
/// overwritten data that had not yet been read, i.e. the reader fell
/// behind the writer by more than the buffer capacity.
//
// Zero-sized unit error: `Clone`/`Copy`/`Eq` are free to derive and make the
// type convenient to pass around and compare in tests.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct OverrunError;
|
|
|
|
|
|
|
|
/// The operations the ring buffer requires from the underlying DMA controller.
pub trait DmaCtrl {
    /// Get the NDTR register value, i.e. the space left in the underlying
    /// buffer until the dma writer wraps.
    fn get_remaining_transfers(&self) -> usize;

    /// Get the transfer completed counter.
    /// This counter is incremented by the dma controller when NDTR is reloaded,
    /// i.e. when the writing wraps.
    fn get_complete_count(&self) -> usize;

    /// Reset the transfer completed counter to 0 and return the value just prior to the reset.
    fn reset_complete_count(&mut self) -> usize;
}
|
|
|
|
|
|
|
|
impl<'a, W: Word> DmaRingBuffer<'a, W> {
    /// Create a ring buffer over `dma_buf` with the read index at 0.
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        Self { dma_buf, start: 0 }
    }

    /// Reset the ring buffer to its initial state
    pub fn clear(&mut self, mut dma: impl DmaCtrl) {
        self.start = 0;
        // Discard any wraps the writer already performed so the overrun
        // detection in `read` starts from a clean slate.
        dma.reset_complete_count();
    }

    /// The capacity of the ringbuffer
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// The current position of the ringbuffer
    ///
    /// NDTR counts *down* from the capacity as the writer advances, so the
    /// write position is `cap - remaining_transfers`.
    fn pos(&self, remaining_transfers: usize) -> usize {
        self.cap() - remaining_transfers
    }

    /// Read bytes from the ring buffer
    /// Return a tuple of the length read and the length remaining in the buffer
    /// If not all of the bytes were read, then there will be some bytes in the buffer remaining
    /// The length remaining is the capacity, ring_buf.len(), less the bytes remaining after the read
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    pub fn read(&mut self, mut dma: impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
        /*
            This algorithm is optimistic: we assume we haven't overrun more than a full buffer and then check
            after we've done our work to see we have. This is because on stm32, an interrupt is not guaranteed
            to fire in the same clock cycle that a register is read, so checking get_complete_count early does
            not yield relevant information.

            Therefore, the only variable we really need to know is ndtr. If the dma has overrun by more than a full
            buffer, we will do a bit more work than we have to, but algorithms should not be optimized for error
            conditions.

            After we've done our work, we confirm that we haven't overrun more than a full buffer, and also that
            the dma has not overrun within the data we could have copied. We check the data we could have copied
            rather than the data we actually copied because it costs nothing and confirms an error condition
            earlier.
        */
        let end = self.pos(dma.get_remaining_transfers());
        if self.start == end && dma.get_complete_count() == 0 {
            // No bytes are available in the buffer
            Ok((0, self.cap()))
        } else if self.start < end {
            // The available, unread portion in the ring buffer DOES NOT wrap
            // Copy out the bytes from the dma buffer
            let len = self.copy_to(buf, self.start..end);

            // Keep the copy above ordered before the overrun re-check below.
            compiler_fence(Ordering::SeqCst);

            /*
                first, check if the dma has wrapped at all if it's after end
                or more than once if it's before start

                this is in a critical section to try to reduce mushy behavior.
                it's not ideal but it's the best we can do

                then, get the current position of the dma write and check
                if it's inside data we could have copied
            */
            let (pos, complete_count) =
                critical_section::with(|_| (self.pos(dma.get_remaining_transfers()), dma.get_complete_count()));
            if (pos >= self.start && pos < end) || (complete_count > 0 && pos >= end) || complete_count > 1 {
                Err(OverrunError)
            } else {
                // Advance the read index past the consumed region.
                self.start = (self.start + len) % self.cap();

                Ok((len, self.cap() - self.start))
            }
        } else if self.start + buf.len() < self.cap() {
            // The available, unread portion in the ring buffer DOES wrap
            // The DMA writer has wrapped since we last read and is currently
            // writing (or the next byte added will be) in the beginning of the ring buffer.

            // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer.

            // Copy out from the dma buffer
            let len = self.copy_to(buf, self.start..self.cap());

            // Keep the copy above ordered before the overrun re-check below.
            compiler_fence(Ordering::SeqCst);

            /*
                first, check if the dma has wrapped around more than once

                then, get the current position of the dma write and check
                if it's inside data we could have copied
            */
            let pos = self.pos(dma.get_remaining_transfers());
            if pos > self.start || pos < end || dma.get_complete_count() > 1 {
                Err(OverrunError)
            } else {
                // `len <= buf.len()` and `start + buf.len() < cap`, so this
                // cannot actually wrap here; the modulo is defensive.
                self.start = (self.start + len) % self.cap();

                // NOTE(review): the remaining length here is `start + end`,
                // unlike the `cap - start` form used by the other branches —
                // verify this matches the documented "length remaining" contract.
                Ok((len, self.start + end))
            }
        } else {
            // The available, unread portion in the ring buffer DOES wrap
            // The DMA writer has wrapped since we last read and is currently
            // writing (or the next byte added will be) in the beginning of the ring buffer.

            // The provided read buffer is large enough to include all bytes from the tail of the dma buffer,
            // so the next read will not have any unread tail bytes in the ring buffer.

            // Copy out from the dma buffer: tail (start..cap) first, then head (0..end).
            let tail = self.copy_to(buf, self.start..self.cap());
            let head = self.copy_to(&mut buf[tail..], 0..end);

            // Keep the copies above ordered before the overrun re-check below.
            compiler_fence(Ordering::SeqCst);

            /*
                first, check if the dma has wrapped around more than once

                then, get the current position of the dma write and check
                if it's inside data we could have copied
            */
            let pos = self.pos(dma.get_remaining_transfers());
            if pos > self.start || pos < end || dma.reset_complete_count() > 1 {
                Err(OverrunError)
            } else {
                // We consumed everything up to `end`; `head == end` elements
                // were copied from the front, so the new read index is `head`.
                self.start = head;
                Ok((tail + head, self.cap() - self.start))
            }
        }
    }

    /// Copy from the dma buffer at `data_range` into `buf`
    ///
    /// Copies at most `buf.len()` elements and returns the number copied.
    fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize {
        // Limit the number of bytes that can be copied
        let length = usize::min(data_range.len(), buf.len());

        // Copy from dma buffer into read buffer
        // We need to do it like this instead of a simple copy_from_slice() because
        // reading from a part of memory that may be simultaneously written to is unsafe
        //
        // SAFETY: `length` caps `i`, so every index stays within `data_range`;
        // callers pass ranges bounded by `self.dma_buf.len()` — assumed upheld,
        // verify at call sites. `read_volatile` keeps the compiler from caching
        // reads of memory the DMA may be writing concurrently.
        unsafe {
            let dma_buf = self.dma_buf.as_ptr();

            for i in 0..length {
                buf[i] = core::ptr::read_volatile(dma_buf.offset((data_range.start + i) as isize));
            }
        }

        length
    }
}
|
|
|
|
#[cfg(test)]
mod tests {
    use core::array;
    use std::{cell, vec};

    use super::*;

    /// One scripted response the fake DMA controller serves, in order.
    #[allow(dead_code)]
    #[derive(PartialEq, Debug)]
    enum TestCircularTransferRequest {
        /// Value the next `get_complete_count` call returns.
        GetCompleteCount(usize),
        /// Value the next `reset_complete_count` call returns.
        ResetCompleteCount(usize),
        /// Writer position reported via the next `get_remaining_transfers` call.
        PositionRequest(usize),
    }

    /// Fake DMA controller that replays a scripted list of register reads.
    struct TestCircularTransfer {
        len: usize,
        requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>,
    }

    impl DmaCtrl for &mut TestCircularTransfer {
        fn get_remaining_transfers(&self) -> usize {
            match self.requests.borrow_mut().pop().unwrap() {
                TestCircularTransferRequest::PositionRequest(pos) => {
                    let len = self.len;

                    assert!(len >= pos);

                    // NDTR counts down from `len` as the writer advances.
                    len - pos
                }
                _ => unreachable!(),
            }
        }

        fn get_complete_count(&self) -> usize {
            match self.requests.borrow_mut().pop().unwrap() {
                TestCircularTransferRequest::GetCompleteCount(complete_count) => complete_count,
                _ => unreachable!(),
            }
        }

        fn reset_complete_count(&mut self) -> usize {
            match self.requests.get_mut().pop().unwrap() {
                TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count,
                _ => unreachable!(),
            }
        }
    }

    impl TestCircularTransfer {
        pub fn new(len: usize) -> Self {
            Self {
                requests: cell::RefCell::new(vec![]),
                len,
            }
        }

        /// Queue scripted responses. They are popped from the back, so the
        /// list is reversed to serve them in the order given.
        pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) {
            requests.reverse();
            self.requests.replace(requests);
        }
    }

    #[test]
    fn empty_and_read_not_started() {
        let mut dma_buf = [0u8; 16];
        let ringbuf = DmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
    }

    #[test]
    fn can_read() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::PositionRequest(10),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!([0, 1], buf);
        assert_eq!(2, ringbuf.start);

        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(10),
            TestCircularTransferRequest::PositionRequest(12),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!([2, 3], buf);
        assert_eq!(4, ringbuf.start);

        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(12),
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 8];
        assert_eq!(8, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]);
        assert_eq!(12, ringbuf.start);
    }

    #[test]
    fn can_read_with_wrap() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to close to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::PositionRequest(16),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 14];
        assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(14, ringbuf.start);

        /*
            Now, read around the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::ResetCompleteCount(1),
        ]);
        let mut buf = [0; 6];
        assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(4, ringbuf.start);
    }

    #[test]
    fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to close to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::PositionRequest(16),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 14];
        assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(14, ringbuf.start);

        /*
            Now, read to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::ResetCompleteCount(1),
        ]);
        let mut buf = [0; 2];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(0, ringbuf.start);
    }

    #[test]
    fn can_read_when_dma_writer_wraps_once_with_same_ndtr() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to about the middle of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 6];
        assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(6, ringbuf.start);

        /*
            Now, wrap the DMA controller around
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(1),
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(1),
        ]);
        let mut buf = [0; 6];
        assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(12, ringbuf.start);
    }

    #[test]
    fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read a few bytes
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(2),
            TestCircularTransferRequest::PositionRequest(2),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 6];
        assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(2, ringbuf.start);

        /*
            Now, overtake the reader
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(4),
            TestCircularTransferRequest::PositionRequest(6),
            TestCircularTransferRequest::GetCompleteCount(1),
        ]);
        let mut buf = [0; 6];
        assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
    }

    #[test]
    fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() {
        let mut dma = TestCircularTransfer::new(16);

        let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
        let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);

        assert_eq!(0, ringbuf.start);
        assert_eq!(16, ringbuf.cap());

        /*
            Read to close to the end of the buffer
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(14),
            TestCircularTransferRequest::PositionRequest(16),
            TestCircularTransferRequest::GetCompleteCount(0),
        ]);
        let mut buf = [0; 14];
        assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
        assert_eq!(14, ringbuf.start);

        /*
            Now, overtake the reader
        */
        dma.setup(vec![
            TestCircularTransferRequest::PositionRequest(8),
            TestCircularTransferRequest::PositionRequest(10),
            TestCircularTransferRequest::ResetCompleteCount(2),
        ]);
        let mut buf = [0; 6];
        assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
    }
}
|