Merge branch 'main' into can

This commit is contained in:
xoviat
2023-05-30 21:15:26 -05:00
committed by GitHub
158 changed files with 7609 additions and 2303 deletions


@ -912,6 +912,16 @@ fn main() {
println!("cargo:rustc-cfg={}x{}", &chip_name[..9], &chip_name[10..11]);
}
// ========
// stm32wb tl_mbox link sections
if chip_name.starts_with("stm32wb") {
let out_file = out_dir.join("tl_mbox.x").to_string_lossy().to_string();
fs::write(out_file, fs::read_to_string("tl_mbox.x.in").unwrap()).unwrap();
println!("cargo:rustc-link-search={}", out_dir.display());
println!("cargo:rerun-if-changed=tl_mbox.x.in");
}
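The generated tl_mbox.x only takes effect if the final link pulls it in; a consuming binary would typically add a -T flag next to link.x in its .cargo/config.toml (hypothetical snippet, exact target and flags depend on the project):

[target.thumbv7em-none-eabihf]
rustflags = [
    "-C", "link-arg=-Tlink.x",
    # tl_mbox.x is found via the rustc-link-search path emitted above
    "-C", "link-arg=-Ttl_mbox.x",
]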
// ========
// Features for targeting groups of chips


@ -111,24 +111,18 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index
panic!("DMA: error on BDMA@{:08x} channel {}", dma.0 as u32, channel_num);
}
let mut wake = false;
if isr.htif(channel_num) && cr.read().htie() {
// Acknowledge half transfer complete interrupt
dma.ifcr().write(|w| w.set_htif(channel_num, true));
wake = true;
}
if isr.tcif(channel_num) && cr.read().tcie() {
} else if isr.tcif(channel_num) && cr.read().tcie() {
// Acknowledge transfer complete interrupt
dma.ifcr().write(|w| w.set_tcif(channel_num, true));
STATE.complete_count[index].fetch_add(1, Ordering::Release);
wake = true;
} else {
return;
}
if wake {
STATE.ch_wakers[index].wake();
}
STATE.ch_wakers[index].wake();
}
#[cfg(any(bdma_v2, dmamux))]
@ -371,7 +365,7 @@ impl<'a, C: Channel> Future for Transfer<'a, C> {
struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
fn ndtr(&self) -> usize {
fn get_remaining_transfers(&self) -> usize {
let ch = self.0.regs().ch(self.0.num());
unsafe { ch.ndtr().read() }.ndt() as usize
}
@ -457,21 +451,17 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
}
/// Read bytes from the ring buffer
/// Return a tuple of the length read and the length remaining in the buffer
/// If not all of the bytes were read, then there will be some bytes in the buffer remaining
/// The length remaining is the capacity, ring_buf.len(), less the bytes remaining after the read
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> {
pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf)
}
pub fn is_empty(&self) -> bool {
self.ringbuf.is_empty()
}
pub fn len(&self) -> usize {
self.ringbuf.len()
}
pub fn capacity(&self) -> usize {
self.ringbuf.dma_buf.len()
/// The capacity of the ringbuffer
pub fn cap(&self) -> usize {
self.ringbuf.cap()
}
pub fn set_waker(&mut self, waker: &Waker) {
@ -506,12 +496,6 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
let ch = self.channel.regs().ch(self.channel.num());
unsafe { ch.cr().read() }.en()
}
/// Synchronize the position of the ring buffer to the actual DMA controller position
pub fn reload_position(&mut self) {
let ch = self.channel.regs().ch(self.channel.num());
self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize;
}
}
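For callers, the reworked API folds the old len()/capacity() bookkeeping into the read itself; a usage sketch (ring_buf is an already-started RingBuffer; process and recover are placeholder functions):

let mut buf = [0u8; 32];
match ring_buf.read(&mut buf) {
    Ok((len, remaining)) => {
        // `len` elements were copied out; `remaining` is how much unread
        // data is still queued in the DMA buffer behind them
        process(&buf[..len]);
        if remaining > 0 {
            // more data is already waiting: read again without sleeping
        }
    }
    // the DMA writer lapped the reader; the buffered data can't be trusted
    Err(OverrunError) => recover(),
}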
impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {


@ -187,24 +187,18 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index:
panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num);
}
let mut wake = false;
if isr.htif(channel_num % 4) && cr.read().htie() {
// Acknowledge half transfer complete interrupt
dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true));
wake = true;
}
if isr.tcif(channel_num % 4) && cr.read().tcie() {
} else if isr.tcif(channel_num % 4) && cr.read().tcie() {
// Acknowledge transfer complete interrupt
dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true));
STATE.complete_count[index].fetch_add(1, Ordering::Release);
wake = true;
} else {
return;
}
if wake {
STATE.ch_wakers[index].wake();
}
STATE.ch_wakers[index].wake();
}
#[cfg(any(dma_v2, dmamux))]
@ -612,7 +606,7 @@ impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> {
struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
fn ndtr(&self) -> usize {
fn get_remaining_transfers(&self) -> usize {
let ch = self.0.regs().st(self.0.num());
unsafe { ch.ndtr().read() }.ndt() as usize
}
@ -713,21 +707,17 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
}
/// Read bytes from the ring buffer
/// Return a tuple of the length read and the length remaining in the buffer
/// If not all of the bytes were read, then there will be some bytes in the buffer remaining
/// The length remaining is the capacity, ring_buf.len(), less the bytes remaining after the read
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
pub fn read(&mut self, buf: &mut [W]) -> Result<usize, OverrunError> {
pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
self.ringbuf.read(DmaCtrlImpl(self.channel.reborrow()), buf)
}
pub fn is_empty(&self) -> bool {
self.ringbuf.is_empty()
}
pub fn len(&self) -> usize {
self.ringbuf.len()
}
pub fn capacity(&self) -> usize {
self.ringbuf.dma_buf.len()
/// The capacity of the ringbuffer
pub fn cap(&self) -> usize {
self.ringbuf.cap()
}
pub fn set_waker(&mut self, waker: &Waker) {
@ -766,12 +756,6 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> {
let ch = self.channel.regs().st(self.channel.num());
unsafe { ch.cr().read() }.en()
}
/// Synchronize the position of the ring buffer to the actual DMA controller position
pub fn reload_position(&mut self) {
let ch = self.channel.regs().st(self.channel.num());
self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize;
}
}
impl<'a, C: Channel, W: Word> Drop for RingBuffer<'a, C, W> {


@ -25,14 +25,13 @@ use super::word::Word;
/// +-----------------------------------------+ +-----------------------------------------+
/// ^ ^ ^ ^ ^ ^
/// | | | | | |
/// +- first --+ | +- end ------+ |
/// +- start --+ | +- end ------+ |
/// | | | |
/// +- end --------------------+ +- first ----------------+
/// +- end --------------------+ +- start ----------------+
/// ```
pub struct DmaRingBuffer<'a, W: Word> {
pub(crate) dma_buf: &'a mut [W],
first: usize,
pub ndtr: usize,
start: usize,
}
#[derive(Debug, PartialEq)]
@ -41,7 +40,7 @@ pub struct OverrunError;
pub trait DmaCtrl {
/// Get the NDTR register value, i.e. the space left in the underlying
/// buffer until the dma writer wraps.
fn ndtr(&self) -> usize;
fn get_remaining_transfers(&self) -> usize;
/// Get the transfer completed counter.
/// This counter is incremented by the dma controller when NDTR is reloaded,
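As a mental model of that contract (FakeDma is purely illustrative, not part of the HAL): the remaining-transfer count decrements toward zero as the writer advances and reloads to the buffer length on wrap, while the complete counter ticks once per reload:

struct FakeDma {
    len: usize,       // length of the circular buffer
    written: usize,   // total elements the writer has produced so far
    completes: usize, // wraps (NDTR reloads) since the last reset
}

impl DmaCtrl for &mut FakeDma {
    fn get_remaining_transfers(&self) -> usize {
        // NDTR-style: counts down, reloading to `len` at each wrap
        self.len - (self.written % self.len)
    }
    fn get_complete_count(&self) -> usize {
        self.completes
    }
    fn reset_complete_count(&mut self) -> usize {
        core::mem::replace(&mut self.completes, 0)
    }
}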
@ -54,151 +53,131 @@ pub trait DmaCtrl {
impl<'a, W: Word> DmaRingBuffer<'a, W> {
pub fn new(dma_buf: &'a mut [W]) -> Self {
let ndtr = dma_buf.len();
Self {
dma_buf,
first: 0,
ndtr,
}
Self { dma_buf, start: 0 }
}
/// Reset the ring buffer to its initial state
pub fn clear(&mut self, mut dma: impl DmaCtrl) {
self.first = 0;
self.ndtr = self.dma_buf.len();
self.start = 0;
dma.reset_complete_count();
}
/// The buffer end position
fn end(&self) -> usize {
self.dma_buf.len() - self.ndtr
/// The capacity of the ringbuffer
pub const fn cap(&self) -> usize {
self.dma_buf.len()
}
/// Returns whether the buffer is empty
pub fn is_empty(&self) -> bool {
self.first == self.end()
}
/// The current number of bytes in the buffer
/// This may change at any time if dma is currently active
pub fn len(&self) -> usize {
// Read out a stable end (the dma peripheral can change it at any time)
let end = self.end();
if self.first <= end {
// No wrap
end - self.first
} else {
self.dma_buf.len() - self.first + end
}
/// The current position of the ringbuffer
fn pos(&self, remaining_transfers: usize) -> usize {
self.cap() - remaining_transfers
}
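Concretely (values assumed for illustration):

// NDTR == 6 on a 16-element buffer: the next write lands at index 16 - 6 == 10
assert_eq!(16 - 6, 10);
// right after a reload NDTR == 16, so the position wraps back to 0
assert_eq!(16 - 16, 0);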
/// Read bytes from the ring buffer
/// Return a tuple of the length read and the length remaining in the buffer
/// If not all of the bytes were read, then there will be some bytes in the buffer remaining
/// The length remaining is the capacity, ring_buf.len(), less the bytes remaining after the read
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
pub fn read(&mut self, mut dma: impl DmaCtrl, buf: &mut [W]) -> Result<usize, OverrunError> {
let end = self.end();
pub fn read(&mut self, mut dma: impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
compiler_fence(Ordering::SeqCst);
if self.first == end {
// The buffer is currently empty
if dma.get_complete_count() > 0 {
// The DMA has written such that the ring buffer wraps at least once
self.ndtr = dma.ndtr();
if self.end() > self.first || dma.get_complete_count() > 1 {
return Err(OverrunError);
}
}
Ok(0)
} else if self.first < end {
/*
This algorithm is optimistic: we assume we haven't overrun more than a full buffer and then check
after we've done our work to see if we have. This is because on stm32, an interrupt is not guaranteed
to fire in the same clock cycle that a register is read, so checking get_complete_count early does
not yield relevant information.
Therefore, the only variable we really need to know is ndtr. If the dma has overrun by more than a full
buffer, we will do a bit more work than we have to, but algorithms should not be optimized for error
conditions.
After we've done our work, we confirm that we haven't overrun more than a full buffer, and also that
the dma has not overrun within the data we could have copied. We check the data we could have copied
rather than the data we actually copied because it costs nothing and confirms an error condition
earlier.
*/
let end = self.pos(dma.get_remaining_transfers());
if self.start == end && dma.get_complete_count() == 0 {
// No bytes are available in the buffer
Ok((0, self.cap()))
} else if self.start < end {
// The available, unread portion in the ring buffer DOES NOT wrap
if dma.get_complete_count() > 1 {
return Err(OverrunError);
}
// Copy out the bytes from the dma buffer
let len = self.copy_to(buf, self.first..end);
let len = self.copy_to(buf, self.start..end);
compiler_fence(Ordering::SeqCst);
match dma.get_complete_count() {
0 => {
// The DMA writer has not wrapped before nor after the copy
}
1 => {
// The DMA writer has written such that the ring buffer now wraps
self.ndtr = dma.ndtr();
if self.end() > self.first || dma.get_complete_count() > 1 {
// The bytes that we have copied out have overflowed
// as the writer has now both wrapped and is currently writing
// within the region that we have just copied out
return Err(OverrunError);
}
}
_ => {
return Err(OverrunError);
}
}
self.first = (self.first + len) % self.dma_buf.len();
Ok(len)
/*
first, check if the dma has wrapped at all if it's after end
or more than once if it's before start
this is in a critical section to try to reduce mushy behavior.
it's not ideal but it's the best we can do
then, get the current position of the dma write and check
if it's inside data we could have copied
*/
let (pos, complete_count) =
critical_section::with(|_| (self.pos(dma.get_remaining_transfers()), dma.get_complete_count()));
if (pos >= self.start && pos < end) || (complete_count > 0 && pos >= end) || complete_count > 1 {
Err(OverrunError)
} else {
self.start = (self.start + len) % self.cap();
Ok((len, self.cap() - self.start))
}
} else if self.start + buf.len() < self.cap() {
// The available, unread portion in the ring buffer DOES wrap
// The DMA writer has wrapped since we last read and is currently
// writing (or the next byte added will be) in the beginning of the ring buffer.
// The provided read buffer is not large enough to include all bytes from the tail of the dma buffer.
// Copy out from the dma buffer
let len = self.copy_to(buf, self.start..self.cap());
compiler_fence(Ordering::SeqCst);
/*
first, check if the dma has wrapped around more than once
then, get the current position of the dma write and check
if it's inside data we could have copied
*/
let pos = self.pos(dma.get_remaining_transfers());
if pos > self.start || pos < end || dma.get_complete_count() > 1 {
Err(OverrunError)
} else {
self.start = (self.start + len) % self.cap();
Ok((len, self.start + end))
}
} else {
// The available, unread portion in the ring buffer DOES wrap
// The DMA writer has wrapped since we last read and is currently
// writing (or the next byte added will be) in the beginning of the ring buffer.
let complete_count = dma.get_complete_count();
if complete_count > 1 {
return Err(OverrunError);
}
// The provided read buffer is large enough to include all bytes from the tail of the dma buffer,
// so the next read will not have any unread tail bytes in the ring buffer.
// If the unread portion wraps then the writer must also have wrapped
assert!(complete_count == 1);
// Copy out from the dma buffer
let tail = self.copy_to(buf, self.start..self.cap());
let head = self.copy_to(&mut buf[tail..], 0..end);
if self.first + buf.len() < self.dma_buf.len() {
// The provided read buffer is not large enough to include all bytes from the tail of the dma buffer.
// Copy out from the dma buffer
let len = self.copy_to(buf, self.first..self.dma_buf.len());
compiler_fence(Ordering::SeqCst);
// We have now copied out the data from dma_buf
// Make sure that the just read part was not overwritten during the copy
self.ndtr = dma.ndtr();
if self.end() > self.first || dma.get_complete_count() > 1 {
// The writer has entered the data that we have just read since we read out `end` in the beginning and until now.
return Err(OverrunError);
}
self.first = (self.first + len) % self.dma_buf.len();
Ok(len)
} else {
// The provided read buffer is large enough to include all bytes from the tail of the dma buffer,
// so the next read will not have any unread tail bytes in the ring buffer.
// Copy out from the dma buffer
let tail = self.copy_to(buf, self.first..self.dma_buf.len());
let head = self.copy_to(&mut buf[tail..], 0..end);
compiler_fence(Ordering::SeqCst);
// We have now copied out the data from dma_buf
// Reset complete counter and make sure that the just read part was not overwritten during the copy
self.ndtr = dma.ndtr();
let complete_count = dma.reset_complete_count();
if self.end() > self.first || complete_count > 1 {
return Err(OverrunError);
}
self.first = head;
Ok(tail + head)
}
compiler_fence(Ordering::SeqCst);
/*
first, check if the dma has wrapped around more than once
then, get the current position of the dma write and check
if it's inside data we could have copied
*/
let pos = self.pos(dma.get_remaining_transfers());
if pos > self.start || pos < end || dma.reset_complete_count() > 1 {
Err(OverrunError)
} else {
self.start = head;
Ok((tail + head, self.cap() - self.start))
}
}
}
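The post-copy overrun test in the non-wrapping branch can be read as a standalone predicate; a sketch with assumed numbers:

// mirrors the condition checked after the copy in the DOES-NOT-wrap branch
fn overran(start: usize, end: usize, pos: usize, complete_count: usize) -> bool {
    (pos >= start && pos < end) || (complete_count > 0 && pos >= end) || complete_count > 1
}

// cap = 16, we copied start..end = 2..10:
assert!(overran(2, 10, 4, 1));   // writer wrapped back into the copied span: overrun
assert!(!overran(2, 10, 12, 0)); // writer is still ahead of us: the copy was safe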
/// Copy from the dma buffer at `data_range` into `buf`
fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize {
// Limit the number of bytes that can be copied
@ -218,203 +197,289 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> {
length
}
}
#[cfg(test)]
mod tests {
use core::array;
use core::cell::RefCell;
use std::{cell, vec};
use super::*;
struct TestCtrl {
next_ndtr: RefCell<Option<usize>>,
complete_count: usize,
}
impl TestCtrl {
pub const fn new() -> Self {
Self {
next_ndtr: RefCell::new(None),
complete_count: 0,
}
}
pub fn set_next_ndtr(&mut self, ndtr: usize) {
self.next_ndtr.borrow_mut().replace(ndtr);
}
}
impl DmaCtrl for &mut TestCtrl {
fn ndtr(&self) -> usize {
self.next_ndtr.borrow_mut().unwrap()
}
fn get_complete_count(&self) -> usize {
self.complete_count
}
fn reset_complete_count(&mut self) -> usize {
let old = self.complete_count;
self.complete_count = 0;
old
}
}
#[allow(dead_code)]
#[derive(PartialEq, Debug)]
enum TestCircularTransferRequest {
GetCompleteCount(usize),
ResetCompleteCount(usize),
PositionRequest(usize),
}
struct TestCircularTransfer {
len: usize,
requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>,
}
impl DmaCtrl for &mut TestCircularTransfer {
fn get_remaining_transfers(&self) -> usize {
match self.requests.borrow_mut().pop().unwrap() {
TestCircularTransferRequest::PositionRequest(pos) => {
let len = self.len;
assert!(len >= pos);
len - pos
}
_ => unreachable!(),
}
}
fn get_complete_count(&self) -> usize {
match self.requests.borrow_mut().pop().unwrap() {
TestCircularTransferRequest::GetCompleteCount(complete_count) => complete_count,
_ => unreachable!(),
}
}
fn reset_complete_count(&mut self) -> usize {
match self.requests.get_mut().pop().unwrap() {
TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count,
_ => unreachable!(),
}
}
}
impl TestCircularTransfer {
pub fn new(len: usize) -> Self {
Self {
requests: cell::RefCell::new(vec![]),
len: len,
}
}
pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) {
requests.reverse();
self.requests.replace(requests);
}
}
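The mock is fully scripted: each expected register access is queued up front, and setup() reverses the list so that pop() hands the requests back in FIFO order. A minimal script (values assumed) for one non-wrapping read looks like:

let mut dma = TestCircularTransfer::new(16);
dma.setup(vec![
    TestCircularTransferRequest::PositionRequest(8),  // sampled before the copy
    TestCircularTransferRequest::PositionRequest(8),  // sampled again after the copy
    TestCircularTransferRequest::GetCompleteCount(0), // no wrap happened in between
]);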
#[test]
fn empty() {
fn empty_and_read_not_started() {
let mut dma_buf = [0u8; 16];
let ringbuf = DmaRingBuffer::new(&mut dma_buf);
assert!(ringbuf.is_empty());
assert_eq!(0, ringbuf.len());
assert_eq!(0, ringbuf.start);
}
#[test]
fn can_read() {
let mut dma = TestCircularTransfer::new(16);
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
let mut ctrl = TestCtrl::new();
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
ringbuf.ndtr = 6;
assert!(!ringbuf.is_empty());
assert_eq!(10, ringbuf.len());
assert_eq!(0, ringbuf.start);
assert_eq!(16, ringbuf.cap());
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(8),
TestCircularTransferRequest::PositionRequest(10),
TestCircularTransferRequest::GetCompleteCount(0),
]);
let mut buf = [0; 2];
assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
assert_eq!([0, 1], buf);
assert_eq!(8, ringbuf.len());
assert_eq!(2, ringbuf.start);
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(10),
TestCircularTransferRequest::PositionRequest(12),
TestCircularTransferRequest::GetCompleteCount(0),
]);
let mut buf = [0; 2];
assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
assert_eq!([2, 3], buf);
assert_eq!(6, ringbuf.len());
assert_eq!(4, ringbuf.start);
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(12),
TestCircularTransferRequest::PositionRequest(14),
TestCircularTransferRequest::GetCompleteCount(0),
]);
let mut buf = [0; 8];
assert_eq!(6, ringbuf.read(&mut ctrl, &mut buf).unwrap());
assert_eq!(8, ringbuf.read(&mut dma, &mut buf).unwrap().0);
assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]);
assert_eq!(0, ringbuf.len());
let mut buf = [0; 2];
assert_eq!(0, ringbuf.read(&mut ctrl, &mut buf).unwrap());
assert_eq!(12, ringbuf.start);
}
#[test]
fn can_read_with_wrap() {
let mut dma = TestCircularTransfer::new(16);
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
let mut ctrl = TestCtrl::new();
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
ringbuf.first = 12;
ringbuf.ndtr = 10;
// The dma controller has written 4 + 6 bytes and has reloaded NDTR
ctrl.complete_count = 1;
ctrl.set_next_ndtr(10);
assert_eq!(0, ringbuf.start);
assert_eq!(16, ringbuf.cap());
assert!(!ringbuf.is_empty());
assert_eq!(6 + 4, ringbuf.len());
/*
Read to close to the end of the buffer
*/
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(14),
TestCircularTransferRequest::PositionRequest(16),
TestCircularTransferRequest::GetCompleteCount(0),
]);
let mut buf = [0; 14];
assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
assert_eq!(14, ringbuf.start);
let mut buf = [0; 2];
assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
assert_eq!([12, 13], buf);
assert_eq!(6 + 2, ringbuf.len());
let mut buf = [0; 4];
assert_eq!(4, ringbuf.read(&mut ctrl, &mut buf).unwrap());
assert_eq!([14, 15, 0, 1], buf);
assert_eq!(4, ringbuf.len());
/*
Now, read around the buffer
*/
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(6),
TestCircularTransferRequest::PositionRequest(8),
TestCircularTransferRequest::ResetCompleteCount(1),
]);
let mut buf = [0; 6];
assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
assert_eq!(4, ringbuf.start);
}
#[test]
fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() {
let mut dma = TestCircularTransfer::new(16);
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
let mut ctrl = TestCtrl::new();
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
ringbuf.first = 2;
ringbuf.ndtr = 6;
// The dma controller has written 6 + 2 bytes and has reloaded NDTR
ctrl.complete_count = 1;
ctrl.set_next_ndtr(14);
assert_eq!(0, ringbuf.start);
assert_eq!(16, ringbuf.cap());
/*
Read to close to the end of the buffer
*/
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(14),
TestCircularTransferRequest::PositionRequest(16),
TestCircularTransferRequest::GetCompleteCount(0),
]);
let mut buf = [0; 14];
assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
assert_eq!(14, ringbuf.start);
/*
Now, read to the end of the buffer
*/
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(6),
TestCircularTransferRequest::PositionRequest(8),
TestCircularTransferRequest::ResetCompleteCount(1),
]);
let mut buf = [0; 2];
assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap());
assert_eq!([2, 3], buf);
assert_eq!(1, ctrl.complete_count); // The interrupt flag IS NOT cleared
assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
assert_eq!(0, ringbuf.start);
}
#[test]
fn can_read_when_dma_writer_is_wrapped_and_read_wraps() {
fn can_read_when_dma_writer_wraps_once_with_same_ndtr() {
let mut dma = TestCircularTransfer::new(16);
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
let mut ctrl = TestCtrl::new();
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
ringbuf.first = 12;
ringbuf.ndtr = 10;
// The dma controller has written 6 + 2 bytes and has reloaded NDTR
ctrl.complete_count = 1;
ctrl.set_next_ndtr(14);
assert_eq!(0, ringbuf.start);
assert_eq!(16, ringbuf.cap());
let mut buf = [0; 10];
assert_eq!(10, ringbuf.read(&mut ctrl, &mut buf).unwrap());
assert_eq!([12, 13, 14, 15, 0, 1, 2, 3, 4, 5], buf);
/*
Read to about the middle of the buffer
*/
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(6),
TestCircularTransferRequest::PositionRequest(6),
TestCircularTransferRequest::GetCompleteCount(0),
]);
let mut buf = [0; 6];
assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
assert_eq!(6, ringbuf.start);
assert_eq!(0, ctrl.complete_count); // The interrupt flag IS cleared
}
#[test]
fn cannot_read_when_dma_writer_wraps_with_same_ndtr() {
let mut dma_buf = [0u8; 16];
let mut ctrl = TestCtrl::new();
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
ringbuf.first = 6;
ringbuf.ndtr = 10;
ctrl.set_next_ndtr(9);
assert!(ringbuf.is_empty()); // The ring buffer thinks that it is empty
// The dma controller has written exactly 16 bytes
ctrl.complete_count = 1;
let mut buf = [0; 2];
assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
/*
Now, wrap the DMA controller around
*/
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(6),
TestCircularTransferRequest::GetCompleteCount(1),
TestCircularTransferRequest::PositionRequest(6),
TestCircularTransferRequest::GetCompleteCount(1),
]);
let mut buf = [0; 6];
assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
assert_eq!(12, ringbuf.start);
}
#[test]
fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() {
let mut dma = TestCircularTransfer::new(16);
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
let mut ctrl = TestCtrl::new();
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
ringbuf.first = 2;
ringbuf.ndtr = 6;
// The dma controller has written 6 + 3 bytes and has reloaded NDTR
ctrl.complete_count = 1;
ctrl.set_next_ndtr(13);
assert_eq!(0, ringbuf.start);
assert_eq!(16, ringbuf.cap());
let mut buf = [0; 2];
assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
/*
Read a few bytes
*/
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(2),
TestCircularTransferRequest::PositionRequest(2),
TestCircularTransferRequest::GetCompleteCount(0),
]);
let mut buf = [0; 6];
assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
assert_eq!(2, ringbuf.start);
assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
/*
Now, overtake the reader
*/
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(4),
TestCircularTransferRequest::PositionRequest(6),
TestCircularTransferRequest::GetCompleteCount(1),
]);
let mut buf = [0; 6];
assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
}
#[test]
fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() {
let mut dma = TestCircularTransfer::new(16);
let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
let mut ctrl = TestCtrl::new();
let mut ringbuf = DmaRingBuffer::new(&mut dma_buf);
ringbuf.first = 12;
ringbuf.ndtr = 10;
// The dma controller has written 6 + 13 bytes and has reloaded NDTR
ctrl.complete_count = 1;
ctrl.set_next_ndtr(3);
assert_eq!(0, ringbuf.start);
assert_eq!(16, ringbuf.cap());
let mut buf = [0; 2];
assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf));
/*
Read to close to the end of the buffer
*/
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(14),
TestCircularTransferRequest::PositionRequest(16),
TestCircularTransferRequest::GetCompleteCount(0),
]);
let mut buf = [0; 14];
assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
assert_eq!(14, ringbuf.start);
assert_eq!(1, ctrl.complete_count); // The complete counter is not reset
/*
Now, overtake the reader
*/
dma.setup(vec![
TestCircularTransferRequest::PositionRequest(8),
TestCircularTransferRequest::PositionRequest(10),
TestCircularTransferRequest::ResetCompleteCount(2),
]);
let mut buf = [0; 6];
assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
}
}


@ -163,7 +163,7 @@ pub(super) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector
bank_offset = 0;
}
if address < region.end() {
if address >= region.base && address < region.end() {
let index_in_region = (address - region.base) / region.erase_size;
return FlashSector {
bank: region.bank,

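The extra lower-bound test matters once several regions are scanned; with assumed region bounds:

// Suppose a region spans 0x0806_0000..0x0810_0000 and the address lies below
// it (e.g. in a gap or an earlier bank): 0x0805_0000.
// Old check: address < region.end()            -> true (wrong sector chosen)
// New check: address >= base && address < end  -> false (keep scanning)
let (base, end, address) = (0x0806_0000u32, 0x0810_0000u32, 0x0805_0000u32);
assert!(address < end);
assert!(!(address >= base && address < end));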

@ -1,4 +1,4 @@
#![no_std]
#![cfg_attr(not(test), no_std)]
#![cfg_attr(feature = "nightly", feature(async_fn_in_trait, impl_trait_projections))]
// This must go FIRST so that all the other modules see its macros.
@ -41,8 +41,6 @@ pub mod crc;
pub mod flash;
#[cfg(all(spi_v1, rcc_f4))]
pub mod i2s;
#[cfg(stm32wb)]
pub mod ipcc;
pub mod pwm;
#[cfg(quadspi)]
pub mod qspi;


@ -209,39 +209,39 @@ mod tests {
#[test]
fn test_compute_dead_time_value() {
struct test_run {
struct TestRun {
value: u16,
ckd: Ckd,
bits: u8,
}
let fn_results = [
test_run {
TestRun {
value: 1,
ckd: Ckd::DIV1,
bits: 1,
},
test_run {
TestRun {
value: 125,
ckd: Ckd::DIV1,
bits: 125,
},
test_run {
TestRun {
value: 245,
ckd: Ckd::DIV1,
bits: 64 + 245 / 2,
},
test_run {
TestRun {
value: 255,
ckd: Ckd::DIV2,
bits: 127,
},
test_run {
TestRun {
value: 400,
ckd: Ckd::DIV1,
bits: 32 + (400u16 / 8) as u8,
},
test_run {
TestRun {
value: 600,
ckd: Ckd::DIV4,
bits: 64 + (600u16 / 8) as u8,


@ -1,5 +1,3 @@
use core::mem::MaybeUninit;
use embassy_futures::block_on;
use super::cmd::CmdSerial;
@ -10,17 +8,17 @@ use super::{
channels, BleTable, BLE_CMD_BUFFER, CS_BUFFER, EVT_QUEUE, HCI_ACL_DATA_BUFFER, TL_BLE_TABLE, TL_CHANNEL,
TL_REF_TABLE,
};
use crate::ipcc::Ipcc;
use crate::tl_mbox::cmd::CmdPacket;
use crate::tl_mbox::ipcc::Ipcc;
pub struct Ble;
impl Ble {
pub(crate) fn new(ipcc: &mut Ipcc) -> Self {
pub fn enable() {
unsafe {
LinkedListNode::init_head(EVT_QUEUE.as_mut_ptr());
TL_BLE_TABLE = MaybeUninit::new(BleTable {
TL_BLE_TABLE.as_mut_ptr().write_volatile(BleTable {
pcmd_buffer: BLE_CMD_BUFFER.as_mut_ptr().cast(),
pcs_buffer: CS_BUFFER.as_mut_ptr().cast(),
pevt_queue: EVT_QUEUE.as_ptr().cast(),
@ -28,12 +26,10 @@ impl Ble {
});
}
ipcc.c1_set_rx_channel(channels::cpu2::IPCC_BLE_EVENT_CHANNEL, true);
Ble
Ipcc::c1_set_rx_channel(channels::cpu2::IPCC_BLE_EVENT_CHANNEL, true);
}
pub(crate) fn evt_handler(ipcc: &mut Ipcc) {
pub fn evt_handler() {
unsafe {
let mut node_ptr = core::ptr::null_mut();
let node_ptr_ptr: *mut _ = &mut node_ptr;
@ -48,10 +44,10 @@ impl Ble {
}
}
ipcc.c1_clear_flag_channel(channels::cpu2::IPCC_BLE_EVENT_CHANNEL);
Ipcc::c1_clear_flag_channel(channels::cpu2::IPCC_BLE_EVENT_CHANNEL);
}
pub(crate) fn send_cmd(ipcc: &mut Ipcc, buf: &[u8]) {
pub fn send_cmd(buf: &[u8]) {
unsafe {
let pcmd_buffer: *mut CmdPacket = (*TL_REF_TABLE.assume_init().ble_table).pcmd_buffer;
let pcmd_serial: *mut CmdSerial = &mut (*pcmd_buffer).cmd_serial;
@ -63,6 +59,6 @@ impl Ble {
cmd_packet.cmd_serial.ty = TlPacketType::BleCmd as u8;
}
ipcc.c1_set_flag_channel(channels::cpu1::IPCC_BLE_CMD_CHANNEL);
Ipcc::c1_set_flag_channel(channels::cpu1::IPCC_BLE_CMD_CHANNEL);
}
}


@ -50,7 +50,7 @@
//!
pub mod cpu1 {
use crate::ipcc::IpccChannel;
use crate::tl_mbox::ipcc::IpccChannel;
// Not used currently but reserved
pub const IPCC_BLE_CMD_CHANNEL: IpccChannel = IpccChannel::Channel1;
@ -75,7 +75,7 @@ pub mod cpu1 {
}
pub mod cpu2 {
use crate::ipcc::IpccChannel;
use crate::tl_mbox::ipcc::IpccChannel;
pub const IPCC_BLE_EVENT_CHANNEL: IpccChannel = IpccChannel::Channel1;
pub const IPCC_SYSTEM_EVENT_CHANNEL: IpccChannel = IpccChannel::Channel2;


@ -3,7 +3,7 @@ use core::mem::MaybeUninit;
use super::cmd::{AclDataPacket, AclDataSerial};
use super::consts::TlPacketType;
use super::{PacketHeader, TL_EVT_HEADER_SIZE};
use crate::tl_mbox::mm;
use crate::tl_mbox::mm::MemoryManager;
/// the payload of [`Evt`] for a command status event
#[derive(Copy, Clone)]
@ -131,9 +131,6 @@ impl EvtBox {
impl Drop for EvtBox {
fn drop(&mut self) {
use crate::ipcc::Ipcc;
let mut ipcc = Ipcc::new_inner(unsafe { crate::Peripherals::steal() }.IPCC);
mm::MemoryManager::evt_drop(self.ptr, &mut ipcc);
MemoryManager::evt_drop(self.ptr);
}
}


@ -1,6 +1,4 @@
use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
use crate::ipcc::sealed::Instance;
use self::sealed::Instance;
use crate::peripherals::IPCC;
use crate::rcc::sealed::RccPeripheral;
@ -22,29 +20,17 @@ pub enum IpccChannel {
Channel6 = 5,
}
pub(crate) mod sealed {
pub mod sealed {
pub trait Instance: crate::rcc::RccPeripheral {
fn regs() -> crate::pac::ipcc::Ipcc;
fn set_cpu2(enabled: bool);
}
}
pub struct Ipcc<'d> {
_peri: PeripheralRef<'d, IPCC>,
}
pub struct Ipcc;
impl<'d> Ipcc<'d> {
pub fn new(peri: impl Peripheral<P = IPCC> + 'd, _config: Config) -> Self {
Self::new_inner(peri)
}
pub(crate) fn new_inner(peri: impl Peripheral<P = IPCC> + 'd) -> Self {
into_ref!(peri);
Self { _peri: peri }
}
pub fn init(&mut self) {
impl Ipcc {
pub fn enable(_config: Config) {
IPCC::enable();
IPCC::reset();
IPCC::set_cpu2(true);
@ -61,56 +47,60 @@ impl<'d> Ipcc<'d> {
}
}
pub fn c1_set_rx_channel(&mut self, channel: IpccChannel, enabled: bool) {
pub fn c1_set_rx_channel(channel: IpccChannel, enabled: bool) {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { regs.cpu(0).mr().modify(|w| w.set_chom(channel as usize, !enabled)) }
}
pub fn c1_get_rx_channel(&self, channel: IpccChannel) -> bool {
pub fn c1_get_rx_channel(channel: IpccChannel) -> bool {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { !regs.cpu(0).mr().read().chom(channel as usize) }
}
pub fn c2_set_rx_channel(&mut self, channel: IpccChannel, enabled: bool) {
#[allow(dead_code)]
pub fn c2_set_rx_channel(channel: IpccChannel, enabled: bool) {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { regs.cpu(1).mr().modify(|w| w.set_chom(channel as usize, !enabled)) }
}
pub fn c2_get_rx_channel(&self, channel: IpccChannel) -> bool {
#[allow(dead_code)]
pub fn c2_get_rx_channel(channel: IpccChannel) -> bool {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { !regs.cpu(1).mr().read().chom(channel as usize) }
}
pub fn c1_set_tx_channel(&mut self, channel: IpccChannel, enabled: bool) {
pub fn c1_set_tx_channel(channel: IpccChannel, enabled: bool) {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { regs.cpu(0).mr().modify(|w| w.set_chfm(channel as usize, !enabled)) }
}
pub fn c1_get_tx_channel(&self, channel: IpccChannel) -> bool {
pub fn c1_get_tx_channel(channel: IpccChannel) -> bool {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { !regs.cpu(0).mr().read().chfm(channel as usize) }
}
pub fn c2_set_tx_channel(&mut self, channel: IpccChannel, enabled: bool) {
#[allow(dead_code)]
pub fn c2_set_tx_channel(channel: IpccChannel, enabled: bool) {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { regs.cpu(1).mr().modify(|w| w.set_chfm(channel as usize, !enabled)) }
}
pub fn c2_get_tx_channel(&self, channel: IpccChannel) -> bool {
#[allow(dead_code)]
pub fn c2_get_tx_channel(channel: IpccChannel) -> bool {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
@ -118,53 +108,51 @@ impl<'d> Ipcc<'d> {
}
/// clears IPCC receive channel status for CPU1
pub fn c1_clear_flag_channel(&mut self, channel: IpccChannel) {
pub fn c1_clear_flag_channel(channel: IpccChannel) {
let regs = IPCC::regs();
unsafe { regs.cpu(0).scr().write(|w| w.set_chc(channel as usize, true)) }
}
#[allow(dead_code)]
/// clears IPCC receive channel status for CPU2
pub fn c2_clear_flag_channel(&mut self, channel: IpccChannel) {
pub fn c2_clear_flag_channel(channel: IpccChannel) {
let regs = IPCC::regs();
unsafe { regs.cpu(1).scr().write(|w| w.set_chc(channel as usize, true)) }
}
pub fn c1_set_flag_channel(&mut self, channel: IpccChannel) {
pub fn c1_set_flag_channel(channel: IpccChannel) {
let regs = IPCC::regs();
unsafe { regs.cpu(0).scr().write(|w| w.set_chs(channel as usize, true)) }
}
pub fn c2_set_flag_channel(&mut self, channel: IpccChannel) {
#[allow(dead_code)]
pub fn c2_set_flag_channel(channel: IpccChannel) {
let regs = IPCC::regs();
unsafe { regs.cpu(1).scr().write(|w| w.set_chs(channel as usize, true)) }
}
pub fn c1_is_active_flag(&self, channel: IpccChannel) -> bool {
pub fn c1_is_active_flag(channel: IpccChannel) -> bool {
let regs = IPCC::regs();
unsafe { regs.cpu(0).sr().read().chf(channel as usize) }
}
pub fn c2_is_active_flag(&self, channel: IpccChannel) -> bool {
pub fn c2_is_active_flag(channel: IpccChannel) -> bool {
let regs = IPCC::regs();
unsafe { regs.cpu(1).sr().read().chf(channel as usize) }
}
pub fn is_tx_pending(&self, channel: IpccChannel) -> bool {
!self.c1_is_active_flag(channel) && self.c1_get_tx_channel(channel)
pub fn is_tx_pending(channel: IpccChannel) -> bool {
!Self::c1_is_active_flag(channel) && Self::c1_get_tx_channel(channel)
}
pub fn is_rx_pending(&self, channel: IpccChannel) -> bool {
self.c2_is_active_flag(channel) && self.c1_get_rx_channel(channel)
}
pub fn is_rx_pending(channel: IpccChannel) -> bool {
Self::c2_is_active_flag(channel) && Self::c1_get_rx_channel(channel)
}
pub fn as_mut_ptr(&self) -> *mut Self {
unsafe { &mut core::ptr::read(self) as *mut _ }
}
}


@ -1,22 +1,20 @@
use core::mem::MaybeUninit;
use super::evt::EvtPacket;
use super::unsafe_linked_list::LinkedListNode;
use super::{
channels, MemManagerTable, BLE_SPARE_EVT_BUF, EVT_POOL, FREE_BUFF_QUEUE, LOCAL_FREE_BUF_QUEUE, POOL_SIZE,
SYS_SPARE_EVT_BUF, TL_MEM_MANAGER_TABLE, TL_REF_TABLE,
};
use crate::ipcc::Ipcc;
use crate::tl_mbox::ipcc::Ipcc;
pub struct MemoryManager;
impl MemoryManager {
pub fn new() -> Self {
pub fn enable() {
unsafe {
LinkedListNode::init_head(FREE_BUFF_QUEUE.as_mut_ptr());
LinkedListNode::init_head(LOCAL_FREE_BUF_QUEUE.as_mut_ptr());
TL_MEM_MANAGER_TABLE = MaybeUninit::new(MemManagerTable {
TL_MEM_MANAGER_TABLE.as_mut_ptr().write_volatile(MemManagerTable {
spare_ble_buffer: BLE_SPARE_EVT_BUF.as_ptr().cast(),
spare_sys_buffer: SYS_SPARE_EVT_BUF.as_ptr().cast(),
ble_pool: EVT_POOL.as_ptr().cast(),
@ -26,31 +24,29 @@ impl MemoryManager {
traces_pool_size: 0,
});
}
MemoryManager
}
pub fn evt_handler(ipcc: &mut Ipcc) {
ipcc.c1_set_tx_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL, false);
pub fn evt_handler() {
Ipcc::c1_set_tx_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL, false);
Self::send_free_buf();
ipcc.c1_set_flag_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL);
Ipcc::c1_set_flag_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL);
}
pub fn evt_drop(evt: *mut EvtPacket, ipcc: &mut Ipcc) {
pub fn evt_drop(evt: *mut EvtPacket) {
unsafe {
let list_node = evt.cast();
LinkedListNode::remove_tail(LOCAL_FREE_BUF_QUEUE.as_mut_ptr(), list_node);
}
let channel_is_busy = ipcc.c1_is_active_flag(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL);
let channel_is_busy = Ipcc::c1_is_active_flag(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL);
// postpone event buffer freeing to IPCC interrupt handler
if channel_is_busy {
ipcc.c1_set_tx_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL, true);
Ipcc::c1_set_tx_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL, true);
} else {
Self::send_free_buf();
ipcc.c1_set_flag_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL);
Ipcc::c1_set_flag_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL);
}
}


@ -1,6 +1,9 @@
use core::mem::MaybeUninit;
use atomic_polyfill::{compiler_fence, Ordering};
use bit_field::BitField;
use embassy_cortex_m::interrupt::{Interrupt, InterruptExt};
use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
use embassy_sync::channel::Channel;
@ -12,13 +15,16 @@ use self::shci::{shci_ble_init, ShciBleInitCmdParam};
use self::sys::Sys;
use self::unsafe_linked_list::LinkedListNode;
use crate::interrupt;
use crate::ipcc::Ipcc;
use crate::peripherals::IPCC;
pub use crate::tl_mbox::ipcc::Config;
use crate::tl_mbox::ipcc::Ipcc;
mod ble;
mod channels;
mod cmd;
mod consts;
mod evt;
mod ipcc;
mod mm;
mod shci;
mod sys;
@ -58,13 +64,34 @@ pub struct FusInfoTable {
pub struct ReceiveInterruptHandler {}
impl interrupt::Handler<interrupt::IPCC_C1_RX> for ReceiveInterruptHandler {
unsafe fn on_interrupt() {}
unsafe fn on_interrupt() {
// info!("ipcc rx interrupt");
if Ipcc::is_rx_pending(channels::cpu2::IPCC_SYSTEM_EVENT_CHANNEL) {
sys::Sys::evt_handler();
} else if Ipcc::is_rx_pending(channels::cpu2::IPCC_BLE_EVENT_CHANNEL) {
ble::Ble::evt_handler();
} else {
todo!()
}
}
}
pub struct TransmitInterruptHandler {}
impl interrupt::Handler<interrupt::IPCC_C1_TX> for TransmitInterruptHandler {
unsafe fn on_interrupt() {}
unsafe fn on_interrupt() {
// info!("ipcc tx interrupt");
if Ipcc::is_tx_pending(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL) {
// TODO: handle this case
let _ = sys::Sys::cmd_evt_handler();
} else if Ipcc::is_tx_pending(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL) {
mm::MemoryManager::evt_handler();
} else {
todo!()
}
}
}
/// # Version
@ -289,21 +316,24 @@ static mut HCI_ACL_DATA_BUFFER: MaybeUninit<[u8; TL_PACKET_HEADER_SIZE + 5 + 251
// TODO: get a better size, this is a placeholder
pub(crate) static TL_CHANNEL: Channel<CriticalSectionRawMutex, EvtBox, 5> = Channel::new();
pub struct TlMbox {
_sys: Sys,
_ble: Ble,
_mm: MemoryManager,
pub struct TlMbox<'d> {
_ipcc: PeripheralRef<'d, IPCC>,
}
impl TlMbox {
impl<'d> TlMbox<'d> {
/// initializes low-level transport between CPU1 and BLE stack on CPU2
pub fn init(
ipcc: &mut Ipcc,
pub fn new(
ipcc: impl Peripheral<P = IPCC> + 'd,
_irqs: impl interrupt::Binding<interrupt::IPCC_C1_RX, ReceiveInterruptHandler>
+ interrupt::Binding<interrupt::IPCC_C1_TX, TransmitInterruptHandler>,
) -> TlMbox {
config: Config,
) -> Self {
into_ref!(ipcc);
unsafe {
TL_REF_TABLE = MaybeUninit::new(RefTable {
compiler_fence(Ordering::AcqRel);
TL_REF_TABLE.as_mut_ptr().write_volatile(RefTable {
device_info_table: TL_DEVICE_INFO_TABLE.as_ptr(),
ble_table: TL_BLE_TABLE.as_ptr(),
thread_table: TL_THREAD_TABLE.as_ptr(),
@ -316,6 +346,10 @@ impl TlMbox {
ble_lld_table: TL_BLE_LLD_TABLE.as_ptr(),
});
// info!("TL_REF_TABLE addr: {:x}", TL_REF_TABLE.as_ptr() as usize);
compiler_fence(Ordering::AcqRel);
TL_SYS_TABLE = MaybeUninit::zeroed();
TL_DEVICE_INFO_TABLE = MaybeUninit::zeroed();
TL_BLE_TABLE = MaybeUninit::zeroed();
@ -334,33 +368,24 @@ impl TlMbox {
CS_BUFFER = MaybeUninit::zeroed();
BLE_CMD_BUFFER = MaybeUninit::zeroed();
HCI_ACL_DATA_BUFFER = MaybeUninit::zeroed();
compiler_fence(Ordering::AcqRel);
}
ipcc.init();
Ipcc::enable(config);
let _sys = Sys::new(ipcc);
let _ble = Ble::new(ipcc);
let _mm = MemoryManager::new();
Sys::enable();
Ble::enable();
MemoryManager::enable();
// rx_irq.disable();
// tx_irq.disable();
//
// rx_irq.set_handler_context(ipcc.as_mut_ptr() as *mut ());
// tx_irq.set_handler_context(ipcc.as_mut_ptr() as *mut ());
//
// rx_irq.set_handler(|ipcc| {
// let ipcc: &mut Ipcc = unsafe { &mut *ipcc.cast() };
// Self::interrupt_ipcc_rx_handler(ipcc);
// });
// tx_irq.set_handler(|ipcc| {
// let ipcc: &mut Ipcc = unsafe { &mut *ipcc.cast() };
// Self::interrupt_ipcc_tx_handler(ipcc);
// });
//
// rx_irq.enable();
// tx_irq.enable();
// enable interrupts
unsafe { crate::interrupt::IPCC_C1_RX::steal() }.unpend();
unsafe { crate::interrupt::IPCC_C1_TX::steal() }.unpend();
TlMbox { _sys, _ble, _mm }
unsafe { crate::interrupt::IPCC_C1_RX::steal() }.enable();
unsafe { crate::interrupt::IPCC_C1_TX::steal() }.enable();
Self { _ipcc: ipcc }
}
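Construction now follows the usual embassy peripheral pattern; a sketch of how an application might bring the mailbox up (based on the signature above, inside an async context; exact paths and config are the project's choice):

use embassy_stm32::tl_mbox::{Config, TlMbox, ReceiveInterruptHandler, TransmitInterruptHandler};

embassy_stm32::bind_interrupts!(struct Irqs {
    IPCC_C1_RX => ReceiveInterruptHandler;
    IPCC_C1_TX => TransmitInterruptHandler;
});

let p = embassy_stm32::init(Default::default());
let mbox = TlMbox::new(p.IPCC, Irqs, Config::default());

// wait for CPU2's ready event before talking to the BLE stack
let evt = mbox.read().await;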
pub fn wireless_fw_info(&self) -> Option<WirelessFwInfoTable> {
@ -374,42 +399,19 @@ impl TlMbox {
}
}
pub fn shci_ble_init(&self, ipcc: &mut Ipcc, param: ShciBleInitCmdParam) {
shci_ble_init(ipcc, param);
pub fn shci_ble_init(&self, param: ShciBleInitCmdParam) {
shci_ble_init(param);
}
pub fn send_ble_cmd(&self, ipcc: &mut Ipcc, buf: &[u8]) {
ble::Ble::send_cmd(ipcc, buf);
pub fn send_ble_cmd(&self, buf: &[u8]) {
ble::Ble::send_cmd(buf);
}
// pub fn send_sys_cmd(&self, ipcc: &mut Ipcc, buf: &[u8]) {
// sys::Sys::send_cmd(ipcc, buf);
// pub fn send_sys_cmd(&self, buf: &[u8]) {
// sys::Sys::send_cmd(buf);
// }
pub async fn read(&self) -> EvtBox {
TL_CHANNEL.recv().await
}
#[allow(dead_code)]
fn interrupt_ipcc_rx_handler(ipcc: &mut Ipcc) {
if ipcc.is_rx_pending(channels::cpu2::IPCC_SYSTEM_EVENT_CHANNEL) {
sys::Sys::evt_handler(ipcc);
} else if ipcc.is_rx_pending(channels::cpu2::IPCC_BLE_EVENT_CHANNEL) {
ble::Ble::evt_handler(ipcc);
} else {
todo!()
}
}
#[allow(dead_code)]
fn interrupt_ipcc_tx_handler(ipcc: &mut Ipcc) {
if ipcc.is_tx_pending(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL) {
// TODO: handle this case
let _ = sys::Sys::cmd_evt_handler(ipcc);
} else if ipcc.is_tx_pending(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL) {
mm::MemoryManager::evt_handler(ipcc);
} else {
todo!()
}
}
}


@ -3,7 +3,7 @@
use super::cmd::CmdPacket;
use super::consts::TlPacketType;
use super::{channels, TL_CS_EVT_SIZE, TL_EVT_HEADER_SIZE, TL_PACKET_HEADER_SIZE, TL_SYS_TABLE};
use crate::ipcc::Ipcc;
use crate::tl_mbox::ipcc::Ipcc;
const SCHI_OPCODE_BLE_INIT: u16 = 0xfc66;
pub const TL_BLE_EVT_CS_PACKET_SIZE: usize = TL_EVT_HEADER_SIZE + TL_CS_EVT_SIZE;
@ -76,7 +76,7 @@ pub struct ShciBleInitCmdPacket {
param: ShciBleInitCmdParam,
}
pub fn shci_ble_init(ipcc: &mut Ipcc, param: ShciBleInitCmdParam) {
pub fn shci_ble_init(param: ShciBleInitCmdParam) {
let mut packet = ShciBleInitCmdPacket {
header: ShciHeader::default(),
param,
@ -95,7 +95,7 @@ pub fn shci_ble_init(ipcc: &mut Ipcc, param: ShciBleInitCmdParam) {
cmd_buf.cmd_serial.ty = TlPacketType::SysCmd as u8;
ipcc.c1_set_flag_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL);
ipcc.c1_set_tx_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL, true);
Ipcc::c1_set_flag_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL);
Ipcc::c1_set_tx_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL, true);
}
}


@ -1,5 +1,3 @@
use core::mem::MaybeUninit;
use embassy_futures::block_on;
use super::cmd::{CmdPacket, CmdSerial};
@ -7,27 +5,25 @@ use super::consts::TlPacketType;
use super::evt::{CcEvt, EvtBox, EvtSerial};
use super::unsafe_linked_list::LinkedListNode;
use super::{channels, SysTable, SYSTEM_EVT_QUEUE, SYS_CMD_BUF, TL_CHANNEL, TL_REF_TABLE, TL_SYS_TABLE};
use crate::ipcc::Ipcc;
use crate::tl_mbox::ipcc::Ipcc;
pub struct Sys;
impl Sys {
pub(crate) fn new(ipcc: &mut Ipcc) -> Self {
pub fn enable() {
unsafe {
LinkedListNode::init_head(SYSTEM_EVT_QUEUE.as_mut_ptr());
TL_SYS_TABLE = MaybeUninit::new(SysTable {
TL_SYS_TABLE.as_mut_ptr().write_volatile(SysTable {
pcmd_buffer: SYS_CMD_BUF.as_mut_ptr(),
sys_queue: SYSTEM_EVT_QUEUE.as_ptr(),
});
}
ipcc.c1_set_rx_channel(channels::cpu2::IPCC_SYSTEM_EVENT_CHANNEL, true);
Sys
Ipcc::c1_set_rx_channel(channels::cpu2::IPCC_SYSTEM_EVENT_CHANNEL, true);
}
pub(crate) fn evt_handler(ipcc: &mut Ipcc) {
pub fn evt_handler() {
unsafe {
let mut node_ptr = core::ptr::null_mut();
let node_ptr_ptr: *mut _ = &mut node_ptr;
@ -43,11 +39,11 @@ impl Sys {
}
}
ipcc.c1_clear_flag_channel(channels::cpu2::IPCC_SYSTEM_EVENT_CHANNEL);
Ipcc::c1_clear_flag_channel(channels::cpu2::IPCC_SYSTEM_EVENT_CHANNEL);
}
pub(crate) fn cmd_evt_handler(ipcc: &mut Ipcc) -> CcEvt {
ipcc.c1_set_tx_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL, false);
pub fn cmd_evt_handler() -> CcEvt {
Ipcc::c1_set_tx_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL, false);
// ST's command response data structure is really convoluted.
//
@ -68,11 +64,11 @@ impl Sys {
}
#[allow(dead_code)]
pub(crate) fn send_cmd(ipcc: &mut Ipcc, buf: &[u8]) {
pub fn send_cmd(buf: &[u8]) {
unsafe {
// TODO: check this
let cmd_buffer = &mut *(*TL_REF_TABLE.assume_init().sys_table).pcmd_buffer;
let cmd_serial: *mut CmdSerial = &mut (*cmd_buffer).cmd_serial;
let cmd_serial: *mut CmdSerial = &mut cmd_buffer.cmd_serial;
let cmd_serial_buf = cmd_serial.cast();
core::ptr::copy(buf.as_ptr(), cmd_serial_buf, buf.len());
@ -80,8 +76,8 @@ impl Sys {
let cmd_packet = &mut *(*TL_REF_TABLE.assume_init().sys_table).pcmd_buffer;
cmd_packet.cmd_serial.ty = TlPacketType::SysCmd as u8;
ipcc.c1_set_flag_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL);
ipcc.c1_set_tx_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL, true);
Ipcc::c1_set_flag_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL);
Ipcc::c1_set_tx_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL, true);
}
}
}


@ -13,6 +13,12 @@ use futures::future::{select, Either};
use crate::dma::{NoDma, Transfer};
use crate::gpio::sealed::AFType;
#[cfg(not(any(usart_v1, usart_v2)))]
#[allow(unused_imports)]
use crate::pac::usart::regs::Isr as Sr;
#[cfg(any(usart_v1, usart_v2))]
#[allow(unused_imports)]
use crate::pac::usart::regs::Sr;
#[cfg(not(any(usart_v1, usart_v2)))]
use crate::pac::usart::Lpuart as Regs;
#[cfg(any(usart_v1, usart_v2))]
use crate::pac::usart::Usart as Regs;
@ -32,7 +38,6 @@ impl<T: BasicInstance> interrupt::Handler<T::Interrupt> for InterruptHandler<T>
let (sr, cr1, cr3) = unsafe { (sr(r).read(), r.cr1().read(), r.cr3().read()) };
let mut wake = false;
let has_errors = (sr.pe() && cr1.peie()) || ((sr.fe() || sr.ne() || sr.ore()) && cr3.eie());
if has_errors {
// clear all interrupts and DMA Rx Request
@ -52,35 +57,24 @@ impl<T: BasicInstance> interrupt::Handler<T::Interrupt> for InterruptHandler<T>
w.set_dmar(false);
});
}
} else if cr1.idleie() && sr.idle() {
// IDLE detected: no more data will come
unsafe {
r.cr1().modify(|w| {
// disable idle line detection
w.set_idleie(false);
});
}
} else if cr1.rxneie() {
// We cannot check the RXNE flag as it is auto-cleared by the DMA controller
// It is up to the listener to determine if this in fact was a RX event and disable the RXNE detection
wake = true;
} else {
if cr1.idleie() && sr.idle() {
// IDLE detected: no more data will come
unsafe {
r.cr1().modify(|w| {
// disable idle line detection
w.set_idleie(false);
});
}
wake = true;
}
if cr1.rxneie() {
// We cannot check the RXNE flag as it is auto-cleared by the DMA controller
// It is up to the listener to determine if this in fact was a RX event and disable the RXNE detection
wake = true;
}
return;
}
if wake {
compiler_fence(Ordering::SeqCst);
s.rx_waker.wake();
}
compiler_fence(Ordering::SeqCst);
s.rx_waker.wake();
}
}
@ -1109,9 +1103,9 @@ pub use crate::usart::buffered::InterruptHandler as BufferedInterruptHandler;
mod buffered;
#[cfg(not(gpdma))]
mod rx_ringbuffered;
mod ringbuffered;
#[cfg(not(gpdma))]
pub use rx_ringbuffered::RingBufferedUartRx;
pub use ringbuffered::RingBufferedUartRx;
use self::sealed::Kind;


@ -2,13 +2,12 @@ use core::future::poll_fn;
use core::sync::atomic::{compiler_fence, Ordering};
use core::task::Poll;
use embassy_hal_common::drop::OnDrop;
use embassy_hal_common::PeripheralRef;
use futures::future::{select, Either};
use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx};
use crate::dma::ringbuffer::OverrunError;
use crate::dma::RingBuffer;
use crate::usart::{Regs, Sr};
pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> {
_peri: PeripheralRef<'d, T>,
@ -24,7 +23,9 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> {
let request = self.rx_dma.request();
let opts = Default::default();
let ring_buf = unsafe { RingBuffer::new_read(self.rx_dma, request, rdr(T::regs()), dma_buf, opts) };
RingBufferedUartRx {
_peri: self._peri,
ring_buf,
@ -42,11 +43,18 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
Ok(())
}
fn stop(&mut self, err: Error) -> Result<usize, Error> {
self.teardown_uart();
Err(err)
}
/// Start uart background receive
fn setup_uart(&mut self) {
// fence before starting DMA.
compiler_fence(Ordering::SeqCst);
// start the dma controller
self.ring_buf.start();
let r = T::regs();
@ -58,8 +66,8 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
w.set_rxneie(false);
// enable parity interrupt if not ParityNone
w.set_peie(w.pce());
// disable idle line interrupt
w.set_idleie(false);
// enable idle line interrupt
w.set_idleie(true);
});
r.cr3().modify(|w| {
// enable Error Interrupt: (Frame error, Noise error, Overrun error)
@ -72,6 +80,8 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
/// Stop uart background receive
fn teardown_uart(&mut self) {
self.ring_buf.request_stop();
let r = T::regs();
// clear all interrupts and DMA Rx Request
// SAFETY: only clears Rx related flags
@ -93,9 +103,6 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
}
compiler_fence(Ordering::SeqCst);
self.ring_buf.request_stop();
while self.ring_buf.is_running() {}
}
/// Read bytes that are readily available in the ring buffer.
@ -111,96 +118,49 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
// Start background receive if it was not already started
// SAFETY: read only
let is_started = unsafe { r.cr3().read().dmar() };
if !is_started {
self.start()?;
}
match unsafe { r.cr3().read().dmar() } {
false => self.start()?,
_ => {}
};
// SAFETY: read only and we only use Rx related flags
let s = unsafe { sr(r).read() };
let has_errors = s.pe() || s.fe() || s.ne() || s.ore();
if has_errors {
self.teardown_uart();
if s.pe() {
return Err(Error::Parity);
} else if s.fe() {
return Err(Error::Framing);
} else if s.ne() {
return Err(Error::Noise);
} else {
return Err(Error::Overrun);
}
}
self.ring_buf.reload_position();
match self.ring_buf.read(buf) {
Ok(len) if len == 0 => {}
Ok(len) => {
assert!(len > 0);
return Ok(len);
}
Err(OverrunError) => {
// Stop any transfer from now on
// The user must re-start to receive any more data
self.teardown_uart();
return Err(Error::Overrun);
}
}
check_for_errors(clear_idle_flag(T::regs()))?;
loop {
self.wait_for_data_or_idle().await?;
self.ring_buf.reload_position();
if !self.ring_buf.is_empty() {
break;
}
}
let len = self.ring_buf.read(buf).map_err(|_err| Error::Overrun)?;
assert!(len > 0);
Ok(len)
loop {
match self.ring_buf.read(buf) {
Ok((0, _)) => {}
Ok((len, _)) => {
return Ok(len);
}
Err(_) => {
return self.stop(Error::Overrun);
}
}
match self.wait_for_data_or_idle().await {
Ok(_) => {}
Err(err) => {
return self.stop(err);
}
}
}
}
/// Wait for uart idle or dma half-full or full
async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> {
let r = T::regs();
// make sure USART state is restored to neutral state
let _on_drop = OnDrop::new(move || {
// SAFETY: only clears Rx related flags
unsafe {
r.cr1().modify(|w| {
// disable idle line interrupt
w.set_idleie(false);
});
}
});
// SAFETY: only sets Rx related flags
unsafe {
r.cr1().modify(|w| {
// enable idle line interrupt
w.set_idleie(true);
});
}
compiler_fence(Ordering::SeqCst);
// Future which completes when the dma is half full or full
let dma = poll_fn(|cx| {
self.ring_buf.set_waker(cx.waker());
compiler_fence(Ordering::SeqCst);
self.ring_buf.reload_position();
if !self.ring_buf.is_empty() {
// Some data is now available
Poll::Ready(())
} else {
Poll::Pending
}
});
let mut dma_init = false;
// Future which completes when the dma is half full or full
let dma = poll_fn(|cx| {
self.ring_buf.set_waker(cx.waker());
compiler_fence(Ordering::SeqCst);
let status = match dma_init {
false => Poll::Pending,
true => Poll::Ready(()),
};
dma_init = true;
status
});
// Future which completes when idle line is detected
@ -210,28 +170,11 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
compiler_fence(Ordering::SeqCst);
// SAFETY: read only and we only use Rx related flags
let sr = unsafe { sr(r).read() };
// Critical section is needed so that IDLE isn't set after
// our read but before we clear it.
let sr = critical_section::with(|_| clear_idle_flag(T::regs()));
// SAFETY: only clears Rx related flags
unsafe {
// This read also clears the error and idle interrupt flags on v1.
rdr(r).read_volatile();
clear_interrupt_flags(r, sr);
}
let has_errors = sr.pe() || sr.fe() || sr.ne() || sr.ore();
if has_errors {
if sr.pe() {
return Poll::Ready(Err(Error::Parity));
} else if sr.fe() {
return Poll::Ready(Err(Error::Framing));
} else if sr.ne() {
return Poll::Ready(Err(Error::Noise));
} else {
return Poll::Ready(Err(Error::Overrun));
}
}
check_for_errors(sr)?;
if sr.idle() {
// Idle line is detected
@ -243,11 +186,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
match select(dma, uart).await {
Either::Left(((), _)) => Ok(()),
Either::Right((Ok(()), _)) => Ok(()),
Either::Right((Err(e), _)) => {
self.teardown_uart();
Err(e)
}
Either::Right((result, _)) => result,
}
}
}
@ -257,6 +196,37 @@ impl<T: BasicInstance, RxDma: super::RxDma<T>> Drop for RingBufferedUartRx<'_, T
self.teardown_uart();
}
}
/// Return an error result if the Sr register has errors
fn check_for_errors(s: Sr) -> Result<(), Error> {
if s.pe() {
Err(Error::Parity)
} else if s.fe() {
Err(Error::Framing)
} else if s.ne() {
Err(Error::Noise)
} else if s.ore() {
Err(Error::Overrun)
} else {
Ok(())
}
}
/// Clear IDLE and return the Sr register
fn clear_idle_flag(r: Regs) -> Sr {
unsafe {
// SAFETY: read only and we only use Rx related flags
let sr = sr(r).read();
// This read also clears the error and idle interrupt flags on v1.
rdr(r).read_volatile();
clear_interrupt_flags(r, sr);
r.cr1().modify(|w| w.set_idleie(true));
sr
}
}
#[cfg(all(feature = "unstable-traits", feature = "nightly"))]
mod eio {


@ -0,0 +1,15 @@
MEMORY
{
RAM_SHARED (xrw) : ORIGIN = 0x20030000, LENGTH = 10K
}
/*
* Scatter the mailbox interface memory sections in shared memory
*/
SECTIONS
{
TL_REF_TABLE (NOLOAD) : { *(TL_REF_TABLE) } >RAM_SHARED
MB_MEM1 (NOLOAD) : { *(MB_MEM1) } >RAM_SHARED
MB_MEM2 (NOLOAD) : { _sMB_MEM2 = . ; *(MB_MEM2) ; _eMB_MEM2 = . ; } >RAM_SHARED
}
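These output sections are fed by statics in the mailbox driver; the pattern (an illustrative excerpt mirroring tl_mbox's tables, not a verbatim copy) pins each shared table with #[link_section]:

// the reference table must sit where CPU2 expects to find it
#[link_section = "TL_REF_TABLE"]
pub static mut TL_REF_TABLE: MaybeUninit<RefTable> = MaybeUninit::uninit();

// buffers exchanged with CPU2 are scattered into MB_MEM1/MB_MEM2
#[link_section = "MB_MEM1"]
static mut EVT_POOL: MaybeUninit<[u8; POOL_SIZE]> = MaybeUninit::uninit();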