embassy/embassy-stm32-wpan/src/sub/mac.rs

125 lines
3.7 KiB
Rust
Raw Normal View History

2023-06-19 01:51:14 +02:00
use core::future::poll_fn;
use core::ptr;
use core::sync::atomic::{AtomicBool, Ordering};
use core::task::Poll;
use embassy_futures::poll_once;
use embassy_stm32::ipcc::Ipcc;
use embassy_sync::waitqueue::AtomicWaker;
use crate::cmd::CmdPacket;
use crate::consts::TlPacketType;
use crate::evt::{EvtBox, EvtPacket};
2023-07-15 21:47:34 +02:00
use crate::mac::commands::MacCommand;
2023-07-20 23:45:04 +02:00
use crate::mac::event::MacEvent;
2023-07-15 21:47:34 +02:00
use crate::mac::typedefs::MacError;
2023-07-09 23:08:39 +02:00
use crate::tables::{MAC_802_15_4_CMD_BUFFER, MAC_802_15_4_NOTIF_RSP_EVT_BUFFER};
2023-06-24 02:59:48 +02:00
use crate::{channels, evt};
2023-06-19 01:51:14 +02:00
// Wakes the task parked in `Mac::tl_read` once the outstanding `EvtBox` is dropped.
static MAC_WAKER: AtomicWaker = AtomicWaker::new();
// True while an `EvtBox` backed by the MAC notification buffer is outstanding;
// `Mac::tl_read` must not hand out a second box until the first is released
// (the buffer is shared with the coprocessor and there is only one).
static MAC_EVT_OUT: AtomicBool = AtomicBool::new(false);
/// Handle to the MAC 802.15.4 subsystem running on the wireless coprocessor.
///
/// The zero-sized private field makes the type impossible to construct outside
/// this crate; instances are created only through the crate-internal
/// `Mac::new`, which keeps the IPCC channel ownership in one place.
pub struct Mac {
    _private: (),
}
impl Mac {
    /// Creates the MAC handle. Crate-internal: only the subsystem
    /// initialization code may mint one (see `Mac` type docs).
    pub(crate) fn new() -> Self {
        Self { _private: () }
    }

    /// `HW_IPCC_MAC_802_15_4_EvtNot`
    ///
    /// Receives the next MAC notification event from the coprocessor.
    ///
    /// This function will stall if the previous `EvtBox` has not been dropped:
    /// the notification buffer is reused, so only one `EvtBox` may be live at
    /// a time (tracked by `MAC_EVT_OUT`, cleared in `drop_event_packet`).
    pub async fn tl_read(&self) -> EvtBox<Self> {
        // Wait for the last event box to be dropped
        poll_fn(|cx| {
            // Register before checking the flag so a concurrent drop cannot
            // clear the flag between our load and our park.
            MAC_WAKER.register(cx.waker());
            if MAC_EVT_OUT.load(Ordering::SeqCst) {
                Poll::Pending
            } else {
                Poll::Ready(())
            }
        })
        .await;

        // Return a new event box
        Ipcc::receive(channels::cpu2::IPCC_MAC_802_15_4_NOTIFICATION_ACK_CHANNEL, || unsafe {
            // The closure is not async, therefore the closure must execute to completion (cannot be dropped)
            // Therefore, the event box is guaranteed to be cleaned up if it's not leaked
            MAC_EVT_OUT.store(true, Ordering::SeqCst);

            // SAFETY: the flag set above guarantees exclusive access to the
            // notification buffer until the returned box is dropped.
            Some(EvtBox::new(MAC_802_15_4_NOTIF_RSP_EVT_BUFFER.as_mut_ptr() as *mut _))
        })
        .await
    }

    /// `HW_IPCC_MAC_802_15_4_CmdEvtNot`
    ///
    /// Writes a command, waits for the coprocessor to consume it, then reads
    /// back the first payload byte of the command-response event. Callers
    /// treat `0x00` as success (see `send_command`); presumably this byte is
    /// the MAC status code — TODO confirm against the coprocessor TL spec.
    pub async fn tl_write_and_get_response(&self, opcode: u16, payload: &[u8]) -> u8 {
        self.tl_write(opcode, payload).await;
        // Wait until CPU2 has processed the command and written its response
        // into the (shared) command buffer.
        Ipcc::flush(channels::cpu1::IPCC_MAC_802_15_4_CMD_RSP_CHANNEL).await;

        // SAFETY: after the flush above, the coprocessor has released the
        // command buffer; volatile read because the buffer is written by the
        // other core outside the compiler's view.
        unsafe {
            let p_event_packet = MAC_802_15_4_CMD_BUFFER.as_ptr() as *const EvtPacket;
            let p_mac_rsp_evt = &((*p_event_packet).evt_serial.evt.payload) as *const u8;

            ptr::read_volatile(p_mac_rsp_evt)
        }
    }

    /// `TL_MAC_802_15_4_SendCmd`
    ///
    /// Serializes `opcode` + `payload` into the shared command buffer and
    /// signals CPU2 over the command/response IPCC channel.
    pub async fn tl_write(&self, opcode: u16, payload: &[u8]) {
        Ipcc::send(channels::cpu1::IPCC_MAC_802_15_4_CMD_RSP_CHANNEL, || unsafe {
            // SAFETY: `Ipcc::send` grants us the buffer only while CPU2 is not
            // using it; the closure runs to completion before the signal.
            CmdPacket::write_into(
                MAC_802_15_4_CMD_BUFFER.as_mut_ptr(),
                TlPacketType::MacCmd,
                opcode,
                payload,
            );
        })
        .await;
    }

    /// Sends a typed MAC command and maps the returned status byte to a
    /// `Result`: `0x00` is success, anything else becomes a `MacError`.
    pub async fn send_command<T>(&self, cmd: &T) -> Result<(), MacError>
    where
        T: MacCommand,
    {
        let response = self.tl_write_and_get_response(T::OPCODE as u16, cmd.payload()).await;

        if response == 0x00 {
            Ok(())
        } else {
            Err(MacError::from(response))
        }
    }

    /// Reads the next raw notification and parses it into a `MacEvent`.
    /// Errors with `()` when `MacEvent::new` rejects the packet.
    pub async fn read(&self) -> Result<MacEvent<'_>, ()> {
        MacEvent::new(self.tl_read().await)
    }
}
2023-06-24 02:59:48 +02:00
/// Reclaims the single MAC notification buffer when its `EvtBox` is dropped:
/// acknowledges the event to CPU2, clears the IPCC RX flag, then re-opens
/// `Mac::tl_read` by clearing `MAC_EVT_OUT` and waking the parked reader.
impl evt::MemoryManager for Mac {
    /// SAFETY: passing a pointer to something other than a managed event packet is UB
    unsafe fn drop_event_packet(_: *mut EvtPacket) {
        trace!("mac drop event");

        // Write the ack
        // NOTE(review): packet type is `OtAck` (Thread ack) even though this
        // is the MAC subsystem — mirrors the vendor sequence; confirm no
        // MAC-specific ack type is expected here.
        CmdPacket::write_into(
            MAC_802_15_4_NOTIF_RSP_EVT_BUFFER.as_mut_ptr() as *mut _,
            TlPacketType::OtAck,
            0,
            &[],
        );

        // Clear the rx flag
        // The closure returns `None`, so `Ipcc::receive` has nothing to yield;
        // polling it exactly once just services the channel-flag handshake
        // without ever suspending (this fn is not async and must not block).
        let _ = poll_once(Ipcc::receive::<()>(
            channels::cpu2::IPCC_MAC_802_15_4_NOTIFICATION_ACK_CHANNEL,
            || None,
        ));

        // Allow a new read call
        // Order matters: clear the flag before waking so the woken reader
        // observes the buffer as free.
        MAC_EVT_OUT.store(false, Ordering::SeqCst);
        MAC_WAKER.wake();
    }
}