use core::sync::atomic::{fence, Ordering};

use embassy_net::{Packet, PacketBox, PacketBoxExt, PacketBuf};
use vcell::VolatileCell;

use crate::pac::ETH;

#[non_exhaustive]
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    NoBufferAvailable,
    // TODO: Break down this error into several others
    TransmissionError,
}

/// Transmit and Receive Descriptor fields
#[allow(dead_code)]
mod emac_consts {
    pub const EMAC_DES3_OWN: u32 = 0x8000_0000;
    pub const EMAC_DES3_CTXT: u32 = 0x4000_0000;
    pub const EMAC_DES3_FD: u32 = 0x2000_0000;
    pub const EMAC_DES3_LD: u32 = 0x1000_0000;
    pub const EMAC_DES3_ES: u32 = 0x0000_8000;
    pub const EMAC_DES0_BUF1AP: u32 = 0xFFFF_FFFF;
    pub const EMAC_TDES2_IOC: u32 = 0x8000_0000;
    pub const EMAC_TDES2_B1L: u32 = 0x0000_3FFF;
    pub const EMAC_RDES3_IOC: u32 = 0x4000_0000;
    pub const EMAC_RDES3_PL: u32 = 0x0000_7FFF;
    pub const EMAC_RDES3_BUF1V: u32 = 0x0100_0000;
    pub const EMAC_RDES3_PKTLEN: u32 = 0x0000_7FFF;
}
use emac_consts::*;

/// Transmit Descriptor representation
///
/// * tdes0: transmit buffer address
/// * tdes1:
/// * tdes2: buffer lengths
/// * tdes3: control and payload/frame length
#[repr(C)]
struct TDes {
    tdes0: VolatileCell<u32>,
    tdes1: VolatileCell<u32>,
    tdes2: VolatileCell<u32>,
    tdes3: VolatileCell<u32>,
}

impl TDes {
    pub const fn new() -> Self {
        Self {
            tdes0: VolatileCell::new(0),
            tdes1: VolatileCell::new(0),
            tdes2: VolatileCell::new(0),
            tdes3: VolatileCell::new(0),
        }
    }

    /// Return true if this TDes is not currently owned by the DMA
    pub fn available(&self) -> bool {
        self.tdes3.get() & EMAC_DES3_OWN == 0
    }
}

pub(crate) struct TDesRing<const N: usize> {
    td: [TDes; N],
    buffers: [Option<PacketBuf>; N],
    tdidx: usize,
}

impl<const N: usize> TDesRing<N> {
    pub const fn new() -> Self {
        const TDES: TDes = TDes::new();
        const BUFFERS: Option<PacketBuf> = None;

        Self {
            td: [TDES; N],
            buffers: [BUFFERS; N],
            tdidx: 0,
        }
    }

    /// Initialise this TDesRing. Assume the TDesRing is corrupt.
    ///
    /// The current memory address of the buffers inside this TDesRing
    /// will be stored in the descriptors, so ensure the TDesRing is
    /// not moved after initialisation.
    pub(crate) fn init(&mut self) {
        assert!(N > 0);

        for td in self.td.iter_mut() {
            *td = TDes::new();
        }
        self.tdidx = 0;

        // Initialize the pointers in the DMA engine. (There will be a memory barrier later
        // before the DMA engine is enabled.)
        // NOTE(unsafe) Used for atomic writes
        unsafe {
            let dma = ETH.ethernet_dma();
            dma.dmactx_dlar()
                .write(|w| w.set_tdesla(&self.td as *const _ as u32));
            dma.dmactx_rlr().write(|w| w.set_tdrl((N as u16) - 1));
            dma.dmactx_dtpr()
                .write(|w| w.set_tdt(&self.td[0] as *const _ as u32));
        }
    }

    /// Return true if a TDes is available for use
    pub(crate) fn available(&self) -> bool {
        self.td[self.tdidx].available()
    }

    pub(crate) fn transmit(&mut self, pkt: PacketBuf) -> Result<(), Error> {
        if !self.available() {
            return Err(Error::NoBufferAvailable);
        }
        let x = self.tdidx;
        let td = &mut self.td[x];

        let pkt_len = pkt.len();
        assert!(pkt_len as u32 <= EMAC_TDES2_B1L);
        let address = pkt.as_ptr() as u32;

        // Read format
        td.tdes0.set(address);
        td.tdes2
            .set(pkt_len as u32 & EMAC_TDES2_B1L | EMAC_TDES2_IOC);

        // FD: Contains first buffer of packet
        // LD: Contains last buffer of packet
        // Give the DMA engine ownership
        td.tdes3.set(EMAC_DES3_FD | EMAC_DES3_LD | EMAC_DES3_OWN);

        self.buffers[x].replace(pkt);

        // Ensure changes to the descriptor are committed before the DMA engine sees the
        // tail pointer store. This will generate a DMB instruction.
        // "Preceding reads and writes cannot be moved past subsequent writes."
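        //
        // The descriptor words above were written through `VolatileCell`, so the
        // compiler already keeps them ordered relative to the (also volatile)
        // register write below; the fence is for the hardware, keeping the
        // Cortex-M7 store buffer from letting the DMA engine observe the new tail
        // pointer before the descriptor contents have reached memory.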
        fence(Ordering::Release);

        // Move the tail pointer (TPR) to the next descriptor
        let x = (x + 1) % N;
        // NOTE(unsafe) Atomic write
        unsafe {
            ETH.ethernet_dma()
                .dmactx_dtpr()
                .write(|w| w.set_tdt(&self.td[x] as *const _ as u32));
        }
        self.tdidx = x;

        Ok(())
    }

    pub(crate) fn on_interrupt(&mut self) -> Result<(), Error> {
        let previous = (self.tdidx + N - 1) % N;
        let td = &self.td[previous];

        // DMB to ensure that we are reading an updated value, probably not needed at the hardware
        // level, but this is also a hint to the compiler that we're syncing on the buffer.
        fence(Ordering::SeqCst);

        let tdes3 = td.tdes3.get();

        if tdes3 & EMAC_DES3_OWN != 0 {
            // Transmission isn't done yet, probably a receive interrupt that fired this
            return Ok(());
        }
        assert!(tdes3 & EMAC_DES3_CTXT == 0);

        // Release the buffer
        self.buffers[previous].take();

        if tdes3 & EMAC_DES3_ES != 0 {
            Err(Error::TransmissionError)
        } else {
            Ok(())
        }
    }
}

/// Receive Descriptor representation
///
/// * rdes0: receive buffer address
/// * rdes1:
/// * rdes2:
/// * rdes3: OWN and Status
#[repr(C)]
struct RDes {
    rdes0: VolatileCell<u32>,
    rdes1: VolatileCell<u32>,
    rdes2: VolatileCell<u32>,
    rdes3: VolatileCell<u32>,
}

impl RDes {
    pub const fn new() -> Self {
        Self {
            rdes0: VolatileCell::new(0),
            rdes1: VolatileCell::new(0),
            rdes2: VolatileCell::new(0),
            rdes3: VolatileCell::new(0),
        }
    }

    /// Return true if this RDes is acceptable to us
    #[inline(always)]
    pub fn valid(&self) -> bool {
        // Write-back descriptor is valid if:
        //
        // Contains first buffer of packet AND contains last buf of
        // packet AND no errors AND not a context descriptor
        self.rdes3.get() & (EMAC_DES3_FD | EMAC_DES3_LD | EMAC_DES3_ES | EMAC_DES3_CTXT)
            == (EMAC_DES3_FD | EMAC_DES3_LD)
    }

    /// Return true if this RDes is not currently owned by the DMA
    #[inline(always)]
    pub fn available(&self) -> bool {
        self.rdes3.get() & EMAC_DES3_OWN == 0 // Owned by us
    }

    #[inline(always)]
    pub fn set_ready(&mut self, buf_addr: u32) {
        self.rdes0.set(buf_addr);
        self.rdes3
            .set(EMAC_RDES3_BUF1V | EMAC_RDES3_IOC | EMAC_DES3_OWN);
    }
}

pub(crate) struct RDesRing<const N: usize> {
    rd: [RDes; N],
    buffers: [Option<PacketBox>; N],
    read_idx: usize,
    tail_idx: usize,
}

impl<const N: usize> RDesRing<N> {
    pub const fn new() -> Self {
        const RDES: RDes = RDes::new();
        const BUFFERS: Option<PacketBox> = None;

        Self {
            rd: [RDES; N],
            buffers: [BUFFERS; N],
            read_idx: 0,
            tail_idx: 0,
        }
    }

    pub(crate) fn init(&mut self) {
        assert!(N > 1);

        for desc in self.rd.iter_mut() {
            *desc = RDes::new();
        }

        let mut last_index = 0;
        for (index, buf) in self.buffers.iter_mut().enumerate() {
            let pkt = match PacketBox::new(Packet::new()) {
                Some(p) => p,
                None => {
                    if index == 0 {
                        panic!("Could not allocate at least one buffer for Ethernet receiving");
                    } else {
                        break;
                    }
                }
            };
            let addr = pkt.as_ptr() as u32;
            *buf = Some(pkt);
            self.rd[index].set_ready(addr);
            last_index = index;
        }
        self.tail_idx = (last_index + 1) % N;

        unsafe {
            let dma = ETH.ethernet_dma();

            dma.dmacrx_dlar()
                .write(|w| w.set_rdesla(self.rd.as_ptr() as u32));
            dma.dmacrx_rlr().write(|w| w.set_rdrl((N as u16) - 1));

            // Point the tail at the last descriptor we armed. The DMA won't consider
            // that one ready, because it (unfortunately) stops at the tail pointer
            // and wraps at the end of the ring, so we can't tell it to stop only
            // after the last buffer.
            let tail_ptr = &self.rd[last_index] as *const _ as u32;
            fence(Ordering::Release);

            dma.dmacrx_dtpr().write(|w| w.set_rdt(tail_ptr));
        }
    }

    pub(crate) fn on_interrupt(&mut self) {
        // TODO!
    }
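    /// Pop a received packet from the ring, if one is ready and valid.
    ///
    /// Besides popping, this tries to allocate a replacement `PacketBox`,
    /// re-arms the descriptor at `tail_idx` with it, and advances the DMA
    /// tail pointer, so the ring is refilled as allocation allows.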
    pub(crate) fn pop_packet(&mut self) -> Option<PacketBuf> {
        // We don't know whether the contents of the M7 write buffer can affect
        // subsequent reads, so use a DMB here just in case. It also serves as a
        // hint to the compiler that we're synchronising on the buffer contents.
        fence(Ordering::SeqCst);

        let read_available = self.rd[self.read_idx].available();
        if !read_available && self.read_idx == self.tail_idx {
            // Nothing to do
            return None;
        }

        let pkt = if read_available {
            let pkt = self.buffers[self.read_idx].take();
            let len = (self.rd[self.read_idx].rdes3.get() & EMAC_RDES3_PKTLEN) as usize;

            assert!(pkt.is_some());
            let valid = self.rd[self.read_idx].valid();

            self.read_idx = (self.read_idx + 1) % N;
            if valid {
                pkt.map(|p| p.slice(0..len))
            } else {
                None
            }
        } else {
            None
        };

        // Try to re-arm the descriptor at tail_idx with a fresh buffer
        match PacketBox::new(Packet::new()) {
            Some(b) => {
                let addr = b.as_ptr() as u32;
                self.buffers[self.tail_idx].replace(b);
                self.rd[self.tail_idx].set_ready(addr);

                // "Preceding reads and writes cannot be moved past subsequent writes."
                fence(Ordering::Release);

                // NOTE(unsafe) Atomic write
                unsafe {
                    ETH.ethernet_dma()
                        .dmacrx_dtpr()
                        .write(|w| w.set_rdt(&self.rd[self.read_idx] as *const _ as u32));
                }

                self.tail_idx = (self.tail_idx + 1) % N;
            }
            None => {}
        }

        pkt
    }
}

pub struct DescriptorRing<const T: usize, const R: usize> {
    pub(crate) tx: TDesRing<T>,
    pub(crate) rx: RDesRing<R>,
}

impl<const T: usize, const R: usize> DescriptorRing<T, R> {
    pub const fn new() -> Self {
        Self {
            tx: TDesRing::new(),
            rx: RDesRing::new(),
        }
    }

    pub fn init(&mut self) {
        self.tx.init();
        self.rx.init();
    }
}
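
// Usage sketch (illustrative only; the ring sizes and surrounding MAC/DMA setup
// below are assumptions, not something this module prescribes). The ring must
// live at a stable address for its whole lifetime, since `init()` stores the
// descriptor and buffer addresses into the DMA registers:
//
//     static mut RING: DescriptorRing<8, 8> = DescriptorRing::new();
//
//     // After clocks, pins and the MAC are configured, but before the DMA
//     // engine is enabled:
//     unsafe { RING.init() };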