diff --git a/embassy-stm32/src/flash/common.rs b/embassy-stm32/src/flash/common.rs
index 47e94f75..59429e34 100644
--- a/embassy-stm32/src/flash/common.rs
+++ b/embassy-stm32/src/flash/common.rs
@@ -1,7 +1,5 @@
 use embassy_hal_common::drop::OnDrop;
 use embassy_hal_common::{into_ref, PeripheralRef};
-use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
-use embassy_sync::mutex::{Mutex, MutexGuard};
 
 use super::{family, Error, FlashLayout, FlashRegion, FlashSector, FLASH_BASE, FLASH_SIZE, WRITE_SIZE};
 use crate::flash::FlashBank;
@@ -22,96 +20,21 @@ impl<'d> Flash<'d> {
     }
 
     pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
-        Self::blocking_read_inner(FLASH_BASE as u32 + offset, bytes)
-    }
-
-    fn blocking_read_inner(start_address: u32, bytes: &mut [u8]) -> Result<(), Error> {
-        assert!(start_address >= FLASH_BASE as u32);
-        if start_address as usize + bytes.len() > FLASH_BASE + FLASH_SIZE {
-            return Err(Error::Size);
-        }
-
-        let flash_data = unsafe { core::slice::from_raw_parts(start_address as *const u8, bytes.len()) };
-        bytes.copy_from_slice(flash_data);
-        Ok(())
+        let start_address = FLASH_BASE as u32 + offset;
+        blocking_read(start_address, bytes)
     }
 
     pub fn blocking_write(&mut self, offset: u32, buf: &[u8]) -> Result<(), Error> {
         let start_address = FLASH_BASE as u32 + offset;
-
-        // No need to take lock here as we only have one mut flash reference.
-
-        unsafe { Flash::blocking_write_inner(start_address, buf) }
-    }
-
-    unsafe fn blocking_write_inner(start_address: u32, buf: &[u8]) -> Result<(), Error> {
-        assert!(start_address >= FLASH_BASE as u32);
-        if start_address as usize + buf.len() > FLASH_BASE + FLASH_SIZE {
-            return Err(Error::Size);
-        }
-        if (start_address as usize - FLASH_BASE) % WRITE_SIZE != 0 || buf.len() as usize % WRITE_SIZE != 0 {
-            return Err(Error::Unaligned);
-        }
-
-        trace!("Writing {} bytes at 0x{:x}", buf.len(), start_address);
-
-        family::clear_all_err();
-        family::unlock();
-        family::begin_write();
-
-        let _ = OnDrop::new(|| {
-            family::end_write();
-            family::lock();
-        });
-
-        let mut address = start_address;
-        for chunk in buf.chunks(WRITE_SIZE) {
-            unsafe { family::blocking_write(address, chunk.try_into().unwrap())? };
-            address += WRITE_SIZE as u32;
-        }
-        Ok(())
+        unsafe { blocking_write(start_address, buf) }
     }
 
     pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
         let start_address = FLASH_BASE as u32 + from;
         let end_address = FLASH_BASE as u32 + to;
-        unsafe { Flash::blocking_erase_inner(start_address, end_address) }
-    }
-
-    unsafe fn blocking_erase_inner(start_address: u32, end_address: u32) -> Result<(), Error> {
-        let regions = family::get_flash_regions();
-
-        // Test if the address range is aligned at sector base addresses
-        let mut address = start_address;
-        while address < end_address {
-            let sector = get_sector(address, regions);
-            if sector.start != address {
-                return Err(Error::Unaligned);
-            }
-            address += sector.size;
-        }
-        if address != end_address {
-            return Err(Error::Unaligned);
-        }
-
-        trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address);
-
-        family::clear_all_err();
-        family::unlock();
-
-        let _ = OnDrop::new(|| {
-            family::lock();
-        });
-
-        let mut address = start_address;
-        while address < end_address {
-            let sector = get_sector(address, regions);
-            trace!("Erasing sector: {}", sector);
-            family::blocking_erase_sector(&sector)?;
-            address += sector.size;
-        }
-        Ok(())
+        unsafe { blocking_erase(start_address, end_address) }
     }
 
     pub(crate) fn release(self) -> PeripheralRef<'d, crate::peripherals::FLASH> {
@@ -132,14 +55,79 @@ impl Drop for FlashLayout<'_> {
     }
 }
 
-static REGION_LOCK: Mutex<CriticalSectionRawMutex, ()> = Mutex::new(());
-
-fn take_lock_spin() -> MutexGuard<'static, CriticalSectionRawMutex, ()> {
-    loop {
-        if let Ok(guard) = REGION_LOCK.try_lock() {
-            return guard;
-        }
+fn blocking_read(start_address: u32, bytes: &mut [u8]) -> Result<(), Error> {
+    assert!(start_address >= FLASH_BASE as u32);
+    if start_address as usize + bytes.len() > FLASH_BASE + FLASH_SIZE {
+        return Err(Error::Size);
     }
+
+    let flash_data = unsafe { core::slice::from_raw_parts(start_address as *const u8, bytes.len()) };
+    bytes.copy_from_slice(flash_data);
+    Ok(())
+}
+
+unsafe fn blocking_write(start_address: u32, buf: &[u8]) -> Result<(), Error> {
+    assert!(start_address >= FLASH_BASE as u32);
+    if start_address as usize + buf.len() > FLASH_BASE + FLASH_SIZE {
+        return Err(Error::Size);
+    }
+    if (start_address as usize - FLASH_BASE) % WRITE_SIZE != 0 || buf.len() as usize % WRITE_SIZE != 0 {
+        return Err(Error::Unaligned);
+    }
+
+    trace!("Writing {} bytes at 0x{:x}", buf.len(), start_address);
+
+    let mut address = start_address;
+    for chunk in buf.chunks(WRITE_SIZE) {
+        critical_section::with(|_| {
+            family::clear_all_err();
+            family::unlock();
+            family::begin_write();
+            let _ = OnDrop::new(|| {
+                family::end_write();
+                family::lock();
+            });
+            family::blocking_write(address, chunk.try_into().unwrap())
+        })?;
+        address += WRITE_SIZE as u32;
+    }
+    Ok(())
+}
+
+unsafe fn blocking_erase(start_address: u32, end_address: u32) -> Result<(), Error> {
+    let regions = family::get_flash_regions();
+
+    // Test if the address range is aligned at sector base addresses
+    let mut address = start_address;
+    while address < end_address {
+        let sector = get_sector(address, regions);
+        if sector.start != address {
+            return Err(Error::Unaligned);
+        }
+        address += sector.size;
+    }
+    if address != end_address {
+        return Err(Error::Unaligned);
+    }
+
+    trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address);
+
+    let mut address = start_address;
+    while address < end_address {
+        let sector = get_sector(address, regions);
+        trace!("Erasing sector: {}", sector);
+
+        critical_section::with(|_| {
+            family::clear_all_err();
+            family::unlock();
+            let _ = OnDrop::new(|| {
+                family::lock();
+            });
+            family::blocking_erase_sector(&sector)
+        })?;
+        address += sector.size;
+    }
+    Ok(())
 }
 
 pub(crate) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector {
@@ -169,38 +157,19 @@ pub(crate) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector
 
 impl FlashRegion {
     pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
-        unsafe { self.blocking_read_inner(offset, bytes) }
+        let start_address = self.base + offset;
+        blocking_read(start_address, bytes)
     }
 
     pub fn blocking_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> {
-        unsafe { self.blocking_write_inner(offset, bytes) }
+        let start_address = self.base + offset;
+        unsafe { blocking_write(start_address, bytes) }
     }
 
     pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
-        unsafe { self.blocking_erase_inner(from, to) }
-    }
-
-    unsafe fn blocking_read_inner(&self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
-        Flash::blocking_read_inner(self.base + offset, bytes)
-    }
-
-    unsafe fn blocking_write_inner(&self, offset: u32, bytes: &[u8]) -> Result<(), Error> {
-        let start_address = self.base + offset;
-
-        // Protect agains simultaneous write/erase to multiple regions.
-        let _guard = take_lock_spin();
-
-        Flash::blocking_write_inner(start_address, bytes)
-    }
-
-    unsafe fn blocking_erase_inner(&self, from: u32, to: u32) -> Result<(), Error> {
         let start_address = self.base + from;
         let end_address = self.base + to;
-
-        // Protect agains simultaneous write/erase to multiple regions.
-        let _guard = take_lock_spin();
-
-        Flash::blocking_erase_inner(start_address, end_address)
+        unsafe { blocking_erase(start_address, end_address) }
    }
 }
 
@@ -208,15 +177,19 @@ foreach_flash_region! {
     ($type_name:ident, $write_size:literal, $erase_size:literal) => {
         impl crate::_generated::flash_regions::$type_name {
             pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
-                unsafe { self.0.blocking_read_inner(offset, bytes) }
+                let start_address = self.0.base + offset;
+                blocking_read(start_address, bytes)
             }
 
             pub fn blocking_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> {
-                unsafe { self.0.blocking_write_inner(offset, bytes) }
+                let start_address = self.0.base + offset;
+                unsafe { blocking_write(start_address, bytes) }
             }
 
             pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
-                unsafe { self.0.blocking_erase_inner(from, to) }
+                let start_address = self.0.base + from;
+                let end_address = self.0.base + to;
+                unsafe { blocking_erase(start_address, end_address) }
             }
         }
 
@@ -228,7 +201,7 @@ foreach_flash_region! {
             const READ_SIZE: usize = 1;
 
             fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
-                unsafe { self.0.blocking_read_inner(offset, bytes) }
+                self.blocking_read(offset, bytes)
             }
 
             fn capacity(&self) -> usize {
@@ -241,11 +214,11 @@ foreach_flash_region! {
             const ERASE_SIZE: usize = $erase_size;
 
             fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
-                unsafe { self.0.blocking_write_inner(offset, bytes) }
+                self.blocking_write(offset, bytes)
             }
 
             fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
-                unsafe { self.0.blocking_erase_inner(from, to) }
+                self.blocking_erase(from, to)
             }
         }
     };