Wrap write/erase operations in cs
This commit is contained in:
parent
a78e10e003
commit
f3dcb5eb22
@ -1,7 +1,5 @@
|
|||||||
use embassy_hal_common::drop::OnDrop;
|
use embassy_hal_common::drop::OnDrop;
|
||||||
use embassy_hal_common::{into_ref, PeripheralRef};
|
use embassy_hal_common::{into_ref, PeripheralRef};
|
||||||
use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
|
|
||||||
use embassy_sync::mutex::{Mutex, MutexGuard};
|
|
||||||
|
|
||||||
use super::{family, Error, FlashLayout, FlashRegion, FlashSector, FLASH_BASE, FLASH_SIZE, WRITE_SIZE};
|
use super::{family, Error, FlashLayout, FlashRegion, FlashSector, FLASH_BASE, FLASH_SIZE, WRITE_SIZE};
|
||||||
use crate::flash::FlashBank;
|
use crate::flash::FlashBank;
|
||||||
@ -22,96 +20,21 @@ impl<'d> Flash<'d> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
|
pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
|
||||||
Self::blocking_read_inner(FLASH_BASE as u32 + offset, bytes)
|
let start_address = FLASH_BASE as u32 + offset;
|
||||||
}
|
blocking_read(start_address, bytes)
|
||||||
|
|
||||||
fn blocking_read_inner(start_address: u32, bytes: &mut [u8]) -> Result<(), Error> {
|
|
||||||
assert!(start_address >= FLASH_BASE as u32);
|
|
||||||
if start_address as usize + bytes.len() > FLASH_BASE + FLASH_SIZE {
|
|
||||||
return Err(Error::Size);
|
|
||||||
}
|
|
||||||
|
|
||||||
let flash_data = unsafe { core::slice::from_raw_parts(start_address as *const u8, bytes.len()) };
|
|
||||||
bytes.copy_from_slice(flash_data);
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn blocking_write(&mut self, offset: u32, buf: &[u8]) -> Result<(), Error> {
|
pub fn blocking_write(&mut self, offset: u32, buf: &[u8]) -> Result<(), Error> {
|
||||||
let start_address = FLASH_BASE as u32 + offset;
|
let start_address = FLASH_BASE as u32 + offset;
|
||||||
|
|
||||||
// No need to take lock here as we only have one mut flash reference.
|
unsafe { blocking_write(start_address, buf) }
|
||||||
|
|
||||||
unsafe { Flash::blocking_write_inner(start_address, buf) }
|
|
||||||
}
|
|
||||||
|
|
||||||
unsafe fn blocking_write_inner(start_address: u32, buf: &[u8]) -> Result<(), Error> {
|
|
||||||
assert!(start_address >= FLASH_BASE as u32);
|
|
||||||
if start_address as usize + buf.len() > FLASH_BASE + FLASH_SIZE {
|
|
||||||
return Err(Error::Size);
|
|
||||||
}
|
|
||||||
if (start_address as usize - FLASH_BASE) % WRITE_SIZE != 0 || buf.len() as usize % WRITE_SIZE != 0 {
|
|
||||||
return Err(Error::Unaligned);
|
|
||||||
}
|
|
||||||
|
|
||||||
trace!("Writing {} bytes at 0x{:x}", buf.len(), start_address);
|
|
||||||
|
|
||||||
family::clear_all_err();
|
|
||||||
family::unlock();
|
|
||||||
family::begin_write();
|
|
||||||
|
|
||||||
let _ = OnDrop::new(|| {
|
|
||||||
family::end_write();
|
|
||||||
family::lock();
|
|
||||||
});
|
|
||||||
|
|
||||||
let mut address = start_address;
|
|
||||||
for chunk in buf.chunks(WRITE_SIZE) {
|
|
||||||
unsafe { family::blocking_write(address, chunk.try_into().unwrap())? };
|
|
||||||
address += WRITE_SIZE as u32;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
|
pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
|
||||||
let start_address = FLASH_BASE as u32 + from;
|
let start_address = FLASH_BASE as u32 + from;
|
||||||
let end_address = FLASH_BASE as u32 + to;
|
let end_address = FLASH_BASE as u32 + to;
|
||||||
|
|
||||||
unsafe { Flash::blocking_erase_inner(start_address, end_address) }
|
unsafe { blocking_erase(start_address, end_address) }
|
||||||
}
|
|
||||||
|
|
||||||
unsafe fn blocking_erase_inner(start_address: u32, end_address: u32) -> Result<(), Error> {
|
|
||||||
let regions = family::get_flash_regions();
|
|
||||||
|
|
||||||
// Test if the address range is aligned at sector base addresses
|
|
||||||
let mut address = start_address;
|
|
||||||
while address < end_address {
|
|
||||||
let sector = get_sector(address, regions);
|
|
||||||
if sector.start != address {
|
|
||||||
return Err(Error::Unaligned);
|
|
||||||
}
|
|
||||||
address += sector.size;
|
|
||||||
}
|
|
||||||
if address != end_address {
|
|
||||||
return Err(Error::Unaligned);
|
|
||||||
}
|
|
||||||
|
|
||||||
trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address);
|
|
||||||
|
|
||||||
family::clear_all_err();
|
|
||||||
family::unlock();
|
|
||||||
|
|
||||||
let _ = OnDrop::new(|| {
|
|
||||||
family::lock();
|
|
||||||
});
|
|
||||||
|
|
||||||
let mut address = start_address;
|
|
||||||
while address < end_address {
|
|
||||||
let sector = get_sector(address, regions);
|
|
||||||
trace!("Erasing sector: {}", sector);
|
|
||||||
family::blocking_erase_sector(&sector)?;
|
|
||||||
address += sector.size;
|
|
||||||
}
|
|
||||||
Ok(())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn release(self) -> PeripheralRef<'d, crate::peripherals::FLASH> {
|
pub(crate) fn release(self) -> PeripheralRef<'d, crate::peripherals::FLASH> {
|
||||||
@ -132,14 +55,79 @@ impl Drop for FlashLayout<'_> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static REGION_LOCK: Mutex<CriticalSectionRawMutex, ()> = Mutex::new(());
|
fn blocking_read(start_address: u32, bytes: &mut [u8]) -> Result<(), Error> {
|
||||||
|
assert!(start_address >= FLASH_BASE as u32);
|
||||||
|
if start_address as usize + bytes.len() > FLASH_BASE + FLASH_SIZE {
|
||||||
|
return Err(Error::Size);
|
||||||
|
}
|
||||||
|
|
||||||
fn take_lock_spin() -> MutexGuard<'static, CriticalSectionRawMutex, ()> {
|
let flash_data = unsafe { core::slice::from_raw_parts(start_address as *const u8, bytes.len()) };
|
||||||
loop {
|
bytes.copy_from_slice(flash_data);
|
||||||
if let Ok(guard) = REGION_LOCK.try_lock() {
|
Ok(())
|
||||||
return guard;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
unsafe fn blocking_write(start_address: u32, buf: &[u8]) -> Result<(), Error> {
|
||||||
|
assert!(start_address >= FLASH_BASE as u32);
|
||||||
|
if start_address as usize + buf.len() > FLASH_BASE + FLASH_SIZE {
|
||||||
|
return Err(Error::Size);
|
||||||
}
|
}
|
||||||
|
if (start_address as usize - FLASH_BASE) % WRITE_SIZE != 0 || buf.len() as usize % WRITE_SIZE != 0 {
|
||||||
|
return Err(Error::Unaligned);
|
||||||
|
}
|
||||||
|
|
||||||
|
trace!("Writing {} bytes at 0x{:x}", buf.len(), start_address);
|
||||||
|
|
||||||
|
let mut address = start_address;
|
||||||
|
for chunk in buf.chunks(WRITE_SIZE) {
|
||||||
|
critical_section::with(|_| {
|
||||||
|
family::clear_all_err();
|
||||||
|
family::unlock();
|
||||||
|
family::begin_write();
|
||||||
|
let _ = OnDrop::new(|| {
|
||||||
|
family::end_write();
|
||||||
|
family::lock();
|
||||||
|
});
|
||||||
|
family::blocking_write(address, chunk.try_into().unwrap())
|
||||||
|
})?;
|
||||||
|
address += WRITE_SIZE as u32;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
unsafe fn blocking_erase(start_address: u32, end_address: u32) -> Result<(), Error> {
|
||||||
|
let regions = family::get_flash_regions();
|
||||||
|
|
||||||
|
// Test if the address range is aligned at sector base addresses
|
||||||
|
let mut address = start_address;
|
||||||
|
while address < end_address {
|
||||||
|
let sector = get_sector(address, regions);
|
||||||
|
if sector.start != address {
|
||||||
|
return Err(Error::Unaligned);
|
||||||
|
}
|
||||||
|
address += sector.size;
|
||||||
|
}
|
||||||
|
if address != end_address {
|
||||||
|
return Err(Error::Unaligned);
|
||||||
|
}
|
||||||
|
|
||||||
|
trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address);
|
||||||
|
|
||||||
|
let mut address = start_address;
|
||||||
|
while address < end_address {
|
||||||
|
let sector = get_sector(address, regions);
|
||||||
|
trace!("Erasing sector: {}", sector);
|
||||||
|
|
||||||
|
critical_section::with(|_| {
|
||||||
|
family::clear_all_err();
|
||||||
|
family::unlock();
|
||||||
|
let _ = OnDrop::new(|| {
|
||||||
|
family::lock();
|
||||||
|
});
|
||||||
|
family::blocking_erase_sector(&sector)
|
||||||
|
})?;
|
||||||
|
address += sector.size;
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector {
|
pub(crate) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector {
|
||||||
@ -169,38 +157,19 @@ pub(crate) fn get_sector(address: u32, regions: &[&FlashRegion]) -> FlashSector
|
|||||||
|
|
||||||
impl FlashRegion {
|
impl FlashRegion {
|
||||||
pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
|
pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
|
||||||
unsafe { self.blocking_read_inner(offset, bytes) }
|
let start_address = self.base + offset;
|
||||||
|
blocking_read(start_address, bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn blocking_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> {
|
pub fn blocking_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> {
|
||||||
unsafe { self.blocking_write_inner(offset, bytes) }
|
let start_address = self.base + offset;
|
||||||
|
unsafe { blocking_write(start_address, bytes) }
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
|
pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
|
||||||
unsafe { self.blocking_erase_inner(from, to) }
|
|
||||||
}
|
|
||||||
|
|
||||||
unsafe fn blocking_read_inner(&self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
|
|
||||||
Flash::blocking_read_inner(self.base + offset, bytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
unsafe fn blocking_write_inner(&self, offset: u32, bytes: &[u8]) -> Result<(), Error> {
|
|
||||||
let start_address = self.base + offset;
|
|
||||||
|
|
||||||
// Protect against simultaneous write/erase to multiple regions.
|
|
||||||
let _guard = take_lock_spin();
|
|
||||||
|
|
||||||
Flash::blocking_write_inner(start_address, bytes)
|
|
||||||
}
|
|
||||||
|
|
||||||
unsafe fn blocking_erase_inner(&self, from: u32, to: u32) -> Result<(), Error> {
|
|
||||||
let start_address = self.base + from;
|
let start_address = self.base + from;
|
||||||
let end_address = self.base + to;
|
let end_address = self.base + to;
|
||||||
|
unsafe { blocking_erase(start_address, end_address) }
|
||||||
// Protect against simultaneous write/erase to multiple regions.
|
|
||||||
let _guard = take_lock_spin();
|
|
||||||
|
|
||||||
Flash::blocking_erase_inner(start_address, end_address)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -208,15 +177,19 @@ foreach_flash_region! {
|
|||||||
($type_name:ident, $write_size:literal, $erase_size:literal) => {
|
($type_name:ident, $write_size:literal, $erase_size:literal) => {
|
||||||
impl crate::_generated::flash_regions::$type_name {
|
impl crate::_generated::flash_regions::$type_name {
|
||||||
pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
|
pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
|
||||||
unsafe { self.0.blocking_read_inner(offset, bytes) }
|
let start_address = self.0.base + offset;
|
||||||
|
blocking_read(start_address, bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn blocking_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> {
|
pub fn blocking_write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Error> {
|
||||||
unsafe { self.0.blocking_write_inner(offset, bytes) }
|
let start_address = self.0.base + offset;
|
||||||
|
unsafe { blocking_write(start_address, bytes) }
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
|
pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
|
||||||
unsafe { self.0.blocking_erase_inner(from, to) }
|
let start_address = self.0.base + from;
|
||||||
|
let end_address = self.0.base + to;
|
||||||
|
unsafe { blocking_erase(start_address, end_address) }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -228,7 +201,7 @@ foreach_flash_region! {
|
|||||||
const READ_SIZE: usize = 1;
|
const READ_SIZE: usize = 1;
|
||||||
|
|
||||||
fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
|
fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
|
||||||
unsafe { self.0.blocking_read_inner(offset, bytes) }
|
self.blocking_read(offset, bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn capacity(&self) -> usize {
|
fn capacity(&self) -> usize {
|
||||||
@ -241,11 +214,11 @@ foreach_flash_region! {
|
|||||||
const ERASE_SIZE: usize = $erase_size;
|
const ERASE_SIZE: usize = $erase_size;
|
||||||
|
|
||||||
fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
|
fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
|
||||||
unsafe { self.0.blocking_write_inner(offset, bytes) }
|
self.blocking_write(offset, bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
|
fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
|
||||||
unsafe { self.0.blocking_erase_inner(from, to) }
|
self.blocking_erase(from, to)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
Loading…
Reference in New Issue
Block a user