use embassy_hal_common::{into_ref, PeripheralRef};
use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
use embassy_sync::mutex::{Mutex, MutexGuard};
use embedded_storage::nor_flash::{ErrorType, NorFlash, NorFlashError, NorFlashErrorKind, ReadNorFlash};

pub use crate::_generated::flash_regions::*;
pub use crate::pac::{FLASH_BASE, FLASH_SIZE, WRITE_SIZE};
use crate::peripherals::FLASH;
use crate::Peripheral;

#[cfg_attr(any(flash_wl, flash_wb, flash_l0, flash_l1, flash_l4), path = "l.rs")]
#[cfg_attr(flash_f3, path = "f3.rs")]
#[cfg_attr(flash_f4, path = "f4.rs")]
#[cfg_attr(flash_f7, path = "f7.rs")]
#[cfg_attr(flash_h7, path = "h7.rs")]
mod family;
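
// The `cfg_attr` attributes above pick the flash implementation for the current chip family at
// compile time, so `family` resolves to exactly one of `l.rs`, `f3.rs`, `f4.rs`, `f7.rs` or `h7.rs`.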

pub struct Flash<'d> {
    _inner: PeripheralRef<'d, FLASH>,
}

pub struct FlashRegionSettings {
    pub base: usize,
    pub size: usize,
    pub erase_size: usize,
    pub write_size: usize,
    pub erase_value: u8,
}

#[derive(Debug, PartialEq)]
pub struct FlashSector {
    pub index: u8,
    pub start: u32,
    pub size: u32,
}
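
// Serializes write/erase operations that may be issued concurrently against different flash
// regions; claimed through `take_lock_spin()` below.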
static REGION_LOCK: Mutex<CriticalSectionRawMutex, ()> = Mutex::new(());

impl<'d> Flash<'d> {
    pub fn new(p: impl Peripheral<P = FLASH> + 'd) -> Self {
        into_ref!(p);
        Self { _inner: p }
    }

    pub fn into_regions(self) -> FlashRegions {
        FlashRegions::take()
    }

    pub fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
        if offset as usize + bytes.len() > FLASH_SIZE {
            return Err(Error::Size);
        }

        let first_address = FLASH_BASE as u32 + offset;
        let flash_data = unsafe { core::slice::from_raw_parts(first_address as *const u8, bytes.len()) };
        bytes.copy_from_slice(flash_data);
        Ok(())
    }

    pub fn blocking_write(&mut self, offset: u32, buf: &[u8]) -> Result<(), Error> {
        if offset as usize + buf.len() > FLASH_SIZE {
            return Err(Error::Size);
        }
        if offset as usize % WRITE_SIZE != 0 || buf.len() % WRITE_SIZE != 0 {
            return Err(Error::Unaligned);
        }

        let start_address = FLASH_BASE as u32 + offset;
        trace!("Writing {} bytes at 0x{:x}", buf.len(), start_address);

        // No need to take the region lock here as we hold the only mutable flash reference.
        unsafe {
            family::clear_all_err();
            family::unlock();
            let res = Flash::blocking_write_all(start_address, buf);
            family::lock();
            res
        }
    }

    unsafe fn blocking_write_all(start_address: u32, buf: &[u8]) -> Result<(), Error> {
        family::begin_write();
        let mut address = start_address;
        for chunk in buf.chunks(WRITE_SIZE) {
            let res = unsafe { family::blocking_write(address, chunk.try_into().unwrap()) };
            if res.is_err() {
                family::end_write();
                return res;
            }
            address += WRITE_SIZE as u32;
        }

        family::end_write();
        Ok(())
    }

    pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
        if to < from || to as usize > FLASH_SIZE {
            return Err(Error::Size);
        }

        let start_address = FLASH_BASE as u32 + from;
        let end_address = FLASH_BASE as u32 + to;
        if !family::is_eraseable_range(start_address, end_address) {
            return Err(Error::Unaligned);
        }
        trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address);

        unsafe {
            family::clear_all_err();
            family::unlock();
            let res = family::blocking_erase(start_address, end_address);
            family::lock();
            res
        }
    }
}
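
// A minimal usage sketch (not part of this module): driving the whole-flash API from application
// code after `embassy_stm32::init`. The `0x3_F000` offset and the 32-byte buffer are illustrative
// assumptions; valid offsets, erase bounds and write alignment depend on the chip's FLASH_SIZE,
// erase granularity and WRITE_SIZE.
//
//     let p = embassy_stm32::init(Default::default());
//     let mut flash = Flash::new(p.FLASH);
//
//     let mut buf = [0u8; 32];
//     flash.blocking_read(0x3_F000, &mut buf).unwrap();  // offsets are relative to FLASH_BASE
//     flash.blocking_erase(0x3_F000, 0x4_0000).unwrap(); // range must be erasable on this family
//     flash.blocking_write(0x3_F000, &buf).unwrap();     // offset and length must be WRITE_SIZE-aligned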

impl Drop for Flash<'_> {
    fn drop(&mut self) {
        unsafe { family::lock() };
    }
}

impl Drop for FlashRegions {
    fn drop(&mut self) {
        unsafe { family::lock() };
    }
}
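
// Both `Drop` impls re-lock the flash controller, so dropping the driver (or the split regions)
// always leaves the flash write/erase interface locked again.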

pub trait FlashRegion {
    const SETTINGS: FlashRegionSettings;

    fn blocking_read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Error> {
        if offset as usize + bytes.len() > Self::SETTINGS.size {
            return Err(Error::Size);
        }

        let first_address = Self::SETTINGS.base as u32 + offset;
        let flash_data = unsafe { core::slice::from_raw_parts(first_address as *const u8, bytes.len()) };
        bytes.copy_from_slice(flash_data);
        Ok(())
    }

    fn blocking_write(&mut self, offset: u32, buf: &[u8]) -> Result<(), Error> {
        if offset as usize + buf.len() > Self::SETTINGS.size {
            return Err(Error::Size);
        }
        if offset as usize % Self::SETTINGS.write_size != 0 || buf.len() % Self::SETTINGS.write_size != 0 {
            return Err(Error::Unaligned);
        }

        let start_address = Self::SETTINGS.base as u32 + offset;
        trace!("Writing {} bytes at 0x{:x}", buf.len(), start_address);

        // Protect against simultaneous write/erase to multiple regions.
        let _guard = take_lock_spin();

        unsafe {
            family::clear_all_err();
            family::unlock();
            let res = Flash::blocking_write_all(start_address, buf);
            family::lock();
            res
        }
    }

    fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
        if to < from || to as usize > Self::SETTINGS.size {
            return Err(Error::Size);
        }
        if (from as usize % Self::SETTINGS.erase_size) != 0 || (to as usize % Self::SETTINGS.erase_size) != 0 {
            return Err(Error::Unaligned);
        }

        let start_address = Self::SETTINGS.base as u32 + from;
        let end_address = Self::SETTINGS.base as u32 + to;
        trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address);

        // Protect against simultaneous write/erase to multiple regions.
        let _guard = take_lock_spin();

        unsafe {
            family::clear_all_err();
            family::unlock();
            let res = family::blocking_erase(start_address, end_address);
            family::lock();
            res
        }
    }
}
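
// Sketch of what the generated code in `crate::_generated::flash_regions` provides for each region
// (the `Bank1Region` name and the settings values are hypothetical; the real constants are emitted
// by the build script for the selected chip):
//
//     pub struct Bank1Region;
//
//     impl FlashRegion for Bank1Region {
//         const SETTINGS: FlashRegionSettings = FlashRegionSettings {
//             base: 0x0800_0000,
//             size: 128 * 1024,
//             erase_size: 2 * 1024,
//             write_size: 8,
//             erase_value: 0xFF,
//         };
//     }
//
// With only `SETTINGS` supplied, a region picks up the default `blocking_read`/`blocking_write`/
// `blocking_erase` implementations above, bounds-checked against that region's own base and size.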

fn take_lock_spin() -> MutexGuard<'static, CriticalSectionRawMutex, ()> {
    loop {
        if let Ok(guard) = REGION_LOCK.try_lock() {
            return guard;
        }
    }
}
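
// Design note: the blocking region API cannot `.await`, so the guard is acquired by spinning on
// `try_lock` instead of awaiting `REGION_LOCK.lock()`; the lock is held for the full duration of a
// single blocking write or erase call.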

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    Prog,
    Size,
    Miss,
    Seq,
    Protected,
    Unaligned,
    Parallelism,
}

impl NorFlashError for Error {
    fn kind(&self) -> NorFlashErrorKind {
        match self {
            Self::Size => NorFlashErrorKind::OutOfBounds,
            Self::Unaligned => NorFlashErrorKind::NotAligned,
            _ => NorFlashErrorKind::Other,
        }
    }
}

foreach_flash_region! {
    ($name:ident) => {
        impl ErrorType for crate::_generated::flash_regions::$name {
            type Error = Error;
        }

        impl ReadNorFlash for crate::_generated::flash_regions::$name {
            const READ_SIZE: usize = <crate::_generated::flash_regions::$name as FlashRegion>::SETTINGS.write_size;

            fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
                self.blocking_read(offset, bytes)
            }

            fn capacity(&self) -> usize {
                <crate::_generated::flash_regions::$name as FlashRegion>::SETTINGS.size
            }
        }

        impl NorFlash for crate::_generated::flash_regions::$name {
            const WRITE_SIZE: usize = <crate::_generated::flash_regions::$name as FlashRegion>::SETTINGS.write_size;
            const ERASE_SIZE: usize = <crate::_generated::flash_regions::$name as FlashRegion>::SETTINGS.erase_size;

            fn erase(&mut self, from: u32, to: u32) -> Result<(), Self::Error> {
                self.blocking_erase(from, to)
            }

            fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
                self.blocking_write(offset, bytes)
            }
        }
    };
}
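
// Because each generated region implements the `embedded-storage` traits above, regions can be
// handed to generic storage code. A minimal sketch (the `bank1_region` field name is an assumed
// illustration of what `Flash::into_regions()` exposes on a given chip):
//
//     use embedded_storage::nor_flash::NorFlash;
//
//     fn erase_first_sector<F: NorFlash>(flash: &mut F) -> Result<(), F::Error> {
//         flash.erase(0, F::ERASE_SIZE as u32)
//     }
//
//     let mut regions = Flash::new(p.FLASH).into_regions();
//     erase_first_sector(&mut regions.bank1_region).unwrap();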