Add is_eraseable_range and split write into consecutive parts

This commit is contained in:
Rasmus Melchior Jacobsen 2023-03-25 16:04:45 +01:00
parent 245147634b
commit bc69eb596e
6 changed files with 236 additions and 208 deletions

View File

@ -1,13 +1,13 @@
use core::convert::TryInto; use core::convert::TryInto;
use core::mem::size_of;
use core::ptr::write_volatile; use core::ptr::write_volatile;
use super::FlashRegion; use atomic_polyfill::{fence, Ordering};
use super::{FlashRegion, BANK1, WRITE_SIZE};
use crate::flash::Error; use crate::flash::Error;
use crate::pac; use crate::pac;
pub(crate) const MAX_WRITE_SIZE: usize = super::BANK1::WRITE_SIZE; const ERASE_SIZE: usize = BANK1::ERASE_SIZE;
pub(crate) const MAX_ERASE_SIZE: usize = super::BANK1::ERASE_SIZE;
pub(crate) unsafe fn lock() { pub(crate) unsafe fn lock() {
pac::FLASH.cr().modify(|w| w.set_lock(true)); pac::FLASH.cr().modify(|w| w.set_lock(true));
@ -18,33 +18,35 @@ pub(crate) unsafe fn unlock() {
pac::FLASH.keyr().write(|w| w.set_fkeyr(0xCDEF_89AB)); pac::FLASH.keyr().write(|w| w.set_fkeyr(0xCDEF_89AB));
} }
pub(crate) unsafe fn blocking_write(first_address: u32, buf: &[u8]) -> Result<(), Error> { pub(crate) unsafe fn begin_write() {
assert_eq!(0, WRITE_SIZE % 2);
pac::FLASH.cr().write(|w| w.set_pg(true)); pac::FLASH.cr().write(|w| w.set_pg(true));
let ret = {
let mut ret: Result<(), Error> = Ok(());
let mut address = first_address;
let chunks = buf.chunks_exact(size_of::<u16>());
assert!(chunks.remainder().is_empty());
for chunk in chunks {
write_volatile(address as *mut u16, u16::from_le_bytes(chunk.try_into().unwrap()));
address += chunk.len() as u32;
ret = blocking_wait_ready();
if ret.is_err() {
break;
}
}
ret
};
pac::FLASH.cr().write(|w| w.set_pg(false));
ret
} }
pub(crate) unsafe fn blocking_erase(from_address: u32, to_address: u32) -> Result<(), Error> { pub(crate) unsafe fn end_write() {
for page in (from_address..to_address).step_by(MAX_ERASE_SIZE) { pac::FLASH.cr().write(|w| w.set_pg(false));
}
pub(crate) unsafe fn blocking_write(start_address: u32, buf: &[u8; WRITE_SIZE]) -> Result<(), Error> {
let mut address = start_address;
for chunk in buf.chunks(2) {
write_volatile(address as *mut u16, u16::from_le_bytes(chunk.try_into().unwrap()));
address += chunk.len() as u32;
// prevents parallelism errors
fence(Ordering::SeqCst);
}
blocking_wait_ready()
}
pub(crate) fn is_eraseable_range(start_address: u32, end_address: u32) -> bool {
start_address % ERASE_SIZE as u32 == 0 && end_address % ERASE_SIZE as u32 == 0
}
pub(crate) unsafe fn blocking_erase(start_address: u32, end_address: u32) -> Result<(), Error> {
for page in (start_address..end_address).step_by(ERASE_SIZE) {
pac::FLASH.cr().modify(|w| { pac::FLASH.cr().modify(|w| {
w.set_per(true); w.set_per(true);
}); });
@ -71,7 +73,6 @@ pub(crate) unsafe fn blocking_erase(from_address: u32, to_address: u32) -> Resul
return ret; return ret;
} }
} }
Ok(()) Ok(())
} }
@ -89,7 +90,7 @@ pub(crate) unsafe fn clear_all_err() {
}); });
} }
pub(crate) unsafe fn blocking_wait_ready() -> Result<(), Error> { unsafe fn blocking_wait_ready() -> Result<(), Error> {
loop { loop {
let sr = pac::FLASH.sr().read(); let sr = pac::FLASH.sr().read();

View File

@ -1,23 +1,19 @@
use core::convert::TryInto; use core::convert::TryInto;
use core::mem::size_of;
use core::ptr::write_volatile; use core::ptr::write_volatile;
use core::sync::atomic::{fence, Ordering}; use core::sync::atomic::{fence, Ordering};
use embassy_hal_common::stm32::flash::f4::{get_sector, SECOND_BANK_SECTOR_OFFSET}; use embassy_hal_common::stm32::flash::f4::{get_sector, SECOND_BANK_SECTOR_OFFSET};
use super::{FlashRegion, FLASH_SIZE}; use super::{FLASH_SIZE, WRITE_SIZE};
use crate::flash::Error; use crate::flash::Error;
use crate::pac; use crate::pac;
pub(crate) const MAX_WRITE_SIZE: usize = super::BANK1_REGION3::WRITE_SIZE; fn is_dual_bank() -> bool {
pub(crate) const MAX_ERASE_SIZE: usize = super::BANK1_REGION3::ERASE_SIZE;
unsafe fn is_dual_bank() -> bool {
match FLASH_SIZE / 1024 { match FLASH_SIZE / 1024 {
// 1 MB devices depend on configuration // 1 MB devices depend on configuration
1024 => { 1024 => {
if cfg!(any(stm32f427, stm32f429, stm32f437, stm32f439, stm32f469, stm32f479)) { if cfg!(any(stm32f427, stm32f429, stm32f437, stm32f439, stm32f469, stm32f479)) {
pac::FLASH.optcr().read().db1m() unsafe { pac::FLASH.optcr().read().db1m() }
} else { } else {
false false
} }
@ -38,49 +34,53 @@ pub(crate) unsafe fn unlock() {
pac::FLASH.keyr().write(|w| w.set_key(0xCDEF_89AB)); pac::FLASH.keyr().write(|w| w.set_key(0xCDEF_89AB));
} }
pub(crate) unsafe fn blocking_write(first_address: u32, buf: &[u8]) -> Result<(), Error> { pub(crate) unsafe fn begin_write() {
assert_eq!(0, WRITE_SIZE % 4);
pac::FLASH.cr().write(|w| { pac::FLASH.cr().write(|w| {
w.set_pg(true); w.set_pg(true);
w.set_psize(pac::flash::vals::Psize::PSIZE32); w.set_psize(pac::flash::vals::Psize::PSIZE32);
}); });
let ret = {
let mut ret: Result<(), Error> = Ok(());
let mut address = first_address;
for chunk in buf.chunks(MAX_WRITE_SIZE) {
let vals = chunk.chunks_exact(size_of::<u32>());
assert!(vals.remainder().is_empty());
for val in vals {
write_volatile(address as *mut u32, u32::from_le_bytes(val.try_into().unwrap()));
address += val.len() as u32;
// prevents parallelism errors
fence(Ordering::SeqCst);
}
ret = blocking_wait_ready();
if ret.is_err() {
break;
}
}
ret
};
pac::FLASH.cr().write(|w| w.set_pg(false));
ret
} }
pub(crate) unsafe fn blocking_erase(from_address: u32, to_address: u32) -> Result<(), Error> { pub(crate) unsafe fn end_write() {
let mut addr = from_address; pac::FLASH.cr().write(|w| w.set_pg(false));
let dual_bank = is_dual_bank(); }
while addr < to_address { pub(crate) unsafe fn blocking_write(start_address: u32, buf: &[u8; WRITE_SIZE]) -> Result<(), Error> {
let sector = get_sector(addr, dual_bank, FLASH_SIZE as u32); let mut address = start_address;
erase_sector(sector.index)?; for val in buf.chunks(4) {
addr += sector.size; write_volatile(address as *mut u32, u32::from_le_bytes(val.try_into().unwrap()));
address += val.len() as u32;
// prevents parallelism errors
fence(Ordering::SeqCst);
} }
blocking_wait_ready()
}
pub(crate) fn is_eraseable_range(start_address: u32, end_address: u32) -> bool {
let dual_bank = is_dual_bank();
let mut address = start_address;
while address < end_address {
let sector = get_sector(address, dual_bank, FLASH_SIZE as u32);
if sector.start != address {
return false;
}
address += sector.size;
}
address == end_address
}
pub(crate) unsafe fn blocking_erase(start_address: u32, end_address: u32) -> Result<(), Error> {
let dual_bank = is_dual_bank();
let mut address = start_address;
while address < end_address {
let sector = get_sector(address, dual_bank, FLASH_SIZE as u32);
erase_sector(sector.index)?;
address += sector.size;
}
Ok(()) Ok(())
} }
@ -116,7 +116,7 @@ pub(crate) unsafe fn clear_all_err() {
}); });
} }
pub(crate) unsafe fn blocking_wait_ready() -> Result<(), Error> { unsafe fn blocking_wait_ready() -> Result<(), Error> {
loop { loop {
let sr = pac::FLASH.sr().read(); let sr = pac::FLASH.sr().read();

View File

@ -1,17 +1,13 @@
use core::convert::TryInto; use core::convert::TryInto;
use core::mem::size_of;
use core::ptr::write_volatile; use core::ptr::write_volatile;
use core::sync::atomic::{fence, Ordering}; use core::sync::atomic::{fence, Ordering};
use embassy_hal_common::stm32::flash::f7::get_sector; use embassy_hal_common::stm32::flash::f7::get_sector;
use super::FlashRegion; use super::WRITE_SIZE;
use crate::flash::Error; use crate::flash::Error;
use crate::pac; use crate::pac;
pub(crate) const MAX_WRITE_SIZE: usize = super::BANK1_REGION3::WRITE_SIZE;
pub(crate) const MAX_ERASE_SIZE: usize = super::BANK1_REGION3::ERASE_SIZE;
pub(crate) unsafe fn lock() { pub(crate) unsafe fn lock() {
pac::FLASH.cr().modify(|w| w.set_lock(true)); pac::FLASH.cr().modify(|w| w.set_lock(true));
} }
@ -21,49 +17,51 @@ pub(crate) unsafe fn unlock() {
pac::FLASH.keyr().write(|w| w.set_key(0xCDEF_89AB)); pac::FLASH.keyr().write(|w| w.set_key(0xCDEF_89AB));
} }
pub(crate) unsafe fn blocking_write(first_address: u32, buf: &[u8]) -> Result<(), Error> { pub(crate) unsafe fn begin_write() {
assert_eq!(0, WRITE_SIZE % 4);
pac::FLASH.cr().write(|w| { pac::FLASH.cr().write(|w| {
w.set_pg(true); w.set_pg(true);
w.set_psize(pac::flash::vals::Psize::PSIZE32); w.set_psize(pac::flash::vals::Psize::PSIZE32);
}); });
let ret = {
let mut ret: Result<(), Error> = Ok(());
let mut address = first_address;
for chunk in buf.chunks(MAX_WRITE_SIZE) {
let vals = chunk.chunks_exact(size_of::<u32>());
assert!(vals.remainder().is_empty());
for val in vals {
write_volatile(address as *mut u32, u32::from_le_bytes(val.try_into().unwrap()));
address += val.len() as u32;
// prevents parallelism errors
fence(Ordering::SeqCst);
}
ret = blocking_wait_ready();
if ret.is_err() {
break;
}
}
ret
};
pac::FLASH.cr().write(|w| w.set_pg(false));
ret
} }
pub(crate) unsafe fn blocking_erase(from_address: u32, to_address: u32) -> Result<(), Error> { pub(crate) unsafe fn end_write() {
let start_sector = get_sector(from_address); pac::FLASH.cr().write(|w| w.set_pg(false));
let end_sector = get_sector(to_address); }
for sector in start_sector.index..end_sector.index {
let ret = erase_sector(sector as u8); pub(crate) unsafe fn blocking_write(start_address: u32, buf: &[u8; WRITE_SIZE]) -> Result<(), Error> {
if ret.is_err() { let mut address = start_address;
return ret; for val in buf.chunks(4) {
} write_volatile(address as *mut u32, u32::from_le_bytes(val.try_into().unwrap()));
address += val.len() as u32;
// prevents parallelism errors
fence(Ordering::SeqCst);
} }
blocking_wait_ready()
}
pub(crate) fn is_eraseable_range(start_address: u32, end_address: u32) -> bool {
let mut address = start_address;
while address < end_address {
let sector = get_sector(address);
if sector.start != address {
return false;
}
address += sector.size;
}
address == end_address
}
pub(crate) unsafe fn blocking_erase(start_address: u32, end_address: u32) -> Result<(), Error> {
let mut address = start_address;
while address < end_address {
let sector = get_sector(address);
erase_sector(sector.index)?;
address += sector.size;
}
Ok(()) Ok(())
} }
@ -106,7 +104,7 @@ pub(crate) unsafe fn clear_all_err() {
}); });
} }
pub(crate) unsafe fn blocking_wait_ready() -> Result<(), Error> { unsafe fn blocking_wait_ready() -> Result<(), Error> {
loop { loop {
let sr = pac::FLASH.sr().read(); let sr = pac::FLASH.sr().read();

View File

@ -1,15 +1,13 @@
use core::convert::TryInto; use core::convert::TryInto;
use core::mem::size_of;
use core::ptr::write_volatile; use core::ptr::write_volatile;
use super::{FlashRegion, FLASH_SIZE}; use atomic_polyfill::{fence, Ordering};
use super::{FlashRegion, FLASH_SIZE, WRITE_SIZE};
use crate::flash::Error; use crate::flash::Error;
use crate::pac; use crate::pac;
const WRITE_SIZE: usize = super::BANK1::WRITE_SIZE;
const ERASE_SIZE: usize = super::BANK1::ERASE_SIZE; const ERASE_SIZE: usize = super::BANK1::ERASE_SIZE;
pub(crate) const MAX_WRITE_SIZE: usize = WRITE_SIZE;
pub(crate) const MAX_ERASE_SIZE: usize = ERASE_SIZE;
const SECOND_BANK_OFFSET: usize = 0x0010_0000; const SECOND_BANK_OFFSET: usize = 0x0010_0000;
const fn is_dual_bank() -> bool { const fn is_dual_bank() -> bool {
@ -33,59 +31,60 @@ pub(crate) unsafe fn unlock() {
} }
} }
pub(crate) unsafe fn blocking_write(first_address: u32, buf: &[u8]) -> Result<(), Error> { pub(crate) unsafe fn begin_write() {
let bank = if !is_dual_bank() || (first_address - super::FLASH_BASE as u32) < SECOND_BANK_OFFSET as u32 { assert_eq!(0, WRITE_SIZE % 4);
}
pub(crate) unsafe fn end_write() {}
pub(crate) unsafe fn blocking_write(start_address: u32, buf: &[u8; WRITE_SIZE]) -> Result<(), Error> {
// We cannot have the write setup sequence in begin_write as it depends on the address
let bank = if !is_dual_bank() || (start_address - super::FLASH_BASE as u32) < SECOND_BANK_OFFSET as u32 {
pac::FLASH.bank(0) pac::FLASH.bank(0)
} else { } else {
pac::FLASH.bank(1) pac::FLASH.bank(1)
}; };
bank.cr().write(|w| { bank.cr().write(|w| {
w.set_pg(true); w.set_pg(true);
w.set_psize(2); // 32 bits at once w.set_psize(2); // 32 bits at once
}); });
cortex_m::asm::isb(); cortex_m::asm::isb();
cortex_m::asm::dsb(); cortex_m::asm::dsb();
core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst); fence(Ordering::SeqCst);
let ret = { let mut res = None;
let mut ret: Result<(), Error> = Ok(()); let mut address = start_address;
let mut address = first_address; for val in buf.chunks(4) {
'outer: for chunk in buf.chunks(WRITE_SIZE) { write_volatile(address as *mut u32, u32::from_le_bytes(val.try_into().unwrap()));
let vals = chunk.chunks_exact(size_of::<u32>()); address += val.len() as u32;
assert!(vals.remainder().is_empty());
for val in vals {
write_volatile(address as *mut u32, u32::from_le_bytes(val.try_into().unwrap()));
address += val.len() as u32;
ret = blocking_wait_ready(bank); res = Some(blocking_wait_ready(bank));
bank.sr().modify(|w| { bank.sr().modify(|w| {
if w.eop() { if w.eop() {
w.set_eop(true); w.set_eop(true);
}
});
if ret.is_err() {
break 'outer;
}
} }
});
if res.unwrap().is_err() {
break;
} }
ret }
};
bank.cr().write(|w| w.set_pg(false)); bank.cr().write(|w| w.set_pg(false));
cortex_m::asm::isb(); cortex_m::asm::isb();
cortex_m::asm::dsb(); cortex_m::asm::dsb();
core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst); fence(Ordering::SeqCst);
ret res.unwrap()
} }
pub(crate) unsafe fn blocking_erase(from: u32, to: u32) -> Result<(), Error> { pub(crate) fn is_eraseable_range(start_address: u32, end_address: u32) -> bool {
let start_sector = (from - super::FLASH_BASE as u32) / ERASE_SIZE as u32; start_address % ERASE_SIZE as u32 == 0 && end_address % ERASE_SIZE as u32 == 0
let end_sector = (to - super::FLASH_BASE as u32) / ERASE_SIZE as u32; }
pub(crate) unsafe fn blocking_erase(start_address: u32, end_address: u32) -> Result<(), Error> {
let start_sector = (start_address - super::FLASH_BASE as u32) / ERASE_SIZE as u32;
let end_sector = (end_address - super::FLASH_BASE as u32) / ERASE_SIZE as u32;
for sector in start_sector..end_sector { for sector in start_sector..end_sector {
let bank = if sector >= 8 { 1 } else { 0 }; let bank = if sector >= 8 { 1 } else { 0 };
let ret = erase_sector(pac::FLASH.bank(bank), (sector % 8) as u8); let ret = erase_sector(pac::FLASH.bank(bank), (sector % 8) as u8);
@ -93,7 +92,6 @@ pub(crate) unsafe fn blocking_erase(from: u32, to: u32) -> Result<(), Error> {
return ret; return ret;
} }
} }
Ok(()) Ok(())
} }
@ -157,7 +155,7 @@ unsafe fn bank_clear_all_err(bank: pac::flash::Bank) {
}); });
} }
pub(crate) unsafe fn blocking_wait_ready(bank: pac::flash::Bank) -> Result<(), Error> { unsafe fn blocking_wait_ready(bank: pac::flash::Bank) -> Result<(), Error> {
loop { loop {
let sr = bank.sr().read(); let sr = bank.sr().read();

View File

@ -1,14 +1,12 @@
use core::convert::TryInto;
use core::ptr::write_volatile; use core::ptr::write_volatile;
use super::FlashRegion; use atomic_polyfill::{fence, Ordering};
use super::{FlashRegion, WRITE_SIZE};
use crate::flash::Error; use crate::flash::Error;
use crate::pac; use crate::pac;
const WRITE_SIZE: usize = super::BANK1::WRITE_SIZE;
const ERASE_SIZE: usize = super::BANK1::ERASE_SIZE; const ERASE_SIZE: usize = super::BANK1::ERASE_SIZE;
pub(crate) const MAX_WRITE_SIZE: usize = WRITE_SIZE;
pub(crate) const MAX_ERASE_SIZE: usize = ERASE_SIZE;
pub(crate) unsafe fn lock() { pub(crate) unsafe fn lock() {
#[cfg(any(flash_wl, flash_wb, flash_l4))] #[cfg(any(flash_wl, flash_wb, flash_l4))]
@ -39,35 +37,37 @@ pub(crate) unsafe fn unlock() {
} }
} }
pub(crate) unsafe fn blocking_write(first_address: u32, buf: &[u8]) -> Result<(), Error> { pub(crate) unsafe fn begin_write() {
assert_eq!(0, WRITE_SIZE % 4);
#[cfg(any(flash_wl, flash_wb, flash_l4))] #[cfg(any(flash_wl, flash_wb, flash_l4))]
pac::FLASH.cr().write(|w| w.set_pg(true)); pac::FLASH.cr().write(|w| w.set_pg(true));
let ret = {
let mut ret: Result<(), Error> = Ok(());
let mut address = first_address;
for chunk in buf.chunks(WRITE_SIZE) {
for val in chunk.chunks(4) {
write_volatile(address as *mut u32, u32::from_le_bytes(val[0..4].try_into().unwrap()));
address += val.len() as u32;
}
ret = blocking_wait_ready();
if ret.is_err() {
break;
}
}
ret
};
#[cfg(any(flash_wl, flash_wb, flash_l4))]
pac::FLASH.cr().write(|w| w.set_pg(false));
ret
} }
pub(crate) unsafe fn blocking_erase(from_address: u32, to_address: u32) -> Result<(), Error> { pub(crate) unsafe fn end_write() {
for page in (from_address..to_address).step_by(ERASE_SIZE) { #[cfg(any(flash_wl, flash_wb, flash_l4))]
pac::FLASH.cr().write(|w| w.set_pg(false));
}
pub(crate) unsafe fn blocking_write(start_address: u32, buf: &[u8; WRITE_SIZE]) -> Result<(), Error> {
let mut address = start_address;
for val in buf.chunks(4) {
write_volatile(address as *mut u32, u32::from_le_bytes(val.try_into().unwrap()));
address += val.len() as u32;
// prevents parallelism errors
fence(Ordering::SeqCst);
}
blocking_wait_ready()
}
pub(crate) fn is_eraseable_range(start_address: u32, end_address: u32) -> bool {
start_address % ERASE_SIZE as u32 == 0 && end_address % ERASE_SIZE as u32 == 0
}
pub(crate) unsafe fn blocking_erase(start_address: u32, end_address: u32) -> Result<(), Error> {
for page in (start_address..end_address).step_by(ERASE_SIZE) {
#[cfg(any(flash_l0, flash_l1))] #[cfg(any(flash_l0, flash_l1))]
{ {
pac::FLASH.pecr().modify(|w| { pac::FLASH.pecr().modify(|w| {
@ -155,7 +155,7 @@ pub(crate) unsafe fn clear_all_err() {
}); });
} }
pub(crate) unsafe fn blocking_wait_ready() -> Result<(), Error> { unsafe fn blocking_wait_ready() -> Result<(), Error> {
loop { loop {
let sr = pac::FLASH.sr().read(); let sr = pac::FLASH.sr().read();

View File

@ -1,8 +1,10 @@
use embassy_hal_common::{into_ref, PeripheralRef}; use embassy_hal_common::{into_ref, PeripheralRef};
use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
use embassy_sync::mutex::{Mutex, MutexGuard};
use embedded_storage::nor_flash::{ErrorType, NorFlash, NorFlashError, NorFlashErrorKind, ReadNorFlash}; use embedded_storage::nor_flash::{ErrorType, NorFlash, NorFlashError, NorFlashErrorKind, ReadNorFlash};
pub use crate::_generated::flash_regions::*; pub use crate::_generated::flash_regions::*;
pub use crate::pac::{FLASH_BASE, FLASH_SIZE}; pub use crate::pac::{FLASH_BASE, FLASH_SIZE, WRITE_SIZE};
use crate::peripherals::FLASH; use crate::peripherals::FLASH;
use crate::Peripheral; use crate::Peripheral;
@ -17,6 +19,8 @@ pub struct Flash<'d> {
_inner: PeripheralRef<'d, FLASH>, _inner: PeripheralRef<'d, FLASH>,
} }
static REGION_LOCK: Mutex<CriticalSectionRawMutex, ()> = Mutex::new(());
impl<'d> Flash<'d> { impl<'d> Flash<'d> {
pub fn new(p: impl Peripheral<P = FLASH> + 'd) -> Self { pub fn new(p: impl Peripheral<P = FLASH> + 'd) -> Self {
into_ref!(p); into_ref!(p);
@ -33,7 +37,6 @@ impl<'d> Flash<'d> {
} }
let first_address = FLASH_BASE as u32 + offset; let first_address = FLASH_BASE as u32 + offset;
let flash_data = unsafe { core::slice::from_raw_parts(first_address as *const u8, bytes.len()) }; let flash_data = unsafe { core::slice::from_raw_parts(first_address as *const u8, bytes.len()) };
bytes.copy_from_slice(flash_data); bytes.copy_from_slice(flash_data);
Ok(()) Ok(())
@ -43,39 +46,56 @@ impl<'d> Flash<'d> {
if offset as usize + buf.len() > FLASH_SIZE { if offset as usize + buf.len() > FLASH_SIZE {
return Err(Error::Size); return Err(Error::Size);
} }
if offset as usize % family::MAX_WRITE_SIZE != 0 || buf.len() as usize % family::MAX_WRITE_SIZE != 0 { if offset as usize % WRITE_SIZE != 0 || buf.len() as usize % WRITE_SIZE != 0 {
return Err(Error::Unaligned); return Err(Error::Unaligned);
} }
let first_address = FLASH_BASE as u32 + offset; let start_address = FLASH_BASE as u32 + offset;
trace!("Writing {} bytes at 0x{:x}", buf.len(), first_address); trace!("Writing {} bytes at 0x{:x}", buf.len(), start_address);
// No need to take lock here as we only have one mut flash reference.
unsafe { unsafe {
family::clear_all_err(); family::clear_all_err();
family::unlock(); family::unlock();
let res = family::blocking_write(first_address, buf); let res = Flash::blocking_write_all(start_address, buf);
family::lock(); family::lock();
res res
} }
} }
unsafe fn blocking_write_all(start_address: u32, buf: &[u8]) -> Result<(), Error> {
family::begin_write();
let mut address = start_address;
for chunk in buf.chunks(WRITE_SIZE) {
let res = unsafe { family::blocking_write(address, chunk.try_into().unwrap()) };
if res.is_err() {
family::end_write();
return res;
}
address += WRITE_SIZE as u32;
}
family::end_write();
Ok(())
}
pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> { pub fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
if to < from || to as usize > FLASH_SIZE { if to < from || to as usize > FLASH_SIZE {
return Err(Error::Size); return Err(Error::Size);
} }
if (from as usize % family::MAX_ERASE_SIZE) != 0 || (to as usize % family::MAX_ERASE_SIZE) != 0 {
let start_address = FLASH_BASE as u32 + from;
let end_address = FLASH_BASE as u32 + to;
if !family::is_eraseable_range(start_address, end_address) {
return Err(Error::Unaligned); return Err(Error::Unaligned);
} }
trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address);
let from_address = FLASH_BASE as u32 + from;
let to_address = FLASH_BASE as u32 + to;
unsafe { unsafe {
family::clear_all_err(); family::clear_all_err();
family::unlock(); family::unlock();
let res = family::blocking_erase(from_address, to_address); let res = family::blocking_erase(start_address, end_address);
family::lock(); family::lock();
res res
} }
@ -101,7 +121,6 @@ pub trait FlashRegion {
} }
let first_address = Self::BASE as u32 + offset; let first_address = Self::BASE as u32 + offset;
let flash_data = unsafe { core::slice::from_raw_parts(first_address as *const u8, bytes.len()) }; let flash_data = unsafe { core::slice::from_raw_parts(first_address as *const u8, bytes.len()) };
bytes.copy_from_slice(flash_data); bytes.copy_from_slice(flash_data);
Ok(()) Ok(())
@ -115,17 +134,19 @@ pub trait FlashRegion {
return Err(Error::Unaligned); return Err(Error::Unaligned);
} }
let first_address = Self::BASE as u32 + offset; let start_address = Self::BASE as u32 + offset;
trace!("Writing {} bytes from 0x{:x}", buf.len(), first_address); trace!("Writing {} bytes from 0x{:x}", buf.len(), start_address);
critical_section::with(|_| unsafe { // Protect against simultaneous write/erase to multiple regions.
let _guard = take_lock_spin();
unsafe {
family::clear_all_err(); family::clear_all_err();
family::unlock(); family::unlock();
let res = family::blocking_write(first_address, buf); let res = Flash::blocking_write_all(start_address, buf);
family::lock(); family::lock();
res res
}) }
} }
fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> { fn blocking_erase(&mut self, from: u32, to: u32) -> Result<(), Error> {
@ -136,18 +157,28 @@ pub trait FlashRegion {
return Err(Error::Unaligned); return Err(Error::Unaligned);
} }
let from_address = Self::BASE as u32 + from; let start_address = Self::BASE as u32 + from;
let to_address = Self::BASE as u32 + to; let end_address = Self::BASE as u32 + to;
trace!("Erasing from 0x{:x} to 0x{:x}", from_address, to_address); trace!("Erasing from 0x{:x} to 0x{:x}", start_address, end_address);
critical_section::with(|_| unsafe { // Protect against simultaneous write/erase to multiple regions.
let _guard = take_lock_spin();
unsafe {
family::clear_all_err(); family::clear_all_err();
family::unlock(); family::unlock();
let res = family::blocking_erase(from_address, to_address); let res = family::blocking_erase(start_address, end_address);
family::lock(); family::lock();
res res
}) }
}
}
fn take_lock_spin() -> MutexGuard<'static, CriticalSectionRawMutex, ()> {
loop {
if let Ok(guard) = REGION_LOCK.try_lock() {
return guard;
}
} }
} }