rp: allow for MPU-based stack guards on core 0 as well

using these will require some linker script intervention. setting the
core0 stack needs linker intervention anyway (to provide _stack_start),
so having it also provide _stack_end for the guard to use is not much
of a stretch.
pennae 2023-07-20 16:57:54 +02:00
parent 4d6b3c57b1
commit e9445ec72d
2 changed files with 74 additions and 27 deletions

@@ -219,6 +219,74 @@ select_bootloader! {
    default => BOOT_LOADER_W25Q080
}

/// Installs a stack guard for the CORE0 stack in MPU region 0.
/// Will fail if the MPU is already configured. This function requires
/// a `_stack_end` symbol to be defined by the linker script, and expects
/// `_stack_end` to be located at the lowest address (largest depth) of
/// the stack.
///
/// This method can *only* set up stack guards on the currently
/// executing core. Stack guards for CORE1 are set up automatically;
/// only CORE0 should ever use this.
///
/// # Usage
///
/// ```no_run
/// #![feature(type_alias_impl_trait)]
/// use embassy_rp::install_core0_stack_guard;
/// use embassy_executor::{Executor, Spawner};
///
/// #[embassy_executor::main]
/// async fn main(_spawner: Spawner) {
///     // set up by the linker as follows:
///     //
///     // MEMORY {
///     //     STACK0: ORIGIN = 0x20040000, LENGTH = 4K
///     // }
///     //
///     // _stack_end = ORIGIN(STACK0);
///     // _stack_start = _stack_end + LENGTH(STACK0);
///     //
///     install_core0_stack_guard().expect("MPU already configured");
///     let p = embassy_rp::init(Default::default());
///
///     // ...
/// }
/// ```
pub fn install_core0_stack_guard() -> Result<(), ()> {
    extern "C" {
        static mut _stack_end: usize;
    }
    unsafe { install_stack_guard(&mut _stack_end as *mut usize) }
}

#[inline(always)]
fn install_stack_guard(stack_bottom: *mut usize) -> Result<(), ()> {
    let core = unsafe { cortex_m::Peripherals::steal() };
    // Fail if MPU is already configured
    if core.MPU.ctrl.read() != 0 {
        return Err(());
    }
    // The minimum we can protect is 32 bytes on a 32 byte boundary, so round up which will
    // just shorten the valid stack range a tad.
    let addr = (stack_bottom as u32 + 31) & !31;
    // Mask is 1 bit per 32 bytes of the 256 byte range... clear the bit for the segment we want
    let subregion_select = 0xff ^ (1 << ((addr >> 5) & 7));
    unsafe {
        core.MPU.ctrl.write(5); // enable mpu with background default map
        core.MPU.rbar.write((addr & !0xff) | (1 << 4)); // set address and update RNR
        core.MPU.rasr.write(
            1 // enable region
                | (0x7 << 1) // size 2^(7 + 1) = 256
                | (subregion_select << 8)
                | 0x10000000, // XN = disable instruction fetch; no other bits means no permissions
        );
    }
    Ok(())
}

pub mod config {
    use crate::clocks::ClockConfig;
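The subregion arithmetic above is easy to sanity-check in isolation. Below is a small host-runnable sketch (not part of the commit) that reproduces just the rounding and mask computation from install_stack_guard, using a hypothetical, deliberately unaligned stack bottom; the MPU register writes themselves are left out.

fn main() {
    // Hypothetical stack bottom, deliberately not 32-byte aligned.
    let stack_bottom: u32 = 0x2003_0010;
    // Round up to the next 32-byte boundary, exactly as install_stack_guard does;
    // this only shortens the usable stack by at most 31 bytes.
    let addr = (stack_bottom + 31) & !31;
    assert_eq!(addr, 0x2003_0020);
    // The guarded MPU region is the surrounding 256-byte block (addr & !0xff).
    // Each of its eight 32-byte subregions has one disable bit; clearing exactly
    // one bit leaves a single 32-byte subregion active as the guard.
    let subregion_select = 0xffu32 ^ (1 << ((addr >> 5) & 7));
    assert_eq!((addr >> 5) & 7, 1); // addr sits in the second 32-byte slot of its block
    assert_eq!(subregion_select, 0b1111_1101);
    println!("addr = {addr:#010x}, subregion disable mask = {subregion_select:#010b}");
}

With the disable mask 0b1111_1101, only the second 32-byte subregion of the surrounding 256-byte block keeps the region's no-access attributes, which is exactly the 32-byte guard slot at the bottom of the stack.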

@@ -52,41 +52,20 @@ use core::sync::atomic::{compiler_fence, AtomicBool, Ordering};
 use crate::interrupt::InterruptExt;
 use crate::peripherals::CORE1;
-use crate::{gpio, interrupt, pac};
+use crate::{gpio, install_stack_guard, interrupt, pac};
 const PAUSE_TOKEN: u32 = 0xDEADBEEF;
 const RESUME_TOKEN: u32 = !0xDEADBEEF;
 static IS_CORE1_INIT: AtomicBool = AtomicBool::new(false);
 #[inline(always)]
-fn install_stack_guard(stack_bottom: *mut usize) {
-    let core = unsafe { cortex_m::Peripherals::steal() };
-    // Trap if MPU is already configured
-    if core.MPU.ctrl.read() != 0 {
-        cortex_m::asm::udf();
-    }
-    // The minimum we can protect is 32 bytes on a 32 byte boundary, so round up which will
-    // just shorten the valid stack range a tad.
-    let addr = (stack_bottom as u32 + 31) & !31;
-    // Mask is 1 bit per 32 bytes of the 256 byte range... clear the bit for the segment we want
-    let subregion_select = 0xff ^ (1 << ((addr >> 5) & 7));
-    unsafe {
-        core.MPU.ctrl.write(5); // enable mpu with background default map
-        core.MPU.rbar.write((addr & !0xff) | (1 << 4)); // set address and update RNR
-        core.MPU.rasr.write(
-            1 // enable region
-                | (0x7 << 1) // size 2^(7 + 1) = 256
-                | (subregion_select << 8)
-                | 0x10000000, // XN = disable instruction fetch; no other bits means no permissions
-        );
-    }
-}
-#[inline(always)]
 fn core1_setup(stack_bottom: *mut usize) {
-    install_stack_guard(stack_bottom);
+    if let Err(_) = install_stack_guard(stack_bottom) {
+        // currently only happens if the MPU was already set up, which
+        // would indicate that the core is already in use from outside
+        // embassy, somehow. trap if so since we can't deal with that.
+        cortex_m::asm::udf();
+    }
     unsafe {
         gpio::init();
     }
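For completeness, here is a sketch (not part of the commit) of how the two guards end up installed in an application. It assumes the embassy_rp::multicore::spawn_core1 / Stack API as it exists around this commit; the stack size, the core 1 body, and the symbol names are illustrative only, and the crate's own multicore example is the authoritative reference for the exact signatures.

#![no_std]
#![no_main]
#![feature(type_alias_impl_trait)]

use embassy_executor::Spawner;
use embassy_rp::install_core0_stack_guard;
use embassy_rp::multicore::{spawn_core1, Stack}; // assumed API, see lead-in

// Stack for the second core; core1_setup() installs an MPU guard at its low end.
static mut CORE1_STACK: Stack<4096> = Stack::new();

#[embassy_executor::main]
async fn main(_spawner: Spawner) {
    // CORE0: the guard is opt-in and must come first, while the MPU is still
    // untouched (and before anything has a chance to overflow the stack).
    install_core0_stack_guard().expect("MPU already configured");
    let p = embassy_rp::init(Default::default());

    // CORE1: spawn_core1 runs core1_setup on the new core, which installs the
    // guard for CORE1_STACK automatically and traps if the MPU is already in use.
    spawn_core1(p.CORE1, unsafe { &mut CORE1_STACK }, move || {
        loop {
            // core 1 work goes here (typically an embassy-executor run loop)
        }
    });

    // ...
}

Note the asymmetry introduced by this change: on CORE0 the guard is opt-in and failure is reported as an Err for the caller to handle, while on CORE1 core1_setup installs it unconditionally and traps with udf if the MPU is already in use.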