Add multicore example #24

Merged · 5 commits · Apr 16, 2025
18 changes: 18 additions & 0 deletions cortex-a-rt/src/lib.rs
@@ -56,6 +56,24 @@
//! `__sbss` and `__ebss` is zeroed, and the memory between `__sdata` and
//! `__edata` is initialised with the data found at `__sidata`.
//!
//! The stacks look like:
//!
//! ```text
//! +------------------+ <----_stack_top
//! | UND Stack | } _und_stack_size bytes
//! +------------------+
//! | SVC Stack | } _svc_stack_size bytes
//! +------------------+
//! | ABT Stack | } _abt_stack_size bytes
//! +------------------+
//! | IRQ Stack | } _irq_stack_size bytes
//! +------------------+
//! | FIQ Stack | } _fiq_stack_size bytes
//! +------------------+
//! | SYS Stack | } No specific size
//! +------------------+
//! ```
//!
//! ### C-Compatible Functions
//!
//! * `kmain` - the `extern "C"` entry point to your application.
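As an illustration of the `kmain` entry point mentioned in the documentation above, here is a minimal sketch of an application the runtime could jump to once the stacks are set up. The `#![no_main]` layout, the `-> !` signature and the panic handler are illustrative assumptions; only the `extern "C"` `kmain` symbol comes from the docs.

```rust
#![no_std]
#![no_main]

use core::panic::PanicInfo;

/// The entry point the runtime jumps to once the stacks shown above are set
/// up and `.bss`/`.data` have been initialised.
#[no_mangle]
pub extern "C" fn kmain() -> ! {
    // Application code goes here.
    loop {
        core::hint::spin_loop();
    }
}

/// A placeholder panic handler so this sketch builds as a `no_std` binary.
#[panic_handler]
fn panic(_info: &PanicInfo) -> ! {
    loop {}
}
```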
5 changes: 4 additions & 1 deletion cortex-ar/Cargo.toml
@@ -28,7 +28,7 @@ version = "0.1.0"
arbitrary-int = "1.3.0"
bitbybit = "1.3.3"
num_enum = { version = "0.7", default-features = false }
critical-section = {version = "1.2.0", features = ["restore-state-bool"], optional = true}
critical-section = {version = "1.2.0", features = ["restore-state-u8"], optional = true}
defmt = {version = "0.3", optional = true}

[build-dependencies]
@@ -38,5 +38,8 @@ arm-targets = {version = "0.1.0", path = "../arm-targets"}
# Adds a critical-section implementation that only disables interrupts.
# This is not sound on multi-core systems because interrupts are per-core.
critical-section-single-core = ["critical-section"]
# Adds a critical-section implementation that disables interrupts and does
# a CAS spinlock.
critical-section-multi-core = ["critical-section"]
# Adds defmt::Format implementation for the register types
defmt = ["dep:defmt"]
12 changes: 12 additions & 0 deletions cortex-ar/src/asm.rs
@@ -56,3 +56,15 @@ pub fn sev() {
core::arch::asm!("sev");
}
}

/// Which core are we?
///
/// Returns the bottom 24 bits of the MPIDR
#[inline]
pub fn core_id() -> u32 {
let r: u32;
unsafe {
core::arch::asm!("MRC p15, 0, {}, c0, c0, 5", out(reg) r, options(nomem, nostack, preserves_flags));
}
r & 0x00FF_FFFF
}
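A hedged sketch of how `core_id()` might be used to split start-up work between cores. The crate path `cortex_ar::asm::core_id` follows this repository's layout, but the entry functions and the Aff0 masking are illustrative assumptions rather than anything this PR defines.

```rust
/// Hypothetical entry for the boot core.
fn primary_main() -> ! {
    loop {
        core::hint::spin_loop();
    }
}

/// Hypothetical entry for any secondary core.
fn secondary_main(_core: u32) -> ! {
    loop {
        core::hint::spin_loop();
    }
}

pub fn dispatch() -> ! {
    // Affinity level 0 (the bottom byte of the value returned by `core_id()`)
    // is typically the core number within a cluster.
    match cortex_ar::asm::core_id() & 0xFF {
        0 => primary_main(),
        n => secondary_main(n),
    }
}
```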
157 changes: 137 additions & 20 deletions cortex-ar/src/critical_section.rs
@@ -1,30 +1,147 @@
//! Code that implements the `critical-section` traits on Cortex-R or Cortex-A when only a single
//! core is used.
//! Code that implements the `critical-section` traits on Cortex-R or Cortex-A
//!
//! Only valid if you have a single core.
//! We have single-core and multi-core versions. Select with the
//! `critical-section-single-core` and `critical-section-multi-core` features.

use core::sync::atomic;
#[cfg(feature = "critical-section-single-core")]
mod single_core {
struct SingleCoreCriticalSection;

struct SingleCoreCriticalSection;
critical_section::set_impl!(SingleCoreCriticalSection);
critical_section::set_impl!(SingleCoreCriticalSection);

unsafe impl critical_section::Impl for SingleCoreCriticalSection {
unsafe fn acquire() -> critical_section::RawRestoreState {
// the i bit means "masked"
let was_active = !crate::register::Cpsr::read().i();
crate::interrupt::disable();
atomic::compiler_fence(atomic::Ordering::SeqCst);
was_active
/// Indicates the critical section was entered with interrupts on
pub const INT_ON: u8 = 0;

/// Indicates the critical section was entered with interrupts off
pub const INT_OFF: u8 = 1;

#[cfg(feature = "critical-section-single-core")]
unsafe impl critical_section::Impl for SingleCoreCriticalSection {
unsafe fn acquire() -> critical_section::RawRestoreState {
use core::sync::atomic;
// the i bit means "masked"
let was_active = !crate::register::Cpsr::read().i();
crate::interrupt::disable();
atomic::compiler_fence(atomic::Ordering::SeqCst);
if was_active {
INT_ON
} else {
INT_OFF
}
}

unsafe fn release(was_active: critical_section::RawRestoreState) {
use core::sync::atomic;
// Only re-enable interrupts if they were enabled before the critical section.
if was_active == INT_ON {
atomic::compiler_fence(atomic::Ordering::SeqCst);
// Safety: This is OK because we're releasing a lock that was
// entered with interrupts enabled
unsafe {
crate::interrupt::enable();
}
}
}
}
}

#[cfg(feature = "critical-section-multi-core")]
mod multi_core {
struct MultiCoreCriticalSection;

critical_section::set_impl!(MultiCoreCriticalSection);

/// The default value for our spin-lock
pub const UNLOCKED: u32 = 0xFFFF_FFFF;

/// Indicates the critical section was entered with interrupts on, and the spin-lock unlocked
pub const INT_ON_UNLOCKED: u8 = 0;

/// Indicates the critical section was entered with interrupts off, and the spin-lock locked (by us)
pub const INT_OFF_LOCKED: u8 = 1;

/// Indicates the critical section was entered with interrupts off, and the spin-lock unlocked
pub const INT_OFF_UNLOCKED: u8 = 2;

pub static CORE_SPIN_LOCK: core::sync::atomic::AtomicU32 =
core::sync::atomic::AtomicU32::new(UNLOCKED);
unsafe impl critical_section::Impl for MultiCoreCriticalSection {
unsafe fn acquire() -> critical_section::RawRestoreState {
use core::sync::atomic;

// the i bit means "masked"
let was_active = !crate::register::Cpsr::read().i();
crate::interrupt::disable();

let core_id = crate::asm::core_id();

let locked_already = loop {
match CORE_SPIN_LOCK.compare_exchange(
robamu (Contributor) commented on Apr 16, 2025:

We are on ARM, so maybe `compare_exchange_weak` works as well? I'd need to check the Armv7-A reference manual again, but IIRC it uses the exclusive load/store (LL/SC) instructions for these operations.

jonathanpallant (Contributor, PR author) commented on Apr 16, 2025:

I think spurious failure here would be bad, as we might believe we already hold the lock when in fact we had just taken it. Then we would unlock incorrectly. But I'm happy for someone to argue that it would in fact be correct.

robamu (Contributor) commented on Apr 16, 2025:

I think you are right. It probably only works when there is a retry on all error cases.

UNLOCKED,
core_id,
atomic::Ordering::Acquire,
Contributor:

I am trying to understand why this does not have to be `AcqRel`. Is it because of the compiler fence after the loop?

jonathanpallant (Contributor, PR author) commented on Apr 16, 2025:

Mara's example at https://marabos.nl/atomics/building-spinlock.html uses `Acquire`/`Relaxed`, as does the `spin` crate.

As I understand it, we need a load-Acquire when locking the spin-lock, which pairs with the store-Release when the spin-lock is unlocked, to ensure that the unlock completes before the lock can be taken. Because the lock can only be held by one thing at a time, there is no subsequent load-Acquire of the spin-lock that we need to ensure happens after the spin-lock is locked. But my grasp on this is tenuous at best.

Contributor:

Oh, I'll definitely read that. I read Jon's Rust for Rustaceans book on this, but it's a very tough subject.

atomic::Ordering::Relaxed,
) {
Ok(_) => {
// we got the lock
break false;
}
Err(n) if n == core_id => {
// we already held the lock
break true;
}
Err(_) => {
// someone else holds the lock
core::hint::spin_loop();
}
}
};

atomic::compiler_fence(atomic::Ordering::SeqCst);

match (was_active, locked_already) {
(true, true) => {
panic!("Invalid CS state?!");
}
(true, false) => {
// we need to turn interrupts on, and release the lock
INT_ON_UNLOCKED
}
(false, false) => {
// we need to release the lock
INT_OFF_UNLOCKED
}
(false, true) => {
// we need to do nothing
INT_OFF_LOCKED
}
}
}

unsafe fn release(was_active: critical_section::RawRestoreState) {
use core::sync::atomic;

unsafe fn release(was_active: critical_section::RawRestoreState) {
// Only re-enable interrupts if they were enabled before the critical section.
if was_active {
atomic::compiler_fence(atomic::Ordering::SeqCst);
// Safety: This is OK because we're releasing a lock that was
// entered with interrupts enabled
unsafe {
crate::interrupt::enable();
match was_active {
INT_OFF_LOCKED => {
// do nothing
}
INT_OFF_UNLOCKED => {
// the spin-lock was unlocked before, so unlock it
CORE_SPIN_LOCK.store(UNLOCKED, atomic::Ordering::Release);
}
INT_ON_UNLOCKED => {
// the spin-lock was unlocked before, so unlock it
CORE_SPIN_LOCK.store(UNLOCKED, atomic::Ordering::Release);
// Safety: This is OK because we're releasing a lock that was
// entered with interrupts enabled
unsafe {
crate::interrupt::enable();
}
}
_ => {
unreachable!()
}
}
}
}
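On the `compare_exchange` vs `compare_exchange_weak` thread above: a weak CAS would need every failure, including a spurious one, to go back around the loop, as the thread concludes. A rough standalone sketch of that shape, mirroring the PR's lock but not taken from it; the details are untested assumptions.

```rust
use core::sync::atomic::{AtomicU32, Ordering};

const UNLOCKED: u32 = 0xFFFF_FFFF;
static CORE_SPIN_LOCK: AtomicU32 = AtomicU32::new(UNLOCKED);

/// Try to take the spin-lock, looping until the CAS either succeeds or
/// observes that this core already holds the lock. A spurious weak-CAS
/// failure returns the current value (never our `core_id`), so it simply
/// causes another trip around the loop. Returns `true` if this core
/// already held the lock.
fn lock(core_id: u32) -> bool {
    loop {
        match CORE_SPIN_LOCK.compare_exchange_weak(
            UNLOCKED,
            core_id,
            Ordering::Acquire,
            Ordering::Relaxed,
        ) {
            // We just took the lock.
            Ok(_) => break false,
            // We already held the lock.
            Err(n) if n == core_id => break true,
            // Someone else holds it, or the CAS failed spuriously; retry.
            Err(_) => core::hint::spin_loop(),
        }
    }
}
```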
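On the ordering question above: the pattern from the linked chapter (and from the `spin` crate) is an `Acquire` CAS when taking the lock paired with a `Release` store when dropping it, which is what this diff does. A stripped-down, generic illustration of that pairing; this is not the PR's implementation, just a sketch of the ordering argument.

```rust
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicBool, Ordering};

/// A minimal spin-lock showing the Acquire/Release pairing discussed above.
pub struct SpinLock<T> {
    locked: AtomicBool,
    value: UnsafeCell<T>,
}

// Safety: access to `value` is serialised by `locked`.
unsafe impl<T: Send> Sync for SpinLock<T> {}

impl<T> SpinLock<T> {
    pub const fn new(value: T) -> Self {
        Self {
            locked: AtomicBool::new(false),
            value: UnsafeCell::new(value),
        }
    }

    pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        // Acquire on the successful CAS: everything the previous holder wrote
        // before its Release store is visible once we observe the lock free.
        while self
            .locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            core::hint::spin_loop();
        }
        let result = f(unsafe { &mut *self.value.get() });
        // Release on the store: our writes to `value` happen-before the next
        // holder's Acquire when it sees the lock free again.
        self.locked.store(false, Ordering::Release);
        result
    }
}
```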
1 change: 0 additions & 1 deletion cortex-ar/src/lib.rs
@@ -2,7 +2,6 @@

#![no_std]

#[cfg(feature = "critical-section-single-core")]
mod critical_section;

pub mod asm;
12 changes: 9 additions & 3 deletions cortex-r-rt/link.x
@@ -1,7 +1,8 @@
/*
Basic Cortex-R linker script.

You must supply a file called `memory.x` which defines the memory regions 'CODE' and 'DATA'.
You must supply a file called `memory.x` which defines the memory regions
'VECTORS', 'CODE' and 'DATA'.

The stack pointer(s) will be (near) the top of the DATA region by default.

@@ -10,13 +11,17 @@ Based upon the linker script from https://github.com/rust-embedded/cortex-m

INCLUDE memory.x

ENTRY(_vector_table);
ENTRY(_start);
EXTERN(_vector_table);
EXTERN(_start);

SECTIONS {
.text : {
.vector_table ORIGIN(VECTORS) : {
/* The vector table must come first */
*(.vector_table)
} > VECTORS

.text : {
/* Now the rest of the code */
*(.text .text*)
} > CODE
@@ -76,6 +81,7 @@ remainder is our system mode stack.
You must keep _stack_top and the stack sizes aligned to eight byte boundaries.
*/
PROVIDE(_stack_top = ORIGIN(DATA) + LENGTH(DATA));
PROVIDE(_hyp_stack_size = 0x400);
PROVIDE(_und_stack_size = 0x400);
PROVIDE(_svc_stack_size = 0x400);
PROVIDE(_abt_stack_size = 0x400);