66extern crate log;
77extern crate alloc;
88
9- use axhal:: irq:: { IPI_IRQ , IpiTarget } ;
10- use axhal:: percpu:: this_cpu_id;
9+ use alloc:: { sync:: Arc , vec:: Vec } ;
10+ use core:: sync:: atomic:: { AtomicBool , Ordering } ;
11+
12+ use axhal:: {
13+ irq:: { IPI_IRQ , IpiTarget } ,
14+ percpu:: this_cpu_id,
15+ } ;
16+ use axtask:: AxCpuMask ;
1117use kspin:: SpinNoIrq ;
1218use lazyinit:: LazyInit ;
19+ use queue:: IpiEventQueue ;
20+
21+ use crate :: event:: { Callback , MulticastCallback } ;
1322
1423mod event;
1524mod queue;
25+ #[ cfg( feature = "smp" ) ]
26+ mod tlb;
1627
17- pub use event:: { Callback , MulticastCallback } ;
18- use queue:: IpiEventQueue ;
/// Set once the boot CPU has finished starting all secondary CPUs.
///
/// Readers use this to tell whether it is meaningful to target other CPUs
/// with IPIs yet.
static SECONDARY_CPUS_STARTED: AtomicBool = AtomicBool::new(false);

/// Marks secondary-CPU startup as complete.
///
/// `Release` ordering publishes everything done before this call to any
/// CPU that later observes `true` via [`secondary_cpus_ready`].
pub fn start_secondary_cpus_done() {
    SECONDARY_CPUS_STARTED.store(true, Ordering::Release);
}

/// Returns `true` once all secondary CPUs have been started.
///
/// The `Acquire` load pairs with the `Release` store in
/// [`start_secondary_cpus_done`].
pub fn secondary_cpus_ready() -> bool {
    SECONDARY_CPUS_STARTED.load(Ordering::Acquire)
}
1937
/// Per-CPU queue of pending IPI events, lazily initialized (in `init()`).
/// Remote CPUs push into it via `remote_ref_raw`; the owning CPU drains it
/// in `ipi_handler`.
#[percpu::def_percpu]
static IPI_EVENT_QUEUE: LazyInit<SpinNoIrq<IpiEventQueue>> = LazyInit::new();
@@ -28,34 +46,164 @@ pub fn init() {
2846}
2947
3048/// Executes a callback on the specified destination CPU via IPI.
31- pub fn run_on_cpu < T : Into < Callback > > ( dest_cpu : usize , callback : T ) {
32- info ! ( "Send IPI event to CPU {dest_cpu}" ) ;
49+ pub fn run_on_cpu < T : Into < Callback > > ( name : & ' static str , dest_cpu : usize , callback : T , wait : bool ) {
50+ info ! ( "Send IPI event to CPU {}" , dest_cpu ) ;
3351 if dest_cpu == this_cpu_id ( ) {
3452 // Execute callback on current CPU immediately
3553 callback. into ( ) . call ( ) ;
3654 } else {
55+ let done_flag = if wait {
56+ Some ( Arc :: new ( AtomicBool :: new ( false ) ) )
57+ } else {
58+ None
59+ } ;
3760 unsafe { IPI_EVENT_QUEUE . remote_ref_raw ( dest_cpu) }
3861 . lock ( )
39- . push ( this_cpu_id ( ) , callback. into ( ) ) ;
62+ . push ( name , this_cpu_id ( ) , callback. into ( ) , done_flag . clone ( ) ) ;
4063 axhal:: irq:: send_ipi ( IPI_IRQ , IpiTarget :: Other { cpu_id : dest_cpu } ) ;
64+ if wait {
65+ if let Some ( df) = done_flag {
66+ while !df. load ( Ordering :: Acquire ) {
67+ core:: hint:: spin_loop ( ) ;
68+ }
69+ }
70+ }
71+ }
72+ }
73+
74+ pub fn run_on_bitmask_except_self < T : Into < MulticastCallback > > (
75+ name : & ' static str ,
76+ callback : T ,
77+ cpu_mask : AxCpuMask ,
78+ wait : bool ,
79+ ) {
80+ let current_cpu_id = this_cpu_id ( ) ;
81+ let cpu_num = axconfig:: plat:: CPU_NUM ;
82+ let callback = callback. into ( ) ;
83+
84+ let mut done_flags: Vec < Arc < AtomicBool > > = Vec :: new ( ) ;
85+
86+ for cpu_id in 0 ..cpu_num {
87+ if cpu_id != current_cpu_id && cpu_mask. get ( cpu_id) {
88+ let done_flag = if wait {
89+ Some ( Arc :: new ( AtomicBool :: new ( false ) ) )
90+ } else {
91+ None
92+ } ;
93+ if let Some ( df) = & done_flag {
94+ done_flags. push ( df. clone ( ) ) ;
95+ }
96+
97+ unsafe { IPI_EVENT_QUEUE . remote_ref_raw ( cpu_id) }
98+ . lock ( )
99+ . push (
100+ name,
101+ current_cpu_id,
102+ callback. clone ( ) . into_unicast ( ) ,
103+ done_flag,
104+ ) ;
105+ }
106+ }
107+ if done_flags. is_empty ( ) {
108+ return ;
109+ }
110+ for cpu_id in 0 ..cpu_num {
111+ if cpu_id != current_cpu_id && cpu_mask. get ( cpu_id) {
112+ axhal:: irq:: send_ipi ( IPI_IRQ , IpiTarget :: Other { cpu_id } ) ;
113+ }
114+ }
115+ if wait {
116+ for df in done_flags {
117+ while !df. load ( Ordering :: Acquire ) {
118+ core:: hint:: spin_loop ( ) ;
119+ }
120+ }
121+ }
122+ }
123+
124+ pub fn run_on_each_cpu_except_self < T : Into < MulticastCallback > > (
125+ name : & ' static str ,
126+ callback : T ,
127+ wait : bool ,
128+ ) {
129+ let current_cpu_id = this_cpu_id ( ) ;
130+ let cpu_num = axconfig:: plat:: CPU_NUM ;
131+ let callback = callback. into ( ) ;
132+
133+ let mut done_flags: Vec < Arc < AtomicBool > > = Vec :: new ( ) ;
134+
135+ // Push the callback to all other CPUs' IPI event queues
136+ for cpu_id in 0 ..cpu_num {
137+ if cpu_id != current_cpu_id {
138+ let done_flag = if wait {
139+ Some ( Arc :: new ( AtomicBool :: new ( false ) ) )
140+ } else {
141+ None
142+ } ;
143+ if let Some ( df) = & done_flag {
144+ done_flags. push ( df. clone ( ) ) ;
145+ }
146+
147+ unsafe { IPI_EVENT_QUEUE . remote_ref_raw ( cpu_id) }
148+ . lock ( )
149+ . push (
150+ name,
151+ current_cpu_id,
152+ callback. clone ( ) . into_unicast ( ) ,
153+ done_flag,
154+ ) ;
155+ }
156+ }
157+ if done_flags. is_empty ( ) {
158+ return ;
159+ }
160+ // Send IPI to all other CPUs to trigger their callbacks
161+ axhal:: irq:: send_ipi (
162+ IPI_IRQ ,
163+ IpiTarget :: AllExceptCurrent {
164+ cpu_id : current_cpu_id,
165+ cpu_num,
166+ } ,
167+ ) ;
168+ if wait {
169+ for df in done_flags {
170+ while !df. load ( Ordering :: Acquire ) {
171+ core:: hint:: spin_loop ( ) ;
172+ }
173+ }
41174 }
42175}
43176
44177/// Executes a callback on all other CPUs via IPI.
45- pub fn run_on_each_cpu < T : Into < MulticastCallback > > ( callback : T ) {
178+ pub fn run_on_each_cpu < T : Into < MulticastCallback > > ( name : & ' static str , callback : T , wait : bool ) {
46179 info ! ( "Send IPI event to all other CPUs" ) ;
47180 let current_cpu_id = this_cpu_id ( ) ;
48181 let cpu_num = axconfig:: plat:: CPU_NUM ;
49182 let callback = callback. into ( ) ;
50183
51184 // Execute callback on current CPU immediately
52185 callback. clone ( ) . call ( ) ;
186+
187+ let mut done_flags: Vec < Arc < AtomicBool > > = Vec :: new ( ) ;
53188 // Push the callback to all other CPUs' IPI event queues
54189 for cpu_id in 0 ..cpu_num {
55190 if cpu_id != current_cpu_id {
191+ let done_flag = if wait {
192+ Some ( Arc :: new ( AtomicBool :: new ( false ) ) )
193+ } else {
194+ None
195+ } ;
196+ if let Some ( df) = & done_flag {
197+ done_flags. push ( df. clone ( ) ) ;
198+ }
56199 unsafe { IPI_EVENT_QUEUE . remote_ref_raw ( cpu_id) }
57200 . lock ( )
58- . push ( current_cpu_id, callback. clone ( ) . into_unicast ( ) ) ;
201+ . push (
202+ name,
203+ current_cpu_id,
204+ callback. clone ( ) . into_unicast ( ) ,
205+ done_flag,
206+ ) ;
59207 }
60208 }
61209 // Send IPI to all other CPUs to trigger their callbacks
@@ -66,15 +214,27 @@ pub fn run_on_each_cpu<T: Into<MulticastCallback>>(callback: T) {
66214 cpu_num,
67215 } ,
68216 ) ;
217+ if wait {
218+ for df in done_flags {
219+ while !df. load ( Ordering :: Acquire ) {
220+ core:: hint:: spin_loop ( ) ;
221+ }
222+ }
223+ }
69224}
70225
71- /// The handler for IPI events. It retrieves the events from the queue and calls the corresponding callbacks.
226+ /// The handler for IPI events. It retrieves the events from the queue and calls
227+ /// the corresponding callbacks.
72228pub fn ipi_handler ( ) {
73- while let Some ( ( src_cpu_id, callback) ) = unsafe { IPI_EVENT_QUEUE . current_ref_mut_raw ( ) }
74- . lock ( )
75- . pop_one ( )
229+ while let Some ( ( _name, src_cpu_id, callback, done) ) =
230+ unsafe { IPI_EVENT_QUEUE . current_ref_raw ( ) }
231+ . lock ( )
232+ . pop_one ( )
76233 {
77234 debug ! ( "Received IPI event from CPU {src_cpu_id}" ) ;
78235 callback. call ( ) ;
236+ if let Some ( done) = done {
237+ done. store ( true , Ordering :: Release ) ;
238+ }
79239 }
80240}
0 commit comments