Commit 9b8c0eb

Rollup merge of #77657 - fusion-engineering-forks:cleanup-cloudabi-sync, r=dtolnay
Cleanup cloudabi mutexes and condvars

This gets rid of a lot of unnecessary unsafety. All the AtomicU32s were wrapped in UnsafeCell or UnsafeCell&lt;MaybeUninit&gt;, and raw pointers were used to get at the AtomicU32 inside. This change cleans that up by using AtomicU32 directly. It also replaces an UnsafeCell&lt;u32&gt; with the safer Cell&lt;u32&gt;.

@rustbot modify labels: +C-cleanup
2 parents b183ef2 + b3be11e commit 9b8c0eb
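
For context, the key observation behind the cleanup: AtomicU32 already provides interior mutability through a shared reference, so wrapping it in UnsafeCell and reaching in with raw pointers adds unsafety without adding capability. A minimal before/after sketch; OldCondvar, NewCondvar, and has_waiters are illustrative names, and 0 stands in for abi::CONDVAR_HAS_NO_WAITERS.0:

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicU32, Ordering};

// Before: the atomic hidden behind UnsafeCell, forcing raw-pointer access.
struct OldCondvar {
    condvar: UnsafeCell<AtomicU32>,
}

impl OldCondvar {
    unsafe fn has_waiters(&self) -> bool {
        // Must reach the AtomicU32 through the raw pointer from UnsafeCell::get().
        unsafe { (*self.condvar.get()).load(Ordering::Relaxed) != 0 }
    }
}

// After: the atomic itself is the interior-mutability primitive; access is safe.
struct NewCondvar {
    condvar: AtomicU32,
}

impl NewCondvar {
    fn has_waiters(&self) -> bool {
        self.condvar.load(Ordering::Relaxed) != 0
    }
}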

3 files changed: +65 -77 lines changed

library/std/src/sys/cloudabi/condvar.rs

+18 -23
@@ -1,4 +1,3 @@
-use crate::cell::UnsafeCell;
 use crate::mem;
 use crate::sync::atomic::{AtomicU32, Ordering};
 use crate::sys::cloudabi::abi;
@@ -12,37 +11,36 @@ extern "C" {
 }
 
 pub struct Condvar {
-    condvar: UnsafeCell<AtomicU32>,
+    condvar: AtomicU32,
 }
 
 pub type MovableCondvar = Condvar;
 
 unsafe impl Send for Condvar {}
 unsafe impl Sync for Condvar {}
 
-const NEW: Condvar =
-    Condvar { condvar: UnsafeCell::new(AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0)) };
-
 impl Condvar {
     pub const fn new() -> Condvar {
-        NEW
+        Condvar { condvar: AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0) }
     }
 
     pub unsafe fn init(&mut self) {}
 
     pub unsafe fn notify_one(&self) {
-        let condvar = self.condvar.get();
-        if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
-            let ret = abi::condvar_signal(condvar as *mut abi::condvar, abi::scope::PRIVATE, 1);
+        if self.condvar.load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
+            let ret = abi::condvar_signal(
+                &self.condvar as *const AtomicU32 as *mut abi::condvar,
+                abi::scope::PRIVATE,
+                1,
+            );
             assert_eq!(ret, abi::errno::SUCCESS, "Failed to signal on condition variable");
         }
     }
 
     pub unsafe fn notify_all(&self) {
-        let condvar = self.condvar.get();
-        if (*condvar).load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
+        if self.condvar.load(Ordering::Relaxed) != abi::CONDVAR_HAS_NO_WAITERS.0 {
             let ret = abi::condvar_signal(
-                condvar as *mut abi::condvar,
+                &self.condvar as *const AtomicU32 as *mut abi::condvar,
                 abi::scope::PRIVATE,
                 abi::nthreads::MAX,
             );
@@ -53,20 +51,19 @@ impl Condvar {
     pub unsafe fn wait(&self, mutex: &Mutex) {
         let mutex = mutex::raw(mutex);
         assert_eq!(
-            (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+            mutex.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
             __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
             "This lock is not write-locked by this thread"
         );
 
         // Call into the kernel to wait on the condition variable.
-        let condvar = self.condvar.get();
         let subscription = abi::subscription {
             type_: abi::eventtype::CONDVAR,
             union: abi::subscription_union {
                 condvar: abi::subscription_condvar {
-                    condvar: condvar as *mut abi::condvar,
+                    condvar: &self.condvar as *const AtomicU32 as *mut abi::condvar,
                     condvar_scope: abi::scope::PRIVATE,
-                    lock: mutex as *mut abi::lock,
+                    lock: mutex as *const AtomicU32 as *mut abi::lock,
                     lock_scope: abi::scope::PRIVATE,
                 },
             },
@@ -86,23 +83,22 @@ impl Condvar {
     pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
         let mutex = mutex::raw(mutex);
         assert_eq!(
-            (*mutex).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+            mutex.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
             __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
             "This lock is not write-locked by this thread"
        );
 
         // Call into the kernel to wait on the condition variable.
-        let condvar = self.condvar.get();
         let timeout =
             checked_dur2intervals(&dur).expect("overflow converting duration to nanoseconds");
         let subscriptions = [
             abi::subscription {
                 type_: abi::eventtype::CONDVAR,
                 union: abi::subscription_union {
                     condvar: abi::subscription_condvar {
-                        condvar: condvar as *mut abi::condvar,
+                        condvar: &self.condvar as *const AtomicU32 as *mut abi::condvar,
                         condvar_scope: abi::scope::PRIVATE,
-                        lock: mutex as *mut abi::lock,
+                        lock: mutex as *const AtomicU32 as *mut abi::lock,
                         lock_scope: abi::scope::PRIVATE,
                     },
                 },
@@ -124,7 +120,7 @@ impl Condvar {
         let mut nevents: mem::MaybeUninit<usize> = mem::MaybeUninit::uninit();
         let ret = abi::poll(
             subscriptions.as_ptr(),
-            mem::MaybeUninit::first_ptr_mut(&mut events),
+            mem::MaybeUninit::slice_as_mut_ptr(&mut events),
             2,
             nevents.as_mut_ptr(),
         );
@@ -144,9 +140,8 @@ impl Condvar {
     }
 
     pub unsafe fn destroy(&self) {
-        let condvar = self.condvar.get();
         assert_eq!(
-            (*condvar).load(Ordering::Relaxed),
+            self.condvar.load(Ordering::Relaxed),
             abi::CONDVAR_HAS_NO_WAITERS.0,
             "Attempted to destroy a condition variable with blocked threads"
         );
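
A note on the cast that replaces UnsafeCell::get() throughout this file: going from &AtomicU32 to *mut abi::condvar is sound because atomics permit mutation through a shared reference, so handing the kernel a writable pointer does not violate aliasing. A compilable sketch of just that step, with KernelCondvar as a hypothetical stand-in for abi::condvar:

use std::sync::atomic::AtomicU32;

// Hypothetical stand-in for the kernel's condvar handle type (abi::condvar).
#[repr(C)]
struct KernelCondvar(u32);

// Shared reference -> const pointer -> mut pointer for the FFI boundary.
// Sound only because AtomicU32 may be mutated through `&self`; the kernel
// writing through this pointer is just another such mutation.
fn kernel_handle(c: &AtomicU32) -> *mut KernelCondvar {
    c as *const AtomicU32 as *mut KernelCondvar
}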

library/std/src/sys/cloudabi/mutex.rs

+25 -29
@@ -1,4 +1,4 @@
-use crate::cell::UnsafeCell;
+use crate::cell::Cell;
 use crate::mem;
 use crate::mem::MaybeUninit;
 use crate::sync::atomic::{AtomicU32, Ordering};
@@ -17,7 +17,7 @@ pub struct Mutex(RWLock);
 
 pub type MovableMutex = Mutex;
 
-pub unsafe fn raw(m: &Mutex) -> *mut AtomicU32 {
+pub unsafe fn raw(m: &Mutex) -> &AtomicU32 {
     rwlock::raw(&m.0)
 }
 
@@ -50,28 +50,23 @@ impl Mutex {
 }
 
 pub struct ReentrantMutex {
-    lock: UnsafeCell<MaybeUninit<AtomicU32>>,
-    recursion: UnsafeCell<MaybeUninit<u32>>,
+    lock: AtomicU32,
+    recursion: Cell<u32>,
 }
 
+unsafe impl Send for ReentrantMutex {}
+unsafe impl Sync for ReentrantMutex {}
+
 impl ReentrantMutex {
     pub const unsafe fn uninitialized() -> ReentrantMutex {
-        ReentrantMutex {
-            lock: UnsafeCell::new(MaybeUninit::uninit()),
-            recursion: UnsafeCell::new(MaybeUninit::uninit()),
-        }
+        ReentrantMutex { lock: AtomicU32::new(abi::LOCK_UNLOCKED.0), recursion: Cell::new(0) }
     }
 
-    pub unsafe fn init(&self) {
-        *self.lock.get() = MaybeUninit::new(AtomicU32::new(abi::LOCK_UNLOCKED.0));
-        *self.recursion.get() = MaybeUninit::new(0);
-    }
+    pub unsafe fn init(&self) {}
 
     pub unsafe fn try_lock(&self) -> bool {
         // Attempt to acquire the lock.
-        let lock = (*self.lock.get()).as_mut_ptr();
-        let recursion = (*self.recursion.get()).as_mut_ptr();
-        if let Err(old) = (*lock).compare_exchange(
+        if let Err(old) = self.lock.compare_exchange(
             abi::LOCK_UNLOCKED.0,
             __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
             Ordering::Acquire,
@@ -80,22 +75,22 @@ impl ReentrantMutex {
             // If we fail to acquire the lock, it may be the case
             // that we've already acquired it and may need to recurse.
             if old & !abi::LOCK_KERNEL_MANAGED.0 == __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0 {
-                *recursion += 1;
+                self.recursion.set(self.recursion.get() + 1);
                 true
             } else {
                 false
             }
         } else {
             // Success.
-            assert_eq!(*recursion, 0, "Mutex has invalid recursion count");
+            assert_eq!(self.recursion.get(), 0, "Mutex has invalid recursion count");
             true
         }
     }
 
     pub unsafe fn lock(&self) {
         if !self.try_lock() {
             // Call into the kernel to acquire a write lock.
-            let lock = self.lock.get();
+            let lock = &self.lock as *const AtomicU32;
             let subscription = abi::subscription {
                 type_: abi::eventtype::LOCK_WRLOCK,
                 union: abi::subscription_union {
@@ -116,17 +111,17 @@ impl ReentrantMutex {
     }
 
     pub unsafe fn unlock(&self) {
-        let lock = (*self.lock.get()).as_mut_ptr();
-        let recursion = (*self.recursion.get()).as_mut_ptr();
         assert_eq!(
-            (*lock).load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
+            self.lock.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
             __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
             "This mutex is locked by a different thread"
         );
 
-        if *recursion > 0 {
-            *recursion -= 1;
-        } else if !(*lock)
+        let r = self.recursion.get();
+        if r > 0 {
+            self.recursion.set(r - 1);
+        } else if !self
+            .lock
             .compare_exchange(
                 __pthread_thread_id.0 | abi::LOCK_WRLOCKED.0,
                 abi::LOCK_UNLOCKED.0,
@@ -137,19 +132,20 @@ impl ReentrantMutex {
         {
             // Lock is managed by kernelspace. Call into the kernel
             // to unblock waiting threads.
-            let ret = abi::lock_unlock(lock as *mut abi::lock, abi::scope::PRIVATE);
+            let ret = abi::lock_unlock(
+                &self.lock as *const AtomicU32 as *mut abi::lock,
+                abi::scope::PRIVATE,
+            );
             assert_eq!(ret, abi::errno::SUCCESS, "Failed to unlock a mutex");
         }
     }
 
     pub unsafe fn destroy(&self) {
-        let lock = (*self.lock.get()).as_mut_ptr();
-        let recursion = (*self.recursion.get()).as_mut_ptr();
         assert_eq!(
-            (*lock).load(Ordering::Relaxed),
+            self.lock.load(Ordering::Relaxed),
             abi::LOCK_UNLOCKED.0,
             "Attempted to destroy locked mutex"
         );
-        assert_eq!(*recursion, 0, "Recursion counter invalid");
+        assert_eq!(self.recursion.get(), 0, "Recursion counter invalid");
     }
 }
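
One subtlety in this file: Cell&lt;u32&gt; is !Sync, so replacing UnsafeCell&lt;MaybeUninit&lt;u32&gt;&gt; with it is exactly why the new unsafe impl Send/Sync lines appear. The compiler cannot see that the counter is protected by the lock; that is a protocol invariant. A minimal model of the shape, under the assumption (as in the real code) that recursion is only touched while lock is held; ReentrantMutexModel is an illustrative name:

use std::cell::Cell;
use std::sync::atomic::AtomicU32;

struct ReentrantMutexModel {
    lock: AtomicU32,      // shared across threads; atomic ops are always safe
    recursion: Cell<u32>, // non-atomic, so this field alone makes the type !Sync
}

// SAFETY (assumed protocol invariant, mirroring the impls in the diff):
// `recursion` is only read or written by the thread currently holding `lock`,
// so the Cell is never accessed from two threads at once.
unsafe impl Send for ReentrantMutexModel {}
unsafe impl Sync for ReentrantMutexModel {}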
