Commit 93247a2

move page rounding logic into StackRestoreGuard and split mmap and non-mmap implementations
1 parent b219b65 commit 93247a2

File tree: 3 files changed (+173 −120 lines)

src/alloc_stack_restore_guard.rs (+47 lines)
@@ -0,0 +1,47 @@
+use crate::{get_stack_limit, set_stack_limit};
+
+pub struct StackRestoreGuard {
+    new_stack: *mut u8,
+    stack_bytes: usize,
+    old_stack_limit: Option<usize>,
+}
+
+const ALIGNMENT: usize = 16;
+
+impl StackRestoreGuard {
+    pub fn new(stack_bytes: usize) -> StackRestoreGuard {
+        // On these platforms we do not use stack guards. This is very unfortunate,
+        // but there is not much we can do about it without OS support.
+        // We simply allocate the requested size from the global allocator with a suitable
+        // alignment.
+        let stack_bytes = stack_bytes
+            .checked_add(ALIGNMENT - 1)
+            .expect("unreasonably large stack requested")
+            / ALIGNMENT
+            * ALIGNMENT;
+        let layout = std::alloc::Layout::from_size_align(stack_bytes, ALIGNMENT).unwrap();
+        let ptr = unsafe { std::alloc::alloc(layout) };
+        assert!(!ptr.is_null(), "unable to allocate stack");
+        StackRestoreGuard {
+            new_stack: ptr,
+            stack_bytes,
+            old_stack_limit: get_stack_limit(),
+        }
+    }
+
+    pub fn stack_area(&self) -> (*mut u8, usize) {
+        (self.new_stack, self.stack_bytes)
+    }
+}
+
+impl Drop for StackRestoreGuard {
+    fn drop(&mut self) {
+        unsafe {
+            std::alloc::dealloc(
+                self.new_stack,
+                std::alloc::Layout::from_size_align_unchecked(self.stack_bytes, ALIGNMENT),
+            );
+        }
+        set_stack_limit(self.old_stack_limit);
+    }
+}
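
The constructor above rounds the requested size up to the next multiple of ALIGNMENT while guarding against overflow. A minimal standalone sketch of that arithmetic (the round_up_to_alignment helper is illustrative only, not part of this commit):

const ALIGNMENT: usize = 16;

// Illustrative helper: round `n` up to the next multiple of ALIGNMENT,
// panicking on overflow just as the constructor's `expect` does.
fn round_up_to_alignment(n: usize) -> usize {
    n.checked_add(ALIGNMENT - 1)
        .expect("unreasonably large stack requested")
        / ALIGNMENT
        * ALIGNMENT
}

fn main() {
    assert_eq!(round_up_to_alignment(1), 16);  // small requests become one aligned chunk
    assert_eq!(round_up_to_alignment(16), 16); // exact multiples are unchanged
    assert_eq!(round_up_to_alignment(17), 32); // one byte over rounds up to the next multiple
}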

src/lib.rs (+19 −120 lines)
@@ -135,123 +135,30 @@ fn set_stack_limit(l: Option<usize>) {
 
 psm_stack_manipulation! {
     yes {
-        struct StackRestoreGuard {
-            new_stack: *mut std::ffi::c_void,
-            stack_bytes: usize,
-            old_stack_limit: Option<usize>,
-        }
-
-        impl StackRestoreGuard {
-            #[cfg(any(target_arch = "wasm32",target_os = "hermit"))]
-            unsafe fn new(stack_bytes: usize, _page_size: usize) -> StackRestoreGuard {
-                let layout = std::alloc::Layout::from_size_align(stack_bytes, 16).unwrap();
-                let ptr = std::alloc::alloc(layout);
-                assert!(!ptr.is_null(), "unable to allocate stack");
-                StackRestoreGuard {
-                    new_stack: ptr as *mut _,
-                    stack_bytes,
-                    old_stack_limit: get_stack_limit(),
-                }
-            }
-
-            #[cfg(not(any(target_arch = "wasm32",target_os = "hermit")))]
-            unsafe fn new(stack_bytes: usize, page_size: usize) -> StackRestoreGuard {
-                let new_stack = libc::mmap(
-                    std::ptr::null_mut(),
-                    stack_bytes,
-                    libc::PROT_NONE,
-                    libc::MAP_PRIVATE |
-                    libc::MAP_ANON,
-                    -1, // Some implementations assert fd = -1 if MAP_ANON is specified
-                    0
-                );
-                assert_ne!(
-                    new_stack,
-                    libc::MAP_FAILED,
-                    "mmap failed to allocate stack: {}",
-                    std::io::Error::last_os_error()
-                );
-                let guard = StackRestoreGuard {
-                    new_stack,
-                    stack_bytes,
-                    old_stack_limit: get_stack_limit(),
-                };
-                let above_guard_page = new_stack.add(page_size);
-                #[cfg(not(target_os = "openbsd"))]
-                let result = libc::mprotect(
-                    above_guard_page,
-                    stack_bytes - page_size,
-                    libc::PROT_READ | libc::PROT_WRITE
-                );
-                #[cfg(target_os = "openbsd")]
-                let result = if libc::mmap(
-                    above_guard_page,
-                    stack_bytes - page_size,
-                    libc::PROT_READ | libc::PROT_WRITE,
-                    libc::MAP_FIXED | libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_STACK,
-                    -1,
-                    0) == above_guard_page {
-                    0
-                } else {
-                    -1
-                };
-                assert_ne!(
-                    result,
-                    -1,
-                    "mprotect/mmap failed: {}",
-                    std::io::Error::last_os_error()
-                );
-                guard
-            }
-        }
+        #[cfg(not(any(target_arch = "wasm32", target_os = "hermit")))]
+        #[path = "mmap_stack_restore_guard.rs"]
+        mod stack_restore_guard;
 
-        impl Drop for StackRestoreGuard {
-            fn drop(&mut self) {
-                #[cfg(any(target_arch = "wasm32",target_os = "hermit"))]
-                unsafe {
-                    std::alloc::dealloc(
-                        self.new_stack as *mut u8,
-                        std::alloc::Layout::from_size_align_unchecked(self.stack_bytes, 16),
-                    );
-                }
-                #[cfg(not(any(target_arch = "wasm32",target_os = "hermit")))]
-                unsafe {
-                    // FIXME: check the error code and decide what to do with it.
-                    // Perhaps a debug_assertion?
-                    libc::munmap(self.new_stack, self.stack_bytes);
-                }
-                set_stack_limit(self.old_stack_limit);
-            }
-        }
+        #[cfg(any(target_arch = "wasm32", target_os = "hermit"))]
+        #[path = "alloc_stack_restore_guard.rs"]
+        mod stack_restore_guard;
 
-        fn _grow(stack_size: usize, callback: &mut dyn FnMut()) {
-            // Calculate a number of pages we want to allocate for the new stack.
-            // For maximum portability we want to produce a stack that is aligned to a page and has
-            // a size that’s a multiple of page size. Furthermore we want to allocate two extras pages
-            // for the stack guard. To achieve that we do our calculations in number of pages and
-            // convert to bytes last.
-            let page_size = page_size();
-            let requested_pages = stack_size
-                .checked_add(page_size - 1)
-                .expect("unreasonably large stack requested") / page_size;
-            let stack_pages = std::cmp::max(1, requested_pages) + 2;
-            let stack_bytes = stack_pages.checked_mul(page_size)
-                .expect("unreasonably large stack requested");
+        use stack_restore_guard::StackRestoreGuard;
 
-            // Next, there are a couple of approaches to how we allocate the new stack. We take the
-            // most obvious path and use `mmap`. We also `mprotect` a guard page into our
-            // allocation.
-            //
-            // We use a guard pattern to ensure we deallocate the allocated stack when we leave
-            // this function and also try to uphold various safety invariants required by `psm`
-            // (such as not unwinding from the callback we pass to it).
-            //
+        fn _grow(requested_stack_size: usize, callback: &mut dyn FnMut()) {
             // Other than that this code has no meaningful gotchas.
             unsafe {
-                let guard = StackRestoreGuard::new(stack_bytes, page_size);
-                let above_guard_page = guard.new_stack.add(page_size);
-                set_stack_limit(Some(above_guard_page as usize));
-                let panic = psm::on_stack(above_guard_page as *mut _, stack_size, move || {
+                // We use a guard pattern to ensure we deallocate the allocated stack when we leave
+                // this function and also try to uphold various safety invariants required by `psm`
+                // (such as not unwinding from the callback we pass to it).
+                // `StackRestoreGuard` allocates a memory area with suitable size and alignment.
+                // It also sets up stack guards if supported on the target.
+                let guard = StackRestoreGuard::new(requested_stack_size);
+                let (stack_base, allocated_stack_size) = guard.stack_area();
+                debug_assert!(allocated_stack_size >= requested_stack_size);
+                set_stack_limit(Some(stack_base as usize));
+                // TODO should we not pass `allocated_stack_size` here?
+                let panic = psm::on_stack(stack_base, requested_stack_size, move || {
                     std::panic::catch_unwind(std::panic::AssertUnwindSafe(callback)).err()
                 });
                 drop(guard);
@@ -260,14 +167,6 @@ psm_stack_manipulation! {
                 }
             }
         }
-
-        fn page_size() -> usize {
-            // FIXME: consider caching the page size.
-            #[cfg(not(any(target_arch = "wasm32",target_os = "hermit")))]
-            unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as usize }
-            #[cfg(any(target_arch = "wasm32",target_os = "hermit"))]
-            { 65536 }
-        }
     }
 
     no {
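
The #[path] attribute in the added lines is what lets one module name, stack_restore_guard, be backed by either of the two new files depending on the target. A minimal sketch of the pattern, with hypothetical file names and assuming both files export the same public items:

// src/impl_unix.rs (hypothetical):     pub struct Thing; // platform-specific impl
// src/impl_fallback.rs (hypothetical): pub struct Thing; // portable fallback
// Exactly one cfg matches, so exactly one of the two files is compiled.
#[cfg(unix)]
#[path = "impl_unix.rs"]
mod imp;

#[cfg(not(unix))]
#[path = "impl_fallback.rs"]
mod imp;

// The rest of the crate names the type once, oblivious to which file backs it.
pub use imp::Thing;

This is why _grow in lib.rs can simply write use stack_restore_guard::StackRestoreGuard; without any further cfg dispatch of its own.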

src/mmap_stack_restore_guard.rs (+107 lines)
@@ -0,0 +1,107 @@
+use crate::{get_stack_limit, set_stack_limit};
+
+pub struct StackRestoreGuard {
+    mapping: *mut u8,
+    size_with_guard: usize,
+    page_size: usize,
+    old_stack_limit: Option<usize>,
+}
+
+impl StackRestoreGuard {
+    pub fn new(requested_size: usize) -> StackRestoreGuard {
+        // For maximum portability we want to produce a stack that is aligned to a page and has
+        // a size that’s a multiple of the page size. It is natural to use `mmap` to allocate
+        // these pages. Furthermore, we want to allocate two extra pages for the stack guard.
+        // To achieve that we do our calculations in number of pages and convert to bytes last.
+        let page_size = page_size();
+        let requested_pages = requested_size
+            .checked_add(page_size - 1)
+            .expect("unreasonably large stack requested")
+            / page_size;
+        let page_count_with_guard = std::cmp::max(1, requested_pages) + 2;
+        let size_with_guard = page_count_with_guard
+            .checked_mul(page_size)
+            .expect("unreasonably large stack requested");
+
+        // Next, there are a couple of approaches to how we allocate the new stack. If it is
+        // available, we take the most obvious path and use `mmap`.
+        unsafe {
+            let new_stack = libc::mmap(
+                std::ptr::null_mut(),
+                size_with_guard,
+                libc::PROT_NONE,
+                libc::MAP_PRIVATE | libc::MAP_ANON,
+                -1, // Some implementations assert fd = -1 if MAP_ANON is specified
+                0,
+            );
+            assert_ne!(
+                new_stack,
+                libc::MAP_FAILED,
+                "mmap failed to allocate stack: {}",
+                std::io::Error::last_os_error()
+            );
+            let guard = StackRestoreGuard {
+                mapping: new_stack as *mut u8,
+                page_size,
+                size_with_guard,
+                old_stack_limit: get_stack_limit(),
+            };
+            // We leave a guard page without read/write access in our allocation.
+            // TODO we allocated two extra pages for guard pages, but here we only use one?
+            let above_guard_page = new_stack.add(page_size);
+            #[cfg(not(target_os = "openbsd"))]
+            let result = libc::mprotect(
+                above_guard_page,
+                size_with_guard - page_size,
+                libc::PROT_READ | libc::PROT_WRITE,
+            );
+            #[cfg(target_os = "openbsd")]
+            let result = if libc::mmap(
+                above_guard_page,
+                size_with_guard - page_size,
+                libc::PROT_READ | libc::PROT_WRITE,
+                libc::MAP_FIXED | libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_STACK,
+                -1,
+                0,
+            ) == above_guard_page
+            {
+                0
+            } else {
+                -1
+            };
+            assert_ne!(
+                result,
+                -1,
+                "mprotect/mmap failed: {}",
+                std::io::Error::last_os_error()
+            );
+            guard
+        }
+    }
+
+    // TODO this should return a *mut [u8], but pointer slices only got proper support with Rust 1.79.
+    pub fn stack_area(&self) -> (*mut u8, usize) {
+        unsafe {
+            (
+                self.mapping.add(self.page_size),
+                self.size_with_guard - self.page_size,
+            )
+        }
+    }
+}
+
+impl Drop for StackRestoreGuard {
+    fn drop(&mut self) {
+        unsafe {
+            // FIXME: check the error code and decide what to do with it.
+            // Perhaps a debug_assertion?
+            libc::munmap(self.mapping as *mut std::ffi::c_void, self.size_with_guard);
+        }
+        set_stack_limit(self.old_stack_limit);
+    }
+}
+
+fn page_size() -> usize {
+    // FIXME: consider caching the page size.
+    unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as usize }
+}
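
The page arithmetic in new reserves at least one usable page and then adds two pages on top for the guard (of which, per the TODO above, only one is currently left PROT_NONE). A standalone sketch of the same computation, with a 4 KiB page size assumed purely for illustration where the real code queries sysconf:

// Illustrative only: mirrors the size computation from `StackRestoreGuard::new`
// with a hard-coded 4 KiB page size instead of a `page_size()` lookup.
fn size_with_guard(requested_size: usize) -> usize {
    let page_size = 4096; // assumption for this example
    let requested_pages = requested_size
        .checked_add(page_size - 1)
        .expect("unreasonably large stack requested")
        / page_size;
    let page_count_with_guard = std::cmp::max(1, requested_pages) + 2;
    page_count_with_guard
        .checked_mul(page_size)
        .expect("unreasonably large stack requested")
}

fn main() {
    // Even a zero-byte request maps one usable page plus two guard pages.
    assert_eq!(size_with_guard(0), 3 * 4096);
    // A request of exactly one page fits without growing the usable area.
    assert_eq!(size_with_guard(4096), 3 * 4096);
    // One byte past a page boundary rounds up to the next whole page.
    assert_eq!(size_with_guard(4097), 4 * 4096);
}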
