diff --git a/src/libcore/alloc.rs b/src/libcore/alloc.rs index 1b06baeb711c2..0824f772accd5 100644 --- a/src/libcore/alloc.rs +++ b/src/libcore/alloc.rs @@ -1,7 +1,5 @@ //! Memory allocation APIs -// ignore-tidy-undocumented-unsafe - #![stable(feature = "alloc_module", since = "1.28.0")] use crate::cmp; @@ -88,6 +86,7 @@ impl Layout { return Err(LayoutErr { private: () }); } + // SAFETY: performed checks above unsafe { Ok(Layout::from_size_align_unchecked(size, align)) } @@ -120,11 +119,11 @@ impl Layout { #[inline] pub fn new() -> Self { let (size, align) = size_align::(); - // Note that the align is guaranteed by rustc to be a power of two and + debug_assert!(Layout::from_size_align(size, align).is_ok()); + // SAFETY: Note that the align is guaranteed by rustc to be a power of two and // the size+align combo is guaranteed to fit in our address space. As a // result use the unchecked constructor here to avoid inserting code // that panics if it isn't optimized well enough. - debug_assert!(Layout::from_size_align(size, align).is_ok()); unsafe { Layout::from_size_align_unchecked(size, align) } @@ -137,8 +136,8 @@ impl Layout { #[inline] pub fn for_value(t: &T) -> Self { let (size, align) = (mem::size_of_val(t), mem::align_of_val(t)); - // See rationale in `new` for why this us using an unsafe variant below debug_assert!(Layout::from_size_align(size, align).is_ok()); + // SAFETY: See rationale in `new` for why this is using an unsafe variant below unsafe { Layout::from_size_align_unchecked(size, align) } @@ -243,9 +242,9 @@ impl Layout { let alloc_size = padded_size.checked_mul(n) .ok_or(LayoutErr { private: () })?; + // SAFETY: `self.align` is already known to be valid and `alloc_size` has been + // padded already. unsafe { - // self.align is already known to be valid and alloc_size has been - // padded already. Ok((Layout::from_size_align_unchecked(alloc_size, self.align()), padded_size)) } } @@ -1074,6 +1073,7 @@ pub unsafe trait Alloc { { let k = Layout::new::(); if k.size() > 0 { + // SAFETY: layout has nonzero size unsafe { self.alloc(k).map(|p| p.cast()) } } else { Err(AllocErr) @@ -1143,6 +1143,7 @@ pub unsafe trait Alloc { { match Layout::array::(n) { Ok(ref layout) if layout.size() > 0 => { + // SAFETY: layout has nonzero size unsafe { self.alloc(layout.clone()).map(|p| p.cast()) } diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 87d8e7aff058d..4943067891edb 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -187,8 +187,6 @@ //! ``` //!
-// ignore-tidy-undocumented-unsafe - #![stable(feature = "rust1", since = "1.0.0")] use crate::cmp::Ordering; @@ -369,6 +367,7 @@ impl Cell { if ptr::eq(self, other) { return; } + // SAFETY: not threadsafe, but it's OK since we know `Cell` isn't threadsafe unsafe { ptr::swap(self.value.get(), other.value.get()); } @@ -388,6 +387,7 @@ impl Cell { /// ``` #[stable(feature = "move_cell", since = "1.17.0")] pub fn replace(&self, val: T) -> T { + // SAFETY: not threadsafe, but it's OK since we know `Cell` isn't threadsafe mem::replace(unsafe { &mut *self.value.get() }, val) } @@ -424,6 +424,7 @@ impl Cell { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn get(&self) -> T { + // SAFETY: not threadsafe, but it's OK since we know `Cell` isn't threadsafe unsafe{ *self.value.get() } } @@ -491,6 +492,7 @@ impl Cell { #[inline] #[stable(feature = "cell_get_mut", since = "1.11.0")] pub fn get_mut(&mut self) -> &mut T { + // SAFETY: not threadsafe, but it's OK since we know `Cell` isn't threadsafe unsafe { &mut *self.value.get() } @@ -512,6 +514,7 @@ impl Cell { #[inline] #[stable(feature = "as_cell", since = "1.37.0")] pub fn from_mut(t: &mut T) -> &Cell { + // SAFETY: `&mut` ensures unique access unsafe { &*(t as *mut T as *const Cell) } @@ -557,6 +560,7 @@ impl Cell<[T]> { /// ``` #[stable(feature = "as_cell", since = "1.37.0")] pub fn as_slice_of_cells(&self) -> &[Cell] { + // SAFETY: `Cell` has the same memory layout as `T` unsafe { &*(self as *const Cell<[T]> as *const [Cell]) } @@ -825,6 +829,8 @@ impl RefCell { pub fn try_borrow(&self) -> Result, BorrowError> { match BorrowRef::new(&self.borrow) { Some(b) => Ok(Ref { + // SAFETY: `BorrowRef` ensures that there is only immutable access + // to the value while borrowed value: unsafe { &*self.value.get() }, borrow: b, }), @@ -903,6 +909,7 @@ impl RefCell { pub fn try_borrow_mut(&self) -> Result, BorrowMutError> { match BorrowRefMut::new(&self.borrow) { Some(b) => Ok(RefMut { + // SAFETY: `BorrowRefMut` guarantees unique access value: unsafe { &mut *self.value.get() }, borrow: b, }), @@ -954,6 +961,7 @@ impl RefCell { #[inline] #[stable(feature = "cell_get_mut", since = "1.11.0")] pub fn get_mut(&mut self) -> &mut T { + // SAFETY: `&mut` guarantees unique access unsafe { &mut *self.value.get() } diff --git a/src/libcore/fmt/float.rs b/src/libcore/fmt/float.rs index b52b56b1bdbc2..fe96ae9bb6e70 100644 --- a/src/libcore/fmt/float.rs +++ b/src/libcore/fmt/float.rs @@ -2,8 +2,6 @@ use crate::fmt::{Formatter, Result, LowerExp, UpperExp, Display, Debug}; use crate::mem::MaybeUninit; use crate::num::flt2dec; -// ignore-tidy-undocumented-unsafe - // Don't inline this so callers don't use the stack space this function // requires unless they have to.
#[inline(never)] @@ -11,6 +9,7 @@ fn float_to_decimal_common_exact(fmt: &mut Formatter<'_>, num: &T, sign: flt2dec::Sign, precision: usize) -> Result where T: flt2dec::DecodableFloat { + // SAFETY: possible undefined behavior, see comment unsafe { let mut buf = MaybeUninit::<[u8; 1024]>::uninit(); // enough for f32 and f64 let mut parts = MaybeUninit::<[flt2dec::Part<'_>; 4]>::uninit(); @@ -33,6 +32,7 @@ fn float_to_decimal_common_shortest(fmt: &mut Formatter<'_>, num: &T, sign: flt2dec::Sign, precision: usize) -> Result where T: flt2dec::DecodableFloat { + // SAFETY: possible undefined behavior, see comment unsafe { // enough for f32 and f64 let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninit(); @@ -73,6 +73,7 @@ fn float_to_exponential_common_exact(fmt: &mut Formatter<'_>, num: &T, upper: bool) -> Result where T: flt2dec::DecodableFloat { + // SAFETY: possible undefined behavior, see comment unsafe { let mut buf = MaybeUninit::<[u8; 1024]>::uninit(); // enough for f32 and f64 let mut parts = MaybeUninit::<[flt2dec::Part<'_>; 6]>::uninit(); @@ -92,6 +93,7 @@ fn float_to_exponential_common_shortest(fmt: &mut Formatter<'_>, upper: bool) -> Result where T: flt2dec::DecodableFloat { + // SAFETY: possible undefined behavior, see comment unsafe { // enough for f32 and f64 let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninit(); diff --git a/src/libcore/fmt/mod.rs b/src/libcore/fmt/mod.rs index 5a039144f667f..87b4aeb72b034 100644 --- a/src/libcore/fmt/mod.rs +++ b/src/libcore/fmt/mod.rs @@ -1,7 +1,5 @@ //! Utilities for formatting and printing strings. -// ignore-tidy-undocumented-unsafe - #![stable(feature = "rust1", since = "1.0.0")] use crate::cell::{UnsafeCell, Cell, RefCell, Ref, RefMut}; @@ -279,6 +277,7 @@ impl<'a> ArgumentV1<'a> { issue = "0")] pub fn new<'b, T>(x: &'b T, f: fn(&T, &mut Formatter<'_>) -> Result) -> ArgumentV1<'b> { + // SAFETY: relies on `T` being sized to avoid undefined behavior unsafe { ArgumentV1 { formatter: mem::transmute(f), @@ -296,6 +295,7 @@ impl<'a> ArgumentV1<'a> { fn as_usize(&self) -> Option { if self.formatter as usize == ArgumentV1::show_usize as usize { + // SAFETY: if the formatter is `show_usize`, it means it came in as `&usize` Some(unsafe { *(self.value as *const _ as *const usize) }) } else { None @@ -1355,6 +1355,8 @@ impl<'a> Formatter<'a> { let mut align = old_align; if self.sign_aware_zero_pad() { // a sign always goes first + // SAFETY: `formatted.sign` is always generated from `determine_sign` which is + // valid UTF-8 let sign = unsafe { str::from_utf8_unchecked(formatted.sign) }; self.buf.write_str(sign)?; @@ -1386,6 +1388,8 @@ impl<'a> Formatter<'a> { fn write_formatted_parts(&mut self, formatted: &flt2dec::Formatted<'_>) -> Result { fn write_bytes(buf: &mut dyn Write, s: &[u8]) -> Result { + // SAFETY: `formatted.sign` is always generated from `determine_sign` which is + // valid UTF-8 buf.write_str(unsafe { str::from_utf8_unchecked(s) }) } diff --git a/src/libcore/fmt/num.rs b/src/libcore/fmt/num.rs index 3c7aefc090f8e..efd815f30454f 100644 --- a/src/libcore/fmt/num.rs +++ b/src/libcore/fmt/num.rs @@ -1,8 +1,5 @@ //! 
Integer and floating-point number formatting -// ignore-tidy-undocumented-unsafe - - use crate::fmt; use crate::ops::{Div, Rem, Sub}; use crate::str; @@ -83,6 +80,8 @@ trait GenericRadix { } } let buf = &buf[curr..]; + // SAFETY: only chars in `buf` are created by `Self::digit` which are assumed to be + // valid UTF-8 let buf = unsafe { str::from_utf8_unchecked(slice::from_raw_parts( MaybeUninit::first_ptr(buf), buf.len() @@ -191,49 +190,63 @@ static DEC_DIGITS_LUT: &[u8; 200] = macro_rules! impl_Display { ($($t:ident),* as $u:ident via $conv_fn:ident named $name:ident) => { fn $name(mut n: $u, is_nonnegative: bool, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // 2^128 is about 3*10^38, so 39 gives an extra byte of space let mut buf = [MaybeUninit::::uninit(); 39]; let mut curr = buf.len() as isize; let buf_ptr = MaybeUninit::first_ptr_mut(&mut buf); let lut_ptr = DEC_DIGITS_LUT.as_ptr(); - unsafe { - // need at least 16 bits for the 4-characters-at-a-time to work. - assert!(crate::mem::size_of::<$u>() >= 2); + // need at least 16 bits for the 4-characters-at-a-time to work. + assert!(crate::mem::size_of::<$u>() >= 2); - // eagerly decode 4 characters at a time - while n >= 10000 { - let rem = (n % 10000) as isize; - n /= 10000; + // eagerly decode 4 characters at a time + while n >= 10000 { + let rem = (n % 10000) as isize; + n /= 10000; - let d1 = (rem / 100) << 1; - let d2 = (rem % 100) << 1; - curr -= 4; + let d1 = (rem / 100) << 1; + let d2 = (rem % 100) << 1; + curr -= 4; + // SAFETY: `d1`, `d2` are each max 198, so `buf_ptr[d1..d1 + 1]` is safe to access + unsafe { ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2); } + } - // if we reach here numbers are <= 9999, so at most 4 chars long - let mut n = n as isize; // possibly reduce 64bit math + // if we reach here numbers are <= 9999, so at most 4 chars long + let mut n = n as isize; // possibly reduce 64bit math - // decode 2 more chars, if > 2 chars - if n >= 100 { - let d1 = (n % 100) << 1; - n /= 100; - curr -= 2; + // decode 2 more chars, if > 2 chars + if n >= 100 { + let d1 = (n % 100) << 1; + n /= 100; + curr -= 2; + // SAFETY: `d1` is max 198, so `buf_ptr[d1..d1 + 1]` is safe to access + unsafe { ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); } + } - // decode last 1 or 2 chars - if n < 10 { - curr -= 1; + // decode last 1 or 2 chars + if n < 10 { + curr -= 1; + // SAFETY: `curr` is still less than `buf.len()` and since `n` < 10, `n + '0'` is + // valid UTF-8 + unsafe { *buf_ptr.offset(curr) = (n as u8) + b'0'; - } else { - let d1 = n << 1; - curr -= 2; + } + } else { + let d1 = n << 1; + curr -= 2; + // SAFETY: `d1` is max 18, so `buf_ptr[d1..d1 + 1]` is safe to access + unsafe { ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); } } + // SAFETY: `curr` > 0 (since we made `buf` large enough), and all the chars are valid + // UTF-8 since `DEC_DIGITS_LUT` is let buf_slice = unsafe { str::from_utf8_unchecked( slice::from_raw_parts(buf_ptr.offset(curr), buf.len() - curr as usize)) diff --git a/src/libcore/hash/mod.rs b/src/libcore/hash/mod.rs index 0082363692df6..4e05789e8537c 100644 --- a/src/libcore/hash/mod.rs +++ b/src/libcore/hash/mod.rs @@ -79,8 +79,6 @@ //! } //! 
``` -// ignore-tidy-undocumented-unsafe - #![stable(feature = "rust1", since = "1.0.0")] use crate::fmt; @@ -569,6 +567,8 @@ mod impls { fn hash_slice(data: &[$ty], state: &mut H) { let newlen = data.len() * mem::size_of::<$ty>(); let ptr = data.as_ptr() as *const u8; + // SAFETY: all of the requirements for `from_raw_parts` are guaranteed since + // `data` is a slice state.write(unsafe { slice::from_raw_parts(ptr, newlen) }) } } @@ -688,7 +688,7 @@ mod impls { // Thin pointer state.write_usize(*self as *const () as usize); } else { - // Fat pointer + // SAFETY: since it's not a thin pointer, it's a fat pointer let (a, b) = unsafe { *(self as *const Self as *const (usize, usize)) }; @@ -705,7 +705,7 @@ mod impls { // Thin pointer state.write_usize(*self as *const () as usize); } else { - // Fat pointer + // SAFETY: since it's not a thin pointer, it's a fat pointer let (a, b) = unsafe { *(self as *const Self as *const (usize, usize)) }; diff --git a/src/libcore/hash/sip.rs b/src/libcore/hash/sip.rs index 194d9e6e2f8ad..279707d58b7bd 100644 --- a/src/libcore/hash/sip.rs +++ b/src/libcore/hash/sip.rs @@ -1,7 +1,5 @@ //! An implementation of SipHash. -// ignore-tidy-undocumented-unsafe - #![allow(deprecated)] // the types in this module are deprecated use crate::marker::PhantomData; @@ -222,8 +220,10 @@ impl Hasher { let needed = 8 - self.ntail; let fill = cmp::min(length, needed); if fill == 8 { + // SAFETY: `msg` has exactly `sizeof(u64)` bytes self.tail = unsafe { load_int_le!(msg, 0, u64) }; } else { + // SAFETY: fill < 7 self.tail |= unsafe { u8to64_le(msg, 0, fill) } << (8 * self.ntail); if length < needed { self.ntail += length; @@ -236,6 +236,7 @@ impl Hasher { // Buffered tail is now flushed, process new input. self.ntail = length - needed; + // SAFETY: self.ntail + needed - 1 = length - 1 < 8 self.tail = unsafe { u8to64_le(msg, needed, self.ntail) }; } } @@ -270,6 +271,7 @@ impl super::Hasher for Hasher { // see short_write comment for explanation #[inline] fn write_usize(&mut self, i: usize) { + // SAFETY: `bytes` leaves scope as `i` does let bytes = unsafe { crate::slice::from_raw_parts(&i as *const usize as *const u8, mem::size_of::()) }; @@ -291,6 +293,7 @@ impl super::Hasher for Hasher { if self.ntail != 0 { needed = 8 - self.ntail; + // SAFETY: `needed < 8` since `self.ntail != 0` self.tail |= unsafe { u8to64_le(msg, 0, cmp::min(length, needed)) } << 8 * self.ntail; if length < needed { self.ntail += length; @@ -309,6 +312,7 @@ impl super::Hasher for Hasher { let mut i = needed; while i < len - left { + // SAFETY: i + 8 <= length let mi = unsafe { load_int_le!(msg, i, u64) }; self.state.v3 ^= mi; @@ -318,6 +322,7 @@ impl super::Hasher for Hasher { i += 8; } + // SAFETY: left < 8 self.tail = unsafe { u8to64_le(msg, i, left) }; self.ntail = left; } diff --git a/src/libcore/hint.rs b/src/libcore/hint.rs index f68a3e5a76fd7..81d14a6560864 100644 --- a/src/libcore/hint.rs +++ b/src/libcore/hint.rs @@ -2,8 +2,6 @@ //! Hints to compiler that affects how code should be emitted or optimized. 
-// ignore-tidy-undocumented-unsafe - use crate::intrinsics; /// Informs the compiler that this point in the code is not reachable, enabling @@ -71,10 +69,12 @@ pub fn spin_loop() { ) )] { #[cfg(target_arch = "x86")] { + // SAFETY: technically does nothing unsafe { crate::arch::x86::_mm_pause() }; } #[cfg(target_arch = "x86_64")] { + // SAFETY: technically does nothing unsafe { crate::arch::x86_64::_mm_pause() }; } } @@ -86,9 +86,11 @@ pub fn spin_loop() { ) )] { #[cfg(target_arch = "aarch64")] { + // SAFETY: hardware hint unsafe { crate::arch::aarch64::__yield() }; } #[cfg(target_arch = "arm")] { + // SAFETY: hardware hint unsafe { crate::arch::arm::__yield() }; } } @@ -116,6 +118,7 @@ pub fn black_box(dummy: T) -> T { // this. LLVM's intepretation of inline assembly is that it's, well, a black // box. This isn't the greatest implementation since it probably deoptimizes // more than we want, but it's so far good enough. + // SAFETY: compiler hint unsafe { asm!("" : : "r"(&dummy)); return dummy; diff --git a/src/libcore/iter/adapters/zip.rs b/src/libcore/iter/adapters/zip.rs index 14d9d5499b880..ece4d3ceb8ea7 100644 --- a/src/libcore/iter/adapters/zip.rs +++ b/src/libcore/iter/adapters/zip.rs @@ -1,5 +1,3 @@ -// ignore-tidy-undocumented-unsafe - use crate::cmp; use super::super::{Iterator, DoubleEndedIterator, ExactSizeIterator, FusedIterator, TrustedLen}; @@ -165,11 +163,13 @@ impl ZipImpl for Zip if self.index < self.len { let i = self.index; self.index += 1; + // SAFETY: checked that `i < min(a.len(), b.len())` unsafe { Some((self.a.get_unchecked(i), self.b.get_unchecked(i))) } } else if A::may_have_side_effect() && self.index < self.a.len() { // match the base implementation's potential side effects + // SAFETY: checked that `index < a.len()` unsafe { self.a.get_unchecked(self.index); } @@ -194,9 +194,11 @@ impl ZipImpl for Zip let i = self.index; self.index += 1; if A::may_have_side_effect() { + // SAFETY: i < end < self.len unsafe { self.a.get_unchecked(i); } } if B::may_have_side_effect() { + // SAFETY: i < end < self.len unsafe { self.b.get_unchecked(i); } } } @@ -229,6 +231,7 @@ impl ZipImpl for Zip if self.index < self.len { self.len -= 1; let i = self.len; + // SAFETY: i < min(a.len(), b.len()) unsafe { Some((self.a.get_unchecked(i), self.b.get_unchecked(i))) } diff --git a/src/libcore/mem/maybe_uninit.rs b/src/libcore/mem/maybe_uninit.rs index d35a5ce57fe9f..8cd0d46465775 100644 --- a/src/libcore/mem/maybe_uninit.rs +++ b/src/libcore/mem/maybe_uninit.rs @@ -1,8 +1,6 @@ use crate::intrinsics; use crate::mem::ManuallyDrop; -// ignore-tidy-undocumented-unsafe - /// A wrapper type to construct uninitialized instances of `T`. 
/// /// # Initialization invariant @@ -292,6 +290,7 @@ impl MaybeUninit { #[unstable(feature = "maybe_uninit_uninit_array", issue = "0")] #[inline(always)] pub fn uninit_array() -> [Self; LEN] { + // SAFETY: see type-level documentation unsafe { MaybeUninit::<[MaybeUninit; LEN]>::uninit().assume_init() } @@ -341,6 +340,7 @@ impl MaybeUninit { #[inline] pub fn zeroed() -> MaybeUninit { let mut u = MaybeUninit::::uninit(); + // SAFETY: depends on `T`, see above comment unsafe { u.as_mut_ptr().write_bytes(0u8, 1); } @@ -354,6 +354,7 @@ impl MaybeUninit { #[unstable(feature = "maybe_uninit_extra", issue = "63567")] #[inline(always)] pub fn write(&mut self, val: T) -> &mut T { + // SAFETY: initializes field, and returns reference to the value unsafe { self.value = ManuallyDrop::new(val); self.get_mut() @@ -394,6 +395,7 @@ impl MaybeUninit { #[stable(feature = "maybe_uninit", since = "1.36.0")] #[inline(always)] pub fn as_ptr(&self) -> *const T { + // SAFETY: unsafe if uninitialized unsafe { &*self.value as *const T } } @@ -431,6 +433,7 @@ impl MaybeUninit { #[stable(feature = "maybe_uninit", since = "1.36.0")] #[inline(always)] pub fn as_mut_ptr(&mut self) -> *mut T { + // SAFETY: unsafe if uninitialized unsafe { &mut *self.value as *mut T } } diff --git a/src/libcore/option.rs b/src/libcore/option.rs index 958f31c0fd22a..bf10a994b4940 100644 --- a/src/libcore/option.rs +++ b/src/libcore/option.rs @@ -133,8 +133,6 @@ //! [`Box`]: ../../std/boxed/struct.Box.html //! [`i32`]: ../../std/primitive.i32.html -// ignore-tidy-undocumented-unsafe - #![stable(feature = "rust1", since = "1.0.0")] use crate::iter::{FromIterator, FusedIterator, TrustedLen}; @@ -298,6 +296,7 @@ impl Option { #[inline] #[stable(feature = "pin", since = "1.33.0")] pub fn as_pin_ref(self: Pin<&Self>) -> Option> { + // SAFETY: data already pinned unsafe { Pin::get_ref(self).as_ref().map(|x| Pin::new_unchecked(x)) } @@ -309,6 +308,7 @@ impl Option { #[inline] #[stable(feature = "pin", since = "1.33.0")] pub fn as_pin_mut(self: Pin<&mut Self>) -> Option> { + // SAFETY: data already pinned unsafe { Pin::get_unchecked_mut(self).as_mut().map(|x| Pin::new_unchecked(x)) } @@ -845,6 +845,7 @@ impl Option { match *self { Some(ref mut v) => v, + // SAFETY: `*self` is always `Some` None => unsafe { hint::unreachable_unchecked() }, } } diff --git a/src/libcore/panicking.rs b/src/libcore/panicking.rs index b88dc336097f3..6dfc8ac78dded 100644 --- a/src/libcore/panicking.rs +++ b/src/libcore/panicking.rs @@ -20,8 +20,6 @@ //! one function. Currently, the actual symbol is declared in the standard //! library, but the location of this may change over time. -// ignore-tidy-undocumented-unsafe - #![allow(dead_code, missing_docs)] #![unstable(feature = "core_panic", reason = "internal details of the implementation of the `panic!` \ @@ -39,6 +37,7 @@ use crate::panic::{Location, PanicInfo}; #[lang = "panic"] pub fn panic(expr_file_line_col: &(&'static str, &'static str, u32, u32)) -> ! { if cfg!(feature = "panic_immediate_abort") { + // SAFETY: ends the program unsafe { super::intrinsics::abort() } } @@ -60,6 +59,7 @@ pub fn panic(expr_file_line_col: &(&'static str, &'static str, u32, u32)) -> ! { #[lang = "panic"] pub fn panic(expr: &str, location: &Location<'_>) -> ! { if cfg!(feature = "panic_immediate_abort") { + // SAFETY: ends the program unsafe { super::intrinsics::abort() } } @@ -79,6 +79,7 @@ pub fn panic(expr: &str, location: &Location<'_>) -> ! 
{ fn panic_bounds_check(file_line_col: &(&'static str, u32, u32), index: usize, len: usize) -> ! { if cfg!(feature = "panic_immediate_abort") { + // SAFETY: ends the program unsafe { super::intrinsics::abort() } } @@ -92,6 +93,7 @@ fn panic_bounds_check(file_line_col: &(&'static str, u32, u32), #[lang = "panic_bounds_check"] fn panic_bounds_check(location: &Location<'_>, index: usize, len: usize) -> ! { if cfg!(feature = "panic_immediate_abort") { + // SAFETY: ends the program unsafe { super::intrinsics::abort() } } @@ -107,6 +109,7 @@ fn panic_bounds_check(location: &Location<'_>, index: usize, len: usize) -> ! { #[cfg_attr( feature="panic_immediate_abort" ,inline)] pub fn panic_fmt(fmt: fmt::Arguments<'_>, file_line_col: &(&'static str, u32, u32)) -> ! { if cfg!(feature = "panic_immediate_abort") { + // SAFETY: ends the program unsafe { super::intrinsics::abort() } } @@ -119,6 +122,7 @@ pub fn panic_fmt(fmt: fmt::Arguments<'_>, file_line_col: &(&'static str, u32, u3 let (file, line, col) = *file_line_col; let location = Location::internal_constructor(file, line, col); let pi = PanicInfo::internal_constructor(Some(&fmt), &location); + // SAFETY: pseudo-FFI call to end the program unsafe { panic_impl(&pi) } } @@ -128,6 +132,7 @@ pub fn panic_fmt(fmt: fmt::Arguments<'_>, file_line_col: &(&'static str, u32, u3 #[cfg_attr( feature="panic_immediate_abort" ,inline)] pub fn panic_fmt(fmt: fmt::Arguments<'_>, location: &Location<'_>) -> ! { if cfg!(feature = "panic_immediate_abort") { + // SAFETY: ends the program unsafe { super::intrinsics::abort() } } @@ -138,5 +143,6 @@ pub fn panic_fmt(fmt: fmt::Arguments<'_>, location: &Location<'_>) -> ! { } let pi = PanicInfo::internal_constructor(Some(&fmt), location); + // SAFETY: pseudo-FFI call to end the program unsafe { panic_impl(&pi) } } diff --git a/src/libcore/ptr/mod.rs b/src/libcore/ptr/mod.rs index 5a75730cf2bd4..b1f2c99fe8fbf 100644 --- a/src/libcore/ptr/mod.rs +++ b/src/libcore/ptr/mod.rs @@ -65,8 +65,6 @@ //! [`write_volatile`]: ./fn.write_volatile.html //!
[`NonNull::dangling`]: ./struct.NonNull.html#method.dangling -// ignore-tidy-undocumented-unsafe - #![stable(feature = "rust1", since = "1.0.0")] use crate::intrinsics; @@ -251,6 +249,7 @@ pub(crate) struct FatPtr { #[inline] #[unstable(feature = "slice_from_raw_parts", reason = "recently added", issue = "36925")] pub fn slice_from_raw_parts(data: *const T, len: usize) -> *const [T] { + // SAFETY: `FatPtr.data` and `Repr.rust` are both `usize` in the same location unsafe { Repr { raw: FatPtr { data, len } }.rust } } @@ -267,6 +266,7 @@ pub fn slice_from_raw_parts(data: *const T, len: usize) -> *const [T] { #[inline] #[unstable(feature = "slice_from_raw_parts", reason = "recently added", issue = "36925")] pub fn slice_from_raw_parts_mut(data: *mut T, len: usize) -> *mut [T] { + // SAFETY: `FatPtr.data` and `Repr.rust_mut` are both usize in the same location unsafe { Repr { raw: FatPtr { data, len } }.rust_mut } } @@ -1233,6 +1233,7 @@ impl *const T { #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")] #[inline] pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized { + // SAFETY: see documentation unsafe { intrinsics::arith_offset(self, count) } @@ -1723,6 +1724,7 @@ impl *const T { if !align.is_power_of_two() { panic!("align_offset: align is not a power-of-two"); } + // SAFETY: `align` is a power of two unsafe { align_offset(self, align) } @@ -1931,6 +1933,7 @@ impl *mut T { #[stable(feature = "ptr_wrapping_offset", since = "1.16.0")] #[inline] pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized { + // SAFETY: see documentation unsafe { intrinsics::arith_offset(self, count) as *mut T } @@ -2574,6 +2577,7 @@ impl *mut T { if !align.is_power_of_two() { panic!("align_offset: align is not a power-of-two"); } + // SAFETY: `align` is a power of two unsafe { align_offset(self, align) } diff --git a/src/libcore/ptr/non_null.rs b/src/libcore/ptr/non_null.rs index 7599991f0f15a..b47bce043e6cc 100644 --- a/src/libcore/ptr/non_null.rs +++ b/src/libcore/ptr/non_null.rs @@ -7,8 +7,6 @@ use crate::mem; use crate::ptr::Unique; use crate::cmp::Ordering; -// ignore-tidy-undocumented-unsafe - /// `*mut T` but non-zero and covariant. 
/// /// This is often the correct thing to use when building data structures using @@ -68,6 +66,7 @@ impl NonNull { #[stable(feature = "nonnull", since = "1.25.0")] #[inline] pub const fn dangling() -> Self { + // SAFETY: must not be dereferenced, but `mem::align_of::() > 0` if `T` is sized unsafe { let ptr = mem::align_of::() as *mut T; NonNull::new_unchecked(ptr) @@ -92,6 +91,7 @@ impl NonNull { #[inline] pub fn new(ptr: *mut T) -> Option { if !ptr.is_null() { + // SAFETY: just checked that `ptr > 0` Some(unsafe { Self::new_unchecked(ptr) }) } else { None @@ -131,6 +131,7 @@ impl NonNull { #[stable(feature = "nonnull_cast", since = "1.27.0")] #[inline] pub const fn cast(self) -> NonNull { + // SAFETY: `self.pointer` is non-null unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) } @@ -207,6 +208,7 @@ impl hash::Hash for NonNull { impl From> for NonNull { #[inline] fn from(unique: Unique) -> Self { + // SAFETY: `Unique::as_ptr()` can't be null unsafe { NonNull::new_unchecked(unique.as_ptr()) } } } @@ -215,6 +217,7 @@ impl From> for NonNull { impl From<&mut T> for NonNull { #[inline] fn from(reference: &mut T) -> Self { + // SAFETY: references can't be null unsafe { NonNull { pointer: reference as *mut T } } } } @@ -223,6 +226,7 @@ impl From<&mut T> for NonNull { impl From<&T> for NonNull { #[inline] fn from(reference: &T) -> Self { + // SAFETY: references can't be null unsafe { NonNull { pointer: reference as *const T } } } } diff --git a/src/libcore/ptr/unique.rs b/src/libcore/ptr/unique.rs index 11a3aed1ab41b..6fad9798e245e 100644 --- a/src/libcore/ptr/unique.rs +++ b/src/libcore/ptr/unique.rs @@ -5,8 +5,6 @@ use crate::marker::{PhantomData, Unsize}; use crate::mem; use crate::ptr::NonNull; -// ignore-tidy-undocumented-unsafe - /// A wrapper around a raw non-null `*mut T` that indicates that the possessor /// of this wrapper owns the referent. Useful for building abstractions like /// `Box`, `Vec`, `String`, and `HashMap`. @@ -71,6 +69,7 @@ impl Unique { // FIXME: rename to dangling() to match NonNull? #[inline] pub const fn empty() -> Self { + // SAFETY: must not be dereferenced, but `mem::align_of::() > 0` if `T` is sized unsafe { Unique::new_unchecked(mem::align_of::() as *mut T) } @@ -93,6 +92,7 @@ impl Unique { #[inline] pub fn new(ptr: *mut T) -> Option { if !ptr.is_null() { + // SAFETY: just checked that `ptr > 0` Some(unsafe { Unique { pointer: ptr as _, _marker: PhantomData } }) } else { None @@ -128,6 +128,7 @@ impl Unique { /// Casts to a pointer of another type. 
#[inline] pub const fn cast(self) -> Unique { + // SAFETY: `self.pointer` is non-null unsafe { Unique::new_unchecked(self.as_ptr() as *mut U) } @@ -169,6 +170,7 @@ impl fmt::Pointer for Unique { impl From<&mut T> for Unique { #[inline] fn from(reference: &mut T) -> Self { + // SAFETY: references can't be null unsafe { Unique { pointer: reference as *mut T, _marker: PhantomData } } } } @@ -177,6 +179,7 @@ impl From<&mut T> for Unique { impl From<&T> for Unique { #[inline] fn from(reference: &T) -> Self { + // SAFETY: references can't be null unsafe { Unique { pointer: reference as *const T, _marker: PhantomData } } } } @@ -185,6 +188,7 @@ impl From<&T> for Unique { impl From> for Unique { #[inline] fn from(p: NonNull) -> Self { + // SAFETY: `NonNull::as_ptr()` can't be null unsafe { Unique::new_unchecked(p.as_ptr()) } } } diff --git a/src/libcore/slice/memchr.rs b/src/libcore/slice/memchr.rs index 2a2169dd348c2..9f3136365580b 100644 --- a/src/libcore/slice/memchr.rs +++ b/src/libcore/slice/memchr.rs @@ -1,8 +1,6 @@ // Original implementation taken from rust-memchr. // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch -// ignore-tidy-undocumented-unsafe - use crate::cmp; use crate::mem; @@ -63,6 +61,9 @@ pub fn memchr(x: u8, text: &[u8]) -> Option { if len >= 2 * usize_bytes { while offset <= len - 2 * usize_bytes { + // SAFETY: both `u` and `v` can be read since + // `ptr + offset + usize_bytes <= ptr + len - usize_bytes < ptr + len` + // means the pointers are in bounds unsafe { let u = *(ptr.add(offset) as *const usize); let v = *(ptr.add(offset + usize_bytes) as *const usize); @@ -95,7 +96,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option { type Chunk = usize; let (min_aligned_offset, max_aligned_offset) = { - // We call this just to obtain the length of the prefix and suffix. + // SAFETY: We call this just to obtain the length of the prefix and suffix. // In the middle we always process two chunks at once. let (prefix, _, suffix) = unsafe { text.align_to::<(Chunk, Chunk)>() }; (prefix.len(), len - suffix.len()) @@ -113,6 +114,8 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option { let chunk_bytes = mem::size_of::(); while offset > min_aligned_offset { + // SAFETY: since `offset` is always aligned, `offset > min_aligned_offset` means + // that `offset - 2 * chunk_bytes` is within bounds. unsafe { let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk); let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk); diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs index c8fe9f9861315..706a25461aa90 100644 --- a/src/libcore/slice/mod.rs +++ b/src/libcore/slice/mod.rs @@ -1,5 +1,4 @@ // ignore-tidy-filelength -// ignore-tidy-undocumented-unsafe //! Slice management and manipulation. //! 
@@ -63,10 +62,11 @@ impl [T] { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - // SAFETY: const sound because we transmute out the length field as a usize (which it must be) #[allow(unused_attributes)] #[allow_internal_unstable(const_fn_union)] pub const fn len(&self) -> usize { + // SAFETY: const sound because we transmute out the length field as a usize + // (which it must be) unsafe { crate::ptr::Repr { rust: self }.raw.len } @@ -441,7 +441,8 @@ impl [T] { #[unstable(feature = "slice_ptr_range", issue = "65807")] #[inline] pub fn as_ptr_range(&self) -> Range<*const T> { - // The `add` here is safe, because: + let start = self.as_ptr(); + // SAFETY: The `add` here is safe, because: // // - Both pointers are part of the same object, as pointing directly // past the object also counts. @@ -458,7 +459,6 @@ impl [T] { // the end of the address space. // // See the documentation of pointer::add. - let start = self.as_ptr(); let end = unsafe { start.add(self.len()) }; start..end } @@ -482,8 +482,8 @@ impl [T] { #[unstable(feature = "slice_ptr_range", issue = "65807")] #[inline] pub fn as_mut_ptr_range(&mut self) -> Range<*mut T> { - // See as_ptr_range() above for why `add` here is safe. let start = self.as_mut_ptr(); + // SAFETY: See `as_ptr_range() above for why `add` here is safe. let end = unsafe { start.add(self.len()) }; start..end } @@ -509,6 +509,8 @@ impl [T] { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn swap(&mut self, a: usize, b: usize) { + // SAFETY: `self[a]` and `self[b]` are both properly aligned (since they're + // taken from a slice) and valid for reads/writes (since this would panic otherwise) unsafe { // Can't take two mutable loans from one vector, so instead just cast // them to their raw pointers to do the swap @@ -553,11 +555,16 @@ impl [T] { // Use the llvm.bswap intrinsic to reverse u8s in a usize let chunk = mem::size_of::(); while i + chunk - 1 < ln / 2 { + // SAFETY: see inline comments unsafe { + // within bounds since: 0 <= i < ln let pa: *mut T = self.get_unchecked_mut(i); + // within bounds since: 0 <= i + chunk - 2 < ln - i - chunk < ln let pb: *mut T = self.get_unchecked_mut(ln - i - chunk); + // both are valid for reads since they're in this slice let va = ptr::read_unaligned(pa as *mut usize); let vb = ptr::read_unaligned(pb as *mut usize); + // and they're valid for writes for the same reason ptr::write_unaligned(pa as *mut usize, vb.swap_bytes()); ptr::write_unaligned(pb as *mut usize, va.swap_bytes()); } @@ -569,6 +576,7 @@ impl [T] { // Use rotate-by-16 to reverse u16s in a u32 let chunk = mem::size_of::() / 2; while i + chunk - 1 < ln / 2 { + // SAFETY: see above block unsafe { let pa: *mut T = self.get_unchecked_mut(i); let pb: *mut T = self.get_unchecked_mut(ln - i - chunk); @@ -583,6 +591,7 @@ impl [T] { while i < ln / 2 { // Unsafe swap to avoid the bounds check in safe swap. + // SAFETY: safe since `0 <= i < ln` and `-1 <= i - 1 <= 2i - i - 1 < ln - i - 1 < ln` unsafe { let pa: *mut T = self.get_unchecked_mut(i); let pb: *mut T = self.get_unchecked_mut(ln - i - 1); @@ -608,11 +617,15 @@ impl [T] { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn iter(&self) -> Iter<'_, T> { + // SAFETY: it's invariant that `[ptr, ptr + self.len())` all point to valid `T` unsafe { let ptr = self.as_ptr(); assume(!ptr.is_null()); let end = if mem::size_of::() == 0 { + // ZSTs don't take up any space, it is an error to dereference this. 
+ // However, casting this to a `*u8` lets us create an iterator with the right end + // anyway. (ptr as *const u8).wrapping_add(self.len()) as *const T } else { ptr.add(self.len()) @@ -640,6 +653,7 @@ impl [T] { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn iter_mut(&mut self) -> IterMut<'_, T> { + // SAFETY: it's invariant that `[ptr, ptr + self.len())` all point to valid `T` unsafe { let ptr = self.as_mut_ptr(); assume(!ptr.is_null()); @@ -1075,6 +1089,7 @@ impl [T] { let len = self.len(); let ptr = self.as_mut_ptr(); + // SAFETY: it's invariant that `[ptr, ptr + self.len())` all point to valid `T` unsafe { assert!(mid <= len); @@ -1510,14 +1525,14 @@ impl [T] { while size > 1 { let half = size / 2; let mid = base + half; - // mid is always in [0, size), that means mid is >= 0 and < size. + // SAFETY: `mid` is always in `[0, size)`, that means `0 <= mid < size. // mid >= 0: by definition // mid < size: mid = size / 2 + size / 4 + size / 8 ... let cmp = f(unsafe { s.get_unchecked(mid) }); base = if cmp == Greater { base } else { mid }; size -= half; } - // base is always in [0, size) because base <= mid. + // SAFETY: `base` is always in `[0, size)` because `base <= mid`. let cmp = f(unsafe { s.get_unchecked(base) }); if cmp == Equal { Ok(base) } else { Err(base + (cmp == Less) as usize) } @@ -1959,6 +1974,10 @@ impl [T] { let mut next_read: usize = 1; let mut next_write: usize = 1; + // SAFETY: `ptr_read`, `prev_ptr_write`, `ptr_write` are all in bounds since + // ptr + 1 < ptr_read = ptr + next_read < ptr + len + // ptr + 1 < prev_ptr_write = ptr + next_write - 1 + // < ptr_write = ptr + next_write < ptr + len unsafe { // Avoid bounds checks by using raw pointers. while next_read < len { @@ -2042,6 +2061,8 @@ impl [T] { assert!(mid <= self.len()); let k = self.len() - mid; + // SAFETY: this just requires `[p, self.len())` are valid for reading and writing, which + // they must be. unsafe { let p = self.as_mut_ptr(); rotate::ptr_rotate(mid, p.add(mid), k); @@ -2083,6 +2104,8 @@ impl [T] { assert!(k <= self.len()); let mid = self.len() - k; + // SAFETY: this just requires `[p, self.len())` are valid for reading and writing, which + // they must be. unsafe { let p = self.as_mut_ptr(); rotate::ptr_rotate(mid, p.add(mid), k); @@ -2217,6 +2240,9 @@ impl [T] { pub fn copy_from_slice(&mut self, src: &[T]) where T: Copy { assert_eq!(self.len(), src.len(), "destination and source slices have different lengths"); + // SAFETY: it's possible we might try to copy from two overlapping + // slices which would cause undefined behavior, although this should be + // impossible in safe code. unsafe { ptr::copy_nonoverlapping( src.as_ptr(), self.as_mut_ptr(), self.len()); @@ -2270,6 +2296,7 @@ impl [T] { assert!(src_end <= self.len(), "src is out of bounds"); let count = src_end - src_start; assert!(dest <= self.len() - count, "dest is out of bounds"); + // SAFETY: `src_start`, `src_end`, `dest` are all within bounds unsafe { ptr::copy( self.as_ptr().add(src_start), @@ -2330,6 +2357,9 @@ impl [T] { pub fn swap_with_slice(&mut self, other: &mut [T]) { assert!(self.len() == other.len(), "destination and source slices have different lengths"); + // SAFETY: it's possible we might try to copy from two overlapping + // slices which would cause undefined behavior, although this should be + // impossible in safe code. 
unsafe { ptr::swap_nonoverlapping( self.as_mut_ptr(), other.as_mut_ptr(), self.len()); } @@ -2362,6 +2392,7 @@ impl [T] { // iterative stein’s algorithm // We should still make this `const fn` (and revert to recursive algorithm if we do) // because relying on llvm to consteval all this is… well, it makes me uncomfortable. + // SAFETY: we make sure that `a` and `b` are nonzero let (ctz_a, mut ctz_b) = unsafe { if a == 0 { return b; } if b == 0 { return a; } @@ -2377,6 +2408,7 @@ impl [T] { mem::swap(&mut a, &mut b); } b = b - a; + // SAFETY: we make sure that `b` is nonzero unsafe { if b == 0 { break; } @@ -2762,6 +2794,7 @@ impl SliceIndex<[T]> for usize { #[inline] fn get(self, slice: &[T]) -> Option<&T> { if self < slice.len() { + // SAFETY: since it's `usize`, `0 <= self < slice.len()` unsafe { Some(self.get_unchecked(slice)) } @@ -2773,6 +2806,7 @@ impl SliceIndex<[T]> for usize { #[inline] fn get_mut(self, slice: &mut [T]) -> Option<&mut T> { if self < slice.len() { + // SAFETY: since it's `usize`, `0 <= self < slice.len()` unsafe { Some(self.get_unchecked_mut(slice)) } @@ -2813,6 +2847,7 @@ impl SliceIndex<[T]> for ops::Range { if self.start > self.end || self.end > slice.len() { None } else { + // SAFETY: `0 <= start <= end <= slice.len()` since `start` is `usize` unsafe { Some(self.get_unchecked(slice)) } @@ -2824,6 +2859,7 @@ impl SliceIndex<[T]> for ops::Range { if self.start > self.end || self.end > slice.len() { None } else { + // SAFETY: `0 <= start <= end <= slice.len()` since `start` is `usize` unsafe { Some(self.get_unchecked_mut(slice)) } @@ -2847,6 +2883,7 @@ impl SliceIndex<[T]> for ops::Range { } else if self.end > slice.len() { slice_index_len_fail(self.end, slice.len()); } + // SAFETY: `0 <= start <= end <= slice.len()` since `start` is `usize` unsafe { self.get_unchecked(slice) } @@ -2859,6 +2896,7 @@ impl SliceIndex<[T]> for ops::Range { } else if self.end > slice.len() { slice_index_len_fail(self.end, slice.len()); } + // SAFETY: `0 <= start <= end <= slice.len()` since `start` is `usize` unsafe { self.get_unchecked_mut(slice) } @@ -3160,6 +3198,7 @@ macro_rules! iterator { // Helper function for creating a slice from the iterator. #[inline(always)] fn make_slice(&self) -> &'a [T] { + // SAFETY: `[ptr, ptr + len)` are guaranteed to be valid unsafe { from_raw_parts(self.ptr, len!(self)) } } @@ -3213,6 +3252,8 @@ macro_rules! iterator { #[inline] fn next(&mut self) -> Option<$elem> { // could be implemented with slices, but this avoids bounds checks + // SAFETY: we can call `next_unchecked` as long as the iterator is + // not empty. unsafe { assume(!self.ptr.is_null()); if mem::size_of::() != 0 { @@ -3250,7 +3291,7 @@ macro_rules! iterator { } return None; } - // We are in bounds. `post_inc_start` does the right thing even for ZSTs. + // SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs. unsafe { self.post_inc_start(n as isize); Some(next_unchecked!(self)) @@ -3275,6 +3316,7 @@ macro_rules! iterator { else { Ok(i + 1) } }).err() .map(|i| { + // SAFETY: Generates no code unsafe { assume(i < n) }; i }) @@ -3293,6 +3335,7 @@ macro_rules! iterator { else { Ok(i) } }).err() .map(|i| { + // SAFETY: Generates no code unsafe { assume(i < n) }; i }) @@ -3306,6 +3349,8 @@ macro_rules! iterator { #[inline] fn next_back(&mut self) -> Option<$elem> { // could be implemented with slices, but this avoids bounds checks + // SAFETY: we can call `next_back_unchecked` as long as the iterator is + // not empty.
unsafe { assume(!self.ptr.is_null()); if mem::size_of::() != 0 { @@ -3326,7 +3371,7 @@ macro_rules! iterator { self.end = self.ptr; return None; } - // We are in bounds. `pre_dec_end` does the right thing even for ZSTs. + // SAFETY: We are in bounds. `pre_dec_end` does the right thing even for ZSTs. unsafe { self.pre_dec_end(n as isize); Some(next_back_unchecked!(self)) @@ -3523,6 +3568,7 @@ impl<'a, T> IterMut<'a, T> { /// ``` #[stable(feature = "iter_to_slice", since = "1.4.0")] pub fn into_slice(self) -> &'a mut [T] { + // SAFETY: `[ptr, ptr + len)` are guaranteed to be valid unsafe { from_raw_parts_mut(self.ptr, len!(self)) } } @@ -5365,6 +5411,7 @@ pub unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] /// Converts a reference to T into a slice of length 1 (without copying). #[stable(feature = "from_ref", since = "1.28.0")] pub fn from_ref(s: &T) -> &[T] { + // SAFETY: the reference guarantees it's valid for reads during its lifetime unsafe { from_raw_parts(s, 1) } @@ -5373,6 +5420,7 @@ pub fn from_ref(s: &T) -> &[T] { /// Converts a reference to T into a slice of length 1 (without copying). #[stable(feature = "from_ref", since = "1.28.0")] pub fn from_mut(s: &mut T) -> &mut [T] { + // SAFETY: the mut reference guarantees it's valid for reads and writes during its lifetime unsafe { from_raw_parts_mut(s, 1) } @@ -5481,6 +5529,7 @@ impl SlicePartialEq for [A] if self.as_ptr() == other.as_ptr() { return true; } + // SAFETY: both are references of slices of the same size unsafe { let size = mem::size_of_val(self); memcmp(self.as_ptr() as *const u8, @@ -5558,6 +5607,8 @@ impl SliceOrd for [A] impl SliceOrd for [u8] { #[inline] fn compare(&self, other: &[u8]) -> Ordering { + // SAFETY: both are references of slices, which are guaranteed to be + // valid up to the min of their lengths let order = unsafe { memcmp(self.as_ptr(), other.as_ptr(), cmp::min(self.len(), other.len())) @@ -5623,6 +5674,7 @@ impl SliceContains for u8 { impl SliceContains for i8 { fn slice_contains(&self, x: &[Self]) -> bool { let byte = *self as u8; + // SAFETY: just a way to cast the slice from `i8` to `u8` let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) }; memchr::memchr(byte, bytes).is_some() } diff --git a/src/libcore/slice/sort.rs b/src/libcore/slice/sort.rs index a719a51b61605..f816e82fbde7b 100644 --- a/src/libcore/slice/sort.rs +++ b/src/libcore/slice/sort.rs @@ -6,8 +6,6 @@ //! Unstable sorting is compatible with libcore because it doesn't allocate memory, unlike our //! stable sorting implementation. -// ignore-tidy-undocumented-unsafe - use crate::cmp; use crate::mem::{self, MaybeUninit}; use crate::ptr; @@ -20,6 +18,7 @@ struct CopyOnDrop { impl Drop for CopyOnDrop { fn drop(&mut self) { + // SAFETY: both `*src` and `*dest` are (supposedly) properly aligned `T` unsafe { ptr::copy_nonoverlapping(self.src, self.dest, 1); } } } @@ -29,6 +28,7 @@ fn shift_head(v: &mut [T], is_less: &mut F) where F: FnMut(&T, &T) -> bool { let len = v.len(); + // SAFETY: See comments in block unsafe { // If the first two elements are out-of-order... if len >= 2 && is_less(v.get_unchecked(1), v.get_unchecked(0)) { @@ -61,6 +61,7 @@ fn shift_tail(v: &mut [T], is_less: &mut F) where F: FnMut(&T, &T) -> bool { let len = v.len(); + // SAFETY: See comments in block unsafe { // If the last two elements are out-of-order... 
if len >= 2 && is_less(v.get_unchecked(len - 1), v.get_unchecked(len - 2)) { @@ -104,6 +105,7 @@ fn partial_insertion_sort(v: &mut [T], is_less: &mut F) -> bool let mut i = 1; for _ in 0..MAX_STEPS { + // SAFETY: 0 < i < len unsafe { // Find the next pair of adjacent out-of-order elements. while i < len && !is_less(v.get_unchecked(i), v.get_unchecked(i - 1)) { @@ -221,6 +223,7 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize let mut offsets_l = [MaybeUninit::::uninit(); BLOCK]; // The current block on the right side (from `r.sub(block_r)` to `r`). + // SAFETY: safe because we only end up accessing `r.offset(-1)` let mut r = unsafe { l.add(v.len()) }; let mut block_r = BLOCK; let mut start_r = ptr::null_mut(); @@ -269,6 +272,8 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize let mut elem = l; for i in 0..block_l { + // SAFETY: `elem` and `end_l` are always within an offset of `[0, block_l)` + // from `l` when accessed unsafe { // Branchless comparison. *end_l = i as u8; @@ -285,6 +290,8 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize let mut elem = r; for i in 0..block_r { + // SAFETY: `elem` and `end_r` are always within an offset of `[0, block_r)` + // from `r` when accessed unsafe { // Branchless comparison. elem = elem.offset(-1); @@ -304,6 +311,7 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize // Instead of swapping one pair at the time, it is more efficient to perform a cyclic // permutation. This is not strictly equivalent to swapping, but produces a similar // result using fewer memory operations. + // SAFETY: safe since we keep `start_l < end_l` and `start_r < end_r` unsafe { let tmp = ptr::read(left!()); ptr::copy_nonoverlapping(right!(), left!(), 1); @@ -324,11 +332,13 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize if start_l == end_l { // All out-of-order elements in the left block were moved. Move to the next block. + // SAFETY: we check that `width(l, r) > 2 * block` before dereferencing l = unsafe { l.offset(block_l as isize) }; } if start_r == end_r { // All out-of-order elements in the right block were moved. Move to the previous block. + // SAFETY: we check that `width(l, r) > 2 * block` before dereferencing r = unsafe { r.offset(-(block_r as isize)) }; } @@ -346,6 +356,10 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize // Move its remaining out-of-order elements to the far right. debug_assert_eq!(width(l, r), block_l); while start_l < end_l { + // SAFETY: this is safe since + // - `l < start_l < end_l < r`, + // - `l` only increases, and + // - `r` only decreases unsafe { end_l = end_l.offset(-1); ptr::swap(l.offset(*end_l as isize), r.offset(-1)); @@ -358,6 +372,7 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize // Move its remaining out-of-order elements to the far left. debug_assert_eq!(width(l, r), block_r); while start_r < end_r { + // SAFETY: see above comment unsafe { end_r = end_r.offset(-1); ptr::swap(l, r.offset(-(*end_r as isize) - 1)); @@ -389,6 +404,7 @@ fn partition(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool) // Read the pivot into a stack-allocated variable for efficiency. If a following comparison // operation panics, the pivot will be automatically written back into the slice. 
+ // SAFETY: `pivot` points to the first element of `v` let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); let _pivot_guard = CopyOnDrop { src: &mut *tmp, dest: pivot, }; @@ -399,6 +415,7 @@ fn partition(v: &mut [T], pivot: usize, is_less: &mut F) -> (usize, bool) // Find the first pair of out-of-order elements. let mut l = 0; let mut r = v.len(); + // SAFETY: safe since `0 <= l < r <= v.len()`, `l` only increases, and `r` only decreases unsafe { // Find the first element greater then or equal to the pivot. while l < r && is_less(v.get_unchecked(l), pivot) { @@ -438,6 +455,7 @@ fn partition_equal(v: &mut [T], pivot: usize, is_less: &mut F) -> usize // Read the pivot into a stack-allocated variable for efficiency. If a following comparison // operation panics, the pivot will be automatically written back into the slice. + // SAFETY: `pivot` points to the first element of `v` let mut tmp = mem::ManuallyDrop::new(unsafe { ptr::read(pivot) }); let _pivot_guard = CopyOnDrop { src: &mut *tmp, dest: pivot, }; @@ -449,6 +467,7 @@ fn partition_equal(v: &mut [T], pivot: usize, is_less: &mut F) -> usize let mut l = 0; let mut r = v.len(); loop { + // SAFETY: safe since `0 <= l < r <= v.len()`, `l` only increases, and `r` only decreases unsafe { // Find the first element greater that the pivot. while l < r && !is_less(pivot, v.get_unchecked(l)) { @@ -548,6 +567,8 @@ fn choose_pivot(v: &mut [T], is_less: &mut F) -> (usize, bool) if len >= 8 { // Swaps indices so that `v[a] <= v[b]`. + // SAFETY: `a`, `b`, and `c` are all in bounds, and if `len >= SHORTEST_MEDIAN_OF_MEDIANS` + // then they are still in bounds plus-or-minus 1 let mut sort2 = |a: &mut usize, b: &mut usize| unsafe { if is_less(v.get_unchecked(*b), v.get_unchecked(*a)) { ptr::swap(a, b); } }; diff --git a/src/libcore/str/lossy.rs b/src/libcore/str/lossy.rs index 762de0489a975..b2bc01539aafa 100644 --- a/src/libcore/str/lossy.rs +++ b/src/libcore/str/lossy.rs @@ -3,8 +3,6 @@ use crate::str as core_str; use crate::fmt::{self, Write}; use crate::mem; -// ignore-tidy-undocumented-unsafe - /// Lossy UTF-8 string. #[unstable(feature = "str_internals", issue = "0")] pub struct Utf8Lossy { @@ -17,6 +15,7 @@ impl Utf8Lossy { } pub fn from_bytes(bytes: &[u8]) -> &Utf8Lossy { + // SAFETY: both use the same memory layout, and UTF-8 correctness isn't required unsafe { mem::transmute(bytes) } } @@ -61,6 +60,7 @@ impl<'a> Iterator for Utf8LossyChunksIter<'a> { while i < self.source.len() { let i_ = i; + // SAFETY: 0 <= i < self.source.len() let byte = unsafe { *self.source.get_unchecked(i) }; i += 1; @@ -70,6 +70,7 @@ impl<'a> Iterator for Utf8LossyChunksIter<'a> { let w = core_str::utf8_char_width(byte); macro_rules! error { () => ({ + // SAFETY: we have checked up to `i` that source is valid UTF-8 unsafe { let r = Utf8LossyChunk { valid: core_str::from_utf8_unchecked(&self.source[0..i_]), @@ -130,6 +131,7 @@ impl<'a> Iterator for Utf8LossyChunksIter<'a> { } let r = Utf8LossyChunk { + // SAFETY: we have checked that the entire source is valid UTF-8 valid: unsafe { core_str::from_utf8_unchecked(self.source) }, broken: &[], }; diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs index 25b7eec5b3343..dceffa900fb30 100644 --- a/src/libcore/str/mod.rs +++ b/src/libcore/str/mod.rs @@ -1,5 +1,4 @@ // ignore-tidy-filelength -// ignore-tidy-undocumented-unsafe //! String manipulation. //!
@@ -337,6 +336,7 @@ impl Utf8Error { #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> { run_utf8_validation(v)?; + // SAFETY: just ran validation Ok(unsafe { from_utf8_unchecked(v) }) } @@ -375,6 +375,7 @@ pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> { #[stable(feature = "str_mut_extras", since = "1.20.0")] pub fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> { run_utf8_validation(v)?; + // SAFETY: just ran validation Ok(unsafe { from_utf8_unchecked_mut(v) }) } @@ -567,7 +568,7 @@ impl<'a> Iterator for Chars<'a> { #[inline] fn next(&mut self) -> Option { next_code_point(&mut self.iter).map(|ch| { - // str invariant says `ch` is a valid Unicode Scalar Value + // SAFETY: str invariant says `ch` is a valid Unicode Scalar Value unsafe { char::from_u32_unchecked(ch) } @@ -616,7 +617,7 @@ impl<'a> DoubleEndedIterator for Chars<'a> { #[inline] fn next_back(&mut self) -> Option { next_code_point_reverse(&mut self.iter).map(|ch| { - // str invariant says `ch` is a valid Unicode Scalar Value + // SAFETY: str invariant says `ch` is a valid Unicode Scalar Value unsafe { char::from_u32_unchecked(ch) } @@ -648,6 +649,7 @@ impl<'a> Chars<'a> { #[stable(feature = "iter_to_slice", since = "1.4.0")] #[inline] pub fn as_str(&self) -> &'a str { + // SAFETY: Chars is only made from a str, which guarantees the iter is valid utf8 unsafe { from_utf8_unchecked(self.iter.as_slice()) } } } @@ -1080,6 +1082,7 @@ impl<'a, P: Pattern<'a>> SplitInternal<'a, P> { fn get_end(&mut self) -> Option<&'a str> { if !self.finished && (self.allow_trailing_empty || self.end - self.start > 0) { self.finished = true; + // SAFETY: `self.start` and `self.end` always lie on unicode boundaries unsafe { let string = self.matcher.haystack().get_unchecked(self.start..self.end); Some(string) @@ -1095,6 +1098,7 @@ impl<'a, P: Pattern<'a>> SplitInternal<'a, P> { let haystack = self.matcher.haystack(); match self.matcher.next_match() { + // SAFETY: `Searcher` guarantees that `a` and `b` lie on unicode boundaries Some((a, b)) => unsafe { let elt = haystack.get_unchecked(self.start..a); self.start = b; @@ -1120,11 +1124,13 @@ impl<'a, P: Pattern<'a>> SplitInternal<'a, P> { let haystack = self.matcher.haystack(); match self.matcher.next_match_back() { + // SAFETY: `Searcher` guarantees that `a` and `b` lie on unicode boundaries Some((a, b)) => unsafe { let elt = haystack.get_unchecked(b..self.end); self.end = a; Some(elt) }, + // SAFETY: `self.start` and `self.end` always lie on unicode boundaries None => unsafe { self.finished = true; Some(haystack.get_unchecked(self.start..self.end)) @@ -1253,6 +1259,7 @@ where impl<'a, P: Pattern<'a>> MatchIndicesInternal<'a, P> { #[inline] fn next(&mut self) -> Option<(usize, &'a str)> { + // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries self.0.next_match().map(|(start, end)| unsafe { (start, self.0.haystack().get_unchecked(start..end)) }) @@ -1262,6 +1269,7 @@ impl<'a, P: Pattern<'a>> MatchIndicesInternal<'a, P> { fn next_back(&mut self) -> Option<(usize, &'a str)> where P::Searcher: ReverseSearcher<'a> { + // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries self.0.next_match_back().map(|(start, end)| unsafe { (start, self.0.haystack().get_unchecked(start..end)) }) @@ -1307,6 +1315,7 @@ where impl<'a, P: Pattern<'a>> MatchesInternal<'a, P> { #[inline] fn next(&mut self) -> Option<&'a str> { + // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries 
self.0.next_match().map(|(a, b)| unsafe { // Indices are known to be on utf8 boundaries self.0.haystack().get_unchecked(a..b) @@ -1317,6 +1326,7 @@ impl<'a, P: Pattern<'a>> MatchesInternal<'a, P> { fn next_back(&mut self) -> Option<&'a str> where P::Searcher: ReverseSearcher<'a> { + // SAFETY: `Searcher` guarantees that `start` and `end` lie on unicode boundaries self.0.next_match_back().map(|(a, b)| unsafe { // Indices are known to be on utf8 boundaries self.0.haystack().get_unchecked(a..b) @@ -1538,6 +1548,9 @@ fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> { if align != usize::max_value() && align.wrapping_sub(index) % usize_bytes == 0 { let ptr = v.as_ptr(); while index < blocks_end { + // SAFETY: since `align - index` and `ascii_block_size` are multiples of + // `usize_bytes`, `ptr.add(index)` is always aligned with a `usize` so we + // may cast directly to a `const` pointer. unsafe { let block = ptr.add(index) as *const usize; // break if there is a nonascii byte @@ -1760,6 +1773,7 @@ mod traits { if self.start <= self.end && slice.is_char_boundary(self.start) && slice.is_char_boundary(self.end) { + // SAFETY: just checked that `start` and `end` are on a char boundary Some(unsafe { self.get_unchecked(slice) }) } else { None @@ -1770,6 +1784,7 @@ mod traits { if self.start <= self.end && slice.is_char_boundary(self.start) && slice.is_char_boundary(self.end) { + // SAFETY: just checked that `start` and `end` are on a char boundary Some(unsafe { self.get_unchecked_mut(slice) }) } else { None @@ -1799,6 +1814,7 @@ mod traits { if self.start <= self.end && slice.is_char_boundary(self.start) && slice.is_char_boundary(self.end) { + // SAFETY: just checked that `start` and `end` are on a char boundary unsafe { self.get_unchecked_mut(slice) } } else { super::slice_error_fail(slice, self.start, self.end) @@ -1827,6 +1843,7 @@ mod traits { #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { if slice.is_char_boundary(self.end) { + // SAFETY: just checked that `end` is on a char boundary Some(unsafe { self.get_unchecked(slice) }) } else { None @@ -1835,6 +1852,7 @@ mod traits { #[inline] fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> { if slice.is_char_boundary(self.end) { + // SAFETY: just checked that `end` is on a char boundary Some(unsafe { self.get_unchecked_mut(slice) }) } else { None @@ -1857,8 +1875,8 @@ mod traits { } #[inline] fn index_mut(self, slice: &mut str) -> &mut Self::Output { - // is_char_boundary checks that the index is in [0, .len()] if slice.is_char_boundary(self.end) { + // SAFETY: just checked that `end` is on a char boundary unsafe { self.get_unchecked_mut(slice) } } else { super::slice_error_fail(slice, 0, self.end) @@ -1888,6 +1906,7 @@ mod traits { #[inline] fn get(self, slice: &str) -> Option<&Self::Output> { if slice.is_char_boundary(self.start) { + // SAFETY: just checked that `start` is on a char boundary Some(unsafe { self.get_unchecked(slice) }) } else { None @@ -1896,6 +1915,7 @@ mod traits { #[inline] fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> { if slice.is_char_boundary(self.start) { + // SAFETY: just checked that `start` is on a char boundary Some(unsafe { self.get_unchecked_mut(slice) }) } else { None @@ -1920,8 +1940,8 @@ mod traits { } #[inline] fn index_mut(self, slice: &mut str) -> &mut Self::Output { - // is_char_boundary checks that the index is in [0, .len()] if slice.is_char_boundary(self.start) { + // SAFETY: just checked that `start` is on a char boundary unsafe { 
self.get_unchecked_mut(slice) } } else { super::slice_error_fail(slice, self.start, slice.len()) @@ -2167,7 +2187,6 @@ impl str { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline(always)] - // SAFETY: const sound because we transmute two types with the same layout #[allow(unused_attributes)] #[allow_internal_unstable(const_fn_union)] pub const fn as_bytes(&self) -> &[u8] { @@ -2176,6 +2195,7 @@ impl str { str: &'a str, slice: &'a [u8], } + // SAFETY: const sound because we transmute two types with the same layout unsafe { Slices { str: self }.slice } } @@ -2501,6 +2521,7 @@ impl str { pub fn split_at(&self, mid: usize) -> (&str, &str) { // is_char_boundary checks that the index is in [0, .len()] if self.is_char_boundary(mid) { + // SAFETY: just checked that `mid` is on a char boundary unsafe { (self.get_unchecked(0..mid), self.get_unchecked(mid..self.len())) @@ -2548,6 +2569,7 @@ impl str { if self.is_char_boundary(mid) { let len = self.len(); let ptr = self.as_mut_ptr(); + // SAFETY: just checked that `mid` is on a char boundary unsafe { (from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, mid)), from_utf8_unchecked_mut(slice::from_raw_parts_mut( @@ -3746,8 +3768,8 @@ impl str { if let Some((_, b)) = matcher.next_reject_back() { j = b; } + // SAFETY: `Searcher` is known to return valid indices unsafe { - // Searcher is known to return valid indices self.get_unchecked(i..j) } } @@ -3785,8 +3807,8 @@ impl str { if let Some((a, _)) = matcher.next_reject() { i = a; } + // SAFETY: `Searcher` is known to return valid indices unsafe { - // Searcher is known to return valid indices self.get_unchecked(i..self.len()) } } @@ -3833,8 +3855,8 @@ impl str { if let Some((_, b)) = matcher.next_reject_back() { j = b; } + // SAFETY: `Searcher` is known to return valid indices unsafe { - // Searcher is known to return valid indices self.get_unchecked(0..j) } } @@ -4029,6 +4051,7 @@ impl str { /// ``` #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] pub fn make_ascii_uppercase(&mut self) { + // SAFETY: changing ASCII letters only does not invalidate UTF-8 let me = unsafe { self.as_bytes_mut() }; me.make_ascii_uppercase() } @@ -4054,6 +4077,7 @@ impl str { /// ``` #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] pub fn make_ascii_lowercase(&mut self) { + // SAFETY: changing ASCII letters only does not invalidate UTF-8 let me = unsafe { self.as_bytes_mut() }; me.make_ascii_lowercase() } @@ -4216,6 +4240,7 @@ impl Default for &str { #[stable(feature = "default_mut_str", since = "1.28.0")] impl Default for &mut str { /// Creates an empty mutable str + // SAFETY: an empty slice is trivially valid UTF-8 fn default() -> Self { unsafe { from_utf8_unchecked_mut(&mut []) } } } @@ -4270,6 +4295,7 @@ impl_fn_for_zst! { #[derive(Clone)] struct UnsafeBytesToStr impl<'a> Fn = |bytes: &'a [u8]| -> &'a str { + // SAFETY: `SplitAsciiWhitespace` splits on ASCII bytes only, so the slices passed to this closure always lie on UTF-8 character boundaries unsafe { from_utf8_unchecked(bytes) } }; } diff --git a/src/libcore/str/pattern.rs b/src/libcore/str/pattern.rs index a494274118a74..57032776ea59c 100644 --- a/src/libcore/str/pattern.rs +++ b/src/libcore/str/pattern.rs @@ -3,8 +3,6 @@ //! For more details, see the traits [`Pattern`], [`Searcher`], //! [`ReverseSearcher`], and [`DoubleEndedSearcher`].
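The `split_at`/`split_at_mut` hunks rely on `is_char_boundary` to make the subsequent `get_unchecked` calls sound; the checked and panicking behaviour those comments refer to is observable from safe code:

```rust
fn main() {
    let s = "héllo"; // 'é' occupies bytes 1..3, so byte index 2 is not a boundary

    assert!(s.is_char_boundary(1));
    assert!(!s.is_char_boundary(2));

    // `split_at` only reaches its unsafe `get_unchecked` calls after the
    // boundary check succeeds; otherwise it panics instead of slicing.
    let (left, right) = s.split_at(1);
    assert_eq!((left, right), ("h", "éllo"));

    // The checked indexing path returns None rather than panicking.
    assert_eq!(s.get(0..2), None);
}
```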
-// ignore-tidy-undocumented-unsafe - #![unstable(feature = "pattern", reason = "API not fully fleshed out and ready to be stabilized", issue = "27721")] @@ -276,6 +274,14 @@ unsafe impl<'a> Searcher<'a> for CharSearcher<'a> { #[inline] fn next(&mut self) -> SearchStep { let old_finger = self.finger; + // SAFETY: 1-4 guarantee safety of `get_unchecked` + // 1. `self.finger` and `self.finger_back` are kept on unicode boundaries + // (this is invariant) + // 2. `self.finger >= 0` since it starts at 0 and only increases + // 3. `self.finger < self.finger_back` because otherwise the char `iter` + // would return `SearchStep::Done` + // 4. `self.finger` comes before the end of the haystack because `self.finger_back` + // starts at the end and only decreases let slice = unsafe { self.haystack.get_unchecked(old_finger..self.finger_back) }; let mut iter = slice.chars(); let old_len = iter.iter.len(); @@ -303,6 +309,7 @@ unsafe impl<'a> Searcher<'a> for CharSearcher<'a> { return None; }; // the last byte of the utf8 encoded needle + // SAFETY: we have an invariant that `utf8_size < 5` let last_byte = unsafe { *self.utf8_encoded.get_unchecked(self.utf8_size - 1) }; if let Some(index) = memchr::memchr(last_byte, bytes) { // The new finger is the index of the byte we found, @@ -346,6 +353,7 @@ unsafe impl<'a> ReverseSearcher<'a> for CharSearcher<'a> { #[inline] fn next_back(&mut self) -> SearchStep { let old_finger = self.finger_back; + // SAFETY: see the comment for next() above let slice = unsafe { self.haystack.get_unchecked(self.finger..old_finger) }; let mut iter = slice.chars(); let old_len = iter.iter.len(); @@ -373,6 +381,7 @@ unsafe impl<'a> ReverseSearcher<'a> for CharSearcher<'a> { return None; }; // the last byte of the utf8 encoded needle + // SAFETY: we have an invariant that `utf8_size < 5` let last_byte = unsafe { *self.utf8_encoded.get_unchecked(self.utf8_size - 1) }; if let Some(index) = memchr::memrchr(last_byte, bytes) { // we searched a slice that was offset by self.finger, diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs index d311cb16b64d3..cd5d4d1984221 100644 --- a/src/libcore/sync/atomic.rs +++ b/src/libcore/sync/atomic.rs @@ -112,8 +112,6 @@ //! println!("live threads: {}", old_thread_count + 1); //! 
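The `CharSearcher` comments depend on a `char` encoding to at most four UTF-8 bytes, which is what keeps `utf8_encoded[utf8_size - 1]` (the byte handed to memchr) in bounds. A small demonstration of that invariant using the public `char::encode_utf8` API:

```rust
fn main() {
    let mut buf = [0u8; 4];

    for &needle in &['A', 'é', '✔', '😀'] {
        let encoded: &str = needle.encode_utf8(&mut buf);
        let utf8_size = encoded.len();

        // `len_utf8` is always in 1..=4, so indexing `utf8_size - 1` into a
        // 4-byte buffer cannot go out of bounds.
        assert!((1..=4).contains(&utf8_size));
        assert_eq!(utf8_size, needle.len_utf8());

        // The searcher scans for this last byte and then checks whether a
        // complete match of the needle ends at that position.
        let last_byte = encoded.as_bytes()[utf8_size - 1];
        assert_eq!(last_byte, *encoded.as_bytes().last().unwrap());
    }
}
```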
``` -// ignore-tidy-undocumented-unsafe - #![stable(feature = "rust1", since = "1.0.0")] #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))] #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))] @@ -355,6 +353,7 @@ impl AtomicBool { #[inline] #[stable(feature = "atomic_access", since = "1.15.0")] pub fn get_mut(&mut self) -> &mut bool { + // SAFETY: the mutable reference guarantees unique ownership unsafe { &mut *(self.v.get() as *mut bool) } } @@ -405,6 +404,7 @@ impl AtomicBool { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn load(&self, order: Ordering) -> bool { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_load(self.v.get(), order) != 0 } } @@ -437,6 +437,7 @@ impl AtomicBool { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn store(&self, val: bool, order: Ordering) { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_store(self.v.get(), val as u8, order); } @@ -468,6 +469,7 @@ impl AtomicBool { #[stable(feature = "rust1", since = "1.0.0")] #[cfg(target_has_atomic = "8")] pub fn swap(&self, val: bool, order: Ordering) -> bool { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 } } @@ -562,6 +564,7 @@ impl AtomicBool { success: Ordering, failure: Ordering) -> Result { + // SAFETY: data races are prevented by atomic intrinsics match unsafe { atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure) } { @@ -618,6 +621,7 @@ impl AtomicBool { success: Ordering, failure: Ordering) -> Result { + // SAFETY: data races are prevented by atomic intrinsics match unsafe { atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure) } { @@ -664,6 +668,7 @@ impl AtomicBool { #[stable(feature = "rust1", since = "1.0.0")] #[cfg(target_has_atomic = "8")] pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_and(self.v.get(), val as u8, order) != 0 } } @@ -759,6 +764,7 @@ impl AtomicBool { #[stable(feature = "rust1", since = "1.0.0")] #[cfg(target_has_atomic = "8")] pub fn fetch_or(&self, val: bool, order: Ordering) -> bool { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_or(self.v.get(), val as u8, order) != 0 } } @@ -800,6 +806,7 @@ impl AtomicBool { #[stable(feature = "rust1", since = "1.0.0")] #[cfg(target_has_atomic = "8")] pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 } } } @@ -839,6 +846,7 @@ impl AtomicPtr { #[inline] #[stable(feature = "atomic_access", since = "1.15.0")] pub fn get_mut(&mut self) -> &mut *mut T { + // SAFETY: the mutable reference guarantees unique ownership unsafe { &mut *self.p.get() } } @@ -890,6 +898,7 @@ impl AtomicPtr { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn load(&self, order: Ordering) -> *mut T { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T } } @@ -924,6 +933,7 @@ impl AtomicPtr { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn store(&self, ptr: *mut T, order: Ordering) { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); } @@ -957,6 +967,7 @@ impl AtomicPtr { #[stable(feature = "rust1", since = 
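The `AtomicBool` comments all reduce to the same claim: the atomic intrinsics make concurrent access through `&self` well-defined. A usage sketch in which several threads race on one flag through shared references:

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

static FLAG: AtomicBool = AtomicBool::new(false);

fn main() {
    let handles: Vec<_> = (0..4)
        .map(|_| {
            thread::spawn(|| {
                // All threads race to set the flag; the atomic swap makes the
                // race well-defined and reports which thread got there first.
                !FLAG.swap(true, Ordering::SeqCst)
            })
        })
        .collect();

    let winners = handles
        .into_iter()
        .map(|handle| handle.join().unwrap())
        .filter(|&won| won)
        .count();

    // Exactly one thread observed the flag flip from `false` to `true`.
    assert_eq!(winners, 1);
    assert!(FLAG.load(Ordering::SeqCst));
}
```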
"1.0.0")] #[cfg(target_has_atomic = "ptr")] pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T } } @@ -1040,6 +1051,7 @@ impl AtomicPtr { success: Ordering, failure: Ordering) -> Result<*mut T, *mut T> { + // SAFETY: data races are prevented by atomic intrinsics unsafe { let res = atomic_compare_exchange(self.p.get() as *mut usize, current as usize, @@ -1100,6 +1112,7 @@ impl AtomicPtr { success: Ordering, failure: Ordering) -> Result<*mut T, *mut T> { + // SAFETY: data races are prevented by atomic intrinsics unsafe { let res = atomic_compare_exchange_weak(self.p.get() as *mut usize, current as usize, @@ -1245,6 +1258,7 @@ assert_eq!(some_var.load(Ordering::SeqCst), 5); #[inline] #[$stable_access] pub fn get_mut(&mut self) -> &mut $int_type { + // SAFETY: the mutable reference guarantees unique ownership unsafe { &mut *self.v.get() } } } @@ -1299,6 +1313,7 @@ assert_eq!(some_var.load(Ordering::Relaxed), 5); #[inline] #[$stable] pub fn load(&self, order: Ordering) -> $int_type { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_load(self.v.get(), order) } } } @@ -1333,6 +1348,7 @@ assert_eq!(some_var.load(Ordering::Relaxed), 10); #[inline] #[$stable] pub fn store(&self, val: $int_type, order: Ordering) { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_store(self.v.get(), val, order); } } } @@ -1363,6 +1379,7 @@ assert_eq!(some_var.swap(10, Ordering::Relaxed), 5); #[$stable] #[$cfg_cas] pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_swap(self.v.get(), val, order) } } } @@ -1465,6 +1482,7 @@ assert_eq!(some_var.load(Ordering::Relaxed), 10); new: $int_type, success: Ordering, failure: Ordering) -> Result<$int_type, $int_type> { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) } } } @@ -1517,6 +1535,7 @@ loop { new: $int_type, success: Ordering, failure: Ordering) -> Result<$int_type, $int_type> { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) } @@ -1551,6 +1570,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10); #[$stable] #[$cfg_cas] pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_add(self.v.get(), val, order) } } } @@ -1583,6 +1603,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10); #[$stable] #[$cfg_cas] pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_sub(self.v.get(), val, order) } } } @@ -1618,6 +1639,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b100001); #[$stable] #[$cfg_cas] pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_and(self.v.get(), val, order) } } } @@ -1654,6 +1676,7 @@ assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31)); #[$stable_nand] #[$cfg_cas] pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_nand(self.v.get(), val, order) } } } @@ -1689,6 +1712,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b111111); #[$stable] 
#[$cfg_cas] pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_or(self.v.get(), val, order) } } } @@ -1724,6 +1748,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b011110); #[$stable] #[$cfg_cas] pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type { + // SAFETY: data races are prevented by atomic intrinsics unsafe { atomic_xor(self.v.get(), val, order) } } } @@ -1835,6 +1860,7 @@ assert!(max_foo == 42); issue = "48655")] #[$cfg_cas] pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type { + // SAFETY: data races are prevented by atomic intrinsics unsafe { $max_fn(self.v.get(), val, order) } } } @@ -1887,6 +1913,7 @@ assert_eq!(min_foo, 12); issue = "48655")] #[$cfg_cas] pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type { + // SAFETY: data races are prevented by atomic intrinsics unsafe { $min_fn(self.v.get(), val, order) } } } @@ -2430,6 +2457,7 @@ pub fn fence(order: Ordering) { // https://github.com/WebAssembly/tool-conventions/issues/59. We should // follow that discussion and implement a solution when one comes about! #[cfg(not(target_arch = "wasm32"))] + // SAFETY: using an atomic fence is safe unsafe { match order { Acquire => intrinsics::atomic_fence_acq(), @@ -2518,6 +2546,7 @@ pub fn fence(order: Ordering) { #[inline] #[stable(feature = "compiler_fences", since = "1.21.0")] pub fn compiler_fence(order: Ordering) { + // SAFETY: doesn't compile to machine code unsafe { match order { Acquire => intrinsics::atomic_singlethreadfence_acq(),
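`fence` and `compiler_fence` touch no memory of their own, which is why the comments can simply state that issuing them is safe; what they contribute is ordering for nearby relaxed operations. A sketch of the classic release/acquire fence pairing:

```rust
use std::sync::atomic::{fence, AtomicBool, AtomicUsize, Ordering};
use std::thread;

static DATA: AtomicUsize = AtomicUsize::new(0);
static READY: AtomicBool = AtomicBool::new(false);

fn main() {
    let producer = thread::spawn(|| {
        DATA.store(42, Ordering::Relaxed);
        // The release fence upgrades the following relaxed store: everything
        // written before the fence becomes visible to a thread that observes
        // the store and then issues an acquire fence.
        fence(Ordering::Release);
        READY.store(true, Ordering::Relaxed);
    });

    let consumer = thread::spawn(|| {
        while !READY.load(Ordering::Relaxed) {
            thread::yield_now();
        }
        fence(Ordering::Acquire);
        // Synchronizes with the release fence above, so the write to DATA
        // is guaranteed to be visible here.
        assert_eq!(DATA.load(Ordering::Relaxed), 42);
    });

    producer.join().unwrap();
    consumer.join().unwrap();
}
```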