diff --git a/.gitignore b/.gitignore index 4314e1036..b4c9a0d07 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +default.sled *db *conf *snap.* diff --git a/CHANGELOG.md b/CHANGELOG.md index a8ae64fa5..c1bbc4f22 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ ## Breaking Changes +* #1349 The "measure_allocs" feature has been removed. * #1135 The "no_metrics" anti-feature has been replaced with the "metrics" positive feature. * #1178 the `Event` enum has become a unified struct that allows diff --git a/Cargo.toml b/Cargo.toml index 53dc23bfe..25d5ad7fb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,7 +39,6 @@ event_log = [] metrics = ["num-format"] no_logs = ["log/max_level_off"] no_inline = [] -measure_allocs = [] pretty_backtrace = ["color-backtrace"] io_uring = ["rio"] docs = [] diff --git a/benchmarks/stress2/Cargo.toml b/benchmarks/stress2/Cargo.toml index 32f54efc2..60b97f4e8 100644 --- a/benchmarks/stress2/Cargo.toml +++ b/benchmarks/stress2/Cargo.toml @@ -20,11 +20,11 @@ event_log = ["sled/event_log"] compression = ["sled/compression"] no_logs = ["sled/no_logs"] metrics = ["sled/metrics"] -measure_allocs = ["sled/measure_allocs"] jemalloc = ["jemallocator"] logging = ["env_logger", "log", "color-backtrace"] dh = ["dhat"] memshred = [] +measure_allocs = [] [dependencies] rand = "0.7.3" diff --git a/benchmarks/stress2/src/main.rs b/benchmarks/stress2/src/main.rs index 3e2ae602d..db174baaf 100644 --- a/benchmarks/stress2/src/main.rs +++ b/benchmarks/stress2/src/main.rs @@ -14,8 +14,8 @@ use rand::{thread_rng, Rng}; #[cfg(feature = "jemalloc")] mod alloc { - use std::alloc::Layout; use jemallocator::Jemalloc; + use std::alloc::Layout; #[global_allocator] static ALLOCATOR: Jemalloc = Jemalloc; @@ -23,7 +23,7 @@ mod alloc { #[cfg(feature = "memshred")] mod alloc { - use std::alloc::{System, Layout}; + use std::alloc::{Layout, System}; #[global_allocator] static ALLOCATOR: Alloc = Alloc; @@ -42,11 +42,35 @@ mod alloc { unsafe fn 
dealloc(&self, ptr: *mut u8, layout: Layout) { std::ptr::write_bytes(ptr, 0xde, layout.size()); System.dealloc(ptr, layout) - } } } +#[cfg(feature = "measure_allocs")] +mod alloc { + use std::alloc::{Layout, System}; + use std::sync::atomic::{AtomicUsize, Ordering::Release}; + + pub static ALLOCATIONS: AtomicUsize = AtomicUsize::new(0); + pub static ALLOCATED_BYTES: AtomicUsize = AtomicUsize::new(0); + + #[global_allocator] + static ALLOCATOR: Alloc = Alloc; + + #[derive(Default, Debug, Clone, Copy)] + struct Alloc; + + unsafe impl std::alloc::GlobalAlloc for Alloc { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + ALLOCATIONS.fetch_add(1, Release); + ALLOCATED_BYTES.fetch_add(layout.size(), Release); + System.alloc(layout) + } + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + System.dealloc(ptr, layout) + } + } +} #[global_allocator] #[cfg(feature = "dh")] @@ -385,6 +409,17 @@ fn main() { ((ops * 1_000) / (time * 1_000)).to_formatted_string(&Locale::en) ); + #[cfg(feature = "measure_allocs")] + println!( + "allocated {} bytes in {} allocations", + alloc::ALLOCATED_BYTES + .load(Ordering::Acquire) + .to_formatted_string(&Locale::en), + alloc::ALLOCATIONS + .load(Ordering::Acquire) + .to_formatted_string(&Locale::en), + ); + #[cfg(feature = "metrics")] sled::print_profile(); } diff --git a/src/lib.rs b/src/lib.rs index 95b87e6f1..89274e036 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -243,14 +243,6 @@ mod flusher; /// The event log helps debug concurrency issues. pub mod event_log; -#[cfg(feature = "measure_allocs")] -mod measure_allocs; - -#[cfg(feature = "measure_allocs")] -#[global_allocator] -static ALLOCATOR: measure_allocs::TrackingAllocator = - measure_allocs::TrackingAllocator; - /// Opens a `Db` with a default configuration at the /// specified path. 
This will create a new storage /// directory at the specified path if it does diff --git a/src/measure_allocs.rs b/src/measure_allocs.rs deleted file mode 100644 index ee25f14fd..000000000 --- a/src/measure_allocs.rs +++ /dev/null @@ -1,23 +0,0 @@ -#![allow(unsafe_code)] - -use std::sync::atomic::{AtomicUsize, Ordering::Release}; - -// define a passthrough allocator that tracks alloc calls. -// adapted from the flatbuffer codebase -use std::alloc::{GlobalAlloc, Layout, System}; - -pub(crate) struct TrackingAllocator; - -pub static ALLOCATIONS: AtomicUsize = AtomicUsize::new(0); -pub static ALLOCATED_BYTES: AtomicUsize = AtomicUsize::new(0); - -unsafe impl GlobalAlloc for TrackingAllocator { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - ALLOCATIONS.fetch_add(1, Release); - ALLOCATED_BYTES.fetch_add(layout.size(), Release); - System.alloc(layout) - } - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - System.dealloc(ptr, layout) - } -} diff --git a/src/metrics.rs b/src/metrics.rs index 0d6642529..379256d0d 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -145,10 +145,6 @@ pub struct Metrics { pub tree_traverse: Histogram, pub write_to_log: Histogram, pub written_bytes: Histogram, - #[cfg(feature = "measure_allocs")] - pub allocations: CachePadded<AtomicUsize>, - #[cfg(feature = "measure_allocs")] - pub allocated_bytes: CachePadded<AtomicUsize>, } impl Metrics { @@ -437,27 +433,6 @@ impl Metrics { sz("seg util end", &self.segment_utilization_shutdown), ])); - #[cfg(feature = "measure_allocs")] - { - ret.push_str(&format!( - "{}\n", - std::iter::repeat("-").take(134).collect::<String>() - )); - ret.push_str("allocation statistics:\n"); - ret.push_str(&format!( - "total allocations: {}\n", - measure_allocs::ALLOCATIONS - .load(Acquire) - .to_formatted_string(&Locale::en) - )); - ret.push_str(&format!( - "allocated bytes: {}\n", - measure_allocs::ALLOCATED_BYTES - .load(Acquire) - .to_formatted_string(&Locale::en) - )); - } - ret } } diff --git a/src/node.rs b/src/node.rs index 
281d228ec..69da9b782 100644 --- a/src/node.rs +++ b/src/node.rs @@ -2225,27 +2225,25 @@ impl Inner { if size == 0 || self.index_key(0).unwrap_slice() > key { return Err(0); } - let mut base = 0_usize; - while size > 1 { - let half = size / 2; - let mid = base + half; - // mid is always in [0, size), that means mid is >= 0 and < size. - // mid >= 0: by definition - // mid < size: mid = size / 2 + size / 4 + size / 8 ... + let mut left = 0; + let mut right = size; + while left < right { + let mid = left + size / 2; + let l = self.index_key(mid); let cmp = crate::fastcmp(l.unwrap_slice(), key); - base = if cmp == Greater { base } else { mid }; - size -= half; - } - // base is always in [0, size) because base <= mid. - let l = self.index_key(base); - let cmp = crate::fastcmp(l.unwrap_slice(), key); - if cmp == Equal { - Ok(base) - } else { - Err(base + (cmp == Less) as usize) + if cmp == Less { + left = mid + 1; + } else if cmp == Greater { + right = mid; + } else { + return Ok(mid); + } + + size = right - left; } + Err(left) } pub(crate) fn can_merge_child(&self, pid: u64) -> bool {