@@ -29,10 +29,12 @@ use crate::alloc_tracker::{
29
29
/// Allocations smaller than this threshold are not tracked, to bound the
/// profiler's overhead. 64 KiB by default.
const DEFAULT_MIN_ALLOC_BYTES_FOR_PROFILING: u64 = 64 * 1024;

/// Cumulative allocated-bytes interval between two profiling reports.
/// 1 GiB by default.
const DEFAULT_REPORTING_INTERVAL_BYTES: u64 = 1024 * 1024 * 1024;

/// This custom target name is used to filter profiling events in the tracing
/// subscriber. It is also included in the printed log.
pub const JEMALLOC_PROFILER_TARGET: &str = "jemprof";
33
35
34
36
/// Atomics are used to communicate configurations between the start/stop
/// endpoints and the [JemallocProfiled] allocator wrapper.
///
/// The flags are padded to avoid false sharing of the CPU cache line between
/// threads. 128 bytes is the cache line size on x86_64 and arm64.
@@ -130,11 +132,13 @@ pub fn stop_profiling() {
130
132
131
133
/// Wraps the Jemalloc global allocator calls with tracking routines.
132
134
///
133
- /// The tracking routines are called only when [ENABLED] is set to true (calling
134
- /// [start_profiling()]), but we don't enforce any synchronization (we load it with
135
- /// Ordering::Relaxed) because it's fine to miss or record extra allocation events.
135
+ /// The tracking routines are called only when FLAGS.enabled is set to true
136
+ /// (calling [start_profiling()]). We load it with [Ordering::Relaxed] because
137
+ /// it's fine to miss or record extra allocation events and prefer limiting the
138
+ /// performance impact when profiling is not enabled.
136
139
///
137
- /// It's important to ensure that no allocations are performed inside the allocator!
140
+ /// Note: It's important to ensure that no allocations are performed inside the
141
+ /// allocator! It can cause an abort, a panic or even a deadlock.
138
142
pub struct JemallocProfiled ( pub Jemalloc ) ;
139
143
140
144
unsafe impl GlobalAlloc for JemallocProfiled {
@@ -176,13 +180,13 @@ unsafe impl GlobalAlloc for JemallocProfiled {
176
180
177
181
/// Prints both a backtrace and a Tokio tracing log
178
182
///
179
- /// Warning: stdout might allocate a buffer on first use
183
+ /// Warning: stdout writer might allocate a buffer on first use
180
184
fn identify_callsite ( callsite_hash : u64 , stat : AllocStat ) {
181
185
// To generate a complete trace:
182
186
// - tokio/tracing feature must be enabled, otherwise un-instrumented tasks will not propagate
183
187
// parent spans
184
- // - the tracing fmt subscriber filter must keep all spans for this event (TRACE level)
185
- // See the logger configuration for more details.
188
+ // - the tracing fmt subscriber filter must keep all spans for this event (TRACE level). See the
189
+ // logger configuration for more details.
186
190
trace ! ( target: JEMALLOC_PROFILER_TARGET , callsite=callsite_hash, allocs=stat. count, size=%stat. size) ;
187
191
}
188
192
@@ -195,7 +199,7 @@ fn backtrace_hash() -> u64 {
195
199
hasher. finish ( )
196
200
}
197
201
198
/// Warning: this function should not allocate! Allocating here can cause an
/// abort, a panic or even a deadlock.
199
203
#[ cold]
200
204
fn track_alloc_call ( ptr : * mut u8 , layout : Layout ) {
201
205
if layout. size ( ) >= FLAGS . min_alloc_bytes_for_profiling . load ( Ordering :: Relaxed ) as usize {
@@ -212,7 +216,7 @@ fn track_alloc_call(ptr: *mut u8, layout: Layout) {
212
216
}
213
217
AllocRecordingResponse :: TrackerFull ( table_name) => {
214
218
// this message might be displayed multiple times but that's fine
215
- // warning: stdout might allocate a buffer on first use
219
+ // warning: stdout writer might allocate a buffer on first use
216
220
error ! ( "heap profiling stopped, {table_name} full" ) ;
217
221
FLAGS . enabled . store ( false , Ordering :: Relaxed ) ;
218
222
}
@@ -222,28 +226,23 @@ fn track_alloc_call(ptr: *mut u8, layout: Layout) {
222
226
}
223
227
}
224
228
225
- /// Warning: allocating inside this function can cause an error (abort, panic or even deadlock).
229
+ /// Warning: this function should not allocate!
226
230
#[ cold]
227
231
fn track_dealloc_call ( ptr : * mut u8 , layout : Layout ) {
228
232
if layout. size ( ) >= FLAGS . min_alloc_bytes_for_profiling . load ( Ordering :: Relaxed ) as usize {
229
233
ALLOCATION_TRACKER . lock ( ) . unwrap ( ) . record_deallocation ( ptr) ;
230
234
}
231
235
}
232
236
233
/// Warning: this function should not allocate! Allocating here can cause an
/// abort, a panic or even a deadlock.
#[ cold]
235
- fn track_realloc_call (
236
- old_ptr : * mut u8 ,
237
- new_pointer : * mut u8 ,
238
- current_layout : Layout ,
239
- new_size : usize ,
240
- ) {
239
+ fn track_realloc_call ( old_ptr : * mut u8 , new_ptr : * mut u8 , current_layout : Layout , new_size : usize ) {
241
240
if current_layout. size ( ) >= FLAGS . min_alloc_bytes_for_profiling . load ( Ordering :: Relaxed ) as usize
242
241
{
243
242
let recording_response = ALLOCATION_TRACKER . lock ( ) . unwrap ( ) . record_reallocation (
244
243
new_size as u64 ,
245
244
old_ptr,
246
- new_pointer ,
245
+ new_ptr ,
247
246
) ;
248
247
249
248
match recording_response {
0 commit comments