1- use std:: sync:: atomic:: Ordering ;
2-
3- use crossbeam:: deque:: Injector ;
41use crossbeam:: queue:: SegQueue ;
52use dynqueue:: IntoDynQueue ;
6- use parking_lot:: { MutexGuard , RwLock } ;
73use rayon:: iter:: { IntoParallelIterator , ParallelIterator } ;
4+ use std:: sync:: atomic:: Ordering ;
85
96use crate :: collector:: dropper:: DropMessage ;
10- use crate :: collector:: { Collector , GcExclusiveWarrant } ;
7+ use crate :: collector:: Collector ;
118use crate :: concurrency:: lockout:: Lockout ;
129
10+ use parking_lot:: MutexGuard ;
11+
1312impl Collector {
1413 pub ( super ) fn do_collect ( & self , gc_guard : MutexGuard < ' _ , ( ) > ) {
15- // Be careful modifying this method. The tracked data and tracked handles can change underneath us
14+ // TODO: Improve this comment
15+ // Be careful modifying this method. The tracked data, reference counts, and to some extent
16+ // the graph, can change underneath us.
17+ //
1618 // Currently the state is this, as far as I can tell:
17- // - New handles are conservatively seen as roots if seen at all while we are touching handles
18- // (there is nowhere a new "secret root" can be created and then the old root stashed and seen as non-rooted)
19- // - New data is treated as a special case, and only deallocated if it existed at the start of collection
20- // - Deleted handles cannot make the graph "more connected" if the deletion was not observed
19+ // - New data is always seen as rooted as long as it is allocated after the graph freezing step
20+ // - After graph freezing (where we take all the Lockouts we can) there is no way to
21+ // smuggle items in or out of the graph
22+ // - The reference count preparation is conservative (if concurrently modified, the graph will simply look more connected)
2123
2224 trace ! ( "Beginning collection" ) ;
2325 let _atomic_spinlock_guard = self . atomic_spinlock . lock_exclusive ( ) ;
2426
25- let current_collection = self
26- . tracked_data
27- . current_collection_number
28- . load ( Ordering :: SeqCst ) ;
29-
3027 // Here we synchronize destructors: this ensures that handles in objects in the background thread are dropped
31- // Otherwise we'd see those handles as rooted and keep them around.
28+ // Otherwise we'd see those handles as rooted and keep them around. (This would not lead to incorrectness, but
29+ // this improves consistency and determinism.)
30+ //
3231 // This makes a lot of sense in the background thread (since it's totally async),
3332 // but may slow direct calls to `collect`.
3433 self . synchronize_destructors ( ) ;
3534
36- // The warrant system prevents us from scanning in-use data
37- let warrants: Injector < GcExclusiveWarrant > = Injector :: new ( ) ;
38-
3935 // eprintln!("tracked data {:?}", tracked_data);
4036 // eprintln!("tracked handles {:?}", tracked_handles);
4137
42- // In this step we calculate what's not rooted by marking all data definitively in a Gc
43- self . tracked_data . data . par_iter ( |data| {
44- // If data.last_marked == 0, then it is new data. Update that we've seen this data
45- // (this step helps synchronize what data is valid to be deallocated)
46- if data. last_marked . load ( Ordering :: SeqCst ) == 0 {
47- data. last_marked
48- . store ( current_collection - 1 , Ordering :: SeqCst ) ;
38+ // First, go through the data, resetting all the reference count trackers,
39+ // and taking exclusive warrants where possible
40+ self . tracked_data . par_iter ( |data| {
41+ unsafe {
42+ // Safe as we are the collector
43+ Lockout :: try_take_exclusive_access_unsafe ( & data) ;
4944 }
45+ // This can be done concurrently with the `Lockout` management, since the ref-count snapshot is conservative
46+ // TODO: Double check this logic
47+ data. ref_cnt . prepare_for_collection ( ) ;
48+ } ) ;
5049
51- if let Some ( warrant) = Lockout :: get_exclusive_warrant ( data. clone ( ) ) {
52- // Save that warrant so things can't shift around under us
53- warrants. push ( warrant) ;
54-
55- // Now figure out what handles are not rooted
50+ // Then adjust reference counts to figure out what is rooted
51+ self . tracked_data . par_iter ( |data| {
52+ if Lockout :: unsafe_exclusive_access_taken ( & data) {
5653 data. underlying_allocation . scan ( |h| {
57- h. handle_ref
58- . v
59- . last_non_rooted
60- . store ( current_collection, Ordering :: SeqCst ) ;
54+ h. data_ref . ref_cnt . found_once_internally ( ) ;
6155 } ) ;
6256 } else {
63- // eprintln!("failed to get warrant!");
64- // If we can't get the warrant, then this data must be in use, so we can mark it
65- data. last_marked . store ( current_collection, Ordering :: SeqCst ) ;
57+ // Someone else had this data during the collection, so it is clearly rooted
58+ data. ref_cnt . override_mark_as_rooted ( ) ;
6659 }
6760 } ) ;
6861
69- // The handles that were not just marked need to be treated as roots
62+ // Now we need to translate our set of roots into a queue
63+ // TODO: This is the only allocation in the collector at this point; it is probably removable or reusable
7064 let roots = SegQueue :: new ( ) ;
71- self . tracked_data . handles . par_iter ( |handle| {
72- // If the `last_non_rooted` number was not now, then it is a root
73- if handle. last_non_rooted . load ( Ordering :: SeqCst ) != current_collection {
74- roots. push ( handle) ;
65+ self . tracked_data . par_iter ( |data| {
66+ if data. ref_cnt . is_rooted ( ) {
67+ // We need to scan data that dynamically becomes rooted, so we use the `override_mark_as_rooted`
68+ // flag to track what we've enqueued to scan already
69+ data. ref_cnt . override_mark_as_rooted ( ) ;
70+ roots. push ( data) ;
7571 }
7672 } ) ;
7773
78- // eprintln!("roots {:?}", roots);
79-
80- // This step is dfs through the object graph (starting with the roots)
81- // We mark each object we find
8274 let dfs_stack = roots. into_dyn_queue ( ) ;
83- dfs_stack
84- . into_par_iter ( )
85- . for_each ( |( queue, handle) | unsafe {
86- handle. underlying_data . with_data ( |data| {
87- // If this data is new, we don't want to `Scan` it, since we may not have its Lockout
88- // Any handles inside this could not of been seen in step 1, so they'll be rooted anyway
89- if data. last_marked . load ( Ordering :: SeqCst ) != 0 {
90- // Essential note! All non-new non-warranted data is automatically marked
91- // Thus we will never accidentally scan non-warranted data here
92- let previous_mark =
93- data. last_marked . swap ( current_collection, Ordering :: SeqCst ) ;
94-
95- // Since we've done an atomic swap, we know we've already scanned this iff it was marked
96- // (excluding data marked because we couldn't get its warrant, who's handles would be seen as roots)
97- // This stops us for scanning data more than once and, crucially, concurrently scanning the same data
98- if previous_mark != current_collection {
99- data. last_marked . store ( current_collection, Ordering :: SeqCst ) ;
100-
101- data. underlying_allocation . scan ( |h| {
102- let mut should_enque = false ;
103- h. handle_ref . v . underlying_data . with_data ( |scanned_data| {
104- if scanned_data. last_marked . load ( Ordering :: SeqCst )
105- != current_collection
106- {
107- should_enque = true ;
108- }
109- } ) ;
110- if should_enque {
111- queue. enqueue ( h. handle_ref . v ) ;
112- }
113- } ) ;
114- }
75+ dfs_stack. into_par_iter ( ) . for_each ( |( queue, data) | {
76+ debug_assert ! ( !data. deallocated. load( Ordering :: SeqCst ) ) ;
77+
78+ if Lockout :: unsafe_exclusive_access_taken ( & data) {
79+ data. underlying_allocation . scan ( |h| {
80+ let ref_cnt = & h. data_ref . ref_cnt ;
81+ // We need to scan data that dynamically becomes rooted, so we use the `override_mark_as_rooted`
82+ // flag to track what we've enqueued to scan already. (So we can't just use `is_rooted` here.)
83+ if !ref_cnt. was_overriden_as_rooted ( ) {
84+ // This is technically racy, since we check the rooting status, THEN mark as rooted/enqueue
85+ // But that doesn't matter since the worst that can happen is that we enqueue the data twice
86+ ref_cnt. override_mark_as_rooted ( ) ;
87+ queue. enqueue ( h. data_ref . clone ( ) ) ;
11588 }
116- } )
117- } ) ;
118- // We're done scanning things, and have established what is marked. Release the warrants
119- drop ( warrants) ;
89+ } ) ;
90+ } else {
91+ // Someone else had this data during the collection, so it is clearly rooted
92+ data. ref_cnt . override_mark_as_rooted ( ) ;
93+ }
94+ } ) ;
95+
96+ // We are done scanning, so release any warrants
97+ self . tracked_data . par_iter ( |data| unsafe {
98+ Lockout :: try_release_exclusive_access_unsafe ( & data) ;
99+ } ) ;
100+
101+ // Since new refcnts are created as rooted, and new data is created with new refcnts, we
102+ // can safely treat the refcnt data as definitive
120103
121104 // Now cleanup by removing all the data that is done for
122- let to_drop = RwLock :: new ( Vec :: new ( ) ) ;
105+ let to_drop = self . dropper . get_buffer ( ) ;
123106
124- self . tracked_data . data . par_retain ( |data| {
125- // Mark the new data as in use for now
126- // This stops us deallocating data that was allocated during collection
127- if data . last_marked . load ( Ordering :: SeqCst ) == 0 {
128- data . last_marked . store ( current_collection , Ordering :: SeqCst ) ;
107+ self . tracked_data . par_retain ( |data| {
108+ let is_marked = data. ref_cnt . is_rooted ( ) ;
109+ if is_marked {
110+ // this is marked so retain it
111+ return true ;
129112 }
130113
131- // If this is true, we just marked this data
132- if data. last_marked . load ( Ordering :: SeqCst ) == current_collection {
133- // so retain it
134- true
135- } else {
136- // Otherwise we didn't mark it and it should be deallocated
137- // eprintln!("deallocating {:?}", data_ptr);
138- // Send it to the drop thread to be dropped
139- to_drop. write ( ) . push ( data. clone ( ) ) ;
114+ // Otherwise we didn't mark it and it should be deallocated
115+ // eprintln!("deallocating {:?}", data_ptr);
116+ // Send it to the drop thread to be dropped
117+ to_drop. push ( data. clone ( ) ) ;
140118
141- // Don't retain this data
142- false
143- }
119+ // Don't retain this data
120+ false
144121 } ) ;
145122
146123 // Send off the data to be dropped in the background
@@ -153,11 +130,6 @@ impl Collector {
153130 self . trigger
154131 . set_data_count_after_collection ( self . tracked_data_count ( ) ) ;
155132
156- // update collection number
157- self . tracked_data
158- . current_collection_number
159- . fetch_add ( 1 , Ordering :: SeqCst ) ;
160-
161133 drop ( gc_guard) ;
162134
163135 trace ! ( "Collection finished" ) ;
0 commit comments