From d9f2cebca2500198166131588741fabb0feeae48 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 9 Jun 2022 12:58:44 +0800 Subject: [PATCH 1/3] Delegate for PlanProcessEdges Replaced core functionalities of PlanProcessEdges with PlanTracingDelegate. --- src/scheduler/gc_work.rs | 119 +++++++++++++++++++++++++++++++++------ 1 file changed, 101 insertions(+), 18 deletions(-) diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 615458f0a1..0cf960680c 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -629,7 +629,7 @@ pub struct PlanProcessEdges< P: Plan + PlanTraceObject, const KIND: TraceKind, > { - plan: &'static P, + delegate: PlanTracingDelegate, base: ProcessEdgesBase, } @@ -641,12 +641,17 @@ impl + Plan, const KIND: TraceKin fn new(edges: Vec
, roots: bool, mmtk: &'static MMTK) -> Self { let base = ProcessEdgesBase::new(edges, roots, mmtk); let plan = base.plan().downcast_ref::

().unwrap(); - Self { plan, base } + let delegate = PlanTracingDelegate::new(plan); + Self { delegate, base } } #[inline(always)] fn create_scan_work(&self, nodes: Vec) -> Box> { - Box::new(PlanScanObjects::::new(self.plan, nodes, false)) + Box::new(PlanScanObjects::::new( + self.delegate.clone(), + nodes, + false, + )) } #[inline(always)] @@ -654,8 +659,10 @@ impl + Plan, const KIND: TraceKin if object.is_null() { return object; } - self.plan - .trace_object::(self, object, self.worker()) + let worker = self.worker(); + let queue = &mut self.base.nodes; + let delegate = &self.delegate; + delegate.trace_object(queue, object, worker) } #[inline] @@ -690,38 +697,114 @@ impl + Plan, const KIND: TraceKin /// This provides an implementation of scanning objects work. Each object will be scanned by calling `scan_object()` /// in `PlanTraceObject`. -pub struct PlanScanObjects + PlanTraceObject> { - plan: &'static P, +pub struct PlanScanObjects, const KIND: TraceKind> { + delegate: PlanTracingDelegate, buffer: Vec, #[allow(dead_code)] concurrent: bool, - phantom: PhantomData, } -impl + PlanTraceObject> PlanScanObjects { - pub fn new(plan: &'static P, buffer: Vec, concurrent: bool) -> Self { +impl, const KIND: TraceKind> PlanScanObjects { + pub fn new( + delegate: PlanTracingDelegate, + buffer: Vec, + concurrent: bool, + ) -> Self { Self { - plan, + delegate, buffer, concurrent, - phantom: PhantomData, } } } -impl + PlanTraceObject> GCWork - for PlanScanObjects +impl, const KIND: TraceKind> GCWork + for PlanScanObjects { - fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { + fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { trace!("PlanScanObjects"); { let tls = worker.tls; - let mut closure = ObjectsClosure::::new(worker); + let mut closure = ObjectsClosure::>::new(worker); for object in &self.buffer { - ::VMScanning::scan_object(tls, *object, &mut closure); - self.plan.post_scan_object(*object); + ::VMScanning::scan_object(tls, *object, 
&mut closure); + self.delegate.post_scan_object(*object); } } trace!("PlanScanObjects End"); } } + +pub trait TracingDelegate: 'static + Send + Clone { + type VM: VMBinding; + + fn trace_object( + &self, + trace: &mut T, + object: ObjectReference, + worker: &mut GCWorker, + ) -> ObjectReference; + + fn may_move_objects() -> bool; + + fn post_scan_object(&self, object: ObjectReference); +} + +pub struct PlanTracingDelegate, const KIND: TraceKind> { + plan: &'static P, +} + +impl, const KIND: TraceKind> PlanTracingDelegate { + pub fn new(plan: &'static P) -> Self { + Self { plan } + } +} + +impl, const KIND: TraceKind> Clone + for PlanTracingDelegate +{ + fn clone(&self) -> Self { + Self { + plan: self.plan.clone(), + } + } +} + +impl, const KIND: TraceKind> TracingDelegate + for PlanTracingDelegate +{ + type VM = P::VM; + + #[inline] + fn trace_object( + &self, + trace: &mut T, + object: ObjectReference, + worker: &mut GCWorker, + ) -> ObjectReference { + self.plan.trace_object::(trace, object, worker) + } + + #[inline(always)] + fn may_move_objects() -> bool { + P::may_move_objects::() + } + + #[inline] + fn post_scan_object(&self, object: ObjectReference) { + self.plan.post_scan_object(object); + } +} + + +const OBJECT_QUEUE_CAPACITY: usize = 4096; + +impl TransitiveClosure for Vec { + #[inline] + fn process_node(&mut self, object: ObjectReference) { + if self.is_empty() { + self.reserve(OBJECT_QUEUE_CAPACITY); + } + self.push(object); + } +} From 766ab5bb903087062102981f8d862072abb0f77d Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 9 Jun 2022 17:50:08 +0800 Subject: [PATCH 2/3] Decouple reference processor from ProcessEdgesWork But not the related work packets, yet. 
--- src/scheduler/mod.rs | 1 + src/util/reference_processor.rs | 71 +++++++++++++++++++++++++-------- 2 files changed, 55 insertions(+), 17 deletions(-) diff --git a/src/scheduler/mod.rs b/src/scheduler/mod.rs index 79f8ccde5a..33c33c476c 100644 --- a/src/scheduler/mod.rs +++ b/src/scheduler/mod.rs @@ -24,6 +24,7 @@ pub use controller::GCController; pub(crate) mod gc_work; pub use gc_work::ProcessEdgesWork; +pub use gc_work::TracingDelegate; // TODO: We shouldn't need to expose ScanStackRoot. However, OpenJDK uses it. // We should do some refactoring related to Scanning::SCAN_MUTATORS_IN_SAFEPOINT // to make sure this type is not exposed to the bindings. diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index 7998a7bba9..1cba7d9c44 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -1,4 +1,5 @@ use std::collections::HashSet; +use std::marker::PhantomData; use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; use std::sync::Mutex; @@ -10,6 +11,39 @@ use crate::util::VMWorkerThread; use crate::vm::ReferenceGlue; use crate::vm::VMBinding; +pub trait TraceObjectClosure: Send { + type VM: VMBinding; + + fn trace_object(&mut self, object: ObjectReference) -> ObjectReference; +} + +struct ObjectCollector { + nodes: Vec, + phantom_data: PhantomData, +} + +impl ObjectCollector { + fn new() -> Self { + Self { + nodes: Vec::new(), + phantom_data: PhantomData, + } + } +} + +/// ProcessEdgesWork wrapper +struct PEWWrapper<'a, E: ProcessEdgesWork> { + pew: &'a mut E, +} + +impl <'a, E: ProcessEdgesWork> TraceObjectClosure for PEWWrapper<'a, E> { + type VM = E::VM; + + fn trace_object(&mut self, object: ObjectReference) -> ObjectReference { + self.pew.trace_object(object) + } +} + /// Holds all reference processors for each weak reference Semantics. /// Currently this is based on Java's weak reference semantics (soft/weak/phantom). 
/// We should make changes to make this general rather than Java specific. @@ -64,7 +98,7 @@ impl ReferenceProcessors { /// However, for some plans like mark compact, at the point we do ref scanning, we do not know /// the forwarding addresses yet, thus we cannot do forwarding during scan refs. And for those /// plans, this separate step is required. - pub fn forward_refs(&self, trace: &mut E, mmtk: &'static MMTK) { + pub fn forward_refs(&self, trace: &mut E, mmtk: &'static MMTK) { debug_assert!( mmtk.plan.constraints().needs_forward_after_liveness, "A plan with needs_forward_after_liveness=false does not need a separate forward step" @@ -80,7 +114,7 @@ impl ReferenceProcessors { // Methods for scanning weak references. It needs to be called in a decreasing order of reference strengths, i.e. soft > weak > phantom /// Scan soft references. - pub fn scan_soft_refs(&self, trace: &mut E, mmtk: &'static MMTK) { + pub fn scan_soft_refs(&self, trace: &mut E, mmtk: &'static MMTK) { // For soft refs, it is up to the VM to decide when to reclaim this. // If this is not an emergency collection, we have no heap stress. We simply retain soft refs. if !mmtk.plan.is_emergency_collection() { @@ -95,13 +129,13 @@ impl ReferenceProcessors { } /// Scan weak references. - pub fn scan_weak_refs(&self, trace: &mut E, mmtk: &'static MMTK) { + pub fn scan_weak_refs(&self, trace: &mut E, mmtk: &'static MMTK) { self.weak .scan::(trace, mmtk.plan.is_current_gc_nursery()); } /// Scan phantom references. - pub fn scan_phantom_refs( + pub fn scan_phantom_refs( &self, trace: &mut E, mmtk: &'static MMTK, @@ -215,7 +249,7 @@ impl ReferenceProcessor { // easier to understand. 
#[inline(always)] - fn get_forwarded_referent( + fn get_forwarded_referent( e: &mut E, referent: ObjectReference, ) -> ObjectReference { @@ -223,7 +257,7 @@ impl ReferenceProcessor { } #[inline(always)] - fn get_forwarded_reference( + fn get_forwarded_reference( e: &mut E, object: ObjectReference, ) -> ObjectReference { @@ -231,7 +265,7 @@ impl ReferenceProcessor { } #[inline(always)] - fn keep_referent_alive( + fn keep_referent_alive( e: &mut E, referent: ObjectReference, ) -> ObjectReference { @@ -280,13 +314,13 @@ impl ReferenceProcessor { /// Forward the reference tables in the reference processor. This is only needed if a plan does not forward /// objects in their first transitive closure. /// nursery is not used for this. - pub fn forward(&self, trace: &mut E, _nursery: bool) { + pub fn forward(&self, trace: &mut E, _nursery: bool) { let mut sync = self.sync.lock().unwrap(); debug!("Starting ReferenceProcessor.forward({:?})", self.semantics); // Forward a single reference #[inline(always)] - fn forward_reference( + fn forward_reference( trace: &mut E, reference: ObjectReference, ) -> ObjectReference { @@ -338,7 +372,7 @@ impl ReferenceProcessor { // TODO: nursery is currently ignored. We used to use Vec for the reference table, and use an int // to point to the reference that we last scanned. However, when we use HashSet for reference table, // we can no longer do that. - fn scan(&self, trace: &mut E, _nursery: bool) { + fn scan(&self, trace: &mut E, _nursery: bool) { let mut sync = self.sync.lock().unwrap(); debug!("Starting ReferenceProcessor.scan({:?})", self.semantics); @@ -377,7 +411,7 @@ impl ReferenceProcessor { /// It retains the referent if the reference is definitely reachable. This method does /// not update reference or referent. So after this method, scan() should be used to update /// the references/referents. 
- fn retain(&self, trace: &mut E, _nursery: bool) { + fn retain(&self, trace: &mut E, _nursery: bool) { debug_assert!(self.semantics == Semantics::SOFT); let sync = self.sync.lock().unwrap(); @@ -418,7 +452,7 @@ impl ReferenceProcessor { /// /// If a None value is returned, the reference can be removed from the reference table. Otherwise, the updated reference should be kept /// in the reference table. - fn process_reference( + fn process_reference( &self, trace: &mut E, reference: ObjectReference, @@ -483,7 +517,6 @@ impl ReferenceProcessor { use crate::scheduler::GCWork; use crate::scheduler::GCWorker; use crate::MMTK; -use std::marker::PhantomData; #[derive(Default)] pub struct SoftRefProcessing(PhantomData); @@ -491,7 +524,8 @@ impl GCWork for SoftRefProcessing { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { let mut w = E::new(vec![], false, mmtk); w.set_worker(worker); - mmtk.reference_processors.scan_soft_refs(&mut w, mmtk); + let mut c = PEWWrapper { pew: &mut w }; + mmtk.reference_processors.scan_soft_refs(&mut c, mmtk); w.flush(); } } @@ -507,7 +541,8 @@ impl GCWork for WeakRefProcessing { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { let mut w = E::new(vec![], false, mmtk); w.set_worker(worker); - mmtk.reference_processors.scan_weak_refs(&mut w, mmtk); + let mut c = PEWWrapper { pew: &mut w }; + mmtk.reference_processors.scan_weak_refs(&mut c, mmtk); w.flush(); } } @@ -523,7 +558,8 @@ impl GCWork for PhantomRefProcessing { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { let mut w = E::new(vec![], false, mmtk); w.set_worker(worker); - mmtk.reference_processors.scan_phantom_refs(&mut w, mmtk); + let mut c = PEWWrapper { pew: &mut w }; + mmtk.reference_processors.scan_phantom_refs(&mut c, mmtk); w.flush(); } } @@ -539,7 +575,8 @@ impl GCWork for RefForwarding { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { let mut w = E::new(vec![], false, mmtk); w.set_worker(worker); - 
mmtk.reference_processors.forward_refs(&mut w, mmtk); + let mut c = PEWWrapper { pew: &mut w }; + mmtk.reference_processors.forward_refs(&mut c, mmtk); w.flush(); } } From aef59c901ee5ee87ccee8f11bd136558643d6c6c Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 10 Jun 2022 23:57:38 +0800 Subject: [PATCH 3/3] SFTTracingDelegate --- src/policy/space.rs | 2 +- src/scheduler/gc_work.rs | 65 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/src/policy/space.rs b/src/policy/space.rs index f3c63eddf4..d98350d1c5 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -120,7 +120,7 @@ pub trait SFT { // In this way, we can store the refs with in SFT (which cannot have parameters with generic type parameters) use crate::util::erase_vm::define_erased_vm_mut_ref; -define_erased_vm_mut_ref!(SFTProcessEdgesMutRef = SFTProcessEdges); +define_erased_vm_mut_ref!(SFTProcessEdgesMutRef = Vec); define_erased_vm_mut_ref!(GCWorkerMutRef = GCWorker); /// Print debug info for SFT. Should be false when committed. 
diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 0cf960680c..286b82ef2b 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -525,7 +525,7 @@ impl ProcessEdgesWork for SFTProcessEdges { // Erase type parameter let worker = GCWorkerMutRef::new(self.worker()); - let trace = SFTProcessEdgesMutRef::new(self); + let trace = SFTProcessEdgesMutRef::new::(&mut self.base.nodes); // Invoke trace object on sft let sft = crate::mmtk::SFT_MAP.get(object.to_address()); @@ -808,3 +808,66 @@ impl TransitiveClosure for Vec { self.push(object); } } + +pub struct SFTTracingDelegate { + phantom_data: PhantomData, +} + +impl SFTTracingDelegate { + pub fn new() -> Self { + Self { phantom_data: PhantomData } + } +} + +impl Clone for SFTTracingDelegate { + fn clone(&self) -> Self { + Self { phantom_data: PhantomData } + } +} + +impl TracingDelegate + for SFTTracingDelegate +{ + type VM = VM; + + #[inline] + fn trace_object( + &self, + trace: &mut T, + object: ObjectReference, + worker: &mut GCWorker, + ) -> ObjectReference { + // SFT only supports Vec. Currently, Rust doesn't have a way to check + // if T is Vec. + let vec_object_reference: &mut Vec = unsafe { std::mem::transmute(trace) }; + + use crate::policy::space::*; + + if object.is_null() { + return object; + } + + // Make sure we have valid SFT entries for the object. + #[cfg(debug_assertions)] + crate::mmtk::SFT_MAP.assert_valid_entries_for_object::(object); + + // Erase type parameter + let worker = GCWorkerMutRef::new(worker); + let trace = SFTProcessEdgesMutRef::new::(vec_object_reference); + + // Invoke trace object on sft + let sft = crate::mmtk::SFT_MAP.get(object.to_address()); + sft.sft_trace_object(trace, object, worker) + } + + #[inline(always)] + fn may_move_objects() -> bool { + true + } + + #[inline] + fn post_scan_object(&self, object: ObjectReference) { + // Do nothing. If a plan needs to do anything, it cannot use + // SFTTracingDelegate + } +}