@@ -149,6 +149,17 @@ struct copy_space {
   size_t nslabs;
 };
 
+enum copy_space_forward_result {
+  // We went to forward an edge, but the target was already forwarded, so we
+  // just updated the edge.
+  COPY_SPACE_FORWARD_UPDATED,
+  // We went to forward an edge and evacuated the referent to a new location.
+  COPY_SPACE_FORWARD_EVACUATED,
+  // We went to forward an edge but failed to acquire memory for its new
+  // location.
+  COPY_SPACE_FORWARD_FAILED,
+};
+
 struct copy_space_allocator {
   uintptr_t hp;
   uintptr_t limit;
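
The new enum copy_space_forward_result lets callers of the forwarding routines distinguish three outcomes: the edge merely got updated to an already-forwarded object, we evacuated the referent ourselves, or we ran out of copy reserve. A minimal sketch of a consumer, assuming this patch; trace_copy_space_edge, trace_worklist_push, and handle_evacuation_failure are hypothetical names, not part of the diff, and gc_edge_ref is assumed to read back the edge's current target:

    // Hypothetical tracing helper; only the copy_space_* names come from the patch.
    static void
    trace_copy_space_edge(struct copy_space *space, struct gc_edge edge,
                          struct gc_ref ref, struct copy_space_allocator *alloc) {
      switch (copy_space_forward_atomic(space, edge, ref, alloc)) {
      case COPY_SPACE_FORWARD_UPDATED:
        // Someone else evacuated the object; the edge now points at the copy.
        break;
      case COPY_SPACE_FORWARD_EVACUATED:
        // We made the copy, so tracing its fields is our responsibility.
        trace_worklist_push(gc_edge_ref(edge));        // hypothetical worklist
        break;
      case COPY_SPACE_FORWARD_FAILED:
        // Out of copy reserve; one fallback strategy is sketched after the
        // forwarding hunk further down.
        handle_evacuation_failure(space, edge, ref);   // hypothetical fallback
        break;
      }
    }
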
@@ -473,9 +484,7 @@ copy_space_allocator_release_partly_full_block(struct copy_space_allocator *allo
 static inline struct gc_ref
 copy_space_allocate(struct copy_space_allocator *alloc,
                     struct copy_space *space,
-                    size_t size,
-                    void (*get_more_empty_blocks)(void *data),
-                    void *data) {
+                    size_t size) {
   GC_ASSERT(size > 0);
   GC_ASSERT(size <= gc_allocator_large_threshold());
   size = align_up(size, gc_allocator_small_granule_size());
@@ -490,8 +499,8 @@ copy_space_allocate(struct copy_space_allocator *alloc,
       goto done;
     copy_space_allocator_release_full_block(alloc, space);
   }
-  while (!copy_space_allocator_acquire_empty_block(alloc, space))
-    get_more_empty_blocks(data);
+  if (!copy_space_allocator_acquire_empty_block(alloc, space))
+    return gc_ref_null();
   // The newly acquired block is empty and is therefore large enough for
   // a small allocation.
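
copy_space_allocate() now signals exhaustion by returning a null ref instead of looping on a caller-supplied get_more_empty_blocks callback, so any block-until-memory policy moves to the call site. A rough sketch of a wrapper restoring the old contract, assuming this patch; the wrapper itself is hypothetical:

    // Hypothetical wrapper: retry a failed allocation after asking the caller
    // for more empty blocks, mirroring the removed callback parameter.
    static inline struct gc_ref
    copy_space_allocate_blocking(struct copy_space_allocator *alloc,
                                 struct copy_space *space, size_t size,
                                 void (*get_more_empty_blocks)(void *data),
                                 void *data) {
      for (;;) {
        struct gc_ref ret = copy_space_allocate(alloc, space, size);
        if (!gc_ref_is_null(ret))
          return ret;
        get_more_empty_blocks(data);   // e.g. trigger a GC or map more slabs
      }
    }
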
@@ -588,12 +597,13 @@ copy_space_gc_during_evacuation(void *data) {
   GC_CRASH();
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward_atomic(struct copy_space *space, struct gc_edge edge,
                           struct gc_ref old_ref,
                           struct copy_space_allocator *alloc) {
   struct gc_atomic_forward fwd = gc_atomic_forward_begin(old_ref);
 
+retry:
   if (fwd.state == GC_FORWARDING_STATE_NOT_FORWARDED)
     gc_atomic_forward_acquire(&fwd);
 
@@ -605,33 +615,34 @@ copy_space_forward_atomic(struct copy_space *space, struct gc_edge edge,
   case GC_FORWARDING_STATE_ACQUIRED: {
     // We claimed the object successfully; evacuating is up to us.
     size_t bytes = gc_atomic_forward_object_size(&fwd);
-    struct gc_ref new_ref =
-      copy_space_allocate(alloc, space, bytes,
-                          copy_space_gc_during_evacuation, NULL);
+    struct gc_ref new_ref = copy_space_allocate(alloc, space, bytes);
+    if (gc_ref_is_null(new_ref)) {
+      gc_atomic_forward_abort(&fwd);
+      return COPY_SPACE_FORWARD_FAILED;
+    }
     // Copy object contents before committing, as we don't know what
     // part of the object (if any) will be overwritten by the
     // commit.
     memcpy(gc_ref_heap_object(new_ref), gc_ref_heap_object(old_ref), bytes);
     gc_atomic_forward_commit(&fwd, new_ref);
     gc_edge_update(edge, new_ref);
-    return 1;
+    return COPY_SPACE_FORWARD_EVACUATED;
   }
   case GC_FORWARDING_STATE_BUSY:
     // Someone else claimed this object first. Spin until new address
     // known, or evacuation aborts.
     for (size_t spin_count = 0;; spin_count++) {
       if (gc_atomic_forward_retry_busy(&fwd))
-        break;
+        goto retry;
       yield_for_spin(spin_count);
     }
-    GC_ASSERT(fwd.state == GC_FORWARDING_STATE_FORWARDED);
-    // Fall through.
+    GC_CRASH(); // Unreachable.
   case GC_FORWARDING_STATE_FORWARDED:
     // The object has been evacuated already. Update the edge;
     // whoever forwarded the object will make sure it's eventually
     // traced.
     gc_edge_update(edge, gc_ref(gc_atomic_forward_address(&fwd)));
-    return 0;
+    return COPY_SPACE_FORWARD_UPDATED;
   }
 }
 
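
Because an evacuation can now fail, a thread that loses the race in the BUSY case can no longer assume the winner will finish forwarding: the winner may call gc_atomic_forward_abort() and leave the object unforwarded, so the spinner jumps back to the retry label and may end up claiming and evacuating the object itself, which is why the old assert-and-fall-through is replaced by an unreachable GC_CRASH(). A common way for a collector to react to COPY_SPACE_FORWARD_FAILED is to give up on evacuation for the rest of the cycle and trace in place; a hedged sketch, where forward_or_mark, evacuation_aborted, and mark_in_place are hypothetical:

    #include <stdatomic.h>

    // Hypothetical cycle-wide fallback; only copy_space_forward_atomic and the
    // result enum come from the patch.
    static atomic_int evacuation_aborted;

    static void
    forward_or_mark(struct copy_space *space, struct gc_edge edge,
                    struct gc_ref ref, struct copy_space_allocator *alloc) {
      switch (copy_space_forward_atomic(space, edge, ref, alloc)) {
      case COPY_SPACE_FORWARD_UPDATED:
      case COPY_SPACE_FORWARD_EVACUATED:
        return;
      case COPY_SPACE_FORWARD_FAILED:
        // Remember that this cycle could not evacuate everything (read
        // elsewhere, e.g. when choosing the next cycle's strategy), then
        // trace the object where it sits instead of copying it.
        atomic_store(&evacuation_aborted, 1);
        mark_in_place(ref);   // hypothetical: mark/trace without moving
        return;
      }
    }
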
@@ -640,6 +651,7 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
                                     struct gc_edge edge,
                                     struct gc_ref old_ref) {
   struct gc_atomic_forward fwd = gc_atomic_forward_begin(old_ref);
+retry:
   switch (fwd.state) {
   case GC_FORWARDING_STATE_NOT_FORWARDED:
     return 0;
@@ -648,11 +660,10 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
     // known.
     for (size_t spin_count = 0;; spin_count++) {
       if (gc_atomic_forward_retry_busy(&fwd))
-        break;
+        goto retry;
       yield_for_spin(spin_count);
     }
-    GC_ASSERT(fwd.state == GC_FORWARDING_STATE_FORWARDED);
-    // Fall through.
+    GC_CRASH(); // Unreachable.
   case GC_FORWARDING_STATE_FORWARDED:
     gc_edge_update(edge, gc_ref(gc_atomic_forward_address(&fwd)));
     return 1;
@@ -661,24 +672,24 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
   }
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward_nonatomic(struct copy_space *space, struct gc_edge edge,
                              struct gc_ref old_ref,
                              struct copy_space_allocator *alloc) {
   uintptr_t forwarded = gc_object_forwarded_nonatomic(old_ref);
   if (forwarded) {
     gc_edge_update(edge, gc_ref(forwarded));
-    return 0;
+    return COPY_SPACE_FORWARD_UPDATED;
   } else {
     size_t size;
     gc_trace_object(old_ref, NULL, NULL, NULL, &size);
-    struct gc_ref new_ref =
-      copy_space_allocate(alloc, space, size,
-                          copy_space_gc_during_evacuation, NULL);
+    struct gc_ref new_ref = copy_space_allocate(alloc, space, size);
+    if (gc_ref_is_null(new_ref))
+      return COPY_SPACE_FORWARD_FAILED;
     memcpy(gc_ref_heap_object(new_ref), gc_ref_heap_object(old_ref), size);
     gc_object_forward_nonatomic(old_ref, new_ref);
     gc_edge_update(edge, new_ref);
-    return 1;
+    return COPY_SPACE_FORWARD_EVACUATED;
   }
 }
 
@@ -694,7 +705,7 @@ copy_space_forward_if_traced_nonatomic(struct copy_space *space,
   return 0;
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward(struct copy_space *src_space, struct copy_space *dst_space,
                    struct gc_edge edge,
                    struct gc_ref old_ref,