Commit ba65e32
pcc / copy-space: Allow allocations to fail
This fixes an issue in which minor collection of a nursery full of live data can fail because of fragmentation, whereas really it should just fall back to promotion.
1 parent 5fdb14c commit ba65e32
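
In outline: copy_space_allocate now signals failure by returning a null ref instead of blocking until more empty blocks appear, and the minor-trace path reacts to a failed newspace copy by promoting the survivor into oldspace. The sketch below models only that control flow with made-up stand-ins (struct space, space_allocate and copy_young_object are illustrative, not the library's API); the real code follows in the diff.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Hypothetical stand-in for a bump-pointer space; not the library's type.
struct space { unsigned char *hp, *limit; };

// Try to allocate; return NULL on failure instead of blocking for more
// blocks, mirroring the new copy_space_allocate contract.
static void *space_allocate(struct space *s, size_t size) {
  if ((size_t)(s->limit - s->hp) < size)
    return NULL;
  void *ret = s->hp;
  s->hp += size;
  return ret;
}

// Minor-trace fallback: keep the object as a survivor in newspace if we
// can, otherwise promote it into oldspace; only crash if even that fails.
static void *copy_young_object(struct space *newspace, struct space *oldspace,
                               const void *obj, size_t size) {
  void *to = space_allocate(newspace, size);
  if (!to)
    to = space_allocate(oldspace, size);   // fall back to promotion
  if (!to) {
    fprintf(stderr, "Out of memory\n");
    abort();
  }
  return memcpy(to, obj, size);
}

int main(void) {
  unsigned char new_mem[16], old_mem[64];
  struct space newspace = { new_mem, new_mem + sizeof new_mem };
  struct space oldspace = { old_mem, old_mem + sizeof old_mem };
  const char obj[32] = "bigger than the tiny newspace";
  void *copy = copy_young_object(&newspace, &oldspace, obj, sizeof obj);
  printf("promoted copy at %p: %s\n", copy, (char *)copy);
  return 0;
}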

2 files changed: +104 −47 lines
src/copy-space.h: +35 −24
@@ -149,6 +149,17 @@ struct copy_space {
   size_t nslabs;
 };
 
+enum copy_space_forward_result {
+  // We went to forward an edge, but the target was already forwarded, so we
+  // just updated the edge.
+  COPY_SPACE_FORWARD_UPDATED,
+  // We went to forward an edge and evacuated the referent to a new location.
+  COPY_SPACE_FORWARD_EVACUATED,
+  // We went to forward an edge but failed to acquire memory for its new
+  // location.
+  COPY_SPACE_FORWARD_FAILED,
+};
+
 struct copy_space_allocator {
   uintptr_t hp;
   uintptr_t limit;
@@ -473,9 +484,7 @@ copy_space_allocator_release_partly_full_block(struct copy_space_allocator *allo
 static inline struct gc_ref
 copy_space_allocate(struct copy_space_allocator *alloc,
                     struct copy_space *space,
-                    size_t size,
-                    void (*get_more_empty_blocks)(void *data),
-                    void *data) {
+                    size_t size) {
   GC_ASSERT(size > 0);
   GC_ASSERT(size <= gc_allocator_large_threshold());
   size = align_up(size, gc_allocator_small_granule_size());
@@ -490,8 +499,8 @@ copy_space_allocate(struct copy_space_allocator *alloc,
       goto done;
     copy_space_allocator_release_full_block(alloc, space);
   }
-  while (!copy_space_allocator_acquire_empty_block(alloc, space))
-    get_more_empty_blocks(data);
+  if (!copy_space_allocator_acquire_empty_block(alloc, space))
+    return gc_ref_null();
   // The newly acquired block is empty and is therefore large enough for
   // a small allocation.
 
@@ -588,12 +597,13 @@ copy_space_gc_during_evacuation(void *data) {
   GC_CRASH();
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward_atomic(struct copy_space *space, struct gc_edge edge,
                           struct gc_ref old_ref,
                           struct copy_space_allocator *alloc) {
   struct gc_atomic_forward fwd = gc_atomic_forward_begin(old_ref);
 
+retry:
   if (fwd.state == GC_FORWARDING_STATE_NOT_FORWARDED)
     gc_atomic_forward_acquire(&fwd);
 
@@ -605,33 +615,34 @@ copy_space_forward_atomic(struct copy_space *space, struct gc_edge edge,
   case GC_FORWARDING_STATE_ACQUIRED: {
     // We claimed the object successfully; evacuating is up to us.
     size_t bytes = gc_atomic_forward_object_size(&fwd);
-    struct gc_ref new_ref =
-      copy_space_allocate(alloc, space, bytes,
-                          copy_space_gc_during_evacuation, NULL);
+    struct gc_ref new_ref = copy_space_allocate(alloc, space, bytes);
+    if (gc_ref_is_null(new_ref)) {
+      gc_atomic_forward_abort(&fwd);
+      return COPY_SPACE_FORWARD_FAILED;
+    }
     // Copy object contents before committing, as we don't know what
     // part of the object (if any) will be overwritten by the
     // commit.
     memcpy(gc_ref_heap_object(new_ref), gc_ref_heap_object(old_ref), bytes);
     gc_atomic_forward_commit(&fwd, new_ref);
     gc_edge_update(edge, new_ref);
-    return 1;
+    return COPY_SPACE_FORWARD_EVACUATED;
   }
   case GC_FORWARDING_STATE_BUSY:
     // Someone else claimed this object first. Spin until new address
     // known, or evacuation aborts.
     for (size_t spin_count = 0;; spin_count++) {
       if (gc_atomic_forward_retry_busy(&fwd))
-        break;
+        goto retry;
       yield_for_spin(spin_count);
     }
-    GC_ASSERT(fwd.state == GC_FORWARDING_STATE_FORWARDED);
-    // Fall through.
+    GC_CRASH(); // Unreachable.
   case GC_FORWARDING_STATE_FORWARDED:
     // The object has been evacuated already. Update the edge;
     // whoever forwarded the object will make sure it's eventually
     // traced.
     gc_edge_update(edge, gc_ref(gc_atomic_forward_address(&fwd)));
-    return 0;
+    return COPY_SPACE_FORWARD_UPDATED;
   }
 }
 
@@ -640,6 +651,7 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
                                     struct gc_edge edge,
                                     struct gc_ref old_ref) {
   struct gc_atomic_forward fwd = gc_atomic_forward_begin(old_ref);
+retry:
   switch (fwd.state) {
   case GC_FORWARDING_STATE_NOT_FORWARDED:
     return 0;
@@ -648,11 +660,10 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
     // known.
     for (size_t spin_count = 0;; spin_count++) {
       if (gc_atomic_forward_retry_busy(&fwd))
-        break;
+        goto retry;
       yield_for_spin(spin_count);
     }
-    GC_ASSERT(fwd.state == GC_FORWARDING_STATE_FORWARDED);
-    // Fall through.
+    GC_CRASH(); // Unreachable.
   case GC_FORWARDING_STATE_FORWARDED:
     gc_edge_update(edge, gc_ref(gc_atomic_forward_address(&fwd)));
     return 1;
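
A note on the two BUSY cases above: now that forwarding can fail, a thread that loses the claim race and spins may see the winner either commit a new address or abort (its allocation returned a null ref), so after gc_atomic_forward_retry_busy() succeeds the state is no longer guaranteed to be FORWARDED. That is why the old assert-and-fall-through becomes goto retry, and the GC_CRASH() after the spin loop only documents that the loop never falls out. A small self-contained model of that re-dispatch (the enum and helpers below are illustrative stand-ins, not the library's atomics):

#include <stdio.h>

// Illustrative forwarding states; the names mirror the diff, but this is a
// toy model, not the library's atomic implementation.
enum fwd_state { NOT_FORWARDED, BUSY, FORWARDED };

// Pretend the winning thread is mid-evacuation and then either commits or
// aborts; a real implementation would observe this through atomics.
static enum fwd_state resolve_busy(int winner_aborted) {
  return winner_aborted ? NOT_FORWARDED : FORWARDED;
}

static const char *handle(enum fwd_state state, int winner_aborted) {
retry:
  switch (state) {
  case NOT_FORWARDED:
    return "claim and evacuate it ourselves";
  case BUSY:
    // Spin until the winner resolves, then re-dispatch: the state can be
    // FORWARDED (they committed) or NOT_FORWARDED (they aborted on OOM).
    state = resolve_busy(winner_aborted);
    goto retry;
  case FORWARDED:
    return "just update the edge to the new address";
  }
  return "unreachable";
}

int main(void) {
  printf("winner committed: %s\n", handle(BUSY, 0));
  printf("winner aborted:   %s\n", handle(BUSY, 1));
  return 0;
}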
@@ -661,24 +672,24 @@ copy_space_forward_if_traced_atomic(struct copy_space *space,
   }
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward_nonatomic(struct copy_space *space, struct gc_edge edge,
                              struct gc_ref old_ref,
                              struct copy_space_allocator *alloc) {
   uintptr_t forwarded = gc_object_forwarded_nonatomic(old_ref);
   if (forwarded) {
     gc_edge_update(edge, gc_ref(forwarded));
-    return 0;
+    return COPY_SPACE_FORWARD_UPDATED;
   } else {
     size_t size;
     gc_trace_object(old_ref, NULL, NULL, NULL, &size);
-    struct gc_ref new_ref =
-      copy_space_allocate(alloc, space, size,
-                          copy_space_gc_during_evacuation, NULL);
+    struct gc_ref new_ref = copy_space_allocate(alloc, space, size);
+    if (gc_ref_is_null(new_ref))
+      return COPY_SPACE_FORWARD_FAILED;
     memcpy(gc_ref_heap_object(new_ref), gc_ref_heap_object(old_ref), size);
     gc_object_forward_nonatomic(old_ref, new_ref);
     gc_edge_update(edge, new_ref);
-    return 1;
+    return COPY_SPACE_FORWARD_EVACUATED;
   }
 }
 
@@ -694,7 +705,7 @@ copy_space_forward_if_traced_nonatomic(struct copy_space *space,
   return 0;
 }
 
-static inline int
+static inline enum copy_space_forward_result
 copy_space_forward(struct copy_space *src_space, struct copy_space *dst_space,
                    struct gc_edge edge,
                    struct gc_ref old_ref,
src/pcc.c: +69 −23
@@ -287,6 +287,31 @@ static inline int edge_is_from_survivor(struct gc_heap *heap,
   return copy_space_contains_edge_aligned(heap_new_space(heap), edge);
 }
 
+static inline int forward(struct copy_space *src_space,
+                          struct copy_space *dst_space,
+                          struct gc_edge edge,
+                          struct gc_ref ref,
+                          struct copy_space_allocator *dst_alloc) {
+  switch (copy_space_forward(src_space, dst_space, edge, ref, dst_alloc)) {
+  case COPY_SPACE_FORWARD_UPDATED:
+    return 0;
+  case COPY_SPACE_FORWARD_EVACUATED:
+    return 1;
+  case COPY_SPACE_FORWARD_FAILED:
+    // If space is really tight and reordering of objects during evacuation
+    // resulted in more end-of-block fragmentation and thus block use than
+    // before collection started, we can actually run out of memory while
+    // collecting. We should probably attempt to expand the heap here, at
+    // least by a single block; it's better than the alternatives. For now,
+    // abort.
+    fprintf(stderr, "Out of memory\n");
+    GC_CRASH();
+    break;
+  default:
+    GC_CRASH();
+  }
+}
+
 static inline int do_minor_trace(struct gc_heap *heap, struct gc_edge edge,
                                  struct gc_ref ref,
                                  struct gc_trace_worker_data *data) {
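
The comment in the FAILED arm is the heart of the problem this commit addresses: evacuation copies objects in trace order rather than their original order, and with fixed-size blocks a different packing order can strand more space at block tails, so a set of live objects that fit before collection may briefly need more blocks while being copied. A self-contained toy showing the order-dependent block count (the block size and object sizes are made up for illustration):

#include <stdio.h>
#include <stddef.h>

// Count how many fixed-size blocks a sequence of objects needs when packed
// in order; the tail of a block is wasted once the next object no longer
// fits, so the count depends on the order of the objects.
#define BLOCK_SIZE 100

static size_t blocks_needed(const size_t *sizes, size_t n) {
  size_t blocks = 1, used = 0;
  for (size_t i = 0; i < n; i++) {
    if (used + sizes[i] > BLOCK_SIZE) {   // tail of this block is wasted
      blocks++;
      used = 0;
    }
    used += sizes[i];
  }
  return blocks;
}

int main(void) {
  size_t before[] = { 60, 40, 60, 40 };   // original order: packs into 2 blocks
  size_t after[]  = { 60, 60, 40, 40 };   // evacuation order: needs 3 blocks
  printf("before: %zu blocks, after: %zu blocks\n",
         blocks_needed(before, 4), blocks_needed(after, 4));
  return 0;
}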
@@ -324,16 +349,32 @@ static inline int do_minor_trace(struct gc_heap *heap, struct gc_edge edge,
     // However however, it is hard to distinguish between edges from promoted
     // objects and edges from old objects, so we mostly just rely on an
     // idempotent "log if unlogged" operation instead.
-    int promote = copy_space_should_promote(new_space, ref);
-    struct copy_space *dst_space = promote ? old_space : new_space;
-    struct copy_space_allocator *alloc = promote
-      ? trace_worker_old_space_allocator(data)
-      : trace_worker_new_space_allocator(data);
-    // Update the remembered set for promoted-to-survivor edges.
-    if (!promote && !edge_is_from_survivor(heap, edge)
-        && remember_edge_to_survivor_object(heap, edge))
-      gc_field_set_writer_add_edge(trace_worker_field_logger(data), edge);
-    return copy_space_forward(new_space, dst_space, edge, ref, alloc);
+    if (!copy_space_should_promote(new_space, ref)) {
+      // Try to leave the object in newspace as a survivor. If the edge is from
+      // a promoted object, we will need to add it to the remembered set.
+      if (!edge_is_from_survivor(heap, edge)
+          && remember_edge_to_survivor_object(heap, edge)) {
+        // Log the edge even though in rare conditions the referent could end up
+        // being promoted by us (if we run out of newspace) or a remote
+        // evacuation thread (if they run out of newspace).
+        gc_field_set_writer_add_edge(trace_worker_field_logger(data), edge);
+      }
+      switch (copy_space_forward(new_space, new_space, edge, ref,
+                                 trace_worker_new_space_allocator(data))) {
+      case COPY_SPACE_FORWARD_UPDATED:
+        return 0;
+      case COPY_SPACE_FORWARD_EVACUATED:
+        return 1;
+      case COPY_SPACE_FORWARD_FAILED:
+        // Ran out of newspace! Fall through to promote instead.
+        break;
+      default:
+        GC_CRASH();
+      }
+    }
+    // Promote the object.
+    return forward(new_space, old_space, edge, ref,
+                   trace_worker_old_space_allocator(data));
   } else {
     // Note that although the target of the edge might not be in lospace, this
     // will do what we want and return 1 if and only if ref is was a young
@@ -354,16 +395,16 @@ static inline int do_trace(struct gc_heap *heap, struct gc_edge edge,
     struct copy_space *new_space = heap_new_space(heap);
     struct copy_space *old_space = heap_old_space(heap);
     if (new_space_contains(heap, ref))
-      return copy_space_forward(new_space, old_space, edge, ref,
-                                trace_worker_old_space_allocator(data));
+      return forward(new_space, old_space, edge, ref,
+                     trace_worker_old_space_allocator(data));
     if (old_space_contains(heap, ref))
-      return copy_space_forward(old_space, old_space, edge, ref,
-                                trace_worker_old_space_allocator(data));
+      return forward(old_space, old_space, edge, ref,
+                     trace_worker_old_space_allocator(data));
   } else {
     if (GC_LIKELY(copy_space_contains(heap_mono_space(heap), ref)))
-      return copy_space_forward(heap_mono_space(heap), heap_mono_space(heap),
-                                edge, ref,
-                                trace_worker_mono_space_allocator(data));
+      return forward(heap_mono_space(heap), heap_mono_space(heap),
+                     edge, ref,
+                     trace_worker_mono_space_allocator(data));
   }
 
   // Fall through for objects in large or extern spaces.
@@ -916,12 +957,17 @@ void* gc_allocate_slow(struct gc_mutator *mut, size_t size) {
   if (size > gc_allocator_large_threshold())
     return allocate_large(mut, size);
 
-  struct gc_ref ret =
-    copy_space_allocate(&mut->allocator,
-                        heap_allocation_space(mutator_heap(mut)),
-                        size,
-                        get_more_empty_blocks_for_mutator,
-                        mut);
+  struct gc_ref ret;
+  while (1) {
+    ret = copy_space_allocate(&mut->allocator,
+                              heap_allocation_space(mutator_heap(mut)),
+                              size);
+    if (gc_ref_is_null(ret))
+      trigger_collection(mut, GC_COLLECTION_MINOR);
+    else
+      break;
+  }
+
   gc_clear_fresh_allocation(ret, size);
   return gc_ref_heap_object(ret);
 }
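
With the get_more_empty_blocks callback gone, the mutator slow path owns the retry: it loops, triggering a minor collection whenever copy_space_allocate hands back a null ref, and proceeds once an allocation succeeds. A compact stand-alone model of that loop (struct space and reclaim() are hypothetical placeholders for the heap and trigger_collection, not the library's API):

#include <stdio.h>
#include <stddef.h>

// Hypothetical stand-in: a space whose used bytes can be "collected" away.
struct space { size_t used, capacity; };

static void *space_allocate(struct space *s, size_t size) {
  if (s->used + size > s->capacity)
    return NULL;                       // signal failure instead of blocking
  s->used += size;
  return (void *)1;                    // token non-null address for the model
}

// Placeholder for trigger_collection(mut, GC_COLLECTION_MINOR).
static void reclaim(struct space *s) { s->used = 0; }

static void *allocate_slow(struct space *s, size_t size) {
  for (;;) {
    void *ret = space_allocate(s, size);
    if (ret)
      return ret;
    reclaim(s);                        // collect, then retry the allocation
  }
}

int main(void) {
  struct space s = { .used = 90, .capacity = 100 };
  printf("allocated: %p\n", allocate_slow(&s, 32));
  return 0;
}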
