/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: evacuation functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Evac.h"
#include "Storage.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "Compact.h"
#include "MarkStack.h"
#include "Prelude.h"
#include "Trace.h"
#include "LdvProfile.h"

#if defined(PROF_SPIN) && defined(THREADED_RTS) && defined(PARALLEL_GC)
StgWord64 whitehole_spin = 0;
#endif

#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
#define evacuate(p) evacuate1(p)
#define HEAP_ALLOCED_GC(p) HEAP_ALLOCED(p)
#endif

#if !defined(PARALLEL_GC)
#define copy_tag_nolock(p, info, src, size, stp, tag) \
        copy_tag(p, info, src, size, stp, tag)
#endif

/* Used to avoid long recursion due to selector thunks
 */
#define MAX_THUNK_SELECTOR_DEPTH 16

static void eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool);
STATIC_INLINE void evacuate_large(StgPtr p);

/* -----------------------------------------------------------------------------
   Allocate some space in which to copy an object.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
alloc_for_copy (nat size, generation *gen)
{
    StgPtr to;
    gen_workspace *ws;

    /* Find out where we're going, using the handy "to" pointer in
     * the gen of the source object.  If it turns out we need to
     * evacuate to an older generation, adjust it here (see comment
     * by evacuate()).
     */
    if (gen < gct->evac_gen) {
        if (gct->eager_promotion) {
            gen = gct->evac_gen;
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->gens[gen->no];
    // this compiles to a single mem access to gen->abs_no only

    /* chain a new block onto the to-space for the destination gen if
     * necessary.
     */
    to = ws->todo_free;
    ws->todo_free += size;
    if (ws->todo_free > ws->todo_lim) {
        to = todo_block_full(size, ws);
    }
    ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);

    return to;
}
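
/* A usage sketch (illustrative, not part of the original source): callers
 * bump-allocate in the destination generation's workspace and then copy the
 * object word by word:
 *
 *     StgPtr to = alloc_for_copy(size, gen);   // may chain on a new block
 *     for (i = 0; i < size; i++) to[i] = ((StgPtr)src)[i];
 *
 * The fast path is just "read todo_free, add size, compare against
 * todo_lim"; only when the bump pointer crosses todo_lim do we take the
 * slow path through todo_block_full().
 */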

/* -----------------------------------------------------------------------------
   The evacuate() code
   -------------------------------------------------------------------------- */

STATIC_INLINE GNUC_ATTR_HOT void
copy_tag(StgClosure **p, const StgInfoTable *info,
         StgClosure *src, nat size, generation *gen, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,gen);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#if defined(PARALLEL_GC)
    {
        const StgInfoTable *new_info;
        new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
        if (new_info != info) {
            return evacuate(p); // does the failed_to_evac stuff
        } else {
            *p = TAG_CLOSURE(tag,(StgClosure*)to);
        }
    }
#else
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
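
/* Note (illustrative): under PARALLEL_GC two GC threads can race to evacuate
 * the same closure.  Both may copy it, but only the thread whose cas()
 * installs the forwarding pointer wins; the loser calls evacuate() again,
 * sees the forwarding pointer, and adopts the winner's copy.  The losing
 * copy is abandoned as unreachable slop in its to-space block -- cheaper
 * than taking a lock on every closure on this very hot path.
 */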

#if defined(PARALLEL_GC)
STATIC_INLINE void
copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
         StgClosure *src, nat size, generation *gen, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,gen);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

    // if somebody else reads the forwarding pointer, we better make
    // sure there's a closure at the end of it.
    write_barrier();
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
#endif

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of TSOs.
 */
static rtsBool
copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve,
         nat size_to_copy, generation *gen)
{
    StgPtr to, from;
    nat i;
    StgWord info;

#if defined(PARALLEL_GC)
spin:
    info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_WHITEHOLE_info) {
        goto spin;
    }
    if (IS_FORWARDING_PTR(info)) {
        src->header.info = (const StgInfoTable *)info;
        evacuate(p); // does the failed_to_evac stuff
        return rtsFalse;
    }
#else
    info = (W_)src->header.info;
#endif

    to = alloc_for_copy(size_to_reserve, gen);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

    write_barrier();
    src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
    *p = (StgClosure *)to;

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy, (int)(size_to_reserve - size_to_copy));
#endif

    return rtsTrue;
}
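
/* Note (illustrative): in the PARALLEL_GC build, copyPart() uses
 * stg_WHITEHOLE_info as a one-word spin lock: xchg() swaps WHITEHOLE into
 * the header, and a thread that reads WHITEHOLE back simply retries.  If
 * the xchg() instead returns a forwarding pointer, another thread already
 * evacuated the object, so we restore the header and defer to evacuate();
 * the rtsFalse result tells the caller it does not own the copy.
 */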

/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE GNUC_ATTR_HOT void
copy(StgClosure **p, const StgInfoTable *info,
     StgClosure *src, nat size, generation *gen)
{
    copy_tag(p,info,src,size,gen,0);
}

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   gen->large_objects list, and linking it on to the (singly-linked)
   gen->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

STATIC_INLINE void
evacuate_large(StgPtr p)
{
  bdescr *bd;
  generation *gen, *new_gen;
  gen_workspace *ws;

  bd = Bdescr(p);
  gen = bd->gen;
  ACQUIRE_SPIN_LOCK(&gen->sync_large_objects);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the gct->failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (gen < gct->evac_gen) {
        gct->failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
    }
    RELEASE_SPIN_LOCK(&gen->sync_large_objects);
    return;
  }

  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    gen->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination gen
   */
  new_gen = bd->dest;
  if (new_gen < gct->evac_gen) {
      if (gct->eager_promotion) {
          new_gen = gct->evac_gen;
      } else {
          gct->failed_to_evac = rtsTrue;
      }
  }

  ws = &gct->gens[new_gen->no];

  bd->flags |= BF_EVACUATED;
  initBdescr(bd, new_gen, new_gen->to);

  // If this is a block of pinned objects, we don't have to scan
  // these objects, because they aren't allowed to contain any
  // pointers.  For these blocks, we skip the scavenge stage and put
  // them straight on the scavenged_large_objects list.
  if (bd->flags & BF_PINNED) {
      ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS);
      if (new_gen != gen) { ACQUIRE_SPIN_LOCK(&new_gen->sync_large_objects); }
      dbl_link_onto(bd, &new_gen->scavenged_large_objects);
      new_gen->n_scavenged_large_blocks += bd->blocks;
      if (new_gen != gen) { RELEASE_SPIN_LOCK(&new_gen->sync_large_objects); }
  } else {
      bd->link = ws->todo_large_objects;
      ws->todo_large_objects = bd;
  }

  RELEASE_SPIN_LOCK(&gen->sync_large_objects);
}
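
/* Note (illustrative): evacuating a large object never copies its payload.
 * Only the block descriptor is manipulated: the block is unlinked from
 * gen->large_objects, stamped BF_EVACUATED, re-pointed at the destination
 * generation with initBdescr(), and pushed onto either the workspace's
 * todo_large_objects list (still needs scavenging) or, for BF_PINNED blocks
 * of ARR_WORDS, directly onto scavenged_large_objects.  This is why large
 * objects are cheap for the GC regardless of their size.
 */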

/* ----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   gct->evac_gen thread-local variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= gct->evac_gen
           if  M > N     do nothing
           else          evac to gen->to

   if  M < gct->evac_gen      evac to gct->evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= gct->evac_gen     do nothing
   if  M <  gct->evac_gen     set gct->failed_to_evac flag to indicate that we
                              didn't manage to evacuate this object into gct->evac_gen.

   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   ------------------------------------------------------------------------- */
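
/* A worked example of the rules above (illustrative): suppose a minor GC is
 * collecting up to N = 0, and a remembered-set entry demands
 * gct->evac_gen = 1.  An object in generation M = 0 satisfies
 * M < gct->evac_gen, so it is evacuated directly into generation 1.  An
 * object already in generation 2 (M > N) is left alone.  And if the object
 * was already evacuated into generation 0, gct->failed_to_evac is set, and
 * the caller keeps the referring object on the mutable list.
 */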

REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  generation *gen;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info);

  if (!HEAP_ALLOCED_GC(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0) {
              if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
                  *THUNK_STATIC_LINK((StgClosure *)q) = gct->static_objects;
                  gct->static_objects = (StgClosure *)q;
#else
                  StgPtr link;
                  link = (StgPtr)cas((StgPtr)THUNK_STATIC_LINK((StgClosure *)q),
                                     (StgWord)NULL,
                                     (StgWord)gct->static_objects);
                  if (link == NULL) {
                      gct->static_objects = (StgClosure *)q;
                  }
#endif
              }
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0 &&
              *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
              *FUN_STATIC_LINK((StgClosure *)q) = gct->static_objects;
              gct->static_objects = (StgClosure *)q;
#else
              StgPtr link;
              link = (StgPtr)cas((StgPtr)FUN_STATIC_LINK((StgClosure *)q),
                                 (StgWord)NULL,
                                 (StgWord)gct->static_objects);
              if (link == NULL) {
                  gct->static_objects = (StgClosure *)q;
              }
#endif
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
              *IND_STATIC_LINK((StgClosure *)q) = gct->static_objects;
              gct->static_objects = (StgClosure *)q;
#else
              StgPtr link;
              link = (StgPtr)cas((StgPtr)IND_STATIC_LINK((StgClosure *)q),
                                 (StgWord)NULL,
                                 (StgWord)gct->static_objects);
              if (link == NULL) {
                  gct->static_objects = (StgClosure *)q;
              }
#endif
          }
          return;

      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
              *STATIC_LINK(info,(StgClosure *)q) = gct->static_objects;
              gct->static_objects = (StgClosure *)q;
#else
              StgPtr link;
              link = (StgPtr)cas((StgPtr)STATIC_LINK(info,(StgClosure *)q),
                                 (StgWord)NULL,
                                 (StgWord)gct->static_objects);
              if (link == NULL) {
                  gct->static_objects = (StgClosure *)q;
              }
#endif
          }
          /* I am assuming that static_objects pointers are not
           * written to other objects, and thus, no need to retag. */
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {

      // pointer into to-space: just return it.  It might be a pointer
      // into a generation that we aren't collecting (> N), or it
      // might just be a pointer into to-space.  The latter doesn't
      // happen often, but allowing it makes certain things a bit
      // easier; e.g. scavenging an object is idempotent, so it's OK to
      // have an object on the mutable list multiple times.
      if (bd->flags & BF_EVACUATED) {
          // We aren't copying this object, so we have to check
          // whether it is already in the target generation.  (this is
          // the write barrier).
          if (bd->gen < gct->evac_gen) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          info = get_itbl(q);
          if (info->type == TSO &&
              ((StgTSO *)q)->what_next == ThreadRelocated) {
              q = (StgClosure *)((StgTSO *)q)->_link;
              *p = q;
              goto loop;
          }

          evacuate_large((P_)q);
          return;
      }

      /* If the object is in a gen that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (!is_marked((P_)q,bd)) {
          push_mark_stack((P_)q);
      }
      return;
  }

  gen = bd->dest;

  info = q->header.info;
  if (IS_FORWARDING_PTR(info))
  {
    /* Already evacuated, just return the forwarding address.
     *
     * HOWEVER: if the requested destination generation (gct->evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the gct->failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     *
     * Optimisation: the check is fairly expensive, but we can often
     * shortcut it if either the required generation is 0, or the
     * current object (the EVACUATED) is in a high enough generation.
     * We know that an EVACUATED always points to an object in the
     * same or an older generation.  gen is the lowest generation that the
     * current object would be evacuated to, so we only do the full
     * check if gen is too low.
     */
      StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
      *p = TAG_CLOSURE(tag,e);
      if (gen < gct->evac_gen) {  // optimisation
          if (Bdescr((P_)e)->gen < gct->evac_gen) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return;
  }
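
  /* Note (illustrative): a forwarded closure is recognisable because its
   * info pointer word has the low bit set, roughly
   *
   *     MK_FORWARDING_PTR(to)  ~  (StgWord)to | 1
   *     UN_FORWARDING_PTR(i)   ~  (StgWord)i - 1
   *
   * Info pointers are word-aligned, so bit 0 is free, and
   * IS_FORWARDING_PTR() is a single bit test.  (A sketch; the exact
   * encoding lives with the other closure macros in the RTS headers.)
   */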

  switch (INFO_PTR_TO_STRUCT(info)->type) {

  case WHITEHOLE:
      goto loop;

  case MVAR_CLEAN:
  case MVAR_DIRTY:
  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen);
      return;

  // For ints and chars of low value, save space by replacing references to
  //    these with closures with references to common, shared ones in the RTS.
  //
  // * Except when compiling into Windows DLLs which don't support cross-package
  //    data references very well.
  //
  case CONSTR_0_1:
  {
#if defined(__PIC__) && defined(mingw32_HOST_OS)
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen,tag);
#else
      StgWord w = (StgWord)q->payload[0];
      if (info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                          );
      }
      else if (info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                          );
      }
      else {
          copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen,tag);
      }
#endif
      return;
  }
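
  /* Example (illustrative): assuming the usual RTS bounds (MAX_INTLIKE is
   * 16 at the time of writing), evacuating the closure for (I# 7) copies
   * nothing: *p is simply redirected, tag intact, to the shared
   * INTLIKE_CLOSURE(7) in the RTS's static INTLIKE table, so small boxed
   * ints and chars are deduplicated by every GC.
   */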

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,info,q,sizeofW(StgThunk)+1,gen);
      return;

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
#error bitrotted
#endif
      copy(p,info,q,sizeofW(StgThunk)+2,gen);
      return;

  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen,tag);
      return;

  case CONSTR_1_1:
  case CONSTR_2_0:
  case CONSTR_0_2:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen,tag);
      return;

  case THUNK:
      copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen);
      return;

  case FUN:
  case IND_PERM:
  case CONSTR:
      copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen,tag);
      return;

  case BLACKHOLE:
  {
      StgClosure *r;
      const StgInfoTable *i;
      r = ((StgInd*)q)->indirectee;
      if (GET_CLOSURE_TAG(r) == 0) {
          i = r->header.info;
          if (IS_FORWARDING_PTR(i)) {
              r = (StgClosure *)UN_FORWARDING_PTR(i);
              i = r->header.info;
          }
          if (i == &stg_TSO_info
              || i == &stg_WHITEHOLE_info
              || i == &stg_BLOCKING_QUEUE_CLEAN_info
              || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
              copy(p,info,q,sizeofW(StgInd),gen);
              return;
          }
          ASSERT(i != &stg_IND_info);
      }
      q = r;
      *p = r;
      goto loop;
  }

  case BLOCKING_QUEUE:
  case WEAK:
  case PRIM:
  case MUT_PRIM:
      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen);
      return;

  case BCO:
      copy(p,info,q,bco_sizeW((StgBCO *)q),gen);
      return;

  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      *p = q;
      goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);

  case PAP:
      copy(p,info,q,pap_sizeW((StgPAP*)q),gen);
      return;

  case AP:
      copy(p,info,q,ap_sizeW((StgAP*)q),gen);
      return;

  case AP_STACK:
      copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen);
      return;

  case ARR_WORDS:
      // just copy the block
      copy(p,info,q,arr_words_sizeW((StgArrWords *)q),gen);
      return;

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen);
      return;

  case TSO:
  {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
          q = (StgClosure *)tso->_link;
          *p = q;
          goto loop;
      }

      /* To evacuate a small TSO, we need to adjust the stack pointer
       */
      {
          StgTSO *new_tso;
          StgPtr r, s;
          rtsBool mine;

          mine = copyPart(p,(StgClosure *)tso, tso_sizeW(tso),
                          sizeofW(StgTSO), gen);
          if (mine) {
              new_tso = (StgTSO *)*p;
              move_TSO(tso, new_tso);
              for (r = tso->sp, s = new_tso->sp;
                   r < tso->stack+tso->stack_size;) {
                  *s++ = *r++;
              }
          }
          return;
      }
  }

  case TREC_CHUNK:
      copy(p,info,q,sizeofW(StgTRecChunk),gen);
      return;

  default:
      barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
  }

  barf("evacuate");
}

/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   p points to a THUNK_SELECTOR that we want to evaluate.  The
   result of "evaluating" it will be evacuated and a pointer to the
   to-space closure will be returned.

   If the THUNK_SELECTOR could not be evaluated (its selectee is still
   a THUNK, for example), then the THUNK_SELECTOR itself will be
   evacuated.
   -------------------------------------------------------------------------- */

static void
unchain_thunk_selectors(StgSelector *p, StgClosure *val)
{
    StgSelector *prev;

    prev = NULL;
    while (p)
    {
        ASSERT(p->header.info == &stg_WHITEHOLE_info);
        // val must be in to-space.  Not always: when we recursively
        // invoke eval_thunk_selector(), the recursive calls will not
        // evacuate the value (because we want to select on the value,
        // not evacuate it), so in this case val is in from-space.
        // ASSERT(!HEAP_ALLOCED_GC(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));

        prev = (StgSelector*)((StgClosure *)p)->payload[0];

        // Update the THUNK_SELECTOR with an indirection to the
        // value.  The value is still in from-space at this stage.
        //
        // (old note: Why not do upd_evacuee(q,p)?  Because we have an
        // invariant that an EVACUATED closure always points to an
        // object in the same or an older generation (required by
        // the short-cut test in the EVACUATED case, below).
        if ((StgClosure *)p == val) {
            // must be a loop; just leave a BLACKHOLE in place.  This
            // can happen when we have a chain of selectors that
            // eventually loops back on itself.  We can't leave an
            // indirection pointing to itself, and we want the program
            // to deadlock if it ever enters this closure, so
            // BLACKHOLE is correct.

            // XXX we do not have BLACKHOLEs any more; replace with
            // a THUNK_SELECTOR again.  This will go into a loop if it is
            // entered, and should result in a NonTermination exception.
            ((StgThunk *)p)->payload[0] = val;
            write_barrier();
            SET_INFO(p, &stg_sel_0_upd_info);
        } else {
            ((StgInd *)p)->indirectee = val;
            write_barrier();
            SET_INFO(p, &stg_IND_info);
        }

        // For the purposes of LDV profiling, we have created an
        // indirection.
        LDV_RECORD_CREATE(p);

        p = prev;
    }
}
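
/* Illustrative picture of the chain (not from the original source): while
 * eval_thunk_selector() walks s1 -> s2 -> s3 looking for a value v, it links
 * the WHITEHOLEd selectors backwards through payload[0]:
 *
 *     s3.payload[0] -> s2,  s2.payload[0] -> s1,  s1.payload[0] -> NULL
 *
 * unchain_thunk_selectors() then walks that list and rewrites every selector
 * as an IND pointing at v (or re-blackholes it if v turns out to be the
 * selector itself, i.e. a loop).
 */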

static void
eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
                 // NB. for legacy reasons, p & q are swapped around :(
{
    nat field;
    StgInfoTable *info;
    StgWord info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a WHITEHOLE, and points to the next in the
    // chain with payload[0].

selector_chain:

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED_GC(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if (bd->flags & BF_EVACUATED) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            *q = (StgClosure *)p;
            // shortcut, behave as for:  if (evac) evacuate(q);
            if (evac && bd->gen < gct->evac_gen) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return;
        }
        // we don't update THUNK_SELECTORS in the compacted
        // generation, because compaction does not remove the INDs
        // that result, this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_MARKED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }

    // WHITEHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
#if defined(THREADED_RTS)
    // In threaded mode, we'll use WHITEHOLE to lock the selector
    // thunk while we evaluate it.
    {
        do {
            info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
        } while (info_ptr == (W_)&stg_WHITEHOLE_info);

        // make sure someone else didn't get here first...
        if (IS_FORWARDING_PTR(info_ptr) ||
            INFO_PTR_TO_STRUCT(info_ptr)->type != THUNK_SELECTOR) {
            // v. tricky now.  The THUNK_SELECTOR has been evacuated
            // by another thread, and is now either a forwarding ptr or IND.
            // We need to extract ourselves from the current situation
            // as cleanly as possible.
            //   - unlock the closure
            //   - update *q, we may have done *some* evaluation
            //   - if evac, we need to call evacuate(), because we
            //     need the write-barrier stuff.
            //   - undo the chain we've built to point to p.
            SET_INFO(p, (const StgInfoTable *)info_ptr);
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }
#else
    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = (StgWord)p->header.info;
    SET_INFO(p,&stg_WHITEHOLE_info);
#endif

    field = INFO_PTR_TO_STRUCT(info_ptr)->layout.selector_offset;

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);

selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = (StgInfoTable*)selectee->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // We don't follow pointers into to-space; the constructor
        // has already been evacuated, so we won't save any space
        // leaks by evaluating this selector thunk anyhow.
        goto bale_out;
    }

    info = INFO_PTR_TO_STRUCT(info);
    switch (info->type) {
      case WHITEHOLE:
          goto bale_out; // about to be evacuated by another thread (or a loop).

      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
      case CONSTR_NOCAF_STATIC:
          {
              // check that the size is in range
              ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
                                         info->layout.payload.nptrs));

              // Select the right field from the constructor
              val = selectee->payload[field];

#ifdef PROFILING
              // For the purposes of LDV profiling, we have destroyed
              // the original selector thunk, p.
              SET_INFO(p, (StgInfoTable *)info_ptr);
              LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC((StgClosure *)p);
              SET_INFO(p, &stg_WHITEHOLE_info);
#endif

              // the closure in val is now the "value" of the
              // THUNK_SELECTOR in p.  However, val may itself be a
              // THUNK_SELECTOR, in which case we want to continue
              // evaluating until we find the real value, and then
              // update the whole chain to point to the value.
          val_loop:
              info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
              if (!IS_FORWARDING_PTR(info_ptr))
              {
                  info = INFO_PTR_TO_STRUCT(info_ptr);
                  switch (info->type) {
                  case IND:
                  case IND_PERM:
                  case IND_STATIC:
                      val = ((StgInd *)val)->indirectee;
                      goto val_loop;
                  case THUNK_SELECTOR:
                      ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
                      prev_thunk_selector = p;
                      p = (StgSelector*)val;
                      goto selector_chain;
                  default:
                      break;
                  }
              }
              ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
              prev_thunk_selector = p;

              *q = val;

              // update the other selectors in the chain *before*
              // evacuating the value.  This is necessary in the case
              // where the value turns out to be one of the selectors
              // in the chain (i.e. we have a loop), and evacuating it
              // would corrupt the chain.
              unchain_thunk_selectors(prev_thunk_selector, val);

              // evacuate() cannot recurse through
              // eval_thunk_selector(), because we know val is not
              // a THUNK_SELECTOR.
              if (evac) evacuate(q);
              return;
          }

      case IND:
      case IND_PERM:
      case IND_STATIC:
          // Again, we might need to untag a constructor.
          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;

      case BLACKHOLE:
      {
          StgClosure *r;
          const StgInfoTable *i;
          r = ((StgInd*)selectee)->indirectee;

          // establish whether this BH has been updated, and is now an
          // indirection, as in evacuate().
          if (GET_CLOSURE_TAG(r) == 0) {
              i = r->header.info;
              if (IS_FORWARDING_PTR(i)) {
                  r = (StgClosure *)UN_FORWARDING_PTR(i);
                  i = r->header.info;
              }
              if (i == &stg_TSO_info
                  || i == &stg_WHITEHOLE_info
                  || i == &stg_BLOCKING_QUEUE_CLEAN_info
                  || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
                  goto bale_out;
              }
              ASSERT(i != &stg_IND_info);
          }

          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;
      }

      case THUNK_SELECTOR:
      {
          StgClosure *val;

          // recursively evaluate this selector.  We don't want to
          // recurse indefinitely, so we impose a depth bound.
          if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
              goto bale_out;
          }

          gct->thunk_selector_depth++;
          // rtsFalse says "don't evacuate the result".  It will,
          // however, update any THUNK_SELECTORs that are evaluated
          // along the way.
          eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
          gct->thunk_selector_depth--;

          // did we actually manage to evaluate it?
          if (val == selectee) goto bale_out;

          // Of course this pointer might be tagged...
          selectee = UNTAG_CLOSURE(val);
          goto selector_loop;
      }

      case AP:
      case AP_STACK:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
          // not evaluated yet
          goto bale_out;

      default:
          barf("eval_thunk_selector: strange selectee %d",
               (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO(p, (const StgInfoTable *)info_ptr);
    // THREADED_RTS: we just unlocked the thunk, so another thread
    // might get in and update it.  copy() will lock it again and
    // check whether it was updated in the meantime.
    *q = (StgClosure *)p;
    if (evac) {
        copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest);
    }
    unchain_thunk_selectors(prev_thunk_selector, *q);
    return;
}
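
/* Note (illustrative): the MAX_THUNK_SELECTOR_DEPTH bound only limits
 * *nested* selections (a selector whose selectee is itself a selector);
 * chains of selectors discovered through the selected value are handled
 * iteratively via the selector_chain/payload[0] list above, so they cost no
 * C stack.  When the depth bound is hit we simply bale out and copy the
 * THUNK_SELECTOR unevaluated; it can be dealt with again in a later GC.
 */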