      force_compaction_(false),
      compacting_collection_(false),
      compact_on_next_gc_(false),
      previous_marked_count_(0),
      sweep_precisely_(false),
      was_marked_incrementally_(false),
      collect_maps_(FLAG_collect_maps),
      live_young_objects_size_(0),
      live_old_pointer_objects_size_(0),
      live_old_data_objects_size_(0),
      live_code_objects_size_(0),
      live_map_objects_size_(0),
      live_cell_objects_size_(0),
      live_lo_objects_size_(0),
      migration_slots_buffer_(NULL),
      code_flusher_(NULL),
      encountered_weak_maps_(NULL) { }
class VerifyMarkingVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
      }
    }
  }
};
static void VerifyMarking(Address bottom, Address top) {
  VerifyMarkingVisitor visitor;
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      ASSERT(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}
static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. Allows us to use
  // page->body() as start of range on all pages.
  ASSERT_EQ(space->bottom(),
            NewSpacePage::FromAddress(space->bottom())->body());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->body_limit() : end;
    ASSERT(limit == end || !page->Contains(end));
    VerifyMarking(page->body(), limit);
  }
}
static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd());
  }
}
static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_pointer_space());
  VerifyMarking(heap->old_data_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->cell_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor;

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}
class VerifyEvacuationVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};
static void VerifyEvacuation(Address bottom, Address top) {
  VerifyEvacuationVisitor visitor;
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      ASSERT(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}
static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->body();
    Address limit = it.has_next() ? page->body_limit() : space->top();
    ASSERT(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}
static void VerifyEvacuation(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd());
  }
}
static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap->old_pointer_space());
  VerifyEvacuation(heap->old_data_space());
  VerifyEvacuation(heap->code_space());
  VerifyEvacuation(heap->cell_space());
  VerifyEvacuation(heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}
bool MarkCompactCollector::StartCompaction() {
  if (!compacting_) {
    ASSERT(evacuation_candidates_.length() == 0);

    CollectEvacuationCandidates(heap()->old_pointer_space());
    CollectEvacuationCandidates(heap()->old_data_space());

    if (FLAG_compact_code_space) {
      CollectEvacuationCandidates(heap()->code_space());
    }

    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}
void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);
  ASSERT(encountered_weak_maps_ == Smi::FromInt(0));

  // Prepare() has already selected whether to compact the old generation.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();
  ASSERT(heap_->incremental_marking()->IsStopped());

  if (collect_maps_) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
    EncodeForwardingAddresses();

    heap()->MarkMapPointersAsEncoded(true);
    UpdatePointers();
    heap()->MarkMapPointersAsEncoded(false);
    heap()->isolate()->pc_to_code_cache()->Flush();
  }

  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }

  if (!collect_maps_) ReattachInitialMaps();

  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}
void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_pointer_space());
  VerifyMarkbitsAreClean(heap_->old_data_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->cell_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    ASSERT(Marking::IsWhite(mark_bit));
  }
}
static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}
void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
  ClearMarkbitsInPagedSpace(heap_->old_data_space());
  ClearMarkbitsInPagedSpace(heap_->cell_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }
}
bool Marking::TransferMark(Address old_start, Address new_start) {
  // This is only used when resizing an object.
  ASSERT(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return false;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    old_mark_bit.Clear();
    ASSERT(IsWhite(old_mark_bit));
    Marking::MarkBlack(new_mark_bit);
    return true;
  } else if (Marking::IsGrey(old_mark_bit)) {
    ASSERT(heap_->incremental_marking()->IsMarking());
    old_mark_bit.Clear();
    old_mark_bit.Next().Clear();
    ASSERT(IsWhite(old_mark_bit));
    heap_->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap_->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  ASSERT(new_color == old_color);
#endif

  return false;
}
const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE: return "NEW_SPACE";
    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
    case CODE_SPACE: return "CODE_SPACE";
    case MAP_SPACE: return "MAP_SPACE";
    case CELL_SPACE: return "CELL_SPACE";
    case LO_SPACE: return "LO_SPACE";
    default: UNREACHABLE();
  }

  return NULL;
}
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  ASSERT(space->identity() == OLD_POINTER_SPACE ||
         space->identity() == OLD_DATA_SPACE ||
         space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();

  PageIterator it(space);
  const int kMaxMaxEvacuationCandidates = 1000;
  int max_evacuation_candidates = Min(
      kMaxMaxEvacuationCandidates,
      static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
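
  // Illustrative arithmetic for the formula above (not from the original
  // source): with number_of_pages == 200, sqrt(200 / 2) + 1 == sqrt(100) + 1
  // == 11, so at most 11 pages become candidates; the cap of
  // kMaxMaxEvacuationCandidates (1000) only binds for spaces with roughly
  // two million pages or more.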

  if (FLAG_stress_compaction || FLAG_always_compact) {
    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
  }

  class Candidate {
   public:
    Candidate() : fragmentation_(0), page_(NULL) { }
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

   private:
    int fragmentation_;
    Page* page_;
  };

  int count = 0;
  Candidate candidates[kMaxMaxEvacuationCandidates];

  if (it.has_next()) it.next();  // Never compact the first page.
  int fragmentation = 0;
  Candidate* least = NULL;
  while (it.has_next()) {
    Page* p = it.next();
    p->ClearEvacuationCandidate();
    if (FLAG_stress_compaction) {
      int counter = space->heap()->ms_count();
      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    } else {
      fragmentation = space->Fragmentation(p);
    }
    if (fragmentation != 0) {
      if (count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
      } else {
        if (least == NULL) {
          for (int i = 0; i < max_evacuation_candidates; i++) {
            if (least == NULL ||
                candidates[i].fragmentation() < least->fragmentation()) {
              least = candidates + i;
            }
          }
        }
        if (least->fragmentation() < fragmentation) {
          *least = Candidate(fragmentation, p);
          least = NULL;
        }
      }
    }
  }

  for (int i = 0; i < count; i++) {
    AddEvacuationCandidate(candidates[i].page());
  }

  if (count > 0 && FLAG_trace_fragmentation) {
    PrintF("Collected %d evacuation candidates for space %s\n",
           count,
           AllocationSpaceName(space->identity()));
  }
}
void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    int npages = evacuation_candidates_.length();
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
    invalidated_code_.Rewind(0);
  }
  ASSERT_EQ(0, evacuation_candidates_.length());
}
void MarkCompactCollector::Prepare(GCTracer* tracer) {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

  // Disable collection of maps if incremental marking is enabled.
  // Map collection algorithm relies on a special map transition tree traversal
  // order which is not implemented for incremental marking.
  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;

  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

  set_encountered_weak_maps(Smi::FromInt(0));
// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
// When compacting, forwarding addresses for objects in old space and map
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
// The exact encoding is described in the comments for class MapWord in
// objects.h.
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
// free regions of one-word size (in which case the next word is the start
// of a live object).  A second distinguished map-pointer encoding is used
// to mark free regions larger than one word, and the size of the free
// region (including the first word) is written to the second word of the
// region.
//
// Any valid map page offset must lie in the object area of the page, so map
// page offsets less than Page::kObjectStartOffset are invalid.  We use a
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
// Encode a free region, defined by the given start address and size, in the
// first word or two of the region.
void EncodeFreeRegion(Address free_start, int free_size) {
  ASSERT(free_size >= kIntSize);
  if (free_size == kIntSize) {
    Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
  } else {
    ASSERT(free_size >= 2 * kIntSize);
    Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
    Memory::int_at(free_start + kIntSize) = free_size;
  }

#ifdef DEBUG
  // Zap the body of the free region.
  if (FLAG_enable_slow_asserts) {
    for (int offset = 2 * kIntSize;
         offset < free_size;
         offset += kPointerSize) {
      Memory::Address_at(free_start + offset) = kZapValue;
    }
  }
#endif
}
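
// Illustrative sketch of the layout produced above (not from the original
// source): on a 32-bit heap, encoding a 12-byte free region leaves
//   [free_start + 0]  kMultiFreeEncoding  (distinguished invalid map word)
//   [free_start + 4]  12                  (free_size in bytes, incl. header)
//   [free_start + 8]  kZapValue           (zapped under slow asserts)
// so a later sweep can recognize the invalid map word and skip the region.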
// Try to promote all objects in new space.  Heap numbers and sequential
// strings are promoted to the old data space, large objects to large object
// space, and all others to the old pointer space.
inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
                                           HeapObject* object,
                                           int object_size) {
  MaybeObject* forwarded;
  if (object_size > heap->MaxObjectSizeInPagedSpace()) {
    forwarded = Failure::Exception();
  } else {
    OldSpace* target_space = heap->TargetSpace(object);
    ASSERT(target_space == heap->old_pointer_space() ||
           target_space == heap->old_data_space());
    forwarded = target_space->MCAllocateRaw(object_size);
  }
  Object* result;
  if (!forwarded->ToObject(&result)) {
    result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
  }
  return result;
}
// Allocation functions for the paged spaces call the space's MCAllocateRaw.
MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
    Heap* heap, HeapObject* ignore, int object_size) {
  return heap->old_pointer_space()->MCAllocateRaw(object_size);
}


MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
    Heap* heap, HeapObject* ignore, int object_size) {
  return heap->old_data_space()->MCAllocateRaw(object_size);
}


MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
    Heap* heap, HeapObject* ignore, int object_size) {
  return heap->code_space()->MCAllocateRaw(object_size);
}


MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
    Heap* heap, HeapObject* ignore, int object_size) {
  return heap->map_space()->MCAllocateRaw(object_size);
}


MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
    Heap* heap, HeapObject* ignore, int object_size) {
  return heap->cell_space()->MCAllocateRaw(object_size);
}
// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
inline void EncodeForwardingAddressInNewSpace(Heap* heap,
                                              HeapObject* old_object,
                                              int object_size,
                                              Object* new_object,
                                              int* ignored) {
  int offset =
      heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
  Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
      HeapObject::cast(new_object)->address();
}
// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
                                                HeapObject* old_object,
                                                int object_size,
                                                Object* new_object,
                                                int* offset) {
  // Record the forwarding address of the first live object if necessary.
  if (*offset == 0) {
    Page::FromAddress(old_object->address())->mc_first_forwarded =
        HeapObject::cast(new_object)->address();
  }

  MapWord encoding =
      MapWord::EncodeAddress(old_object->map()->address(), *offset);
  old_object->set_map_word(encoding);
  *offset += object_size;
  ASSERT(*offset <= Page::kObjectAreaSize);
}
// Most non-live objects are ignored.
inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
// Function template that, given a range of addresses (eg, a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses.  As a side effect,
// maximal free chunks are marked so that they can be skipped on subsequent
// sweeps.
//
// The template parameters are an allocation function, a forwarding address
// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::EncodingFunction Encode,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
                                             Address start,
                                             Address end,
                                             int* offset) {
  // The start address of the current free region while sweeping the space.
  // This address is set when a transition from live to non-live objects is
  // encountered.  A value (an encoding of the 'next free region' pointer)
  // is written to memory at this address when a transition from non-live to
  // live objects is encountered.
  Address free_start = NULL;

  // A flag giving the state of the previously swept object.  Initially true
  // to ensure that free_start is initialized to a proper address before
  // trying to write to it.
  bool is_prev_alive = true;

  int object_size;  // Will be set on each iteration of the loop.
  for (Address current = start; current < end; current += object_size) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      collector->tracer()->decrement_marked_count();
      object_size = object->Size();

      Object* forwarded =
          Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
      Encode(collector->heap(), object, object_size, forwarded, offset);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("forward %p -> %p.\n", object->address(),
               HeapObject::cast(forwarded)->address());
      }
#endif
      if (!is_prev_alive) {  // Transition from non-live to live.
        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
        is_prev_alive = true;
      }
    } else {  // Non-live object.
      object_size = object->Size();
      ProcessNonLive(object, collector->heap()->isolate());
      if (is_prev_alive) {  // Transition from live to non-live.
        free_start = current;
        is_prev_alive = false;
      }
      LiveObjectList::ProcessNonLive(object);
    }
  }

  // If we ended on a free region, mark it.
  if (!is_prev_alive) {
    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
  }
}
// Functions to encode the forwarding pointers in each compactable space.
void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
  int ignored;
  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
                                   EncodeForwardingAddressInNewSpace,
                                   IgnoreNonLiveObject>(
      this,
      heap()->new_space()->bottom(),
      heap()->new_space()->top(),
      &ignored);
}
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
    PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();

    // The offset of each live object in the page from the first live object
    // in the page.
    int offset = 0;
    EncodeForwardingAddressesInRange<Alloc,
                                     EncodeForwardingAddressInPagedSpace,
                                     ProcessNonLive>(
        this,
        p->ObjectAreaStart(),
        p->AllocationTop(),
        &offset);
  }
}

// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
//
// The first pass migrates all alive objects from one semispace to another or
// promotes them to old space.  Forwarding address is written directly into
// first word of object without any encoding.  If object is dead we write
// NULL as a forwarding address.
//
// The second pass updates pointers to new space in all spaces.  It is possible
// to encounter pointers to dead new space objects during traversal of pointers
// to new space.  We should clear them to avoid encountering them during next
// pointer iteration.  This is an issue if the store buffer overflows and we
// have to scan the entire old space, including dead objects, looking for
// pointers to new space.
void MarkCompactCollector::MigrateObject(Address dst,
                                         Address src,
                                         int size,
                                         AllocationSpace dest) {
  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
  if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
    Address src_slot = src;
    Address dst_slot = dst;
    ASSERT(IsAligned(size, kPointerSize));

    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
      Object* value = Memory::Object_at(src_slot);

      Memory::Object_at(dst_slot) = value;

      if (heap_->InNewSpace(value)) {
        heap_->store_buffer()->Mark(dst_slot);
      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
        SlotsBuffer::AddTo(&slots_buffer_allocator_,
                           &migration_slots_buffer_,
                           reinterpret_cast<Object**>(dst_slot),
                           SlotsBuffer::IGNORE_OVERFLOW);
      }

      src_slot += kPointerSize;
      dst_slot += kPointerSize;
    }

    if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
      Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
      Address code_entry = Memory::Address_at(code_entry_slot);

      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
        SlotsBuffer::AddTo(&slots_buffer_allocator_,
                           &migration_slots_buffer_,
                           SlotsBuffer::CODE_ENTRY_SLOT,
                           code_entry_slot,
                           SlotsBuffer::IGNORE_OVERFLOW);
      }
    }
  } else if (dest == CODE_SPACE) {
    PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
    heap()->MoveBlock(dst, src, size);
    SlotsBuffer::AddTo(&slots_buffer_allocator_,
                       &migration_slots_buffer_,
                       SlotsBuffer::RELOCATED_CODE_OBJECT,
                       dst,
                       SlotsBuffer::IGNORE_OVERFLOW);
    Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
  } else {
    ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
    heap()->MoveBlock(dst, src, size);
  }
  Memory::Address_at(src) = dst;
}

class StaticPointersToNewGenUpdatingVisitor : public
    StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();

    if (heap->new_space()->Contains(obj)) {
      ASSERT(heap->InFromSpace(*p));
      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
    }
  }
};


// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
 public:
  explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }

  void VisitPointer(Object** p) {
    StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) {
      StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
    }
  }

 private:
  Heap* heap_;
};


// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor: public ObjectVisitor {
 public:
  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }

  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    VisitPointer(&target);
    rinfo->set_target_object(target);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(Code::cast(target)->instruction_start());
  }

 private:
  Heap* heap_;
};

void MarkCompactCollector::EvacuateNewSpace() {
  heap()->CheckNewSpaceExpansionCriteria();

  NewSpace* new_space = heap()->new_space();

  // Store allocation range before flipping semispaces.
  Address from_bottom = new_space->bottom();
  Address from_top = new_space->top();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space->Flip();
  new_space->ResetAllocationInfo();

  int survivors_size = 0;

  // First pass: traverse all objects in inactive semispace, remove marks,
  // migrate live objects and write forwarding addresses.  This stage puts
  // new entries in the store buffer and may cause some pages to be marked
  // scan-on-scavenge.
  SemiSpaceIterator from_it(from_bottom, from_top);
  for (HeapObject* object = from_it.Next();
       object != NULL;
       object = from_it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) {
      mark_bit.Clear();
      // Don't bother decrementing live bytes count. We'll discard the
      // entire page at the end.
      int size = object->Size();
      survivors_size += size;

      // Aggressively promote young survivors to the old space.
      if (TryPromoteObject(object, size)) {
        continue;
      }

      // Promotion failed. Just migrate object to another semispace.
      MaybeObject* allocation = new_space->AllocateRaw(size);
      if (allocation->IsFailure()) {
        if (!new_space->AddFreshPage()) {
          // Shouldn't happen. We are sweeping linearly, and to-space
          // has the same number of pages as from-space, so there is
          // always room.
          UNREACHABLE();
        }
        allocation = new_space->AllocateRaw(size);
        ASSERT(!allocation->IsFailure());
      }
      Object* target = allocation->ToObjectUnchecked();

      MigrateObject(HeapObject::cast(target)->address(),
                    object->address(),
                    size,
                    NEW_SPACE);
    } else {
      // Process the dead object before we write a NULL into its header.
      LiveObjectList::ProcessNonLive(object);

      // Mark dead objects in the new space with null in their map field.
      Memory::Address_at(object->address()) = NULL;
    }
  }

  heap_->IncrementYoungSurvivorsCounter(survivors_size);
  new_space->set_age_mark(new_space->top());
}

void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
  AlwaysAllocateScope always_allocate;
  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
  MarkBit::CellType* cells = p->markbits()->cells();
  p->MarkSweptPrecisely();

  int last_cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));

  int cell_index = Page::kFirstUsedCell;
  Address cell_base = p->ObjectAreaStart();
  int offsets[16];

  for (cell_index = Page::kFirstUsedCell;
       cell_index < last_cell_index;
       cell_index++, cell_base += 32 * kPointerSize) {
    ASSERT((unsigned)cell_index ==
           Bitmap::IndexToCell(
               Bitmap::CellAlignIndex(
                   p->AddressToMarkbitIndex(cell_base))));
    if (cells[cell_index] == 0) continue;

    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
    for (int i = 0; i < live_objects; i++) {
      Address object_addr = cell_base + offsets[i] * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(object_addr);
      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));

      int size = object->Size();

      MaybeObject* target = space->AllocateRaw(size);
      if (target->IsFailure()) {
        // OS refused to give us memory.
        V8::FatalProcessOutOfMemory("Evacuation");
      }

      Object* target_object = target->ToObjectUnchecked();

      MigrateObject(HeapObject::cast(target_object)->address(),
                    object_addr,
                    size,
                    space->identity());
      ASSERT(object->map_word().IsForwardingAddress());
    }

    // Clear marking bits for current cell.
    cells[cell_index] = 0;
  }
  p->ResetLiveBytes();
}

void MarkCompactCollector::EvacuatePages() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    ASSERT(p->IsEvacuationCandidate() ||
           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
    if (p->IsEvacuationCandidate()) {
      // During compaction we might have to request a new page.
      // Check that the space still has room for that.
      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
        EvacuateLiveObjectsFromPage(p);
      } else {
        // Without room for expansion evacuation is not guaranteed to succeed.
        // Pessimistically abandon unevacuated pages.
        for (int j = i; j < npages; j++) {
          Page* page = evacuation_candidates_[j];
          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
          page->ClearEvacuationCandidate();
          page->SetFlag(Page::RESCAN_ON_EVACUATION);
        }
        return;
      }
    }
  }
}

class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    if (object->IsHeapObject()) {
      HeapObject* heap_object = HeapObject::cast(object);
      MapWord map_word = heap_object->map_word();
      if (map_word.IsForwardingAddress()) {
        return map_word.ToForwardingAddress();
      }
    }
    return object;
  }
};

static inline void UpdateSlot(ObjectVisitor* v,
                              SlotsBuffer::SlotType slot_type,
                              Address addr) {
  switch (slot_type) {
    case SlotsBuffer::CODE_TARGET_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
      rinfo.Visit(v);
      break;
    }
    case SlotsBuffer::CODE_ENTRY_SLOT: {
      v->VisitCodeEntry(addr);
      break;
    }
    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
      HeapObject* obj = HeapObject::FromAddress(addr);
      Code::cast(obj)->CodeIterateBody(v);
      break;
    }
    case SlotsBuffer::DEBUG_TARGET_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
      break;
    }
    case SlotsBuffer::JS_RETURN_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
      break;
    }
    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
      rinfo.Visit(v);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

enum SweepingMode {
  SWEEP_ONLY,
  SWEEP_AND_VISIT_LIVE_OBJECTS
};


enum SkipListRebuildingMode {
  REBUILD_SKIP_LIST,
  IGNORE_SKIP_LIST
};

// Sweep a space precisely.  After this has been done the space can
// be iterated precisely, hitting only the live objects.  Code space
// is always swept precisely because we want to be able to iterate
// over it.  Map space is swept precisely, because it is not compacted.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
static void SweepPrecisely(PagedSpace* space,
                           Page* p,
                           ObjectVisitor* v) {
  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
            space->identity() == CODE_SPACE);
  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));

  MarkBit::CellType* cells = p->markbits()->cells();
  p->MarkSweptPrecisely();

  int last_cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));

  int cell_index = Page::kFirstUsedCell;
  Address free_start = p->ObjectAreaStart();
  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
  Address object_address = p->ObjectAreaStart();
  int offsets[16];

  SkipList* skip_list = p->skip_list();
  int curr_region = -1;
  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
    skip_list->Clear();
  }

  for (cell_index = Page::kFirstUsedCell;
       cell_index < last_cell_index;
       cell_index++, object_address += 32 * kPointerSize) {
    ASSERT((unsigned)cell_index ==
           Bitmap::IndexToCell(
               Bitmap::CellAlignIndex(
                   p->AddressToMarkbitIndex(object_address))));
    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
    int live_index = 0;
    for ( ; live_objects != 0; live_objects--) {
      Address free_end = object_address + offsets[live_index++] * kPointerSize;
      if (free_end != free_start) {
        space->Free(free_start, static_cast<int>(free_end - free_start));
      }
      HeapObject* live_object = HeapObject::FromAddress(free_end);
      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
      Map* map = live_object->map();
      int size = live_object->SizeFromMap(map);
      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
        live_object->IterateBody(map->instance_type(), size, v);
      }
      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
        int new_region_start =
            SkipList::RegionNumber(free_end);
        int new_region_end =
            SkipList::RegionNumber(free_end + size - kPointerSize);
        if (new_region_start != curr_region ||
            new_region_end != curr_region) {
          skip_list->AddObject(free_end, size);
          curr_region = new_region_end;
        }
      }
      free_start = free_end + size;
    }
    // Clear marking bits for current cell.
    cells[cell_index] = 0;
  }
  if (free_start != p->ObjectAreaEnd()) {
    space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
  }
  p->ResetLiveBytes();
}

static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
  Page* p = Page::FromAddress(code->address());

  if (p->IsEvacuationCandidate() ||
      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
    return false;
  }

  Address code_start = code->address();
  Address code_end = code_start + code->Size();

  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
  uint32_t end_index =
      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);

  Bitmap* b = p->markbits();

  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);

  MarkBit::CellType* start_cell = start_mark_bit.cell();
  MarkBit::CellType* end_cell = end_mark_bit.cell();

  if (value) {
    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
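
    // Worked example (illustrative, not from the original source): if the
    // start bit is bit 3 of its cell, start_mark_bit.mask() == 0x8 and
    // start_mask == ~0x7 == 0xFFFFFFF8, covering bits 3..31.  If the end bit
    // is bit 5, end_mask == (0x20 << 1) - 1 == 0x3F, covering bits 0..5.
    // Within a single cell the covered range is the intersection of the two.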

    if (start_cell == end_cell) {
      *start_cell |= start_mask & end_mask;
    } else {
      *start_cell |= start_mask;
      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
        *cell = ~0;
      }
      *end_cell |= end_mask;
    }
  } else {
    for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
      *cell = 0;
    }
  }

  return true;
}

static bool IsOnInvalidatedCodeObject(Address addr) {
  // We did not record any slots in large objects thus
  // we can safely go to the page from the slot address.
  Page* p = Page::FromAddress(addr);

  // First check owner's identity because old pointer and old data spaces
  // are swept lazily and might still have non-zero mark-bits on some
  // pages.
  if (p->owner()->identity() != CODE_SPACE) return false;

  // In code space only bits on evacuation candidates (but we don't record
  // any slots on them) and under invalidated code objects are non-zero.
  MarkBit mark_bit =
      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));

  return mark_bit.Get();
}

void MarkCompactCollector::InvalidateCode(Code* code) {
  if (heap_->incremental_marking()->IsCompacting() &&
      !ShouldSkipEvacuationSlotRecording(code)) {
    ASSERT(compacting_);

    // If the object is white then no slots were recorded on it yet.
    MarkBit mark_bit = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(mark_bit)) return;

    invalidated_code_.Add(code);
  }
}

bool MarkCompactCollector::MarkInvalidatedCode() {
  bool code_marked = false;

  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    Code* code = invalidated_code_[i];

    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
      code_marked = true;
    }
  }

  return code_marked;
}

void MarkCompactCollector::RemoveDeadInvalidatedCode() {
  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
  }
}

void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    Code* code = invalidated_code_[i];
    if (code != NULL) {
      code->Iterate(visitor);
      SetMarkBitsUnderInvalidatedCode(code, false);
    }
  }
  invalidated_code_.Rewind(0);
}

void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
  bool code_slots_filtering_required;
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    code_slots_filtering_required = MarkInvalidatedCode();

    EvacuateNewSpace();
  }

  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
    EvacuatePages();
  }

  // Second pass: find pointers to new space and update them.
  PointersUpdatingVisitor updating_visitor(heap());

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
    // Update pointers in to space.
    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
                            heap()->new_space()->top());
    for (HeapObject* object = to_it.Next();
         object != NULL;
         object = to_it.Next()) {
      Map* map = object->map();
      object->IterateBody(map->instance_type(),
                          object->SizeFromMap(map),
                          &updating_visitor);
    }
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
    // Update roots.
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
    LiveObjectList::IterateElements(&updating_visitor);
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
    StoreBufferRebuildScope scope(heap_,
                                  heap_->store_buffer(),
                                  &Heap::ScavengeStoreBufferCallback);
    heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
                                       migration_slots_buffer_,
                                       code_slots_filtering_required);
    if (FLAG_trace_fragmentation) {
      PrintF("  migration slots buffer: %d\n",
             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
    }

    if (compacting_ && was_marked_incrementally_) {
      // It's difficult to filter out slots recorded for large objects.
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        // LargeObjectSpace is not swept yet thus we have to skip
        // dead objects explicitly.
        if (!IsMarked(obj)) continue;

        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          obj->Iterate(&updating_visitor);
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }

  int npages = evacuation_candidates_.length();
  { GCTracer::Scope gc_scope(
        tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      ASSERT(p->IsEvacuationCandidate() ||
             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));

      if (p->IsEvacuationCandidate()) {
        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
                                           p->slots_buffer(),
                                           code_slots_filtering_required);
        if (FLAG_trace_fragmentation) {
          PrintF("  page %p slots buffer: %d\n",
                 reinterpret_cast<void*>(p),
                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
        }

        // Important: skip list should be cleared only after roots were updated
        // because root iteration traverses the stack and might have to find
        // code objects from non-updated pc pointing into evacuation candidate.
        SkipList* list = p->skip_list();
        if (list != NULL) list->Clear();
      } else {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);

        switch (space->identity()) {
          case OLD_DATA_SPACE:
            SweepConservatively(space, p);
            break;
          case OLD_POINTER_SPACE:
            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
                space, p, &updating_visitor);
            break;
          case CODE_SPACE:
            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
                space, p, &updating_visitor);
            break;
          default:
            UNREACHABLE();
            break;
        }
      }
    }
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_MISC_POINTERS);

    // Update pointers from cells.
    HeapObjectIterator cell_iterator(heap_->cell_space());
    for (HeapObject* cell = cell_iterator.Next();
         cell != NULL;
         cell = cell_iterator.Next()) {
      if (cell->IsJSGlobalPropertyCell()) {
        Address value_address =
            reinterpret_cast<Address>(cell) +
            (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
        updating_visitor.VisitPointer(
            reinterpret_cast<Object**>(value_address));
      }
    }

    // Update pointer from the global contexts list.
    updating_visitor.VisitPointer(heap_->global_contexts_list_address());

    heap_->symbol_table()->Iterate(&updating_visitor);

    // Update pointers from external string table.
    heap_->UpdateReferencesInExternalStringTable(
        &UpdateReferenceInExternalStringTableEntry);

    // Update JSFunction pointers from the runtime profiler.
    heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
        &updating_visitor);
  }

  EvacuationWeakObjectRetainer evacuation_object_retainer;
  heap()->ProcessWeakReferences(&evacuation_object_retainer);

  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
  // under it.
  ProcessInvalidatedCode(&updating_visitor);

  if (FLAG_verify_heap) {
    VerifyEvacuation(heap_);
  }

  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
  ASSERT(migration_slots_buffer_ == NULL);
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    if (!p->IsEvacuationCandidate()) continue;
    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
    p->set_scan_on_scavenge(false);
    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
    p->ClearEvacuationCandidate();
  }
  evacuation_candidates_.Rewind(0);
  compacting_ = false;
}

static const int kStartTableEntriesPerLine = 5;
static const int kStartTableLines = 171;
static const int kStartTableInvalidLine = 127;
static const int kStartTableUnusedEntry = 126;

#define _ kStartTableUnusedEntry
#define X kStartTableInvalidLine
// Mark-bit to object start offset table.
//
// The line is indexed by the mark bits in a byte.  The first number on
// the line describes the number of live object starts for the line and the
// other numbers on the line describe the offsets (in words) of the object
// starts.
//
// Since objects are at least 2 words large we don't have entries for two
// consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
  0, _, _, _, _,  // 0
  1, 0, _, _, _,  // 1
  1, 1, _, _, _,  // 2
  X, _, _, _, _,  // 3
  1, 2, _, _, _,  // 4
  2, 0, 2, _, _,  // 5
  X, _, _, _, _,  // 6
  X, _, _, _, _,  // 7
  1, 3, _, _, _,  // 8
  2, 0, 3, _, _,  // 9
  2, 1, 3, _, _,  // 10
  X, _, _, _, _,  // 11
  X, _, _, _, _,  // 12
  X, _, _, _, _,  // 13
  X, _, _, _, _,  // 14
  X, _, _, _, _,  // 15
  1, 4, _, _, _,  // 16
  2, 0, 4, _, _,  // 17
  2, 1, 4, _, _,  // 18
  X, _, _, _, _,  // 19
  2, 2, 4, _, _,  // 20
  3, 0, 2, 4, _,  // 21
  X, _, _, _, _,  // 22
  X, _, _, _, _,  // 23
  X, _, _, _, _,  // 24
  X, _, _, _, _,  // 25
  X, _, _, _, _,  // 26
  X, _, _, _, _,  // 27
  X, _, _, _, _,  // 28
  X, _, _, _, _,  // 29
  X, _, _, _, _,  // 30
  X, _, _, _, _,  // 31
  1, 5, _, _, _,  // 32
  2, 0, 5, _, _,  // 33
  2, 1, 5, _, _,  // 34
  X, _, _, _, _,  // 35
  2, 2, 5, _, _,  // 36
  3, 0, 2, 5, _,  // 37
  X, _, _, _, _,  // 38
  X, _, _, _, _,  // 39
  2, 3, 5, _, _,  // 40
  3, 0, 3, 5, _,  // 41
  3, 1, 3, 5, _,  // 42
  X, _, _, _, _,  // 43
  X, _, _, _, _,  // 44
  X, _, _, _, _,  // 45
  X, _, _, _, _,  // 46
  X, _, _, _, _,  // 47
  X, _, _, _, _,  // 48
  X, _, _, _, _,  // 49
  X, _, _, _, _,  // 50
  X, _, _, _, _,  // 51
  X, _, _, _, _,  // 52
  X, _, _, _, _,  // 53
  X, _, _, _, _,  // 54
  X, _, _, _, _,  // 55
  X, _, _, _, _,  // 56
  X, _, _, _, _,  // 57
  X, _, _, _, _,  // 58
  X, _, _, _, _,  // 59
  X, _, _, _, _,  // 60
  X, _, _, _, _,  // 61
  X, _, _, _, _,  // 62
  X, _, _, _, _,  // 63
  1, 6, _, _, _,  // 64
  2, 0, 6, _, _,  // 65
  2, 1, 6, _, _,  // 66
  X, _, _, _, _,  // 67
  2, 2, 6, _, _,  // 68
  3, 0, 2, 6, _,  // 69
  X, _, _, _, _,  // 70
  X, _, _, _, _,  // 71
  2, 3, 6, _, _,  // 72
  3, 0, 3, 6, _,  // 73
  3, 1, 3, 6, _,  // 74
  X, _, _, _, _,  // 75
  X, _, _, _, _,  // 76
  X, _, _, _, _,  // 77
  X, _, _, _, _,  // 78
  X, _, _, _, _,  // 79
  2, 4, 6, _, _,  // 80
  3, 0, 4, 6, _,  // 81
  3, 1, 4, 6, _,  // 82
  X, _, _, _, _,  // 83
  3, 2, 4, 6, _,  // 84
  4, 0, 2, 4, 6,  // 85
  X, _, _, _, _,  // 86
  X, _, _, _, _,  // 87
  X, _, _, _, _,  // 88
  X, _, _, _, _,  // 89
  X, _, _, _, _,  // 90
  X, _, _, _, _,  // 91
  X, _, _, _, _,  // 92
  X, _, _, _, _,  // 93
  X, _, _, _, _,  // 94
  X, _, _, _, _,  // 95
  X, _, _, _, _,  // 96
  X, _, _, _, _,  // 97
  X, _, _, _, _,  // 98
  X, _, _, _, _,  // 99
  X, _, _, _, _,  // 100
  X, _, _, _, _,  // 101
  X, _, _, _, _,  // 102
  X, _, _, _, _,  // 103
  X, _, _, _, _,  // 104
  X, _, _, _, _,  // 105
  X, _, _, _, _,  // 106
  X, _, _, _, _,  // 107
  X, _, _, _, _,  // 108
  X, _, _, _, _,  // 109
  X, _, _, _, _,  // 110
  X, _, _, _, _,  // 111
  X, _, _, _, _,  // 112
  X, _, _, _, _,  // 113
  X, _, _, _, _,  // 114
  X, _, _, _, _,  // 115
  X, _, _, _, _,  // 116
  X, _, _, _, _,  // 117
  X, _, _, _, _,  // 118
  X, _, _, _, _,  // 119
  X, _, _, _, _,  // 120
  X, _, _, _, _,  // 121
  X, _, _, _, _,  // 122
  X, _, _, _, _,  // 123
  X, _, _, _, _,  // 124
  X, _, _, _, _,  // 125
  X, _, _, _, _,  // 126
  X, _, _, _, _,  // 127
  1, 7, _, _, _,  // 128
  2, 0, 7, _, _,  // 129
  2, 1, 7, _, _,  // 130
  X, _, _, _, _,  // 131
  2, 2, 7, _, _,  // 132
  3, 0, 2, 7, _,  // 133
  X, _, _, _, _,  // 134
  X, _, _, _, _,  // 135
  2, 3, 7, _, _,  // 136
  3, 0, 3, 7, _,  // 137
  3, 1, 3, 7, _,  // 138
  X, _, _, _, _,  // 139
  X, _, _, _, _,  // 140
  X, _, _, _, _,  // 141
  X, _, _, _, _,  // 142
  X, _, _, _, _,  // 143
  2, 4, 7, _, _,  // 144
  3, 0, 4, 7, _,  // 145
  3, 1, 4, 7, _,  // 146
  X, _, _, _, _,  // 147
  3, 2, 4, 7, _,  // 148
  4, 0, 2, 4, 7,  // 149
  X, _, _, _, _,  // 150
  X, _, _, _, _,  // 151
  X, _, _, _, _,  // 152
  X, _, _, _, _,  // 153
  X, _, _, _, _,  // 154
  X, _, _, _, _,  // 155
  X, _, _, _, _,  // 156
  X, _, _, _, _,  // 157
  X, _, _, _, _,  // 158
  X, _, _, _, _,  // 159
  2, 5, 7, _, _,  // 160
  3, 0, 5, 7, _,  // 161
  3, 1, 5, 7, _,  // 162
  X, _, _, _, _,  // 163
  3, 2, 5, 7, _,  // 164
  4, 0, 2, 5, 7,  // 165
  X, _, _, _, _,  // 166
  X, _, _, _, _,  // 167
  3, 3, 5, 7, _,  // 168
  4, 0, 3, 5, 7,  // 169
  4, 1, 3, 5, 7   // 170
};
#undef _
#undef X
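
// Illustrative reading of the table above (not from the original source):
// the mark byte 10 is 0b00001010, i.e. bits 1 and 3 are set, so its line
// reads "2, 1, 3": two objects start in these 8 words, at word offsets 1
// and 3.  Byte 3 (0b00000011) has two consecutive bits and therefore maps
// to the invalid line X, which MarkWordToObjectStarts asserts against below.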

// Takes a word of mark bits.  Returns the number of objects that start in the
// range.  Puts the offsets of the words in the supplied array.
static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
  int objects = 0;
  int offset = 0;

  // No consecutive 1 bits.
  ASSERT((mark_bits & 0x180) != 0x180);
  ASSERT((mark_bits & 0x18000) != 0x18000);
  ASSERT((mark_bits & 0x1800000) != 0x1800000);

  while (mark_bits != 0) {
    int byte = (mark_bits & 0xff);
    mark_bits >>= 8;
    if (byte != 0) {
      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
      char* table = kStartTable + byte * kStartTableEntriesPerLine;
      int objects_in_these_8_words = table[0];
      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
      for (int i = 0; i < objects_in_these_8_words; i++) {
        starts[objects++] = offset + table[1 + i];
      }
    }
    offset += 8;
  }

  return objects;
}
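
// Illustrative call (not from the original source): for mark_bits == 0x401
// (bits 0 and 10 set), the low byte 0x01 yields one start at offset 0 and
// the second byte 0x04 yields one start at offset 8 + 2, so the function
// stores {0, 10} in |starts| and returns 2.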

static inline Address DigestFreeStart(Address approximate_free_start,
                                      uint32_t free_start_cell) {
  ASSERT(free_start_cell != 0);

  // No consecutive 1 bits.
  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);

  int offsets[16];
  uint32_t cell = free_start_cell;
  int offset_of_last_live;
  if ((cell & 0x80000000u) != 0) {
    // This case would overflow below.
    offset_of_last_live = 31;
  } else {
    // Remove all but one bit, the most significant.  This is an optimization
    // that may or may not be worthwhile.
    cell |= cell >> 16;
    cell |= cell >> 8;
    cell |= cell >> 4;
    cell |= cell >> 2;
    cell |= cell >> 1;
    cell = (cell + 1) >> 1;
    int live_objects = MarkWordToObjectStarts(cell, offsets);
    ASSERT(live_objects == 1);
    offset_of_last_live = offsets[live_objects - 1];
  }
  Address last_live_start =
      approximate_free_start + offset_of_last_live * kPointerSize;
  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
  Address free_start = last_live_start + last_live->Size();
  return free_start;
}

static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
  ASSERT(cell != 0);

  // No consecutive 1 bits.
  ASSERT((cell & (cell << 1)) == 0);

  int offsets[16];
  if (cell == 0x80000000u) {  // Avoid overflow below.
    return block_address + 31 * kPointerSize;
  }
  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
  ASSERT((first_set_bit & cell) == first_set_bit);
  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
  ASSERT(live_objects == 1);
  USE(live_objects);
  return block_address + offsets[0] * kPointerSize;
}
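
// Illustrative bit trick (not from the original source): cell ^ (cell - 1)
// sets all bits up to and including the lowest set bit.  For
// cell == 0b10100, cell - 1 == 0b10011 and the xor is 0b00111, so
// ((0b00111 + 1) >> 1) == 0b100 isolates the lowest set bit (bit 2), i.e.
// the first live object starts at word offset 2 in the block.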

// Sweeps a space conservatively.  After this has been done the larger free
// spaces have been put on the free list and the smaller ones have been
// ignored and left untouched.  A free space is always either ignored or put
// on the free list, never split up into two parts.  This is important
// because it means that any FreeSpace maps left actually describe a region of
// memory that can be ignored when scanning.  Dead objects other than free
// spaces will not contain the free space map.
intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
  MarkBit::CellType* cells = p->markbits()->cells();
  p->MarkSweptConservatively();

  int last_cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));

  int cell_index = Page::kFirstUsedCell;
  intptr_t freed_bytes = 0;

  // This is the start of the 32 word block that we are currently looking at.
  Address block_address = p->ObjectAreaStart();

  // Skip over all the dead objects at the start of the page and mark them free.
  for (cell_index = Page::kFirstUsedCell;
       cell_index < last_cell_index;
       cell_index++, block_address += 32 * kPointerSize) {
    if (cells[cell_index] != 0) break;
  }
3521
size_t size = block_address - p->ObjectAreaStart();
3522
if (cell_index == last_cell_index) {
3523
freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
3524
static_cast<int>(size)));
3525
ASSERT_EQ(0, p->LiveBytes());
3528
// Grow the size of the start-of-page free space a little to get up to the
3529
// first live object.
3530
Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
3531
// Free the first free space.
3532
size = free_end - p->ObjectAreaStart();
3533
freed_bytes += space->Free(p->ObjectAreaStart(),
3534
static_cast<int>(size));
3535
// The start of the current free area is represented in undigested form by
3536
// the address of the last 32-word section that contained a live object and
3537
// the marking bitmap for that cell, which describes where the live object
3538
// started. Unless we find a large free space in the bitmap we will not
3539
// digest this pair into a real address. We start the iteration here at the
3540
// first word in the marking bit map that indicates a live object.
3541
Address free_start = block_address;
3542
uint32_t free_start_cell = cells[cell_index];

  for ( ;
       cell_index < last_cell_index;
       cell_index++, block_address += 32 * kPointerSize) {
    ASSERT((unsigned)cell_index ==
        Bitmap::IndexToCell(
            Bitmap::CellAlignIndex(
                p->AddressToMarkbitIndex(block_address))));
    uint32_t cell = cells[cell_index];
    if (cell != 0) {
      // We have a live object.  Check approximately whether it is more than 32
      // words since the last live object.
      if (block_address - free_start > 32 * kPointerSize) {
        free_start = DigestFreeStart(free_start, free_start_cell);
        if (block_address - free_start > 32 * kPointerSize) {
          // Now that we know the exact start of the free space it still looks
          // like we have a large enough free space to be worth bothering with,
          // so now we need to find the start of the first live object at the
          // end of the free space.
          free_end = StartOfLiveObject(block_address, cell);
          freed_bytes += space->Free(free_start,
                                     static_cast<int>(free_end - free_start));
        }
      }
      // Update our undigested record of where the current free area started.
      free_start = block_address;
      free_start_cell = cell;
      // Clear marking bits for current cell.
      cells[cell_index] = 0;
    }
  }

  // Handle the free space at the end of the page.
  if (block_address - free_start > 32 * kPointerSize) {
    free_start = DigestFreeStart(free_start, free_start_cell);
    freed_bytes += space->Free(free_start,
                               static_cast<int>(block_address - free_start));
  }

  p->ResetLiveBytes();
  return freed_bytes;
}
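

// Illustrative sketch of the "undigested pair" bookkeeping above: while
// scanning, a candidate free area is remembered only as (free_start,
// free_start_cell), the address of the last 32-word block that held a live
// object plus that block's mark-bit cell.  Only when the gap to the current
// block exceeds 32 words is the pair "digested" by DigestFreeStart() into the
// exact end address of the last live object, at which point the region is
// handed to space->Free().  Smaller gaps are never materialized at all.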


void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
                                      sweeper == LAZY_CONSERVATIVE);

  space->ClearStats();

  PageIterator it(space);

  intptr_t freed_bytes = 0;
  int pages_swept = 0;
  intptr_t newspace_size = space->heap()->new_space()->Size();
  bool lazy_sweeping_active = false;
  bool unused_page_present = false;

  intptr_t old_space_size = heap()->PromotedSpaceSize();
  intptr_t space_left =
      Min(heap()->OldGenPromotionLimit(old_space_size),
          heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;

  // Page preceding current.
  Page* prev = Page::FromAddress(NULL);

  // First empty page in a sequence.
  Page* first_empty_page = Page::FromAddress(NULL);

  // Page preceding first empty page.
  Page* prec_first_empty_page = Page::FromAddress(NULL);

  // If the last used page of the space ends with a sequence of dead objects
  // we can adjust the allocation top instead of putting this free area into
  // the free list, so such areas are tracked and their deallocation deferred
  // until sweeping of the next page is done.
  Address last_free_start = NULL;
  int last_free_size = 0;

  while (it.has_next()) {
    Page* p = it.next();

    bool is_previous_alive = true;
    Address free_start = NULL;
    HeapObject* object;

    for (Address current = p->ObjectAreaStart();
         current < p->AllocationTop();
         current += object->Size()) {
      object = HeapObject::FromAddress(current);
      if (object->IsMarked()) {
        object->ClearMark();
        heap()->mark_compact_collector()->tracer()->decrement_marked_count();

        if (!is_previous_alive) {  // Transition from free to live.
          space->DeallocateBlock(free_start,
                                 static_cast<int>(current - free_start),
                                 true);
          is_previous_alive = true;
        }
      } else {
        heap()->mark_compact_collector()->ReportDeleteIfNeeded(
            object, heap()->isolate());
        if (is_previous_alive) {  // Transition from live to free.
          free_start = current;
          is_previous_alive = false;
        }
        LiveObjectList::ProcessNonLive(object);
      }
      // The object is now unmarked for the call to Size() at the top of the
      // loop.
    }

    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
        || (!is_previous_alive && free_start == p->ObjectAreaStart());

    if (page_is_empty) {
      // This page is empty.  Check whether we are in the middle of a
      // sequence of empty pages and start one if not.
      if (!first_empty_page->is_valid()) {
        first_empty_page = p;
        prec_first_empty_page = prev;
      }

      if (!is_previous_alive) {
        // There are dead objects on this page.  Update space accounting stats
        // without putting anything into the free list.
        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
        if (size_in_bytes > 0) {
          space->DeallocateBlock(free_start, size_in_bytes, false);
        }
      }
    } else {
      // This page is not empty.  A sequence of empty pages ended on the
      // previous one.
      if (first_empty_page->is_valid()) {
        space->FreePages(prec_first_empty_page, prev);
        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
      }

      // If there is a free ending area on one of the previous pages we have
      // to deallocate that area and put it on the free list.
      if (last_free_size > 0) {
        Page::FromAddress(last_free_start)->
            SetAllocationWatermark(last_free_start);
        space->DeallocateBlock(last_free_start, last_free_size, true);
        last_free_start = NULL;
        last_free_size = 0;
      }

      // If the last region of this page was not live we remember it.
      if (!is_previous_alive) {
        ASSERT(last_free_size == 0);
        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
        last_free_start = free_start;
      }
    }

    prev = p;
  }

  // We reached the end of the space.  See if we need to adjust the
  // allocation top.
  Address new_allocation_top = NULL;

  if (first_empty_page->is_valid()) {
    // The last used pages in the space are empty.  We can move the allocation
    // top backwards to the beginning of the first empty page.
    ASSERT(prev == space->AllocationTopPage());

    new_allocation_top = first_empty_page->ObjectAreaStart();
  }

  if (last_free_size > 0) {
    // There was a free ending area on the previous page.
    // Deallocate it without putting it into the free list and move the
    // allocation top to the beginning of this free area.
    space->DeallocateBlock(last_free_start, last_free_size, false);
    new_allocation_top = last_free_start;
  }

  if (new_allocation_top != NULL) {
#ifdef DEBUG
    Page* new_allocation_top_page =
        Page::FromAllocationTop(new_allocation_top);
    if (!first_empty_page->is_valid()) {
      ASSERT(new_allocation_top_page == space->AllocationTopPage());
    } else if (last_free_size > 0) {
      ASSERT(new_allocation_top_page == prec_first_empty_page);
    } else {
      ASSERT(new_allocation_top_page == first_empty_page);
    }
#endif

    space->SetTop(new_allocation_top);
  }
}
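

// Example of the adjustment above (illustrative): if a space's last three
// pages are [live | empty | empty], first_empty_page is the second page, so
// the allocation top is moved back to that page's ObjectAreaStart() instead
// of threading both empty pages onto the free list.  If instead the last
// live page merely ends with dead objects, last_free_start marks where the
// allocation top is rolled back to.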


void MarkCompactCollector::EncodeForwardingAddresses() {
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  // Objects in the active semispace of the young generation may be
  // relocated to the inactive semispace (if not promoted).  Set the
  // relocation info to the beginning of the inactive semispace.
  heap()->new_space()->MCResetRelocationInfo();

  // Compute the forwarding pointers in each space.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                        ReportDeleteIfNeeded>(
      heap()->old_pointer_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                        IgnoreNonLiveObject>(
      heap()->old_data_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                        ReportDeleteIfNeeded>(
      heap()->code_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
                                        IgnoreNonLiveObject>(
      heap()->cell_space());

  // Compute new space next to last after the old and code spaces have been
  // compacted.  Objects in new space can be promoted to old or code space.
  EncodeForwardingAddressesInNewSpace();

  // Compute map space last because computing forwarding addresses
  // overwrites non-live objects.  Objects in the other spaces rely on
  // non-live map pointers to get the sizes of non-live objects.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                        IgnoreNonLiveObject>(
      heap()->map_space());

  // Write relocation info to the top page, so we can use it later.  This is
  // done after promoting objects from the new space so we get the correct
  // allocation top.
  heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
  heap()->old_data_space()->MCWriteRelocationInfoToPage();
  heap()->code_space()->MCWriteRelocationInfoToPage();
  heap()->map_space()->MCWriteRelocationInfoToPage();
  heap()->cell_space()->MCWriteRelocationInfoToPage();
}


class MapIterator : public HeapObjectIterator {
 public:
  explicit MapIterator(Heap* heap)
      : HeapObjectIterator(heap->map_space(), &SizeCallback) { }

  MapIterator(Heap* heap, Address start)
      : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }

 private:
  static int SizeCallback(HeapObject* unused) {
    USE(unused);
    return Map::kSize;
  }
};


class MapCompact {
 public:
  explicit MapCompact(Heap* heap, int live_maps)
      : heap_(heap),
        live_maps_(live_maps),
        to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
        vacant_map_it_(heap),
        map_to_evacuate_it_(heap, to_evacuate_start_),
        first_map_to_evacuate_(
            reinterpret_cast<Map*>(
                HeapObject::FromAddress(to_evacuate_start_))) {
  }

  void CompactMaps() {
    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
    for (Map* next_vacant_map = NextVacantMap();
         next_vacant_map != NULL;
         next_vacant_map = NextVacantMap()) {
      EvacuateMap(next_vacant_map, NextMapToEvacuate());
    }

#ifdef DEBUG
    CheckNoMapsToEvacuate();
#endif
  }
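
  // Sketch of the layout CompactMaps() works with (illustrative): after
  // marking, the map space looks like
  //   [ live | free | live | free | ... | live | live ]
  // vacant_map_it_ scans free slots below the post-compaction top
  // (to_evacuate_start_), while map_to_evacuate_it_ scans live maps above it.
  // Each vacant slot below the boundary is filled with a live map copied
  // down from above the boundary.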

  void UpdateMapPointersInRoots() {
    MapUpdatingVisitor map_updating_visitor;
    heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
    heap()->isolate()->global_handles()->IterateWeakRoots(
        &map_updating_visitor);
    LiveObjectList::IterateElements(&map_updating_visitor);
  }

  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
    ASSERT(space != heap()->map_space());

    PageIterator it(space, PageIterator::PAGES_IN_USE);
    while (it.has_next()) {
      Page* p = it.next();
      UpdateMapPointersInRange(heap(),
                               p->ObjectAreaStart(),
                               p->AllocationTop());
    }
  }

  void UpdateMapPointersInNewSpace() {
    NewSpace* space = heap()->new_space();
    UpdateMapPointersInRange(heap(), space->bottom(), space->top());
  }

  void UpdateMapPointersInLargeObjectSpace() {
    LargeObjectIterator it(heap()->lo_space());
    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
      UpdateMapPointersInObject(heap(), obj);
  }

  void Finish() {
    heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
  }

 private:
  inline Heap* heap() const { return heap_; }

  Heap* heap_;
  int live_maps_;
  Address to_evacuate_start_;
  MapIterator vacant_map_it_;
  MapIterator map_to_evacuate_it_;
  Map* first_map_to_evacuate_;

  // Helper class for updating map pointers in HeapObjects.
  class MapUpdatingVisitor: public ObjectVisitor {
   public:
    MapUpdatingVisitor() {}

    void VisitPointer(Object** p) {
      UpdateMapPointer(p);
    }

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
    }

   private:
    void UpdateMapPointer(Object** p) {
      if (!(*p)->IsHeapObject()) return;
      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);

      // Moved maps are tagged with an overflowed map word.  They are the
      // only objects whose map word is overflowed, as marking is already
      // complete.
      MapWord map_word = old_map->map_word();
      if (!map_word.IsOverflowed()) return;

      *p = GetForwardedMap(map_word);
    }
  };

  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
    while (true) {
      HeapObject* next = it->next();
      ASSERT(next != NULL);
      if (next == last)
        return NULL;
      ASSERT(!next->IsOverflowed());
      ASSERT(!next->IsMarked());
      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
      if (next->IsMap() == live)
        return reinterpret_cast<Map*>(next);
    }
  }

  Map* NextVacantMap() {
    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
    return map;
  }

  Map* NextMapToEvacuate() {
    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
    ASSERT(map != NULL);
    ASSERT(map->IsMap());
    return map;
  }

  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
    ASSERT(map_to_evacuate->IsMap());

    ASSERT(Map::kSize % 4 == 0);

    map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
        vacant_map->address(), map_to_evacuate->address(), Map::kSize);

    ASSERT(vacant_map->IsMap());  // Due to memcpy above.

    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
    forwarding_map_word.SetOverflow();
    map_to_evacuate->set_map_word(forwarding_map_word);

    ASSERT(map_to_evacuate->map_word().IsOverflowed());
    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
  }

  static Map* GetForwardedMap(MapWord map_word) {
    ASSERT(map_word.IsOverflowed());
    map_word.ClearOverflow();
    Map* new_map = map_word.ToMap();
    ASSERT_MAP_ALIGNED(new_map->address());
    return new_map;
  }
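
  // Note on the encoding used above: a moved map leaves, in its old slot, a
  // map word holding the new location with the overflow bit set.  Since
  // marking is finished, no other object has an overflowed map word, so
  // IsOverflowed() alone distinguishes forwarded maps, and clearing the bit
  // recovers the destination address.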

  static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
    ASSERT(!obj->IsMarked());
    Map* map = obj->map();
    ASSERT(heap->map_space()->Contains(map));
    MapWord map_word = map->map_word();
    ASSERT(!map_word.IsMarked());
    if (map_word.IsOverflowed()) {
      Map* new_map = GetForwardedMap(map_word);
      ASSERT(heap->map_space()->Contains(new_map));
      obj->set_map(new_map);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("update %p : %p -> %p\n",
               obj->address(),
               reinterpret_cast<void*>(map),
               reinterpret_cast<void*>(new_map));
      }
#endif
    }

    int size = obj->SizeFromMap(map);
    MapUpdatingVisitor map_updating_visitor;
    obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
    return size;
  }

  static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
    HeapObject* object;
    int size = 0;
    for (Address current = start; current < end; current += size) {
      object = HeapObject::FromAddress(current);
      size = UpdateMapPointersInObject(heap, object);
      ASSERT(size > 0);
    }
  }

#ifdef DEBUG
  void CheckNoMapsToEvacuate() {
    if (!FLAG_enable_slow_asserts)
      return;

    for (HeapObject* obj = map_to_evacuate_it_.next();
         obj != NULL; obj = map_to_evacuate_it_.next())
      ASSERT(FreeListNode::IsFreeListNode(obj));
  }
#endif
};


    // Clear sweeping flags indicating that marking bits are still intact.
    p->ClearSweptPrecisely();
    p->ClearSweptConservatively();

    if (p->IsEvacuationCandidate()) {
      ASSERT(evacuation_candidates_.length() > 0);
      continue;
    }

    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
      // Will be processed in EvacuateNewSpaceAndCandidates.
      continue;
    }

    // One unused page is kept, all further are released before sweeping them.
    if (p->LiveBytes() == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        space->ReleasePage(p);
        continue;
      }
      unused_page_present = true;
    }

    if (lazy_sweeping_active) {
      if (FLAG_gc_verbose) {
        PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
               reinterpret_cast<intptr_t>(p));
      }
      continue;
    }

    switch (sweeper) {
      case CONSERVATIVE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        SweepConservatively(space, p);
        pages_swept++;
        break;
      }
      case LAZY_CONSERVATIVE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        freed_bytes += SweepConservatively(space, p);
        pages_swept++;
        if (space_left + freed_bytes > newspace_size) {
          space->SetPagesToSweep(p->next_page());
          lazy_sweeping_active = true;
        } else {
          if (FLAG_gc_verbose) {
            PrintF("Only %" V8PRIdPTR " bytes freed.  Still sweeping.\n",
                   freed_bytes);
          }
        }
        break;
      }
      case PRECISE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        if (space->identity() == CODE_SPACE) {
          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
        } else {
          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
        }
        pages_swept++;
        break;
      }
      default: {
        UNREACHABLE();
      }
    }
  }

  if (FLAG_gc_verbose) {
    PrintF("SweepSpace: %s (%d pages swept)\n",
           AllocationSpaceName(space->identity()),
           pages_swept);
  }

  // Give pages that are queued to be freed back to the OS.
  heap()->FreeQueuedChunks();
}
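

// The LAZY_CONSERVATIVE heuristic above stops eager sweeping once
// space_left + freed_bytes exceeds the current new-space size: roughly, once
// enough old-space room has been reclaimed to absorb a full scavenge's worth
// of promotions, the remaining pages (recorded via SetPagesToSweep) can be
// swept on demand by the allocator instead.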


void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);

  ASSERT(state_ == SWEEP_SPACES);
  ASSERT(!IsCompacting());
  state_ = SWEEP_SPACES;

  SweeperType how_to_sweep =
      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
  if (sweep_precisely_) how_to_sweep = PRECISE;

  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces).  We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes of
  // non-live objects.
  SweepSpace(heap(), heap()->old_pointer_space());
  SweepSpace(heap(), heap()->old_data_space());
  SweepSpace(heap(), heap()->code_space());
  SweepSpace(heap(), heap()->cell_space());
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    SweepNewSpace(heap(), heap()->new_space());
  }
  SweepSpace(heap(), heap()->map_space());

  heap()->IterateDirtyRegions(heap()->map_space(),
                              &heap()->IteratePointersInDirtyMapsRegion,
                              &UpdatePointerToNewGen,
                              heap()->WATERMARK_SHOULD_BE_VALID);

  intptr_t live_maps_size = heap()->map_space()->Size();
  int live_maps = static_cast<int>(live_maps_size / Map::kSize);
  ASSERT(live_map_objects_size_ == live_maps_size);

  if (heap()->map_space()->NeedsCompaction(live_maps)) {
    MapCompact map_compact(heap(), live_maps);

    map_compact.CompactMaps();
    map_compact.UpdateMapPointersInRoots();

    PagedSpaces spaces;
    for (PagedSpace* space = spaces.next();
         space != NULL; space = spaces.next()) {
      if (space == heap()->map_space()) continue;
      map_compact.UpdateMapPointersInPagedSpace(space);
    }
    map_compact.UpdateMapPointersInNewSpace();
    map_compact.UpdateMapPointersInLargeObjectSpace();

    map_compact.Finish();
  }
}


// Iterate the live objects in a range of addresses (e.g. a page or a
// semispace), using the free-region encodings written during
// forwarding-address encoding to skip over dead regions.  The callback
// function is used to get the size of each live object.
int MarkCompactCollector::IterateLiveObjectsInRange(
    Address start,
    Address end,
    LiveObjectCallback size_func) {
  int live_objects_size = 0;
  Address current = start;
  while (current < end) {
    uint32_t encoded_map = Memory::uint32_at(current);
    if (encoded_map == kSingleFreeEncoding) {
      current += kPointerSize;
    } else if (encoded_map == kMultiFreeEncoding) {
      current += Memory::int_at(current + kIntSize);
    } else {
      int size = (this->*size_func)(HeapObject::FromAddress(current));
      current += size;
      live_objects_size += size;
    }
  }
  return live_objects_size;
}
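

// Encoding note (as consumed above): during forwarding-address encoding the
// first word of a dead region is overwritten.  kSingleFreeEncoding marks a
// single free pointer-sized word, while kMultiFreeEncoding marks a longer
// free region whose byte length is stored in the word that follows, which is
// why the loop reads Memory::int_at(current + kIntSize) to skip it.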


int MarkCompactCollector::IterateLiveObjects(
    NewSpace* space, LiveObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}


int MarkCompactCollector::IterateLiveObjects(
    PagedSpace* space, LiveObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  int total = 0;
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
                                       p->AllocationTop(),
                                       size_f);
  }
  return total;
}


// -------------------------------------------------------------------------
// Phase 3: Update pointers

// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
 public:
  explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

  inline Heap* heap() const { return heap_; }

 private:
  void UpdatePointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();
    Address new_addr;
    ASSERT(!heap()->InFromSpace(obj));

    if (heap()->new_space()->Contains(obj)) {
      Address forwarding_pointer_addr =
          heap()->new_space()->FromSpaceLow() +
          heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
      new_addr = Memory::Address_at(forwarding_pointer_addr);

#ifdef DEBUG
      ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
             heap()->old_data_space()->Contains(new_addr) ||
             heap()->new_space()->FromSpaceContains(new_addr) ||
             heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));

      if (heap()->new_space()->FromSpaceContains(new_addr)) {
        ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
               heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
      }
#endif

    } else if (heap()->lo_space()->Contains(obj)) {
      // Don't move objects in the large object space.
      return;
    } else {
      PagedSpaces spaces;
      PagedSpace* original_space = spaces.next();
      while (original_space != NULL) {
        if (original_space->Contains(obj)) break;
        original_space = spaces.next();
      }
      ASSERT(original_space != NULL);

      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
      ASSERT(original_space->Contains(new_addr));
      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
             original_space->MCSpaceOffsetForAddress(old_addr));
    }

    *p = HeapObject::FromAddress(new_addr);

#ifdef DEBUG
    if (FLAG_gc_verbose) {
      PrintF("update %p : %p -> %p\n",
             reinterpret_cast<Address>(p), old_addr, new_addr);
    }
#endif
  }

  Heap* heap_;
};
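

// New-space forwarding (as read above): for a surviving new-space object the
// forwarding address is not stored in the object itself but in the
// from-space word mirroring the object's to-space offset, i.e. at
// FromSpaceLow() + ToSpaceOffsetForAddress(old_addr).  Old-space objects
// instead derive their destination from the page-relative encoding consumed
// by GetForwardingAddressInOldSpace().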


void MarkCompactCollector::UpdatePointers() {
#ifdef DEBUG
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor(heap());
  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
      &updating_visitor);
  heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
  heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);

  // Update the pointer to the head of the weak list of global contexts.
  updating_visitor.VisitPointer(&heap()->global_contexts_list_);

  LiveObjectList::IterateElements(&updating_visitor);

  int live_maps_size = IterateLiveObjects(
      heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_pointer_olds_size = IterateLiveObjects(
      heap()->old_pointer_space(),
      &MarkCompactCollector::UpdatePointersInOldObject);
  int live_data_olds_size = IterateLiveObjects(
      heap()->old_data_space(),
      &MarkCompactCollector::UpdatePointersInOldObject);
  int live_codes_size = IterateLiveObjects(
      heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_cells_size = IterateLiveObjects(
      heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_news_size = IterateLiveObjects(
      heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);

  // Large objects do not move, the map word can be updated directly.
  LargeObjectIterator it(heap()->lo_space());
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    UpdatePointersInNewObject(obj);
  }

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);
}


int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Keep old map pointers.
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(heap()->map_space()->Contains(old_map));
  ASSERT(heap()->map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor(heap());
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}


int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point, the first word of map_addr is also encoded, so we cannot
  // cast it to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update the map pointer.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor(heap());
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}


Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in old or map space.
  MapWord encoding = obj->map_word();

  // Offset to the first live object's forwarding address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page start address of forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find the end of allocation in the page of first_forwarded.
  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();

  // Check if the current object's forwarding pointer is in the same page
  // as the first live object's forwarding pointer.
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // Must be in the next page, NOTE: this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());

  return next_page->OffsetToAddress(offset);
}
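

// Worked example (illustrative): suppose an object's map word decodes to
// offset = 0x500, the page's mc_first_forwarded points 0x300 bytes into its
// destination page, and that page's allocation watermark is at offset 0x700.
// Then forwarded_offset + offset = 0x800 >= 0x700, so the target spills into
// the next page: the remaining 0x100 bytes past the watermark are rebased to
// the next page's object area via Page::kObjectStartOffset.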


// -------------------------------------------------------------------------
// Phase 4: Relocate objects

void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocate objects; always relocate map objects first, since relocating
  // objects in the other spaces relies on map objects to get object sizes.
  int live_maps_size = IterateLiveObjects(
      heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
  int live_pointer_olds_size = IterateLiveObjects(
      heap()->old_pointer_space(),
      &MarkCompactCollector::RelocateOldPointerObject);
  int live_data_olds_size = IterateLiveObjects(
      heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
  int live_codes_size = IterateLiveObjects(
      heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
  int live_cells_size = IterateLiveObjects(
      heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
  int live_news_size = IterateLiveObjects(
      heap()->new_space(), &MarkCompactCollector::RelocateNewObject);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);

  // Flip from and to spaces.
  heap()->new_space()->Flip();

  heap()->new_space()->MCCommitRelocationInfo();

  // Set age_mark to bottom in to space.
  Address mark = heap()->new_space()->bottom();
  heap()->new_space()->set_age_mark(mark);

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->MCCommitRelocationInfo();
  }

  heap()->CheckNewSpaceExpansionCriteria();
  heap()->IncrementYoungSurvivorsCounter(live_news_size);
}


int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.  The meta map object may not be copied yet, so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    Map::kSize);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}


static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset the map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}


int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(map_addr));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    if (space == heap()->old_data_space()) {
      heap()->MoveBlock(new_addr, old_addr, obj_size);
    } else {
      heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                      old_addr,
                                                      obj_size);
    }
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(heap()->isolate(),
            SharedFunctionInfoMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}
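

// Design note: old data space holds only objects without pointers into the
// young generation, so a plain MoveBlock() suffices there; the other paged
// spaces must also refresh the page region marks (the write-barrier dirty
// bits) at the destination, hence MoveBlockToOldSpaceAndUpdateRegionMarks().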


int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap()->old_data_space());
}


int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap()->cell_space());
}


int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    heap()->MoveBlock(new_addr, old_addr, obj_size);
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update inline cache target.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get the forwarding address.
  Address old_addr = obj->address();
  int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
      Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (heap()->new_space()->FromSpaceContains(new_addr)) {
    ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
           heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
           heap()->TargetSpace(obj) == heap()->old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
    heap()->CopyBlock(new_addr, old_addr, obj_size);
  } else {
    heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(heap()->isolate(),
            SharedFunctionInfoMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}

  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
  SweepSpace(heap()->old_data_space(), how_to_sweep);

  RemoveDeadInvalidatedCode();
  SweepSpace(heap()->code_space(), PRECISE);

  SweepSpace(heap()->cell_space(), PRECISE);

  EvacuateNewSpaceAndCandidates();

  // ClearNonLiveTransitions depends on precise sweeping of the map space to
  // detect whether an unmarked map became dead in this collection or in one
  // of the previous ones.
  SweepSpace(heap()->map_space(), PRECISE);

  // Deallocate unmarked objects and clear marked bits for marked objects.
  heap_->lo_space()->FreeUnmarkedObjects();
}