// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"

#include "incremental-marking.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "v8conversions.h"

namespace v8 {
namespace internal {
IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      marker_(this, heap->mark_compact_collector()),
      steps_count_(0),
      steps_took_(0),
      longest_step_(0.0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      should_hurry_(false),
      allocation_marking_factor_(0),
      allocated_(0),
      no_marking_scope_depth_(0) {
}
void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}
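// Slow path of the incremental write barrier. When compacting, a store into
// an already-black object must be recorded so the slot can be updated after
// evacuation; grey and white objects will still be scanned, so no record is
// needed for them.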
void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned, so we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(
          HeapObject::RawField(obj, 0), slot, value);
    }
  }
}
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object* value,
                                             Isolate* isolate) {
  ASSERT(obj->IsHeapObject());

  // Fast cases should already be covered by RecordWriteStub.
  ASSERT(value->IsHeapObject());
  ASSERT(!value->IsHeapNumber());
  ASSERT(!value->IsString() ||
         value->IsConsString() ||
         value->IsSlicedString());
  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));

  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(!marking->is_compacting_);
  marking->RecordWrite(obj, NULL, value);
}
void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
                                                          Object** slot,
                                                          Isolate* isolate) {
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(marking->is_compacting_);
  marking->RecordWrite(obj, slot, *slot);
}
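// Code-target patches are reported to the write barrier so that the patched
// target gets marked and, when compacting, the relocation slot is recorded.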
void IncrementalMarking::RecordCodeTargetPatch(Code* host,
                                               Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}
void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}
void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
    ASSERT(slot != NULL);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }
}
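// Write barrier for pointers embedded in code objects: a white target
// reverts a black host to grey so the code body is rescanned; otherwise,
// when compacting, the relocation slot is recorded for pointer updating.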
void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}
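// The static visitor used for a marking step: it scans the body of a grey
// object, records slots for the compactor, and marks every reachable white
// object grey (or black, for objects on data-only pages).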
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();

    table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
    table_.Register(kVisitJSFunction, &VisitJSFunction);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    if (target->NonFailureIsHeapObject()) {
      heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
      MarkObject(heap, target);
    }
  }

  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
        && (target->ic_age() != heap->global_ic_age())) {
      IC::Clear(rinfo->pc());
      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    }
    heap->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
    MarkObject(heap, target);
  }

  static void VisitCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    Code* code = reinterpret_cast<Code*>(object);
    code->CodeIterateBody<IncrementalMarkingMarkingVisitor>(heap);
  }

  static void VisitJSWeakMap(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    VisitPointers(heap,
                  HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
                  HeapObject::RawField(object, JSWeakMap::kSize));
  }

  static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
    if (shared->ic_age() != heap->global_ic_age()) {
      shared->ResetForNewContext(heap->global_ic_age());
    }
    FixedBodyVisitor<IncrementalMarkingMarkingVisitor,
                     SharedFunctionInfo::BodyDescriptor,
                     void>::Visit(map, object);
  }

  static inline void VisitJSFunction(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    // Iterate over all fields in the body but take care in dealing with
    // the code entry and skip weak fields.
    VisitPointers(heap,
                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
    VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
    VisitPointers(heap,
                  HeapObject::RawField(object,
                                       JSFunction::kCodeEntryOffset + kPointerSize),
                  HeapObject::RawField(object,
                                       JSFunction::kNonWeakFieldsEndOffset));
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->NonFailureIsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->NonFailureIsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the given object: objects on data-only pages are marked black
  // immediately (they contain no pointers to scan), everything else goes
  // from white to grey and is pushed on the marking deque.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
      }
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }
};
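// Visitor used at the start of incremental marking to colour the strong
// roots grey.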
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  IncrementalMarkingRootMarkingVisitor(Heap* heap,
                                       IncrementalMarking* incremental_marking)
      : heap_(heap),
        incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
      }
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  Heap* heap_;
  IncrementalMarking* incremental_marking_;
};
void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}
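// The POINTERS_TO/FROM_HERE_ARE_INTERESTING page flags below decide which
// write-barrier paths are taken while incremental marking is active;
// RESCAN_ON_EVACUATION covers large objects whose recorded slots are hard
// to filter.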
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}
void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}
void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}
void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}
void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}
void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}
void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}
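// Incremental marking is only worth the write-barrier overhead once enough
// data has been promoted into the old generation, and only when neither a
// flag nor the serializer forbids it.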
bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif

  return !FLAG_expose_gc &&
      FLAG_incremental_marking &&
      !Serializer::enabled() &&
      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}
void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  ASSERT(RecordWriteStub::GetMode(stub) ==
         RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}
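// Iterates the code-stub cache and switches every RecordWrite stub between
// STORE_BUFFER_ONLY and the incremental modes.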
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}
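// The marking deque lives in a 4 MB virtual memory reservation that is
// committed lazily when marking starts and uncommitted again while marking
// is stopped.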
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}
void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}
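// Starts incremental marking, or finishes sweeping the old spaces
// incrementally first if they are not fully swept yet.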
void IncrementalMarking::Start() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  ASSERT(FLAG_incremental_marking);
  ASSERT(state_ == STOPPED);

  ResetStepCounters();

  if (heap_->old_pointer_space()->IsSweepingComplete() &&
      heap_->old_data_space()->IsSweepingComplete()) {
    StartMarking(ALLOW_COMPACTION);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      // The object was black, so its live bytes were already counted;
      // subtract them again since it is about to become grey.
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}
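// Switches to the MARKING state: decides whether this cycle will compact,
// patches the record-write stubs accordingly, sets up the marking deque and
// the incremental write barrier, and marks the strong roots grey.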
void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_ ?
      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

#ifdef DEBUG
  // Marking bits are cleared by the sweeper.
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}
void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}
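// A scavenge can move objects that are on the marking deque; rewrite the
// deque entries to the forwarding addresses and drop entries whose objects
// did not survive.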
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
}
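// Drains the marking deque in one non-incremental pass, marking everything
// that is popped black. Used when marking has to finish promptly.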
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Hurry\n");
      start = OS::TimeCurrentMillis();
    }
    // TODO(gc) hurry can mark objects it encounters black as the mutator
    // is stopped.
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    while (!marking_deque_.IsEmpty()) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) {
        continue;
      } else if (map == global_context_map) {
        // Global contexts have weak fields.
        IncrementalMarkingMarkingVisitor::VisitGlobalContext(map, obj);
      } else if (map->instance_type() == MAP_TYPE) {
        Map* map = Map::cast(obj);
        heap_->ClearCacheOnMap(map);

        // When map collection is enabled we have to mark through map's
        // transitions and back pointers in a special way to make these links
        // weak. Only maps for subclasses of JSReceiver can have transitions.
        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
        if (FLAG_collect_maps &&
            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
          marker_.MarkMapContents(map);
        } else {
          IncrementalMarkingMarkingVisitor::VisitPointers(
              heap_,
              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
        }
      } else {
        MarkBit map_mark_bit = Marking::MarkBitFrom(map);
        if (Marking::IsWhite(map_mark_bit)) {
          WhiteToGreyAndPush(map, map_mark_bit);
        }
        IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
      }

      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(!Marking::IsBlack(mark_bit));
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
    }
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking) {
      double end = OS::TimeCurrentMillis();
      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
             static_cast<int>(end - start));
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->global_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}
void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
  state_ = STOPPED;
  is_compacting_ = false;
}
void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  ASSERT(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
}
void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental,
  // so we set the should-hurry flag to indicate that there can't be much
  // work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}
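// Performs one marking step. The step is driven by allocation: it scans
// roughly allocation_marking_factor_ times the bytes allocated since the
// last step, and the heuristics below adapt that factor so that marking
// keeps ahead of allocation.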
void IncrementalMarking::Step(intptr_t allocated_bytes,
                              CompletionAction action) {
  if (heap_->gc_state() != Heap::NOT_IN_GC ||
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold) return;
  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
  bytes_scanned_ += bytes_to_process;

  double start = 0;

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    start = OS::TimeCurrentMillis();
  }

  if (state_ == SWEEPING) {
    if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
      bytes_scanned_ = 0;
      StartMarking(PREVENT_COMPACTION);
    }
  } else if (state_ == MARKING) {
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) continue;

      int size = obj->SizeFromMap(map);
      bytes_to_process -= size;
      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
      if (Marking::IsWhite(map_mark_bit)) {
        WhiteToGreyAndPush(map, map_mark_bit);
      }

      // TODO(gc) switch to static visitor instead of normal visitor.
      if (map == global_context_map) {
        // Global contexts have weak fields.
        Context* ctx = Context::cast(obj);

        // We will mark cache black with a separate pass
        // when we finish marking.
        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());

        IncrementalMarkingMarkingVisitor::VisitGlobalContext(map, ctx);
      } else if (map->instance_type() == MAP_TYPE) {
        Map* map = Map::cast(obj);
        heap_->ClearCacheOnMap(map);

        // When map collection is enabled we have to mark through map's
        // transitions and back pointers in a special way to make these links
        // weak. Only maps for subclasses of JSReceiver can have transitions.
        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
        if (FLAG_collect_maps &&
            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
          marker_.MarkMapContents(map);
        } else {
          IncrementalMarkingMarkingVisitor::VisitPointers(
              heap_,
              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
        }
      } else {
        IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
      }

      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
      Marking::MarkBlack(obj_mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
    }
    if (marking_deque_.IsEmpty()) MarkingComplete(action);
  }
  allocated_ = 0;

  steps_count_++;
  steps_count_since_last_gc_++;

  bool speed_up = false;

  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
          old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (allocation_marking_factor_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking = heap_->PromotedTotalSize()
      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = allocation_marking_factor_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
      allocation_marking_factor_ = static_cast<int>(
          Min(kMaxAllocationMarkingFactor,
              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", allocation_marking_factor_);
      }
    }
  }

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    double end = OS::TimeCurrentMillis();
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
  }
}
void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  steps_took_ = 0;
  longest_step_ = 0.0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
  bytes_scanned_ = 0;
}
int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}
} }  // namespace v8::internal