void Heap::ClearRSetRange(Address start, int size_in_bytes) {
  uint32_t start_bit;
  Address start_word_address =
      Page::ComputeRSetBitPosition(start, 0, &start_bit);
  uint32_t end_bit;
  Address end_word_address =
      Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
                                   0,
                                   &end_bit);

  // We want to clear the bits in the starting word starting with the
  // first bit, and in the ending word up to and including the last
  // bit.  Build a pair of bitmasks to do that.
  uint32_t start_bitmask = start_bit - 1;
  uint32_t end_bitmask = ~((end_bit << 1) - 1);

  // If the start address and end address are the same, we mask that
  // word once, otherwise mask the starting and ending word
  // separately and all the ones in between.
  if (start_word_address == end_word_address) {
    Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
  } else {
    Memory::uint32_at(start_word_address) &= start_bitmask;
    Memory::uint32_at(end_word_address) &= end_bitmask;
    start_word_address += kIntSize;
    memset(start_word_address, 0, end_word_address - start_word_address);
  }
}

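// Worked example (illustrative numbers, not from the original source): with
// 32-bit remembered-set words, if start_bit == 0x00000010 and
// end_bit == 0x00400000, then
//   start_bitmask == 0x0000000f  (keeps only the bits below the start bit)
//   end_bitmask   == 0xff800000  (keeps only the bits above the end bit)
// so ANDing with (start_bitmask | end_bitmask) clears exactly the bits from
// the start bit through the end bit, inclusive.
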
class UpdateRSetVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdateRSet(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update a store into slots [start, end), used (a) to update remembered
    // set when promoting a young object to old space or (b) to rebuild
    // remembered sets after a mark-compact collection.
    for (Object** p = start; p < end; p++) UpdateRSet(p);
  }

 private:
  void UpdateRSet(Object** p) {
    // The remembered set should not be set. It should be clear for objects
    // newly copied to old space, and it is cleared before rebuilding in the
    // mark-compact collector.
    ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
    if (Heap::InNewSpace(*p)) {
      Page::SetRSet(reinterpret_cast<Address>(p), 0);
    }
  }
};

int Heap::UpdateRSet(HeapObject* obj) {
  ASSERT(!InNewSpace(obj));
  // Special handling of fixed arrays to iterate the body based on the start
  // address and offset.  Just iterating the pointers as in UpdateRSetVisitor
  // will not work because Page::SetRSet needs to have the start of the
  // object for large object pages.
  if (obj->IsFixedArray()) {
    FixedArray* array = FixedArray::cast(obj);
    int length = array->length();
    for (int i = 0; i < length; i++) {
      int offset = FixedArray::kHeaderSize + i * kPointerSize;
      ASSERT(!Page::IsRSetSet(obj->address(), offset));
      if (Heap::InNewSpace(array->get(i))) {
        Page::SetRSet(obj->address(), offset);
      }
    }
  } else if (!obj->IsCode()) {
    // Skip code objects; we know they do not contain inter-generational
    // pointers.
    UpdateRSetVisitor v;
    obj->Iterate(&v);
  }
  return obj->Size();
}

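// Example (assuming a 32-bit target, kPointerSize == 4): the slot holding
// element 2 of an array is recorded as the pair
// (array->address(), FixedArray::kHeaderSize + 2 * kPointerSize) rather than
// as a raw slot address, so that Page::SetRSet can locate the remembered-set
// words from the object start even when the array lives on a large object
// page.
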
void Heap::RebuildRSets() {
  // By definition, we do not care about remembered set bits in code,
  // data, or cell spaces.
  map_space_->ClearRSet();
  RebuildRSets(map_space_);

  old_pointer_space_->ClearRSet();
  RebuildRSets(old_pointer_space_);

  lo_space_->ClearRSet();
  RebuildRSets(lo_space_);
}

void Heap::RebuildRSets(PagedSpace* space) {
  HeapObjectIterator it(space);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
    Heap::UpdateRSet(obj);
}


void Heap::RebuildRSets(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
    Heap::UpdateRSet(obj);
}

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::RecordCopiedObject(HeapObject* obj) {
  bool should_record = false;
#ifdef DEBUG
  should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
  should_record = should_record || FLAG_log_gc;
#endif
  if (should_record) {
    if (new_space_.Contains(obj)) {
      new_space_.RecordAllocation(obj);
    } else {
      new_space_.RecordPromotion(obj);
    }
  }
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)

// The StackVisitor is used to traverse all the archived threads to see if
// there are activations on any of the stacks corresponding to the code.
class FlushingStackVisitor : public ThreadVisitor {
 public:
  explicit FlushingStackVisitor(Code* code) : found_(false), code_(code) {}

  void VisitThread(ThreadLocalTop* top) {
    // If we already found the code in a previously traversed thread we return.
    if (found_) return;
    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
      if (code_->contains(it.frame()->pc())) {
        found_ = true;
        return;
      }
    }
  }

  bool FoundCode() { return found_; }

 private:
  bool found_;
  Code* code_;
};

static void FlushCodeForFunction(SharedFunctionInfo* function_info) {
  // The function must be compiled and have the source code available,
  // to be able to recompile it in case we need the function again.
  if (!(function_info->is_compiled() && function_info->HasSourceCode())) return;

  // We never flush code for Api functions.
  if (function_info->IsApiFunction()) return;

  // Only flush code for functions.
  if (function_info->code()->kind() != Code::FUNCTION) return;

  // Function must be lazy compilable.
  if (!function_info->allows_lazy_compilation()) return;

  // If this is a full script wrapped in a function we do not flush the code.
  if (function_info->is_toplevel()) return;

  // If this function is in the compilation cache we do not flush the code.
  if (CompilationCache::HasFunction(function_info)) return;

  // Make sure we are not referencing the code from the stack.
  for (StackFrameIterator it; !it.done(); it.Advance()) {
    if (function_info->code()->contains(it.frame()->pc())) return;
  }
  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  FlushingStackVisitor threadvisitor(function_info->code());
  ThreadManager::IterateArchivedThreads(&threadvisitor);
  if (threadvisitor.FoundCode()) return;

  // Compute the lazy compilable version of the code.
  function_info->set_code(*ComputeLazyCompile(function_info->length()));
}

void Heap::FlushCode() {
#ifdef ENABLE_DEBUGGER_SUPPORT
  // Do not flush code if the debugger is loaded or there are breakpoints.
  if (Debug::IsLoaded() || Debug::has_break_points()) return;
#endif
  HeapObjectIterator it(old_pointer_space());
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    if (obj->IsJSFunction()) {
      JSFunction* jsfunction = JSFunction::cast(obj);

      // The function must have a valid context and not be a builtin.
      if (jsfunction->unchecked_context()->IsContext() &&
          !jsfunction->IsBuiltin()) {
        FlushCodeForFunction(jsfunction->shared());
      }
    }
  }
}

Object* Heap::CreateCode(const CodeDesc& desc,
                         ZoneScopeInfo* sinfo,
                         Code::Flags flags,
int Heap::IterateRSetRange(Address object_start,
                           Address object_end,
                           Address rset_start,
                           ObjectSlotCallback copy_object_func) {
  Address object_address = object_start;
  Address rset_address = rset_start;
  int set_bits_count = 0;

  // Loop over all the pointers in [object_start, object_end).
  while (object_address < object_end) {
    uint32_t rset_word = Memory::uint32_at(rset_address);
    if (rset_word != 0) {
      uint32_t result_rset = rset_word;
      for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
        // Do not dereference pointers at or past object_end.
        if ((rset_word & bitmask) != 0 && object_address < object_end) {
          Object** object_p = reinterpret_cast<Object**>(object_address);
          if (Heap::InNewSpace(*object_p)) {
            copy_object_func(reinterpret_cast<HeapObject**>(object_p));
          }
          // If this pointer does not need to be remembered anymore, clear
          // the remembered set bit.
          if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
          set_bits_count++;
        }
        object_address += kPointerSize;
      }
      // Update the remembered set if it has changed.
      if (result_rset != rset_word) {
        Memory::uint32_at(rset_address) = result_rset;
      }
    } else {
      // No bits in the word were set.  This is the common case.
      object_address += kPointerSize * kBitsPerInt;
    }
    rset_address += kIntSize;
  }
  return set_bits_count;
}

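// For scale (illustrative, assuming a 32-bit target with kPointerSize == 4
// and kBitsPerInt == 32): each remembered-set word guards 32 consecutive
// pointer slots, i.e. 128 bytes of object area, which is why a zero word
// lets the loop above skip ahead by kPointerSize * kBitsPerInt bytes at once.
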
void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
  ASSERT(Page::is_rset_in_use());
  ASSERT(space == old_pointer_space_ || space == map_space_);

  static void* paged_rset_histogram = StatsTable::CreateHistogram(
      "V8.RSetPaged",
      0,
      Page::kObjectAreaSize / kPointerSize,
      30);

  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* page = it.next();
    int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
                                 page->RSetStart(), copy_object_func);
    if (paged_rset_histogram != NULL) {
      StatsTable::AddHistogramSample(paged_rset_histogram, count);
    }
  }
}

bool Heap::IteratePointersInDirtyRegion(Address start,
                                        Address end,
                                        ObjectSlotCallback copy_object_func) {
  Address slot_address = start;
  bool pointers_to_new_space_found = false;

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (Heap::InNewSpace(*slot)) {
      ASSERT((*slot)->IsHeapObject());
      copy_object_func(reinterpret_cast<HeapObject**>(slot));
      if (Heap::InNewSpace(*slot)) {
        ASSERT((*slot)->IsHeapObject());
        pointers_to_new_space_found = true;
      }
    }
    slot_address += kPointerSize;
  }
  return pointers_to_new_space_found;
}

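// The slot is re-tested after the callback because copy_object_func updates
// it in place: if the target was promoted to old space, the slot no longer
// points into new space and the region can be considered clean; if the
// target was merely copied within new space, the region must stay dirty.
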
// Compute start address of the first map following given addr.
static inline Address MapStartAlign(Address addr) {
  Address page = Page::FromAddress(addr)->ObjectAreaStart();
  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}


// Compute end address of the first map preceding given addr.
static inline Address MapEndAlign(Address addr) {
  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
  return page + ((addr - page) / Map::kSize * Map::kSize);
}

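// Worked example (illustrative, assuming Map::kSize == 40): for addr at
// page + 100, MapStartAlign returns page + 120 (the first map boundary at or
// after addr) and MapEndAlign returns page + 80 (the last map boundary at or
// before addr).
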
static bool IteratePointersInDirtyMaps(Address start,
                                       Address end,
                                       ObjectSlotCallback copy_object_func) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  bool pointers_to_new_space_found = false;

  while (map_address < end) {
    ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
    ASSERT(Memory::Object_at(map_address)->IsMap());

    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

    if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
                                           pointer_fields_end,
                                           copy_object_func)) {
      pointers_to_new_space_found = true;
    }

    map_address += Map::kSize;
  }

  return pointers_to_new_space_found;
}

bool Heap::IteratePointersInDirtyMapsRegion(
    Address start,
    Address end,
    ObjectSlotCallback copy_object_func) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  bool contains_pointers_to_new_space = false;

  if (map_aligned_start != start) {
    Address prev_map = map_aligned_start - Map::kSize;
    ASSERT(Memory::Object_at(prev_map)->IsMap());

    Address pointer_fields_start =
        Max(start, prev_map + Map::kPointerFieldsBeginOffset);

    Address pointer_fields_end =
        Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);

    contains_pointers_to_new_space =
        IteratePointersInDirtyRegion(pointer_fields_start,
                                     pointer_fields_end,
                                     copy_object_func)
        || contains_pointers_to_new_space;
  }

  contains_pointers_to_new_space =
      IteratePointersInDirtyMaps(map_aligned_start,
                                 map_aligned_end,
                                 copy_object_func)
      || contains_pointers_to_new_space;

  if (map_aligned_end != end) {
    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());

    Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;

    Address pointer_fields_end =
        Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);

    contains_pointers_to_new_space =
        IteratePointersInDirtyRegion(pointer_fields_start,
                                     pointer_fields_end,
                                     copy_object_func)
        || contains_pointers_to_new_space;
  }

  return contains_pointers_to_new_space;
}

void Heap::IterateAndMarkPointersToNewSpace(Address start,
                                            Address end,
                                            ObjectSlotCallback callback) {
  Address slot_address = start;
  Page* page = Page::FromAddress(start);

  uint32_t marks = page->GetRegionMarks();

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (Heap::InNewSpace(*slot)) {
      ASSERT((*slot)->IsHeapObject());
      callback(reinterpret_cast<HeapObject**>(slot));
      if (Heap::InNewSpace(*slot)) {
        ASSERT((*slot)->IsHeapObject());
        marks |= page->GetRegionMaskForAddress(slot_address);
      }
    }
    slot_address += kPointerSize;
  }

  page->SetRegionMarks(marks);
}

uint32_t Heap::IterateDirtyRegions(
    uint32_t marks,
    Address area_start,
    Address area_end,
    DirtyRegionCallback visit_dirty_region,
    ObjectSlotCallback copy_object_func) {
  uint32_t newmarks = 0;
  uint32_t mask = 1;

  if (area_start >= area_end) {
    return newmarks;
  }

  Address region_start = area_start;

  // area_start does not necessarily coincide with start of the first region.
  // Thus to calculate the beginning of the next region we have to align
  // area_start by Page::kRegionSize.
  Address second_region =
      reinterpret_cast<Address>(
          reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
          ~Page::kRegionAlignmentMask);

  // Next region might be beyond area_end.
  Address region_end = Min(second_region, area_end);

  if (marks & mask) {
    if (visit_dirty_region(region_start, region_end, copy_object_func)) {
      newmarks |= mask;
    }
  }
  mask <<= 1;

  // Iterate subsequent regions which fully lie inside [area_start, area_end[.
  region_start = region_end;
  region_end = region_start + Page::kRegionSize;

  while (region_end <= area_end) {
    if (marks & mask) {
      if (visit_dirty_region(region_start, region_end, copy_object_func)) {
        newmarks |= mask;
      }
    }

    region_start = region_end;
    region_end = region_start + Page::kRegionSize;

    mask <<= 1;
  }

  if (region_start != area_end) {
    // A small piece of area is left unvisited because area_end does not
    // coincide with a region end.  Check whether the region covering the
    // last part of the area is dirty.
    if (marks & mask) {
      if (visit_dirty_region(region_start, area_end, copy_object_func)) {
        newmarks |= mask;
      }
    }
  }

  return newmarks;
}

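// Region bookkeeping sketch (names as in this file): bit N of a page's
// region-marks word corresponds to the N-th Page::kRegionSize-aligned region
// of the object area.  The first and last regions may be only partially
// covered by [area_start, area_end[, and a bit is carried over into newmarks
// only when visit_dirty_region reports that the region still holds pointers
// into new space.
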
void Heap::IterateDirtyRegions(
    PagedSpace* space,
    DirtyRegionCallback visit_dirty_region,
    ObjectSlotCallback copy_object_func,
    ExpectedPageWatermarkState expected_page_watermark_state) {

  PageIterator it(space, PageIterator::PAGES_IN_USE);

  while (it.has_next()) {
    Page* page = it.next();
    uint32_t marks = page->GetRegionMarks();

    if (marks != Page::kAllRegionsCleanMarks) {
      Address start = page->ObjectAreaStart();

      // Do not try to visit pointers beyond page allocation watermark.
      // Page can contain garbage pointers there.
      Address end;

      if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
          page->IsWatermarkValid()) {
        end = page->AllocationWatermark();
      } else {
        end = page->CachedAllocationWatermark();
      }

      ASSERT(space == old_pointer_space_ ||
             (space == map_space_ &&
              ((page->ObjectAreaStart() - end) % Map::kSize == 0)));

      page->SetRegionMarks(IterateDirtyRegions(marks,
                                               start,
                                               end,
                                               visit_dirty_region,
                                               copy_object_func));
    }

    // Mark page watermark as invalid to maintain watermark validity invariant.
    // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
    page->InvalidateWatermark(true);
  }
}

static int CountTotalHolesSize() {
  int holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->AvailableFree();
  }
  return holes_size;
}

GCTracer::GCTracer()
    : start_time_(0.0),
      full_gc_count_(0),
      is_compacting_(false),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = MarkCompactCollector::HasCompacted();
  previous_marked_count_ = MarkCompactCollector::previous_marked_count();
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = Heap::SizeOfObjects();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;

  if (last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
  }
}

GCTracer::~GCTracer() {
  // Printf ONE line iff flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (last_gc_end_timestamp_ == 0);

  alive_after_last_gc_ = Heap::SizeOfObjects();
  last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    max_gc_pause_ = Max(max_gc_pause_, time);
    max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
    if (!first_gc) {
      min_in_mutator_ = Min(min_in_mutator_,
                            static_cast<int>(spent_in_mutator_));
    }
  }

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms.\n", time);
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));

    PrintF("total_size_before=%d ", start_size_);
    PrintF("total_size_after=%d ", Heap::SizeOfObjects());
    PrintF("holes_size_before=%d ", in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%d ", CountTotalHolesSize());

    PrintF("allocated=%d ", allocated_since_last_gc_);
    PrintF("promoted=%d ", promoted_objects_size_);

    PrintF("\n");
  }

#if defined(ENABLE_LOGGING_AND_PROFILING)
  Heap::PrintShortHeapStatistics();
#endif
}
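
// Example output from the two branches above (values illustrative only):
//   default:         Mark-sweep 8.1 -> 6.4 MB, 32 ms.
//   --trace-gc-nvp:  pause=32 mutator=410 gc=ms external=0 mark=20 sweep=8
//                    compact=0 total_size_before=... holes_size_before=...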