@@ -81 +81 @@
       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
-      initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      initial_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
-      max_executable_size_(128l * LUMP_OF_MEMORY),
+      max_executable_size_(256l * LUMP_OF_MEMORY),
@@ -87 +87 @@
       // Variables set based on semispace_size_ and old_generation_size_ in
       // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
@@ -144 +144 @@
       number_idle_notifications_(0),
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
+      idle_notification_will_schedule_next_gc_(false),
+      mark_sweeps_since_idle_round_started_(0),
+      ms_count_at_last_idle_notification_(0),
+      gc_count_at_last_idle_gc_(0),
+      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
       promotion_queue_(this),
       configured_(false),
       chunks_queued_for_free_(NULL) {
@@ -197 +202 @@
 intptr_t Heap::CommittedMemoryExecutable() {
-  if (!HasBeenSetup()) return 0;
+  if (!HasBeenSetUp()) return 0;
@@ -200 +205 @@
   return isolate()->memory_allocator()->SizeExecutable();
@@ -204 +209 @@
 intptr_t Heap::Available() {
-  if (!HasBeenSetup()) return 0;
+  if (!HasBeenSetUp()) return 0;
@@ -207 +212 @@
   return new_space_.Available() +
       old_pointer_space_->Available() +
@@ -234 +239 @@
-GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
+GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
+                                              const char** reason) {
   // Is global GC requested?
   if (space != NEW_SPACE || FLAG_gc_global) {
     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
+    *reason = "GC in old space requested";
     return MARK_COMPACTOR;
@@ -241 +248 @@
   // Is enough data promoted to justify a global GC?
   if (OldGenerationPromotionLimitReached()) {
     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
+    *reason = "promotion limit reached";
     return MARK_COMPACTOR;
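
The new `reason` out-parameter threads a human-readable trigger description from collector selection into tracing. A minimal standalone sketch of the pattern (illustrative only, not the V8 source; the triggers and printout are simplified):

    #include <cstdio>

    // Sketch: the collector choice and the reason it was chosen travel
    // together, so a later trace line can say *why* a full GC ran.
    enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };

    static GarbageCollector SelectGarbageCollector(bool old_space_requested,
                                                   bool promotion_limit_reached,
                                                   const char** reason) {
      if (old_space_requested) {
        *reason = "GC in old space requested";
        return MARK_COMPACTOR;
      }
      if (promotion_limit_reached) {
        *reason = "promotion limit reached";
        return MARK_COMPACTOR;
      }
      *reason = NULL;  // A plain scavenge needs no justification in the trace.
      return SCAVENGER;
    }

    int main() {
      const char* reason = NULL;
      GarbageCollector collector = SelectGarbageCollector(false, true, &reason);
      std::printf("collector=%s reason=%s\n",
                  collector == MARK_COMPACTOR ? "mark-compact" : "scavenge",
                  reason != NULL ? reason : "(none)");
      return 0;
    }
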
@@ -429 +440 @@
-void Heap::CollectAllGarbage(int flags) {
+void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   mark_compact_collector_.SetFlags(flags);
-  CollectGarbage(OLD_POINTER_SPACE);
+  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
   mark_compact_collector_.SetFlags(kNoGCFlags);
@@ -439 +450 @@
-void Heap::CollectAllAvailableGarbage() {
+void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
@@ -448 +459 @@
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
-  mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
+  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
+                                     kReduceMemoryFootprintMask);
   isolate_->compilation_cache()->Clear();
   const int kMaxNumberOfAttempts = 7;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
-    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
+    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
@@ -459 +471 @@
   mark_compact_collector()->SetFlags(kNoGCFlags);
   new_space_.Shrink();
@@ -461 +475 @@
   incremental_marking()->UncommitMarkingDeque();
@@ -465 +479 @@
-bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
+bool Heap::CollectGarbage(AllocationSpace space,
+                          GarbageCollector collector,
+                          const char* gc_reason,
+                          const char* collector_reason) {
   // The VM is in the GC state until exiting this function.
   VMState state(isolate_, GC);
@@ -490 +507 @@
     PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
@@ -492 +509 @@
     collector = SCAVENGER;
+    collector_reason = "incremental marking delaying mark-sweep";
@@ -495 +513 @@
   bool next_gc_likely_to_collect_more = false;
@@ -497 +515 @@
-  { GCTracer tracer(this);
+  { GCTracer tracer(this, gc_reason, collector_reason);
     GarbageCollectionPrologue();
     // The GC count was incremented in the prologue. Tell the tracer about
// The GC count was incremented in the prologue. Tell the tracer about
581
599
while (gc_performed && counter++ < kThreshold) {
582
600
gc_performed = false;
583
601
if (!new_space->ReserveSpace(new_space_size)) {
584
Heap::CollectGarbage(NEW_SPACE);
602
Heap::CollectGarbage(NEW_SPACE,
603
"failed to reserve space in the new space");
585
604
gc_performed = true;
587
606
if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
588
Heap::CollectGarbage(OLD_POINTER_SPACE);
607
Heap::CollectGarbage(OLD_POINTER_SPACE,
608
"failed to reserve space in the old pointer space");
589
609
gc_performed = true;
591
611
if (!(old_data_space->ReserveSpace(data_space_size))) {
592
Heap::CollectGarbage(OLD_DATA_SPACE);
612
Heap::CollectGarbage(OLD_DATA_SPACE,
613
"failed to reserve space in the old data space");
593
614
gc_performed = true;
595
616
if (!(code_space->ReserveSpace(code_space_size))) {
596
Heap::CollectGarbage(CODE_SPACE);
617
Heap::CollectGarbage(CODE_SPACE,
618
"failed to reserve space in the code space");
597
619
gc_performed = true;
599
621
if (!(map_space->ReserveSpace(map_space_size))) {
600
Heap::CollectGarbage(MAP_SPACE);
622
Heap::CollectGarbage(MAP_SPACE,
623
"failed to reserve space in the map space");
601
624
gc_performed = true;
603
626
if (!(cell_space->ReserveSpace(cell_space_size))) {
604
Heap::CollectGarbage(CELL_SPACE);
627
Heap::CollectGarbage(CELL_SPACE,
628
"failed to reserve space in the cell space");
605
629
gc_performed = true;
607
631
// We add a slack-factor of 2 in order to have space for a series of
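
Each failing reservation now tags its GC with the space that ran out. A generic model of this reserve-then-collect retry loop (standalone sketch; the space names, sizes, and reclaim amounts are made up):

    #include <cstdio>

    struct Space {
      const char* name;
      int available;
    };

    // Non-destructive reservation check, standing in for Space::ReserveSpace().
    static bool ReserveSpace(const Space& space, int needed) {
      return space.available >= needed;
    }

    // Stand-in for a GC tagged with a reason string naming the failing space.
    static void CollectGarbage(Space* space, const char* reason) {
      std::printf("GC: %s\n", reason);
      space->available += 16;  // pretend the GC reclaimed some memory
    }

    int main() {
      Space spaces[2] = { {"new space", 10}, {"old pointer space", 4} };
      const char* reasons[2] = {
        "failed to reserve space in the new space",
        "failed to reserve space in the old pointer space"
      };
      const int needed = 8;
      const int kThreshold = 20;
      int counter = 0;
      bool gc_performed = true;
      while (gc_performed && counter++ < kThreshold) {
        gc_performed = false;
        for (int i = 0; i < 2; i++) {
          if (!ReserveSpace(spaces[i], needed)) {
            CollectGarbage(&spaces[i], reasons[i]);
            gc_performed = true;
          }
        }
      }
      return 0;
    }
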
@@ -688 +713 @@
       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
           start_new_space_size;
@@ -691 +716 @@
-  if (survival_rate > kYoungSurvivalRateThreshold) {
+  if (survival_rate > kYoungSurvivalRateHighThreshold) {
     high_survival_rate_period_length_++;
@@ -694 +719 @@
     high_survival_rate_period_length_ = 0;
@@ +722 @@
+  if (survival_rate < kYoungSurvivalRateLowThreshold) {
+    low_survival_rate_period_length_++;
@@ +725 @@
+    low_survival_rate_period_length_ = 0;
@@ -697 +728 @@
   double survival_rate_diff = survival_rate_ - survival_rate;
@@ -699 +730 @@
   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
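
The split into separate high and low thresholds drives two period counters instead of one. A worked example of the arithmetic (the threshold values below are assumptions for illustration; the real constants are declared elsewhere in heap.cc):

    #include <cstdio>

    static const double kYoungSurvivalRateHighThreshold = 90.0;  // assumed
    static const double kYoungSurvivalRateLowThreshold = 10.0;   // assumed

    int main() {
      int young_survivors_after_last_gc = 450 * 1024;
      int start_new_space_size = 512 * 1024;
      double survival_rate =
          (static_cast<double>(young_survivors_after_last_gc) * 100) /
          start_new_space_size;
      std::printf("survival rate: %.1f%%\n", survival_rate);  // ~87.9%
      // Here the rate sits between the two thresholds, so in the real code
      // both streak counters would reset rather than grow.
      std::printf("high streak grows: %d, low streak grows: %d\n",
                  survival_rate > kYoungSurvivalRateHighThreshold,
                  survival_rate < kYoungSurvivalRateLowThreshold);
      return 0;
    }
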
@@ -754 +785 @@
     UpdateSurvivalRateTrend(start_new_space_size);
@@ -756 @@
-    if (!new_space_high_promotion_mode_active_ &&
-        new_space_.Capacity() == new_space_.MaximumCapacity() &&
-        IsStableOrIncreasingSurvivalTrend() &&
-        IsHighSurvivalRate()) {
-      // Stable high survival rates even though young generation is at
-      // maximum capacity indicates that most objects will be promoted.
-      // To decrease scavenger pauses and final mark-sweep pauses, we
-      // have to limit maximal capacity of the young generation.
-      new_space_high_promotion_mode_active_ = true;
@@ -766 @@
-        PrintF("Limited new space size due to high promotion rate: %d MB\n",
-               new_space_.InitialCapacity() / MB);
@@ -769 @@
-    } else if (new_space_high_promotion_mode_active_ &&
-               IsDecreasingSurvivalTrend() &&
-               !IsHighSurvivalRate()) {
-      // Decreasing low survival rates might indicate that the above high
-      // promotion mode is over and we should allow the young generation
@@ -775 @@
-      new_space_high_promotion_mode_active_ = false;
@@ -777 @@
-        PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
-               new_space_.MaximumCapacity() / MB);
@@ -782 +787 @@
     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
@@ -784 +789 @@
     if (high_survival_rate_during_scavenges &&
@@ -808 +813 @@
     UpdateSurvivalRateTrend(start_new_space_size);
@@ +816 @@
+    if (!new_space_high_promotion_mode_active_ &&
+        new_space_.Capacity() == new_space_.MaximumCapacity() &&
+        IsStableOrIncreasingSurvivalTrend() &&
+        IsHighSurvivalRate()) {
+      // Stable high survival rates even though young generation is at
+      // maximum capacity indicates that most objects will be promoted.
+      // To decrease scavenger pauses and final mark-sweep pauses, we
+      // have to limit maximal capacity of the young generation.
+      new_space_high_promotion_mode_active_ = true;
@@ +826 @@
+        PrintF("Limited new space size due to high promotion rate: %d MB\n",
+               new_space_.InitialCapacity() / MB);
@@ +829 @@
+    } else if (new_space_high_promotion_mode_active_ &&
+               IsStableOrDecreasingSurvivalTrend() &&
+               IsLowSurvivalRate()) {
+      // Decreasing low survival rates might indicate that the above high
+      // promotion mode is over and we should allow the young generation
@@ +835 @@
+      new_space_high_promotion_mode_active_ = false;
@@ +837 @@
+        PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
+               new_space_.MaximumCapacity() / MB);
@@ -811 +842 @@
   if (new_space_high_promotion_mode_active_ &&
       new_space_.Capacity() > new_space_.InitialCapacity()) {
     new_space_.Shrink();
@@ -1184 +1216 @@
   // Update how much has survived scavenge.
   IncrementYoungSurvivorsCounter(static_cast<int>(
-      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
+      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
@@ -1188 +1220 @@
   LOG(isolate_, ResourceEvent("scavenge", "end"));
@@ -1190 +1222 @@
   gc_state_ = NOT_IN_GC;
@@ +1224 @@
+  scavenges_since_last_idle_round_++;
@@ +1390 @@
+void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
+  AssertNoAllocation no_allocation;
@@ +1393 @@
+  class VisitorAdapter : public ObjectVisitor {
@@ +1395 @@
+    explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
+        : visitor_(visitor) {}
+    virtual void VisitPointers(Object** start, Object** end) {
+      for (Object** p = start; p < end; p++) {
+        if ((*p)->IsExternalString()) {
+          visitor_->VisitExternalString(Utils::ToLocal(
+              Handle<String>(String::cast(*p))));
@@ +1406 @@
+    v8::ExternalResourceVisitor* visitor_;
+  } visitor_adapter(visitor);
+  external_string_table_.Iterate(&visitor_adapter);
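
Embedders reach this new function through the public API. A hedged usage sketch (v8::ExternalResourceVisitor and V8::VisitExternalResources are the public names that accompany this change; verify the exact signatures against this revision's v8.h):

    #include <v8.h>
    #include <cstdio>

    // Counts external strings currently held by the heap.
    class ExternalStringCounter : public v8::ExternalResourceVisitor {
     public:
      ExternalStringCounter() : count_(0) {}
      virtual void VisitExternalString(v8::Handle<v8::String> string) {
        count_++;
      }
      int count() const { return count_; }

     private:
      int count_;
    };

    void CountExternalStrings() {
      ExternalStringCounter counter;
      v8::V8::VisitExternalResources(&counter);
      std::printf("external strings: %d\n", counter.count());
    }
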
@@ -1356 +1412 @@
 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
@@ -1358 +1414 @@
   static inline void VisitPointer(Heap* heap, Object** p) {
@@ +1927 @@
+MaybeObject* Heap::AllocateAccessorPair() {
@@ +1929 @@
+  { MaybeObject* maybe_result = AllocateStruct(ACCESSOR_PAIR_TYPE);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
@@ +1932 @@
+  AccessorPair* accessors = AccessorPair::cast(result);
+  // Later we will have to distinguish between undefined and the hole...
+  // accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
+  // accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
@@ -1871 +1940 @@
 const Heap::StringTypeTable Heap::string_type_table[] = {
 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
   {type, size, k##camel_name##MapRootIndex},
@@ -2197 +2266 @@
   // This version of AllocateHeapNumber is optimized for
   // allocation in new space.
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   Object* result;
   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2205 +2274 @@
-  HeapObject::cast(result)->set_map_unsafe(heap_number_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
   HeapNumber::cast(result)->set_value(value);
@@ -2317 +2387 @@
   set_infinity_value(HeapNumber::cast(obj));
@@ +2389 @@
+  // The hole has not been created yet, but we want to put something
+  // predictable in the gaps in the symbol table, so lets make that Smi zero.
+  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
@@ -2319 +2393 @@
   // Allocate initial symbol table.
   { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2359 +2433 @@
   set_the_hole_value(Oddball::cast(obj));
@@ -2361 +2435 @@
   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
@@ -2363 +2437 @@
                                            Oddball::kArgumentMarker);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2366 +2440 @@
   set_arguments_marker(Oddball::cast(obj));
@@ -2368 +2442 @@
   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
@@ -2370 +2444 @@
                                            Oddball::kOther);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2373 +2447 @@
   set_no_interceptor_result_sentinel(obj);
@@ -2375 +2449 @@
   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
@@ -2377 +2451 @@
                                            Oddball::kOther);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2380 +2454 @@
   set_termination_exception(obj);
@@ -2382 @@
-  { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
@@ -2385 @@
-    if (!maybe_obj->ToObject(&obj)) return false;
@@ -2387 @@
-  set_frame_alignment_marker(Oddball::cast(obj));
-  STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
@@ -2390 +2456 @@
   // Allocate the empty string.
   { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2423 +2489 @@
   // Allocate the code_stubs dictionary. The initial size is set to avoid
   // expanding the dictionary during bootstrapping.
-  { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
+  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2428 +2494 @@
-  set_code_stubs(NumberDictionary::cast(obj));
+  set_code_stubs(UnseededNumberDictionary::cast(obj));
@@ -2430 +2497 @@
   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
   // is set to avoid expanding the dictionary during bootstrapping.
-  { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
+  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2435 +2502 @@
-  set_non_monomorphic_cache(NumberDictionary::cast(obj));
+  set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
@@ -2437 +2504 @@
   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
     if (!maybe_obj->ToObject(&obj)) return false;
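
The switch to UnseededNumberDictionary plausibly matters because these tables are built during bootstrapping and keyed by fixed integer ids, so their hashing should not depend on the per-heap random seed that Heap::SetUp installs later (see that hunk below). A toy illustration of the difference, with made-up hash functions:

    #include <cstdio>

    // Seeded hashing: the same key hashes differently under different seeds,
    // so seeded tables cannot be built before the seed exists (or be shared
    // across heaps with different seeds).
    static unsigned SeededHash(unsigned key, unsigned seed) {
      return (key ^ seed) * 2654435761u;
    }

    // Unseeded hashing: stable across runs and across heaps.
    static unsigned UnseededHash(unsigned key) {
      return key * 2654435761u;
    }

    int main() {
      std::printf("seeded, seed=1: %u\n", SeededHash(42, 1));
      std::printf("seeded, seed=7: %u\n", SeededHash(42, 7));
      std::printf("unseeded:       %u (always)\n", UnseededHash(42));
      return 0;
    }
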
@@ -2729 +2796 @@
 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate foreigns in paged spaces.
-  STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   Foreign* result;
   MaybeObject* maybe_result = Allocate(foreign_map(), space);
@@ -2881 +2948 @@
   bool is_ascii_data_in_two_byte_string = false;
   if (!is_ascii) {
     // At least one of the strings uses two-byte representation so we
-    // can't use the fast case code for short ascii strings below, but
-    // we can try to save memory if all chars actually fit in ascii.
+    // can't use the fast case code for short ASCII strings below, but
+    // we can try to save memory if all chars actually fit in ASCII.
     is_ascii_data_in_two_byte_string =
         first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
     if (is_ascii_data_in_two_byte_string) {
@@ -2893 +2960 @@
   // If the resulting string is small make a flat string.
-  if (length < String::kMinNonFlatLength) {
+  if (length < ConsString::kMinLength) {
     // Note that neither of the two inputs can be a slice because:
-    STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
+    STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
     ASSERT(first->IsFlat());
     ASSERT(second->IsFlat());
     if (is_ascii) {
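
Renaming String::kMinNonFlatLength to ConsString::kMinLength ties the constant to the decision it controls: below the threshold the two halves are copied into one flat string, because a two-pointer cons cell only pays off for longer results. A standalone sketch of that decision (the threshold value here is an assumption):

    #include <cstdio>
    #include <cstring>

    static const int kConsMinLength = 13;  // assumed; see ConsString::kMinLength

    int main() {
      const char* first = "foo";
      const char* second = "bar";
      int length = static_cast<int>(std::strlen(first) + std::strlen(second));
      if (length < kConsMinLength) {
        // Small result: flatten eagerly into one contiguous buffer.
        char flat[64];
        std::strcpy(flat, first);
        std::strcat(flat, second);
        std::printf("flat copy: %s\n", flat);
      } else {
        // Large result: a cons cell holding (first, second) avoids the copy.
        std::printf("would allocate ConsString(first, second)\n");
      }
      return 0;
    }
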
@@ -3145 +3212 @@
   int size = ByteArray::SizeFor(length);
   Object* result;
-  { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
         ? old_data_space_->AllocateRaw(size)
         : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3153 +3220 @@
-  reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
@@ -3154 +3222 @@
   reinterpret_cast<ByteArray*>(result)->set_length(length);
@@ -3163 +3231 @@
   int size = ByteArray::SizeFor(length);
   AllocationSpace space =
-      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
+      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
   Object* result;
   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3171 +3239 @@
-  reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
@@ -3172 +3241 @@
   reinterpret_cast<ByteArray*>(result)->set_length(length);
@@ -3178 +3247 @@
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
   if (size == kPointerSize) {
-    filler->set_map_unsafe(one_pointer_filler_map());
+    filler->set_map_no_write_barrier(one_pointer_filler_map());
   } else if (size == 2 * kPointerSize) {
-    filler->set_map_unsafe(two_pointer_filler_map());
+    filler->set_map_no_write_barrier(two_pointer_filler_map());
@@ -3185 +3254 @@
-    filler->set_map_unsafe(free_space_map());
+    filler->set_map_no_write_barrier(free_space_map());
     FreeSpace::cast(filler)->set_size(size);
@@ -3227 +3296 @@
   MaybeObject* maybe_result;
   // Large code objects and code objects which should stay at a fixed address
   // are allocated in large object space.
-  if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
+  if (obj_size > code_space()->AreaSize() || immovable) {
     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
@@ -3233 +3302 @@
     maybe_result = code_space_->AllocateRaw(obj_size);
@@ -3250 +3319 @@
   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  code->set_next_code_flushing_candidate(undefined_value());
+  code->set_gc_metadata(Smi::FromInt(0));
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
   if (!self_reference.is_null()) {
@@ -3275 +3344 @@
   // Allocate an object the same size as the code object.
   int obj_size = code->Size();
   MaybeObject* maybe_result;
-  if (obj_size > MaxObjectSizeInPagedSpace()) {
+  if (obj_size > code_space()->AreaSize()) {
     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
@@ -3281 +3350 @@
     maybe_result = code_space_->AllocateRaw(obj_size);
@@ -3591 +3660 @@
   // TODO(1240798): Initialize the object's body using valid initial values
   // according to the object's initial map. For example, if the map's
   // instance type is JS_ARRAY_TYPE, the length field should be initialized
-  // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
-  // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
+  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
+  // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
   // verification code has to cope with (temporarily) invalid objects. See
   // for example, JSArray::JSArrayVerify).
   Object* filler;
@@ -4090 +4159 @@
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
         ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
         : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4098 +4167 @@
-  reinterpret_cast<HeapObject*>(result)->set_map_unsafe(map);
+  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
   // Set length and hash fields of the allocated string.
   String* answer = String::cast(result);
   answer->set_length(chars);
@@ -4126 +4195 @@
   if (size > kMaxObjectSizeInNewSpace) {
     // Allocate in large object space, retry space will be ignored.
     space = LO_SPACE;
-  } else if (size > MaxObjectSizeInPagedSpace()) {
+  } else if (size > Page::kMaxNonCodeHeapObjectSize) {
     // Allocate in new space, retry in large object space.
     retry_space = LO_SPACE;
@@ -4133 +4202 @@
-  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+  } else if (space == OLD_DATA_SPACE &&
+             size > Page::kMaxNonCodeHeapObjectSize) {
     space = LO_SPACE;
@@ -4136 +4206 @@
   Object* result;
@@ -4161 +4231 @@
   if (size > kMaxObjectSizeInNewSpace) {
     // Allocate in large object space, retry space will be ignored.
     space = LO_SPACE;
-  } else if (size > MaxObjectSizeInPagedSpace()) {
+  } else if (size > Page::kMaxNonCodeHeapObjectSize) {
     // Allocate in new space, retry in large object space.
     retry_space = LO_SPACE;
@@ -4168 +4238 @@
-  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+  } else if (space == OLD_DATA_SPACE &&
+             size > Page::kMaxNonCodeHeapObjectSize) {
     space = LO_SPACE;
@@ -4171 +4242 @@
   Object* result;
@@ -4220 +4292 @@
   if (InNewSpace(obj)) {
     HeapObject* dst = HeapObject::cast(obj);
-    dst->set_map_unsafe(map);
+    dst->set_map_no_write_barrier(map);
     CopyBlock(dst->address() + kPointerSize,
               src->address() + kPointerSize,
               FixedArray::SizeFor(len) - kPointerSize);
@@ -4228 +4300 @@
-  HeapObject::cast(obj)->set_map_unsafe(map);
+  HeapObject::cast(obj)->set_map_no_write_barrier(map);
   FixedArray* result = FixedArray::cast(obj);
   result->set_length(len);
@@ -4284 +4356 @@
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_POINTER_SPACE &&
-             size > MaxObjectSizeInPagedSpace()) {
+             size > Page::kMaxNonCodeHeapObjectSize) {
     // Too big for old pointer space.
     space = LO_SPACE;
@@ -4292 +4364 @@
   AllocationSpace retry_space =
-      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
+      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
@@ -4295 +4367 @@
   return AllocateRaw(size, space, retry_space);
@@ -4311 +4383 @@
   if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4314 +4386 @@
-  HeapObject::cast(result)->set_map_unsafe(heap->fixed_array_map());
+  HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
   FixedArray* array = FixedArray::cast(result);
   array->set_length(length);
   MemsetPointer(array->data_start(), filler, length);
@@ -4395 +4468 @@
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_DATA_SPACE &&
-             size > MaxObjectSizeInPagedSpace()) {
+             size > Page::kMaxNonCodeHeapObjectSize) {
     // Too big for old data space.
     space = LO_SPACE;
@@ -4403 +4476 @@
   AllocationSpace retry_space =
-      (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
+      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
@@ -4406 +4479 @@
   return AllocateRaw(size, space, retry_space);
@@ -4439 +4513 @@
   if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4441 +4515 @@
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_unsafe(function_context_map());
+  context->set_map_no_write_barrier(function_context_map());
   context->set_closure(function);
   context->set_previous(function->context());
   context->set_extension(NULL);
@@ -4459 +4533 @@
   if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4461 +4535 @@
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_unsafe(catch_context_map());
+  context->set_map_no_write_barrier(catch_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(name);
@@ -4477 +4551 @@
   if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4479 +4553 @@
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_unsafe(with_context_map());
+  context->set_map_no_write_barrier(with_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(extension);
@@ -4495 +4569 @@
   if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4497 +4571 @@
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_unsafe(block_context_map());
+  context->set_map_no_write_barrier(block_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(scope_info);
@@ -4545 +4619 @@
 void Heap::EnsureHeapIsIterable() {
   ASSERT(IsAllocationAllowed());
   if (!IsHeapIterable()) {
-    CollectAllGarbage(kMakeHeapIterableMask);
+    CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
@@ -4550 +4624 @@
   ASSERT(IsHeapIterable());
@@ -4554 +4628 @@
-bool Heap::IdleNotification() {
+bool Heap::IdleNotification(int hint) {
+  if (hint >= 1000) return IdleGlobalGC();
+  if (contexts_disposed_ > 0 || !FLAG_incremental_marking ||
+      FLAG_expose_gc || Serializer::enabled()) {
@@ +4635 @@
+  // By doing small chunks of GC work in each IdleNotification,
+  // perform a round of incremental GCs and after that wait until
+  // the mutator creates enough garbage to justify a new round.
+  // An incremental GC progresses as follows:
+  // 1. many incremental marking steps,
+  // 2. one old space mark-sweep-compact,
+  // 3. many lazy sweep steps.
+  // Use mark-sweep-compact events to count incremental GCs in a round.
@@ +4644 @@
+  intptr_t size_factor = Min(Max(hint, 30), 1000) / 10;
+  // The size factor is in range [3..100].
+  intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
@@ +4648 @@
+  if (incremental_marking()->IsStopped()) {
+    if (!IsSweepingComplete() &&
+        !AdvanceSweepers(static_cast<int>(step_size))) {
@@ +4655 @@
+  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+    if (EnoughGarbageSinceLastIdleRound()) {
@@ +4663 @@
+  int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
+  mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
+  ms_count_at_last_idle_notification_ = ms_count_;
@@ +4667 @@
+  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
@@ +4672 @@
+  if (incremental_marking()->IsStopped()) {
+    if (hint < 1000 && !WorthStartingGCWhenIdle()) {
@@ +4677 @@
+    incremental_marking()->Start();
@@ +4680 @@
+  // This flag prevents incremental marking from requesting GC via stack guard
+  idle_notification_will_schedule_next_gc_ = true;
+  incremental_marking()->Step(step_size);
+  idle_notification_will_schedule_next_gc_ = false;
@@ +4685 @@
+  if (incremental_marking()->IsComplete()) {
+    bool uncommit = false;
+    if (gc_count_at_last_idle_gc_ == gc_count_) {
+      // No GC since the last full GC, the mutator is probably not active.
+      isolate_->compilation_cache()->Clear();
@@ +4692 @@
+    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+    gc_count_at_last_idle_gc_ = gc_count_;
@@ +4695 @@
+      new_space_.Shrink();
+      UncommitFromSpace();
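
The fields added in the constructor hunk above (mark_sweeps_since_idle_round_started_, gc_count_at_last_idle_gc_, scavenges_since_last_idle_round_) implement the round accounting the comment describes. A standalone model of the control flow (the constants are assumptions; the real ones are declared in heap.h):

    #include <cstdio>

    static const int kMaxMarkSweepsInIdleRound = 7;  // assumed value
    static const int kIdleScavengeThreshold = 5;     // assumed value

    struct IdleState {
      int mark_sweeps_since_idle_round_started;
      int scavenges_since_last_idle_round;
    };

    // Returns true when no further idle work is wanted (the round is over and
    // the mutator has not yet produced enough garbage for a new one).
    static bool IdleNotification(IdleState* state) {
      if (state->mark_sweeps_since_idle_round_started >=
          kMaxMarkSweepsInIdleRound) {
        if (state->scavenges_since_last_idle_round < kIdleScavengeThreshold) {
          return true;  // round finished, not enough garbage since
        }
        // Enough garbage: start a fresh round.
        state->mark_sweeps_since_idle_round_started = 0;
        state->scavenges_since_last_idle_round = 0;
      }
      // One unit of idle work; in V8 this is an incremental marking step whose
      // eventual mark-sweep completion bumps the counter.
      state->mark_sweeps_since_idle_round_started++;
      return false;
    }

    int main() {
      IdleState state = {0, 0};
      for (int i = 0; i < 10; i++) {
        std::printf("idle notification %d -> done=%d\n",
                    i, IdleNotification(&state));
      }
      return 0;
    }
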
@@ +4703 @@
+bool Heap::IdleGlobalGC() {
@@ -4555 +4704 @@
   static const int kIdlesBeforeScavenge = 4;
   static const int kIdlesBeforeMarkSweep = 7;
   static const int kIdlesBeforeMarkCompact = 8;
@@ -4581 +4730 @@
   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
     if (contexts_disposed_ > 0) {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(kNoGCFlags);
+      CollectAllGarbage(kReduceMemoryFootprintMask,
+                        "idle notification: contexts disposed");
@@ -4586 +4736 @@
-      CollectGarbage(NEW_SPACE);
+      CollectGarbage(NEW_SPACE, "idle notification");
@@ -4588 +4738 @@
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
@@ -4593 +4743 @@
     // generated code for cached functions.
     isolate_->compilation_cache()->Clear();
@@ -4596 +4746 @@
-    CollectAllGarbage(kNoGCFlags);
+    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
@@ -4600 +4750 @@
   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
-    CollectAllGarbage(kNoGCFlags);
+    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
     number_idle_notifications_ = 0;
@@ -5007 +5158 @@
   while (pages.has_next()) {
     Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+    Object** current = reinterpret_cast<Object**>(page->area_start());
@@ -5011 +5162 @@
-    Address end = page->ObjectAreaEnd();
+    Address end = page->area_end();
@@ -5013 +5164 @@
     Object*** store_buffer_position = store_buffer()->Start();
     Object*** store_buffer_top = store_buffer()->Top();
@@ -5035 +5186 @@
   while (pages.has_next()) {
     Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+    Object** current = reinterpret_cast<Object**>(page->area_start());
@@ -5039 +5190 @@
-    Address end = page->ObjectAreaEnd();
+    Address end = page->area_end();
@@ -5041 +5192 @@
     Object*** store_buffer_position = store_buffer()->Start();
     Object*** store_buffer_top = store_buffer()->Top();
@@ -5089 +5240 @@
 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
-  v->Synchronize("symbol_table");
+  v->Synchronize(VisitorSynchronization::kSymbolTable);
   if (mode != VISIT_ALL_IN_SCAVENGE &&
       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
     external_string_table_.Iterate(v);
@@ -5097 +5248 @@
-  v->Synchronize("external_string_table");
+  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
@@ -5101 +5252 @@
 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
-  v->Synchronize("strong_root_list");
+  v->Synchronize(VisitorSynchronization::kStrongRootList);
@@ -5105 +5256 @@
   v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
-  v->Synchronize("symbol");
+  v->Synchronize(VisitorSynchronization::kSymbol);
@@ -5108 +5259 @@
   isolate_->bootstrapper()->Iterate(v);
-  v->Synchronize("bootstrapper");
+  v->Synchronize(VisitorSynchronization::kBootstrapper);
   isolate_->Iterate(v);
-  v->Synchronize("top");
+  v->Synchronize(VisitorSynchronization::kTop);
   Relocatable::Iterate(v);
-  v->Synchronize("relocatable");
+  v->Synchronize(VisitorSynchronization::kRelocatable);
@@ -5115 +5266 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate_->debug()->Iterate(v);
@@ -5118 +5269 @@
   isolate_->deoptimizer_data()->Iterate(v);
@@ -5121 +5272 @@
-  v->Synchronize("debug");
+  v->Synchronize(VisitorSynchronization::kDebug);
   isolate_->compilation_cache()->Iterate(v);
-  v->Synchronize("compilationcache");
+  v->Synchronize(VisitorSynchronization::kCompilationCache);
@@ -5125 +5276 @@
   // Iterate over local handles in handle scopes.
   isolate_->handle_scope_implementer()->Iterate(v);
-  v->Synchronize("handlescope");
+  v->Synchronize(VisitorSynchronization::kHandleScope);
@@ -5129 +5280 @@
   // Iterate over the builtin code objects and code stubs in the
   // heap. Note that it is not necessary to iterate over code objects
@@ -5147 +5298 @@
   isolate_->global_handles()->IterateAllRoots(v);
@@ -5150 +5301 @@
-  v->Synchronize("globalhandles");
+  v->Synchronize(VisitorSynchronization::kGlobalHandles);
@@ -5152 +5303 @@
   // Iterate over pointers being held by inactive threads.
   isolate_->thread_manager()->Iterate(v);
-  v->Synchronize("threadmanager");
+  v->Synchronize(VisitorSynchronization::kThreadManager);
@@ -5156 +5307 @@
   // Iterate over the pointers the Serialization/Deserialization code is
@@ -5175 +5326 @@
 bool Heap::ConfigureHeap(int max_semispace_size,
                          intptr_t max_old_gen_size,
                          intptr_t max_executable_size) {
-  if (HasBeenSetup()) return false;
+  if (HasBeenSetUp()) return false;
@@ -5180 +5331 @@
   if (max_semispace_size > 0) {
     if (max_semispace_size < Page::kPageSize) {
@@ +5445 @@
+intptr_t Heap::PromotedSpaceSizeOfObjects() {
+  return old_pointer_space_->SizeOfObjects()
+      + old_data_space_->SizeOfObjects()
+      + code_space_->SizeOfObjects()
+      + map_space_->SizeOfObjects()
+      + cell_space_->SizeOfObjects()
+      + lo_space_->SizeOfObjects();
@@ -5294 +5455 @@
 int Heap::PromotedExternalMemorySize() {
   if (amount_of_external_allocated_memory_
       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
@@ -5474 +5635 @@
 // goes wrong, just return false. The caller should check the results and
 // call Heap::TearDown() to release allocated memory.
@@ -5477 +5638 @@
-  // If the heap is not yet configured (eg, through the API), configure it.
+  // If the heap is not yet configured (e.g. through the API), configure it.
   // Configuration is based on the flags new-space-size (really the semispace
   // size) and old-space-size if set or the initial values of semispace_size_
   // and old_generation_size_ otherwise.
@@ -5495 +5656 @@
   MarkMapPointersAsEncoded(false);
@@ -5497 +5658 @@
-  // Setup memory allocator.
-  if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
+  // Set up memory allocator.
+  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
@@ -5502 +5662 @@
-  if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
+  // Set up new space.
+  if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
@@ -5519 +5680 @@
                    OLD_DATA_SPACE,
                    NOT_EXECUTABLE);
   if (old_data_space_ == NULL) return false;
-  if (!old_data_space_->Setup()) return false;
+  if (!old_data_space_->SetUp()) return false;
@@ -5524 +5685 @@
   // Initialize the code space, set its maximum capacity to the old
   // generation size. It needs executable memory.
   // On 64-bit platform(s), we put all code objects in a 2 GB range of
   // virtual address space, so that they can call each other with near calls.
   if (code_range_size_ > 0) {
-    if (!isolate_->code_range()->Setup(code_range_size_)) {
+    if (!isolate_->code_range()->SetUp(code_range_size_)) {
@@ -5542 +5703 @@
                             FLAG_max_map_space_pages,
@@ -5544 +5705 @@
   if (map_space_ == NULL) return false;
-  if (!map_space_->Setup()) return false;
+  if (!map_space_->SetUp()) return false;
@@ -5547 +5708 @@
   // Initialize global property cell space.
   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
   if (cell_space_ == NULL) return false;
-  if (!cell_space_->Setup()) return false;
+  if (!cell_space_->SetUp()) return false;
@@ -5552 +5713 @@
   // The large object code space may contain code or data. We set the memory
   // to be non-executable here for safety, but this means we need to enable it
   // explicitly when allocating large code objects.
   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
   if (lo_space_ == NULL) return false;
-  if (!lo_space_->Setup()) return false;
+  if (!lo_space_->SetUp()) return false;
@@ +5720 @@
+  // Set up the seed that is used to randomize the string hash function.
+  ASSERT(hash_seed() == 0);
+  if (FLAG_randomize_hashes) {
+    if (FLAG_hash_seed == 0) {
@@ +5725 @@
+          Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
@@ +5727 @@
+      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
@@ -5558 +5731 @@
   if (create_heap_objects) {
     // Create initial maps.
     if (!CreateInitialMaps()) return false;
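
The seed set up here is mixed into string hashing so hash values differ from run to run, which blunts algorithmic-complexity attacks on dictionaries keyed by attacker-chosen strings. A sketch of the idea (the mixing function is illustrative, not V8's; the 0x3fffffff mask keeping the value in Smi range is taken from the hunk):

    #include <cstdio>

    // Illustrative seeded string hash: same input, different seed,
    // different hash value.
    static unsigned HashString(const char* s, unsigned seed) {
      unsigned hash = seed;
      for (; *s != '\0'; s++) {
        hash = (hash + static_cast<unsigned char>(*s)) * 1000003u;
      }
      return hash & 0x3fffffff;  // stay within Smi range, as the hunk does
    }

    int main() {
      std::printf("seed=1: %u\n", HashString("key", 1u));
      std::printf("seed=2: %u\n", HashString("key", 2u));
      return 0;
    }
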
@@ -6381 +6567 @@
 int KeyedLookupCache::Lookup(Map* map, String* name) {
-  int index = Hash(map, name);
-  Key& key = keys_[index];
-  if ((key.map == map) && key.name->Equals(name)) {
-    return field_offsets_[index];
+  int index = (Hash(map, name) & kHashMask);
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index + i];
+    if ((key.map == map) && key.name->Equals(name)) {
+      return field_offsets_[index + i];
@@ -6387 +6575 @@
   return kNotFound;
@@ -6391 +6579 @@
 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
   String* symbol;
   if (HEAP->LookupSymbolIfExists(name, &symbol)) {
-    int index = Hash(map, symbol);
+    int index = (Hash(map, symbol) & kHashMask);
@@ +6583 @@
+    // After a GC there will be free slots, so we use them in order (this may
+    // help to get the most frequently used one in position 0).
+    for (int i = 0; i< kEntriesPerBucket; i++) {
+      Key& key = keys_[index];
+      Object* free_entry_indicator = NULL;
+      if (key.map == free_entry_indicator) {
@@ +6591 @@
+        field_offsets_[index + i] = field_offset;
@@ +6595 @@
+    // No free entry found in this bucket, so we move them all down one and
+    // put the new entry at position zero.
+    for (int i = kEntriesPerBucket - 1; i > 0; i--) {
+      Key& key = keys_[index + i];
+      Key& key2 = keys_[index + i - 1];
@@ +6601 @@
+      field_offsets_[index + i] = field_offsets_[index + i - 1];
@@ +6604 @@
+    // Write the new first entry.
@@ -6395 +6605 @@
     Key& key = keys_[index];
@@ -6397 +6607 @@
     key.name = symbol;
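
Taken together, the two hunks turn the direct-mapped cache into a bucketed one: Lookup probes kEntriesPerBucket consecutive slots, and Update first reuses a free slot in the bucket, otherwise shifts the bucket down one and inserts at position zero, approximating LRU. A compact standalone model (table size, bucket width, and the string-keyed interface are assumptions):

    #include <cstdio>
    #include <cstring>

    static const int kLength = 64;           // assumed table size
    static const int kEntriesPerBucket = 4;  // assumed bucket width
    static const int kNotFound = -1;

    struct Key {
      const char* name;  // NULL marks a free slot
    };

    static Key keys[kLength];
    static int field_offsets[kLength];

    // Bucket-aligned index: hash, then clear the low bits.
    static int BucketIndex(const char* name) {
      unsigned hash = 0;
      for (const char* p = name; *p != '\0'; p++) hash = hash * 31 + *p;
      return static_cast<int>(hash) & (kLength - 1) & ~(kEntriesPerBucket - 1);
    }

    static int Lookup(const char* name) {
      int index = BucketIndex(name);
      for (int i = 0; i < kEntriesPerBucket; i++) {
        Key& key = keys[index + i];
        if (key.name != NULL && std::strcmp(key.name, name) == 0) {
          return field_offsets[index + i];
        }
      }
      return kNotFound;
    }

    static void Update(const char* name, int field_offset) {
      int index = BucketIndex(name);
      // Reuse a free slot in the bucket if there is one.
      for (int i = 0; i < kEntriesPerBucket; i++) {
        if (keys[index + i].name == NULL) {
          keys[index + i].name = name;
          field_offsets[index + i] = field_offset;
          return;
        }
      }
      // Bucket full: shift everything down one, insert at position zero.
      for (int i = kEntriesPerBucket - 1; i > 0; i--) {
        keys[index + i] = keys[index + i - 1];
        field_offsets[index + i] = field_offsets[index + i - 1];
      }
      keys[index].name = name;
      field_offsets[index] = field_offset;
    }

    int main() {
      Update("a", 1);
      Update("b", 2);
      std::printf("a -> %d, b -> %d, c -> %d\n",
                  Lookup("a"), Lookup("b"), Lookup("c"));
      return 0;
    }
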
@@ -6501 +6711 @@
   // pieces and initialize size, owner and flags field of every piece.
   // If FromAnyPointerAddress encounters a slot that belongs to one of
   // these smaller pieces it will treat it as a slot on a normal Page.
+  Address chunk_end = chunk->address() + chunk->size();
   MemoryChunk* inner = MemoryChunk::FromAddress(
       chunk->address() + Page::kPageSize);
-  MemoryChunk* inner_last = MemoryChunk::FromAddress(
-      chunk->address() + chunk->size() - 1);
+  MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
   while (inner <= inner_last) {
     // Size of a large chunk is always a multiple of
     // OS::AllocateAlignment() so there is always
     // enough space for a fake MemoryChunk header.
+    Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
+    // Guard against overflow.
+    if (area_end < inner->address()) area_end = chunk_end;
+    inner->SetArea(inner->address(), area_end);
     inner->set_size(Page::kPageSize);
     inner->set_owner(lo_space());
     inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
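
The new lines fix the tail piece of a large chunk: it is usually shorter than a full page, so its usable area must be clamped to the chunk end, with a wraparound guard for chunks near the top of the address space. A worked example of the clamping (page size and addresses are made up):

    #include <cstdio>

    int main() {
      const unsigned kPageSize = 1u << 20;         // pretend 1 MB pages
      unsigned chunk_start = 16u * kPageSize;
      unsigned chunk_size = 2 * kPageSize + 4096;  // two pages plus a tail
      unsigned chunk_end = chunk_start + chunk_size;
      for (unsigned inner = chunk_start + kPageSize;
           inner <= chunk_end - 1;
           inner += kPageSize) {
        unsigned area_end = inner + kPageSize;       // Min(...) from the hunk
        if (area_end > chunk_end) area_end = chunk_end;
        if (area_end < inner) area_end = chunk_end;  // overflow guard
        std::printf("piece at %#x: area [%#x, %#x), %u bytes\n",
                    inner, inner, area_end, area_end - inner);
      }
      return 0;
    }
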