@@ -75 +75 @@
          owner == HEAP->cell_space() ||
          owner == HEAP->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner),
-             page->ObjectAreaStart(),
-             page->ObjectAreaEnd(),
+             page->area_start(),
+             page->area_end(),
   ASSERT(page->WasSweptPrecisely());

@@ -108 +108 @@
     cur_page = space_->anchor();
     cur_page = Page::FromAddress(cur_addr_ - 1);
-    ASSERT(cur_addr_ == cur_page->ObjectAreaEnd());
+    ASSERT(cur_addr_ == cur_page->area_end());
   cur_page = cur_page->next_page();
   if (cur_page == space_->anchor()) return false;
-  cur_addr_ = cur_page->ObjectAreaStart();
-  cur_end_ = cur_page->ObjectAreaEnd();
+  cur_addr_ = cur_page->area_start();
+  cur_end_ = cur_page->area_end();
   ASSERT(cur_page->WasSweptPrecisely());
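
The traversal above relies on the page list being a ring with a sentinel: space_->anchor() is a dummy page owning no object area, so arriving back at it means every real page has been visited. A minimal standalone sketch of the pattern (this Page type is a stand-in, not V8's):

    #include <cstdio>

    struct Page {
      Page* next;
    };

    // Visit every real page once; `anchor` is the sentinel closing the ring.
    void VisitPages(Page* anchor) {
      for (Page* p = anchor->next; p != anchor; p = p->next) {
        std::printf("page %p\n", static_cast<void*>(p));
      }
    }

    int main() {
      Page anchor, a, b;
      anchor.next = &a;  // anchor -> a -> b -> anchor
      a.next = &b;
      b.next = &anchor;
      VisitPages(&anchor);
      return 0;
    }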

@@ -228 +228 @@
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!code_range_->Commit(current.start, *allocated, true)) {
+  if (!MemoryAllocator::CommitCodePage(code_range_,
+                                       current.start,
+                                       *allocated)) {

@@ -271 +273 @@
-bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
+bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   ASSERT_GE(capacity_, capacity_executable_);
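
Both capacities are rounded up to a whole number of pages before the invariant that executable capacity never exceeds total capacity is asserted. For a power-of-two boundary like Page::kPageSize, RoundUp is the usual add-then-mask trick; a sketch with an assumed 1 MB page size:

    #include <cassert>
    #include <cstdint>

    // Round x up to a power-of-two boundary by adding boundary-1 and masking.
    constexpr uintptr_t RoundUp(uintptr_t x, uintptr_t boundary) {
      return (x + boundary - 1) & ~(boundary - 1);
    }

    int main() {
      const uintptr_t kPageSize = uintptr_t{1} << 20;  // assumed page size
      assert(RoundUp(0, kPageSize) == 0);
      assert(RoundUp(1, kPageSize) == kPageSize);
      assert(RoundUp(kPageSize + 1, kPageSize) == 2 * kPageSize);
      return 0;
    }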

@@ -358 +360 @@
   VirtualMemory reservation;
   Address base = ReserveAlignedMemory(size, alignment, &reservation);
   if (base == NULL) return NULL;
-  if (!reservation.Commit(base,
-                          size,
-                          executable == EXECUTABLE)) {
+  if (executable == EXECUTABLE) {
+    CommitCodePage(&reservation, base, size);
+  } else {
+    if (!reservation.Commit(base,
+                            size,
+                            executable == EXECUTABLE)) {
   controller->TakeControl(&reservation);

@@ -378 +386 @@
 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                        SemiSpace* semi_space) {
+  Address area_start = start + NewSpacePage::kObjectStartOffset;
+  Address area_end = start + Page::kPageSize;
   MemoryChunk* chunk = MemoryChunk::Initialize(heap,
   chunk->set_next_chunk(NULL);

@@ -431 +448 @@
   ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
   ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
-  if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
+  if (executable == EXECUTABLE) {
+    chunk->SetFlag(IS_EXECUTABLE);
-  if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA);
+  if (owner == heap->old_data_space()) {
+    chunk->SetFlag(CONTAINS_ONLY_DATA);

@@ -462 +483 @@
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
                                             Executability executable,
-  size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;
+  Address area_start = NULL;
+  Address area_end = NULL;
   if (executable == EXECUTABLE) {
+    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+                         OS::CommitPageSize()) + CodePageGuardSize();
     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
     // Update executable memory size.
     size_executable_ += reservation.size();
+    ZapBlock(base, CodePageGuardStartOffset());
+    ZapBlock(base + CodePageAreaStartOffset(), body_size);
+    area_start = base + CodePageAreaStartOffset();
+    area_end = area_start + body_size;
+    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
     base = AllocateAlignedMemory(chunk_size,
                                  MemoryChunk::kAlignment,
     if (base == NULL) return NULL;
-  ZapBlock(base, chunk_size);
+    ZapBlock(base, chunk_size);
+    area_start = base + Page::kObjectStartOffset;
+    area_end = base + chunk_size;
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));
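
For executable chunks the usable body no longer starts right after the chunk header: the header is padded out to a whole OS page, a guard page follows it, and a second guard page closes the chunk, so chunk_size is the area start offset plus the body, rounded up to the commit granularity, plus one more guard page. A sketch of that arithmetic under assumed constants (4 KB commit pages, a 256-byte header; not V8's actual values):

    #include <cstddef>
    #include <cstdio>
    using std::size_t;

    constexpr size_t kCommitPageSize = 4096;    // assumed OS commit granularity
    constexpr size_t kObjectStartOffset = 256;  // assumed chunk header size

    size_t RoundUp(size_t x, size_t boundary) {
      return (x + boundary - 1) / boundary * boundary;
    }

    size_t CodePageGuardStartOffset() {
      // The first OS page after the header becomes the leading guard page.
      return RoundUp(kObjectStartOffset, kCommitPageSize);
    }

    size_t CodePageGuardSize() { return kCommitPageSize; }

    size_t CodePageAreaStartOffset() {
      return CodePageGuardStartOffset() + CodePageGuardSize();
    }

    int main() {
      const size_t body_size = 100 * 1024;
      const size_t chunk_size =
          RoundUp(CodePageAreaStartOffset() + body_size, kCommitPageSize) +
          CodePageGuardSize();
      std::printf("body at offset %zu, chunk of %zu bytes\n",
                  CodePageAreaStartOffset(), chunk_size);
      return 0;
    }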

@@ -528 +568 @@
 Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
   if (chunk == NULL) return NULL;

@@ +694 @@
+int MemoryAllocator::CodePageGuardStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
+int MemoryAllocator::CodePageGuardSize() {
+  return OS::CommitPageSize();
+int MemoryAllocator::CodePageAreaStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return CodePageGuardStartOffset() + CodePageGuardSize();
+int MemoryAllocator::CodePageAreaEndOffset() {
+  // We are guarding code pages: the last OS page will be protected as
+  // non-writable.
+  return Page::kPageSize - OS::CommitPageSize();
+bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
+  // Commit page header (not executable).
+  if (!vm->Commit(start,
+                  CodePageGuardStartOffset(),
+  // Create guard page after the header.
+  if (!vm->Guard(start + CodePageGuardStartOffset())) {
+  // Commit page body (executable).
+  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
+  if (!vm->Commit(start + CodePageAreaStartOffset(),
+  // Create guard page after the allocatable area.
+  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
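
Taken together, these helpers carve an executable page into header | guard | body | guard: the header stays readable and writable, the body is committed executable, and a stray write off either end of the body faults on a guard page. A layout check under the same assumed constants as the sketch above:

    #include <cassert>
    #include <cstddef>
    using std::size_t;

    int main() {
      const size_t kPageSize = size_t{1} << 20;  // assumed total page size
      const size_t kCommitPageSize = 4096;       // assumed OS page size
      const size_t header = 4096;                // header rounded to one OS page

      const size_t area_start = header + kCommitPageSize;   // skip lead guard
      const size_t area_end = kPageSize - kCommitPageSize;  // before tail guard
      const size_t area_size = area_end - area_start;

      // Header, leading guard, executable area and trailing guard must tile
      // the page exactly, with no gaps and no overlap.
      assert(header + kCommitPageSize + area_size + kCommitPageSize ==
             kPageSize);
      return 0;
    }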

@@ +752 @@
+// -----------------------------------------------------------------------------
+// MemoryChunk implementation
+void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
+    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+  chunk->IncrementLiveBytes(by);

@@ -651 +763 @@
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
     : Space(heap, id, executable),
       free_list_(this),
       was_swept_conservatively_(false),
-      first_unswept_page_(Page::FromAddress(NULL)) {
+      first_unswept_page_(Page::FromAddress(NULL)),
+      unswept_free_bytes_(0) {
+  if (id == CODE_SPACE) {
+    area_size_ = heap->isolate()->memory_allocator()->
+        CodePageAreaSize();
+  } else {
+    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
+  }
   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
-                  * Page::kObjectAreaSize;
+                  * AreaSize();
   accounting_stats_.Clear();
   allocation_info_.top = NULL;
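
area_size_ now differs per space: code-space pages pay for the rounded-up header and the two guard pages, while every other space only loses the chunk header, and max_capacity_ counts whole usable areas rather than raw page bytes. A sketch of that bookkeeping with assumed sizes (1 MB pages, a header that rounds to one 4 KB OS page):

    #include <cstdio>

    constexpr long kPageSize = 1 << 20;        // assumed page size
    constexpr long kObjectStartOffset = 4096;  // assumed header, one OS page
    constexpr long kCommitPageSize = 4096;     // assumed OS page size

    long AreaSize(bool is_code_space) {
      if (is_code_space) {
        // Header plus a guard page in front, one more guard page at the back.
        return kPageSize - (kObjectStartOffset + kCommitPageSize)
                         - kCommitPageSize;
      }
      return kPageSize - kObjectStartOffset;
    }

    int main() {
      const long request = 64 * kPageSize + 12345;  // arbitrary max capacity
      const long pages = request / kPageSize;       // round down to pages
      std::printf("code space max: %ld bytes\n", pages * AreaSize(true));
      std::printf("data space max: %ld bytes\n", pages * AreaSize(false));
      return 0;
    }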

@@ -712 +831 @@
 bool PagedSpace::CanExpand() {
-  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+  ASSERT(max_capacity_ % AreaSize() == 0);
+  ASSERT(Capacity() % AreaSize() == 0);
   if (Capacity() == max_capacity_) return false;

@@ -752 +871 @@
 void PagedSpace::ReleasePage(Page* page) {
   ASSERT(page->LiveBytes() == 0);
+  ASSERT(AreaSize() == page->area_size());
   // Adjust list of unswept pages if the page is the head of the list.
   if (first_unswept_page_ == page) {
   if (page->WasSwept()) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
-    ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
+    ASSERT_EQ(AreaSize(), static_cast<int>(size));
+    DecreaseUnsweptFreeBytes(page);
   if (Page::FromAllocationTop(allocation_info_.top) == page) {

@@ -790 +912 @@
   if (!page->WasSwept()) {
     if (page->LiveBytes() == 0) ReleasePage(page);
-    HeapObject* obj = HeapObject::FromAddress(page->body());
+    HeapObject* obj = HeapObject::FromAddress(page->area_start());
     if (obj->IsFreeSpace() &&
-        FreeSpace::cast(obj)->size() == Page::kObjectAreaSize) {
+        FreeSpace::cast(obj)->size() == AreaSize()) {
       // Sometimes we allocate memory from free list but don't
       // immediately initialize it (e.g. see PagedSpace::ReserveSpace
       // called from Heap::ReserveSpace that can cause GC before
       // by free list items.
       FreeList::SizeStats sizes;
       free_list_.CountFreeListItems(page, &sizes);
-      if (sizes.Total() == Page::kObjectAreaSize) {
+      if (sizes.Total() == AreaSize()) {
        ReleasePage(page);

@@ -835 +957 @@
   ASSERT(page->WasSweptPrecisely());
   HeapObjectIterator it(page, NULL);
-  Address end_of_previous_object = page->ObjectAreaStart();
-  Address top = page->ObjectAreaEnd();
+  Address end_of_previous_object = page->area_start();
+  Address top = page->area_end();
   int black_size = 0;
   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
     ASSERT(end_of_previous_object <= object->address());

@@ -873 +995 @@
 // NewSpace implementation
-bool NewSpace::Setup(int reserved_semispace_capacity,
+bool NewSpace::SetUp(int reserved_semispace_capacity,
                      int maximum_semispace_capacity) {
-  // Setup new space based on the preallocated memory block defined by
+  // Set up new space based on the preallocated memory block defined by
   // start and size. The provided space is divided into two semi-spaces.
   // To support fast containment testing in the new space, the size of
   // this chunk must be a power of two and it must be aligned to its size.
   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
   ASSERT(IsPowerOf2(maximum_semispace_capacity));
-  // Allocate and setup the histogram arrays if necessary.
+  // Allocate and set up the histogram arrays if necessary.
   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
              2 * heap()->ReservedSemiSpaceSize());
   ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
-  if (!to_space_.Setup(chunk_base_,
-                       initial_semispace_capacity,
-                       maximum_semispace_capacity)) {
-  if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity,
-                         initial_semispace_capacity,
-                         maximum_semispace_capacity)) {
+  to_space_.SetUp(chunk_base_,
+                  initial_semispace_capacity,
+                  maximum_semispace_capacity);
+  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
+                    initial_semispace_capacity,
+                    maximum_semispace_capacity);
+  if (!to_space_.Commit()) {
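
The power-of-two requirement in the comment is what makes new-space containment tests cheap: with the reservation's size a power of two and its base aligned to that size, membership reduces to one mask-and-compare instead of two bounds checks. A standalone sketch (addresses are made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t size = uintptr_t{1} << 21;  // 2 MB, a power of two
      const uintptr_t base = uintptr_t{1} << 30;  // aligned to `size`
      const uintptr_t mask = ~(size - 1);

      // An address lies in [base, base + size) iff masking yields the base.
      assert(((base + 12345) & mask) == base);  // inside
      assert(((base + size) & mask) != base);   // first byte past the end
      assert(((base - 1) & mask) != base);      // last byte before the start
      return 0;
    }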

@@ -1050 +1171 @@
   // Clear remainder of current page.
-  Address limit = NewSpacePage::FromLimit(top)->body_limit();
+  Address limit = NewSpacePage::FromLimit(top)->area_end();
   if (heap()->gc_state() == Heap::SCAVENGE) {
     heap()->promotion_queue()->SetNewLimit(limit);
     heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();

@@ -1099 +1220 @@
   // There should be objects packed in from the low address up to the
   // allocation pointer.
-  Address current = to_space_.first_page()->body();
+  Address current = to_space_.first_page()->area_start();
   CHECK_EQ(current, to_space_.space_start());
   while (current != top()) {

@@ -1148 +1269 @@
 // -----------------------------------------------------------------------------
 // SemiSpace implementation
-bool SemiSpace::Setup(Address start,
+void SemiSpace::SetUp(Address start,
                       int initial_capacity,
                       int maximum_capacity) {
   // Creates a space in the young generation. The constructor does not

@@ -1220 +1339 @@
 bool SemiSpace::GrowTo(int new_capacity) {
+  if (!is_committed()) {
+    if (!Commit()) return false;
+  }
   ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity <= maximum_capacity_);
   ASSERT(new_capacity > capacity_);

@@ -1256 +1378 @@
   ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity >= initial_capacity_);
   ASSERT(new_capacity < capacity_);
-  // Semispaces grow backwards from the end of their allocated capacity,
-  // so we find the before and after start addresses relative to the
-  // end of the space.
-  Address space_end = start_ + maximum_capacity_;
-  Address old_start = space_end - capacity_;
-  size_t delta = capacity_ - new_capacity;
-  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) {
+  if (is_committed()) {
+    // Semispaces grow backwards from the end of their allocated capacity,
+    // so we find the before and after start addresses relative to the
+    // end of the space.
+    Address space_end = start_ + maximum_capacity_;
+    Address old_start = space_end - capacity_;
+    size_t delta = capacity_ - new_capacity;
+    ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
+    if (!allocator->UncommitBlock(old_start, delta)) {
+    int pages_after = new_capacity / Page::kPageSize;
+    NewSpacePage* new_last_page =
+        NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+    new_last_page->set_next_page(anchor());
+    anchor()->set_prev_page(new_last_page);
+    ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
   capacity_ = new_capacity;
-  int pages_after = capacity_ / Page::kPageSize;
-  NewSpacePage* new_last_page =
-      NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
-  new_last_page->set_next_page(anchor());
-  anchor()->set_prev_page(new_last_page);
-  ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
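
Because a semispace is committed from the high end of its reservation downwards, shrinking uncommits a delta at the low end and then re-splices the page ring so the new last page links back to the anchor. The address arithmetic in isolation (constants are illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kPageSize = uintptr_t{1} << 20;
      const uintptr_t start = uintptr_t{1} << 28;    // reservation base
      const uintptr_t maximum_capacity = 16 * kPageSize;
      const uintptr_t capacity = 8 * kPageSize;      // committed right now
      const uintptr_t new_capacity = 5 * kPageSize;  // shrink target

      const uintptr_t space_end = start + maximum_capacity;
      const uintptr_t old_start = space_end - capacity;  // lowest committed byte
      const uintptr_t delta = capacity - new_capacity;   // bytes to uncommit

      // After uncommitting [old_start, old_start + delta), exactly the last
      // new_capacity bytes of the reservation stay committed.
      assert(old_start + delta == space_end - new_capacity);
      return 0;
    }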

@@ -1656 +1783 @@
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
   if (size_in_bytes > FreeSpace::kHeaderSize) {
-    set_map_unsafe(heap->raw_unchecked_free_space_map());
+    set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
     // Can't use FreeSpace::cast because it fails during deserialization.
     FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
     this_as_free_space->set_size(size_in_bytes);
   } else if (size_in_bytes == kPointerSize) {
-    set_map_unsafe(heap->raw_unchecked_one_pointer_filler_map());
+    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map_unsafe(heap->raw_unchecked_two_pointer_filler_map());
+    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
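
A freed block has to masquerade as a dead heap object so iterators and the sweeper can step over it: anything larger than the FreeSpace header records its length in a size field, while one- and two-word holes get fixed-size filler maps because there is no room for a length. The size dispatch in isolation (the enum stands in for the real maps, and the header size is an assumed map-plus-length pair):

    #include <cassert>

    constexpr int kPointerSize = 8;                         // assumed 64-bit
    constexpr int kFreeSpaceHeaderSize = 2 * kPointerSize;  // map + size field

    enum class Filler { kFreeSpace, kOneWord, kTwoWord, kImpossible };

    Filler ClassifyFreeBlock(int size_in_bytes) {
      if (size_in_bytes > kFreeSpaceHeaderSize) return Filler::kFreeSpace;
      if (size_in_bytes == kPointerSize) return Filler::kOneWord;
      if (size_in_bytes == 2 * kPointerSize) return Filler::kTwoWord;
      return Filler::kImpossible;  // other sizes never reach this path
    }

    int main() {
      assert(ClassifyFreeBlock(64) == Filler::kFreeSpace);
      assert(ClassifyFreeBlock(8) == Filler::kOneWord);
      assert(ClassifyFreeBlock(16) == Filler::kTwoWord);
      return 0;
    }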

@@ -1914 +2041 @@
 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
   sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
-  if (sizes->huge_size_ < Page::kObjectAreaSize) {
+  if (sizes->huge_size_ < p->area_size()) {
     sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
     sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
     sizes->large_size_ = CountFreeListItemsInList(large_list_, p);

@@ -1943 +2070 @@
 intptr_t FreeList::EvictFreeListItems(Page* p) {
   intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
-  if (sum < Page::kObjectAreaSize) {
+  if (sum < p->area_size()) {
     sum += EvictFreeListItemsInList(&small_list_, p) +
            EvictFreeListItemsInList(&medium_list_, p) +
            EvictFreeListItemsInList(&large_list_, p);
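
Both routines look at the huge list first and only walk the size-segregated small, medium and large lists when the huge nodes do not already account for the page's entire usable area; on a page occupied by a single page-sized free block the early exit saves three list walks. The pattern in isolation:

    #include <cstdio>

    long SumNodesOnPage(const long* sizes, int count) {
      long sum = 0;
      for (int i = 0; i < count; i++) sum += sizes[i];
      return sum;
    }

    int main() {
      const long area_size = 1000;    // assumed usable area of the page
      const long huge[] = {1000};     // one free block spans the whole page
      const long small[] = {16, 32};  // never scanned in this case

      long sum = SumNodesOnPage(huge, 1);
      if (sum < area_size) {          // early exit: page already covered
        sum += SumNodesOnPage(small, 2);
      }
      std::printf("free bytes: %ld of %ld\n", sum, area_size);
      return 0;
    }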

@@ -2066 +2194 @@
 bool PagedSpace::ReserveSpace(int size_in_bytes) {
-  ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
+  ASSERT(size_in_bytes <= AreaSize());
   ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
   Address current_top = allocation_info_.top;
   Address new_top = current_top + size_in_bytes;

@@ -2105 +2233 @@
     PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
            reinterpret_cast<intptr_t>(p));
+    DecreaseUnsweptFreeBytes(p);
     freed_bytes += MarkCompactCollector::SweepConservatively(this, p);

@@ -2142 +2271 @@
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
+  // If there are unswept pages advance lazy sweeper then sweep one page before
+  // allocating a new page.
+  if (first_unswept_page_->is_valid()) {
+    AdvanceSweeper(size_in_bytes);
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
   // Free list allocation failed and there is no next page. Fail if we have
   // hit the old generation size limit that should cause a garbage
-  // If there are unswept pages advance lazy sweeper.
-  if (first_unswept_page_->is_valid()) {
-    AdvanceSweeper(size_in_bytes);
-    // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
-  if (!IsSweepingComplete()) {
-    AdvanceSweeper(kMaxInt);
-    // Retry the free list allocation.
-    object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
   // Try to expand the space and allocate in the new next page.
   if (Expand()) {
     return free_list_.Allocate(size_in_bytes);
+  // Last ditch, sweep all the remaining pages to try to find space. This may
+  if (!IsSweepingComplete()) {
+    AdvanceSweeper(kMaxInt);
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
   // Finally, fail.
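
The reordered slow path is a fixed ladder: sweep roughly size_in_bytes worth of unswept pages and retry the free list, then try to grow the space, and only as a last resort finish sweeping everything before failing, which lets the caller fall back to a GC. A control-flow sketch with the V8 internals replaced by hypothetical callbacks:

    #include <climits>
    #include <cstdio>
    #include <functional>

    void* SlowAllocate(int size,
                       const std::function<void*(int)>& free_list_alloc,
                       const std::function<bool()>& has_unswept_pages,
                       const std::function<void(int)>& advance_sweeper,
                       const std::function<bool()>& expand) {
      // 1. Sweep a little, proportional to the request, then retry.
      if (has_unswept_pages()) {
        advance_sweeper(size);
        if (void* p = free_list_alloc(size)) return p;
      }
      // 2. Try to grow the space and allocate from the fresh page.
      if (expand()) return free_list_alloc(size);
      // 3. Last ditch: finish sweeping all remaining pages, retry once.
      advance_sweeper(INT_MAX);
      if (void* p = free_list_alloc(size)) return p;
      return nullptr;  // finally, fail; the caller falls back to a full GC
    }

    int main() {
      void* p = SlowAllocate(
          64,
          [](int) -> void* { return nullptr; },  // free list stays empty
          [] { return true; },                   // unswept pages exist
          [](int) {},                            // sweeping is a no-op here
          [] { return false; });                 // expansion fails
      std::printf("allocation %s\n", p ? "succeeded" : "failed -> GC");
      return 0;
    }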

@@ -2440 +2572 @@
   LargePage* page = heap()->isolate()->memory_allocator()->
       AllocateLargePage(object_size, executable, this);
   if (page == NULL) return Failure::RetryAfterGC(identity());
-  ASSERT(page->body_size() >= object_size);
+  ASSERT(page->area_size() >= object_size);
   size_ += static_cast<int>(page->size());
   objects_size_ += object_size;

@@ -2502 +2634 @@
   MarkBit mark_bit = Marking::MarkBitFrom(object);
   if (mark_bit.Get()) {
     mark_bit.Clear();
-    MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
+    MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
     previous = current;
     current = current->next_page();

@@ -2556 +2688 @@
   // object area start.
   HeapObject* object = chunk->GetObject();
   Page* page = Page::FromAddress(object->address());
-  ASSERT(object->address() == page->ObjectAreaStart());
+  ASSERT(object->address() == page->area_start());
   // The first word should be a map, and we expect all map pointers to be
   // in map space.