~ubuntu-branches/ubuntu/saucy/libv8/saucy

Viewing changes to src/spaces.cc

  • Committer: Package Import Robot
  • Author(s): Jérémy Lal
  • Date: 2012-04-07 16:26:13 UTC
  • mfrom: (15.1.27 sid)
  • Revision ID: package-import@ubuntu.com-20120407162613-dqo1m6w9r3fh8tst
Tags: 3.8.9.16-3
* mipsel build fixes:
  + v8_use_mips_abi_hardfloat=false, which lowers the EABI requirements.
  + v8_can_use_fpu_instructions=false, so that FPU presence is detected at run time.
  + set -Wno-unused-but-set-variable only on mipsel.

=== modified file 'src/spaces.cc'
@@ -75,8 +75,8 @@
          owner == HEAP->cell_space() ||
          owner == HEAP->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner),
-             page->ObjectAreaStart(),
-             page->ObjectAreaEnd(),
+             page->area_start(),
+             page->area_end(),
              kOnePageOnly,
              size_func);
   ASSERT(page->WasSweptPrecisely());
@@ -108,12 +108,12 @@
     cur_page = space_->anchor();
   } else {
     cur_page = Page::FromAddress(cur_addr_ - 1);
-    ASSERT(cur_addr_ == cur_page->ObjectAreaEnd());
+    ASSERT(cur_addr_ == cur_page->area_end());
   }
   cur_page = cur_page->next_page();
   if (cur_page == space_->anchor()) return false;
-  cur_addr_ = cur_page->ObjectAreaStart();
-  cur_end_ = cur_page->ObjectAreaEnd();
+  cur_addr_ = cur_page->area_start();
+  cur_end_ = cur_page->area_end();
   ASSERT(cur_page->WasSweptPrecisely());
   return true;
 }
@@ -132,7 +132,7 @@
 }


-bool CodeRange::Setup(const size_t requested) {
+bool CodeRange::SetUp(const size_t requested) {
   ASSERT(code_range_ == NULL);

   code_range_ = new VirtualMemory(requested);
@@ -227,7 +227,9 @@
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!code_range_->Commit(current.start, *allocated, true)) {
+  if (!MemoryAllocator::CommitCodePage(code_range_,
+                                       current.start,
+                                       *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -268,7 +270,7 @@
 }


-bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
+bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   ASSERT_GE(capacity_, capacity_executable_);
@@ -358,11 +360,17 @@
   VirtualMemory reservation;
   Address base = ReserveAlignedMemory(size, alignment, &reservation);
   if (base == NULL) return NULL;
-  if (!reservation.Commit(base,
-                          size,
-                          executable == EXECUTABLE)) {
-    return NULL;
+
+  if (executable == EXECUTABLE) {
+    CommitCodePage(&reservation, base, size);
+  } else {
+    if (!reservation.Commit(base,
+                            size,
+                            executable == EXECUTABLE)) {
+      return NULL;
+    }
   }
+
   controller->TakeControl(&reservation);
   return base;
 }
@@ -378,9 +386,14 @@
 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                        Address start,
                                        SemiSpace* semi_space) {
+  Address area_start = start + NewSpacePage::kObjectStartOffset;
+  Address area_end = start + Page::kPageSize;
+
   MemoryChunk* chunk = MemoryChunk::Initialize(heap,
                                                start,
                                                Page::kPageSize,
+                                               area_start,
+                                               area_end,
                                                NOT_EXECUTABLE,
                                                semi_space);
   chunk->set_next_chunk(NULL);
@@ -410,6 +423,8 @@
 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
                                      Address base,
                                      size_t size,
+                                     Address area_start,
+                                     Address area_end,
                                      Executability executable,
                                      Space* owner) {
   MemoryChunk* chunk = FromAddress(base);
@@ -418,6 +433,8 @@

   chunk->heap_ = heap;
   chunk->size_ = size;
+  chunk->area_start_ = area_start;
+  chunk->area_end_ = area_end;
   chunk->flags_ = 0;
   chunk->set_owner(owner);
   chunk->InitializeReservedMemory();
@@ -431,9 +448,13 @@
   ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
   ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

-  if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
+  if (executable == EXECUTABLE) {
+    chunk->SetFlag(IS_EXECUTABLE);
+  }

-  if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA);
+  if (owner == heap->old_data_space()) {
+    chunk->SetFlag(CONTAINS_ONLY_DATA);
+  }

   return chunk;
 }
@@ -462,11 +483,16 @@
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
                                             Executability executable,
                                             Space* owner) {
-  size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+  size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;
+  Address area_start = NULL;
+  Address area_end = NULL;
   if (executable == EXECUTABLE) {
+    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+                         OS::CommitPageSize()) + CodePageGuardSize();
+
     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
       LOG(isolate_,
@@ -494,18 +520,30 @@
       // Update executable memory size.
       size_executable_ += reservation.size();
     }
+
+#ifdef DEBUG
+    ZapBlock(base, CodePageGuardStartOffset());
+    ZapBlock(base + CodePageAreaStartOffset(), body_size);
+#endif
+    area_start = base + CodePageAreaStartOffset();
+    area_end = area_start + body_size;
   } else {
+    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
     base = AllocateAlignedMemory(chunk_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);

     if (base == NULL) return NULL;
-  }

 #ifdef DEBUG
-  ZapBlock(base, chunk_size);
+    ZapBlock(base, chunk_size);
 #endif
+
+    area_start = base + Page::kObjectStartOffset;
+    area_end = base + chunk_size;
+  }
+
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));

@@ -518,6 +556,8 @@
   MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                 base,
                                                 chunk_size,
+                                                area_start,
+                                                area_end,
                                                 executable,
                                                 owner);
   result->set_reserved_memory(&reservation);
@@ -527,7 +567,9 @@

 Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
+                                     executable,
+                                     owner);

   if (chunk == NULL) return NULL;

@@ -648,6 +690,76 @@
 }
 #endif

+
+int MemoryAllocator::CodePageGuardStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
+}
+
+
+int MemoryAllocator::CodePageGuardSize() {
+  return OS::CommitPageSize();
+}
+
+
+int MemoryAllocator::CodePageAreaStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+
+int MemoryAllocator::CodePageAreaEndOffset() {
+  // We are guarding code pages: the last OS page will be protected as
+  // non-writable.
+  return Page::kPageSize - OS::CommitPageSize();
+}
+
+
+bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
+                                     Address start,
+                                     size_t size) {
+  // Commit page header (not executable).
+  if (!vm->Commit(start,
+                  CodePageGuardStartOffset(),
+                  false)) {
+    return false;
+  }
+
+  // Create guard page after the header.
+  if (!vm->Guard(start + CodePageGuardStartOffset())) {
+    return false;
+  }
+
+  // Commit page body (executable).
+  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
+  if (!vm->Commit(start + CodePageAreaStartOffset(),
+                  area_size,
+                  true)) {
+    return false;
+  }
+
+  // Create guard page after the allocatable area.
+  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+    return false;
+  }
+
+  return true;
+}
+
+
+// -----------------------------------------------------------------------------
+// MemoryChunk implementation
+
+void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
+    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+  }
+  chunk->IncrementLiveBytes(by);
+}
+
 // -----------------------------------------------------------------------------
 // PagedSpace implementation

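The guard-page helpers added above split an executable page into a header, a leading guard page, the allocatable code area and a trailing guard page. The following stand-alone sketch only restates their offset arithmetic; the concrete values used for OS::CommitPageSize(), Page::kPageSize and Page::kObjectStartOffset are assumptions for illustration, not values taken from the patch.

// Sketch only: mirrors the arithmetic of CodePageGuardStartOffset(),
// CodePageGuardSize(), CodePageAreaStartOffset() and CodePageAreaEndOffset()
// with assumed constants.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kCommitPageSize = 4096;     // assumed OS::CommitPageSize()
  const size_t kPageSize = 1024 * 1024;    // assumed Page::kPageSize
  const size_t kObjectStartOffset = 256;   // assumed Page::kObjectStartOffset

  // RoundUp(Page::kObjectStartOffset, OS::CommitPageSize())
  const size_t guard_start =
      ((kObjectStartOffset + kCommitPageSize - 1) / kCommitPageSize) * kCommitPageSize;
  const size_t guard_size = kCommitPageSize;             // CodePageGuardSize()
  const size_t area_start = guard_start + guard_size;    // CodePageAreaStartOffset()
  const size_t area_end = kPageSize - kCommitPageSize;   // CodePageAreaEndOffset()

  // Resulting layout: [header | guard | executable area | guard]
  std::printf("header [0, %zu), guard [%zu, %zu), area [%zu, %zu), guard [%zu, %zu)\n",
              guard_start, guard_start, area_start, area_start, area_end,
              area_end, kPageSize);
  return 0;
}
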
@@ -658,9 +770,16 @@
     : Space(heap, id, executable),
       free_list_(this),
       was_swept_conservatively_(false),
-      first_unswept_page_(Page::FromAddress(NULL)) {
+      first_unswept_page_(Page::FromAddress(NULL)),
+      unswept_free_bytes_(0) {
+  if (id == CODE_SPACE) {
+    area_size_ = heap->isolate()->memory_allocator()->
+        CodePageAreaSize();
+  } else {
+    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
+  }
   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
-                  * Page::kObjectAreaSize;
+      * AreaSize();
   accounting_stats_.Clear();

   allocation_info_.top = NULL;
@@ -670,12 +789,12 @@
 }


-bool PagedSpace::Setup() {
+bool PagedSpace::SetUp() {
   return true;
 }


-bool PagedSpace::HasBeenSetup() {
+bool PagedSpace::HasBeenSetUp() {
   return true;
 }

@@ -710,8 +829,8 @@
 }

 bool PagedSpace::CanExpand() {
-  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+  ASSERT(max_capacity_ % AreaSize() == 0);
+  ASSERT(Capacity() % AreaSize() == 0);

   if (Capacity() == max_capacity_) return false;

@@ -751,6 +870,7 @@

 void PagedSpace::ReleasePage(Page* page) {
   ASSERT(page->LiveBytes() == 0);
+  ASSERT(AreaSize() == page->area_size());

   // Adjust list of unswept pages if the page is the head of the list.
   if (first_unswept_page_ == page) {
@@ -763,7 +883,9 @@
   if (page->WasSwept()) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
-    ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
+    ASSERT_EQ(AreaSize(), static_cast<int>(size));
+  } else {
+    DecreaseUnsweptFreeBytes(page);
   }

   if (Page::FromAllocationTop(allocation_info_.top) == page) {
@@ -778,8 +900,8 @@
   }

   ASSERT(Capacity() > 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
-  accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
+  ASSERT(Capacity() % AreaSize() == 0);
+  accounting_stats_.ShrinkSpace(AreaSize());
 }


@@ -790,9 +912,9 @@
     if (!page->WasSwept()) {
       if (page->LiveBytes() == 0) ReleasePage(page);
     } else {
-      HeapObject* obj = HeapObject::FromAddress(page->body());
+      HeapObject* obj = HeapObject::FromAddress(page->area_start());
       if (obj->IsFreeSpace() &&
-          FreeSpace::cast(obj)->size() == Page::kObjectAreaSize) {
+          FreeSpace::cast(obj)->size() == AreaSize()) {
         // Sometimes we allocate memory from free list but don't
         // immediately initialize it (e.g. see PagedSpace::ReserveSpace
         // called from Heap::ReserveSpace that can cause GC before
@@ -803,7 +925,7 @@
         // by free list items.
         FreeList::SizeStats sizes;
         free_list_.CountFreeListItems(page, &sizes);
-        if (sizes.Total() == Page::kObjectAreaSize) {
+        if (sizes.Total() == AreaSize()) {
           ReleasePage(page);
         }
       }
@@ -834,8 +956,8 @@
     }
     ASSERT(page->WasSweptPrecisely());
     HeapObjectIterator it(page, NULL);
-    Address end_of_previous_object = page->ObjectAreaStart();
-    Address top = page->ObjectAreaEnd();
+    Address end_of_previous_object = page->area_start();
+    Address top = page->area_end();
     int black_size = 0;
     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
       ASSERT(end_of_previous_object <= object->address());
@@ -873,9 +995,9 @@
 // NewSpace implementation


-bool NewSpace::Setup(int reserved_semispace_capacity,
+bool NewSpace::SetUp(int reserved_semispace_capacity,
                      int maximum_semispace_capacity) {
-  // Setup new space based on the preallocated memory block defined by
+  // Set up new space based on the preallocated memory block defined by
   // start and size. The provided space is divided into two semi-spaces.
   // To support fast containment testing in the new space, the size of
   // this chunk must be a power of two and it must be aligned to its size.
@@ -894,7 +1016,7 @@
   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
   ASSERT(IsPowerOf2(maximum_semispace_capacity));

-  // Allocate and setup the histogram arrays if necessary.
+  // Allocate and set up the histogram arrays if necessary.
   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

@@ -908,14 +1030,13 @@
          2 * heap()->ReservedSemiSpaceSize());
   ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

-  if (!to_space_.Setup(chunk_base_,
-                       initial_semispace_capacity,
-                       maximum_semispace_capacity)) {
-    return false;
-  }
-  if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity,
-                         initial_semispace_capacity,
-                         maximum_semispace_capacity)) {
+  to_space_.SetUp(chunk_base_,
+                  initial_semispace_capacity,
+                  maximum_semispace_capacity);
+  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
+                    initial_semispace_capacity,
+                    maximum_semispace_capacity);
+  if (!to_space_.Commit()) {
     return false;
   }

@@ -1048,7 +1169,7 @@
   }

   // Clear remainder of current page.
-  Address limit = NewSpacePage::FromLimit(top)->body_limit();
+  Address limit = NewSpacePage::FromLimit(top)->area_end();
   if (heap()->gc_state() == Heap::SCAVENGE) {
     heap()->promotion_queue()->SetNewLimit(limit);
     heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
@@ -1098,7 +1219,7 @@

   // There should be objects packed in from the low address up to the
   // allocation pointer.
-  Address current = to_space_.first_page()->body();
+  Address current = to_space_.first_page()->area_start();
   CHECK_EQ(current, to_space_.space_start());

   while (current != top()) {
@@ -1133,7 +1254,7 @@
       NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
       // Next page should be valid.
       CHECK(!page->is_anchor());
-      current = page->body();
+      current = page->area_start();
     }
   }

@@ -1148,7 +1269,7 @@
 // -----------------------------------------------------------------------------
 // SemiSpace implementation

-bool SemiSpace::Setup(Address start,
+void SemiSpace::SetUp(Address start,
                       int initial_capacity,
                       int maximum_capacity) {
   // Creates a space in the young generation. The constructor does not
@@ -1167,8 +1288,6 @@
   object_mask_ = address_mask_ | kHeapObjectTagMask;
   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
   age_mark_ = start_;
-
-  return Commit();
 }


@@ -1218,6 +1337,9 @@


 bool SemiSpace::GrowTo(int new_capacity) {
+  if (!is_committed()) {
+    if (!Commit()) return false;
+  }
   ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity <= maximum_capacity_);
   ASSERT(new_capacity > capacity_);
@@ -1256,25 +1378,30 @@
   ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity >= initial_capacity_);
   ASSERT(new_capacity < capacity_);
-  // Semispaces grow backwards from the end of their allocated capacity,
-  // so we find the before and after start addresses relative to the
-  // end of the space.
-  Address space_end = start_ + maximum_capacity_;
-  Address old_start = space_end - capacity_;
-  size_t delta = capacity_ - new_capacity;
-  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) {
-    return false;
+  if (is_committed()) {
+    // Semispaces grow backwards from the end of their allocated capacity,
+    // so we find the before and after start addresses relative to the
+    // end of the space.
+    Address space_end = start_ + maximum_capacity_;
+    Address old_start = space_end - capacity_;
+    size_t delta = capacity_ - new_capacity;
+    ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+
+    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
+    if (!allocator->UncommitBlock(old_start, delta)) {
+      return false;
+    }
+
+    int pages_after = new_capacity / Page::kPageSize;
+    NewSpacePage* new_last_page =
+        NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+    new_last_page->set_next_page(anchor());
+    anchor()->set_prev_page(new_last_page);
+    ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
   }
+
   capacity_ = new_capacity;

-  int pages_after = capacity_ / Page::kPageSize;
-  NewSpacePage* new_last_page =
-      NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
-  new_last_page->set_next_page(anchor());
-  anchor()->set_prev_page(new_last_page);
-  ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
-
   return true;
 }

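For the rewritten SemiSpace::ShrinkTo above, the uncommit arithmetic can be illustrated with a small worked example; the capacities below are assumptions chosen for illustration, not values taken from V8.

// Sketch only: restates ShrinkTo's old_start/delta computation with assumed sizes.
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kMB = 1024 * 1024;
  const size_t maximum_capacity = 8 * kMB;  // assumed size of the reservation
  const size_t capacity = 4 * kMB;          // assumed currently committed size
  const size_t new_capacity = 2 * kMB;      // assumed shrink target

  // Semispaces grow backwards from the end of their reservation, so the
  // committed region is [space_end - capacity, space_end).
  const size_t space_end = maximum_capacity;      // treat the reservation as [0, space_end)
  const size_t old_start = space_end - capacity;  // start of the committed region
  const size_t delta = capacity - new_capacity;   // bytes handed back to the OS

  std::printf("uncommit [%zu MB, %zu MB); keep [%zu MB, %zu MB) committed\n",
              old_start / kMB, (old_start + delta) / kMB,
              (old_start + delta) / kMB, space_end / kMB);
  return 0;
}
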
@@ -1656,14 +1783,14 @@
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
   if (size_in_bytes > FreeSpace::kHeaderSize) {
-    set_map_unsafe(heap->raw_unchecked_free_space_map());
+    set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
     // Can't use FreeSpace::cast because it fails during deserialization.
     FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
     this_as_free_space->set_size(size_in_bytes);
   } else if (size_in_bytes == kPointerSize) {
-    set_map_unsafe(heap->raw_unchecked_one_pointer_filler_map());
+    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map_unsafe(heap->raw_unchecked_two_pointer_filler_map());
+    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
   } else {
     UNREACHABLE();
   }
@@ -1913,7 +2040,7 @@

 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
   sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
-  if (sizes->huge_size_ < Page::kObjectAreaSize) {
+  if (sizes->huge_size_ < p->area_size()) {
     sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
     sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
     sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
@@ -1943,7 +2070,7 @@
 intptr_t FreeList::EvictFreeListItems(Page* p) {
   intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);

-  if (sum < Page::kObjectAreaSize) {
+  if (sum < p->area_size()) {
     sum += EvictFreeListItemsInList(&small_list_, p) +
         EvictFreeListItemsInList(&medium_list_, p) +
         EvictFreeListItemsInList(&large_list_, p);
@@ -2057,6 +2184,7 @@
     } while (p != anchor());
   }
   first_unswept_page_ = Page::FromAddress(NULL);
+  unswept_free_bytes_ = 0;

   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
@@ -2064,7 +2192,7 @@


 bool PagedSpace::ReserveSpace(int size_in_bytes) {
-  ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
+  ASSERT(size_in_bytes <= AreaSize());
   ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
   Address current_top = allocation_info_.top;
   Address new_top = current_top + size_in_bytes;
@@ -2105,6 +2233,7 @@
         PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
                reinterpret_cast<intptr_t>(p));
       }
+      DecreaseUnsweptFreeBytes(p);
       freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
     }
     p = next_page;
@@ -2142,6 +2271,16 @@
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.

+  // If there are unswept pages advance lazy sweeper then sweep one page before
+  // allocating a new page.
+  if (first_unswept_page_->is_valid()) {
+    AdvanceSweeper(size_in_bytes);
+
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
+  }
+
   // Free list allocation failed and there is no next page.  Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
@@ -2150,28 +2289,21 @@
     return NULL;
   }

-  // If there are unswept pages advance lazy sweeper.
-  if (first_unswept_page_->is_valid()) {
-    AdvanceSweeper(size_in_bytes);
-
-    // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
-
-    if (!IsSweepingComplete()) {
-      AdvanceSweeper(kMaxInt);
-
-      // Retry the free list allocation.
-      object = free_list_.Allocate(size_in_bytes);
-      if (object != NULL) return object;
-    }
-  }
-
   // Try to expand the space and allocate in the new next page.
   if (Expand()) {
     return free_list_.Allocate(size_in_bytes);
   }

+  // Last ditch, sweep all the remaining pages to try to find space.  This may
+  // cause a pause.
+  if (!IsSweepingComplete()) {
+    AdvanceSweeper(kMaxInt);
+
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
+  }
+
   // Finally, fail.
   return NULL;
 }
@@ -2400,7 +2532,7 @@
       objects_size_(0) {}


-bool LargeObjectSpace::Setup() {
+bool LargeObjectSpace::SetUp() {
   first_page_ = NULL;
   size_ = 0;
   page_count_ = 0;
@@ -2420,7 +2552,7 @@
         space, kAllocationActionFree, page->size());
     heap()->isolate()->memory_allocator()->Free(page);
   }
-  Setup();
+  SetUp();
 }


@@ -2440,7 +2572,7 @@
   LargePage* page = heap()->isolate()->memory_allocator()->
       AllocateLargePage(object_size, executable, this);
   if (page == NULL) return Failure::RetryAfterGC(identity());
-  ASSERT(page->body_size() >= object_size);
+  ASSERT(page->area_size() >= object_size);

   size_ += static_cast<int>(page->size());
   objects_size_ += object_size;
@@ -2502,7 +2634,7 @@
     MarkBit mark_bit = Marking::MarkBitFrom(object);
     if (mark_bit.Get()) {
       mark_bit.Clear();
-      MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
       previous = current;
       current = current->next_page();
     } else {
@@ -2556,7 +2688,7 @@
     // object area start.
     HeapObject* object = chunk->GetObject();
     Page* page = Page::FromAddress(object->address());
-    ASSERT(object->address() == page->ObjectAreaStart());
+    ASSERT(object->address() == page->area_start());

     // The first word should be a map, and we expect all map pointers to be
     // in map space.