~ubuntu-branches/ubuntu/precise/nodejs/precise

« back to all changes in this revision

Viewing changes to deps/v8/src/spaces.cc

  • Committer: Bazaar Package Importer
  • Author(s): Jérémy Lal
  • Date: 2010-08-20 11:49:04 UTC
  • mfrom: (7.1.6 sid)
  • Revision ID: james.westby@ubuntu.com-20100820114904-lz22w6fkth7yh179
Tags: 0.2.0-1
New upstream release

Show diffs side-by-side

added added

removed removed

Lines of Context:
41
41
         && (info).top <= (space).high()              \
42
42
         && (info).limit == (space).high())
43
43
 
 
44
intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
44
45
 
45
46
// ----------------------------------------------------------------------------
46
47
// HeapObjectIterator
139
140
 
140
141
 
141
142
// -----------------------------------------------------------------------------
142
 
// Page
143
 
 
144
 
#ifdef DEBUG
145
 
Page::RSetState Page::rset_state_ = Page::IN_USE;
146
 
#endif
147
 
 
148
 
// -----------------------------------------------------------------------------
149
143
// CodeRange
150
144
 
151
145
List<CodeRange::FreeBlock> CodeRange::free_list_(0);
348
342
void* MemoryAllocator::AllocateRawMemory(const size_t requested,
349
343
                                         size_t* allocated,
350
344
                                         Executability executable) {
351
 
  if (size_ + static_cast<int>(requested) > capacity_) return NULL;
 
345
  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
 
346
    return NULL;
 
347
  }
352
348
  void* mem;
353
349
  if (executable == EXECUTABLE  && CodeRange::exists()) {
354
350
    mem = CodeRange::AllocateRawMemory(requested, allocated);
524
520
  for (int i = 0; i < pages_in_chunk; i++) {
525
521
    Page* p = Page::FromAddress(page_addr);
526
522
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
 
523
    p->InvalidateWatermark(true);
527
524
    p->SetIsLargeObjectPage(false);
 
525
    p->SetAllocationWatermark(p->ObjectAreaStart());
 
526
    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
528
527
    page_addr += Page::kPageSize;
529
528
  }
530
529
 
681
680
    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
682
681
    page_addr += Page::kPageSize;
683
682
 
 
683
    p->InvalidateWatermark(true);
684
684
    if (p->WasInUseBeforeMC()) {
685
685
      *last_page_in_use = p;
686
686
    }
744
744
  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
745
745
  ASSERT(Capacity() <= max_capacity_);
746
746
 
747
 
  // Sequentially initialize remembered sets in the newly allocated
 
747
  // Sequentially clear region marks in the newly allocated
748
748
  // pages and cache the current last page in the space.
749
749
  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
750
 
    p->ClearRSet();
 
750
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
751
751
    last_page_ = p;
752
752
  }
753
753
 
794
794
#endif
795
795
 
796
796
 
797
 
void PagedSpace::ClearRSet() {
 
797
void PagedSpace::MarkAllPagesClean() {
798
798
  PageIterator it(this, PageIterator::ALL_PAGES);
799
799
  while (it.has_next()) {
800
 
    it.next()->ClearRSet();
 
800
    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
801
801
  }
802
802
}
803
803
 
900
900
  // of forwarding addresses is as an offset in terms of live bytes, so we
901
901
  // need quick access to the allocation top of each page to decode
902
902
  // forwarding addresses.
903
 
  current_page->mc_relocation_top = mc_forwarding_info_.top;
 
903
  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
 
904
  current_page->next_page()->InvalidateWatermark(true);
904
905
  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
905
906
  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
906
907
}
928
929
 
929
930
  MemoryAllocator::SetNextPage(last_page, p);
930
931
 
931
 
  // Sequentially clear remembered set of new pages and cache the
 
932
  // Sequentially clear region marks of new pages and cache the
932
933
  // new last page in the space.
933
934
  while (p->is_valid()) {
934
 
    p->ClearRSet();
 
935
    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
935
936
    last_page_ = p;
936
937
    p = p->next_page();
937
938
  }
1030
1031
    if (above_allocation_top) {
1031
1032
      // We don't care what's above the allocation top.
1032
1033
    } else {
1033
 
      // Unless this is the last page in the space containing allocated
1034
 
      // objects, the allocation top should be at a constant offset from the
1035
 
      // object area end.
1036
1034
      Address top = current_page->AllocationTop();
1037
1035
      if (current_page == top_page) {
1038
1036
        ASSERT(top == allocation_info_.top);
1039
1037
        // The next page will be above the allocation top.
1040
1038
        above_allocation_top = true;
1041
 
      } else {
1042
 
        ASSERT(top == PageAllocationLimit(current_page));
1043
1039
      }
1044
1040
 
1045
1041
      // It should be packed with objects from the bottom to the top.
1060
1056
        object->Verify();
1061
1057
 
1062
1058
        // All the interior pointers should be contained in the heap and
1063
 
        // have their remembered set bits set if required as determined
1064
 
        // by the visitor.
 
1059
        // have page regions covering intergenerational references should be
 
1060
        // marked dirty.
1065
1061
        int size = object->Size();
1066
1062
        object->IterateBody(map->instance_type(), size, visitor);
1067
1063
 
1120
1116
 
1121
1117
  start_ = start;
1122
1118
  address_mask_ = ~(size - 1);
1123
 
  object_mask_ = address_mask_ | kHeapObjectTag;
 
1119
  object_mask_ = address_mask_ | kHeapObjectTagMask;
1124
1120
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1125
1121
 
1126
1122
  allocation_info_.top = to_space_.low();
1324
1320
 
1325
1321
  start_ = start;
1326
1322
  address_mask_ = ~(maximum_capacity - 1);
1327
 
  object_mask_ = address_mask_ | kHeapObjectTag;
 
1323
  object_mask_ = address_mask_ | kHeapObjectTagMask;
1328
1324
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1329
1325
  age_mark_ = start_;
1330
1326
 
1463
1459
      CASE(STORE_IC);
1464
1460
      CASE(KEYED_STORE_IC);
1465
1461
      CASE(CALL_IC);
 
1462
      CASE(KEYED_CALL_IC);
1466
1463
      CASE(BINARY_OP_IC);
1467
1464
    }
1468
1465
  }
1634
1631
  // If the block is too small (eg, one or two words) to hold both a size
1635
1632
  // field and a next pointer, we give it a filler map that gives it the
1636
1633
  // correct size.
1637
 
  if (size_in_bytes > ByteArray::kAlignedSize) {
 
1634
  if (size_in_bytes > ByteArray::kHeaderSize) {
1638
1635
    set_map(Heap::raw_unchecked_byte_array_map());
1639
1636
    // Can't use ByteArray::cast because it fails during deserialization.
1640
1637
    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
1831
1828
 
1832
1829
void FixedSizeFreeList::Reset() {
1833
1830
  available_ = 0;
1834
 
  head_ = NULL;
 
1831
  head_ = tail_ = NULL;
1835
1832
}
1836
1833
 
1837
1834
 
1843
1840
  ASSERT(!MarkCompactCollector::IsCompacting());
1844
1841
  FreeListNode* node = FreeListNode::FromAddress(start);
1845
1842
  node->set_size(object_size_);
1846
 
  node->set_next(head_);
1847
 
  head_ = node->address();
 
1843
  node->set_next(NULL);
 
1844
  if (head_ == NULL) {
 
1845
    tail_ = head_ = node->address();
 
1846
  } else {
 
1847
    FreeListNode::FromAddress(tail_)->set_next(node->address());
 
1848
    tail_ = node->address();
 
1849
  }
1848
1850
  available_ += object_size_;
1849
1851
}
1850
1852
 
1907
1909
    Page* p = it.next();
1908
1910
    // Space below the relocation pointer is allocated.
1909
1911
    computed_size +=
1910
 
        static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
 
1912
        static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
1911
1913
    if (it.has_next()) {
1912
 
      // Free the space at the top of the page.  We cannot use
1913
 
      // p->mc_relocation_top after the call to Free (because Free will clear
1914
 
      // remembered set bits).
 
1914
      // Free the space at the top of the page.
1915
1915
      int extra_size =
1916
 
          static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
 
1916
          static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
1917
1917
      if (extra_size > 0) {
1918
 
        int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
 
1918
        int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
 
1919
                                           extra_size);
1919
1920
        // The bytes we have just "freed" to add to the free list were
1920
1921
        // already accounted as available.
1921
1922
        accounting_stats_.WasteBytes(wasted_bytes);
1963
1964
 
1964
1965
  // Clean them up.
1965
1966
  do {
1966
 
    first->ClearRSet();
 
1967
    first->InvalidateWatermark(true);
 
1968
    first->SetAllocationWatermark(first->ObjectAreaStart());
 
1969
    first->SetCachedAllocationWatermark(first->ObjectAreaStart());
 
1970
    first->SetRegionMarks(Page::kAllRegionsCleanMarks);
1967
1971
    first = first->next_page();
1968
1972
  } while (first != NULL);
1969
1973
 
2003
2007
        // Current allocation top points to a page which is now in the middle
2004
2008
        // of page list. We should move allocation top forward to the new last
2005
2009
        // used page so various object iterators will continue to work properly.
 
2010
        last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
2006
2011
 
2007
2012
        int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2008
2013
                                             last_in_use->AllocationTop());
2035
2040
          int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2036
2041
                                               p->ObjectAreaStart());
2037
2042
 
 
2043
          p->SetAllocationWatermark(p->ObjectAreaStart());
2038
2044
          Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
2039
2045
        }
2040
2046
      }
2066
2072
    if (!reserved_page->is_valid()) return false;
2067
2073
  }
2068
2074
  ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
 
2075
  TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
2069
2076
  SetAllocationInfo(&allocation_info_,
2070
2077
                    TopPageOf(allocation_info_)->next_page());
2071
2078
  return true;
2100
2107
    accounting_stats_.WasteBytes(wasted_bytes);
2101
2108
    if (!result->IsFailure()) {
2102
2109
      accounting_stats_.AllocateBytes(size_in_bytes);
2103
 
      return HeapObject::cast(result);
 
2110
 
 
2111
      HeapObject* obj = HeapObject::cast(result);
 
2112
      Page* p = Page::FromAddress(obj->address());
 
2113
 
 
2114
      if (obj->address() >= p->AllocationWatermark()) {
 
2115
        // There should be no hole between the allocation watermark
 
2116
        // and allocated object address.
 
2117
        // Memory above the allocation watermark was not swept and
 
2118
        // might contain garbage pointers to new space.
 
2119
        ASSERT(obj->address() == p->AllocationWatermark());
 
2120
        p->SetAllocationWatermark(obj->address() + size_in_bytes);
 
2121
      }
 
2122
 
 
2123
      return obj;
2104
2124
    }
2105
2125
  }
2106
2126
 
2123
2143
 
2124
2144
 
2125
2145
void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
 
2146
  current_page->SetAllocationWatermark(allocation_info_.top);
2126
2147
  int free_size =
2127
2148
      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2128
2149
  if (free_size > 0) {
2133
2154
 
2134
2155
 
2135
2156
void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
 
2157
  current_page->SetAllocationWatermark(allocation_info_.top);
2136
2158
  int free_size =
2137
2159
      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2138
2160
  // In the fixed space free list all the free list items have the right size.
2152
2174
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2153
2175
                                         int size_in_bytes) {
2154
2176
  ASSERT(current_page->next_page()->is_valid());
 
2177
  Page* next_page = current_page->next_page();
 
2178
  next_page->ClearGCFields();
2155
2179
  PutRestOfCurrentPageOnFreeList(current_page);
2156
 
  SetAllocationInfo(&allocation_info_, current_page->next_page());
 
2180
  SetAllocationInfo(&allocation_info_, next_page);
2157
2181
  return AllocateLinearly(&allocation_info_, size_in_bytes);
2158
2182
}
2159
2183
 
2283
2307
      }
2284
2308
 
2285
2309
      ASSERT(code->instruction_start() <= prev_pc &&
2286
 
             prev_pc <= code->relocation_start());
2287
 
      delta += static_cast<int>(code->relocation_start() - prev_pc);
 
2310
             prev_pc <= code->instruction_end());
 
2311
      delta += static_cast<int>(code->instruction_end() - prev_pc);
2288
2312
      EnterComment("NoComment", delta);
2289
2313
    }
2290
2314
  }
2296
2320
  PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
2297
2321
         Capacity(), Waste(), Available(), pct);
2298
2322
 
2299
 
  // Report remembered set statistics.
2300
 
  int rset_marked_pointers = 0;
2301
 
  int rset_marked_arrays = 0;
2302
 
  int rset_marked_array_elements = 0;
2303
 
  int cross_gen_pointers = 0;
2304
 
  int cross_gen_array_elements = 0;
2305
 
 
2306
 
  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2307
 
  while (page_it.has_next()) {
2308
 
    Page* p = page_it.next();
2309
 
 
2310
 
    for (Address rset_addr = p->RSetStart();
2311
 
         rset_addr < p->RSetEnd();
2312
 
         rset_addr += kIntSize) {
2313
 
      int rset = Memory::int_at(rset_addr);
2314
 
      if (rset != 0) {
2315
 
        // Bits were set
2316
 
        int intoff =
2317
 
            static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
2318
 
        int bitoff = 0;
2319
 
        for (; bitoff < kBitsPerInt; ++bitoff) {
2320
 
          if ((rset & (1 << bitoff)) != 0) {
2321
 
            int bitpos = intoff*kBitsPerByte + bitoff;
2322
 
            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2323
 
            Object** obj = reinterpret_cast<Object**>(slot);
2324
 
            if (*obj == Heap::raw_unchecked_fixed_array_map()) {
2325
 
              rset_marked_arrays++;
2326
 
              FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
2327
 
 
2328
 
              rset_marked_array_elements += fa->length();
2329
 
              // Manually inline FixedArray::IterateBody
2330
 
              Address elm_start = slot + FixedArray::kHeaderSize;
2331
 
              Address elm_stop = elm_start + fa->length() * kPointerSize;
2332
 
              for (Address elm_addr = elm_start;
2333
 
                   elm_addr < elm_stop; elm_addr += kPointerSize) {
2334
 
                // Filter non-heap-object pointers
2335
 
                Object** elm_p = reinterpret_cast<Object**>(elm_addr);
2336
 
                if (Heap::InNewSpace(*elm_p))
2337
 
                  cross_gen_array_elements++;
2338
 
              }
2339
 
            } else {
2340
 
              rset_marked_pointers++;
2341
 
              if (Heap::InNewSpace(*obj))
2342
 
                cross_gen_pointers++;
2343
 
            }
2344
 
          }
2345
 
        }
2346
 
      }
2347
 
    }
2348
 
  }
2349
 
 
2350
 
  pct = rset_marked_pointers == 0 ?
2351
 
        0 : cross_gen_pointers * 100 / rset_marked_pointers;
2352
 
  PrintF("  rset-marked pointers %d, to-new-space %d (%%%d)\n",
2353
 
            rset_marked_pointers, cross_gen_pointers, pct);
2354
 
  PrintF("  rset_marked arrays %d, ", rset_marked_arrays);
2355
 
  PrintF("  elements %d, ", rset_marked_array_elements);
2356
 
  pct = rset_marked_array_elements == 0 ? 0
2357
 
           : cross_gen_array_elements * 100 / rset_marked_array_elements;
2358
 
  PrintF("  pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
2359
 
  PrintF("  total rset-marked bits %d\n",
2360
 
            (rset_marked_pointers + rset_marked_arrays));
2361
 
  pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
2362
 
        : (cross_gen_pointers + cross_gen_array_elements) * 100 /
2363
 
          (rset_marked_pointers + rset_marked_array_elements);
2364
 
  PrintF("  total rset pointers %d, true cross generation ones %d (%%%d)\n",
2365
 
         (rset_marked_pointers + rset_marked_array_elements),
2366
 
         (cross_gen_pointers + cross_gen_array_elements),
2367
 
         pct);
2368
 
 
2369
2323
  ClearHistograms();
2370
2324
  HeapObjectIterator obj_it(this);
2371
2325
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2372
2326
    CollectHistogramInfo(obj);
2373
2327
  ReportHistogram(true);
2374
2328
}
2375
 
 
2376
 
 
2377
 
// Dump the range of remembered set words between [start, end) corresponding
2378
 
// to the pointers starting at object_p.  The allocation_top is an object
2379
 
// pointer which should not be read past.  This is important for large object
2380
 
// pages, where some bits in the remembered set range do not correspond to
2381
 
// allocated addresses.
2382
 
static void PrintRSetRange(Address start, Address end, Object** object_p,
2383
 
                           Address allocation_top) {
2384
 
  Address rset_address = start;
2385
 
 
2386
 
  // If the range starts on an odd numbered word (eg, for large object extra
2387
 
  // remembered set ranges), print some spaces.
2388
 
  if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
2389
 
    PrintF("                                    ");
2390
 
  }
2391
 
 
2392
 
  // Loop over all the words in the range.
2393
 
  while (rset_address < end) {
2394
 
    uint32_t rset_word = Memory::uint32_at(rset_address);
2395
 
    int bit_position = 0;
2396
 
 
2397
 
    // Loop over all the bits in the word.
2398
 
    while (bit_position < kBitsPerInt) {
2399
 
      if (object_p == reinterpret_cast<Object**>(allocation_top)) {
2400
 
        // Print a bar at the allocation pointer.
2401
 
        PrintF("|");
2402
 
      } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
2403
 
        // Do not dereference object_p past the allocation pointer.
2404
 
        PrintF("#");
2405
 
      } else if ((rset_word & (1 << bit_position)) == 0) {
2406
 
        // Print a dot for zero bits.
2407
 
        PrintF(".");
2408
 
      } else if (Heap::InNewSpace(*object_p)) {
2409
 
        // Print an X for one bits for pointers to new space.
2410
 
        PrintF("X");
2411
 
      } else {
2412
 
        // Print a circle for one bits for pointers to old space.
2413
 
        PrintF("o");
2414
 
      }
2415
 
 
2416
 
      // Print a space after every 8th bit except the last.
2417
 
      if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
2418
 
        PrintF(" ");
2419
 
      }
2420
 
 
2421
 
      // Advance to next bit.
2422
 
      bit_position++;
2423
 
      object_p++;
2424
 
    }
2425
 
 
2426
 
    // Print a newline after every odd numbered word, otherwise a space.
2427
 
    if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
2428
 
      PrintF("\n");
2429
 
    } else {
2430
 
      PrintF(" ");
2431
 
    }
2432
 
 
2433
 
    // Advance to next remembered set word.
2434
 
    rset_address += kIntSize;
2435
 
  }
2436
 
}
2437
 
 
2438
 
 
2439
 
void PagedSpace::DoPrintRSet(const char* space_name) {
2440
 
  PageIterator it(this, PageIterator::PAGES_IN_USE);
2441
 
  while (it.has_next()) {
2442
 
    Page* p = it.next();
2443
 
    PrintF("%s page 0x%x:\n", space_name, p);
2444
 
    PrintRSetRange(p->RSetStart(), p->RSetEnd(),
2445
 
                   reinterpret_cast<Object**>(p->ObjectAreaStart()),
2446
 
                   p->AllocationTop());
2447
 
    PrintF("\n");
2448
 
  }
2449
 
}
2450
 
 
2451
 
 
2452
 
void OldSpace::PrintRSet() { DoPrintRSet("old"); }
2453
2329
#endif
2454
2330
 
2455
2331
// -----------------------------------------------------------------------------
2499
2375
    if (it.has_next()) {
2500
2376
      accounting_stats_.WasteBytes(
2501
2377
          static_cast<int>(page->ObjectAreaEnd() - page_top));
 
2378
      page->SetAllocationWatermark(page_top);
2502
2379
    }
2503
2380
  }
2504
2381
 
2528
2405
    Object* result = free_list_.Allocate();
2529
2406
    if (!result->IsFailure()) {
2530
2407
      accounting_stats_.AllocateBytes(size_in_bytes);
2531
 
      return HeapObject::cast(result);
 
2408
      HeapObject* obj = HeapObject::cast(result);
 
2409
      Page* p = Page::FromAddress(obj->address());
 
2410
 
 
2411
      if (obj->address() >= p->AllocationWatermark()) {
 
2412
        // There should be no hole between the allocation watermark
 
2413
        // and allocated object address.
 
2414
        // Memory above the allocation watermark was not swept and
 
2415
        // might contain garbage pointers to new space.
 
2416
        ASSERT(obj->address() == p->AllocationWatermark());
 
2417
        p->SetAllocationWatermark(obj->address() + size_in_bytes);
 
2418
      }
 
2419
 
 
2420
      return obj;
2532
2421
    }
2533
2422
  }
2534
2423
 
2558
2447
  ASSERT(current_page->next_page()->is_valid());
2559
2448
  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
2560
2449
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
 
2450
  Page* next_page = current_page->next_page();
 
2451
  next_page->ClearGCFields();
 
2452
  current_page->SetAllocationWatermark(allocation_info_.top);
2561
2453
  accounting_stats_.WasteBytes(page_extra_);
2562
 
  SetAllocationInfo(&allocation_info_, current_page->next_page());
 
2454
  SetAllocationInfo(&allocation_info_, next_page);
2563
2455
  return AllocateLinearly(&allocation_info_, size_in_bytes);
2564
2456
}
2565
2457
 
2570
2462
  PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
2571
2463
         Capacity(), Waste(), Available(), pct);
2572
2464
 
2573
 
  // Report remembered set statistics.
2574
 
  int rset_marked_pointers = 0;
2575
 
  int cross_gen_pointers = 0;
2576
 
 
2577
 
  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
2578
 
  while (page_it.has_next()) {
2579
 
    Page* p = page_it.next();
2580
 
 
2581
 
    for (Address rset_addr = p->RSetStart();
2582
 
         rset_addr < p->RSetEnd();
2583
 
         rset_addr += kIntSize) {
2584
 
      int rset = Memory::int_at(rset_addr);
2585
 
      if (rset != 0) {
2586
 
        // Bits were set
2587
 
        int intoff =
2588
 
            static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
2589
 
        int bitoff = 0;
2590
 
        for (; bitoff < kBitsPerInt; ++bitoff) {
2591
 
          if ((rset & (1 << bitoff)) != 0) {
2592
 
            int bitpos = intoff*kBitsPerByte + bitoff;
2593
 
            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
2594
 
            Object** obj = reinterpret_cast<Object**>(slot);
2595
 
            rset_marked_pointers++;
2596
 
            if (Heap::InNewSpace(*obj))
2597
 
              cross_gen_pointers++;
2598
 
          }
2599
 
        }
2600
 
      }
2601
 
    }
2602
 
  }
2603
 
 
2604
 
  pct = rset_marked_pointers == 0 ?
2605
 
          0 : cross_gen_pointers * 100 / rset_marked_pointers;
2606
 
  PrintF("  rset-marked pointers %d, to-new-space %d (%%%d)\n",
2607
 
            rset_marked_pointers, cross_gen_pointers, pct);
2608
 
 
2609
2465
  ClearHistograms();
2610
2466
  HeapObjectIterator obj_it(this);
2611
2467
  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2612
2468
    CollectHistogramInfo(obj);
2613
2469
  ReportHistogram(false);
2614
2470
}
2615
 
 
2616
 
 
2617
 
void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
2618
2471
#endif
2619
2472
 
2620
2473
 
2793
2646
  chunk->set_size(chunk_size);
2794
2647
  first_chunk_ = chunk;
2795
2648
 
2796
 
  // Set the object address and size in the page header and clear its
2797
 
  // remembered set.
 
2649
  // Initialize page header.
2798
2650
  Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2799
2651
  Address object_address = page->ObjectAreaStart();
2800
2652
  // Clear the low order bit of the second word in the page to flag it as a
2802
2654
  // low order bit should already be clear.
2803
2655
  ASSERT((chunk_size & 0x1) == 0);
2804
2656
  page->SetIsLargeObjectPage(true);
2805
 
  page->ClearRSet();
2806
 
  int extra_bytes = requested_size - object_size;
2807
 
  if (extra_bytes > 0) {
2808
 
    // The extra memory for the remembered set should be cleared.
2809
 
    memset(object_address + object_size, 0, extra_bytes);
2810
 
  }
2811
 
 
 
2657
  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
2812
2658
  return HeapObject::FromAddress(object_address);
2813
2659
}
2814
2660
 
2823
2669
 
2824
2670
Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
2825
2671
  ASSERT(0 < size_in_bytes);
2826
 
  int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
2827
 
  return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
 
2672
  return AllocateRawInternal(size_in_bytes,
2828
2673
                             size_in_bytes,
2829
2674
                             NOT_EXECUTABLE);
2830
2675
}
2851
2696
  return Failure::Exception();
2852
2697
}
2853
2698
 
2854
 
 
2855
 
void LargeObjectSpace::ClearRSet() {
2856
 
  ASSERT(Page::is_rset_in_use());
2857
 
 
2858
 
  LargeObjectIterator it(this);
2859
 
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
2860
 
    // We only have code, sequential strings, or fixed arrays in large
2861
 
    // object space, and only fixed arrays need remembered set support.
2862
 
    if (object->IsFixedArray()) {
2863
 
      // Clear the normal remembered set region of the page;
2864
 
      Page* page = Page::FromAddress(object->address());
2865
 
      page->ClearRSet();
2866
 
 
2867
 
      // Clear the extra remembered set.
2868
 
      int size = object->Size();
2869
 
      int extra_rset_bytes = ExtraRSetBytesFor(size);
2870
 
      memset(object->address() + size, 0, extra_rset_bytes);
2871
 
    }
2872
 
  }
2873
 
}
2874
 
 
2875
 
 
2876
 
void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
2877
 
  ASSERT(Page::is_rset_in_use());
2878
 
 
2879
 
  static void* lo_rset_histogram = StatsTable::CreateHistogram(
2880
 
      "V8.RSetLO",
2881
 
      0,
2882
 
      // Keeping this histogram's buckets the same as the paged space histogram.
2883
 
      Page::kObjectAreaSize / kPointerSize,
2884
 
      30);
2885
 
 
 
2699
void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
2886
2700
  LargeObjectIterator it(this);
2887
2701
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
2888
2702
    // We only have code, sequential strings, or fixed arrays in large
2889
2703
    // object space, and only fixed arrays can possibly contain pointers to
2890
2704
    // the young generation.
2891
2705
    if (object->IsFixedArray()) {
2892
 
      // Iterate the normal page remembered set range.
2893
2706
      Page* page = Page::FromAddress(object->address());
2894
 
      Address object_end = object->address() + object->Size();
2895
 
      int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
2896
 
                                         Min(page->ObjectAreaEnd(), object_end),
2897
 
                                         page->RSetStart(),
2898
 
                                         copy_object_func);
2899
 
 
2900
 
      // Iterate the extra array elements.
2901
 
      if (object_end > page->ObjectAreaEnd()) {
2902
 
        count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
2903
 
                                        object_end, copy_object_func);
2904
 
      }
2905
 
      if (lo_rset_histogram != NULL) {
2906
 
        StatsTable::AddHistogramSample(lo_rset_histogram, count);
 
2707
      uint32_t marks = page->GetRegionMarks();
 
2708
      uint32_t newmarks = Page::kAllRegionsCleanMarks;
 
2709
 
 
2710
      if (marks != Page::kAllRegionsCleanMarks) {
 
2711
        // For a large page a single dirty mark corresponds to several
 
2712
        // regions (modulo 32). So we treat a large page as a sequence of
 
2713
        // normal pages of size Page::kPageSize having same dirty marks
 
2714
        // and subsequently iterate dirty regions on each of these pages.
 
2715
        Address start = object->address();
 
2716
        Address end = page->ObjectAreaEnd();
 
2717
        Address object_end = start + object->Size();
 
2718
 
 
2719
        // Iterate regions of the first normal page covering object.
 
2720
        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
 
2721
        newmarks |=
 
2722
            Heap::IterateDirtyRegions(marks >> first_region_number,
 
2723
                                      start,
 
2724
                                      end,
 
2725
                                      &Heap::IteratePointersInDirtyRegion,
 
2726
                                      copy_object) << first_region_number;
 
2727
 
 
2728
        start = end;
 
2729
        end = start + Page::kPageSize;
 
2730
        while (end <= object_end) {
 
2731
          // Iterate next 32 regions.
 
2732
          newmarks |=
 
2733
              Heap::IterateDirtyRegions(marks,
 
2734
                                        start,
 
2735
                                        end,
 
2736
                                        &Heap::IteratePointersInDirtyRegion,
 
2737
                                        copy_object);
 
2738
          start = end;
 
2739
          end = start + Page::kPageSize;
 
2740
        }
 
2741
 
 
2742
        if (start != object_end) {
 
2743
          // Iterate the last piece of an object which is less than
 
2744
          // Page::kPageSize.
 
2745
          newmarks |=
 
2746
              Heap::IterateDirtyRegions(marks,
 
2747
                                        start,
 
2748
                                        object_end,
 
2749
                                        &Heap::IteratePointersInDirtyRegion,
 
2750
                                        copy_object);
 
2751
        }
 
2752
 
 
2753
        page->SetRegionMarks(newmarks);
2907
2754
      }
2908
2755
    }
2909
2756
  }
2995
2842
    } else if (object->IsFixedArray()) {
2996
2843
      // We loop over fixed arrays ourselves, rather then using the visitor,
2997
2844
      // because the visitor doesn't support the start/offset iteration
2998
 
      // needed for IsRSetSet.
 
2845
      // needed for IsRegionDirty.
2999
2846
      FixedArray* array = FixedArray::cast(object);
3000
2847
      for (int j = 0; j < array->length(); j++) {
3001
2848
        Object* element = array->get(j);
3004
2851
          ASSERT(Heap::Contains(element_object));
3005
2852
          ASSERT(element_object->map()->IsMap());
3006
2853
          if (Heap::InNewSpace(element_object)) {
3007
 
            ASSERT(Page::IsRSetSet(object->address(),
3008
 
                                   FixedArray::kHeaderSize + j * kPointerSize));
 
2854
            Address array_addr = object->address();
 
2855
            Address element_addr = array_addr + FixedArray::kHeaderSize +
 
2856
                j * kPointerSize;
 
2857
 
 
2858
            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
3009
2859
          }
3010
2860
        }
3011
2861
      }
3046
2896
    }
3047
2897
  }
3048
2898
}
3049
 
 
3050
 
 
3051
 
void LargeObjectSpace::PrintRSet() {
3052
 
  LargeObjectIterator it(this);
3053
 
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3054
 
    if (object->IsFixedArray()) {
3055
 
      Page* page = Page::FromAddress(object->address());
3056
 
 
3057
 
      Address allocation_top = object->address() + object->Size();
3058
 
      PrintF("large page 0x%x:\n", page);
3059
 
      PrintRSetRange(page->RSetStart(), page->RSetEnd(),
3060
 
                     reinterpret_cast<Object**>(object->address()),
3061
 
                     allocation_top);
3062
 
      int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
3063
 
      int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
3064
 
                                    kBitsPerInt);
3065
 
      PrintF("------------------------------------------------------------"
3066
 
             "-----------\n");
3067
 
      PrintRSetRange(allocation_top,
3068
 
                     allocation_top + extra_rset_bits / kBitsPerByte,
3069
 
                     reinterpret_cast<Object**>(object->address()
3070
 
                                                + Page::kObjectAreaSize),
3071
 
                     allocation_top);
3072
 
      PrintF("\n");
3073
 
    }
3074
 
  }
3075
 
}
3076
2899
#endif  // DEBUG
3077
2900
 
3078
2901
} }  // namespace v8::internal