/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h
#include "BlockAllocator.h"
27
#include "HeapBlock.h"
30
#include <wtf/Bitmap.h>
31
#include <wtf/DataLog.h>
32
#include <wtf/DoublyLinkedList.h>
33
#include <wtf/HashFunctions.h>
34
#include <wtf/PageAllocationAligned.h>
35
#include <wtf/StdLibExtras.h>
36
#include <wtf/Vector.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do {                    \
        dataLogF(                                                      \
            "%s:%d %s: block %s = %p, %d\n",                           \
            __FILE__, __LINE__, __FUNCTION__,                          \
            #block, (block), (block)->m_state);                        \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif

namespace JSC {

    class Heap;
    class JSCell;
    class MarkedAllocator;

    typedef uintptr_t Bits;

    static const size_t MB = 1024 * 1024;

    bool isZapped(const JSCell*);

    // A marked block is a page-aligned container for heap-allocated objects.
    // Objects are allocated within cells of the marked block. For a given
    // marked block, all cells have the same size. Objects smaller than the
    // cell size may be allocated in the marked block, in which case the
    // allocation suffers from internal fragmentation: wasted space whose
    // size is equal to the difference between the cell size and the object
    // size.

    class MarkedBlock : public HeapBlock<MarkedBlock> {
    public:
        // Ensure natural alignment for native types whilst recognizing that the smallest
        // object the heap will commonly allocate is four words.
        static const size_t atomSize = 4 * sizeof(void*);
        static const size_t atomShift = 5;
        static const size_t blockSize = 64 * KB;
        static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

        static const size_t atomsPerBlock = blockSize / atomSize; // ~0.4% overhead
        static const size_t atomMask = atomsPerBlock - 1;
        static const int cardShift = 8; // This is log2 of bytes per card.
        static const size_t bytesPerCard = 1 << cardShift;
        static const int cardCount = blockSize / bytesPerCard;
        static const int cardMask = cardCount - 1;
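
        // The card table (m_cards below) divides each 64 KB block into 256-byte
        // "cards". setDirtyObject() marks the card containing a written-to cell,
        // and gatherDirtyCells() later rescans only the cards that were marked,
        // so the collector does not have to walk the whole block.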

        struct FreeCell {
            FreeCell* next;
        };

        struct FreeList {
            FreeCell* head;
            size_t bytes;

            FreeList();
            FreeList(FreeCell*, size_t);
        };

        struct VoidFunctor {
            typedef void ReturnType;
            void returnValue() { }
        };

        class CountFunctor {
        public:
            typedef size_t ReturnType;

            CountFunctor() : m_count(0) { }
            void count(size_t count) { m_count += count; }
            ReturnType returnValue() { return m_count; }

        private:
            ReturnType m_count;
        };
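
        // A FreeList is the result of sweeping a block in SweepToFreeList mode:
        // head points at a singly-linked chain of reusable cells threaded through
        // the block itself, and bytes records how much space that chain covers.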

        enum DestructorType { None, ImmortalStructure, Normal };
        static MarkedBlock* create(DeadBlock*, MarkedAllocator*, size_t cellSize, DestructorType);

        static bool isAtomAligned(const void*);
        static MarkedBlock* blockFor(const void*);
        static size_t firstAtom();

        void lastChanceToFinalize();

        MarkedAllocator* allocator() const;
        Heap* heap() const;
        JSGlobalData* globalData() const;
        WeakSet& weakSet();

        enum SweepMode { SweepOnly, SweepToFreeList };
        FreeList sweep(SweepMode = SweepOnly);

        void shrink();

        void visitWeakSet(HeapRootVisitor&);
        void reapWeakSet();

        // While allocating from a free list, MarkedBlock temporarily has bogus
        // cell liveness data. To restore accurate cell liveness data, call one
        // of these functions:
        void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
        void canonicalizeCellLivenessData(const FreeList&);

        void clearMarks();
        size_t markCount();
        bool isEmpty();

        size_t cellSize();
        DestructorType destructorType();

        size_t size();
        size_t capacity();

        bool isMarked(const void*);
        bool testAndSetMarked(const void*);
        bool isLive(const JSCell*);
        bool isLiveCell(const void*);
        void setMarked(const void*);
        void clearMarked(const void*);

        bool isNewlyAllocated(const void*);
        void setNewlyAllocated(const void*);
        void clearNewlyAllocated(const void*);

        bool needsSweeping();

        void setDirtyObject(const void* atom)
        {
            ASSERT(MarkedBlock::blockFor(atom) == this);
            m_cards.markCardForAtom(atom);
        }

        uint8_t* addressOfCardFor(const void* atom)
        {
            ASSERT(MarkedBlock::blockFor(atom) == this);
            return &m_cards.cardForAtom(atom);
        }

        static inline size_t offsetOfCards()
        {
            return OBJECT_OFFSETOF(MarkedBlock, m_cards);
        }

        static inline size_t offsetOfMarks()
        {
            return OBJECT_OFFSETOF(MarkedBlock, m_marks);
        }
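
        // These offsets are exposed presumably so that generated code can address
        // the card table and mark bitmap directly, without calling a helper.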

        typedef Vector<JSCell*, 32> DirtyCellVector;
        inline void gatherDirtyCells(DirtyCellVector&);
        template <int size> inline void gatherDirtyCellsWithSize(DirtyCellVector&);

        template <typename Functor> void forEachCell(Functor&);
        template <typename Functor> void forEachLiveCell(Functor&);
        template <typename Functor> void forEachDeadCell(Functor&);

    private:
        static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.

        enum BlockState { New, FreeListed, Allocated, Marked };
        template<DestructorType> FreeList sweepHelper(SweepMode = SweepOnly);

        typedef char Atom[atomSize];

        MarkedBlock(Region*, MarkedAllocator*, size_t cellSize, DestructorType);
        Atom* atoms();
        size_t atomNumber(const void*);
        void callDestructor(JSCell*);
        template<BlockState, SweepMode, DestructorType> FreeList specializedSweep();

        CardSet<bytesPerCard, blockSize> m_cards;

        size_t m_atomsPerCell;
        size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
        WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
#else
        WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
#endif
        OwnPtr<WTF::Bitmap<atomsPerBlock> > m_newlyAllocated;

        DestructorType m_destructorType;
        MarkedAllocator* m_allocator;
        BlockState m_state;
        WeakSet m_weakSet;
    };
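
    // Illustrative sketch (not part of the original header): a functor passed to
    // forEachCell/forEachLiveCell/forEachDeadCell only needs operator()(JSCell*);
    // the ReturnType/returnValue() convention mirrors VoidFunctor/CountFunctor:
    //
    //     struct CountLiveCells : MarkedBlock::CountFunctor {
    //         void operator()(JSCell*) { count(1); }
    //     };
    //
    //     CountLiveCells functor;
    //     block->forEachLiveCell(functor);
    //     size_t liveCells = functor.returnValue();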

    inline MarkedBlock::FreeList::FreeList()
        : head(0)
        , bytes(0)
    {
    }

    inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
        : head(head)
        , bytes(bytes)
    {
    }

    inline size_t MarkedBlock::firstAtom()
    {
        return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
    }

    inline MarkedBlock::Atom* MarkedBlock::atoms()
    {
        return reinterpret_cast<Atom*>(this);
    }

    inline bool MarkedBlock::isAtomAligned(const void* p)
    {
        return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
    }

    inline MarkedBlock* MarkedBlock::blockFor(const void* p)
    {
        return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
    }
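
    // Because every MarkedBlock sits on a blockSize (64 KB) boundary, the owning
    // block of any cell pointer can be recovered by masking off the low bits
    // (blockFor), and a cell's index within the block is its byte offset divided
    // by atomSize (atomNumber). For example, with 32-byte atoms (64-bit builds),
    // a cell at block + 0x140 is atom number 0x140 / 32 = 10.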

    inline void MarkedBlock::lastChanceToFinalize()
    {
        m_weakSet.lastChanceToFinalize();

        clearMarks();
        sweep();
    }

    inline MarkedAllocator* MarkedBlock::allocator() const
    {
        return m_allocator;
    }

    inline Heap* MarkedBlock::heap() const
    {
        return m_weakSet.heap();
    }

    inline JSGlobalData* MarkedBlock::globalData() const
    {
        return m_weakSet.globalData();
    }

    inline WeakSet& MarkedBlock::weakSet()
    {
        return m_weakSet;
    }

    inline void MarkedBlock::shrink()
    {
        m_weakSet.shrink();
    }

    inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
    {
        m_weakSet.visit(heapRootVisitor);
    }

    inline void MarkedBlock::reapWeakSet()
    {
        m_weakSet.reap();
    }

    inline void MarkedBlock::didConsumeFreeList()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state == FreeListed);
        m_state = Allocated;
    }

    inline void MarkedBlock::clearMarks()
    {
        HEAP_LOG_BLOCK_STATE_TRANSITION(this);

        ASSERT(m_state != New && m_state != FreeListed);
        m_marks.clearAll();
        m_newlyAllocated.clear();

        // This will become true at the end of the mark phase. We set it now to
        // avoid an extra pass to do so later.
        m_state = Marked;
    }

    inline size_t MarkedBlock::markCount()
    {
        return m_marks.count();
    }

    inline bool MarkedBlock::isEmpty()
    {
        return m_marks.isEmpty() && m_weakSet.isEmpty() && (!m_newlyAllocated || m_newlyAllocated->isEmpty());
    }

    inline size_t MarkedBlock::cellSize()
    {
        return m_atomsPerCell * atomSize;
    }

    inline MarkedBlock::DestructorType MarkedBlock::destructorType()
    {
        return m_destructorType;
    }

    inline size_t MarkedBlock::size()
    {
        return markCount() * cellSize();
    }

    inline size_t MarkedBlock::capacity()
    {
        return region()->blockSize();
    }

    inline size_t MarkedBlock::atomNumber(const void* p)
    {
        return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
    }

    inline bool MarkedBlock::isMarked(const void* p)
    {
        return m_marks.get(atomNumber(p));
    }

    inline bool MarkedBlock::testAndSetMarked(const void* p)
    {
        return m_marks.concurrentTestAndSet(atomNumber(p));
    }

    inline void MarkedBlock::setMarked(const void* p)
    {
        m_marks.set(atomNumber(p));
    }

    inline void MarkedBlock::clearMarked(const void* p)
    {
        ASSERT(m_marks.get(atomNumber(p)));
        m_marks.clear(atomNumber(p));
    }

    inline bool MarkedBlock::isNewlyAllocated(const void* p)
    {
        return m_newlyAllocated->get(atomNumber(p));
    }

    inline void MarkedBlock::setNewlyAllocated(const void* p)
    {
        m_newlyAllocated->set(atomNumber(p));
    }

    inline void MarkedBlock::clearNewlyAllocated(const void* p)
    {
        m_newlyAllocated->clear(atomNumber(p));
    }

    inline bool MarkedBlock::isLive(const JSCell* cell)
    {
        switch (m_state) {
        case Allocated:
            return true;

        case Marked:
            return m_marks.get(atomNumber(cell)) || (m_newlyAllocated && isNewlyAllocated(cell));

        case New:
        case FreeListed:
            ASSERT_NOT_REACHED();
            return false;
        }

        ASSERT_NOT_REACHED();
        return false;
    }
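
    // Liveness summary: in an Allocated block every cell is considered live; in a
    // Marked block a cell is live if its mark bit is set or if it was allocated
    // since the last collection (tracked by m_newlyAllocated). New and FreeListed
    // blocks have no reliable per-cell liveness data, so they are never queried.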

    inline bool MarkedBlock::isLiveCell(const void* p)
    {
        ASSERT(MarkedBlock::isAtomAligned(p));
        size_t atomNumber = this->atomNumber(p);
        size_t firstAtom = this->firstAtom();
        if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
            return false;

        if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
            return false;

        if (atomNumber >= m_endAtom) // Filters pointers into invalid cells out of the range.
            return false;

        return isLive(static_cast<const JSCell*>(p));
    }

    template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            functor(cell);
        }
    }

    template <typename Functor> inline void MarkedBlock::forEachLiveCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (!isLive(cell))
                continue;

            functor(cell);
        }
    }

    template <typename Functor> inline void MarkedBlock::forEachDeadCell(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
            if (isLive(cell))
                continue;

            functor(cell);
        }
    }

    inline bool MarkedBlock::needsSweeping()
    {
        return m_state == Marked;
    }
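
    // The routines below service the write barrier: they walk the card table that
    // setDirtyObject() populated, clear each dirty card, and collect the marked
    // cells it covers into a DirtyCellVector so the collector can re-visit them.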

    template <int _cellSize> void MarkedBlock::gatherDirtyCellsWithSize(DirtyCellVector& dirtyCells)
    {
        if (m_cards.testAndClear(0)) {
            char* ptr = reinterpret_cast<char*>(&atoms()[firstAtom()]);
            const char* end = reinterpret_cast<char*>(this) + bytesPerCard;
            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += _cellSize;
            }
        }

        const size_t cellOffset = firstAtom() * atomSize % _cellSize;
        for (size_t i = 1; i < m_cards.cardCount; i++) {
            if (!m_cards.testAndClear(i))
                continue;

            char* ptr = reinterpret_cast<char*>(this) + i * bytesPerCard + cellOffset;
            char* end = reinterpret_cast<char*>(this) + (i + 1) * bytesPerCard;
            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += _cellSize;
            }
        }
    }

    void MarkedBlock::gatherDirtyCells(DirtyCellVector& dirtyCells)
    {
        COMPILE_ASSERT((int)m_cards.cardCount == (int)cardCount, MarkedBlockCardCountsMatch);

        ASSERT(m_state != New && m_state != FreeListed);

        // This is an optimisation to avoid having to walk the set of marked
        // blocks twice during GC.
        m_state = Marked;

        size_t cellSize = this->cellSize();
        if (cellSize == 32) {
            gatherDirtyCellsWithSize<32>(dirtyCells);
            return;
        }
        if (cellSize == 64) {
            gatherDirtyCellsWithSize<64>(dirtyCells);
            return;
        }

        const size_t firstCellOffset = firstAtom() * atomSize % cellSize;

        if (m_cards.testAndClear(0)) {
            char* ptr = reinterpret_cast<char*>(this) + firstAtom() * atomSize;
            char* end = reinterpret_cast<char*>(this) + bytesPerCard;
            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += cellSize;
            }
        }
        for (size_t i = 1; i < m_cards.cardCount; i++) {
            if (!m_cards.testAndClear(i))
                continue;

            // Advance ptr to the first cell boundary at or after the start of card i.
            char* ptr = reinterpret_cast<char*>(this) + firstCellOffset + cellSize * ((i * bytesPerCard + cellSize - 1 - firstCellOffset) / cellSize);
            char* end = reinterpret_cast<char*>(this) + std::min((i + 1) * bytesPerCard, m_endAtom * atomSize);
            while (ptr < end) {
                JSCell* cell = reinterpret_cast<JSCell*>(ptr);
                if (isMarked(cell))
                    dirtyCells.append(cell);
                ptr += cellSize;
            }
        }
    }

} // namespace JSC

namespace WTF {

    struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
        static unsigned hash(JSC::MarkedBlock* const& key)
        {
            // Aligned VM regions tend to be monotonically increasing integers,
            // which is a great hash function, but we have to remove the low bits,
            // since they're always zero, which is a terrible hash function!
            return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
        }
    };

    template<> struct DefaultHash<JSC::MarkedBlock*> {
        typedef MarkedBlockHash Hash;
    };

} // namespace WTF

#endif // MarkedBlock_h