void* tryFastMalloc(size_t n)
TryMallocReturnValue tryFastMalloc(size_t n)
ASSERT(!isForbidden());
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
void* result = malloc(n + sizeof(AllocAlignmentInteger));
*static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
result = static_cast<AllocAlignmentInteger*>(result) + 1;
return malloc(n);
void* fastMalloc(size_t n)
ASSERT(!isForbidden());
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
TryMallocReturnValue returnValue = tryFastMalloc(n);
returnValue.getValue(result);
void* result = malloc(n);
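// Illustrative only, not part of this change: a minimal caller-side sketch of the
// TryMallocReturnValue pattern introduced above. It assumes getValue() stores the allocated
// pointer into its argument and returns false when the underlying allocation failed; the
// function and variable names below are hypothetical.
static bool exampleTryAllocateZeroed(size_t byteCount, char*& outBuffer)
{
    TryMallocReturnValue returnValue = tryFastMalloc(byteCount);
    if (!returnValue.getValue(outBuffer))
        return false; // Out of memory; the caller can recover instead of crashing.
    memset(outBuffer, 0, byteCount); // Only touch the buffer after getValue() succeeds.
    return true;
}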
void* tryFastCalloc(size_t n_elements, size_t element_size)
TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size)
ASSERT(!isForbidden());
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
size_t totalBytes = n_elements * element_size;
if ((n_elements > 1 && element_size && (totalBytes / element_size) != n_elements) || std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes)
totalBytes += sizeof(AllocAlignmentInteger);
void* result = malloc(totalBytes);
memset(result, 0, totalBytes);
*static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
result = static_cast<AllocAlignmentInteger*>(result) + 1;
return calloc(n_elements, element_size);
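// Worked example of why the check above catches multiplication overflow (illustrative,
// using 32-bit size_t for the arithmetic): with n_elements = 0x10000 and element_size = 0x10001
// the product wraps to 0x10000, so totalBytes / element_size == 0, which differs from n_elements
// and the request is rejected. The second clause rejects sizes so large that adding
// sizeof(AllocAlignmentInteger) for the validation header would itself wrap.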
void* fastCalloc(size_t n_elements, size_t element_size)
ASSERT(!isForbidden());
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size);
returnValue.getValue(result);
void* result = calloc(n_elements, element_size);
void fastFree(void* p)
ASSERT(!isForbidden());
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
if (*header != Internal::AllocTypeMalloc)
Internal::fastMallocMatchFailed(p);
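// Illustrative layout under FAST_MALLOC_MATCH_VALIDATION (a sketch, not text from this file):
// each block is prefixed with one AllocAlignmentInteger tag and the caller receives a pointer
// just past it, so fastMallocMatchValidationValue(p) can presumably recover the tag as the
// word immediately before p and verify that it still reads Internal::AllocTypeMalloc.
//
//     [ AllocAlignmentInteger tag ][ caller-visible bytes ... ]
//     ^ raw malloc() result        ^ pointer returned to the caller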
void* tryFastRealloc(void* p, size_t n)
TryMallocReturnValue tryFastRealloc(void* p, size_t n)
ASSERT(!isForbidden());
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
if (*header != Internal::AllocTypeMalloc)
Internal::fastMallocMatchFailed(p);
void* result = realloc(header, n + sizeof(AllocAlignmentInteger));
// This should not be needed because the value is already there:
// *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
result = static_cast<AllocAlignmentInteger*>(result) + 1;
return fastMalloc(n);
return realloc(p, n);
void* fastRealloc(void* p, size_t n)
ASSERT(!isForbidden());
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
TryMallocReturnValue returnValue = tryFastRealloc(p, n);
returnValue.getValue(result);
void* result = realloc(p, n);
typedef PackedCache<BITS, uint64_t> CacheType;
#if defined(WTF_CHANGES)
#if PLATFORM(X86_64)
// On all known X86-64 platforms, the upper 16 bits are always unused and therefore
// can be excluded from the PageMap key.
// See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
static const size_t kBitsUnusedOn64Bit = 16;
static const size_t kBitsUnusedOn64Bit = 0;
// A three-level map for 64-bit machines
template <> class MapSelector<64> {
typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
typedef PackedCache<64, uint64_t> CacheType;
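// Worked example (illustrative; assumes this allocator's kPageShift is 12, i.e. 4 KB pages):
// on x86-64 the three-level map then keys on 64 - 12 - 16 = 36 bits of page number, whereas
// without the unused-bits optimization it would have to cover 64 - 12 = 52 bits.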
// A two-level map for 32-bit machines
template <> class MapSelector<32> {
typedef TCMalloc_PageMap2<32 - kPageShift> Type;
typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
// -------------------------------------------------------------------------
// contiguous runs of pages (called a "span").
// -------------------------------------------------------------------------
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
// The central page heap collects spans of memory that have been deleted but are still committed until they are released
// back to the system. We use a background thread to periodically scan the list of free spans and release some back to the
// system. Every 5 seconds, the background thread wakes up and does the following:
// - Check if we needed to commit memory in the last 5 seconds. If so, skip this scavenge because it's a sign that we are short
// of free committed pages and so we should not release them back to the system yet.
// - Otherwise, go through the list of free spans (from largest to smallest) and release up to a fraction of the free committed pages
// back to the system.
// - If the number of free committed pages reaches kMinimumFreeCommittedPageCount, we can stop scavenging and block the
// scavenging thread until the number of free committed pages goes above kMinimumFreeCommittedPageCount again.
// The background thread wakes up every 5 seconds to scavenge as long as there is memory available to return to the system.
static const int kScavengeTimerDelayInSeconds = 5;
// Number of free committed pages that we want to keep around.
static const size_t kMinimumFreeCommittedPageCount = 512;
// During a scavenge, we'll release up to a fraction of the free committed pages.
// We are slightly less aggressive in releasing memory on Windows for performance reasons.
static const int kMaxScavengeAmountFactor = 3;
static const int kMaxScavengeAmountFactor = 2;
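// Worked example of the policy above (illustrative; assumes 4 KB pages, i.e. kPageShift == 12):
// a single scavenge pass decommits at most free_committed_pages_ / kMaxScavengeAmountFactor
// pages, so with 3000 free committed pages it releases up to 1000 pages (about 4 MB) on
// non-Windows ports, and scavenging blocks once free_committed_pages_ drops to
// kMinimumFreeCommittedPageCount (512 pages, about 2 MB).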
class TCMalloc_PageHeap {
DLL_Init(&free_[i].normal);
DLL_Init(&free_[i].returned);
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
pthread_mutex_init(&m_scavengeMutex, 0);
pthread_cond_init(&m_scavengeCondition, 0);
m_scavengeThreadActive = true;
pthread_create(&thread, 0, runScavengerThread, this);
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
void* TCMalloc_PageHeap::runScavengerThread(void* context)
static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
// Without this, Visual Studio will complain that this method does not return a value.
return 0;
void TCMalloc_PageHeap::scavenge()
// If we had to commit memory in the last 5 seconds, it means we don't have enough free committed pages
// for the allocations we are doing. So hold off on releasing memory back to the system.
if (pages_committed_since_last_scavenge_ > 0) {
pages_committed_since_last_scavenge_ = 0;
Length pagesDecommitted = 0;
for (int i = kMaxPages; i >= 0; i--) {
SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
if (!DLL_IsEmpty(&slist->normal)) {
// Release the last span on the normal portion of this list.
Span* s = slist->normal.prev;
// Only decommit up to a fraction of the free committed pages on each pass.
if ((pagesDecommitted + s->length) * kMaxScavengeAmountFactor > free_committed_pages_)
TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
static_cast<size_t>(s->length << kPageShift));
if (!s->decommitted) {
pagesDecommitted += s->length;
s->decommitted = true;
DLL_Prepend(&slist->returned, s);
// We can stop scavenging if the number of free committed pages left is less than or equal to the minimum number we want to keep around.
if (free_committed_pages_ <= kMinimumFreeCommittedPageCount + pagesDecommitted)
pages_committed_since_last_scavenge_ = 0;
ASSERT(free_committed_pages_ >= pagesDecommitted);
free_committed_pages_ -= pagesDecommitted;
inline bool TCMalloc_PageHeap::shouldContinueScavenging() const
return free_committed_pages_ > kMinimumFreeCommittedPageCount;
#endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
inline Span* TCMalloc_PageHeap::New(Length n) {
ASSERT(Check());
Event(span, 'D', span->length);
span->free = 1;
if (span->length < kMaxPages) {
DLL_Prepend(&free_[span->length].normal, span);
if (span->decommitted) {
if (span->length < kMaxPages)
DLL_Prepend(&free_[span->length].returned, span);
DLL_Prepend(&large_.returned, span);
DLL_Prepend(&large_.normal, span);
if (span->length < kMaxPages)
DLL_Prepend(&free_[span->length].normal, span);
DLL_Prepend(&large_.normal, span);
free_pages_ += n;
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
if (span->decommitted) {
// If the merged span is decommitted, that means we decommitted any neighboring spans that were
// committed. Update the free committed pages count.
free_committed_pages_ -= neighboringCommittedSpansLength;
// If the merged span remains committed, add the deleted span's size to the free committed pages count.
free_committed_pages_ += n;
// Make sure the scavenge thread becomes active if we have enough freed pages to release some back to the system.
if (!m_scavengeThreadActive && shouldContinueScavenging())
pthread_cond_signal(&m_scavengeCondition);
IncrementalScavenge(n);
ASSERT(Check());
#if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
// Fast path; not yet time to release memory
scavenge_counter_ -= n;
#define pageheap getPageHeap()
#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
static void sleep(unsigned seconds)
::Sleep(seconds * 1000);
void TCMalloc_PageHeap::scavengerThread()
#if HAVE(PTHREAD_SETNAME_NP)
pthread_setname_np("JavaScriptCore: FastMalloc scavenger");
if (!shouldContinueScavenging()) {
pthread_mutex_lock(&m_scavengeMutex);
m_scavengeThreadActive = false;
// Block until there are enough freed pages to release back to the system.
pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
m_scavengeThreadActive = true;
pthread_mutex_unlock(&m_scavengeMutex);
sleep(kScavengeTimerDelayInSeconds);
SpinLockHolder h(&pageheap_lock);
pageheap->scavenge();
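// Consolidated sketch of the loop the fragments above belong to (illustrative; the enclosing
// loop is elided from this excerpt, so treat the exact shape as an assumption):
//
//     while (1) {
//         if (!shouldContinueScavenging()) {
//             pthread_mutex_lock(&m_scavengeMutex);
//             m_scavengeThreadActive = false;
//             // Block until Delete() signals that enough pages are free again.
//             pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
//             m_scavengeThreadActive = true;
//             pthread_mutex_unlock(&m_scavengeMutex);
//         }
//         sleep(kScavengeTimerDelayInSeconds);
//         {
//             SpinLockHolder h(&pageheap_lock);
//             pageheap->scavenge();
//         }
//     }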
// If TLS is available, we also store a copy
// of the per-thread object in a __thread variable
// since __thread variables are faster to read than pthread_getspecific().
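// A minimal sketch of the idiom described above (the guard macro and variable name are
// assumptions for illustration, not necessarily this file's exact code): the canonical
// per-thread cache lives in pthread-specific storage so a destructor can run at thread exit,
// while a __thread pointer mirrors it for a cheaper read on the fast path.
#if defined(HAVE_TLS)
static __thread TCMalloc_ThreadCache* threadlocal_heap; // fast-path mirror of the pthread-specific value
#endif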
void* calloc(size_t n, size_t elem_size) {
const size_t totalBytes = n * elem_size;
size_t totalBytes = n * elem_size;
// Protect against overflow
if (n > 1 && elem_size && (totalBytes / elem_size) != n)
void* result = do_malloc(totalBytes);
if (result != NULL) {
#if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes) // If overflow would occur...
totalBytes += sizeof(AllocAlignmentInteger);
void* result = do_malloc(totalBytes);
memset(result, 0, totalBytes);
*static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
result = static_cast<AllocAlignmentInteger*>(result) + 1;
void* result = do_malloc(totalBytes);
if (result != NULL) {
memset(result, 0, totalBytes);
#ifndef WTF_CHANGES
MallocHook::InvokeNewHook(result, totalBytes);
// Since cfree isn't used anywhere, we don't compile it in.
#ifndef WTF_CHANGES
, m_freeObjectFinder(freeObjectFinder)
int visit(void* ptr) const
Span* span = m_reader(reinterpret_cast<Span*>(ptr));
if (m_seenPointers.contains(ptr))
return span->length;
m_seenPointers.add(ptr);
// Mark the memory used for the Span itself as an administrative region
vm_range_t ptrRange = { reinterpret_cast<vm_address_t>(ptr), sizeof(Span) };
if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
(*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, &ptrRange, 1);
ptrRange.address = span->start << kPageShift;
ptrRange.size = span->length * kPageSize;
// Mark the memory region the span represents as a candidate for containing pointers
if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
~PageMapMemoryUsageRecorder()
ASSERT(!m_coalescedSpans.size());
void recordPendingRegions()
Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
vm_range_t ptrRange = { m_coalescedSpans[0]->start << kPageShift, 0 };
ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize);
// Mark the memory region the spans represent as a candidate for containing pointers
if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE)
(*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);
if (!span->free && (m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
// If it's an allocated large object span, mark it as in use
if (span->sizeclass == 0 && !m_freeObjectFinder.isFreeObject(reinterpret_cast<void*>(ptrRange.address)))
(*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &ptrRange, 1);
else if (span->sizeclass) {
const size_t byteSize = ByteSizeForClass(span->sizeclass);
unsigned totalObjects = (span->length << kPageShift) / byteSize;
ASSERT(span->refcount <= totalObjects);
char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
m_coalescedSpans.clear();
Vector<vm_range_t, 1024> allocatedPointers;
for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
Span* theSpan = m_coalescedSpans[i];
vm_address_t spanStartAddress = theSpan->start << kPageShift;
vm_size_t spanSizeInBytes = theSpan->length * kPageSize;
if (!theSpan->sizeclass) {
// If it's an allocated large object span, mark it as in use
if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);
// Mark each allocated small object within the span as in use
for (unsigned i = 0; i < totalObjects; i++) {
char* thisObject = ptr + (i * byteSize);
if (m_freeObjectFinder.isFreeObject(thisObject))
vm_range_t objectRange = { reinterpret_cast<vm_address_t>(thisObject), byteSize };
(*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &objectRange, 1);
const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
if (!m_freeObjectFinder.isFreeObject(object))
allocatedPointers.append((vm_range_t){object, objectSize});
(*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());
m_coalescedSpans.clear();
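// Worked example (illustrative; assumes 4 KB pages, i.e. kPageShift == 12): a one-page span
// whose size class maps to 64-byte objects yields up to 4096 / 64 = 64 candidate ranges, and a
// range is appended to allocatedPointers only if the free-object finder did not see it on a
// free list.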
int visit(void* ptr)
Span* span = m_reader(reinterpret_cast<Span*>(ptr));
if (m_seenPointers.contains(ptr))
return span->length;
m_seenPointers.add(ptr);
if (!m_coalescedSpans.size()) {
m_coalescedSpans.append(span);
return span->length;
Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;
// If the new span is adjacent to the previous span, do nothing for now.
vm_address_t spanStartAddress = span->start << kPageShift;
if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
m_coalescedSpans.append(span);
return span->length;
// The new span is not adjacent to the previous span, so record the spans coalesced so far.
recordPendingRegions();
m_coalescedSpans.append(span);
return span->length;
class AdminRegionRecorder {
unsigned m_typeMask;
vm_range_recorder_t* m_recorder;
const RemoteMemoryReader& m_reader;
Vector<vm_range_t, 1024> m_pendingRegions;
AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader)
, m_context(context)
, m_typeMask(typeMask)
, m_recorder(recorder)
void recordRegion(vm_address_t ptr, size_t size)
if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
m_pendingRegions.append((vm_range_t){ ptr, size });
void visit(void* ptr, size_t size)
recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
void recordPendingRegions()
if (m_pendingRegions.size()) {
(*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
m_pendingRegions.clear();
~AdminRegionRecorder()
ASSERT(!m_pendingRegions.size());
kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
RemoteMemoryReader memoryReader(task, reader);
TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
pageMap->visit(pageMapFinder, memoryReader);
pageMap->visitValues(pageMapFinder, memoryReader);
PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
pageMap->visit(usageRecorder, memoryReader);
pageMap->visitValues(usageRecorder, memoryReader);
usageRecorder.recordPendingRegions();
AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder, memoryReader);
pageMap->visitAllocations(adminRegionRecorder, memoryReader);
PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);
spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
adminRegionRecorder.recordPendingRegions();
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
&FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics };
&FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !PLATFORM(IPHONE)
, 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches)
FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
: m_pageHeap(pageHeap)
, m_threadHeaps(threadHeaps)
, m_centralCaches(centralCaches)
, m_spanAllocator(spanAllocator)
, m_pageHeapAllocator(pageHeapAllocator)
memset(&m_zone, 0, sizeof(m_zone));
m_zone.zone_name = "JavaScriptCore FastMalloc";
m_zone.size = &FastMallocZone::size;
m_zone.malloc = &FastMallocZone::zoneMalloc;