#define LOGF STL_NAMESPACE::cout  // where we log to; LOGF is a historical name

using HASH_NAMESPACE::hash_set;
using STL_NAMESPACE::string;
using STL_NAMESPACE::sort;
//----------------------------------------------------------------------
// Flags that control heap-profiling
//
// The thread-safety of the profiler depends on these being immutable
// after main starts, so don't change them.
//----------------------------------------------------------------------
DEFINE_bool(cleanup_old_heap_profiles, true,
            "At initialization time, delete old heap profiles.");
DEFINE_int64(heap_profile_allocation_interval,
             EnvToInt64("HEAP_PROFILE_ALLOCATION_INTERVAL", 1 << 30 /*1GB*/),
             "Dump heap profiling information once every specified "
             "number of bytes allocated by the program.");
DEFINE_int64(heap_profile_inuse_interval,
             EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/),
             "Dump heap profiling information whenever the high-water "
             "memory usage mark increases by the specified number of "
             "bytes.");
DEFINE_bool(mmap_log, false, "Should mmap/munmap calls be logged?");
DEFINE_bool(mmap_profile, false,
            "If heap-profiling is on, also profile mmaps");
DEFINE_int32(heap_profile_log, 0,
             "Logging level for heap profiler/checker messages");
// Level of logging used by the heap profiler and heap checker (if applicable)
void HeapProfilerSetLogLevel(int level) {
  FLAGS_heap_profile_log = level;
}
// Dump heap profiling information once every specified number of bytes
// allocated by the program. Default: 1GB
void HeapProfilerSetAllocationInterval(size_t interval) {
  FLAGS_heap_profile_allocation_interval = interval;
}
// Dump heap profiling information whenever the high-water
// memory usage mark increases by the specified number of
// bytes. Default: 100MB
void HeapProfilerSetInuseInterval(size_t interval) {
  FLAGS_heap_profile_inuse_interval = interval;
}
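// A minimal usage sketch (illustrative, not part of this file; the values
// and the "/tmp/myprog" prefix are hypothetical): the setters above can be
// called early, as an alternative to the HEAP_PROFILE_* environment
// variables.
//
//   int main(int argc, char** argv) {
//     HeapProfilerSetAllocationInterval(256 << 20 /*256MB*/);
//     HeapProfilerSetInuseInterval(50 << 20 /*50MB*/);
//     HeapProfilerStart("/tmp/myprog");   // prefix for the .heap dump files
//     // ... run the real workload ...
//     HeapProfilerDump("end of workload");
//     HeapProfilerStop();
//   }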
//----------------------------------------------------------------------
// For printing messages without using malloc
//----------------------------------------------------------------------

void HeapProfiler::MESSAGE(int level, const char* format, ...) {
  if (FLAGS_heap_profile_log < level) return;

  // We write directly to the stderr file descriptor and avoid FILE
  // buffering because that may invoke malloc()
  va_list ap;
  va_start(ap, format);
  char buf[600];
  vsnprintf(buf, sizeof(buf), format, ap);
  va_end(ap);
  write(STDERR_FILENO, buf, strlen(buf));
}
//----------------------------------------------------------------------
// Simple allocator for heap profiler's internal memory
//----------------------------------------------------------------------

class HeapProfilerMemory {
  // Default unit of allocation from system
  static const int kBlockSize = 1 << 20;

  // Maximum number of blocks we can allocate
  static const int kMaxBlocks = 1024;

  // Info kept per allocated block
  struct Block {
    void* ptr;
    size_t size;
  };

  // Alignment of returned allocations
  union AlignUnion { double d; void* p; int64 i; size_t s; };
  static const int kAlignment = sizeof(AlignUnion);

  Block blocks_[kMaxBlocks];  // List of allocated blocks
  int nblocks_;               // # of allocated blocks
  char* current_;             // Current block
  int pos_;                   // Position in current block
  // Allocate a block with the specified size
  void* AllocBlock(size_t size) {
    // Round size up to a multiple of the page size
    const size_t pagesize = getpagesize();
    size = ((size + pagesize - 1) / pagesize) * pagesize;

    HeapProfiler::MESSAGE(1, "HeapProfiler: allocating %"PRIuS
                          " bytes for internal use\n", size);
    if (nblocks_ == kMaxBlocks) {
      HeapProfiler::MESSAGE(-1, "HeapProfilerMemory: Alloc out of memory\n");
      abort();
    }
    // Disable mmap hooks while calling mmap here to avoid recursive calls
    MallocHook::MmapHook saved = MallocHook::SetMmapHook(NULL);
    void* ptr = mmap(NULL, size,
                     PROT_READ|PROT_WRITE,
                     MAP_PRIVATE|MAP_ANONYMOUS,
                     -1, 0);
    MallocHook::SetMmapHook(saved);

    if (ptr == reinterpret_cast<void*>(MAP_FAILED)) {
      HeapProfiler::MESSAGE(-1, "HeapProfilerMemory: mmap %"PRIuS": %s\n",
                            size, strerror(errno));
      abort();
    }
    blocks_[nblocks_].ptr = ptr;
    blocks_[nblocks_].size = size;
    nblocks_++;
    return ptr;
  }
 public:
  // De-allocate everything we have mapped
  void Clear() {
    // Disable munmap hooks while calling munmap here to avoid recursive calls
    MallocHook::MunmapHook saved = MallocHook::SetMunmapHook(NULL);
    for (int i = 0; i < nblocks_; ++i) {
      if (munmap(blocks_[i].ptr, blocks_[i].size) != 0) {
        HeapProfiler::MESSAGE(-1, "HeapProfilerMemory: munmap: %s\n",
                              strerror(errno));
      }
    }
    nblocks_ = 0;
    MallocHook::SetMunmapHook(saved);
  }
  void* Alloc(size_t bytes) {
    if (bytes >= kBlockSize / 8) {
      // Too big for piecemeal allocation
      return AllocBlock(bytes);
    }
    if (pos_ + bytes > kBlockSize) {
      current_ = reinterpret_cast<char*>(AllocBlock(kBlockSize));
      pos_ = 0;  // start at the beginning of the fresh block
    }
    void* result = current_ + pos_;
    pos_ = (pos_ + bytes + kAlignment - 1) & ~(kAlignment - 1);
    return result;
  }
};
static HeapProfilerMemory heap_profiler_memory;

void* HeapProfiler::Malloc(size_t bytes) {
  return heap_profiler_memory.Alloc(bytes);
}

void HeapProfiler::Free(void* p) {
  // Do nothing -- all memory is released in one shot
}
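// Worked example of the bump-pointer arithmetic in Alloc() above
// (illustrative numbers): with kAlignment == 8, an Alloc(13) at pos_ == 0
// returns current_ + 0 and advances pos_ to (0 + 13 + 7) & ~7 == 16, so the
// next allocation stays 8-byte aligned. Free() is a no-op by design: all of
// this memory is unmapped in one shot when the profiler shuts down.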
DEFINE_bool(mmap_log,
            EnvToBool("HEAP_PROFILE_MMAP_LOG", false),
            "Should mmap/munmap calls be logged?");
DEFINE_bool(mmap_profile,
            EnvToBool("HEAP_PROFILE_MMAP", false),
            "If heap-profiling is on, also profile mmap, mremap, and sbrk");
DEFINE_bool(only_mmap_profile,
            EnvToBool("HEAP_PROFILE_ONLY_MMAP", false),
            "If heap-profiling is on, only profile mmap, mremap, and sbrk; "
            "do not profile malloc/new/etc");
//----------------------------------------------------------------------
// Locking
//----------------------------------------------------------------------

// A pthread_mutex has way too much lock contention to be used here.
// In some applications we've run, pthread_mutex took >75% of the running
// time.
//
// I would like to roll our own mutex wrapper, but the obvious
// solutions can call malloc(), which can lead to infinite recursion.
//
// So we use a simple spinlock (just like the spinlocks used in tcmalloc).

static TCMalloc_SpinLock heap_lock;
void HeapProfiler::Lock() {
  // for debugging deadlocks
  HeapProfiler::MESSAGE(10, "HeapProfiler: Lock from %d\n",
                        int(pthread_self()));
  heap_lock.Lock();
}

void HeapProfiler::Unlock() {
  HeapProfiler::MESSAGE(10, "HeapProfiler: Unlock from %d\n",
                        int(pthread_self()));
  heap_lock.Unlock();
}
//----------------------------------------------------------------------
// Profile-maintenance code
//----------------------------------------------------------------------
typedef HeapProfiler::Bucket Bucket;

bool HeapProfiler::is_on_ = false;
bool HeapProfiler::init_has_been_called_ = false;
bool HeapProfiler::need_for_leaks_ = false;
bool HeapProfiler::self_disable_ = false;
pthread_t HeapProfiler::self_disabled_tid_;
HeapProfiler::IgnoredObjectSet* HeapProfiler::ignored_objects_ = NULL;
bool HeapProfiler::dump_for_leaks_ = false;
bool HeapProfiler::dumping_ = false;
Bucket HeapProfiler::total_;
Bucket HeapProfiler::self_disabled_;
Bucket HeapProfiler::profile_;
char* HeapProfiler::filename_prefix_ = NULL;
// Hash-table: we hand-craft one instead of using one of the pre-written
// ones because we do not want to use malloc when operating on the table.
// It is only five lines of code, so no big deal.
static const int kHashTableSize = 179999;
static Bucket** table = NULL;
HeapProfiler::AllocationMap* HeapProfiler::allocation_ = NULL;

static int num_buckets = 0;
static int total_stack_depth = 0;
static int dump_count = 0;         // How many dumps so far
static int64 last_dump = 0;        // When did we last dump
static int64 high_water_mark = 0;  // In-use-bytes at last high-water dump
int HeapProfiler::strip_frames_ = 0;
bool HeapProfiler::done_first_alloc_ = false;
void* HeapProfiler::recordalloc_reference_stack_position_ = NULL;
// For sorting buckets by in-use space
static bool ByAllocatedSpace(Bucket* a, Bucket* b) {
  // Return true iff "a" has more allocated space than "b"
  return (a->alloc_size_ - a->free_size_) > (b->alloc_size_ - b->free_size_);
}
int HeapProfiler::UnparseBucket(char* buf, int buflen, int bufsize,
                                Bucket* b) {
  profile_.allocs_ += b->allocs_;
  profile_.alloc_size_ += b->alloc_size_;
  profile_.frees_ += b->frees_;
  profile_.free_size_ += b->free_size_;
  if (dump_for_leaks_ &&
      b->allocs_ - b->frees_ == 0 &&
      b->alloc_size_ - b->free_size_ == 0) {
    // don't waste the profile space on buckets that do not matter
    return buflen;
  }
  int printed =
    snprintf(buf + buflen, bufsize - buflen, "%6d: %8"LLD" [%6d: %8"LLD"] @",
             b->allocs_ - b->frees_,
             b->alloc_size_ - b->free_size_,
             b->allocs_,
             b->alloc_size_);
  // If it looks like the snprintf failed, ignore the fact we printed anything
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  for (int d = 0; d < b->depth_; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08lx",
                       (unsigned long)b->stack_[d]);
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}
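// For reference, each bucket unparses to one profile line of the form
// (values are illustrative):
//
//     12:     4096 [    50:    20480] @ 0x08048abc 0x08048def 0x4000f1a2
//
// i.e. "live allocs: live bytes [total allocs: total bytes] @ stack pcs".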
void HeapProfiler::AdjustByIgnoredObjects(int adjust) {
  if (ignored_objects_) {
    assert(dump_for_leaks_);
    for (IgnoredObjectSet::const_iterator i = ignored_objects_->begin();
         i != ignored_objects_->end(); ++i) {
      AllocValue v;
      if (!allocation_->Find(reinterpret_cast<void*>(*i), &v)) abort();
      v.bucket->allocs_ += adjust;
      // need explicit size_t to int64 conversion before multiplication
      // in case size_t is unsigned and adjust is negative
      v.bucket->alloc_size_ += adjust * int64(v.bytes);
      assert(v.bucket->allocs_ >= 0 && v.bucket->alloc_size_ >= 0);
      if (kMaxLogging && adjust < 0) {
        HeapProfiler::MESSAGE(4, "HeapChecker: "
                              "Ignoring object of %"PRIuS" bytes\n", v.bytes);
      }
    }
  }
}
// I would like to use Mutex, but it can call malloc(),
// which can cause us to fall into an infinite recursion.
//
// So we use a simple spinlock.
static SpinLock heap_lock(SpinLock::LINKER_INITIALIZED);
//----------------------------------------------------------------------
// Simple allocator for heap profiler's internal memory
//----------------------------------------------------------------------
static LowLevelAlloc::Arena* heap_profiler_memory;

static void* ProfilerMalloc(size_t bytes) {
  return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory);
}
static void ProfilerFree(void* p) {
  LowLevelAlloc::Free(p);
}
//----------------------------------------------------------------------
// Profiling control/state data
//----------------------------------------------------------------------
static bool is_on = false;            // If we are on as a subsystem
static bool dumping = false;          // Dumping status to prevent recursion
static char* filename_prefix = NULL;  // Prefix used for profile file names
                                      // (NULL if no need for dumping yet)
static int dump_count = 0;            // How many dumps so far
static int64 last_dump = 0;           // When did we last dump
static int64 high_water_mark = 0;     // In-use-bytes at last high-water dump

static HeapProfileTable* heap_profile = NULL;  // the heap profile table
//----------------------------------------------------------------------
// Profile generation
//----------------------------------------------------------------------
enum AddOrRemove { ADD, REMOVE };

// Add or remove all MMap-allocated regions to/from *heap_profile.
// Assumes heap_lock is held.
static void AddRemoveMMapDataLocked(AddOrRemove mode) {
  RAW_DCHECK(heap_lock.IsHeld(), "");
  if (!FLAGS_mmap_profile || !is_on) return;
  if (!FLAGS_mmap_log) MemoryRegionMap::CheckMallocHooks();
  // MemoryRegionMap maintained all the data we need for all
  // mmap-like allocations, so we just use it here:
  MemoryRegionMap::LockHolder l;
  for (MemoryRegionMap::RegionIterator r = MemoryRegionMap::BeginRegionLocked();
       r != MemoryRegionMap::EndRegionLocked(); ++r) {
    if (mode == ADD) {
      heap_profile->RecordAllocWithStack(
        reinterpret_cast<const void*>(r->start_addr),
        r->end_addr - r->start_addr,
        r->call_stack_depth, r->call_stack);
    } else {
      heap_profile->RecordFree(reinterpret_cast<void*>(r->start_addr));
    }
  }
}
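// DoGetHeapProfile() below brackets profile generation with
// AddRemoveMMapDataLocked(ADD) ... AddRemoveMMapDataLocked(REMOVE), so
// mmap-ed regions show up in the dumped profile while the live profile
// table is left unchanged. The pattern, in sketch form (simplified from
// the real caller below):
//
//   SpinLockHolder l(&heap_lock);
//   AddRemoveMMapDataLocked(ADD);
//   int buflen = heap_profile->FillOrderedProfile(buf, size - 1);
//   AddRemoveMMapDataLocked(REMOVE);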
static char* DoGetHeapProfile(void* (*alloc_func)(size_t)) {
  // We used to be smarter about estimating the required memory and
  // then capping it to 1MB and generating the profile into that.
  // However it should not cost us much to allocate 1MB every time.
  static const int size = 1 << 20;
  char* buf = reinterpret_cast<char*>((*alloc_func)(size));
  if (buf == NULL) return NULL;

  // Grab the lock and generate the profile.
  SpinLockHolder l(&heap_lock);
  Bucket** list = NULL;

  // We can't allocate list on the stack, as this would overflow on threads
  // running with a small stack size. We can't allocate it under the lock
  // either, as this would cause a deadlock. But num_buckets is only valid
  // while holding the lock: new buckets can be created at any time otherwise.
  // So we'll read num_buckets dirtily, allocate room for all the current
  // buckets + a few more, and check the count when we get the lock; if we
  // don't have enough, we release the lock and try again.
  int nb = num_buckets + num_buckets / 16 + 8;
  list = new Bucket*[nb];
  // Grab the lock and generate the profile
  // (for leak checking the lock is acquired higher up).
  if (!HeapProfiler::dump_for_leaks_) HeapProfiler::Lock();
  if (!HeapProfiler::is_on_) {
    if (!HeapProfiler::dump_for_leaks_) HeapProfiler::Unlock();
    return NULL;
  }

  // Get all buckets and sort
  assert(table != NULL);
  // If we have allocated some extra buckets while waiting for the lock, we
  // may have to reallocate list
  if (num_buckets > nb) {
    if (!HeapProfiler::dump_for_leaks_) HeapProfiler::Unlock();
    // (release "list" and retry, as described in the comment above)
  }
  int n = 0;
  for (int b = 0; b < kHashTableSize; b++) {
    for (Bucket* x = table[b]; x != 0; x = x->next_) {
      list[n++] = x;
    }
  }
  assert(n == num_buckets);
  sort(list, list + num_buckets, ByAllocatedSpace);
  int buflen = snprintf(buf, size-1, "heap profile: ");
  buflen = HeapProfiler::UnparseBucket(buf, buflen, size-1,
                                       &HeapProfiler::total_);
  memset(&HeapProfiler::profile_, 0, sizeof(HeapProfiler::profile_));
  HeapProfiler::AdjustByIgnoredObjects(-1);  // drop from profile
  for (int i = 0; i < num_buckets; i++) {
    Bucket* b = list[i];
    buflen = HeapProfiler::UnparseBucket(buf, buflen, size-1, b);
  }
  HeapProfiler::AdjustByIgnoredObjects(1);  // add back to profile
  assert(buflen < size);
  if (!HeapProfiler::dump_for_leaks_) HeapProfiler::Unlock();
  HeapProfileTable::Stats const stats = heap_profile->total();
  AddRemoveMMapDataLocked(ADD);
  buflen = heap_profile->FillOrderedProfile(buf, size - 1);
  // FillOrderedProfile should not reduce the set of active mmap-ed regions,
  // hence MemoryRegionMap will let us remove everything we've added above:
  AddRemoveMMapDataLocked(REMOVE);
  RAW_DCHECK(stats.Equivalent(heap_profile->total()), "");
  // If this fails, we somehow removed by AddRemoveMMapDataLocked
  // more than we have added.

  buf[buflen] = '\0';
  RAW_DCHECK(buflen == strlen(buf), "");

  return buf;
}
// We keep HeapProfile() as a backwards-compatible name for GetHeapProfile(),
// but don't export the symbol, so you probably won't be able to call this.
extern char* HeapProfile() {
  return GetHeapProfile();
}
extern "C" char* GetHeapProfile() {
  // Use normal malloc: we return the profile to the user to free it:
  return DoGetHeapProfile(&malloc);
}
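// Caller-side sketch (illustrative): the returned buffer comes from
// malloc(), so the caller owns it and must free() it.
//
//   char* profile = GetHeapProfile();
//   if (profile != NULL) {
//     fputs(profile, stderr);   // or write it to a file
//     free(profile);
//   }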
void HeapProfiler::DumpLocked(const char* reason, const char* file_name) {
  if (filename_prefix_ == NULL && file_name == NULL) return;
      // we do not yet need dumping

  dumping_ = true;

  // Make file name
  char fname[1000];
  if (file_name == NULL) {
    dump_count++;
    snprintf(fname, sizeof(fname), "%s.%04d.heap",
             filename_prefix_, dump_count);
    file_name = fname;
  }

  // Release allocation lock around the meat of this routine
  // when not leak checking, thus not blocking other threads too much;
  // but for leak checking we want to hold the lock to prevent heap activity.
  if (!dump_for_leaks_) HeapProfiler::Unlock();

  // Dump the profile
  HeapProfiler::MESSAGE(dump_for_leaks_ ? 1 : 0,
                        "HeapProfiler: "
                        "Dumping heap profile to %s (%s)\n",
                        file_name, reason);
  FILE* f = fopen(file_name, "w");
  if (f != NULL) {
    const char* profile = HeapProfile();
    fputs(profile, f);
    free(const_cast<char*>(profile));

    // Dump "/proc/self/maps" so we get list of mapped shared libraries
    fputs("\nMAPPED_LIBRARIES:\n", f);
    int maps = open("/proc/self/maps", O_RDONLY);
    if (maps >= 0) {
      char buf[100];
      ssize_t r;
      while ((r = read(maps, buf, sizeof(buf))) > 0) {
        fwrite(buf, 1, r, f);
      }
      close(maps);
    }
    fclose(f);
  } else {
    HeapProfiler::MESSAGE(0, "HeapProfiler: "
                          "FAILED Dumping heap profile to %s (%s)\n",
                          file_name, reason);
    if (dump_for_leaks_) abort();  // no sense to continue
  }

  if (!dump_for_leaks_) HeapProfiler::Lock();
  dumping_ = false;
}

static void NewHook(const void* ptr, size_t size);
static void DeleteHook(const void* ptr);

// Helper for HeapProfilerDump.
static void DumpProfileLocked(const char* reason) {
  RAW_DCHECK(heap_lock.IsHeld(), "");
  RAW_DCHECK(is_on, "");
  RAW_DCHECK(!dumping, "");

  if (filename_prefix == NULL) return;  // we do not yet need dumping

  if (FLAGS_only_mmap_profile == false) {
    if (MallocHook::GetNewHook() != NewHook ||
        MallocHook::GetDeleteHook() != DeleteHook) {
      RAW_LOG(FATAL, "Had our new/delete MallocHook-s replaced. "
                     "Are you using another MallocHook client? "
                     "Do not use --heap_profile=... to avoid this conflict.");
    }
  }

  dumping = true;

  // Make file name
  char file_name[1000];
  dump_count++;
  snprintf(file_name, sizeof(file_name), "%s.%04d%s",
           filename_prefix, dump_count, HeapProfileTable::kFileExt);

  // Dump the profile
  RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason);
  FILE* f = fopen(file_name, "w");
  if (f != NULL) {
    // Use ProfilerMalloc not to put info about profile itself into itself:
    char* profile = DoGetHeapProfile(&ProfilerMalloc);
    fputs(profile, f);
    fclose(f);
    ProfilerFree(profile);
  } else {
    RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name);
  }

  dumping = false;
}
void HeapProfilerDump(const char* reason) {
  if (HeapProfiler::is_on_ && (num_buckets > 0)) {
    HeapProfiler::Lock();
    if (!HeapProfiler::dumping_) {
      HeapProfiler::DumpLocked(reason, NULL);
    }
    HeapProfiler::Unlock();
  }
}
// Allocation map for heap objects (de)allocated
// while HeapProfiler::self_disable_ is true.
// We use it to test if heap leak checking itself changed the heap state.
// A separate map seems cleaner than trying to keep everything
// in HeapProfiler::allocation_.
HeapProfiler::AllocationMap* self_disabled_allocation = NULL;

// This is the number of bytes allocated by the first call to malloc() after
// registering this handler. We want to sanity-check that our first call is
// actually for this number of bytes.
static const int kFirstAllocationNumBytes = 23;
void HeapProfiler::RecordAlloc(void* ptr, size_t bytes, int skip_count) {
  // Our first allocation is triggered in EarlyStartLocked and is intended
  // solely to calibrate strip_frames_, which may be greater or smaller
  // depending on the degree of optimization with which we were compiled.
  if (!done_first_alloc_) {
    done_first_alloc_ = true;
    assert(bytes == kFirstAllocationNumBytes);
    assert(strip_frames_ == 0);

    static const int kMaxStackTrace = 32;
    void* stack[kMaxStackTrace];
    // We skip one frame here so that it's as if we are running from NewHook,
    // which is where strip_frames_ is used.
    int depth = GetStackTrace(stack, kMaxStackTrace, 1);
    int i;
    for (i = 0; i < depth; i++) {
      if (stack[i] == recordalloc_reference_stack_position_) {
        MESSAGE(1, "Determined strip_frames_ to be %d\n", i - 1);
        // Subtract one to offset the fact that
        // recordalloc_reference_stack_position_ actually records the stack
        // position one frame above the spot in EarlyStartLocked where we are.
        strip_frames_ = i - 1;
        break;
      }
    }
    // Fell through the loop without finding our parent
    if (strip_frames_ == 0) {
      MESSAGE(0, "Could not determine strip_frames_, aborting");
      abort();
    }

    // Return without recording the allocation. We will free the memory before
    // registering a DeleteHook.
    return;
  }
  // This locking before the "if (is_on_ ...)" check is not an overhead,
  // because with profiling off this hook is not called at all.

  HeapProfiler::MESSAGE(7, "HeapProfiler: Alloc: %p of %"PRIuS" from %d\n",
                        ptr, bytes, int(pthread_self()));

  if (self_disable_ && self_disabled_tid_ == pthread_self()) {
    self_disabled_.allocs_++;
    self_disabled_.alloc_size_ += bytes;
    AllocValue v;
    v.bucket = NULL;  // initialize just to make smart tools happy
                      // (no one will read it)
    v.bytes = bytes;
    self_disabled_allocation->Insert(ptr, v);
    return;
  }
  HeapProfiler::Lock();
  if (is_on_) {
    Bucket* b = GetBucket(skip_count + 1);
    b->allocs_++;
    b->alloc_size_ += bytes;
    total_.allocs_++;
    total_.alloc_size_ += bytes;
    AllocValue v;
    v.bucket = b;
    v.bytes = bytes;
    allocation_->Insert(ptr, v);
    HeapProfiler::MESSAGE(8, "HeapProfiler: Alloc Recorded: %p of %"PRIuS"\n",
                          ptr, bytes);
    const int64 inuse_bytes = total_.alloc_size_ - total_.free_size_;
    bool need_dump = false;
    char buf[100];
    if (total_.alloc_size_ >=
        last_dump + FLAGS_heap_profile_allocation_interval) {
      snprintf(buf, sizeof(buf), "%"LLD" MB allocated",
               total_.alloc_size_ >> 20);
      // Track that we made a "total allocation size" dump
      last_dump = total_.alloc_size_;
      need_dump = true;
    } else if (inuse_bytes >
               high_water_mark + FLAGS_heap_profile_inuse_interval) {
      snprintf(buf, sizeof(buf), "%"LLD" MB in use", inuse_bytes >> 20);
      // Track that we made a "high water mark" dump
      high_water_mark = inuse_bytes;
      need_dump = true;
    }

    if (need_dump) {
      // Dump the profile
      DumpLocked(buf, NULL);
    }
  }
  HeapProfiler::Unlock();
}

//----------------------------------------------------------------------
// Profile collection
//----------------------------------------------------------------------

// Record an allocation in the profile.
static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) {
  SpinLockHolder l(&heap_lock);
  if (is_on) {
    heap_profile->RecordAlloc(ptr, bytes, skip_count + 1);
    const HeapProfileTable::Stats& total = heap_profile->total();
    const int64 inuse_bytes = total.alloc_size - total.free_size;

    bool need_to_dump = false;
    char buf[128];
    if (total.alloc_size >=
        last_dump + FLAGS_heap_profile_allocation_interval) {
      snprintf(buf, sizeof(buf), "%"PRId64" MB allocated",
               total.alloc_size >> 20);
      // Track that we made a "total allocation size" dump
      last_dump = total.alloc_size;
      need_to_dump = true;
    } else if (inuse_bytes >
               high_water_mark + FLAGS_heap_profile_inuse_interval) {
      snprintf(buf, sizeof(buf), "%"PRId64" MB in use", inuse_bytes >> 20);
      // Track that we made a "high water mark" dump
      high_water_mark = inuse_bytes;
      need_to_dump = true;
    }
    if (need_to_dump) {
      DumpProfileLocked(buf);
    }
  }
}
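// Worked example of the two dump triggers above (illustrative numbers):
// with HEAP_PROFILE_ALLOCATION_INTERVAL=1GB, a program that allocates
// 2.5GB in total dumps near the 1GB and 2GB marks ("1024 MB allocated",
// "2048 MB allocated"); with HEAP_PROFILE_INUSE_INTERVAL=100MB, it also
// dumps whenever live bytes exceed the last high-water dump by over 100MB.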
void HeapProfiler::RecordFree(void* ptr) {
  // All activity before the "if (is_on_)" check is not an overhead,
  // because with profiling turned off this hook is not called at all.

  HeapProfiler::MESSAGE(7, "HeapProfiler: Free %p from %d\n",
                        ptr, int(pthread_self()));
  if (self_disable_ && self_disabled_tid_ == pthread_self()) {
    AllocValue v;
    if (self_disabled_allocation->FindAndRemove(ptr, &v)) {
      self_disabled_.free_size_ += v.bytes;
      self_disabled_.frees_++;
    } else {
      // Try to mess the counters up and fail later in
      // HeapLeakChecker::DumpProfileLocked instead of failing right now:
      // presently execution gets here only from within the guts
      // of the pthread library and only when being in an address space
      // that is about to disappear completely.
      // I.e. failing right here is wrong, but failing later if
      // this happens in the course of normal execution is needed.
      self_disabled_.free_size_ += 100000000;
      self_disabled_.frees_ += 100000000;
    }
    return;
  }
  HeapProfiler::Lock();
  if (is_on_) {
    AllocValue v;
    if (allocation_->FindAndRemove(ptr, &v)) {
      Bucket* b = v.bucket;
      b->frees_++;
      b->free_size_ += v.bytes;
      total_.frees_++;
      total_.free_size_ += v.bytes;
      HeapProfiler::MESSAGE(8, "HeapProfiler: Free Recorded: %p\n", ptr);
    }
  }
  HeapProfiler::Unlock();
}
bool HeapProfiler::HaveOnHeapLocked(void** ptr, AllocValue* alloc_value) {
  // Size of the C++ object array size integer
  // (potentially compiler-dependent; 4 on i386 and gcc)
  const int kArraySizeOffset = sizeof(int);
  // sizeof(basic_string<...>::_Rep) for C++ library of gcc 3.4
  // (basically three integer counters;
  // library/compiler dependent; 12 on i386 and gcc)
  const int kStringOffset = sizeof(int) * 3;
  // NOTE: One can add more similar offset cases below
  // even when they do not happen for the used compiler/library;
  // all that's impacted is
  // - HeapLeakChecker's performance during live heap walking
  // - and a slightly greater chance to mistake random memory bytes
  //   for a pointer and miss a leak in a particular run of a binary.
  if (allocation_->Find(*ptr, alloc_value)) {
    return true;
  } else if (allocation_->Find(reinterpret_cast<char*>(*ptr)
                                 - kArraySizeOffset,
                               alloc_value) &&
             alloc_value->bytes > kArraySizeOffset) {
    // this case is to account for the array size stored inside of
    // the memory allocated by new FooClass[size] for classes with destructors
    *ptr = reinterpret_cast<char*>(*ptr) - kArraySizeOffset;
    HeapProfiler::MESSAGE(7, "HeapProfiler: Got pointer into %p at +%d\n",
                          ptr, kArraySizeOffset);
    return true;
  } else if (allocation_->Find(reinterpret_cast<char*>(*ptr)
                                 - kStringOffset,
                               alloc_value) &&
             alloc_value->bytes > kStringOffset) {
    // this case is to account for basic_string<> representation in
    // newer C++ library versions when the kept pointer points to inside of
    // the allocated region
    *ptr = reinterpret_cast<char*>(*ptr) - kStringOffset;
    HeapProfiler::MESSAGE(7, "HeapProfiler: Got pointer into %p at +%d\n",
                          ptr, kStringOffset);
    return true;
  }
  return false;
}
bool HeapProfiler::HaveOnHeap(void** ptr, AllocValue* alloc_value) {
  HeapProfiler::Lock();
  bool result = is_on_ && HaveOnHeapLocked(ptr, alloc_value);
  HeapProfiler::Unlock();
  return result;
}
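// Worked example of the interior-pointer cases handled above (offsets are
// the i386/gcc values from the comments; they are compiler-dependent):
// for "Foo* p = new Foo[n]" where Foo has a destructor, the program's
// pointer is 4 bytes past the allocator's block (the array-size cookie),
// so HaveOnHeapLocked() also probes *ptr - 4; likewise a gcc-3.4
// basic_string data pointer sits 12 bytes past its _Rep block, so
// *ptr - 12 is probed as well.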
//----------------------------------------------------------------------
// Allocation/deallocation hooks
//----------------------------------------------------------------------
void HeapProfiler::NewHook(void* ptr, size_t size) {
  if (ptr != NULL) RecordAlloc(ptr, size, strip_frames_);
}

void HeapProfiler::DeleteHook(void* ptr) {
  if (ptr != NULL) RecordFree(ptr);
}
// Record a deallocation in the profile.
static void RecordFree(const void* ptr) {
  SpinLockHolder l(&heap_lock);
  if (is_on) {
    heap_profile->RecordFree(ptr);
  }
}
//----------------------------------------------------------------------
// Allocation/deallocation hooks for MallocHook
//----------------------------------------------------------------------
void NewHook(const void* ptr, size_t size) {
  if (ptr != NULL) RecordAlloc(ptr, size, 0);
}

void DeleteHook(const void* ptr) {
  if (ptr != NULL) RecordFree(ptr);
}
void HeapProfiler::MmapHook(void* result,
                            void* start, size_t size,
                            int prot, int flags,
                            int fd, off_t offset) {
  // Log the mmap if necessary
  if (FLAGS_mmap_log) {
    char buf[200];
    snprintf(buf, sizeof(buf),
             "mmap(start=%p, len=%"PRIuS", prot=0x%x, flags=0x%x, "
             "fd=%d, offset=0x%x) = %p",
             start, size, prot, flags, fd, (unsigned int) offset,
             result);
    LOGF << buf << "\n";
    // TODO(jandrews): Re-enable stack tracing
    //DumpStackTrace(1, DebugWriteToStream, &LOG(INFO));
  }
  // Record mmap in profile if appropriate
  if (result != (void*) MAP_FAILED &&
      FLAGS_mmap_profile &&
      is_on_) {
    RecordAlloc(result, size, strip_frames_);
  }
}
void HeapProfiler::MunmapHook(void* ptr, size_t size) {
786
if (FLAGS_mmap_profile && is_on_) {
789
if (FLAGS_mmap_log) {
791
snprintf(buf, sizeof(buf), "munmap(start=%p, len=%"PRIuS")", ptr, size);
796
//----------------------------------------------------------------------
797
// Profiler maintenance
798
//----------------------------------------------------------------------
800
Bucket* HeapProfiler::GetBucket(int skip_count) {
801
// Get raw stack trace
802
static const int kMaxStackTrace = 32;
803
void* key[kMaxStackTrace];
804
int depth = GetStackTrace(key, kMaxStackTrace, skip_count+1);
808
for (int i = 0; i < depth; i++) {
809
uintptr_t pc = reinterpret_cast<uintptr_t>(key[i]);
810
h = (h << 8) | (h >> (8*(sizeof(h)-1)));
811
h += (pc * 31) + (pc * 7) + (pc * 3);
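// Illustrative trace of the hash above (made-up pc values): each iteration
// rotates h left by one byte and then adds pc * 41 (since 31 + 7 + 3 == 41),
// so stacks that differ in any single frame almost always land in different
// buckets of the 179999-entry table below.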
  // Lookup stack trace in table
  const size_t key_size = sizeof(key[0]) * depth;
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = table[buck]; b != 0; b = b->next_) {
    if ((b->hash_ == h) &&
        (b->depth_ == depth) &&
        (memcmp(b->stack_, key, key_size) == 0)) {
      return b;
    }
  }

  // Create a new bucket
  void** kcopy = reinterpret_cast<void**>(Malloc(key_size));
  memcpy(kcopy, key, key_size);
  Bucket* b = reinterpret_cast<Bucket*>(Malloc(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash_ = h;
  b->depth_ = depth;
  b->stack_ = kcopy;
  b->next_ = table[buck];
  table[buck] = b;
  num_buckets++;
  total_stack_depth += depth;
  return b;
}
void HeapProfiler::EarlyStartLocked() {
  heap_profiler_memory.Init();

  // we should be really turned off:
  if (need_for_leaks_) abort();
  if (self_disable_) abort();
  if (filename_prefix_ != NULL) abort();
  // Make the hash table
  const int table_bytes = kHashTableSize * sizeof(Bucket*);
  table = reinterpret_cast<Bucket**>(Malloc(table_bytes));
  memset(table, 0, table_bytes);

  // Make allocation map
  void* aptr = Malloc(sizeof(AllocationMap));
  allocation_ = new (aptr) AllocationMap(Malloc, Free);

  // Zero out the statistics
  memset(&total_, 0, sizeof(total_));
  num_buckets = 0;
  total_stack_depth = 0;
// TODO(jandrews): Re-enable stack tracing
#ifdef TODO_REENABLE_STACK_TRACING
static void RawInfoStackDumper(const char* message, void*) {
  // -1 is to chop the \n which will be added by RAW_LOG
  RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message);
}
#endif
// Saved MemoryRegionMap's hooks to daisy-chain calls to.
322
MallocHook::MmapHook saved_mmap_hook = NULL;
323
MallocHook::MremapHook saved_mremap_hook = NULL;
324
MallocHook::MunmapHook saved_munmap_hook = NULL;
325
MallocHook::SbrkHook saved_sbrk_hook = NULL;
static void MmapHook(const void* result, const void* start, size_t size,
                     int prot, int flags, int fd, off_t offset) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxS not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mmap(start=0x%"PRIxS", len=%"PRIuS", prot=0x%x, flags=0x%x, "
            "fd=%d, offset=0x%x) = 0x%"PRIxS"",
            (uintptr_t) start, size, prot, flags, fd, (unsigned int) offset,
            (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
  if (saved_mmap_hook) {
    // Call MemoryRegionMap's hook: it will record needed info about the mmap
    // for us w/o deadlocks:
    (*saved_mmap_hook)(result, start, size, prot, flags, fd, offset);
  }
}
static void MremapHook(const void* result, const void* old_addr,
                       size_t old_size, size_t new_size,
                       int flags, const void* new_addr) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxS not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mremap(old_addr=0x%"PRIxS", old_size=%"PRIuS", new_size=%"PRIuS", "
            "flags=0x%x, new_addr=0x%"PRIxS") = 0x%"PRIxS"",
            (uintptr_t) old_addr, old_size, new_size, flags,
            (uintptr_t) new_addr, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
  if (saved_mremap_hook) {  // call MemoryRegionMap's hook
    (*saved_mremap_hook)(result, old_addr, old_size, new_size, flags, new_addr);
  }
}
static void MunmapHook(const void* ptr, size_t size) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxS not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO, "munmap(start=0x%"PRIxS", len=%"PRIuS")",
            (uintptr_t) ptr, size);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
  if (saved_munmap_hook) {  // call MemoryRegionMap's hook
    (*saved_munmap_hook)(ptr, size);
  }
}
static void SbrkHook(const void* result, ptrdiff_t increment) {
  if (FLAGS_mmap_log) {  // log it
    RAW_LOG(INFO, "sbrk(inc=%"PRIdS") = 0x%"PRIxS"",
            increment, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
  if (saved_sbrk_hook) {  // call MemoryRegionMap's hook
    (*saved_sbrk_hook)(result, increment);
  }
}
//----------------------------------------------------------------------
// Starting/stopping/dumping
//----------------------------------------------------------------------
extern "C" void HeapProfilerStart(const char* prefix) {
  SpinLockHolder l(&heap_lock);

  if (is_on) return;
  is_on = true;

  RAW_VLOG(0, "Starting tracking the heap");

  if (FLAGS_only_mmap_profile) {
    FLAGS_mmap_profile = true;
  }

  if (FLAGS_mmap_profile) {
    // Ask MemoryRegionMap to record all mmap, mremap, and sbrk
    // call stack traces of at least size kMaxStackDepth:
    MemoryRegionMap::Init(HeapProfileTable::kMaxStackDepth);
  }

  if (FLAGS_mmap_log) {
    // Install our hooks to do the logging
    // and maybe save MemoryRegionMap's hooks to call:
    saved_mmap_hook = MallocHook::SetMmapHook(MmapHook);
    saved_mremap_hook = MallocHook::SetMremapHook(MremapHook);
    saved_munmap_hook = MallocHook::SetMunmapHook(MunmapHook);
    saved_sbrk_hook = MallocHook::SetSbrkHook(SbrkHook);
  }

  heap_profiler_memory =
    LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());

  heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable)))
                     HeapProfileTable(ProfilerMalloc, ProfilerFree);
  // We do not reset dump_count so if the user does a sequence of
  // HeapProfilerStart/HeapProfilerStop calls, we will get a continuous
  // sequence of profiles.

  // Now set the hooks that capture mallocs/frees
  MallocHook::SetNewHook(NewHook);
  // Our first allocation after registering our hook is treated specially by
  // RecordAlloc(); it looks at the stack and counts how many frames up we
  // are. First we record the current stack pointer.
  // Note: The stacktrace implementations differ about how many args they
  // fill when skip is non-zero. Safest just to reserve maxdepth space.
  void* here[32];
  GetStackTrace(here, 2, 1);
  // Skip the first frame. It points to the current offset within this
  // function, which will have changed by the time we get to the malloc()
  // call which triggers. Instead, we store our parent function's offset,
  // which is shared by both us and by the malloc() call below.
  recordalloc_reference_stack_position_ = here[0];
  done_first_alloc_ = false;  // Initialization has not occurred yet
  void* first_alloc = malloc(kFirstAllocationNumBytes);
  free(first_alloc);

  MallocHook::SetDeleteHook(DeleteHook);

  HeapProfiler::MESSAGE(1, "HeapProfiler: Starting heap tracking\n");
}
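// How this calibration plays out (a sketch): the malloc(23) above lands in
// NewHook -> RecordAlloc(), which walks its own stack until it finds the
// frame recorded in recordalloc_reference_stack_position_; the number of
// frames it had to skip becomes strip_frames_, so later profile stacks
// start at the user's allocation site instead of inside the profiler.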
void HeapProfiler::StartLocked(const char* prefix) {
  if (filename_prefix_ != NULL) return;
  if (FLAGS_only_mmap_profile == false) {
    // Now set the hooks that capture new/delete and malloc/free
    // and check that these are the only hooks:
    if (MallocHook::SetNewHook(NewHook) != NULL ||
        MallocHook::SetDeleteHook(DeleteHook) != NULL) {
      RAW_LOG(FATAL, "Had other new/delete MallocHook-s set. "
                     "Are you using the heap leak checker? "
                     "Use --heap_check=\"\" to avoid this conflict.");
    }
  }
  // Copy filename prefix
  const int prefix_length = strlen(prefix);
  filename_prefix_ = reinterpret_cast<char*>(Malloc(prefix_length + 1));
  memcpy(filename_prefix_, prefix, prefix_length);
  filename_prefix_[prefix_length] = '\0';
}
void HeapProfiler::StopLocked() {
  if (filename_prefix_ == NULL) return;
  filename_prefix_ = NULL;

  if (need_for_leaks_) return;

  // Turn us off completely:
  MallocHook::SetNewHook(NULL);
  MallocHook::SetDeleteHook(NULL);

  // Get rid of all memory we allocated
  heap_profiler_memory.Clear();
}
void HeapProfiler::StartForLeaks() {
  HeapProfiler::Lock();

  EarlyStartLocked();  // fire-up HeapProfiler hooks

  need_for_leaks_ = true;

  memset(&self_disabled_, 0, sizeof(self_disabled_));  // zero the counters

  // Make allocation map for self-disabled allocations
  void* aptr = Malloc(sizeof(AllocationMap));
  self_disabled_allocation = new (aptr) AllocationMap(Malloc, Free);

  HeapProfiler::Unlock();
}
void HeapProfiler::StopForLeaks() {
  HeapProfiler::Lock();
  need_for_leaks_ = false;
  if (filename_prefix_ == NULL) StopLocked();
  HeapProfiler::Unlock();
}
void HeapProfilerStart(const char* prefix) {
  HeapProfiler::Lock();
  HeapProfiler::StartLocked(prefix);
  HeapProfiler::Unlock();
}

void HeapProfilerStop() {
  HeapProfiler::Lock();
  HeapProfiler::StopLocked();
  HeapProfiler::Unlock();
}
  // Copy filename prefix
  RAW_DCHECK(filename_prefix == NULL, "");
  const int prefix_length = strlen(prefix);
  filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1));
  memcpy(filename_prefix, prefix, prefix_length);
  filename_prefix[prefix_length] = '\0';

  // This should be done before the hooks are set up, since it should
  // call new, and we want that to be accounted for correctly.
  MallocExtension::Initialize();
}
extern "C" void HeapProfilerStop() {
  SpinLockHolder l(&heap_lock);

  if (!is_on) return;
  if (FLAGS_only_mmap_profile == false) {
    // Unset our new/delete hooks, checking they were the ones set:
    if (MallocHook::SetNewHook(NULL) != NewHook ||
        MallocHook::SetDeleteHook(NULL) != DeleteHook) {
      RAW_LOG(FATAL, "Had our new/delete MallocHook-s replaced. "
                     "Are you using another MallocHook client? "
                     "Do not use --heap_profile=... to avoid this conflict.");
    }
  }
  if (FLAGS_mmap_log) {
    // Restore mmap/sbrk hooks, checking that our hooks were the ones set:
    if (MallocHook::SetMmapHook(saved_mmap_hook) != MmapHook ||
        MallocHook::SetMremapHook(saved_mremap_hook) != MremapHook ||
        MallocHook::SetMunmapHook(saved_munmap_hook) != MunmapHook ||
        MallocHook::SetSbrkHook(saved_sbrk_hook) != SbrkHook) {
      RAW_LOG(FATAL, "Had our mmap/mremap/munmap/sbrk MallocHook-s replaced. "
                     "Are you using another MallocHook client? "
                     "Do not use --heap_profile=... to avoid this conflict.");
    }
  }
  // Free the profile table
  heap_profile->~HeapProfileTable();
  ProfilerFree(heap_profile);
  heap_profile = NULL;

  // Free the filename prefix
  ProfilerFree(filename_prefix);
  filename_prefix = NULL;
  if (!LowLevelAlloc::DeleteArena(heap_profiler_memory)) {
    RAW_LOG(FATAL, "Memory leak in HeapProfiler:");
  }

  if (FLAGS_mmap_profile) {
    MemoryRegionMap::Shutdown();
  }

  is_on = false;
}
extern "C" void HeapProfilerDump(const char* reason) {
  SpinLockHolder l(&heap_lock);
  if (is_on && !dumping) {
    DumpProfileLocked(reason);
  }
}

//----------------------------------------------------------------------