108
109
/*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
109
110
/*------------------------------------------------------------*/
112
-- Proper-ly reached; a pointer to its start has been found
113
-- Interior-ly reached; only an interior pointer to it has been found
114
-- Unreached; so far, no pointers to any part of it have been found.
115
-- IndirectLeak; leaked, but referred to by another leaked block
126
112
/* An entry in the mark stack */
135
/* A block record, used for generating err msgs. */
138
struct _LossRecord* next;
139
/* Where these lost blocks were allocated. */
140
ExeContext* allocated_at;
141
/* Their reachability. */
142
Reachedness loss_mode;
143
/* Number of blocks and total # bytes involved. */
145
SizeT indirect_bytes;
150
/* The 'extra' struct for leak errors. */
154
UInt n_total_records;
155
LossRecord* lossRecord;
159
121
/* Find the i such that ptr points at or inside the block described by
160
122
shadows[i]. Return -1 if none found. This assumes that shadows[]
161
123
has been sorted on the ->data field. */
201
163
mid = (lo + hi) / 2;
202
164
a_mid_lo = shadows[mid]->data;
203
a_mid_hi = shadows[mid]->data + shadows[mid]->size;
165
a_mid_hi = shadows[mid]->data + shadows[mid]->szB;
204
166
/* Extent of block 'mid' is [a_mid_lo .. a_mid_hi).
205
167
Special-case zero-sized blocks - treat them as if they had
206
168
size 1. Not doing so causes them to not cover any address
207
169
range at all and so will never be identified as the target of
208
170
any pointer, which causes them to be incorrectly reported as
209
171
definitely leaked. */
210
if (shadows[mid]->size == 0)
172
if (shadows[mid]->szB == 0)
213
175
if (ptr < a_mid_lo) {
242
204
static Bool (*lc_is_within_valid_secondary) (Addr addr);
243
205
static Bool (*lc_is_valid_aligned_word) (Addr addr);
245
/* Return a short human-readable description of a Reachedness value,
   as used in plain-text leak reports.  Falls back to "?" for any
   value outside the four known loss modes. */
static const HChar* str_lossmode ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "definitely lost"; break;
      case IndirectLeak: loss = "indirectly lost"; break;
      case Interior:     loss = "possibly lost";   break;
      case Proper:       loss = "still reachable"; break;
   }
   return loss;
}
257
/* Return the XML <kind> tag text for a Reachedness value, as used in
   --xml=yes leak reports.  Falls back to "?" for any value outside the
   four known loss modes. */
static const HChar* xml_kind ( Reachedness lossmode )
{
   const HChar *loss = "?";
   switch (lossmode) {
      case Unreached:    loss = "Leak_DefinitelyLost";  break;
      case IndirectLeak: loss = "Leak_IndirectlyLost";  break;
      case Interior:     loss = "Leak_PossiblyLost";    break;
      case Proper:       loss = "Leak_StillReachable";  break;
   }
   return loss;
}
270
/* Used for printing leak errors, avoids exposing the LossRecord type (which
271
comes in as void*, requiring a cast. */
272
void MC_(pp_LeakError)(void* vextra)
274
HChar* xpre = VG_(clo_xml) ? " <what>" : "";
275
HChar* xpost = VG_(clo_xml) ? "</what>" : "";
277
LeakExtra* extra = (LeakExtra*)vextra;
278
LossRecord* l = extra->lossRecord;
279
const Char *loss = str_lossmode(l->loss_mode);
282
VG_(message)(Vg_UserMsg, " <kind>%t</kind>", xml_kind(l->loss_mode));
284
VG_(message)(Vg_UserMsg, "");
287
if (l->indirect_bytes) {
288
VG_(message)(Vg_UserMsg,
289
"%s%,lu (%,lu direct, %,lu indirect) bytes in %,u blocks"
290
" are %s in loss record %,u of %,u%s",
292
l->total_bytes + l->indirect_bytes,
293
l->total_bytes, l->indirect_bytes, l->num_blocks,
294
loss, extra->n_this_record, extra->n_total_records,
298
// Nb: don't put commas in these XML numbers
299
VG_(message)(Vg_UserMsg, " <leakedbytes>%lu</leakedbytes>",
300
l->total_bytes + l->indirect_bytes);
301
VG_(message)(Vg_UserMsg, " <leakedblocks>%u</leakedblocks>",
307
"%s%,lu bytes in %,u blocks are %s in loss record %,u of %,u%s",
309
l->total_bytes, l->num_blocks,
310
loss, extra->n_this_record, extra->n_total_records,
314
VG_(message)(Vg_UserMsg, " <leakedbytes>%d</leakedbytes>",
316
VG_(message)(Vg_UserMsg, " <leakedblocks>%d</leakedblocks>",
320
VG_(pp_ExeContext)(l->allocated_at);
323
208
SizeT MC_(bytes_leaked) = 0;
324
209
SizeT MC_(bytes_indirect) = 0;
355
240
tl_assert(sh_no >= 0 && sh_no < lc_n_shadows);
356
241
tl_assert(ptr >= lc_shadows[sh_no]->data);
357
242
tl_assert(ptr < lc_shadows[sh_no]->data
358
+ lc_shadows[sh_no]->size
359
+ (lc_shadows[sh_no]->size==0 ? 1 : 0));
243
+ lc_shadows[sh_no]->szB
244
+ (lc_shadows[sh_no]->szB==0 ? 1 : 0));
361
246
if (lc_markstack[sh_no].state == Unreached) {
363
248
VG_(printf)("pushing %p-%p\n", lc_shadows[sh_no]->data,
364
lc_shadows[sh_no]->data + lc_shadows[sh_no]->size);
249
lc_shadows[sh_no]->data + lc_shadows[sh_no]->szB);
366
251
tl_assert(lc_markstack[sh_no].next == -1);
367
252
lc_markstack[sh_no].next = lc_markstack_top;
388
273
if (sh_no != clique) {
389
274
if (VG_DEBUG_CLIQUE) {
390
275
if (lc_markstack[sh_no].indirect)
391
VG_(printf)(" clique %d joining clique %d adding %d+%d bytes\n",
276
VG_(printf)(" clique %d joining clique %d adding %lu+%lu bytes\n",
393
lc_shadows[sh_no]->size, lc_markstack[sh_no].indirect);
278
lc_shadows[sh_no]->szB, lc_markstack[sh_no].indirect);
395
VG_(printf)(" %d joining %d adding %d\n",
396
sh_no, clique, lc_shadows[sh_no]->size);
280
VG_(printf)(" %d joining %d adding %lu\n",
281
sh_no, clique, lc_shadows[sh_no]->szB);
399
lc_markstack[clique].indirect += lc_shadows[sh_no]->size;
284
lc_markstack[clique].indirect += lc_shadows[sh_no]->szB;
400
285
lc_markstack[clique].indirect += lc_markstack[sh_no].indirect;
401
286
lc_markstack[sh_no].indirect = 0; /* shouldn't matter */
529
413
pass), then the cliques are merged. */
530
414
for (i = 0; i < lc_n_shadows; i++) {
531
415
if (VG_DEBUG_CLIQUE)
532
VG_(printf)("cliques: %d at %p -> %s\n",
533
i, lc_shadows[i]->data, str_lossmode(lc_markstack[i].state));
416
VG_(printf)("cliques: %d at %p -> Loss state %d\n",
417
i, lc_shadows[i]->data, lc_markstack[i].state);
534
418
if (lc_markstack[i].state != Unreached)
579
463
p->num_blocks ++;
580
p->total_bytes += lc_shadows[i]->size;
464
p->total_bytes += lc_shadows[i]->szB;
581
465
p->indirect_bytes += lc_markstack[i].indirect;
583
467
n_lossrecords ++;
584
468
p = VG_(malloc)(sizeof(LossRecord));
585
469
p->loss_mode = lc_markstack[i].state;
586
470
p->allocated_at = where;
587
p->total_bytes = lc_shadows[i]->size;
471
p->total_bytes = lc_shadows[i]->szB;
588
472
p->indirect_bytes = lc_markstack[i].indirect;
589
473
p->num_blocks = 1;
590
474
p->next = errlist;
618
502
// Nb: because VG_(unique_error) does all the error processing
619
503
// immediately, and doesn't save the error, leakExtra can be
620
504
// stack-allocated.
621
leak_extra.n_this_record = i+1;
622
leak_extra.n_total_records = n_lossrecords;
623
leak_extra.lossRecord = p_min;
625
MC_(record_leak_error) ( tid, &leak_extra, p_min->allocated_at,
506
MC_(record_leak_error) ( tid, i+1, n_lossrecords, p_min,
628
509
if (is_suppressed) {
811
692
tl_assert( lc_shadows[i]->data <= lc_shadows[i+1]->data);
814
/* Sanity check -- make sure they don't overlap */
695
/* Sanity check -- make sure they don't overlap. But do allow
696
exact duplicates. If this assertion fails, it may mean that the
697
application has done something stupid with
698
VALGRIND_MALLOCLIKE_BLOCK client requests, specifically, has
699
made overlapping requests (which are nonsensical). Another way
700
to screw up is to use VALGRIND_MALLOCLIKE_BLOCK for stack
701
locations; again nonsensical. */
815
702
for (i = 0; i < lc_n_shadows-1; i++) {
816
tl_assert( lc_shadows[i]->data + lc_shadows[i]->size
817
<= lc_shadows[i+1]->data );
703
tl_assert( /* normal case - no overlap */
704
(lc_shadows[i]->data + lc_shadows[i]->szB
705
<= lc_shadows[i+1]->data )
707
/* degenerate case: exact duplicates */
708
(lc_shadows[i]->data == lc_shadows[i+1]->data
709
&& lc_shadows[i]->szB == lc_shadows[i+1]->szB)
820
713
if (lc_n_shadows == 0) {
858
751
categorisation, which [if the users ever manage to understand it]
859
752
is really useful for detecting lost cycles.
863
755
Int n_seg_starts;
864
756
seg_starts = get_seg_starts( &n_seg_starts );
865
757
tl_assert(seg_starts && n_seg_starts > 0);
866
758
/* VG_(am_show_nsegments)( 0,"leakcheck"); */
867
759
for (i = 0; i < n_seg_starts; i++) {
868
seg = VG_(am_find_nsegment)( seg_starts[i] );
760
NSegment const* seg = VG_(am_find_nsegment)( seg_starts[i] );
870
762
if (seg->kind != SkFileC && seg->kind != SkAnonC)
928
820
MC_(bytes_reachable), blocks_reachable );
929
821
VG_(message)(Vg_UserMsg, " suppressed: %,lu bytes in %,lu blocks.",
930
822
MC_(bytes_suppressed), blocks_suppressed );
931
if (mode == LC_Summary && blocks_leaked > 0)
932
VG_(message)(Vg_UserMsg,
933
"Use --leak-check=full to see details of leaked memory.");
934
else if (!MC_(clo_show_reachable)) {
823
if (mode == LC_Summary
824
&& (blocks_leaked + blocks_indirect
825
+ blocks_dubious + blocks_reachable) > 0) {
826
VG_(message)(Vg_UserMsg,
827
"Rerun with --leak-check=full to see details of leaked memory.");
829
if (blocks_reachable > 0 && !MC_(clo_show_reachable) && mode == LC_Full) {
935
830
VG_(message)(Vg_UserMsg,
936
831
"Reachable blocks (those to which a pointer was found) are not shown.");
937
832
VG_(message)(Vg_UserMsg,
938
"To see them, rerun with: --show-reachable=yes");
833
"To see them, rerun with: --leak-check=full --show-reachable=yes");