~bkerensa/ubuntu/raring/valgrind/merge-from-deb

1 by Andrés Roldán
Import upstream version 2.1.1
1
/*--------------------------------------------------------------------*/
2
/*--- MemCheck: Maintain bitmaps of memory, tracking the           ---*/
3
/*--- accessibility (A) and validity (V) status of each byte.      ---*/
4
/*---                                                    mc_main.c ---*/
5
/*--------------------------------------------------------------------*/
6
7
/*
8
   This file is part of MemCheck, a heavyweight Valgrind tool for
9
   detecting memory errors.
10
11
   Copyright (C) 2000-2007 Julian Seward 
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
12
      jseward@acm.org
1 by Andrés Roldán
Import upstream version 2.1.1
13
14
   This program is free software; you can redistribute it and/or
15
   modify it under the terms of the GNU General Public License as
16
   published by the Free Software Foundation; either version 2 of the
17
   License, or (at your option) any later version.
18
19
   This program is distributed in the hope that it will be useful, but
20
   WITHOUT ANY WARRANTY; without even the implied warranty of
21
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22
   General Public License for more details.
23
24
   You should have received a copy of the GNU General Public License
25
   along with this program; if not, write to the Free Software
26
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27
   02111-1307, USA.
28
29
   The GNU General Public License is contained in the file COPYING.
30
*/
31
32
#include "pub_tool_basics.h"
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
33
#include "pub_tool_aspacemgr.h"
34
#include "pub_tool_hashtable.h"     // For mc_include.h
35
#include "pub_tool_libcbase.h"
36
#include "pub_tool_libcassert.h"
37
#include "pub_tool_libcprint.h"
38
#include "pub_tool_machine.h"
39
#include "pub_tool_mallocfree.h"
40
#include "pub_tool_options.h"
41
#include "pub_tool_oset.h"
42
#include "pub_tool_replacemalloc.h"
43
#include "pub_tool_tooliface.h"
44
#include "pub_tool_threadstate.h"
45
#include "pub_tool_oset.h"
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
46
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
47
#include "mc_include.h"
1 by Andrés Roldán
Import upstream version 2.1.1
48
#include "memcheck.h"   /* for client requests */
49
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
50
#ifdef HAVE_BUILTIN_EXPECT
51
#define EXPECTED_TAKEN(cond)     __builtin_expect((cond),1)
52
#define EXPECTED_NOT_TAKEN(cond) __builtin_expect((cond),0)
53
#else
54
#define EXPECTED_TAKEN(cond)     (cond)
55
#define EXPECTED_NOT_TAKEN(cond) (cond)
56
#endif
57
58
/* Set to 1 to do a little more sanity checking */
59
#define VG_DEBUG_MEMORY 0
60
1 by Andrés Roldán
Import upstream version 2.1.1
61
#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)
62
63
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
64
/*------------------------------------------------------------*/
65
/*--- Fast-case knobs                                      ---*/
66
/*------------------------------------------------------------*/
67
 
68
// Comment these out to disable the fast cases (don't just set them to zero).
69
70
#define PERF_FAST_LOADV    1
71
#define PERF_FAST_STOREV   1
72
73
#define PERF_FAST_SARP     1
74
75
#define PERF_FAST_STACK    1
76
#define PERF_FAST_STACK2   1
77
78
/*------------------------------------------------------------*/
79
/*--- V bits and A bits                                    ---*/
80
/*------------------------------------------------------------*/
81
82
/* Conceptually, every byte value has 8 V bits, which track whether Memcheck
83
   thinks the corresponding value bit is defined.  And every memory byte
84
   has an A bit, which tracks whether Memcheck thinks the program can access
85
   it safely.   So every N-bit register is shadowed with N V bits, and every
86
   memory byte is shadowed with 8 V bits and one A bit.
87
88
   In the implementation, we use two forms of compression (compressed V bits
89
   and distinguished secondary maps) to avoid the 9-bit-per-byte overhead
90
   for memory.
91
92
   Memcheck also tracks extra information about each heap block that is
93
   allocated, for detecting memory leaks and other purposes.
94
*/
95
96
/*------------------------------------------------------------*/
97
/*--- Basic A/V bitmap representation.                     ---*/
98
/*------------------------------------------------------------*/
99
100
/* All reads and writes are checked against a memory map (a.k.a. shadow
101
   memory), which records the state of all memory in the process.  
102
   
103
   On 32-bit machines the memory map is organised as follows.
104
   The top 16 bits of an address are used to index into a top-level
1 by Andrés Roldán
Import upstream version 2.1.1
105
   map table, containing 65536 entries.  Each entry is a pointer to a
106
   second-level map, which records the accessibility and validity
107
   permissions for the 65536 bytes indexed by the lower 16 bits of the
108
   address.  Each byte is represented by two bits (details are below).  So
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
109
   each second-level map contains 16384 bytes.  This two-level arrangement
110
   conveniently divides the 4G address space into 64k lumps, each size 64k
111
   bytes.
112
1 by Andrés Roldán
Import upstream version 2.1.1
113
   All entries in the primary (top-level) map must point to a valid
114
   secondary (second-level) map.  Since many of the 64kB chunks will
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
115
   have the same status for every bit -- ie. noaccess (for unused
116
   address space) or entirely addressable and defined (for code segments) --
117
   there are three distinguished secondary maps, which indicate 'noaccess',
118
   'undefined' and 'defined'.  For these uniform 64kB chunks, the primary
119
   map entry points to the relevant distinguished map.  In practice,
120
   typically more than half of the addressable memory is represented with
121
   the 'undefined' or 'defined' distinguished secondary map, so it gives a
122
   good saving.  It also lets us set the V+A bits of large address regions
123
   quickly in set_address_range_perms().
124
125
   On 64-bit machines it's more complicated.  If we followed the same basic
126
   scheme we'd have a four-level table which would require too many memory
127
   accesses.  So instead the top-level map table has 2^19 entries (indexed
128
   using bits 16..34 of the address);  this covers the bottom 32GB.  Any
129
   accesses above 32GB are handled with a slow, sparse auxiliary table.
130
   Valgrind's address space manager tries very hard to keep things below
131
   this 32GB barrier so that performance doesn't suffer too much.
132
133
   Note that this file has a lot of different functions for reading and
134
   writing shadow memory.  Only a couple are strictly necessary (eg.
135
   get_vabits2 and set_vabits2), most are just specialised for specific
136
   common cases to improve performance.
137
138
   Aside: the V+A bits are less precise than they could be -- we have no way
139
   of marking memory as read-only.  It would be great if we could add an
140
   extra state VA_BITSn_READONLY.  But then we'd have 5 different states,
141
   which requires 2.3 bits to hold, and there's no way to do that elegantly
142
   -- we'd have to double up to 4 bits of metadata per byte, which doesn't
143
   seem worth it.
144
*/
1 by Andrés Roldán
Import upstream version 2.1.1
145
146
/* --------------- Basic configuration --------------- */
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
147
148
/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */
149
150
#if VG_WORDSIZE == 4
151
152
/* cover the entire address space */
153
#  define N_PRIMARY_BITS  16
154
155
#else
156
157
/* Just handle the first 32G fast and the rest via auxiliary
158
   primaries. */
159
#  define N_PRIMARY_BITS  19
160
161
#endif
162
163
164
/* Do not change this. */
165
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)
166
167
/* Do not change this. */
168
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)
169
170
171
/* --------------- Secondary maps --------------- */
172
173
// Each byte of memory conceptually has an A bit, which indicates its
174
// addressability, and 8 V bits, which indicates its definedness.
175
//
176
// But because very few bytes are partially defined, we can use a nice
177
// compression scheme to reduce the size of shadow memory.  Each byte of
178
// memory has 2 bits which indicates its state (ie. V+A bits):
179
//
180
//   00:  noaccess    (unaddressable but treated as fully defined)
181
//   01:  undefined   (addressable and fully undefined)
182
//   10:  defined     (addressable and fully defined)
183
//   11:  partdefined (addressable and partially defined)
184
//
185
// In the "partdefined" case, we use a secondary table to store the V bits.
186
// Each entry in the secondary-V-bits table maps a byte address to its 8 V
187
// bits.
188
//
189
// We store the compressed V+A bits in 8-bit chunks, ie. the V+A bits for
190
// four bytes (32 bits) of memory are in each chunk.  Hence the name
191
// "vabits8".  This lets us get the V+A bits for four bytes at a time
192
// easily (without having to do any shifting and/or masking), and that is a
193
// very common operation.  (Note that although each vabits8 chunk
194
// is 8 bits in size, it represents 32 bits of memory.)
195
//
196
// The representation is "inverse" little-endian... each 4 bytes of
197
// memory is represented by a 1 byte value, where:
198
//
199
// - the status of byte (a+0) is held in bits [1..0]
200
// - the status of byte (a+1) is held in bits [3..2]
201
// - the status of byte (a+2) is held in bits [5..4]
202
// - the status of byte (a+3) is held in bits [7..6]
203
//
204
// It's "inverse" because endianness normally describes a mapping from
205
// value bits to memory addresses;  in this case the mapping is inverted.
206
// Ie. instead of particular value bits being held in certain addresses, in
207
// this case certain addresses are represented by particular value bits.
208
// See insert_vabits2_into_vabits8() for an example.
209
// 
210
// But note that we don't compress the V bits stored in registers;  they
211
// need to be explicit to make the shadow operations possible.  Therefore
212
// when moving values between registers and memory we need to convert
213
// between the expanded in-register format and the compressed in-memory
214
// format.  This isn't so difficult, it just requires careful attention in a
215
// few places.
216
217
// These represent eight bits of memory.
218
#define VA_BITS2_NOACCESS     0x0      // 00b
219
#define VA_BITS2_UNDEFINED    0x1      // 01b
220
#define VA_BITS2_DEFINED      0x2      // 10b
221
#define VA_BITS2_PARTDEFINED  0x3      // 11b
222
223
// These represent 16 bits of memory.
224
#define VA_BITS4_NOACCESS     0x0      // 00_00b
225
#define VA_BITS4_UNDEFINED    0x5      // 01_01b
226
#define VA_BITS4_DEFINED      0xa      // 10_10b
227
228
// These represent 32 bits of memory.
229
#define VA_BITS8_NOACCESS     0x00     // 00_00_00_00b
230
#define VA_BITS8_UNDEFINED    0x55     // 01_01_01_01b
231
#define VA_BITS8_DEFINED      0xaa     // 10_10_10_10b
232
233
// These represent 64 bits of memory.
234
#define VA_BITS16_NOACCESS    0x0000   // 00_00_00_00b x 2
235
#define VA_BITS16_UNDEFINED   0x5555   // 01_01_01_01b x 2
236
#define VA_BITS16_DEFINED     0xaaaa   // 10_10_10_10b x 2
237
238
239
#define SM_CHUNKS             16384
240
#define SM_OFF(aaa)           (((aaa) & 0xffff) >> 2)
241
#define SM_OFF_16(aaa)        (((aaa) & 0xffff) >> 3)
242
243
// Paranoia:  it's critical for performance that the requested inlining
244
// occurs.  So try extra hard.
245
#define INLINE    inline __attribute__((always_inline))
246
247
/* Round 'a' down to the base address of the secondary-map chunk
   containing it. */
static INLINE Addr start_of_this_sm ( Addr a ) {
   return a & ~SM_MASK;
}
250
/* True iff 'a' lies exactly on a secondary-map chunk boundary. */
static INLINE Bool is_start_of_sm ( Addr a ) {
   return a == start_of_this_sm(a);
}
253
1 by Andrés Roldán
Import upstream version 2.1.1
254
typedef 
255
   struct {
256
      UChar vabits8[SM_CHUNKS];
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
257
   }
1 by Andrés Roldán
Import upstream version 2.1.1
258
   SecMap;
259
260
// 3 distinguished secondary maps, one for no-access, one for
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
261
// accessible but undefined, and one for accessible and defined.
262
// Distinguished secondaries may never be modified.
263
#define SM_DIST_NOACCESS   0
264
#define SM_DIST_UNDEFINED  1
265
#define SM_DIST_DEFINED    2
266
267
static SecMap sm_distinguished[3];
268
269
/* True iff 'sm' is one of the three read-only distinguished
   secondaries in sm_distinguished[]. */
static INLINE Bool is_distinguished_sm ( SecMap* sm ) {
   return (sm >= &sm_distinguished[0]) && (sm <= &sm_distinguished[2]);
}
272
273
// Forward declaration
274
static void update_SM_counts(SecMap* oldSM, SecMap* newSM);
275
276
/* dist_sm points to one of our three distinguished secondaries.  Make
277
   a copy of it so that we can write to it.
278
*/
279
/* dist_sm points at one of our three distinguished secondaries.
   Allocate a fresh writable SecMap, copy the distinguished contents
   into it, keep the SM statistics in step, and return the copy. */
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* sm_new;
   Bool    is_dsm = dist_sm == &sm_distinguished[0]
                 || dist_sm == &sm_distinguished[1]
                 || dist_sm == &sm_distinguished[2];
   tl_assert(is_dsm);

   sm_new = VG_(am_shadow_alloc)( sizeof(SecMap) );
   if (NULL == sm_new) {
      VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap", 
                                   sizeof(SecMap) );
   }
   VG_(memcpy)( sm_new, dist_sm, sizeof(SecMap) );
   update_SM_counts( dist_sm, sm_new );
   return sm_new;
}
294
295
/* --------------- Stats --------------- */
296
297
static Int   n_issued_SMs      = 0;
298
static Int   n_deissued_SMs    = 0;
299
static Int   n_noaccess_SMs    = N_PRIMARY_MAP; // start with many noaccess DSMs
300
static Int   n_undefined_SMs   = 0;
301
static Int   n_defined_SMs     = 0;
302
static Int   n_non_DSM_SMs     = 0;
303
static Int   max_noaccess_SMs  = 0;
304
static Int   max_undefined_SMs = 0;
305
static Int   max_defined_SMs   = 0;
306
static Int   max_non_DSM_SMs   = 0;
307
308
/* # searches initiated in auxmap_L1, and # base cmps required */
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
309
static ULong n_auxmap_L1_searches  = 0;
310
static ULong n_auxmap_L1_cmps      = 0;
311
/* # of searches that missed in auxmap_L1 and therefore had to
312
   be handed to auxmap_L2. And the number of nodes inserted. */
313
static ULong n_auxmap_L2_searches  = 0;
314
static ULong n_auxmap_L2_nodes     = 0;
315
316
static Int   n_sanity_cheap     = 0;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
317
static Int   n_sanity_expensive = 0;
318
319
static Int   n_secVBit_nodes   = 0;
320
static Int   max_secVBit_nodes = 0;
321
322
/* Keep the secondary-map population statistics in step when the map
   for some chunk changes from oldSM to newSM. */
static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
{
   /* Retire the old map from its category. */
   if (oldSM == &sm_distinguished[SM_DIST_NOACCESS]) {
      n_noaccess_SMs--;
   } else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) {
      n_undefined_SMs--;
   } else if (oldSM == &sm_distinguished[SM_DIST_DEFINED]) {
      n_defined_SMs--;
   } else {
      n_non_DSM_SMs--;
      n_deissued_SMs++;
   }

   /* Enrol the new map in its category. */
   if (newSM == &sm_distinguished[SM_DIST_NOACCESS]) {
      n_noaccess_SMs++;
   } else if (newSM == &sm_distinguished[SM_DIST_UNDEFINED]) {
      n_undefined_SMs++;
   } else if (newSM == &sm_distinguished[SM_DIST_DEFINED]) {
      n_defined_SMs++;
   } else {
      n_non_DSM_SMs++;
      n_issued_SMs++;
   }

   /* Track the high-water marks. */
   if (n_noaccess_SMs  > max_noaccess_SMs ) max_noaccess_SMs  = n_noaccess_SMs;
   if (n_undefined_SMs > max_undefined_SMs) max_undefined_SMs = n_undefined_SMs;
   if (n_defined_SMs   > max_defined_SMs  ) max_defined_SMs   = n_defined_SMs;
   if (n_non_DSM_SMs   > max_non_DSM_SMs  ) max_non_DSM_SMs   = n_non_DSM_SMs;
}
341
342
/* --------------- Primary maps --------------- */
343
344
/* The main primary map.  This covers some initial part of the address
345
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
346
   handled using the auxiliary primary map.  
347
*/
348
static SecMap* primary_map[N_PRIMARY_MAP];
349
350
351
/* An entry in the auxiliary primary map.  base must be a 64k-aligned
352
   value, and sm points at the relevant secondary map.  As with the
353
   main primary map, the secondary may be either a real secondary, or
354
   one of the three distinguished secondaries.  DO NOT CHANGE THIS
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
355
   LAYOUT: the first word has to be the key for OSet fast lookups.
356
*/
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
357
typedef
358
   struct { 
359
      Addr    base;
360
      SecMap* sm;
361
   }
362
   AuxMapEnt;
363
364
/* Tunable parameter: How big is the L1 queue? */
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
365
#define N_AUXMAP_L1 24
366
367
/* Tunable parameter: How far along the L1 queue to insert
368
   entries resulting from L2 lookups? */
369
#define AUXMAP_L1_INSERT_IX 12
370
371
static struct {
372
          Addr       base;
373
          AuxMapEnt* ent; // pointer to the matching auxmap_L2 node
374
       } 
375
       auxmap_L1[N_AUXMAP_L1];
376
377
static OSet* auxmap_L2 = NULL;
378
379
static void init_auxmap_L1_L2 ( void )
380
{
381
   Int i;
382
   for (i = 0; i < N_AUXMAP_L1; i++) {
383
      auxmap_L1[i].base = 0;
384
      auxmap_L1[i].ent  = NULL;
385
   }
386
387
   tl_assert(0 == offsetof(AuxMapEnt,base));
388
   tl_assert(sizeof(Addr) == sizeof(void*));
389
   auxmap_L2 = VG_(OSetGen_Create)( /*keyOff*/  offsetof(AuxMapEnt,base),
390
                                    /*fastCmp*/ NULL,
391
                                    VG_(malloc), VG_(free) );
392
}
393
394
/* Check representation invariants; if OK return NULL; else a
395
   descriptive bit of text.  Also return the number of
396
   non-distinguished secondary maps referred to from the auxiliary
397
   primary maps. */
398
399
/* Check representation invariants of the auxmap L1/L2 structures.
   Returns NULL if all is well, else a short description of the first
   violation found.  Also counts, via *n_secmaps_found, the
   non-distinguished secondaries reachable from the auxiliary map.

   Invariants: on 32-bit platforms both tables must remain empty
   forever.  On 64-bit platforms every L2 node has a 64k-aligned .base
   above MAX_PRIMARY_ADDRESS; every occupied L1 slot must mirror a node
   that really is in L2; empty L1 slots are (0, NULL); and no .base
   appears twice in L1. */
static HChar* check_auxmap_L1_L2_sanity ( Word* n_secmaps_found )
{
   Word ix, jx;

   *n_secmaps_found = 0;
   if (sizeof(void*) == 4) {
      /* 32-bit platform: nothing may be cached anywhere. */
      if (VG_(OSetGen_Size)(auxmap_L2) != 0)
         return "32-bit: auxmap_L2 is non-empty";
      for (ix = 0; ix < N_AUXMAP_L1; ix++) {
         if (auxmap_L1[ix].base != 0 || auxmap_L1[ix].ent != NULL)
            return "32-bit: auxmap_L1 is non-empty";
      }
   } else {
      /* 64-bit platform */
      UWord      n_seen = 0;
      AuxMapEnt* node;
      AuxMapEnt* found;
      AuxMapEnt  probe;

      /* Walk the entire L2 table. */
      VG_(OSetGen_ResetIter)(auxmap_L2);
      while ( (node = VG_(OSetGen_Next)(auxmap_L2)) != NULL ) {
         n_seen++;
         if ((node->base & (Addr)0xFFFF) != 0)
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L2";
         if (node->base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L2";
         if (node->sm == NULL)
            return "64-bit: .sm in _L2 is NULL";
         if (!is_distinguished_sm(node->sm))
            (*n_secmaps_found)++;
      }
      if (n_seen != n_auxmap_L2_nodes)
         return "64-bit: disagreement on number of elems in _L2";

      /* Every occupied L1 slot must agree with L2. */
      for (ix = 0; ix < N_AUXMAP_L1; ix++) {
         if (auxmap_L1[ix].base == 0 && auxmap_L1[ix].ent == NULL)
            continue;   /* empty slot */
         if ((auxmap_L1[ix].base & (Addr)0xFFFF) != 0)
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L1";
         if (auxmap_L1[ix].base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L1";
         if (auxmap_L1[ix].ent == NULL)
            return "64-bit: .ent is NULL in auxmap_L1";
         if (auxmap_L1[ix].ent->base != auxmap_L1[ix].base)
            return "64-bit: _L1 and _L2 bases are inconsistent";
         /* The cached entry must be the one L2 itself would return. */
         probe.base = auxmap_L1[ix].base;
         probe.sm   = 0;
         found = VG_(OSetGen_Lookup)(auxmap_L2, &probe);
         if (found == NULL)
            return "64-bit: _L1 .base not found in _L2";
         if (found != auxmap_L1[ix].ent)
            return "64-bit: _L1 .ent disagrees with _L2 entry";
      }

      /* L1 must contain no duplicate bases. */
      for (ix = 0; ix < N_AUXMAP_L1; ix++) {
         if (auxmap_L1[ix].base == 0)
            continue;
         for (jx = ix+1; jx < N_AUXMAP_L1; jx++) {
            if (auxmap_L1[jx].base == 0)
               continue;
            if (auxmap_L1[jx].base == auxmap_L1[ix].base)
               return "64-bit: duplicate _L1 .base entries";
         }
      }
   }
   return NULL; /* ok */
}
480
481
/* Insert 'ent' into the L1 queue at position 'rank', shifting the
   entries from 'rank' onwards down by one (the last falls off). */
static void insert_into_auxmap_L1_at ( Word rank, AuxMapEnt* ent )
{
   Word jx;
   tl_assert(ent != NULL);
   tl_assert(rank >= 0 && rank < N_AUXMAP_L1);
   for (jx = N_AUXMAP_L1-1; jx > rank; jx--)
      auxmap_L1[jx] = auxmap_L1[jx-1];
   auxmap_L1[rank].ent  = ent;
   auxmap_L1[rank].base = ent->base;
}
491
492
/* Look 'a' up in the auxiliary primary map, consulting the small
   self-organising L1 cache before falling back to the L2 OSet.
   Returns the matching entry, or NULL if none exists.  Never
   allocates.  A hit is promoted towards the front of L1; an L2 hit is
   (re)inserted into L1 at AUXMAP_L1_INSERT_IX. */
static INLINE AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   AuxMapEnt  probe;
   AuxMapEnt* hit;
   Word       ix;

   tl_assert(a > MAX_PRIMARY_ADDRESS);
   a &= ~(Addr)0xFFFF;   /* round down to the 64k chunk base */

   /* Fast paths for the two front slots: slot 0 is free; a hit in
      slot 1 is promoted by swapping it with slot 0. */
   if (EXPECTED_TAKEN(auxmap_L1[0].base == a))
      return auxmap_L1[0].ent;
   if (EXPECTED_TAKEN(auxmap_L1[1].base == a)) {
      Addr       tmp_base = auxmap_L1[0].base;
      AuxMapEnt* tmp_ent  = auxmap_L1[0].ent;
      auxmap_L1[0].base = auxmap_L1[1].base;
      auxmap_L1[0].ent  = auxmap_L1[1].ent;
      auxmap_L1[1].base = tmp_base;
      auxmap_L1[1].ent  = tmp_ent;
      return auxmap_L1[0].ent;
   }

   n_auxmap_L1_searches++;

   /* Full linear scan of L1. */
   for (ix = 0; ix < N_AUXMAP_L1; ix++) {
      if (auxmap_L1[ix].base == a)
         break;
   }
   tl_assert(ix >= 0 && ix <= N_AUXMAP_L1);

   n_auxmap_L1_cmps += (ULong)(ix+1);

   if (ix < N_AUXMAP_L1) {
      /* Hit: bubble the entry one slot towards the front. */
      if (ix > 0) {
         Addr       tmp_base = auxmap_L1[ix-1].base;
         AuxMapEnt* tmp_ent  = auxmap_L1[ix-1].ent;
         auxmap_L1[ix-1].base = auxmap_L1[ix].base;
         auxmap_L1[ix-1].ent  = auxmap_L1[ix].ent;
         auxmap_L1[ix].base   = tmp_base;
         auxmap_L1[ix].ent    = tmp_ent;
         ix--;
      }
      return auxmap_L1[ix].ent;
   }

   n_auxmap_L2_searches++;

   /* Missed in L1; consult the L2 OSet. */
   probe.base = a;
   probe.sm   = 0;
   hit = VG_(OSetGen_Lookup)(auxmap_L2, &probe);
   if (hit)
      insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, hit );
   return hit;
}
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
551
552
/* Find the auxmap entry covering 'a', creating a fresh one (pointing
   at the no-access distinguished secondary) if none exists yet. */
static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt* ent;

   /* Common case: it is already present. */
   ent = maybe_find_in_auxmap( a );
   if (EXPECTED_TAKEN(ent != NULL))
      return ent;

   /* No entry yet: build a node for a's 64k chunk, mark it no-access,
      and publish it in both L2 and L1. */
   a &= ~(Addr)0xFFFF;

   ent = (AuxMapEnt*) VG_(OSetGen_AllocNode)( auxmap_L2, sizeof(AuxMapEnt) );
   tl_assert(ent);
   ent->base = a;
   ent->sm   = &sm_distinguished[SM_DIST_NOACCESS];
   VG_(OSetGen_Insert)( auxmap_L2, ent );
   insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, ent );
   n_auxmap_L2_nodes++;
   return ent;
}
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
574
575
/* --------------- SecMap fundamentals --------------- */
576
577
// In all these, 'low' means it's definitely in the main primary map,
578
// 'high' means it's definitely in the auxiliary table.
579
580
/* Slot in the main primary map holding the secondary for 'a'.  Only
   meaningful for addresses covered by the main primary map. */
static INLINE SecMap** get_secmap_low_ptr ( Addr a )
{
   UWord ix = a >> 16;
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(ix < N_PRIMARY_MAP);
#  endif
   return &primary_map[ix];
}
588
589
/* Slot in the auxiliary table holding the secondary for 'a',
   allocating the auxmap entry if it does not exist yet. */
static INLINE SecMap** get_secmap_high_ptr ( Addr a )
{
   return &( find_or_alloc_in_auxmap(a)->sm );
}
594
595
/* Slot holding the secondary for 'a', wherever it lives. */
static SecMap** get_secmap_ptr ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS)
      return get_secmap_low_ptr(a);
   return get_secmap_high_ptr(a);
}
601
602
/* Read-only secondary for 'a' in the main-primary-map range. */
static INLINE SecMap* get_secmap_for_reading_low ( Addr a )
{
   SecMap** slot = get_secmap_low_ptr(a);
   return *slot;
}
606
607
/* Read-only secondary for 'a' in the auxiliary-map range. */
static INLINE SecMap* get_secmap_for_reading_high ( Addr a )
{
   SecMap** slot = get_secmap_high_ptr(a);
   return *slot;
}
611
612
/* Writable secondary for 'a' in the main-primary-map range; a
   distinguished secondary is COW-copied on first write. */
static INLINE SecMap* get_secmap_for_writing_low(Addr a)
{
   SecMap** slot = get_secmap_low_ptr(a);
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*slot)))
      *slot = copy_for_writing(*slot);
   return *slot;
}
619
620
/* Writable secondary for 'a' in the auxiliary-map range; a
   distinguished secondary is COW-copied on first write. */
static INLINE SecMap* get_secmap_for_writing_high ( Addr a )
{
   SecMap** slot = get_secmap_high_ptr(a);
   if (EXPECTED_NOT_TAKEN(is_distinguished_sm(*slot)))
      *slot = copy_for_writing(*slot);
   return *slot;
}
627
628
/* Produce the secmap for 'a', either from the primary map or by
629
   ensuring there is an entry for it in the aux primary map.  The
630
   secmap may be a distinguished one as the caller will only want to
631
   be able to read it. 
632
*/
633
/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   result may be a distinguished secondary; the caller must only
   read it. */
static INLINE SecMap* get_secmap_for_reading ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS)
      return get_secmap_for_reading_low (a);
   return get_secmap_for_reading_high(a);
}
639
640
/* Produce the secmap for 'a', either from the primary map or by
641
   ensuring there is an entry for it in the aux primary map.  The
642
   secmap may not be a distinguished one, since the caller will want
643
   to be able to write it.  If it is a distinguished secondary, make a
644
   writable copy of it, install it, and return the copy instead.  (COW
645
   semantics).
646
*/
647
/* Produce a writable secmap for 'a', either from the primary map or
   from the aux primary map.  A distinguished secondary is replaced by
   a private writable copy first (COW semantics), so the result is
   never distinguished. */
static SecMap* get_secmap_for_writing ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS)
      return get_secmap_for_writing_low (a);
   return get_secmap_for_writing_high(a);
}
653
654
/* If 'a' has a SecMap, produce it.  Else produce NULL.  But don't
655
   allocate one if one doesn't already exist.  This is used by the
656
   leak checker.
657
*/
658
/* Return a's SecMap if one exists, else NULL.  Never allocates one.
   Used by the leak checker. */
static SecMap* maybe_get_secmap_for ( Addr a )
{
   AuxMapEnt* ent;
   if (a <= MAX_PRIMARY_ADDRESS)
      return get_secmap_for_reading_low(a);
   ent = maybe_find_in_auxmap(a);
   return ent != NULL ? ent->sm : NULL;
}
667
668
/* --------------- Fundamental functions --------------- */
669
670
static INLINE
671
void insert_vabits2_into_vabits8 ( Addr a, UChar vabits2, UChar* vabits8 )
672
{
673
   UInt shift =  (a & 3)  << 1;        // shift by 0, 2, 4, or 6
674
   *vabits8  &= ~(0x3     << shift);   // mask out the two old bits
675
   *vabits8  |=  (vabits2 << shift);   // mask  in the two new bits
676
}
677
678
static INLINE
679
void insert_vabits4_into_vabits8 ( Addr a, UChar vabits4, UChar* vabits8 )
680
{
681
   UInt shift;
682
   tl_assert(VG_IS_2_ALIGNED(a));      // Must be 2-aligned
683
   shift     =  (a & 2)   << 1;        // shift by 0 or 4
684
   *vabits8 &= ~(0xf      << shift);   // mask out the four old bits
685
   *vabits8 |=  (vabits4 << shift);    // mask  in the four new bits
686
}
687
688
static INLINE
689
UChar extract_vabits2_from_vabits8 ( Addr a, UChar vabits8 )
690
{
691
   UInt shift = (a & 3) << 1;          // shift by 0, 2, 4, or 6
692
   vabits8 >>= shift;                  // shift the two bits to the bottom
693
   return 0x3 & vabits8;               // mask out the rest
694
}
695
696
static INLINE
697
UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
698
{
699
   UInt shift;
700
   tl_assert(VG_IS_2_ALIGNED(a));      // Must be 2-aligned
701
   shift = (a & 2) << 1;               // shift by 0 or 4
702
   vabits8 >>= shift;                  // shift the four bits to the bottom
703
   return 0xf & vabits8;               // mask out the rest
704
}
705
706
// Note that these four are only used in slow cases.  The fast cases do
707
// clever things like combine the auxmap check (in
708
// get_secmap_{read,writ}able) with alignment checks.
709
710
// *** WARNING! ***
711
// Any time this function is called, if it is possible that vabits2
712
// is equal to VA_BITS2_PARTDEFINED, then the corresponding entry in the
713
// sec-V-bits table must also be set!
714
static INLINE
715
void set_vabits2 ( Addr a, UChar vabits2 )
716
{
717
   SecMap* sm       = get_secmap_for_writing(a);
718
   UWord   sm_off   = SM_OFF(a);
719
   insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
720
}
721
722
static INLINE
723
UChar get_vabits2 ( Addr a )
724
{
725
   SecMap* sm       = get_secmap_for_reading(a);
726
   UWord   sm_off   = SM_OFF(a);
727
   UChar   vabits8  = sm->vabits8[sm_off];
728
   return extract_vabits2_from_vabits8(a, vabits8);
729
}
730
731
// *** WARNING! ***
732
// Any time this function is called, if it is possible that any of the
733
// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the 
734
// corresponding entry(s) in the sec-V-bits table must also be set!
735
static INLINE
736
UChar get_vabits8_for_aligned_word32 ( Addr a )
737
{
738
   SecMap* sm       = get_secmap_for_reading(a);
739
   UWord   sm_off   = SM_OFF(a);
740
   UChar   vabits8  = sm->vabits8[sm_off];
741
   return vabits8;
742
}
743
744
static INLINE
745
void set_vabits8_for_aligned_word32 ( Addr a, UChar vabits8 )
746
{
747
   SecMap* sm       = get_secmap_for_writing(a);
748
   UWord   sm_off   = SM_OFF(a);
749
   sm->vabits8[sm_off] = vabits8;
750
}
751
752
753
// Forward declarations
754
static UWord get_sec_vbits8(Addr a);
755
static void  set_sec_vbits8(Addr a, UWord vbits8);
756
757
// Returns False if there was an addressability error.
758
static INLINE
759
Bool set_vbits8 ( Addr a, UChar vbits8 )
760
{
761
   Bool  ok      = True;
762
   UChar vabits2 = get_vabits2(a);
763
   if ( VA_BITS2_NOACCESS != vabits2 ) {
764
      // Addressable.  Convert in-register format to in-memory format.
765
      // Also remove any existing sec V bit entry for the byte if no
766
      // longer necessary.
767
      if      ( V_BITS8_DEFINED   == vbits8 ) { vabits2 = VA_BITS2_DEFINED;   }
768
      else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_UNDEFINED; }
769
      else                                    { vabits2 = VA_BITS2_PARTDEFINED;
770
                                                set_sec_vbits8(a, vbits8);  }
771
      set_vabits2(a, vabits2);
772
773
   } else {
774
      // Unaddressable!  Do nothing -- when writing to unaddressable
775
      // memory it acts as a black hole, and the V bits can never be seen
776
      // again.  So we don't have to write them at all.
777
      ok = False;
778
   }
779
   return ok;
780
}
781
782
// Returns False if there was an addressability error.  In that case, we put
783
// all defined bits into vbits8.
784
static INLINE
785
Bool get_vbits8 ( Addr a, UChar* vbits8 )
786
{
787
   Bool  ok      = True;
788
   UChar vabits2 = get_vabits2(a);
789
790
   // Convert the in-memory format to in-register format.
791
   if      ( VA_BITS2_DEFINED   == vabits2 ) { *vbits8 = V_BITS8_DEFINED;   }
792
   else if ( VA_BITS2_UNDEFINED == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
793
   else if ( VA_BITS2_NOACCESS  == vabits2 ) {
794
      *vbits8 = V_BITS8_DEFINED;    // Make V bits defined!
795
      ok = False;
796
   } else {
797
      tl_assert( VA_BITS2_PARTDEFINED == vabits2 );
798
      *vbits8 = get_sec_vbits8(a);
799
   }
800
   return ok;
801
}
802
803
804
/* --------------- Secondary V bit table ------------ */
805
806
// This table holds the full V bit pattern for partially-defined bytes
807
// (PDBs) that are represented by VA_BITS2_PARTDEFINED in the main shadow
808
// memory.
809
//
810
// Note: the nodes in this table can become stale.  Eg. if you write a PDB,
811
// then overwrite the same address with a fully defined byte, the sec-V-bit
812
// node will not necessarily be removed.  This is because checking for
813
// whether removal is necessary would slow down the fast paths.  
814
//
815
// To avoid the stale nodes building up too much, we periodically (once the
816
// table reaches a certain size) garbage collect (GC) the table by
817
// traversing it and evicting any "sufficiently stale" nodes, ie. nodes that
818
// are stale and haven't been touched for a certain number of collections.
819
// If more than a certain proportion of nodes survived, we increase the
820
// table size so that GCs occur less often.  
821
//
822
// (So this a bit different to a traditional GC, where you definitely want
823
// to remove any dead nodes.  It's more like we have a resizable cache and
824
// we're trying to find the right balance how many elements to evict and how
825
// big to make the cache.)
826
//
827
// This policy is designed to avoid bad table bloat in the worst case where
828
// a program creates huge numbers of stale PDBs -- we would get this bloat
829
// if we had no GC -- while handling well the case where a node becomes
830
// stale but shortly afterwards is rewritten with a PDB and so becomes
831
// non-stale again (which happens quite often, eg. in perf/bz2).  If we just
832
// remove all stale nodes as soon as possible, we just end up re-adding a
833
// lot of them in later again.  The "sufficiently stale" approach avoids
834
// this.  (If a program has many live PDBs, performance will just suck,
835
// there's no way around that.)
836
837
static OSet* secVBitTable;
838
839
// Stats
840
static ULong sec_vbits_new_nodes = 0;
841
static ULong sec_vbits_updates   = 0;
842
843
// This must be a power of two;  this is checked in mc_pre_clo_init().
844
// The size chosen here is a trade-off:  if the nodes are bigger (ie. cover
845
// a larger address range) they take more space but we can get multiple
846
// partially-defined bytes in one if they are close to each other, reducing
847
// the number of total nodes.  In practice sometimes they are clustered (eg.
848
// perf/bz2 repeatedly writes then reads more than 20,000 in a contiguous
849
// row), but often not.  So we choose something intermediate.
850
#define BYTES_PER_SEC_VBIT_NODE     16
851
852
// We make the table bigger if more than this many nodes survive a GC.
853
#define MAX_SURVIVOR_PROPORTION  0.5
854
855
// Each time we make the table bigger, we increase it by this much.
856
#define TABLE_GROWTH_FACTOR      2
857
858
// This defines "sufficiently stale" -- any node that hasn't been touched in
859
// this many GCs will be removed.
860
#define MAX_STALE_AGE            2
861
      
862
// We GC the table when it gets this many nodes in it, ie. it's effectively
863
// the table size.  It can change.
864
static Int  secVBitLimit = 1024;
865
866
// The number of GCs done, used to age sec-V-bit nodes for eviction.
867
// Because it's unsigned, wrapping doesn't matter -- the right answer will
868
// come out anyway.
869
static UInt GCs_done = 0;
870
871
// One node of the sec-V-bits table, covering BYTES_PER_SEC_VBIT_NODE
// consecutive bytes starting at an aligned address.
typedef 
   struct {
      Addr  a;                                // base address, rounded down
                                              // to BYTES_PER_SEC_VBIT_NODE
      UChar vbits8[BYTES_PER_SEC_VBIT_NODE];  // full V bits, one byte each
      UInt  last_touched;                     // GCs_done value at last write;
                                              // used to judge staleness
   } 
   SecVBitNode;
878
879
// Allocate a fresh, empty sec-V-bits table, keyed on SecVBitNode.a.
static OSet* createSecVBitTable(void)
{
   return VG_(OSetGen_Create)( offsetof(SecVBitNode, a), 
                               NULL, // use fast comparisons
                               VG_(malloc), VG_(free) );
}
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
885
886
// One GC pass over the sec-V-bits table: copy every surviving node into a
// brand-new table, destroy the old one, and grow secVBitLimit if too many
// nodes survived.  A node survives if it was touched within MAX_STALE_AGE
// GCs, or if any of its bytes is still marked VA_BITS2_PARTDEFINED in the
// primary shadow memory (ie. is non-stale).  See the policy discussion in
// the comment block above.
static void gcSecVBitTable(void)
{
   OSet*        secVBitTable2;
   SecVBitNode* n;
   Int          i, n_nodes = 0, n_survivors = 0;

   GCs_done++;

   // Create the new table.
   secVBitTable2 = createSecVBitTable();

   // Traverse the table, moving fresh nodes into the new table.
   VG_(OSetGen_ResetIter)(secVBitTable);
   while ( (n = VG_(OSetGen_Next)(secVBitTable)) ) {
      Bool keep = False;
      if ( (GCs_done - n->last_touched) <= MAX_STALE_AGE ) {
         // Keep node if it's been touched recently enough (regardless of
         // freshness/staleness).  Nb: GCs_done is unsigned, so the
         // subtraction is well-defined even after wrap-around.
         keep = True;
      } else {
         // Keep node if any of its bytes are non-stale.  Using
         // get_vabits2() for the lookup is not very efficient, but I don't
         // think it matters.
         for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
            if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
               keep = True;      // Found a non-stale byte, so keep
               break;
            }
         }
      }

      if ( keep ) {
         // Insert a copy of the node into the new table.
         SecVBitNode* n2 = 
            VG_(OSetGen_AllocNode)(secVBitTable2, sizeof(SecVBitNode));
         *n2 = *n;
         VG_(OSetGen_Insert)(secVBitTable2, n2);
      }
   }

   // Get the before and after sizes.
   n_nodes     = VG_(OSetGen_Size)(secVBitTable);
   n_survivors = VG_(OSetGen_Size)(secVBitTable2);

   // Destroy the old table, and put the new one in its place.
   VG_(OSetGen_Destroy)(secVBitTable);
   secVBitTable = secVBitTable2;

   if (VG_(clo_verbosity) > 1) {
      Char percbuf[6];
      VG_(percentify)(n_survivors, n_nodes, 1, 6, percbuf);
      VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)",
                   n_nodes, n_survivors, percbuf);
   }

   // Increase table size if necessary: too many survivors means GCs are
   // happening too often to be worthwhile at the current limit.
   if (n_survivors > (secVBitLimit * MAX_SURVIVOR_PROPORTION)) {
      secVBitLimit *= TABLE_GROWTH_FACTOR;
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_DebugMsg, "memcheck GC: increase table size to %d",
                      secVBitLimit);
   }
}
949
950
// Look up the full 8 V bits for the partially-defined byte at 'a' in the
// sec-V-bits table.  The caller guarantees the byte is PARTDEFINED, so a
// node must exist; asserting otherwise indicates shadow-memory corruption.
static UWord get_sec_vbits8(Addr a)
{
   Addr         base   = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
   Int          offset = a % BYTES_PER_SEC_VBIT_NODE;
   SecVBitNode* node   = VG_(OSetGen_Lookup)(secVBitTable, &base);
   UChar        vbits8;
   tl_assert2(node, "get_sec_vbits8: no node for address %p (%p)\n", base, a);
   vbits8 = node->vbits8[offset];
   // Shouldn't be fully defined or fully undefined -- those cases shouldn't
   // make it to the secondary V bits table.
   tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
   return vbits8;
}
963
964
// Record the full 8 V bits for the partially-defined byte at 'a' in the
// sec-V-bits table, updating an existing node or allocating a new one.
// May trigger a table GC (gcSecVBitTable) when the table is full.
static void set_sec_vbits8(Addr a, UWord vbits8)
{
   Addr         aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
   Int          i, amod  = a % BYTES_PER_SEC_VBIT_NODE;
   SecVBitNode* n        = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
   // Shouldn't be fully defined or fully undefined -- those cases shouldn't
   // make it to the secondary V bits table.
   tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
   if (n) {
      n->vbits8[amod] = vbits8;     // update
      n->last_touched = GCs_done;   // mark as freshly touched for the GC
      sec_vbits_updates++;
   } else {
      // New node:  assign the specific byte, make the rest invalid (they
      // should never be read as-is, but be cautious).
      n = VG_(OSetGen_AllocNode)(secVBitTable, sizeof(SecVBitNode));
      n->a            = aAligned;
      for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
         n->vbits8[i] = V_BITS8_UNDEFINED;
      }
      n->vbits8[amod] = vbits8;
      n->last_touched = GCs_done;

      // Do a table GC if necessary.  Nb: do this before inserting the new
      // node, to avoid erroneously GC'ing the new node.
      if (secVBitLimit == VG_(OSetGen_Size)(secVBitTable)) {
         gcSecVBitTable();
      }

      // Insert the new node.
      VG_(OSetGen_Insert)(secVBitTable, n);
      sec_vbits_new_nodes++;

      // Track the high-water mark for the stats output.
      n_secVBit_nodes = VG_(OSetGen_Size)(secVBitTable);
      if (n_secVBit_nodes > max_secVBit_nodes)
         max_secVBit_nodes = n_secVBit_nodes;
   }
}
1002
1003
/* --------------- Endianness helpers --------------- */
1004
1005
/* Returns the offset in memory of the byteno-th most significant byte
1006
   in a wordszB-sized word, given the specified endianness. */
1007
/* Returns the offset in memory of the byteno-th most significant byte
   in a wordszB-sized word, given the specified endianness. */
static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian, 
                                    UWord byteno ) {
   if (bigendian)
      return wordszB - 1 - byteno;
   return byteno;
}
1011
1012
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
1013
/* --------------- Ignored address ranges --------------- */
1014
1015
#define M_IGNORE_RANGES 4
1016
1017
// Up to M_IGNORE_RANGES user-specified address ranges to ignore.
// Each range i is half-open: [start[i], end[i]).
typedef
   struct {
      Int  used;                    // number of valid entries
      Addr start[M_IGNORE_RANGES];  // inclusive lower bounds
      Addr end[M_IGNORE_RANGES];    // exclusive upper bounds
   }
   IgnoreRanges;
1024
1025
static IgnoreRanges ignoreRanges;
1026
1027
// Does 'a' fall inside any of the user-specified ignore ranges?
// Ranges are half-open: [start, end).
static INLINE Bool in_ignored_range ( Addr a )
{
   Int k;
   // Common case: no ranges configured at all.
   if (EXPECTED_TAKEN(ignoreRanges.used == 0))
      return False;
   for (k = 0; k < ignoreRanges.used; k++) {
      if (ignoreRanges.start[k] <= a && a < ignoreRanges.end[k])
         return True;
   }
   return False;
}
1038
1039
1040
/* Parse a 32- or 64-bit hex number, including leading 0x, from string
1041
   starting at *ppc, putting result in *result, and return True.  Or
1042
   fail, in which case *ppc and *result are undefined, and return
1043
   False. */
1044
1045
// Is 'c' an ASCII hexadecimal digit?
static Bool isHex ( UChar c )
{
   if (c >= '0' && c <= '9') return True;
   if (c >= 'a' && c <= 'f') return True;
   if (c >= 'A' && c <= 'F') return True;
   return False;
}
1051
1052
// Decode one hex digit to its value 0..15.  Passing a non-hex-digit
// character is a caller bug and asserts.
static UInt fromHex ( UChar c )
{
   if ('0' <= c && c <= '9')
      return (UInt)c - (UInt)'0';
   if ('a' <= c && c <= 'f')
      return (UInt)c - (UInt)'a' + 10;
   if ('A' <= c && c <= 'F')
      return (UInt)c - (UInt)'A' + 10;
   /*NOTREACHED*/
   tl_assert(0);
   return 0;
}
1064
1065
/* Parse a 32- or 64-bit hex number, including leading 0x, from the string
   starting at *ppc, putting the result in *result, and return True.  On
   failure *ppc and *result are undefined, and return False.  Accepts at
   most 2*sizeof(Addr) digits, ie. exactly as many as fit in an Addr. */
static Bool parse_Addr ( UChar** ppc, Addr* result )
{
   Int used, limit = 2 * sizeof(Addr);
   if (**ppc != '0')
      return False;
   (*ppc)++;
   if (**ppc != 'x')
      return False;
   (*ppc)++;
   *result = 0;
   used = 0;
   while (isHex(**ppc)) {
      // Fix: decode each digit once and reuse it, rather than calling
      // fromHex() a second time for the accumulation step.
      UInt d = fromHex(**ppc);
      tl_assert(d < 16);
      *result = ((*result) << 4) | (Addr)d;
      (*ppc)++;
      used++;
      if (used > limit) return False;
   }
   if (used == 0)
      return False;
   return True;
}
1088
1089
/* Parse two such numbers separated by a dash, or fail. */
1090
1091
/* Parse two hex numbers separated by a dash, or fail. */
static Bool parse_range ( UChar** ppc, Addr* result1, Addr* result2 )
{
   if (!parse_Addr(ppc, result1))
      return False;
   if (**ppc != '-')
      return False;
   (*ppc)++;
   return parse_Addr(ppc, result2);
}
1104
1105
/* Parse a set of ranges separated by commas into 'ignoreRanges', or
1106
   fail. */
1107
1108
/* Parse a comma-separated list of hex ranges into 'ignoreRanges', or
   fail.  Fails if the list is malformed or holds more than
   M_IGNORE_RANGES entries. */
static Bool parse_ignore_ranges ( UChar* str0 )
{
   Addr    lo, hi;
   UChar*  cursor = str0;
   UChar** ppc    = &cursor;
   ignoreRanges.used = 0;
   while (1) {
      if (!parse_range(ppc, &lo, &hi))
         return False;
      if (ignoreRanges.used >= M_IGNORE_RANGES)
         return False;
      ignoreRanges.start[ignoreRanges.used] = lo;
      ignoreRanges.end[ignoreRanges.used]   = hi;
      ignoreRanges.used++;
      if (**ppc == 0)
         return True;        // clean end of input
      if (**ppc != ',')
         return False;       // anything but a separator is malformed
      (*ppc)++;
   }
   /*NOTREACHED*/
   return False;
}
1133
1134
1135
/* --------------- Load/store slow cases. --------------- */
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
1136
1137
// Forward declarations
1138
static void mc_record_address_error  ( ThreadId tid, Addr a,
1139
                                       Int size, Bool isWrite );
1140
static void mc_record_core_mem_error ( ThreadId tid, Bool isAddrErr, Char* s );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
1141
static void mc_record_regparam_error ( ThreadId tid, Char* msg );
1142
static void mc_record_memparam_error ( ThreadId tid, Addr a,
1143
                                       Bool isAddrErr, Char* msg );
1144
static void mc_record_jump_error     ( ThreadId tid, Addr a );
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
1145
1146
// Slow-path load of nBits (8/16/32/64) from 'a': builds the 64-bit V word
// byte by byte via get_vbits8(), reporting an address error unless the
// partial-load exemption applies.  The two semi-fast cases at the top
// short-circuit fully-defined/undefined aligned words.
static
#ifndef PERF_FAST_LOADV
INLINE
#endif
ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
{
   /* Make up a 64-bit result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong vbits64     = V_BITS64_UNDEFINED;
   SizeT szB         = nBits / 8;
   SSizeT i          = szB-1;    // Must be signed
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  partial_load_exemption_applies;
   UChar vbits8;
   Bool  ok;

   PROF_EVENT(30, "mc_LOADVn_slow");

   /* ------------ BEGIN semi-fast cases ------------ */
   /* These deal quickly-ish with the common auxiliary primary map
      cases on 64-bit platforms.  Are merely a speedup hack; can be
      omitted without loss of correctness/functionality.  Note that in
      both cases the "sizeof(void*) == 8" causes these cases to be
      folded out by compilers on 32-bit platforms.  These are derived
      from LOADV64 and LOADV32.
   */
   if (EXPECTED_TAKEN(sizeof(void*) == 8 
                      && nBits == 64 && VG_IS_8_ALIGNED(a))) {
      SecMap* sm       = get_secmap_for_reading(a);
      UWord   sm_off16 = SM_OFF_16(a);
      UWord   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
      if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED))
         return V_BITS64_DEFINED;
      if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED))
         return V_BITS64_UNDEFINED;
      /* else fall into the slow case */
   }
   if (EXPECTED_TAKEN(sizeof(void*) == 8 
                      && nBits == 32 && VG_IS_4_ALIGNED(a))) {
      SecMap* sm = get_secmap_for_reading(a);
      UWord sm_off = SM_OFF(a);
      UWord vabits8 = sm->vabits8[sm_off];
      // The upper 32 bits of the returned V word are marked undefined
      // (all ones) since only the low 32 bits were loaded.
      if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED))
         return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
      if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED))
         return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
      /* else fall into slow case */
   }
   /* ------------ END semi-fast cases ------------ */

   tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

   // Gather V bits most-significant byte first, so that after the loop
   // vbits64 holds them in the right order.
   for (i = szB-1; i >= 0; i--) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a + byte_offset_w(szB, bigendian, i);
      ok = get_vbits8(ai, &vbits8);
      if (!ok) n_addrs_bad++;
      vbits64 <<= 8; 
      vbits64 |= vbits8;
   }

   /* This is a hack which avoids producing errors for code which
      insists in stepping along byte strings in aligned word-sized
      chunks, and there is a partially defined word at the end.  (eg,
      optimised strlen).  Such code is basically broken at least WRT
      semantics of ANSI C, but sometimes users don't have the option
      to fix it, and so this option is provided.  Note it is now
      defaulted to not-engaged.

      A load from a partially-addressible place is allowed if:
      - the command-line flag is set
      - it's a word-sized, word-aligned load
      - at least one of the addresses in the word *is* valid
   */
   partial_load_exemption_applies
      = MC_(clo_partial_loads_ok) && szB == VG_WORDSIZE 
                                   && VG_IS_WORD_ALIGNED(a) 
                                   && n_addrs_bad < VG_WORDSIZE;

   if (n_addrs_bad > 0 && !partial_load_exemption_applies)
      mc_record_address_error( VG_(get_running_tid)(), a, szB, False );

   return vbits64;
}
1233
1234
1235
// Slow-path store of nBits (8/16/32/64) of V bits to 'a': writes byte by
// byte via set_vbits8(), reporting an address error if any byte was
// unaddressable.  The two semi-fast cases at the top short-circuit
// fully-defined/undefined aligned stores.
static
#ifndef PERF_FAST_STOREV
INLINE
#endif
void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
{
   SizeT szB = nBits / 8;
   SizeT i, n_addrs_bad = 0;
   UChar vbits8;
   Addr  ai;
   Bool  ok;

   PROF_EVENT(35, "mc_STOREVn_slow");

   /* ------------ BEGIN semi-fast cases ------------ */
   /* These deal quickly-ish with the common auxiliary primary map
      cases on 64-bit platforms.  Are merely a speedup hack; can be
      omitted without loss of correctness/functionality.  Note that in
      both cases the "sizeof(void*) == 8" causes these cases to be
      folded out by compilers on 32-bit platforms.  These are derived
      from STOREV64 and STOREV32.
   */
   if (EXPECTED_TAKEN(sizeof(void*) == 8 
                      && nBits == 64 && VG_IS_8_ALIGNED(a))) {
      SecMap* sm       = get_secmap_for_reading(a);
      UWord   sm_off16 = SM_OFF_16(a);
      UWord   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
      // Nb: a distinguished sec-map is read-only, so it must be excluded
      // before writing through 'sm'.
      if (EXPECTED_TAKEN( !is_distinguished_sm(sm) && 
                          (VA_BITS16_DEFINED   == vabits16 ||
                           VA_BITS16_UNDEFINED == vabits16) )) {
         /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressible. */
         // Convert full V-bits in register to compact 2-bit form.
         if (EXPECTED_TAKEN(V_BITS64_DEFINED == vbytes)) {
            ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
            return;
         } else if (V_BITS64_UNDEFINED == vbytes) {
            ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
            return;
         }
         /* else fall into the slow case */
      }
      /* else fall into the slow case */
   }
   if (EXPECTED_TAKEN(sizeof(void*) == 8
                      && nBits == 32 && VG_IS_4_ALIGNED(a))) {
      SecMap* sm      = get_secmap_for_reading(a);
      UWord   sm_off  = SM_OFF(a);
      UWord   vabits8 = sm->vabits8[sm_off];
      if (EXPECTED_TAKEN( !is_distinguished_sm(sm) && 
                          (VA_BITS8_DEFINED   == vabits8 ||
                           VA_BITS8_UNDEFINED == vabits8) )) {
         /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressible. */
         // Convert full V-bits in register to compact 2-bit form.
         if (EXPECTED_TAKEN(V_BITS32_DEFINED == (vbytes & 0xFFFFFFFF))) {
            sm->vabits8[sm_off] = VA_BITS8_DEFINED;
            return;
         } else if (V_BITS32_UNDEFINED == (vbytes & 0xFFFFFFFF)) {
            sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
            return;
         }
         /* else fall into the slow case */
      }
      /* else fall into the slow case */
   }
   /* ------------ END semi-fast cases ------------ */

   tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressibility of the location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai     = a + byte_offset_w(szB, bigendian, i);
      vbits8 = vbytes & 0xff;
      ok     = set_vbits8(ai, vbits8);
      if (!ok) n_addrs_bad++;
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      mc_record_address_error( VG_(get_running_tid)(), a, szB, True );
}
1 by Andrés Roldán
Import upstream version 2.1.1
1320
1321
1322
/*------------------------------------------------------------*/
1323
/*--- Setting permissions over address ranges.             ---*/
1324
/*------------------------------------------------------------*/
1325
1326
/* Set the V+A bits for every byte in [a, a+lenT) to the (uniform) pattern
   vabits16, which must be one of NOACCESS/UNDEFINED/DEFINED.  dsm_num
   selects the matching distinguished sec-map used for whole-sec-map
   ranges.  The range is handled in up to three parts: a leading partial
   sec-map (Part 1), any number of whole sec-maps (Part 2), and a trailing
   partial sec-map (Part 3). */
static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
                                      UWord dsm_num )
{
   UWord    sm_off, sm_off16;
   UWord    vabits2 = vabits16 & 0x3;   // single-byte version of the pattern
   SizeT    lenA, lenB, len_to_next_secmap;
   Addr     aNext;
   SecMap*  sm;
   SecMap** sm_ptr;
   SecMap*  example_dsm;

   PROF_EVENT(150, "set_address_range_perms");

   /* Check the V+A bits make sense. */
   tl_assert(VA_BITS16_NOACCESS  == vabits16 ||
             VA_BITS16_UNDEFINED == vabits16 ||
             VA_BITS16_DEFINED   == vabits16);

   // This code should never write PDBs;  ensure this.  (See comment above
   // set_vabits2().)
   tl_assert(VA_BITS2_PARTDEFINED != vabits2);

   if (lenT == 0)
      return;

   if (lenT > 100 * 1000 * 1000) {
      if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
         Char* s = "unknown???";
         if (vabits16 == VA_BITS16_NOACCESS ) s = "noaccess";
         if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
         if (vabits16 == VA_BITS16_DEFINED  ) s = "defined";
         VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
                                  "large range %lu (%s)", lenT, s);
      }
   }

#ifndef PERF_FAST_SARP
   /*------------------ debug-only case ------------------ */
   {
      // Endianness doesn't matter here because all bytes are being set to
      // the same value.
      // Nb: We don't have to worry about updating the sec-V-bits table
      // after these set_vabits2() calls because this code never writes
      // VA_BITS2_PARTDEFINED values.
      SizeT i;
      for (i = 0; i < lenT; i++) {
         set_vabits2(a + i, vabits2);
      }
      return;
   }
#endif

   /*------------------ standard handling ------------------ */

   /* Get the distinguished secondary that we might want
      to use (part of the space-compression scheme). */
   example_dsm = &sm_distinguished[dsm_num];

   // We have to handle ranges covering various combinations of partial and
   // whole sec-maps.  Here is how parts 1, 2 and 3 are used in each case.
   // Cases marked with a '*' are common.
   //
   //   TYPE                                             PARTS USED
   //   ----                                             ----------
   // * one partial sec-map                  (p)         1
   // - one whole sec-map                    (P)         2
   //
   // * two partial sec-maps                 (pp)        1,3 
   // - one partial, one whole sec-map       (pP)        1,2
   // - one whole, one partial sec-map       (Pp)        2,3
   // - two whole sec-maps                   (PP)        2,2
   //
   // * one partial, one whole, one partial  (pPp)       1,2,3
   // - one partial, two whole               (pPP)       1,2,2
   // - two whole, one partial               (PPp)       2,2,3
   // - three whole                          (PPP)       2,2,2
   //
   // * one partial, N-2 whole, one partial  (pP...Pp)   1,2...2,3
   // - one partial, N-1 whole               (pP...PP)   1,2...2,2
   // - N-1 whole, one partial               (PP...Pp)   2,2...2,3
   // - N whole                              (PP...PP)   2,2...2,3

   // Break up total length (lenT) into two parts:  length in the first
   // sec-map (lenA), and the rest (lenB);   lenT == lenA + lenB.
   aNext = start_of_this_sm(a) + SM_SIZE;
   len_to_next_secmap = aNext - a;
   if ( lenT <= len_to_next_secmap ) {
      // Range entirely within one sec-map.  Covers almost all cases.
      PROF_EVENT(151, "set_address_range_perms-single-secmap");
      lenA = lenT;
      lenB = 0;
   } else if (is_start_of_sm(a)) {
      // Range spans at least one whole sec-map, and starts at the beginning
      // of a sec-map; skip to Part 2.
      PROF_EVENT(152, "set_address_range_perms-startof-secmap");
      lenA = 0;
      lenB = lenT;
      goto part2;
   } else {
      // Range spans two or more sec-maps, first one is partial.
      PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
      lenA = len_to_next_secmap;
      lenB = lenT - lenA;
   }

   //------------------------------------------------------------------------
   // Part 1: Deal with the first sec_map.  Most of the time the range will be
   // entirely within a sec_map and this part alone will suffice.  Also,
   // doing it this way lets us avoid repeatedly testing for the crossing of
   // a sec-map boundary within these loops.
   //------------------------------------------------------------------------

   // If it's distinguished, make it undistinguished if necessary.
   sm_ptr = get_secmap_ptr(a);
   if (is_distinguished_sm(*sm_ptr)) {
      if (*sm_ptr == example_dsm) {
         // Sec-map already has the V+A bits that we want, so skip.
         PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
         a    = aNext;
         lenA = 0;
      } else {
         PROF_EVENT(155, "set_address_range_perms-dist-sm1");
         *sm_ptr = copy_for_writing(*sm_ptr);
      }
   }
   sm = *sm_ptr;

   // 1 byte steps, until 8-aligned (or the range runs out).
   while (True) {
      if (VG_IS_8_ALIGNED(a)) break;
      if (lenA < 1)           break;
      PROF_EVENT(156, "set_address_range_perms-loop1a");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a    += 1;
      lenA -= 1;
   }
   // 8-aligned, 8 byte steps: one 16-bit store covers 8 bytes' V+A bits.
   while (True) {
      if (lenA < 8) break;
      PROF_EVENT(157, "set_address_range_perms-loop8a");
      sm_off16 = SM_OFF_16(a);
      ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
      a    += 8;
      lenA -= 8;
   }
   // 1 byte steps for the remainder.
   while (True) {
      if (lenA < 1) break;
      PROF_EVENT(158, "set_address_range_perms-loop1b");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a    += 1;
      lenA -= 1;
   }

   // We've finished the first sec-map.  Is that it?
   if (lenB == 0)
      return;

   //------------------------------------------------------------------------
   // Part 2: Fast-set entire sec-maps at a time.
   //------------------------------------------------------------------------
  part2:
   // 64KB-aligned, 64KB steps.
   // Nb: we can reach here with lenB < SM_SIZE
   while (True) {
      if (lenB < SM_SIZE) break;
      tl_assert(is_start_of_sm(a));
      PROF_EVENT(159, "set_address_range_perms-loop64K");
      sm_ptr = get_secmap_ptr(a);
      if (!is_distinguished_sm(*sm_ptr)) {
         PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
         // Free the non-distinguished sec-map that we're replacing.  This
         // case happens moderately often, enough to be worthwhile.
         VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
      }
      update_SM_counts(*sm_ptr, example_dsm);
      // Make the sec-map entry point to the example DSM
      *sm_ptr = example_dsm;
      lenB -= SM_SIZE;
      a    += SM_SIZE;
   }

   // We've finished the whole sec-maps.  Is that it?
   if (lenB == 0)
      return;

   //------------------------------------------------------------------------
   // Part 3: Finish off the final partial sec-map, if necessary.
   //------------------------------------------------------------------------

   tl_assert(is_start_of_sm(a) && lenB < SM_SIZE);

   // If it's distinguished, make it undistinguished if necessary.
   sm_ptr = get_secmap_ptr(a);
   if (is_distinguished_sm(*sm_ptr)) {
      if (*sm_ptr == example_dsm) {
         // Sec-map already has the V+A bits that we want, so stop.
         PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
         return;
      } else {
         PROF_EVENT(162, "set_address_range_perms-dist-sm2");
         *sm_ptr = copy_for_writing(*sm_ptr);
      }
   }
   sm = *sm_ptr;

   // 8-aligned, 8 byte steps ('a' is sec-map-aligned here, hence 8-aligned).
   while (True) {
      if (lenB < 8) break;
      PROF_EVENT(163, "set_address_range_perms-loop8b");
      sm_off16 = SM_OFF_16(a);
      ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
      a    += 8;
      lenB -= 8;
   }
   // 1 byte steps for the remainder.
   while (True) {
      if (lenB < 1) return;
      PROF_EVENT(164, "set_address_range_perms-loop1c");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a    += 1;
      lenB -= 1;
   }
}
1553
1554
1555
/* --- Set permissions for arbitrary address ranges --- */
1556
1557
/* Mark the address range [a, a+len) as entirely inaccessible:
   both addressability (A) and validity (V) bits are set to NOACCESS. */
void MC_(make_mem_noaccess) ( Addr a, SizeT len )
{
   PROF_EVENT(40, "MC_(make_mem_noaccess)");
   DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
}
1563
1564
/* Mark [a, a+len) as addressable but containing undefined (invalid)
   values, e.g. freshly-allocated heap or newly-grown stack. */
void MC_(make_mem_undefined) ( Addr a, SizeT len )
{
   PROF_EVENT(41, "MC_(make_mem_undefined)");
   DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
}
1570
1571
/* Mark [a, a+len) as addressable and containing fully defined values,
   e.g. memory just written by a successful syscall. */
void MC_(make_mem_defined) ( Addr a, SizeT len )
{
   PROF_EVENT(42, "MC_(make_mem_defined)");
   DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
}
1577
1578
/* For each byte in [a,a+len), if the byte is addressable, make it be
1579
   defined, but if it isn't addressible, leave it alone.  In other
1580
   words a version of MC_(make_mem_defined) that doesn't mess with
1581
   addressibility.  Low-performance implementation. */
1582
/* For each byte in [a, a+len): if the byte is currently addressable,
   force its V bits to "defined"; bytes that are NOACCESS are left
   untouched.  Addressability is never changed.  Byte-at-a-time, so
   low performance — callers are expected to use it rarely. */
static void make_mem_defined_if_addressable ( Addr a, SizeT len )
{
   SizeT offset;
   UChar vbits;
   DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
   offset = 0;
   while (offset < len) {
      vbits = get_vabits2( a+offset );
      /* The common case is an addressable byte, hence the hint. */
      if (EXPECTED_TAKEN(VA_BITS2_NOACCESS != vbits)) {
         set_vabits2(a+offset, VA_BITS2_DEFINED);
      }
      offset++;
   }
}
1594
1595
1596
/* --- Block-copy permissions (needed for implementing realloc() and
1597
       sys_mremap). --- */
1598
1599
/* Copy the complete V+A-bit state of [src, src+len) onto [dst, dst+len),
   including secondary-V-bit info for partially-defined bytes.  Needed to
   implement realloc() and sys_mremap.  Overlapping ranges are handled by
   choosing the copy direction in the slow path. */
void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
{
   SizeT i, j;
   UChar vabits2, vabits8;
   Bool  aligned, nooverlap;

   DEBUG("MC_(copy_address_range_state)\n");
   PROF_EVENT(50, "MC_(copy_address_range_state)");

   /* Nothing to do for empty or self-copies. */
   if (len == 0 || src == dst)
      return;

   aligned   = VG_IS_4_ALIGNED(src) && VG_IS_4_ALIGNED(dst);
   nooverlap = src+len <= dst || dst+len <= src;

   if (nooverlap && aligned) {

      /* Vectorised fast case, when no overlap and suitably aligned */
      /* vector loop: move 4 bytes of vabits at a time */
      i = 0;
      while (len >= 4) {
         vabits8 = get_vabits8_for_aligned_word32( src+i );
         set_vabits8_for_aligned_word32( dst+i, vabits8 );
         /* Only a word containing some PARTDEFINED byte needs its
            secondary-V-bits copied; the three uniform states don't. */
         if (EXPECTED_TAKEN(VA_BITS8_DEFINED == vabits8 
                            || VA_BITS8_UNDEFINED == vabits8 
                            || VA_BITS8_NOACCESS == vabits8)) {
            /* do nothing */
         } else {
            /* have to copy secondary map info */
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+0 ))
               set_sec_vbits8( dst+i+0, get_sec_vbits8( src+i+0 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+1 ))
               set_sec_vbits8( dst+i+1, get_sec_vbits8( src+i+1 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+2 ))
               set_sec_vbits8( dst+i+2, get_sec_vbits8( src+i+2 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+3 ))
               set_sec_vbits8( dst+i+3, get_sec_vbits8( src+i+3 ) );
         }
         i += 4;
         len -= 4;
      }
      /* fixup loop: remaining 0..3 bytes, one at a time */
      while (len >= 1) {
         vabits2 = get_vabits2( src+i );
         set_vabits2( dst+i, vabits2 );
         if (VA_BITS2_PARTDEFINED == vabits2) {
            set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
         }
         i++;
         len--;
      }

   } else {

      /* We have to do things the slow way */
      if (src < dst) {
         /* dst is above src: copy backwards (high to low) so that an
            overlapping source byte is read before it is overwritten. */
         for (i = 0, j = len-1; i < len; i++, j--) {
            PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
            vabits2 = get_vabits2( src+j );
            set_vabits2( dst+j, vabits2 );
            if (VA_BITS2_PARTDEFINED == vabits2) {
               set_sec_vbits8( dst+j, get_sec_vbits8( src+j ) );
            }
         }
      }

      if (src > dst) {
         /* dst is below src: copying forwards is safe for overlap. */
         for (i = 0; i < len; i++) {
            PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
            vabits2 = get_vabits2( src+i );
            set_vabits2( dst+i, vabits2 );
            if (VA_BITS2_PARTDEFINED == vabits2) {
               set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
            }
         }
      }
   }

}
1678
1679
1680
/* --- Fast case permission setters, for dealing with stacks. --- */
1681
1682
/* Fast path: mark a 4-aligned, 4-byte word at 'a' as undefined.
   Caller guarantees 4-alignment (not re-checked here). */
static INLINE
void make_aligned_word32_undefined ( Addr a )
{
   UWord   sm_off;
   SecMap* sm;

   PROF_EVENT(300, "make_aligned_word32_undefined");

#ifndef PERF_FAST_STACK2
   /* Fast stack handling disabled at build time: use the generic path. */
   MC_(make_mem_undefined)(a, 4);
#else
   if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
      /* Address is outside the primary map: take the generic slow path. */
      PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
      MC_(make_mem_undefined)(a, 4);
      return;
   }

   /* Write all four bytes' vabits (one vabits8 entry) in one store. */
   sm                  = get_secmap_for_writing_low(a);
   sm_off              = SM_OFF(a);
   sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
#endif
}
1704
1705
1706
/* Fast path: mark a 4-aligned, 4-byte word at 'a' as inaccessible.
   Caller guarantees 4-alignment (not re-checked here). */
static INLINE
void make_aligned_word32_noaccess ( Addr a )
{
   UWord   sm_off;
   SecMap* sm;

   PROF_EVENT(310, "make_aligned_word32_noaccess");

#ifndef PERF_FAST_STACK2
   /* Fast stack handling disabled at build time: use the generic path. */
   MC_(make_mem_noaccess)(a, 4);
#else
   if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
      /* Address is outside the primary map: take the generic slow path. */
      PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
      MC_(make_mem_noaccess)(a, 4);
      return;
   }

   /* Write all four bytes' vabits (one vabits8 entry) in one store. */
   sm                  = get_secmap_for_writing_low(a);
   sm_off              = SM_OFF(a);
   sm->vabits8[sm_off] = VA_BITS8_NOACCESS;
#endif
}
1728
1729
1 by Andrés Roldán
Import upstream version 2.1.1
1730
/* Nb: by "aligned" here we mean 8-byte aligned */
1731
/* Fast path: mark an 8-aligned, 8-byte word at 'a' as undefined.
   Caller guarantees 8-alignment (not re-checked here). */
static INLINE
void make_aligned_word64_undefined ( Addr a )
{
   UWord   sm_off16;
   SecMap* sm;

   PROF_EVENT(320, "make_aligned_word64_undefined");

#ifndef PERF_FAST_STACK2
   /* Fast stack handling disabled at build time: use the generic path. */
   MC_(make_mem_undefined)(a, 8);
#else
   if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
      /* Address is outside the primary map: take the generic slow path. */
      PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
      MC_(make_mem_undefined)(a, 8);
      return;
   }

   /* Set all eight bytes' vabits (two vabits8 entries) with one
      16-bit store into the secondary map. */
   sm       = get_secmap_for_writing_low(a);
   sm_off16 = SM_OFF_16(a);
   ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
#endif
}
1753
1754
1755
/* Fast path: mark an 8-aligned, 8-byte word at 'a' as inaccessible.
   Caller guarantees 8-alignment (not re-checked here). */
static INLINE
void make_aligned_word64_noaccess ( Addr a )
{
   UWord   sm_off16;
   SecMap* sm;

   PROF_EVENT(330, "make_aligned_word64_noaccess");

#ifndef PERF_FAST_STACK2
   /* Fast stack handling disabled at build time: use the generic path. */
   MC_(make_mem_noaccess)(a, 8);
#else
   if (EXPECTED_NOT_TAKEN(a > MAX_PRIMARY_ADDRESS)) {
      /* Address is outside the primary map: take the generic slow path. */
      PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
      MC_(make_mem_noaccess)(a, 8);
      return;
   }

   /* Set all eight bytes' vabits (two vabits8 entries) with one
      16-bit store into the secondary map. */
   sm       = get_secmap_for_writing_low(a);
   sm_off16 = SM_OFF_16(a);
   ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_NOACCESS;
#endif
}
1777
1778
1779
/*------------------------------------------------------------*/
1780
/*--- Stack pointer adjustment                             ---*/
1781
/*------------------------------------------------------------*/
1782
1783
/* SP moved down by 4: mark the newly-exposed 4 bytes (offset by the
   arch-specific redzone) as undefined.  Aligned fast path if possible. */
static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
{
   PROF_EVENT(110, "new_mem_stack_4");
   if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
   } else {
      MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
   }
}
1792
1793
/* SP moved up by 4: mark the 4 bytes just vacated (at new_SP-4) as
   inaccessible.  4-alignment of new_SP implies 4-alignment of new_SP-4,
   so the aligned fast path is valid. */
static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
{
   PROF_EVENT(120, "die_mem_stack_4");
   if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
   }
}
1802
1803
/* SP moved down by 8: mark the new 8 bytes as undefined, using the
   widest aligned stores the base alignment permits. */
static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
{
   PROF_EVENT(111, "new_mem_stack_8");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Only 4-aligned: do two 32-bit stores instead. */
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP   );
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
   } else {
      MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 8 );
   }
}
1815
1816
/* SP moved up by 8: mark the 8 bytes just vacated (at new_SP-8) as
   inaccessible, using the widest aligned stores available. */
static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
{
   PROF_EVENT(121, "die_mem_stack_8");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Only 4-aligned: do two 32-bit stores instead. */
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-8, 8 );
   }
}
1828
1829
/* SP moved down by 12: mark the new 12 bytes as undefined, splitting
   into aligned 64/32-bit stores according to the base alignment. */
static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
{
   PROF_EVENT(112, "new_mem_stack_12");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* 8-aligned at +0: do 8 at +0, then 4 at +8. */
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP   );
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* from previous test we don't have 8-alignment at offset +0,
         hence must have 8 alignment at offsets +4/-4.  Hence safe to
         do 4 at +0 and then 8 at +4/. */
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP   );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
   } else {
      MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 12 );
   }
}
1845
1846
/* SP moved up by 12: mark the 12 bytes just vacated ([new_SP-12,
   new_SP)) as inaccessible, picking aligned stores by alignment. */
static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
{
   PROF_EVENT(122, "die_mem_stack_12");
   /* Note the -12 in the test */
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP-12 )) {
      /* We have 8-alignment at -12, hence ok to do 8 at -12 and 4 at
         -4. */
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4  );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* We have 4-alignment at +0, but we don't have 8-alignment at
         -12.  So we must have 8-alignment at -8.  Hence do 4 at -12
         and then 8 at -8. */
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8  );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-12, 12 );
   }
}
1865
1866
/* SP moved down by 16: mark the new 16 bytes as undefined, splitting
   into aligned 64/32-bit stores according to the base alignment. */
static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
{
   PROF_EVENT(113, "new_mem_stack_16");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP   );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Have 4 alignment at +0 but not 8; hence 8 must be at +4.
         Hence do 4 at +0, 8 at +4, 4 at +12. */
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP    );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4  );
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
   } else {
      MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 16 );
   }
}
1883
1884
/* SP moved up by 16: mark the 16 bytes just vacated ([new_SP-16,
   new_SP)) as inaccessible, picking aligned stores by alignment. */
static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
{
   PROF_EVENT(123, "die_mem_stack_16");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Have 8-alignment at +0, hence do 8 at -16 and 8 at -8. */
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8  );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* 8 alignment must be at -12.  Do 4 at -16, 8 at -12, 4 at -4. */
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4  );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-16, 16 );
   }
}
1900
1901
/* SP moved down by 32: mark the new 32 bytes as undefined, splitting
   into aligned 64/32-bit stores according to the base alignment. */
static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
{
   PROF_EVENT(114, "new_mem_stack_32");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Straightforward */
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP    );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8  );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* 8 alignment must be at +4.  Hence do 8 at +4,+12,+20 and 4 at
         +0,+28. */
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP    );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4  );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+20 );
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+28 );
   } else {
      MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 32 );
   }
}
1922
1923
/* SP moved up by 32: mark the 32 bytes just vacated ([new_SP-32,
   new_SP)) as inaccessible, picking aligned stores by alignment. */
static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
{
   PROF_EVENT(124, "die_mem_stack_32");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Straightforward */
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* 8 alignment must be at -4 etc.  Hence do 8 at -12,-20,-28 and
         4 at -32,-4. */
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-28 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-20 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4  );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-32, 32 );
   }
}
1944
1945
/* SP moved down by 112: mark the new bytes as undefined.  When the base
   is 8-aligned, issue the same fourteen aligned 64-bit stores as the
   hand-unrolled version, in the same low-to-high order; otherwise fall
   back to the generic range routine. */
static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
{
   UWord off;
   PROF_EVENT(115, "new_mem_stack_112");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      for (off = 0; off < 112; off += 8)
         make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP + off );
   } else {
      MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 112 );
   }
}
1967
1968
/* SP moved up by 112: mark [new_SP-112, new_SP) as inaccessible.  When
   the base is 8-aligned, issue the same fourteen aligned 64-bit stores
   as the hand-unrolled version, in the same low-to-high order. */
static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
{
   UWord off;
   PROF_EVENT(125, "die_mem_stack_112");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      for (off = 0; off < 112; off += 8)
         make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP - 112 + off );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-112, 112 );
   }
}
1990
1991
/* SP moved down by 128: mark the new bytes as undefined.  When the base
   is 8-aligned, issue the same sixteen aligned 64-bit stores as the
   hand-unrolled version, in the same low-to-high order. */
static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
{
   UWord off;
   PROF_EVENT(116, "new_mem_stack_128");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      for (off = 0; off < 128; off += 8)
         make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP + off );
   } else {
      MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 128 );
   }
}
2015
2016
/* SP moved up by 128: mark [new_SP-128, new_SP) as inaccessible.  When
   the base is 8-aligned, issue the same sixteen aligned 64-bit stores
   as the hand-unrolled version, in the same low-to-high order. */
static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
{
   UWord off;
   PROF_EVENT(126, "die_mem_stack_128");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      for (off = 0; off < 128; off += 8)
         make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP - 128 + off );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-128, 128 );
   }
}
2040
2041
/* SP moved down by 144: mark the new bytes as undefined.  When the base
   is 8-aligned, issue the same eighteen aligned 64-bit stores as the
   hand-unrolled version, in the same low-to-high order. */
static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
{
   UWord off;
   PROF_EVENT(117, "new_mem_stack_144");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      for (off = 0; off < 144; off += 8)
         make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP + off );
   } else {
      MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 144 );
   }
}
2067
2068
/* SP moved up by 144: mark [new_SP-144, new_SP) as inaccessible.  When
   the base is 8-aligned, issue the same eighteen aligned 64-bit stores
   as the hand-unrolled version, in the same low-to-high order. */
static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
{
   UWord off;
   PROF_EVENT(127, "die_mem_stack_144");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      for (off = 0; off < 144; off += 8)
         make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP - 144 + off );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-144, 144 );
   }
}
2094
2095
/* SP moved down by 160: mark the new bytes as undefined.  When the base
   is 8-aligned, issue the same twenty aligned 64-bit stores as the
   hand-unrolled version, in the same low-to-high order. */
static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
{
   UWord off;
   PROF_EVENT(118, "new_mem_stack_160");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      for (off = 0; off < 160; off += 8)
         make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP + off );
   } else {
      MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + new_SP, 160 );
   }
}
2123
2124
/* SP moved up by 160: mark [new_SP-160, new_SP) as inaccessible.  When
   the base is 8-aligned, issue the same twenty aligned 64-bit stores
   as the hand-unrolled version, in the same low-to-high order. */
static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
{
   UWord off;
   PROF_EVENT(128, "die_mem_stack_160");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      for (off = 0; off < 160; off += 8)
         make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP - 160 + off );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-160, 160 );
   }
}
2152
2153
/* Generic handler for an SP decrease of arbitrary size: the newly
   exposed stack area becomes undefined.
   NOTE(review): profile event id 115 is also used by
   mc_new_mem_stack_112, so their counts would be merged — verify this
   duplication is intended. */
static void mc_new_mem_stack ( Addr a, SizeT len )
{
   PROF_EVENT(115, "new_mem_stack");
   MC_(make_mem_undefined) ( -VG_STACK_REDZONE_SZB + a, len );
}
2158
2159
/* A stack area [a-RZ, a-RZ+len) has gone out of use: mark it (and the
   redzone below it) as no-access. */
static void mc_die_mem_stack ( Addr a, SizeT len )
{
   Addr start = -VG_STACK_REDZONE_SZB + a;
   PROF_EVENT(125, "die_mem_stack");
   MC_(make_mem_noaccess) ( start, len );
}
2164
2165
2166
/* The AMD64 ABI says:

   "The 128-byte area beyond the location pointed to by %rsp is considered
    to be reserved and shall not be modified by signal or interrupt
    handlers.  Therefore, functions may use this area for temporary data
    that is not needed across function calls.  In particular, leaf functions
    may use this area for their entire stack frame, rather than adjusting
    the stack pointer in the prologue and epilogue.  This area is known as
    red zone [sic]."

   So after any call or return we need to mark this redzone as containing
   undefined values.

   Consider this:  we're in function f.  f calls g.  g moves rsp down
   modestly (say 16 bytes) and writes stuff all over the red zone, making it
   defined.  g returns.  f is buggy and reads from parts of the red zone
   that it didn't write on.  But because g filled that area in, f is going
   to be picking up defined V bits and so any errors from reading bits of
   the red zone it didn't write, will be missed.  The only solution I could
   think of was to make the red zone undefined when g returns to f.

   This is in accordance with the ABI, which makes it clear the redzone
   is volatile across function calls.

   The problem occurs the other way round too: f could fill the RZ up
   with defined values and g could mistakenly read them.  So the RZ
   also needs to be nuked on function calls.
*/
2194
void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len )
2195
{
2196
   tl_assert(sizeof(UWord) == sizeof(SizeT));
2197
   if (0)
2198
      VG_(printf)("helperc_MAKE_STACK_UNINIT %p %lu\n", base, len );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2199
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2200
#  if 0
2201
   /* Really slow version */
2202
   MC_(make_mem_undefined)(base, len);
2203
#  endif
2204
2205
#  if 0
2206
   /* Slow(ish) version, which is fairly easily seen to be correct.
2207
   */
2208
   if (EXPECTED_TAKEN( VG_IS_8_ALIGNED(base) && len==128 )) {
2209
      make_aligned_word64_undefined(base +   0);
2210
      make_aligned_word64_undefined(base +   8);
2211
      make_aligned_word64_undefined(base +  16);
2212
      make_aligned_word64_undefined(base +  24);
2213
2214
      make_aligned_word64_undefined(base +  32);
2215
      make_aligned_word64_undefined(base +  40);
2216
      make_aligned_word64_undefined(base +  48);
2217
      make_aligned_word64_undefined(base +  56);
2218
2219
      make_aligned_word64_undefined(base +  64);
2220
      make_aligned_word64_undefined(base +  72);
2221
      make_aligned_word64_undefined(base +  80);
2222
      make_aligned_word64_undefined(base +  88);
2223
2224
      make_aligned_word64_undefined(base +  96);
2225
      make_aligned_word64_undefined(base + 104);
2226
      make_aligned_word64_undefined(base + 112);
2227
      make_aligned_word64_undefined(base + 120);
2228
   } else {
2229
      MC_(make_mem_undefined)(base, len);
2230
   }
2231
#  endif 
2232
2233
   /* Idea is: go fast when
2234
         * 8-aligned and length is 128
2235
         * the sm is available in the main primary map
2236
         * the address range falls entirely with a single secondary map
2237
      If all those conditions hold, just update the V+A bits by writing
2238
      directly into the vabits array.  (If the sm was distinguished, this
2239
      will make a copy and then write to it.)
2240
   */
2241
   if (EXPECTED_TAKEN( len == 128 && VG_IS_8_ALIGNED(base) )) {
2242
      /* Now we know the address range is suitably sized and aligned. */
2243
      UWord a_lo = (UWord)(base);
2244
      UWord a_hi = (UWord)(base + 128 - 1);
2245
      tl_assert(a_lo < a_hi);             // paranoia: detect overflow
2246
      if (a_hi < MAX_PRIMARY_ADDRESS) {
2247
         // Now we know the entire range is within the main primary map.
2248
         SecMap* sm    = get_secmap_for_writing_low(a_lo);
2249
         SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
2250
         /* Now we know that the entire address range falls within a
2251
            single secondary map, and that that secondary 'lives' in
2252
            the main primary map. */
2253
         if (EXPECTED_TAKEN(sm == sm_hi)) {
2254
            // Finally, we know that the range is entirely within one secmap.
2255
            UWord   v_off = SM_OFF(a_lo);
2256
            UShort* p     = (UShort*)(&sm->vabits8[v_off]);
2257
            p[ 0] = VA_BITS16_UNDEFINED;
2258
            p[ 1] = VA_BITS16_UNDEFINED;
2259
            p[ 2] = VA_BITS16_UNDEFINED;
2260
            p[ 3] = VA_BITS16_UNDEFINED;
2261
            p[ 4] = VA_BITS16_UNDEFINED;
2262
            p[ 5] = VA_BITS16_UNDEFINED;
2263
            p[ 6] = VA_BITS16_UNDEFINED;
2264
            p[ 7] = VA_BITS16_UNDEFINED;
2265
            p[ 8] = VA_BITS16_UNDEFINED;
2266
            p[ 9] = VA_BITS16_UNDEFINED;
2267
            p[10] = VA_BITS16_UNDEFINED;
2268
            p[11] = VA_BITS16_UNDEFINED;
2269
            p[12] = VA_BITS16_UNDEFINED;
2270
            p[13] = VA_BITS16_UNDEFINED;
2271
            p[14] = VA_BITS16_UNDEFINED;
2272
            p[15] = VA_BITS16_UNDEFINED;
2273
            return;
2274
         }
2275
      }
2276
   }
2277
2278
   /* 288 bytes (36 ULongs) is the magic value for ELF ppc64. */
2279
   if (EXPECTED_TAKEN( len == 288 && VG_IS_8_ALIGNED(base) )) {
2280
      /* Now we know the address range is suitably sized and aligned. */
2281
      UWord a_lo = (UWord)(base);
2282
      UWord a_hi = (UWord)(base + 288 - 1);
2283
      tl_assert(a_lo < a_hi);             // paranoia: detect overflow
2284
      if (a_hi < MAX_PRIMARY_ADDRESS) {
2285
         // Now we know the entire range is within the main primary map.
2286
         SecMap* sm    = get_secmap_for_writing_low(a_lo);
2287
         SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
2288
         /* Now we know that the entire address range falls within a
2289
            single secondary map, and that that secondary 'lives' in
2290
            the main primary map. */
2291
         if (EXPECTED_TAKEN(sm == sm_hi)) {
2292
            // Finally, we know that the range is entirely within one secmap.
2293
            UWord   v_off = SM_OFF(a_lo);
2294
            UShort* p     = (UShort*)(&sm->vabits8[v_off]);
2295
            p[ 0] = VA_BITS16_UNDEFINED;
2296
            p[ 1] = VA_BITS16_UNDEFINED;
2297
            p[ 2] = VA_BITS16_UNDEFINED;
2298
            p[ 3] = VA_BITS16_UNDEFINED;
2299
            p[ 4] = VA_BITS16_UNDEFINED;
2300
            p[ 5] = VA_BITS16_UNDEFINED;
2301
            p[ 6] = VA_BITS16_UNDEFINED;
2302
            p[ 7] = VA_BITS16_UNDEFINED;
2303
            p[ 8] = VA_BITS16_UNDEFINED;
2304
            p[ 9] = VA_BITS16_UNDEFINED;
2305
            p[10] = VA_BITS16_UNDEFINED;
2306
            p[11] = VA_BITS16_UNDEFINED;
2307
            p[12] = VA_BITS16_UNDEFINED;
2308
            p[13] = VA_BITS16_UNDEFINED;
2309
            p[14] = VA_BITS16_UNDEFINED;
2310
            p[15] = VA_BITS16_UNDEFINED;
2311
            p[16] = VA_BITS16_UNDEFINED;
2312
            p[17] = VA_BITS16_UNDEFINED;
2313
            p[18] = VA_BITS16_UNDEFINED;
2314
            p[19] = VA_BITS16_UNDEFINED;
2315
            p[20] = VA_BITS16_UNDEFINED;
2316
            p[21] = VA_BITS16_UNDEFINED;
2317
            p[22] = VA_BITS16_UNDEFINED;
2318
            p[23] = VA_BITS16_UNDEFINED;
2319
            p[24] = VA_BITS16_UNDEFINED;
2320
            p[25] = VA_BITS16_UNDEFINED;
2321
            p[26] = VA_BITS16_UNDEFINED;
2322
            p[27] = VA_BITS16_UNDEFINED;
2323
            p[28] = VA_BITS16_UNDEFINED;
2324
            p[29] = VA_BITS16_UNDEFINED;
2325
            p[30] = VA_BITS16_UNDEFINED;
2326
            p[31] = VA_BITS16_UNDEFINED;
2327
            p[32] = VA_BITS16_UNDEFINED;
2328
            p[33] = VA_BITS16_UNDEFINED;
2329
            p[34] = VA_BITS16_UNDEFINED;
2330
            p[35] = VA_BITS16_UNDEFINED;
2331
            return;
2332
         }
2333
      }
2334
   }
2335
2336
   /* else fall into slow case */
2337
   MC_(make_mem_undefined)(base, len);
2338
}
2339
2340
1 by Andrés Roldán
Import upstream version 2.1.1
2341
/*------------------------------------------------------------*/
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
2342
/*--- Checking memory                                      ---*/
2343
/*------------------------------------------------------------*/
2344
1 by Andrés Roldán
Import upstream version 2.1.1
2345
/* Result of checking the definedness/addressability of a range.
   Values deliberately don't start at 0/1, to help catch accidental
   truth-value interpretation or uninitialised use. */
typedef
   enum {
      MC_Ok = 5,
      MC_AddrErr = 6,
      MC_ValueErr = 7
   }
   MC_ReadResult;
2352
2353
2354
/* Check permissions for address range.  If inadequate permissions
1 by Andrés Roldán
Import upstream version 2.1.1
2355
   exist, *bad_addr is set to the offending address, so the caller can
2356
   know what it is. */
2357
2358
/* Returns True if [a .. a+len) is not addressible.  Otherwise,
2359
   returns False, and if bad_addr is non-NULL, sets *bad_addr to
2360
   indicate the lowest failing address.  Functions below are
2361
   similar. */
2362
/* Returns True iff every byte of [a, a+len) is marked NOACCESS.
   Otherwise returns False and, when bad_addr is non-NULL, stores the
   lowest offending address in *bad_addr. */
Bool MC_(check_mem_is_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;

   PROF_EVENT(60, "check_mem_is_noaccess");
   for (i = 0; i < len; i++, a++) {
      UWord vabits2;
      PROF_EVENT(61, "check_mem_is_noaccess(loop)");
      vabits2 = get_vabits2(a);
      if (vabits2 != VA_BITS2_NOACCESS) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return False;
      }
   }
   return True;
}
2379
2380
/* Returns True iff no byte of [a, a+len) is NOACCESS.  Otherwise
   returns False and, when bad_addr is non-NULL, stores the lowest
   unaddressable address in *bad_addr. */
static Bool is_mem_addressable ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;

   PROF_EVENT(62, "is_mem_addressable");
   for (i = 0; i < len; i++, a++) {
      UWord vabits2;
      PROF_EVENT(63, "is_mem_addressable(loop)");
      vabits2 = get_vabits2(a);
      if (vabits2 == VA_BITS2_NOACCESS) {
         if (bad_addr != NULL)
            *bad_addr = a;
         return False;
      }
   }
   return True;
}
2397
2398
/* Scan [a, a+len) and report the first problem found, if any.
   Addressability errors (MC_AddrErr) take precedence over definedness
   errors (MC_ValueErr); definedness errors are only reported when
   --undef-value-errors=yes.  On error, *bad_addr (if non-NULL) gets
   the offending address. */
static MC_ReadResult is_mem_defined ( Addr a, SizeT len, Addr* bad_addr )
{
   SizeT i;

   PROF_EVENT(64, "is_mem_defined");
   DEBUG("is_mem_defined\n");
   for (i = 0; i < len; i++, a++) {
      UWord vabits2;
      PROF_EVENT(65, "is_mem_defined(loop)");
      vabits2 = get_vabits2(a);
      if (vabits2 != VA_BITS2_DEFINED) {
         if (bad_addr != NULL)
            *bad_addr = a;
         if (vabits2 == VA_BITS2_NOACCESS)
            return MC_AddrErr;
         if (MC_(clo_undef_value_errors))
            return MC_ValueErr;
         /* otherwise: not reporting definedness errors; keep going */
      }
   }
   return MC_Ok;
}
1 by Andrés Roldán
Import upstream version 2.1.1
2420
2421
2422
/* Check a zero-terminated ascii string.  Tricky -- don't want to
2423
   examine the actual bytes, to find the end, until we're sure it is
2424
   safe to do so. */
2425
2426
static Bool mc_is_defined_asciiz ( Addr a, Addr* bad_addr )
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2427
{
1 by Andrés Roldán
Import upstream version 2.1.1
2428
   UWord vabits2;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2429
2430
   PROF_EVENT(66, "mc_is_defined_asciiz");
2431
   DEBUG("mc_is_defined_asciiz\n");
2432
   while (True) {
1 by Andrés Roldán
Import upstream version 2.1.1
2433
      PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2434
      vabits2 = get_vabits2(a);
2435
      if (VA_BITS2_DEFINED != vabits2) {
2436
         // Error!  Nb: Report addressability errors in preference to
2437
         // definedness errors.  And don't report definedeness errors unless
2438
         // --undef-value-errors=yes.
2439
         if (bad_addr != NULL) *bad_addr = a;
2440
         if      ( VA_BITS2_NOACCESS == vabits2 ) return MC_AddrErr; 
2441
         else if ( MC_(clo_undef_value_errors)  ) return MC_ValueErr;
2442
      }
1 by Andrés Roldán
Import upstream version 2.1.1
2443
      /* Ok, a is safe to read. */
2444
      if (* ((UChar*)a) == 0) {
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2445
         return MC_Ok;
2446
      }
2447
      a++;
1 by Andrés Roldán
Import upstream version 2.1.1
2448
   }
2449
}
2450
2451
2452
/*------------------------------------------------------------*/
2453
/*--- Memory event handlers                                ---*/
2454
/*------------------------------------------------------------*/
2455
2456
static
2457
void check_mem_is_addressable ( CorePart part, ThreadId tid, Char* s,
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2458
                                Addr base, SizeT size )
2459
{
1 by Andrés Roldán
Import upstream version 2.1.1
2460
   Addr bad_addr;
2461
   Bool ok = is_mem_addressable ( base, size, &bad_addr );
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2462
2463
   if (!ok) {
1 by Andrés Roldán
Import upstream version 2.1.1
2464
      switch (part) {
2465
      case Vg_CoreSysCall:
2466
         mc_record_memparam_error ( tid, bad_addr, /*isAddrErr*/True, s );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2467
         break;
1 by Andrés Roldán
Import upstream version 2.1.1
2468
2469
      case Vg_CoreSignal:
2470
         mc_record_core_mem_error( tid, /*isAddrErr*/True, s );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2471
         break;
1 by Andrés Roldán
Import upstream version 2.1.1
2472
2473
      default:
2474
         VG_(tool_panic)("check_mem_is_addressable: unexpected CorePart");
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2475
      }
1 by Andrés Roldán
Import upstream version 2.1.1
2476
   }
2477
}
2478
2479
static
2480
void check_mem_is_defined ( CorePart part, ThreadId tid, Char* s,
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2481
                            Addr base, SizeT size )
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
2482
{     
1 by Andrés Roldán
Import upstream version 2.1.1
2483
   Addr bad_addr;
2484
   MC_ReadResult res = is_mem_defined ( base, size, &bad_addr );
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2485
1 by Andrés Roldán
Import upstream version 2.1.1
2486
   if (MC_Ok != res) {
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
2487
      Bool isAddrErr = ( MC_AddrErr == res ? True : False );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2488
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2489
      switch (part) {
1 by Andrés Roldán
Import upstream version 2.1.1
2490
      case Vg_CoreSysCall:
2491
         mc_record_memparam_error ( tid, bad_addr, isAddrErr, s );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2492
         break;
1 by Andrés Roldán
Import upstream version 2.1.1
2493
      
2494
      /* If we're being asked to jump to a silly address, record an error 
2495
         message before potentially crashing the entire system. */
2496
      case Vg_CoreTranslate:
2497
         mc_record_jump_error( tid, bad_addr );
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2498
         break;
1 by Andrés Roldán
Import upstream version 2.1.1
2499
2500
      default:
2501
         VG_(tool_panic)("check_mem_is_defined: unexpected CorePart");
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2502
      }
1 by Andrés Roldán
Import upstream version 2.1.1
2503
   }
2504
}
2505
2506
static
2507
void check_mem_is_defined_asciiz ( CorePart part, ThreadId tid,
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2508
                                   Char* s, Addr str )
1 by Andrés Roldán
Import upstream version 2.1.1
2509
{
2510
   MC_ReadResult res;
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
2511
   Addr bad_addr = 0;   // shut GCC up
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2512
2513
   tl_assert(part == Vg_CoreSysCall);
2514
   res = mc_is_defined_asciiz ( (Addr)str, &bad_addr );
2515
   if (MC_Ok != res) {
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
2516
      Bool isAddrErr = ( MC_AddrErr == res ? True : False );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2517
      mc_record_memparam_error ( tid, bad_addr, isAddrErr, s );
2518
   }
1 by Andrés Roldán
Import upstream version 2.1.1
2519
}
2520
2521
static
2522
void mc_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
2523
{
1 by Andrés Roldán
Import upstream version 2.1.1
2524
   /* Ignore the permissions, just make it defined.  Seems to work... */
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2525
   // Because code is defined, initialised variables get put in the data
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2526
   // segment and are defined, and uninitialised variables get put in the
2527
   // bss segment and are auto-zeroed (and so defined).  
2528
   //
2529
   // It's possible that there will be padding between global variables.
2530
   // This will also be auto-zeroed, and marked as defined by Memcheck.  If
2531
   // a program uses it, Memcheck will not complain.  This is arguably a
2532
   // false negative, but it's a grey area -- the behaviour is defined (the
2533
   // padding is zeroed) but it's probably not what the user intended.  And
2534
   // we can't avoid it.
2535
   DEBUG("mc_new_mem_startup(%p, %llu, rr=%u, ww=%u, xx=%u)\n",
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
2536
         a, (ULong)len, rr, ww, xx);
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2537
   MC_(make_mem_defined)(a, len);
2538
}
1 by Andrés Roldán
Import upstream version 2.1.1
2539
2540
static
2541
void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
2542
{
1 by Andrés Roldán
Import upstream version 2.1.1
2543
   MC_(make_mem_defined)(a, len);
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2544
}
2545
2546
static
2547
void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
2548
{
2549
   MC_(make_mem_defined)(a, len);
2550
}
2551
2552
1 by Andrés Roldán
Import upstream version 2.1.1
2553
/*------------------------------------------------------------*/
2554
/*--- Register event handlers                              ---*/
2555
/*------------------------------------------------------------*/
2556
2557
/* When some chunk of guest state is written, mark the corresponding
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2558
   shadow area as valid.  This is used to initialise arbitrarily large
2559
   chunks of guest state, hence the _SIZE value, which has to be as
2560
   big as the biggest guest state.
2561
*/
2562
static void mc_post_reg_write ( CorePart part, ThreadId tid, 
2563
                                OffT offset, SizeT size)
2564
{
2565
#  define MAX_REG_WRITE_SIZE 1408
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2566
   UChar area[MAX_REG_WRITE_SIZE];
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2567
   tl_assert(size <= MAX_REG_WRITE_SIZE);
2568
   VG_(memset)(area, V_BITS8_DEFINED, size);
2569
   VG_(set_shadow_regs_area)( tid, offset, size, area );
2570
#  undef MAX_REG_WRITE_SIZE
2571
}
2572
2573
static 
2574
void mc_post_reg_write_clientcall ( ThreadId tid, 
2575
                                    OffT offset, SizeT size,
2576
                                    Addr f)
2577
{
2578
   mc_post_reg_write(/*dummy*/0, tid, offset, size);
2579
}
2580
2581
/* Look at the definedness of the guest's shadow state for 
2582
   [offset, offset+len).  If any part of that is undefined, record 
2583
   a parameter error.
2584
*/
2585
static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s, 
2586
                              OffT offset, SizeT size)
2587
{
2588
   Int   i;
2589
   Bool  bad;
2590
2591
   UChar area[16];
2592
   tl_assert(size <= 16);
2593
2594
   VG_(get_shadow_regs_area)( tid, offset, size, area );
2595
2596
   bad = False;
2597
   for (i = 0; i < size; i++) {
2598
      if (area[i] != V_BITS8_DEFINED) {
2599
         bad = True;
2600
         break;
2601
      }
2602
   }
2603
2604
   if (bad)
2605
      mc_record_regparam_error ( tid, s );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2606
}
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2607
2608
2609
/*------------------------------------------------------------*/
2610
/*--- Error types                                          ---*/
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2611
/*------------------------------------------------------------*/
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2612
2613
// Different kinds of blocks that a faulting address can lie in.
// Starts at 111 so a zeroed/uninitialised value is not a valid kind.
typedef enum {
   Block_Mallocd = 111,
   Block_Freed,
   Block_Mempool,
   Block_MempoolChunk,
   Block_UserG
} BlockKind;
2621
2622
/* ------------------ Addresses -------------------- */
2623
2624
/* The classification of a faulting address. */
typedef 
   enum { 
      Addr_Undescribed,   // as-yet unclassified
      Addr_Unknown,       // classification yielded nothing useful
      Addr_Stack,         // on some thread's stack
      Addr_Block          // in a heap/mempool/user-defined block
   }
   AddrTag;
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2633
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2634
typedef
2635
   struct _AddrInfo
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2636
   AddrInfo;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2637
2638
struct _AddrInfo {
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
2639
   AddrTag tag;
2640
   union {
2641
      // As-yet unclassified.
2642
      struct { } Undescribed;
2643
2644
      // On a stack.
2645
      struct {
2646
         ThreadId tid;        // Which thread's stack?
2647
      } Stack;
2648
2649
      // This covers heap blocks (normal and from mempools) and user-defined
2650
      // blocks.
2651
      struct {
2652
         BlockKind   block_kind;
2653
         Char*       block_desc;    // "block", "mempool" or user-defined
2654
         SizeT       block_szB;
2655
         OffT        rwoffset;
2656
         ExeContext* lastchange;
2657
      } Block;
2658
2659
      // Classification yielded nothing useful.
2660
      struct { } Unknown;
2661
2662
   } Addr;
2663
};
2664
2665
/* ------------------ Errors ----------------------- */
2666
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2667
/* What kind of error it is. */
typedef 
   enum { 
      Err_Value,
      Err_Cond,
      Err_CoreMem,
      Err_Addr, 
      Err_Jump, 
      Err_RegParam,
      Err_MemParam,
      Err_User,
      Err_Free,
      Err_FreeMismatch,
      Err_Overlap,
      Err_Leak,
      Err_IllegalMempool
   }
   MC_ErrorTag;
2685
2686
2687
typedef struct _MC_Error MC_Error;
2688
2689
struct _MC_Error {
2690
   // Nb: we don't need the tag here, as it's stored in the Error type! Yuk.
2691
   //MC_ErrorTag tag;
2692
2693
   union {
2694
      // Use of an undefined value:
2695
      // - as a pointer in a load or store
2696
      // - as a jump target
2697
      struct {
2698
         SizeT szB;     // size of value in bytes
2699
      } Value;
2700
2701
      // Use of an undefined value in a conditional branch or move.
2702
      struct {
2703
      } Cond;
2704
2705
      // Addressability error in core (signal-handling) operation.
2706
      // It would be good to get rid of this error kind, merge it with
2707
      // another one somehow.
2708
      struct {
2709
      } CoreMem;
2710
2711
      // Use of an unaddressable memory location in a load or store.
2712
      struct {
2713
         Bool     isWrite;    // read or write?
2714
         SizeT    szB;        // not used for exec (jump) errors
2715
         Bool     maybe_gcc;  // True if just below %esp -- could be a gcc bug
2716
         AddrInfo ai;
2717
      } Addr;
2718
2719
      // Jump to an unaddressable memory location.
2720
      struct {
2721
         AddrInfo ai;
2722
      } Jump;
2723
2724
      // System call register input contains undefined bytes.
2725
      struct {
2726
      } RegParam;
2727
2728
      // System call memory input contains undefined/unaddressable bytes
2729
      struct {
2730
         Bool     isAddrErr;  // Addressability or definedness error?
2731
         AddrInfo ai;
2732
      } MemParam;
2733
2734
      // Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
2735
      struct {
2736
         Bool     isAddrErr;  // Addressability or definedness error?
2737
         AddrInfo ai;
2738
      } User;
2739
2740
      // Program tried to free() something that's not a heap block (this
2741
      // covers double-frees). */
2742
      struct {
2743
         AddrInfo ai;
2744
      } Free;
2745
2746
      // Program allocates heap block with one function
2747
      // (malloc/new/new[]/custom) and deallocates with not the matching one.
2748
      struct {
2749
         AddrInfo ai;
2750
      } FreeMismatch;
2751
2752
      // Call to strcpy, memcpy, etc, with overlapping blocks.
2753
      struct {
2754
         Addr src;   // Source block
2755
         Addr dst;   // Destination block
2756
         Int  szB;   // Size in bytes;  0 if unused.
2757
      } Overlap;
2758
2759
      // A memory leak.
2760
      struct {
2761
         UInt        n_this_record;
2762
         UInt        n_total_records;
2763
         LossRecord* lossRecord;
2764
      } Leak;
2765
2766
      // A memory pool error.
2767
      struct {
2768
         AddrInfo ai;
2769
      } IllegalMempool;
2770
2771
   } Err;
2772
};
2773
2774
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
2775
/*------------------------------------------------------------*/
2776
/*--- Printing errors                                      ---*/
2777
/*------------------------------------------------------------*/
2778
2779
/* Describe the faulting address 'a' (classification in *ai) to the
   user, in plain or XML form depending on --xml. */
static void mc_pp_AddrInfo ( Addr a, AddrInfo* ai, Bool maybe_gcc )
{
   HChar* xpre  = VG_(clo_xml) ? "  <auxwhat>" : " ";
   HChar* xpost = VG_(clo_xml) ? "</auxwhat>"  : "";

   switch (ai->tag) {
      case Addr_Unknown:
         if (maybe_gcc) {
            VG_(message)(Vg_UserMsg, 
               "%sAddress 0x%llx is just below the stack ptr.  "
               "To suppress, use: --workaround-gcc296-bugs=yes%s",
               xpre, (ULong)a, xpost
            );
         } else {
            VG_(message)(Vg_UserMsg, 
               "%sAddress 0x%llx "
               "is not stack'd, malloc'd or (recently) free'd%s",
               xpre, (ULong)a, xpost);
         }
         break;

      case Addr_Stack: 
         VG_(message)(Vg_UserMsg, 
                      "%sAddress 0x%llx is on thread %d's stack%s", 
                      xpre, (ULong)a, ai->Addr.Stack.tid, xpost);
         break;

      case Addr_Block: {
         SizeT       block_szB = ai->Addr.Block.block_szB;
         OffT        rwoffset  = ai->Addr.Block.rwoffset;
         SizeT       delta;
         const Char* relative;

         /* Locate 'a' relative to the block. */
         if (rwoffset < 0) {
            delta    = (SizeT)(-rwoffset);
            relative = "before";
         } else if (rwoffset >= block_szB) {
            delta    = rwoffset - block_szB;
            relative = "after";
         } else {
            delta    = rwoffset;
            relative = "inside";
         }
         VG_(message)(Vg_UserMsg, 
            "%sAddress 0x%lx is %,lu bytes %s a %s of size %,lu %s%s",
            xpre,
            a, delta, relative, ai->Addr.Block.block_desc,
            block_szB,
            ai->Addr.Block.block_kind==Block_Mallocd ? "alloc'd" 
            : ai->Addr.Block.block_kind==Block_Freed ? "free'd" 
                                                     : "client-defined",
            xpost);
         VG_(pp_ExeContext)(ai->Addr.Block.lastchange);
         break;
      }

      default:
         VG_(tool_panic)("mc_pp_AddrInfo");
   }
}
2839
2840
/* Human-readable description of a leak's Reachedness. */
static const HChar* str_leak_lossmode ( Reachedness lossmode )
{
   switch (lossmode) {
      case Unreached:    return "definitely lost";
      case IndirectLeak: return "indirectly lost";
      case Interior:     return "possibly lost";
      case Proper:       return "still reachable";
      default:           return "?";
   }
}
2851
2852
/* XML tag name for a leak's Reachedness. */
static const HChar* xml_leak_kind ( Reachedness lossmode )
{
   switch (lossmode) {
      case Unreached:    return "Leak_DefinitelyLost";
      case IndirectLeak: return "Leak_IndirectlyLost";
      case Interior:     return "Leak_PossiblyLost";
      case Proper:       return "Leak_StillReachable";
      default:           return "?";
   }
}
2863
2864
/* Emit an error's headline: in XML mode, a <kind> line followed by the
   formatted message wrapped in <what>...</what>; in plain mode just
   the message.  Always followed by the error's stack trace. */
static void mc_pp_msg( Char* xml_name, Error* err, const HChar* format, ... )
{
   va_list vargs;
   Char    buf[256];
   HChar*  xpre  = VG_(clo_xml) ? "  <what>" : "";
   HChar*  xpost = VG_(clo_xml) ? "</what>"  : "";

   if (VG_(clo_xml))
      VG_(message)(Vg_UserMsg, "  <kind>%s</kind>", xml_name);
   /* Stick xpre and xpost on the front and back of the format string. */
   VG_(snprintf)(buf, 256, "%s%s%s", xpre, format, xpost);
   va_start(vargs, format);
   VG_(vmessage) ( Vg_UserMsg, buf, vargs );
   va_end(vargs);
   VG_(pp_ExeContext)( VG_(get_error_where)(err) );
}
2880
2881
/* Pretty-print one Memcheck error, dispatching on its kind.  Most
   cases delegate the headline to mc_pp_msg (which handles XML vs
   plain text) and then, where the error has an associated address,
   describe it via mc_pp_AddrInfo.  Err_Leak is formatted inline
   because its output differs substantially between XML and plain
   modes. */
static void mc_pp_Error ( Error* err )
{
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
      case Err_CoreMem: {
         /* What the hell *is* a CoreMemError? jrs 2005-May-18 */
         /* As of 2006-Dec-14, it's caused by unaddressable bytes in a
            signal handler frame.  --njn */
         mc_pp_msg("CoreMemError", err,
                   "%s contains unaddressable byte(s)", 
                   VG_(get_error_string)(err));
         break;
      } 
      
      case Err_Value:
         mc_pp_msg("UninitValue", err,
                   "Use of uninitialised value of size %d",
                   extra->Err.Value.szB);
         break;

      case Err_Cond:
         mc_pp_msg("UninitCondition", err,
                   "Conditional jump or move depends"
                   " on uninitialised value(s)");
         break;

      case Err_RegParam:
         mc_pp_msg("SyscallParam", err,
                   "Syscall param %s contains uninitialised byte(s)",
                   VG_(get_error_string)(err));
         break;

      case Err_MemParam:
         /* The error string names the syscall parameter; isAddrErr
            distinguishes addressability from definedness errors. */
         mc_pp_msg("SyscallParam", err,
                   "Syscall param %s points to %s byte(s)",
                   VG_(get_error_string)(err),
                   ( extra->Err.MemParam.isAddrErr 
                     ? "unaddressable" : "uninitialised" ));
         mc_pp_AddrInfo(VG_(get_error_address)(err),
                        &extra->Err.MemParam.ai, False);
         break;

      case Err_User:
         mc_pp_msg("ClientCheck", err,
                   "%s byte(s) found during client check request", 
                   ( extra->Err.User.isAddrErr
                     ? "Unaddressable" : "Uninitialised" ));
         mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.User.ai,
                        False);
         break;

      case Err_Free:
         mc_pp_msg("InvalidFree", err,
                   "Invalid free() / delete / delete[]");
         mc_pp_AddrInfo(VG_(get_error_address)(err),
                        &extra->Err.Free.ai, False);
         break;

      case Err_FreeMismatch:
         mc_pp_msg("MismatchedFree", err,
                   "Mismatched free() / delete / delete []");
         mc_pp_AddrInfo(VG_(get_error_address)(err),
                        &extra->Err.FreeMismatch.ai, False);
         break;

      case Err_Addr:
         if (extra->Err.Addr.isWrite) {
            mc_pp_msg("InvalidWrite", err,
                      "Invalid write of size %d", 
                      extra->Err.Addr.szB); 
         } else {
            mc_pp_msg("InvalidRead", err,
                      "Invalid read of size %d", 
                      extra->Err.Addr.szB); 
         }
         /* maybe_gcc flags accesses just below SP that may be
            gcc-2.96 artefacts; mc_pp_AddrInfo takes it as a hint. */
         mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Addr.ai,
                        extra->Err.Addr.maybe_gcc);
         break;

      case Err_Jump:
         mc_pp_msg("InvalidJump", err,
                   "Jump to the invalid address stated on the next line");
         mc_pp_AddrInfo(VG_(get_error_address)(err), &extra->Err.Jump.ai,
                        False);
         break;

      case Err_Overlap:
         /* szB == 0 means the overlapping routine had no length
            argument (eg. strcpy), so none is printed. */
         if (extra->Err.Overlap.szB == 0)
            mc_pp_msg("Overlap", err,
                      "Source and destination overlap in %s(%p, %p)",
                      VG_(get_error_string)(err),
                      extra->Err.Overlap.dst, extra->Err.Overlap.src);
         else
            mc_pp_msg("Overlap", err,
                      "Source and destination overlap in %s(%p, %p, %d)",
                      VG_(get_error_string)(err),
                      extra->Err.Overlap.dst, extra->Err.Overlap.src,
                      extra->Err.Overlap.szB);
         break;

      case Err_IllegalMempool:
         mc_pp_msg("InvalidMemPool", err,
                   "Illegal memory pool address");
         mc_pp_AddrInfo(VG_(get_error_address)(err),
                        &extra->Err.IllegalMempool.ai, False);
         break;

      case Err_Leak: {
         HChar*      xpre  = VG_(clo_xml) ? "  <what>" : "";
         HChar*      xpost = VG_(clo_xml) ? "</what>"  : "";
         UInt        n_this_record   = extra->Err.Leak.n_this_record;
         UInt        n_total_records = extra->Err.Leak.n_total_records;
         LossRecord* l               = extra->Err.Leak.lossRecord;

         if (VG_(clo_xml)) {
            VG_(message)(Vg_UserMsg, "  <kind>%t</kind>",
                         xml_leak_kind(l->loss_mode));
         } else {
            VG_(message)(Vg_UserMsg, "");
         }

         /* Two layouts: with and without indirectly-lost bytes. */
         if (l->indirect_bytes) {
            VG_(message)(Vg_UserMsg, 
               "%s%,lu (%,lu direct, %,lu indirect) bytes in %,u blocks"
               " are %s in loss record %,u of %,u%s",
               xpre,
               l->total_bytes + l->indirect_bytes, 
               l->total_bytes, l->indirect_bytes, l->num_blocks,
               str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
               xpost
            );
            if (VG_(clo_xml)) {
               // Nb: don't put commas in these XML numbers 
               VG_(message)(Vg_UserMsg, "  <leakedbytes>%lu</leakedbytes>", 
                                        l->total_bytes + l->indirect_bytes);
               VG_(message)(Vg_UserMsg, "  <leakedblocks>%u</leakedblocks>", 
                                        l->num_blocks);
            }
         } else {
            VG_(message)(
               Vg_UserMsg, 
               "%s%,lu bytes in %,u blocks are %s in loss record %,u of %,u%s",
               xpre,
               l->total_bytes, l->num_blocks,
               str_leak_lossmode(l->loss_mode), n_this_record, n_total_records,
               xpost
            );
            if (VG_(clo_xml)) {
               /* NOTE(review): this branch uses %d where the one above
                  uses %lu/%u for the same fields — looks inconsistent;
                  verify against the types in LossRecord. */
               VG_(message)(Vg_UserMsg, "  <leakedbytes>%d</leakedbytes>", 
                                        l->total_bytes);
               VG_(message)(Vg_UserMsg, "  <leakedblocks>%d</leakedblocks>", 
                                        l->num_blocks);
            }
         }
         VG_(pp_ExeContext)(l->allocated_at);
         break;
      }

      default: 
         VG_(printf)("Error:\n  unknown Memcheck error code %d\n",
                     VG_(get_error_kind)(err));
         VG_(tool_panic)("unknown error code in mc_pp_Error)");
   }
}
3046
3047
/*------------------------------------------------------------*/
3048
/*--- Recording errors                                     ---*/
3049
/*------------------------------------------------------------*/
3050
3051
/* These many bytes below %ESP are considered addressible if we're
3052
   doing the --workaround-gcc296-bugs hack. */
3053
#define VG_GCC296_BUG_STACK_SLOP 1024
3054
3055
/* Is this address within some small distance below %ESP?  Used only
3056
   for the --workaround-gcc296-bugs kludge. */
3057
/* Is address 'aa' within VG_GCC296_BUG_STACK_SLOP bytes below 'esp'?
   Used only for the --workaround-gcc296-bugs kludge. */
static Bool is_just_below_ESP( Addr esp, Addr aa )
{
   /* The 'esp > aa' guard avoids unsigned wraparound in 'esp - aa'. */
   return (esp > aa && (esp - aa) <= VG_GCC296_BUG_STACK_SLOP)
          ? True : False;
}
3064
3065
/* --- Called from generated and non-generated code --- */
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
3066
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3067
/* Record an invalid read/write of 'szB' bytes at address 'a'.  The
   error may be silently dropped: if 'a' lies in a user-ignored range,
   if it falls under the AIX zero-page / glink special cases below, or
   if it is just below SP and --workaround-gcc296-bugs is in force. */
static void mc_record_address_error ( ThreadId tid, Addr a, Int szB,
                                      Bool isWrite )
{
   MC_Error extra;
   Bool     just_below_esp;

   if (in_ignored_range(a)) 
      return;

#  if defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
   /* AIX zero-page handling.  On AIX, reads from page zero are,
      bizarrely enough, legitimate.  Writes to page zero aren't,
      though.  Since memcheck can't distinguish reads from writes, the
      best we can do is to 'act normal' and mark the A bits in the
      normal way as noaccess, but then hide any reads from that page
      that get reported here. */
   if ((!isWrite) && a >= 0 && a < 4096 && a+szB <= 4096) 
      return;

   /* Appalling AIX hack.  It suppresses reads done by glink
      fragments.  Getting rid of this would require figuring out
      somehow where the referenced data areas are (and their
      sizes). */
   if ((!isWrite) && szB == sizeof(Word)) { 
      UInt i1, i2;
      UInt* pc = (UInt*)VG_(get_IP)(tid);
      /* Word-size-dependent instruction encodings for the two loads
         that make up a glink fragment. */
      if (sizeof(Word) == 4) {
         i1 = 0x800c0000; /* lwz r0,0(r12) */
         i2 = 0x804c0004; /* lwz r2,4(r12) */
      } else {
         i1 = 0xe80c0000; /* ld  r0,0(r12) */
         i2 = 0xe84c0008; /* ld  r2,8(r12) */
      }
      /* Suppress if the PC sits on either instruction of the pair. */
      if (pc[0] == i1 && pc[1] == i2) return;
      if (pc[0] == i2 && pc[-1] == i1) return;
   }
#  endif

   just_below_esp = is_just_below_ESP( VG_(get_SP)(tid), a );

   /* If this is caused by an access immediately below %ESP, and the
      user asks nicely, we just ignore it. */
   if (MC_(clo_workaround_gcc296_bugs) && just_below_esp)
      return;

   /* The address is described lazily, in mc_update_extra, only if the
      error survives duplicate-filtering — hence Addr_Undescribed. */
   extra.Err.Addr.isWrite   = isWrite;
   extra.Err.Addr.szB       = szB;
   extra.Err.Addr.maybe_gcc = just_below_esp;
   extra.Err.Addr.ai.tag    = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Addr, a, /*s*/NULL, &extra );
}
3118
3119
/* Record a use of an uninitialised value of 'szB' bytes.  Only legal
   when undefined-value checking is enabled. */
static void mc_record_value_error ( ThreadId tid, Int szB )
{
   MC_Error err_extra;
   tl_assert(MC_(clo_undef_value_errors));
   err_extra.Err.Value.szB = szB;
   VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &err_extra );
}
3126
3127
/* Record a conditional jump/move depending on uninitialised data.
   No 'extra' info is needed: the error is fully described by its kind
   plus the execution context captured by the core.  Only legal when
   undefined-value checking is enabled. */
static void mc_record_cond_error ( ThreadId tid )
{
   tl_assert(MC_(clo_undef_value_errors));
   VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, /*extra*/NULL);
}
3132
3133
/* --- Called from non-generated code --- */
3134
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3135
/* This is for memory errors in pthread functions, as opposed to pthread API
   errors which are found by the core.  'msg' becomes the error string
   printed by mc_pp_Error's Err_CoreMem case.
   NOTE(review): the isAddrErr parameter is currently unused here. */
static void mc_record_core_mem_error ( ThreadId tid, Bool isAddrErr, Char* msg )
{
   VG_(maybe_record_error)( tid, Err_CoreMem, /*addr*/0, msg, /*extra*/NULL );
}
3141
3142
/* Record a syscall-parameter error where the offending (uninitialised)
   value lives in a register.  'msg' names the parameter and is printed
   by mc_pp_Error's Err_RegParam case. */
static void mc_record_regparam_error ( ThreadId tid, Char* msg )
{
   tl_assert(VG_INVALID_THREADID != tid);
   VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, /*extra*/NULL );
}
3147
3148
static void mc_record_memparam_error ( ThreadId tid, Addr a, 
3149
                                       Bool isAddrErr, Char* msg )
3150
{
3151
   MC_Error extra;
3152
   tl_assert(VG_INVALID_THREADID != tid);
3153
   if (!isAddrErr) 
3154
      tl_assert(MC_(clo_undef_value_errors));
3155
   extra.Err.MemParam.isAddrErr = isAddrErr;
3156
   extra.Err.MemParam.ai.tag    = Addr_Undescribed;
3157
   VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
3158
}
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3159
3160
/* Record a jump to the unaddressable target 'a'. */
static void mc_record_jump_error ( ThreadId tid, Addr a )
{
   MC_Error err_extra;
   tl_assert(VG_INVALID_THREADID != tid);
   /* Address described lazily in mc_update_extra. */
   err_extra.Err.Jump.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Jump, a, /*s*/NULL, &err_extra );
}
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3167
3168
/* Record an invalid free()/delete/delete[] of address 'a'. */
void MC_(record_free_error) ( ThreadId tid, Addr a ) 
{
   MC_Error err_extra;
   tl_assert(VG_INVALID_THREADID != tid);
   /* Address described lazily in mc_update_extra. */
   err_extra.Err.Free.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_Free, a, /*s*/NULL, &err_extra );
}
3175
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3176
/* Record a mismatched malloc/free-family deallocation of chunk 'mc'.
   Unlike the other recorders, the address is described immediately,
   since the MC_Chunk is to hand; mc_update_extra may later override
   this with a user-block description. */
void MC_(record_freemismatch_error) ( ThreadId tid, MC_Chunk* mc )
{
   MC_Error err_extra;
   AddrInfo* block_ai = &err_extra.Err.FreeMismatch.ai;

   tl_assert(VG_INVALID_THREADID != tid);
   block_ai->tag                  = Addr_Block;
   block_ai->Addr.Block.block_kind = Block_Mallocd;  // Nb: Not 'Block_Freed'
   block_ai->Addr.Block.block_desc = "block";
   block_ai->Addr.Block.block_szB  = mc->szB;
   block_ai->Addr.Block.rwoffset   = 0;
   block_ai->Addr.Block.lastchange = mc->where;
   VG_(maybe_record_error)( tid, Err_FreeMismatch, mc->data, /*s*/NULL,
                            &err_extra );
}
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3190
3191
/* Record an illegal memory-pool address 'a' passed to a mempool
   client request. */
void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a ) 
{
   MC_Error err_extra;
   tl_assert(VG_INVALID_THREADID != tid);
   /* Address described lazily in mc_update_extra. */
   err_extra.Err.IllegalMempool.ai.tag = Addr_Undescribed;
   VG_(maybe_record_error)( tid, Err_IllegalMempool, a, /*s*/NULL, &err_extra );
}
3198
3199
/* Record overlapping src/dst buffers passed to 'function' (memcpy,
   strcpy, etc).  szB is 0 for routines with no length argument. */
static void mc_record_overlap_error ( ThreadId tid, Char* function,
                                      Addr src, Addr dst, SizeT szB )
{
   MC_Error err_extra;
   tl_assert(VG_INVALID_THREADID != tid);
   err_extra.Err.Overlap.szB = szB;
   err_extra.Err.Overlap.src = src;
   err_extra.Err.Overlap.dst = dst;
   VG_(maybe_record_error)( 
      tid, Err_Overlap, /*addr*/0, /*s*/function, &err_extra );
}
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3210
3211
/* Record (and possibly print) one leak loss record.  Leaks go through
   VG_(unique_error) rather than VG_(maybe_record_error): duplicate
   filtering is done by the leak checker itself, and leaks are not
   counted as errors.  Returns what VG_(unique_error) returns. */
Bool MC_(record_leak_error) ( ThreadId tid, UInt n_this_record,
                              UInt n_total_records, LossRecord* lossRecord,
                              Bool print_record )
{
   MC_Error leak_extra;

   leak_extra.Err.Leak.n_this_record   = n_this_record;
   leak_extra.Err.Leak.n_total_records = n_total_records;
   leak_extra.Err.Leak.lossRecord      = lossRecord;
   return VG_(unique_error) ( tid, Err_Leak, /*Addr*/0, /*s*/NULL, &leak_extra,
                              lossRecord->allocated_at, print_record,
                              /*allow_GDB_attach*/False, /*count_error*/False );
}
3224
3225
/* Record an error found during a client check request
   (VALGRIND_CHECK_MEM_IS_* and friends) at address 'a'. */
static void mc_record_user_error ( ThreadId tid, Addr a, Bool isAddrErr )
{
   MC_Error err_extra;

   tl_assert(VG_INVALID_THREADID != tid);
   /* Address described lazily in mc_update_extra. */
   err_extra.Err.User.ai.tag    = Addr_Undescribed;
   err_extra.Err.User.isAddrErr = isAddrErr;
   VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &err_extra );
}
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3234
3235
/*------------------------------------------------------------*/
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
3236
/*--- Other error operations                               ---*/
3237
/*------------------------------------------------------------*/
3238
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3239
/* Compare error contexts, to detect duplicates.  Note that if they
3240
   are otherwise the same, the faulting addrs and associated rwoffsets
3241
   are allowed to be different.  */
3242
static Bool mc_eq_Error ( VgRes res, Error* e1, Error* e2 )
3243
{
3244
   MC_Error* extra1 = VG_(get_error_extra)(e1);
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
3245
   MC_Error* extra2 = VG_(get_error_extra)(e2);
3246
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3247
   /* Guaranteed by calling function */
3248
   tl_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
3249
   
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
3250
   switch (VG_(get_error_kind)(e1)) {
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3251
      case Err_CoreMem: {
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
3252
         Char *e1s, *e2s;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3253
         e1s = VG_(get_error_string)(e1);
3254
         e2s = VG_(get_error_string)(e2);
3255
         if (e1s == e2s)                   return True;
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
3256
         if (VG_STREQ(e1s, e2s))           return True;
3257
         return False;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3258
      }
3259
3260
      case Err_RegParam:
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
3261
         return VG_STREQ(VG_(get_error_string)(e1), VG_(get_error_string)(e2));
3262
3263
      // Perhaps we should also check the addrinfo.akinds for equality.
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3264
      // That would result in more error reports, but only in cases where
3265
      // a register contains uninitialised bytes and points to memory
3266
      // containing uninitialised bytes.  Currently, the 2nd of those to be
3267
      // detected won't be reported.  That is (nearly?) always the memory
3268
      // error, which is good.
3269
      case Err_MemParam:
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
3270
         if (!VG_STREQ(VG_(get_error_string)(e1),
3271
                       VG_(get_error_string)(e2))) return False;
3272
         // fall through
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3273
      case Err_User:
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
3274
         return ( extra1->Err.User.isAddrErr == extra2->Err.User.isAddrErr
3275
                ? True : False );
3276
3277
      case Err_Free:
3278
      case Err_FreeMismatch:
3279
      case Err_Jump:
3280
      case Err_IllegalMempool:
3281
      case Err_Overlap:
3282
      case Err_Cond:
3283
         return True;
3284
3285
      case Err_Addr:
3286
         return ( extra1->Err.Addr.szB == extra2->Err.Addr.szB
3287
                ? True : False );
3288
3289
      case Err_Value:
3290
         return ( extra1->Err.Value.szB == extra2->Err.Value.szB
3291
                ? True : False );
3292
3293
      case Err_Leak:
3294
         VG_(tool_panic)("Shouldn't get Err_Leak in mc_eq_Error,\n"
3295
                         "since it's handled with VG_(unique_error)()!");
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3296
3297
      default: 
3298
         VG_(printf)("Error:\n  unknown error code %d\n",
3299
                     VG_(get_error_kind)(e1));
3300
         VG_(tool_panic)("unknown error code in mc_eq_Error");
3301
   }
3302
}
3303
3304
/* Function used when searching MC_Chunk lists: does address 'a' fall
   within chunk 'mc' (presumably including its redzones — verify
   against VG_(addr_is_in_block))? */
static Bool addr_is_in_MC_Chunk(MC_Chunk* mc, Addr a)
{
   // Nb: this is not quite right!  It assumes that the heap block has
   // a redzone of size MC_MALLOC_REDZONE_SZB.  That's true for malloc'd
   // blocks, but not necessarily true for custom-alloc'd blocks.  So
   // in some cases this could result in an incorrect description (eg.
   // saying "12 bytes after block A" when really it's within block B.
   // Fixing would require adding redzone size to MC_Chunks, though.
   return VG_(addr_is_in_block)( a, mc->data, mc->szB,
                                 MC_MALLOC_REDZONE_SZB );
}
3316
3317
// Forward declaration
3318
static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai );
3319
3320
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
3321
/* Describe an address as best you can, for error messages,
   putting the result in ai.  The searches run in a fixed priority
   order: user-defined (client-request) blocks, thread stacks,
   recently-freed blocks, then live malloc'd blocks; the first match
   wins.  If nothing matches, ai->tag is set to Addr_Unknown.
   Expects ai to arrive still marked Addr_Undescribed. */
static void describe_addr ( Addr a, AddrInfo* ai )
{
   MC_Chunk* mc;
   ThreadId  tid;
   Addr      stack_min, stack_max;

   tl_assert(Addr_Undescribed == ai->tag);

   /* Perhaps it's a user-def'd block? */
   if (client_perm_maybe_describe( a, ai ))
      return;

   /* Perhaps it's on a thread's stack? */
   VG_(thread_stack_reset_iter)();
   while ( VG_(thread_stack_next)(&tid, &stack_min, &stack_max) ) {
      if (stack_min <= a && a <= stack_max) {
         ai->tag            = Addr_Stack;
         ai->Addr.Stack.tid = tid;
         return;
      }
   }
   /* Search for a recently freed block which might bracket it. */
   mc = MC_(get_freed_list_head)();
   while (mc) {
      if (addr_is_in_MC_Chunk(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Freed;
         ai->Addr.Block.block_desc = "block";
         ai->Addr.Block.block_szB  = mc->szB;
         /* Offset of 'a' within the block; may be negative (in the
            preceding redzone). */
         ai->Addr.Block.rwoffset   = (Int)a - (Int)mc->data;
         ai->Addr.Block.lastchange = mc->where;
         return;
      }
      mc = mc->next; 
   }
   /* Search for a currently malloc'd block which might bracket it. */
   VG_(HT_ResetIter)(MC_(malloc_list));
   while ( (mc = VG_(HT_Next)(MC_(malloc_list))) ) {
      if (addr_is_in_MC_Chunk(mc, a)) {
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_Mallocd;
         ai->Addr.Block.block_desc = "block";
         ai->Addr.Block.block_szB  = mc->szB;
         ai->Addr.Block.rwoffset   = (Int)a - (Int)mc->data;
         ai->Addr.Block.lastchange = mc->where;
         return;
      }
   }
   /* Clueless ... */
   ai->tag = Addr_Unknown;
   return;
}
3375
3376
/* Updates the copy with address info if necessary (but not for all errors).
   Called by the core once it has decided to keep an error: the lazy
   Addr_Undescribed tags set by the recorders above are resolved here
   via describe_addr.  Returns the number of bytes of 'extra' the core
   should copy (always sizeof(MC_Error)). */
static UInt mc_update_extra( Error* err )
{
   MC_Error* extra = VG_(get_error_extra)(err);

   switch (VG_(get_error_kind)(err)) {
   // These ones don't have addresses associated with them, and so don't
   // need any updating.
   case Err_CoreMem:
   case Err_Value:
   case Err_Cond:
   case Err_Overlap:
   case Err_RegParam:
   // For Err_Leaks the returned size does not matter -- they are always
   // shown with VG_(unique_error)() so they 'extra' not copied.  But we make it
   // consistent with the others.
   case Err_Leak:
      return sizeof(MC_Error);

   // These ones always involve a memory address.
   case Err_Addr:
      describe_addr ( VG_(get_error_address)(err), &extra->Err.Addr.ai );
      return sizeof(MC_Error);
   case Err_MemParam:
      describe_addr ( VG_(get_error_address)(err), &extra->Err.MemParam.ai );
      return sizeof(MC_Error);
   case Err_Jump:
      describe_addr ( VG_(get_error_address)(err), &extra->Err.Jump.ai );
      return sizeof(MC_Error);
   case Err_User:
      describe_addr ( VG_(get_error_address)(err), &extra->Err.User.ai );
      return sizeof(MC_Error);
   case Err_Free:
      describe_addr ( VG_(get_error_address)(err), &extra->Err.Free.ai );
      return sizeof(MC_Error);
   case Err_IllegalMempool:
      describe_addr ( VG_(get_error_address)(err),
                      &extra->Err.IllegalMempool.ai );
      return sizeof(MC_Error);

   // Err_FreeMismatches have already had their address described;  this is
   // possible because we have the MC_Chunk on hand when the error is
   // detected.  However, the address may be part of a user block, and if so
   // we override the pre-determined description with a user block one.
   case Err_FreeMismatch: {
      tl_assert(extra && Block_Mallocd ==
                extra->Err.FreeMismatch.ai.Addr.Block.block_kind);
      (void)client_perm_maybe_describe( VG_(get_error_address)(err), 
                                        &extra->Err.FreeMismatch.ai );
      return sizeof(MC_Error);
   }

   default: VG_(tool_panic)("mc_update_extra: bad errkind");
   }
}
3431
3432
/*------------------------------------------------------------*/
3433
/*--- Suppressions                                         ---*/
3434
/*------------------------------------------------------------*/
3435
3436
/* The kinds of suppression Memcheck understands.  The names accepted
   in suppression files are mapped onto these in
   mc_recognised_suppression, and matched against errors in
   mc_error_matches_suppression. */
typedef 
   enum { 
      ParamSupp,     // Bad syscall params
      UserSupp,      // Errors arising from client-request checks
      CoreMemSupp,   // Memory errors in core (pthread ops, signal handling)

      // Undefined value errors of given size
      Value1Supp, Value2Supp, Value4Supp, Value8Supp, Value16Supp,

      // Undefined value error in conditional.
      CondSupp,

      // Unaddressable read/write attempt at given size
      Addr1Supp, Addr2Supp, Addr4Supp, Addr8Supp, Addr16Supp,

      JumpSupp,      // Jump to unaddressable target
      FreeSupp,      // Invalid or mismatching free
      OverlapSupp,   // Overlapping blocks in memcpy(), strcpy(), etc
      LeakSupp,      // Something to be suppressed in a leak check.
      MempoolSupp,   // Memory pool suppression.
   } 
   MC_SuppKind;
3458
3459
/* Does 'name' denote a suppression kind Memcheck knows about?  If so,
   stamp the corresponding kind into 'su' and return True; otherwise
   return False. */
static Bool mc_recognised_suppression ( Char* name, Supp* su )
{
   /* Table of recognised suppression-file names and their kinds. */
   static const struct {
      Char*       str;
      MC_SuppKind skind;
   } supp_map[] = {
      { "Param",   ParamSupp   },
      { "User",    UserSupp    },
      { "CoreMem", CoreMemSupp },
      { "Addr1",   Addr1Supp   },
      { "Addr2",   Addr2Supp   },
      { "Addr4",   Addr4Supp   },
      { "Addr8",   Addr8Supp   },
      { "Addr16",  Addr16Supp  },
      { "Jump",    JumpSupp    },
      { "Free",    FreeSupp    },
      { "Leak",    LeakSupp    },
      { "Overlap", OverlapSupp },
      { "Mempool", MempoolSupp },
      { "Cond",    CondSupp    },
      { "Value0",  CondSupp    },   /* backwards compat */
      { "Value1",  Value1Supp  },
      { "Value2",  Value2Supp  },
      { "Value4",  Value4Supp  },
      { "Value8",  Value8Supp  },
      { "Value16", Value16Supp },
   };
   Int i;

   for (i = 0; i < (Int)(sizeof(supp_map)/sizeof(supp_map[0])); i++) {
      if (VG_STREQ(name, supp_map[i].str)) {
         VG_(set_supp_kind)(su, supp_map[i].skind);
         return True;
      }
   }
   return False;
}
3489
3490
static 
3491
Bool mc_read_extra_suppression_info ( Int fd, Char* buf, Int nBuf, Supp *su )
3492
{
3493
   Bool eof;
3494
3495
   if (VG_(get_supp_kind)(su) == ParamSupp) {
3496
      eof = VG_(get_line) ( fd, buf, nBuf );
3497
      if (eof) return False;
3498
      VG_(set_supp_string)(su, VG_(strdup)(buf));
3499
   }
3500
   return True;
3501
}
3502
3503
/* Decide whether error 'err' is covered by suppression 'su'.  For the
   size-specific kinds (Addr1..Addr16, Value1..Value16) the access size
   recorded in the error's extra info must equal the size encoded in the
   suppression kind.  Param and CoreMem suppressions additionally require
   the error string (e.g. the syscall parameter name) to match. */
static Bool mc_error_matches_suppression(Error* err, Supp* su)
{
   Int       su_szB;
   MC_Error* extra = VG_(get_error_extra)(err);
   ErrorKind ekind = VG_(get_error_kind )(err);

   switch (VG_(get_supp_kind)(su)) {
      case ParamSupp:
         return ((ekind == Err_RegParam || ekind == Err_MemParam)
              && VG_STREQ(VG_(get_error_string)(err), 
                          VG_(get_supp_string)(su)));

      case UserSupp:
         return (ekind == Err_User);

      case CoreMemSupp:
         return (ekind == Err_CoreMem
              && VG_STREQ(VG_(get_error_string)(err),
                          VG_(get_supp_string)(su)));

      case Value1Supp: su_szB = 1; goto value_case;
      case Value2Supp: su_szB = 2; goto value_case;
      case Value4Supp: su_szB = 4; goto value_case;
      case Value8Supp: su_szB = 8; goto value_case;
      case Value16Supp:su_szB =16; goto value_case;
      value_case:
         return (ekind == Err_Value && extra->Err.Value.szB == su_szB);

      case CondSupp:
         return (ekind == Err_Cond);

      case Addr1Supp: su_szB = 1; goto addr_case;
      case Addr2Supp: su_szB = 2; goto addr_case;
      case Addr4Supp: su_szB = 4; goto addr_case;
      case Addr8Supp: su_szB = 8; goto addr_case;
      case Addr16Supp:su_szB =16; goto addr_case;
      addr_case:
         return (ekind == Err_Addr && extra->Err.Addr.szB == su_szB);

      case JumpSupp:
         return (ekind == Err_Jump);

      case FreeSupp:
         /* "Free" suppressions cover both bad frees and
            allocator/deallocator mismatches. */
         return (ekind == Err_Free || ekind == Err_FreeMismatch);

      case OverlapSupp:
         return (ekind == Err_Overlap);

      case LeakSupp:
         return (ekind == Err_Leak);

      case MempoolSupp:
         return (ekind == Err_IllegalMempool);

      default:
         VG_(printf)("Error:\n"
                     "  unknown suppression type %d\n",
                     VG_(get_supp_kind)(su));
         VG_(tool_panic)("unknown suppression type in "
                         "MC_(error_matches_suppression)");
   }
}
3565
3566
/* Map an error kind to the suppression-kind name used in suppression
   files.  Note the many-to-one mappings: both Err_RegParam and
   Err_MemParam yield "Param", and both Err_Free and Err_FreeMismatch
   yield "Free".  Sized errors encode the access size in the name
   (e.g. "Addr4", "Value8"); unexpected sizes or kinds panic. */
static Char* mc_get_error_name ( Error* err )
{
   switch (VG_(get_error_kind)(err)) {
   case Err_RegParam:       return "Param";
   case Err_MemParam:       return "Param";
   case Err_User:           return "User";
   case Err_FreeMismatch:   return "Free";
   case Err_IllegalMempool: return "Mempool";
   case Err_Free:           return "Free";
   case Err_Jump:           return "Jump";
   case Err_CoreMem:        return "CoreMem";
   case Err_Overlap:        return "Overlap";
   case Err_Leak:           return "Leak";
   case Err_Cond:           return "Cond";
   case Err_Addr: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Addr.szB ) {
      case 1:               return "Addr1";
      case 2:               return "Addr2";
      case 4:               return "Addr4";
      case 8:               return "Addr8";
      case 16:              return "Addr16";
      default:              VG_(tool_panic)("unexpected size for Addr");
      }
   }
   case Err_Value: {
      MC_Error* extra = VG_(get_error_extra)(err);
      switch ( extra->Err.Value.szB ) {
      case 1:               return "Value1";
      case 2:               return "Value2";
      case 4:               return "Value4";
      case 8:               return "Value8";
      case 16:              return "Value16";
      default:              VG_(tool_panic)("unexpected size for Value");
      }
   }
   default:                 VG_(tool_panic)("get_error_name: unexpected type");
   }
}
3605
3606
/* When printing a machine-generated suppression for 'err', emit the
   extra line that Param suppressions require: the syscall parameter
   name, stored as the error string.  Other kinds need nothing extra. */
static void mc_print_extra_suppression_info ( Error* err )
{
   ErrorKind ekind = VG_(get_error_kind )(err);
   if (Err_RegParam == ekind || Err_MemParam == ekind) {
      VG_(printf)("   %s\n", VG_(get_error_string)(err));
   }
}
3613
3614
/*------------------------------------------------------------*/
/*--- Functions called directly from generated code:       ---*/
/*--- Load/store handlers.                                 ---*/
/*------------------------------------------------------------*/

/* Types:  LOADV32, LOADV16, LOADV8 are:
               UWord fn ( Addr a )
   so they return 32-bits on 32-bit machines and 64-bits on
   64-bit machines.  Addr has the same size as a host word.

   LOADV64 is always  ULong fn ( Addr a )

   Similarly for STOREV8, STOREV16, STOREV32, the supplied vbits
   are a UWord, and for STOREV64 they are a ULong.
*/
1 by Andrés Roldán
Import upstream version 2.1.1
3629
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
3630
/* If any part of '_a' indicated by the mask is 1, either
   '_a' is not naturally '_sz/8'-aligned, or it exceeds the range
   covered by the primary map.  (MASK may be referenced before its
   definition: both are macros, expanded only at the use site.) */
#define UNALIGNED_OR_HIGH(_a,_sz)   ((_a) & MASK((_sz>>3)))
#define MASK(_sz)   ( ~((0x10000-(_sz)) | ((N_PRIMARY_MAP-1) << 16)) )
3635
3636
3637
/* ------------------------ Size = 8 ------------------------ */
3638
3639
static INLINE
3640
ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
3641
{
3642
   UWord   sm_off16, vabits16;
3643
   SecMap* sm;
3644
3645
   PROF_EVENT(200, "mc_LOADV64");
3646
3647
#ifndef PERF_FAST_LOADV
3648
   return mc_LOADVn_slow( a, 64, isBigEndian );
3649
#else
3650
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
3651
      PROF_EVENT(201, "mc_LOADV64-slow1");
3652
      return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
3653
   }
3654
3655
   sm       = get_secmap_for_reading_low(a);
3656
   sm_off16 = SM_OFF_16(a);
3657
   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3658
3659
   // Handle common case quickly: a is suitably aligned, is mapped, and
3660
   // addressible.
3661
   // Convert V bits from compact memory form to expanded register form.
3662
   if (EXPECTED_TAKEN(vabits16 == VA_BITS16_DEFINED)) {
3663
      return V_BITS64_DEFINED;
3664
   } else if (EXPECTED_TAKEN(vabits16 == VA_BITS16_UNDEFINED)) {
3665
      return V_BITS64_UNDEFINED;
3666
   } else {
3667
      /* Slow case: the 8 bytes are not all-defined or all-undefined. */
3668
      PROF_EVENT(202, "mc_LOADV64-slow2");
3669
      return mc_LOADVn_slow( a, 64, isBigEndian );
3670
   }
3671
#endif
3672
}
3673
3674
/* JIT-called entry point: 64-bit big-endian V-bit load. */
VG_REGPARM(1) ULong MC_(helperc_LOADV64be) ( Addr a )
{
   return mc_LOADV64(a, True);
}
3678
/* JIT-called entry point: 64-bit little-endian V-bit load. */
VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a )
{
   return mc_LOADV64(a, False);
}
3682
3683
3684
static INLINE
3685
void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
3686
{
3687
   UWord   sm_off16, vabits16;
3688
   SecMap* sm;
3689
3690
   PROF_EVENT(210, "mc_STOREV64");
3691
3692
#ifndef PERF_FAST_STOREV
3693
   // XXX: this slow case seems to be marginally faster than the fast case!
3694
   // Investigate further.
3695
   mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
3696
#else
3697
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,64) )) {
3698
      PROF_EVENT(211, "mc_STOREV64-slow1");
3699
      mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
3700
      return;
3701
   }
3702
3703
   sm       = get_secmap_for_reading_low(a);
3704
   sm_off16 = SM_OFF_16(a);
3705
   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
3706
3707
   if (EXPECTED_TAKEN( !is_distinguished_sm(sm) && 
3708
                       (VA_BITS16_DEFINED   == vabits16 ||
3709
                        VA_BITS16_UNDEFINED == vabits16) ))
3710
   {
3711
      /* Handle common case quickly: a is suitably aligned, */
3712
      /* is mapped, and is addressible. */
3713
      // Convert full V-bits in register to compact 2-bit form.
3714
      if (V_BITS64_DEFINED == vbits64) {
3715
         ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
3716
      } else if (V_BITS64_UNDEFINED == vbits64) {
3717
         ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
3718
      } else {
3719
         /* Slow but general case -- writing partially defined bytes. */
3720
         PROF_EVENT(212, "mc_STOREV64-slow2");
3721
         mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
3722
      }
3723
   } else {
3724
      /* Slow but general case. */
3725
      PROF_EVENT(213, "mc_STOREV64-slow3");
3726
      mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
3727
   }
3728
#endif
3729
}
3730
3731
/* JIT-called entry point: 64-bit big-endian V-bit store.
   (REGPARM(1): only 'a' goes in a register; the ULong does not fit.) */
VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr a, ULong vbits64 )
{
   mc_STOREV64(a, vbits64, True);
}
3735
/* JIT-called entry point: 64-bit little-endian V-bit store. */
VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr a, ULong vbits64 )
{
   mc_STOREV64(a, vbits64, False);
}
3739
3740
3741
/* ------------------------ Size = 4 ------------------------ */
3742
3743
static INLINE
3744
UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
3745
{
3746
   UWord   sm_off, vabits8;
3747
   SecMap* sm;
3748
3749
   PROF_EVENT(220, "mc_LOADV32");
3750
3751
#ifndef PERF_FAST_LOADV
3752
   return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
3753
#else
3754
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
3755
      PROF_EVENT(221, "mc_LOADV32-slow1");
3756
      return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
3757
   }
3758
3759
   sm      = get_secmap_for_reading_low(a);
3760
   sm_off  = SM_OFF(a);
3761
   vabits8 = sm->vabits8[sm_off];
3762
3763
   // Handle common case quickly: a is suitably aligned, is mapped, and the
3764
   // entire word32 it lives in is addressible.
3765
   // Convert V bits from compact memory form to expanded register form.
3766
   // For 64-bit platforms, set the high 32 bits of retval to 1 (undefined).
3767
   // Almost certainly not necessary, but be paranoid.
3768
   if (EXPECTED_TAKEN(vabits8 == VA_BITS8_DEFINED)) {
3769
      return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
3770
   } else if (EXPECTED_TAKEN(vabits8 == VA_BITS8_UNDEFINED)) {
3771
      return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
3772
   } else {
3773
      /* Slow case: the 4 bytes are not all-defined or all-undefined. */
3774
      PROF_EVENT(222, "mc_LOADV32-slow2");
3775
      return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
3776
   }
3777
#endif
3778
}
3779
3780
/* JIT-called entry point: 32-bit big-endian V-bit load. */
VG_REGPARM(1) UWord MC_(helperc_LOADV32be) ( Addr a )
{
   return mc_LOADV32(a, True);
}
3784
/* JIT-called entry point: 32-bit little-endian V-bit load. */
VG_REGPARM(1) UWord MC_(helperc_LOADV32le) ( Addr a )
{
   return mc_LOADV32(a, False);
}
3788
3789
3790
static INLINE
3791
void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
3792
{
3793
   UWord   sm_off, vabits8;
3794
   SecMap* sm;
3795
3796
   PROF_EVENT(230, "mc_STOREV32");
3797
3798
#ifndef PERF_FAST_STOREV
3799
   mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
3800
#else
3801
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,32) )) {
3802
      PROF_EVENT(231, "mc_STOREV32-slow1");
3803
      mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
3804
      return;
3805
   }
3806
3807
   sm      = get_secmap_for_reading_low(a);
3808
   sm_off  = SM_OFF(a);
3809
   vabits8 = sm->vabits8[sm_off];
3810
3811
//---------------------------------------------------------------------------
3812
#if 1
3813
   // Cleverness:  sometimes we don't have to write the shadow memory at
3814
   // all, if we can tell that what we want to write is the same as what is
3815
   // already there.
3816
   if (V_BITS32_DEFINED == vbits32) {
3817
      if (vabits8 == (UInt)VA_BITS8_DEFINED) {
3818
         return;
3819
      } else if (!is_distinguished_sm(sm) && VA_BITS8_UNDEFINED == vabits8) {
3820
         sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
3821
      } else {
3822
         // not defined/undefined, or distinguished and changing state
3823
         PROF_EVENT(232, "mc_STOREV32-slow2");
3824
         mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
3825
      }
3826
   } else if (V_BITS32_UNDEFINED == vbits32) {
3827
      if (vabits8 == (UInt)VA_BITS8_UNDEFINED) {
3828
         return;
3829
      } else if (!is_distinguished_sm(sm) && VA_BITS8_DEFINED == vabits8) {
3830
         sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
3831
      } else {
3832
         // not defined/undefined, or distinguished and changing state
3833
         PROF_EVENT(233, "mc_STOREV32-slow3");
3834
         mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
3835
      }
3836
   } else {
3837
      // Partially defined word
3838
      PROF_EVENT(234, "mc_STOREV32-slow4");
3839
      mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
3840
   }
3841
//---------------------------------------------------------------------------
3842
#else
3843
   if (EXPECTED_TAKEN( !is_distinguished_sm(sm) && 
3844
                       (VA_BITS8_DEFINED   == vabits8 ||
3845
                        VA_BITS8_UNDEFINED == vabits8) ))
3846
   {
3847
      /* Handle common case quickly: a is suitably aligned, */
3848
      /* is mapped, and is addressible. */
3849
      // Convert full V-bits in register to compact 2-bit form.
3850
      if (V_BITS32_DEFINED == vbits32) {
3851
         sm->vabits8[sm_off] = VA_BITS8_DEFINED;
3852
      } else if (V_BITS32_UNDEFINED == vbits32) {
3853
         sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
3854
      } else {
3855
         /* Slow but general case -- writing partially defined bytes. */
3856
         PROF_EVENT(232, "mc_STOREV32-slow2");
3857
         mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
3858
      }
3859
   } else {
3860
      /* Slow but general case. */
3861
      PROF_EVENT(233, "mc_STOREV32-slow3");
3862
      mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
3863
   }
3864
#endif
3865
//---------------------------------------------------------------------------
3866
#endif
3867
}
3868
3869
/* JIT-called entry point: 32-bit big-endian V-bit store. */
VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr a, UWord vbits32 )
{
   mc_STOREV32(a, vbits32, True);
}
3873
/* JIT-called entry point: 32-bit little-endian V-bit store. */
VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr a, UWord vbits32 )
{
   mc_STOREV32(a, vbits32, False);
}
3877
3878
3879
/* ------------------------ Size = 2 ------------------------ */
3880
3881
static INLINE
3882
UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
3883
{
3884
   UWord   sm_off, vabits8;
3885
   SecMap* sm;
3886
3887
   PROF_EVENT(240, "mc_LOADV16");
3888
3889
#ifndef PERF_FAST_LOADV
3890
   return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
3891
#else
3892
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
3893
      PROF_EVENT(241, "mc_LOADV16-slow1");
3894
      return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
3895
   }
3896
3897
   sm      = get_secmap_for_reading_low(a);
3898
   sm_off  = SM_OFF(a);
3899
   vabits8 = sm->vabits8[sm_off];
3900
   // Handle common case quickly: a is suitably aligned, is mapped, and is
3901
   // addressible.
3902
   // Convert V bits from compact memory form to expanded register form
3903
   if      (vabits8 == VA_BITS8_DEFINED  ) { return V_BITS16_DEFINED;   }
3904
   else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS16_UNDEFINED; }
3905
   else {
3906
      // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
3907
      // the two sub-bytes.
3908
      UChar vabits4 = extract_vabits4_from_vabits8(a, vabits8);
3909
      if      (vabits4 == VA_BITS4_DEFINED  ) { return V_BITS16_DEFINED;   }
3910
      else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
3911
      else {
3912
         /* Slow case: the two bytes are not all-defined or all-undefined. */
3913
         PROF_EVENT(242, "mc_LOADV16-slow2");
3914
         return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
3915
      }
3916
   }
3917
#endif
3918
}
3919
3920
/* JIT-called entry point: 16-bit big-endian V-bit load. */
VG_REGPARM(1) UWord MC_(helperc_LOADV16be) ( Addr a )
{
   return mc_LOADV16(a, True);
}
3924
/* JIT-called entry point: 16-bit little-endian V-bit load. */
VG_REGPARM(1) UWord MC_(helperc_LOADV16le) ( Addr a )
{
   return mc_LOADV16(a, False);
}
3928
3929
3930
static INLINE
3931
void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
3932
{
3933
   UWord   sm_off, vabits8;
3934
   SecMap* sm;
3935
3936
   PROF_EVENT(250, "mc_STOREV16");
3937
3938
#ifndef PERF_FAST_STOREV
3939
   mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
3940
#else
3941
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,16) )) {
3942
      PROF_EVENT(251, "mc_STOREV16-slow1");
3943
      mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
3944
      return;
3945
   }
3946
3947
   sm      = get_secmap_for_reading_low(a);
3948
   sm_off  = SM_OFF(a);
3949
   vabits8 = sm->vabits8[sm_off];
3950
   if (EXPECTED_TAKEN( !is_distinguished_sm(sm) && 
3951
                       (VA_BITS8_DEFINED   == vabits8 ||
3952
                        VA_BITS8_UNDEFINED == vabits8) ))
3953
   {
3954
      /* Handle common case quickly: a is suitably aligned, */
3955
      /* is mapped, and is addressible. */
3956
      // Convert full V-bits in register to compact 2-bit form.
3957
      if (V_BITS16_DEFINED == vbits16) {
3958
         insert_vabits4_into_vabits8( a, VA_BITS4_DEFINED ,
3959
                                      &(sm->vabits8[sm_off]) );
3960
      } else if (V_BITS16_UNDEFINED == vbits16) {
3961
         insert_vabits4_into_vabits8( a, VA_BITS4_UNDEFINED,
3962
                                      &(sm->vabits8[sm_off]) );
3963
      } else {
3964
         /* Slow but general case -- writing partially defined bytes. */
3965
         PROF_EVENT(252, "mc_STOREV16-slow2");
3966
         mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
3967
      }
3968
   } else {
3969
      /* Slow but general case. */
3970
      PROF_EVENT(253, "mc_STOREV16-slow3");
3971
      mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
3972
   }
3973
#endif
3974
}
3975
3976
/* JIT-called entry point: 16-bit big-endian V-bit store. */
VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr a, UWord vbits16 )
{
   mc_STOREV16(a, vbits16, True);
}
3980
/* JIT-called entry point: 16-bit little-endian V-bit store. */
VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr a, UWord vbits16 )
{
   mc_STOREV16(a, vbits16, False);
}
3984
3985
3986
/* ------------------------ Size = 1 ------------------------ */
3987
/* Note: endianness is irrelevant for size == 1 */
3988
3989
/* JIT-called entry point: load the V bits for the single byte at 'a'.
   Endianness is irrelevant at this size, so there is no be/le pair.
   If the covering vabits8 byte is not uniform, the byte's own 2-bit
   group is inspected before deferring to the slow path. */
VG_REGPARM(1)
UWord MC_(helperc_LOADV8) ( Addr a )
{
   UWord   sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(260, "mc_LOADV8");

#ifndef PERF_FAST_LOADV
   return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
#else
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
      PROF_EVENT(261, "mc_LOADV8-slow1");
      return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
   }

   sm      = get_secmap_for_reading_low(a);
   sm_off  = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];
   // Convert V bits from compact memory form to expanded register form
   // Handle common case quickly: a is mapped, and the entire
   // word32 it lives in is addressible.
   if      (vabits8 == VA_BITS8_DEFINED  ) { return V_BITS8_DEFINED;   }
   else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS8_UNDEFINED; }
   else {
      // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
      // the single byte.
      UChar vabits2 = extract_vabits2_from_vabits8(a, vabits8);
      if      (vabits2 == VA_BITS2_DEFINED  ) { return V_BITS8_DEFINED;   }
      else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
      else {
         /* Slow case: the byte is not all-defined or all-undefined. */
         PROF_EVENT(262, "mc_LOADV8-slow2");
         return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
      }
   }
#endif
}
4027
4028
4029
/* JIT-called entry point: store the V bits 'vbits8' for the single byte
   at 'a'.  Endianness is irrelevant at this size.  The fast path also
   accepts a non-uniform vabits8 as long as the target byte itself is
   accessible (its 2-bit group is not NOACCESS). */
VG_REGPARM(2)
void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
{
   UWord   sm_off, vabits8;
   SecMap* sm;

   PROF_EVENT(270, "mc_STOREV8");

#ifndef PERF_FAST_STOREV
   mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
#else
   if (EXPECTED_NOT_TAKEN( UNALIGNED_OR_HIGH(a,8) )) {
      PROF_EVENT(271, "mc_STOREV8-slow1");
      mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
      return;
   }

   sm      = get_secmap_for_reading_low(a);
   sm_off  = SM_OFF(a);
   vabits8 = sm->vabits8[sm_off];
   if (EXPECTED_TAKEN
         ( !is_distinguished_sm(sm) &&
           ( (VA_BITS8_DEFINED == vabits8 || VA_BITS8_UNDEFINED == vabits8)
          || (VA_BITS2_NOACCESS != extract_vabits2_from_vabits8(a, vabits8))
           )
         )
      )
   {
      /* Handle common case quickly: a is mapped, the entire word32 it
         lives in is addressible. */
      // Convert full V-bits in register to compact 2-bit form.
      if (V_BITS8_DEFINED == vbits8) {
         insert_vabits2_into_vabits8( a, VA_BITS2_DEFINED,
                                       &(sm->vabits8[sm_off]) );
      } else if (V_BITS8_UNDEFINED == vbits8) {
         insert_vabits2_into_vabits8( a, VA_BITS2_UNDEFINED,
                                       &(sm->vabits8[sm_off]) );
      } else {
         /* Slow but general case -- writing partially defined bytes. */
         PROF_EVENT(272, "mc_STOREV8-slow2");
         mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
      }
   } else {
      /* Slow but general case. */
      PROF_EVENT(273, "mc_STOREV8-slow3");
      mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
   }
#endif
}
4078
4079
4080
/*------------------------------------------------------------*/
/*--- Functions called directly from generated code:       ---*/
/*--- Value-check failure handlers.                        ---*/
/*------------------------------------------------------------*/
4084
1 by Andrés Roldán
Import upstream version 2.1.1
4085
void MC_(helperc_value_check0_fail) ( void )
4086
{
4087
   mc_record_cond_error ( VG_(get_running_tid)() );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4088
}
1 by Andrés Roldán
Import upstream version 2.1.1
4089
4090
void MC_(helperc_value_check1_fail) ( void )
4091
{
4092
   mc_record_value_error ( VG_(get_running_tid)(), 1 );
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4093
}
1 by Andrés Roldán
Import upstream version 2.1.1
4094
4095
void MC_(helperc_value_check4_fail) ( void )
4096
{
4097
   mc_record_value_error ( VG_(get_running_tid)(), 4 );
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4098
}
4099
4100
void MC_(helperc_value_check8_fail) ( void )
4101
{
4102
   mc_record_value_error ( VG_(get_running_tid)(), 8 );
4103
}
4104
4105
/* Called from generated code: use of an undefined value of arbitrary
   size 'sz' (sizes without a dedicated checkN helper). */
VG_REGPARM(1) void MC_(helperc_complain_undef) ( HWord sz )
{
   mc_record_value_error ( VG_(get_running_tid)(), (Int)sz );
}
1 by Andrés Roldán
Import upstream version 2.1.1
4109
4110
4111
/*------------------------------------------------------------*/
/*--- Metadata get/set functions, for client requests.     ---*/
/*------------------------------------------------------------*/

// Nb: this expands the V+A bits out into register-form V bits, even though
// they're in memory.  This is for backward compatibility, and because it's
// probably what the user wants.

/* Copy Vbits from/to address 'a'. Returns: 1 == OK, 2 == alignment
   error [no longer used], 3 == addressing error. */
/* Nb: We used to issue various definedness/addressability errors from here,
   but we took them out because they ranged from not-very-helpful to
   downright annoying, and they complicated the error data structures. */
4124
static Int mc_get_or_set_vbits_for_client ( 
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4125
   ThreadId tid,
1 by Andrés Roldán
Import upstream version 2.1.1
4126
   Addr a, 
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4127
   Addr vbits, 
4128
   SizeT szB, 
4129
   Bool setting /* True <=> set vbits,  False <=> get vbits */ 
1 by Andrés Roldán
Import upstream version 2.1.1
4130
)
4131
{
4132
   SizeT i;
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4133
   Bool  ok;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4134
   UChar vbits8;
4135
4136
   /* Check that arrays are addressible before doing any getting/setting. */
4137
   for (i = 0; i < szB; i++) {
4138
      if (VA_BITS2_NOACCESS == get_vabits2(a + i) ||
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4139
          VA_BITS2_NOACCESS == get_vabits2(vbits + i)) {
4140
         return 3;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4141
      }
4142
   }
4143
4144
   /* Do the copy */
1 by Andrés Roldán
Import upstream version 2.1.1
4145
   if (setting) {
4146
      /* setting */
4147
      for (i = 0; i < szB; i++) {
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4148
         ok = set_vbits8(a + i, ((UChar*)vbits)[i]);
4149
         tl_assert(ok);
4150
      }
1 by Andrés Roldán
Import upstream version 2.1.1
4151
   } else {
4152
      /* getting */
4153
      for (i = 0; i < szB; i++) {
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4154
         ok = get_vbits8(a + i, &vbits8);
4155
         tl_assert(ok);
4156
         ((UChar*)vbits)[i] = vbits8;
4157
      }
1 by Andrés Roldán
Import upstream version 2.1.1
4158
      // The bytes in vbits[] have now been set, so mark them as such.
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4159
      MC_(make_mem_defined)(vbits, szB);
4160
   }
1 by Andrés Roldán
Import upstream version 2.1.1
4161
4162
   return 1;
4163
}
4164
4165
4166
/*------------------------------------------------------------*/
4167
/*--- Detecting leaked (unreachable) malloc'd blocks.      ---*/
4168
/*------------------------------------------------------------*/
4169
4170
/* For the memory leak detector, say whether an entire 64k chunk of
4171
   address space is possibly in use, or not.  If in doubt return
4172
   True.
4173
*/
4174
static
4175
Bool mc_is_within_valid_secondary ( Addr a )
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4176
{
1 by Andrés Roldán
Import upstream version 2.1.1
4177
   SecMap* sm = maybe_get_secmap_for ( a );
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4178
   if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4179
       || in_ignored_range(a)) {
4180
      /* Definitely not in use. */
1 by Andrés Roldán
Import upstream version 2.1.1
4181
      return False;
4182
   } else {
4183
      return True;
4184
   }
4185
}
4186
4187
4188
/* For the memory leak detector, say whether or not a given word
4189
   address is to be regarded as valid. */
4190
static
4191
Bool mc_is_valid_aligned_word ( Addr a )
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4192
{
1 by Andrés Roldán
Import upstream version 2.1.1
4193
   tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4194
   if (sizeof(UWord) == 4) {
4195
      tl_assert(VG_IS_4_ALIGNED(a));
4196
   } else {
4197
      tl_assert(VG_IS_8_ALIGNED(a));
4198
   }
4199
   if (is_mem_defined( a, sizeof(UWord), NULL ) == MC_Ok
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4200
       && !in_ignored_range(a)) {
4201
      return True;
1 by Andrés Roldán
Import upstream version 2.1.1
4202
   } else {
4203
      return False;
4204
   }
4205
}
4206
4207
4208
/* Leak detector for this tool.  We don't actually do anything, merely
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4209
   run the generic leak detector with suitable parameters for this
1 by Andrés Roldán
Import upstream version 2.1.1
4210
   tool. */
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4211
/* Run the generic leak detector with Memcheck-specific callbacks that
   decide which secondaries and which words are worth scanning. */
static void mc_detect_memory_leaks ( ThreadId tid, LeakCheckMode mode )
{
   MC_(do_detect_memory_leaks) ( 
      tid, 
      mode, 
      mc_is_within_valid_secondary, 
      mc_is_valid_aligned_word 
   );
}
4220
4221
4222
/*------------------------------------------------------------*/
4223
/*--- Initialisation                                       ---*/
4224
/*------------------------------------------------------------*/
4225
4226
static void init_shadow_memory ( void )
4227
{
4228
   Int     i;
4229
   SecMap* sm;
4230
4231
   tl_assert(V_BIT_UNDEFINED   == 1);
4232
   tl_assert(V_BIT_DEFINED     == 0);
4233
   tl_assert(V_BITS8_UNDEFINED == 0xFF);
4234
   tl_assert(V_BITS8_DEFINED   == 0);
4235
4236
   /* Build the 3 distinguished secondaries */
4237
   sm = &sm_distinguished[SM_DIST_NOACCESS];
4238
   for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_NOACCESS;
4239
4240
   sm = &sm_distinguished[SM_DIST_UNDEFINED];
4241
   for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_UNDEFINED;
4242
4243
   sm = &sm_distinguished[SM_DIST_DEFINED];
4244
   for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_DEFINED;
4245
4246
   /* Set up the primary map. */
4247
   /* These entries gradually get overwritten as the used address
4248
      space expands. */
4249
   for (i = 0; i < N_PRIMARY_MAP; i++)
4250
      primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
4251
4252
   /* Auxiliary primary maps */
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4253
   init_auxmap_L1_L2();
4254
4255
   /* auxmap_size = auxmap_used = 0; 
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4256
      no ... these are statically initialised */
4257
4258
   /* Secondary V bit table */
4259
   secVBitTable = createSecVBitTable();
4260
}
4261
4262
4263
/*------------------------------------------------------------*/
4264
/*--- Sanity check machinery (permanently engaged)         ---*/
4265
/*------------------------------------------------------------*/
4266
4267
static Bool mc_cheap_sanity_check ( void )
4268
{
1 by Andrés Roldán
Import upstream version 2.1.1
4269
   /* nothing useful we can rapidly check */
4270
   n_sanity_cheap++;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4271
   PROF_EVENT(490, "cheap_sanity_check");
4272
   return True;
4273
}
4274
4275
static Bool mc_expensive_sanity_check ( void )
4276
{
4277
   Int     i;
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4278
   Word    n_secmaps_found;
4279
   SecMap* sm;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4280
   HChar*  errmsg;
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4281
   Bool    bad = False;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4282
4283
   if (0) VG_(printf)("expensive sanity check\n");
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4284
   if (0) return True;
4285
4286
   n_sanity_expensive++;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4287
   PROF_EVENT(491, "expensive_sanity_check");
4288
4289
   /* Check that the 3 distinguished SMs are still as they should be. */
4290
4291
   /* Check noaccess DSM. */
4292
   sm = &sm_distinguished[SM_DIST_NOACCESS];
4293
   for (i = 0; i < SM_CHUNKS; i++)
4294
      if (sm->vabits8[i] != VA_BITS8_NOACCESS)
4295
         bad = True;
4296
4297
   /* Check undefined DSM. */
4298
   sm = &sm_distinguished[SM_DIST_UNDEFINED];
4299
   for (i = 0; i < SM_CHUNKS; i++)
4300
      if (sm->vabits8[i] != VA_BITS8_UNDEFINED)
4301
         bad = True;
4302
4303
   /* Check defined DSM. */
4304
   sm = &sm_distinguished[SM_DIST_DEFINED];
4305
   for (i = 0; i < SM_CHUNKS; i++)
4306
      if (sm->vabits8[i] != VA_BITS8_DEFINED)
4307
         bad = True;
4308
4309
   if (bad) {
4310
      VG_(printf)("memcheck expensive sanity: "
4311
                  "distinguished_secondaries have changed\n");
4312
      return False;
4313
   }
4314
4315
   /* If we're not checking for undefined value errors, the secondary V bit
4316
    * table should be empty. */
4317
   if (!MC_(clo_undef_value_errors)) {
4318
      if (0 != VG_(OSetGen_Size)(secVBitTable))
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4319
         return False;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4320
   }
4321
4322
   /* check the auxiliary maps, very thoroughly */
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4323
   n_secmaps_found = 0;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4324
   errmsg = check_auxmap_L1_L2_sanity( &n_secmaps_found );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4325
   if (errmsg) {
4326
      VG_(printf)("memcheck expensive sanity, auxmaps:\n\t%s", errmsg);
4327
      return False;
4328
   }
4329
4330
   /* n_secmaps_found is now the number referred to by the auxiliary
4331
      primary map.  Now add on the ones referred to by the main
4332
      primary map. */
4333
   for (i = 0; i < N_PRIMARY_MAP; i++) {
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4334
      if (primary_map[i] == NULL) {
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4335
         bad = True;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4336
      } else {
4337
         if (!is_distinguished_sm(primary_map[i]))
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4338
            n_secmaps_found++;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4339
      }
4340
   }
4341
4342
   /* check that the number of secmaps issued matches the number that
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4343
      are reachable (iow, no secmap leaks) */
4344
   if (n_secmaps_found != (n_issued_SMs - n_deissued_SMs))
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4345
      bad = True;
4346
4347
   if (bad) {
4348
      VG_(printf)("memcheck expensive sanity: "
4349
                  "apparent secmap leakage\n");
4350
      return False;
4351
   }
4352
4353
   if (bad) {
4354
      VG_(printf)("memcheck expensive sanity: "
4355
                  "auxmap covers wrong address space\n");
4356
      return False;
4357
   }
4358
4359
   /* there is only one pointer to each secmap (expensive) */
4360
4361
   return True;
4362
}
4363
1 by Andrés Roldán
Import upstream version 2.1.1
4364
/*------------------------------------------------------------*/
4365
/*--- Command line args                                    ---*/
4366
/*------------------------------------------------------------*/
4367
4368
/* Defaults for memcheck's command-line options; overridden by
   mc_process_cmd_line_options() below. */
Bool          MC_(clo_partial_loads_ok)       = False;          /* --partial-loads-ok */
Long          MC_(clo_freelist_vol)           = 10*1000*1000LL; /* --freelist-vol */
LeakCheckMode MC_(clo_leak_check)             = LC_Summary;     /* --leak-check */
VgRes         MC_(clo_leak_resolution)        = Vg_LowRes;      /* --leak-resolution */
Bool          MC_(clo_show_reachable)         = False;          /* --show-reachable */
Bool          MC_(clo_workaround_gcc296_bugs) = False;          /* --workaround-gcc296-bugs */
Bool          MC_(clo_undef_value_errors)     = True;           /* --undef-value-errors */
/* Fill values are parsed as 0x00..0xFF, so -1 marks "option not
   given" (no filling) -- confirm against the allocator wrappers. */
Int           MC_(clo_malloc_fill)            = -1;             /* --malloc-fill */
Int           MC_(clo_free_fill)              = -1;             /* --free-fill */
4377
1 by Andrés Roldán
Import upstream version 2.1.1
4378
/* Parse one memcheck-specific command-line option.  Returns True if
   'arg' was recognised and applied to the MC_(clo_*) globals above,
   False on a malformed value.  Unrecognised options fall through to
   the replacement-malloc option handler.  NOTE(review): the VG_*_CLO
   macros appear to expand to 'if (...)' statements that this chain
   glues together with bare 'else', which is why the layout below
   looks unusual -- do not reformat casually. */
static Bool mc_process_cmd_line_options(Char* arg)
{
	VG_BOOL_CLO(arg, "--partial-loads-ok",      MC_(clo_partial_loads_ok))
   else VG_BOOL_CLO(arg, "--show-reachable",        MC_(clo_show_reachable))
   else VG_BOOL_CLO(arg, "--workaround-gcc296-bugs",MC_(clo_workaround_gcc296_bugs))

   else VG_BOOL_CLO(arg, "--undef-value-errors",    MC_(clo_undef_value_errors))
   
   else VG_BNUM_CLO(arg, "--freelist-vol",  MC_(clo_freelist_vol), 
                                            0, 10*1000*1000*1000LL)
   
   else if (VG_CLO_STREQ(arg, "--leak-check=no"))
      MC_(clo_leak_check) = LC_Off;
   else if (VG_CLO_STREQ(arg, "--leak-check=summary"))
      MC_(clo_leak_check) = LC_Summary;
   else if (VG_CLO_STREQ(arg, "--leak-check=yes") ||
	    VG_CLO_STREQ(arg, "--leak-check=full"))
      MC_(clo_leak_check) = LC_Full;

   else if (VG_CLO_STREQ(arg, "--leak-resolution=low"))
      MC_(clo_leak_resolution) = Vg_LowRes;
   else if (VG_CLO_STREQ(arg, "--leak-resolution=med"))
      MC_(clo_leak_resolution) = Vg_MedRes;
   else if (VG_CLO_STREQ(arg, "--leak-resolution=high"))
      MC_(clo_leak_resolution) = Vg_HighRes;

   else if (VG_CLO_STREQN(16,arg,"--ignore-ranges=")) {
      /* Parse the ranges, then sanity-check each one: non-empty and
         not suspiciously large. */
      Int    i;
      UChar* txt = (UChar*)(arg+16);
      Bool   ok  = parse_ignore_ranges(txt);
      if (!ok)
        return False;
      tl_assert(ignoreRanges.used >= 0);
      tl_assert(ignoreRanges.used < M_IGNORE_RANGES);
      for (i = 0; i < ignoreRanges.used; i++) {
         Addr s = ignoreRanges.start[i];
         Addr e = ignoreRanges.end[i];
         Addr limit = 0x4000000; /* 64M - entirely arbitrary limit */
         if (e <= s) {
            VG_(message)(Vg_DebugMsg, 
               "ERROR: --ignore-ranges: end <= start in range:");
            VG_(message)(Vg_DebugMsg, 
               "       0x%lx-0x%lx", s, e);
            return False;
         }
         if (e - s > limit) {
            VG_(message)(Vg_DebugMsg, 
               "ERROR: --ignore-ranges: suspiciously large range:");
            VG_(message)(Vg_DebugMsg, 
               "       0x%lx-0x%lx (size %ld)", s, e, (UWord)(e-s));
            return False;
	 }
      }
   }

   else VG_BHEX_CLO(arg, "--malloc-fill", MC_(clo_malloc_fill), 0x00, 0xFF)
   else VG_BHEX_CLO(arg, "--free-fill",   MC_(clo_free_fill), 0x00, 0xFF)

   else
      /* Not ours: maybe the replacement-malloc machinery knows it. */
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
4441
4442
/* Print memcheck's contribution to the --help output, then the
   options shared with the replacement-malloc machinery. */
static void mc_print_usage(void)
{  
   VG_(printf)(
"    --leak-check=no|summary|full     search for memory leaks at exit?  [summary]\n"
"    --leak-resolution=low|med|high   how much bt merging in leak check [low]\n"
"    --show-reachable=no|yes          show reachable blocks in leak check? [no]\n"
"    --undef-value-errors=no|yes      check for undefined value errors [yes]\n"
"    --partial-loads-ok=no|yes        too hard to explain here; see manual [no]\n"
"    --freelist-vol=<number>          volume of freed blocks queue [10000000]\n"
"    --workaround-gcc296-bugs=no|yes  self explanatory [no]\n"
"    --ignore-ranges=0xPP-0xQQ[,0xRR-0xSS]   assume given addresses are OK\n"
"    --malloc-fill=<hexnumber>        fill malloc'd areas with given value\n"
"    --free-fill=<hexnumber>          fill free'd areas with given value\n"
   );
   VG_(replacement_malloc_print_usage)();
}
4458
4459
/* Print memcheck's contribution to the debug-options help text.
   Memcheck has no debug options of its own; only the
   replacement-malloc ones apply. */
static void mc_print_debug_usage(void)
{  
   VG_(replacement_malloc_print_debug_usage)();
}
4463
4464
1 by Andrés Roldán
Import upstream version 2.1.1
4465
/*------------------------------------------------------------*/
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4466
/*--- Client requests                                      ---*/
4467
/*------------------------------------------------------------*/
4468
4469
/* Client block management:
  
   This is managed as an expanding array of client block descriptors.
   Indices of live descriptors are issued to the client, so it can ask
   to free them later.  Therefore we cannot slide live entries down
   over dead ones.  Instead we must use free/inuse flags and scan for
   an empty slot at allocation time.  This in turn means allocation is
   relatively expensive, so we hope this does not happen too often. 

   An unused block has start == size == 0
*/

/* One client-defined block, registered via VG_USERREQ__CREATE_BLOCK. */
typedef
   struct {
      Addr          start;   /* base address supplied by the client */
      SizeT         size;    /* size in bytes; start==size==0 => free slot */
      ExeContext*   where;   /* where the block was registered */
      Char*            desc; /* client-supplied description (VG_(strdup)'d) */
   } 
   CGenBlock;

/* This subsystem is self-initialising. */
static UInt       cgb_size = 0;     /* Capacity of the cgbs array. */
static UInt       cgb_used = 0;     /* Slots in use (including freed ones). */
static CGenBlock* cgbs     = NULL;  /* The descriptor array itself. */

/* Stats for this subsystem. */
static UInt cgb_used_MAX = 0;   /* Max in use. */
static UInt cgb_allocs   = 0;   /* Number of allocs. */
static UInt cgb_discards = 0;   /* Number of discards. */
static UInt cgb_search   = 0;   /* Number of searches. */
4500
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4501
4502
static
4503
Int alloc_client_block ( void )
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4504
{
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4505
   UInt       i, sz_new;
4506
   CGenBlock* cgbs_new;
4507
4508
   cgb_allocs++;
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4509
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4510
   for (i = 0; i < cgb_used; i++) {
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4511
      cgb_search++;
4512
      if (cgbs[i].start == 0 && cgbs[i].size == 0)
4513
         return i;
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4514
   }
4515
4516
   /* Not found.  Try to allocate one at the end. */
4517
   if (cgb_used < cgb_size) {
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4518
      cgb_used++;
4519
      return cgb_used-1;
4520
   }
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4521
4522
   /* Ok, we have to allocate a new one. */
4523
   tl_assert(cgb_used == cgb_size);
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4524
   sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
4525
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4526
   cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
4527
   for (i = 0; i < cgb_used; i++) 
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4528
      cgbs_new[i] = cgbs[i];
4529
4530
   if (cgbs != NULL)
4531
      VG_(free)( cgbs );
4532
   cgbs = cgbs_new;
4533
4534
   cgb_size = sz_new;
4535
   cgb_used++;
4536
   if (cgb_used > cgb_used_MAX)
4537
      cgb_used_MAX = cgb_used;
4538
   return cgb_used-1;
4539
}
1.2.1 by Andrés Roldán
Import upstream version 2.4.0
4540
4541
4542
/* Dump the client-block subsystem counters (allocs, discards, peak
   usage, slot searches) to the debug log. */
static void show_client_block_stats ( void )
{
   VG_(message)(Vg_DebugMsg, 
      "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
      cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search 
   );
}
4549
4550
/* If address 'a' lies inside any client-defined block, fill in *ai
   with a description of it and return True; otherwise return False.
   When the block's start address is also registered as a mempool, the
   address is preferentially described as a chunk of that pool, then
   as the pool itself, and only otherwise as a plain user block. */
static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
{
   UInt i;

   /* Perhaps it's a general block ? */
   for (i = 0; i < cgb_used; i++) {
      /* Skip free slots (start == size == 0). */
      if (cgbs[i].start == 0 && cgbs[i].size == 0) 
         continue;
      // Use zero as the redzone for client blocks.
      if (VG_(addr_is_in_block)(a, cgbs[i].start, cgbs[i].size, 0)) {
         /* OK - maybe it's a mempool, too? */
         MC_Mempool* mp = VG_(HT_lookup)(MC_(mempool_list),
                                          (UWord)cgbs[i].start);
         if (mp != NULL) {
            /* It is a pool; does 'a' fall inside one of its chunks? */
            if (mp->chunks != NULL) {
               MC_Chunk* mc;
               VG_(HT_ResetIter)(mp->chunks);
               while ( (mc = VG_(HT_Next)(mp->chunks)) ) {
                  if (addr_is_in_MC_Chunk(mc, a)) {
                     ai->tag = Addr_Block;
                     ai->Addr.Block.block_kind = Block_MempoolChunk;
                     ai->Addr.Block.block_desc = "block";
                     ai->Addr.Block.block_szB  = mc->szB;
                     ai->Addr.Block.rwoffset   = (Int)a - (Int)mc->data;
                     ai->Addr.Block.lastchange = mc->where;
                     return True;
                  }
               }
            }
            /* Inside the pool but not inside any chunk of it. */
            ai->tag = Addr_Block;
            ai->Addr.Block.block_kind = Block_Mempool;
            ai->Addr.Block.block_desc = "mempool";
            ai->Addr.Block.block_szB  = cgbs[i].size;
            ai->Addr.Block.rwoffset   = (Int)(a) - (Int)(cgbs[i].start);
            ai->Addr.Block.lastchange = cgbs[i].where;
            return True;
         }
         /* Not a mempool: an ordinary user-described block. */
         ai->tag = Addr_Block;
         ai->Addr.Block.block_kind = Block_UserG;
         ai->Addr.Block.block_desc = cgbs[i].desc;
         ai->Addr.Block.block_szB  = cgbs[i].size;
         ai->Addr.Block.rwoffset   = (Int)(a) - (Int)(cgbs[i].start);
         ai->Addr.Block.lastchange = cgbs[i].where;
         return True;
      }
   }
   return False;
}
4598
4599
/* Handle the client requests memcheck understands.  arg[0] is the
   request code, arg[1..] its operands; *ret receives the value handed
   back to the client.  Returns True iff the request was consumed.
   Requests not addressed to 'MC' (and not one of the tool-independent
   heap/mempool requests listed below) are declined up front so other
   handlers can see them. */
static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
{
   Int   i;
   Bool  ok;
   Addr  bad_addr;

   if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
    && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
    && VG_USERREQ__FREELIKE_BLOCK   != arg[0]
    && VG_USERREQ__CREATE_MEMPOOL   != arg[0]
    && VG_USERREQ__DESTROY_MEMPOOL  != arg[0]
    && VG_USERREQ__MEMPOOL_ALLOC    != arg[0]
    && VG_USERREQ__MEMPOOL_FREE     != arg[0]
    && VG_USERREQ__MEMPOOL_TRIM     != arg[0]
    && VG_USERREQ__MOVE_MEMPOOL     != arg[0]
    && VG_USERREQ__MEMPOOL_CHANGE   != arg[0]
    && VG_USERREQ__MEMPOOL_EXISTS   != arg[0])
      return False;

   switch (arg[0]) {
      /* arg[1] = start, arg[2] = len; *ret is 0 on success, else the
         first offending address. */
      case VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE:
         ok = is_mem_addressable ( arg[1], arg[2], &bad_addr );
         if (!ok)
            mc_record_user_error ( tid, bad_addr, /*isAddrErr*/True );
         *ret = ok ? (UWord)NULL : bad_addr;
         break;

      case VG_USERREQ__CHECK_MEM_IS_DEFINED: {
         MC_ReadResult res;
         res = is_mem_defined ( arg[1], arg[2], &bad_addr );
         if (MC_AddrErr == res)
            mc_record_user_error ( tid, bad_addr, /*isAddrErr*/True );
         else if (MC_ValueErr == res)
            mc_record_user_error ( tid, bad_addr, /*isAddrErr*/False );
         *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
         break;
      }

      /* arg[1] != 0 requests a summary check only. */
      case VG_USERREQ__DO_LEAK_CHECK:
         mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
         *ret = 0; /* return value is meaningless */
         break;

      /* The three permission-setting requests all take
         arg[1] = start, arg[2] = len. */
      case VG_USERREQ__MAKE_MEM_NOACCESS:
         MC_(make_mem_noaccess) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_UNDEFINED:
         MC_(make_mem_undefined) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_DEFINED:
         MC_(make_mem_defined) ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE:
         make_mem_defined_if_addressable ( arg[1], arg[2] );
         *ret = -1;
         break;

      case VG_USERREQ__CREATE_BLOCK: /* describe a block */
         if (arg[1] != 0 && arg[2] != 0) {
            i = alloc_client_block();
            /* VG_(printf)("allocated %d %p\n", i, cgbs); */
            cgbs[i].start = arg[1];
            cgbs[i].size  = arg[2];
            cgbs[i].desc  = VG_(strdup)((Char *)arg[3]);
            cgbs[i].where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );

            *ret = i;
         } else
            *ret = -1;
         break;

      /* arg[2] = descriptor index previously returned by CREATE_BLOCK;
         *ret is 1 if it was invalid/already freed, 0 on success. */
      case VG_USERREQ__DISCARD: /* discard */
         if (cgbs == NULL 
             || arg[2] >= cgb_used ||
             (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
            *ret = 1;
         } else {
            tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
            cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
            VG_(free)(cgbs[arg[2]].desc);
            cgb_discards++;
            *ret = 0;
         }
         break;

      case VG_USERREQ__GET_VBITS:
         *ret = mc_get_or_set_vbits_for_client
                   ( tid, arg[1], arg[2], arg[3], False /* get them */ );
         break;

      case VG_USERREQ__SET_VBITS:
         *ret = mc_get_or_set_vbits_for_client
                   ( tid, arg[1], arg[2], arg[3], True /* set them */ );
         break;

      case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
         UWord** argp = (UWord**)arg;
         // MC_(bytes_leaked) et al were set by the last leak check (or zero
         // if no prior leak checks performed).
         *argp[1] = MC_(bytes_leaked) + MC_(bytes_indirect);
         *argp[2] = MC_(bytes_dubious);
         *argp[3] = MC_(bytes_reachable);
         *argp[4] = MC_(bytes_suppressed);
         // there is no argp[5]
         //*argp[5] = MC_(bytes_indirect);
         // XXX need to make *argp[1-4] defined
         *ret = 0;
         return True;
      }
      case VG_USERREQ__MALLOCLIKE_BLOCK: {
         Addr p         = (Addr)arg[1];
         SizeT sizeB    =       arg[2];
         UInt rzB       =       arg[3];
         Bool is_zeroed = (Bool)arg[4];

         MC_(new_block) ( tid, p, sizeB, /*ignored*/0, rzB, is_zeroed, 
                          MC_AllocCustom, MC_(malloc_list) );
         return True;
      }
      case VG_USERREQ__FREELIKE_BLOCK: {
         Addr p         = (Addr)arg[1];
         UInt rzB       =       arg[2];

         MC_(handle_free) ( tid, p, rzB, MC_AllocCustom );
         return True;
      }

      case _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR: {
         Char* s   = (Char*)arg[1];
         Addr  dst = (Addr) arg[2];
         Addr  src = (Addr) arg[3];
         SizeT len = (SizeT)arg[4];
         mc_record_overlap_error(tid, s, src, dst, len);
         return True;
      }

      case VG_USERREQ__CREATE_MEMPOOL: {
         Addr pool      = (Addr)arg[1];
         UInt rzB       =       arg[2];
         Bool is_zeroed = (Bool)arg[3];

         MC_(create_mempool) ( pool, rzB, is_zeroed );
         return True;
      }

      case VG_USERREQ__DESTROY_MEMPOOL: {
         Addr pool      = (Addr)arg[1];

         MC_(destroy_mempool) ( pool );
         return True;
      }

      case VG_USERREQ__MEMPOOL_ALLOC: {
         Addr pool      = (Addr)arg[1];
         Addr addr      = (Addr)arg[2];
         UInt size      =       arg[3];

         MC_(mempool_alloc) ( tid, pool, addr, size );
         return True;
      }

      case VG_USERREQ__MEMPOOL_FREE: {
         Addr pool      = (Addr)arg[1];
         Addr addr      = (Addr)arg[2];

         MC_(mempool_free) ( pool, addr );
         return True;
      }

      case VG_USERREQ__MEMPOOL_TRIM: {
         Addr pool      = (Addr)arg[1];
         Addr addr      = (Addr)arg[2];
         UInt size      =       arg[3];

         MC_(mempool_trim) ( pool, addr, size );
         return True;
      }

      case VG_USERREQ__MOVE_MEMPOOL: {
         Addr poolA     = (Addr)arg[1];
         Addr poolB     = (Addr)arg[2];

         MC_(move_mempool) ( poolA, poolB );
         return True;
      }

      case VG_USERREQ__MEMPOOL_CHANGE: {
         Addr pool      = (Addr)arg[1];
         Addr addrA     = (Addr)arg[2];
         Addr addrB     = (Addr)arg[3];
         UInt size      =       arg[4];

         MC_(mempool_change) ( pool, addrA, addrB, size );
         return True;
      }

      case VG_USERREQ__MEMPOOL_EXISTS: {
         Addr pool      = (Addr)arg[1];

         *ret = (UWord) MC_(mempool_exists) ( pool );
	 return True;
      }


      default:
         VG_(message)(Vg_UserMsg, 
                      "Warning: unknown memcheck client request code %llx",
                      (ULong)arg[0]);
         return False;
   }
   return True;
}
4817
1 by Andrés Roldán
Import upstream version 2.1.1
4818
/*------------------------------------------------------------*/
4819
/*--- Crude profiling machinery.                           ---*/
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4820
/*------------------------------------------------------------*/
4821
4822
// We track a number of interesting events (using PROF_EVENT)
4823
// if MC_PROFILE_MEMORY is defined.
4824
4825
#ifdef MC_PROFILE_MEMORY

/* Per-event counters and their names; filled in lazily by the
   PROF_EVENT call sites. */
UInt   MC_(event_ctr)[N_PROF_EVENTS];
HChar* MC_(event_ctr_name)[N_PROF_EVENTS];

/* Zero all event counters and clear their names. */
static void init_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      MC_(event_ctr)[i] = 0;
      MC_(event_ctr_name)[i] = NULL;
   }
}

/* Print every event counter that fired at least once, with a blank
   line at most once per group of ten event numbers. */
static void done_prof_mem ( void )
{
   Int  i;
   Bool spaced = False;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if (!spaced && (i % 10) == 0) {
         VG_(printf)("\n");
         spaced = True;
      }
      if (MC_(event_ctr)[i] > 0) {
         spaced = False;
         VG_(printf)( "prof mem event %3d: %9d   %s\n", 
                      i, MC_(event_ctr)[i],
                      MC_(event_ctr_name)[i] 
                         ? MC_(event_ctr_name)[i] : "unnamed");
      }
   }
}

#else

/* Profiling disabled: the hooks compile away to nothing. */
static void init_prof_mem ( void ) { }
static void done_prof_mem ( void ) { }

#endif
4864
4865
/*------------------------------------------------------------*/
4866
/*--- Setup and finalisation                               ---*/
4867
/*------------------------------------------------------------*/
4868
4869
/* Called once command-line processing is complete.  When XML output
   is requested, force a full leak check so the XML report carries as
   much leak information as possible. */
static void mc_post_clo_init ( void )
{
   /* If we've been asked to emit XML, mash around various other
      options so as to constrain the output somewhat. */
   if (VG_(clo_xml)) {
      /* Extract as much info as possible from the leak checker. */
      /* MC_(clo_show_reachable) = True; */
      MC_(clo_leak_check) = LC_Full;
   }
}
4879
4880
static void print_SM_info(char* type, int n_SMs)
4881
{
4882
   VG_(message)(Vg_DebugMsg,
4883
      " memcheck: SMs: %s = %d (%dk, %dM)",
4884
      type,
4885
      n_SMs,
4886
      n_SMs * sizeof(SecMap) / 1024,
4887
      n_SMs * sizeof(SecMap) / (1024 * 1024) );
4888
}
4889
4890
/* Tool finalisation, called at client exit: print malloc statistics,
   run the end-of-run leak check if enabled, and (at verbosity > 1)
   dump detailed shadow-memory statistics.  'exitcode' is unused
   here. */
static void mc_fini ( Int exitcode )
{
   MC_(print_malloc_stats)();

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      if (MC_(clo_leak_check) == LC_Off)
         VG_(message)(Vg_UserMsg, 
             "For a detailed leak analysis,  rerun with: --leak-check=yes");

      VG_(message)(Vg_UserMsg, 
                   "For counts of detected errors, rerun with: -v");
   }
   if (MC_(clo_leak_check) != LC_Off)
      mc_detect_memory_leaks(1/*bogus ThreadId*/, MC_(clo_leak_check));

   done_prof_mem();

   if (VG_(clo_verbosity) > 1) {
      SizeT max_secVBit_szB, max_SMs_szB, max_shmem_szB;
      
      VG_(message)(Vg_DebugMsg,
         " memcheck: sanity checks: %d cheap, %d expensive",
         n_sanity_cheap, n_sanity_expensive );
      /* NOTE(review): the *64 / /16 factors presumably convert node
         counts to KB / MB respectively -- i.e. 64 bytes per L2 node;
         confirm against the auxmap implementation. */
      VG_(message)(Vg_DebugMsg,
         " memcheck: auxmaps: %d auxmap entries (%dk, %dM) in use",
         n_auxmap_L2_nodes, 
         n_auxmap_L2_nodes * 64, 
         n_auxmap_L2_nodes / 16 );
      VG_(message)(Vg_DebugMsg,
         " memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10",
         n_auxmap_L1_searches, n_auxmap_L1_cmps,
         (10ULL * n_auxmap_L1_cmps) 
            / (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1) 
      );   
      VG_(message)(Vg_DebugMsg,
         " memcheck: auxmaps_L2: %lld searches, %lld nodes",
         n_auxmap_L2_searches, n_auxmap_L2_nodes
      );   

      print_SM_info("n_issued     ", n_issued_SMs);
      print_SM_info("n_deissued   ", n_deissued_SMs);
      print_SM_info("max_noaccess ", max_noaccess_SMs);
      print_SM_info("max_undefined", max_undefined_SMs);
      print_SM_info("max_defined  ", max_defined_SMs);
      print_SM_info("max_non_DSM  ", max_non_DSM_SMs);

      // Three DSMs, plus the non-DSM ones
      max_SMs_szB = (3 + max_non_DSM_SMs) * sizeof(SecMap);
      // The 3*sizeof(Word) bytes is the AVL node metadata size.
      // The 4*sizeof(Word) bytes is the malloc metadata size.
      // Hardwiring these sizes in sucks, but I don't see how else to do it.
      max_secVBit_szB = max_secVBit_nodes * 
            (sizeof(SecVBitNode) + 3*sizeof(Word) + 4*sizeof(Word));
      max_shmem_szB   = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;

      VG_(message)(Vg_DebugMsg,
         " memcheck: max sec V bit nodes:    %d (%dk, %dM)",
         max_secVBit_nodes, max_secVBit_szB / 1024,
                            max_secVBit_szB / (1024 * 1024));
      VG_(message)(Vg_DebugMsg,
         " memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)",
         sec_vbits_new_nodes + sec_vbits_updates,
         sec_vbits_new_nodes, sec_vbits_updates );
      VG_(message)(Vg_DebugMsg,
         " memcheck: max shadow mem size:   %dk, %dM",
         max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));
   }

   /* Disabled by default; flip the 0 to get client-block stats. */
   if (0) {
      VG_(message)(Vg_DebugMsg, 
        "------ Valgrind's client block stats follow ---------------" );
      show_client_block_stats();
   }
}
4964
4965
static void mc_pre_clo_init(void)
4966
{
1 by Andrés Roldán
Import upstream version 2.1.1
4967
   VG_(details_name)            ("Memcheck");
4968
   VG_(details_version)         (NULL);
4969
   VG_(details_description)     ("a memory error detector");
4970
   VG_(details_copyright_author)(
4971
      "Copyright (C) 2002-2007, and GNU GPL'd, by Julian Seward et al.");
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4972
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
1 by Andrés Roldán
Import upstream version 2.1.1
4973
   VG_(details_avg_translation_sizeB) ( 556 );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4974
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4975
   VG_(basic_tool_funcs)          (mc_post_clo_init,
4976
                                   MC_(instrument),
4977
                                   mc_fini);
4978
1 by Andrés Roldán
Import upstream version 2.1.1
4979
   VG_(needs_final_IR_tidy_pass)  ( MC_(final_tidy) );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4980
4981
4982
   VG_(needs_core_errors)         ();
1 by Andrés Roldán
Import upstream version 2.1.1
4983
   VG_(needs_tool_errors)         (mc_eq_Error,
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4984
                                   mc_pp_Error,
4985
                                   True,/*show TIDs for errors*/
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
4986
                                   mc_update_extra,
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4987
                                   mc_recognised_suppression,
4988
                                   mc_read_extra_suppression_info,
4989
                                   mc_error_matches_suppression,
4990
                                   mc_get_error_name,
4991
                                   mc_print_extra_suppression_info);
4992
   VG_(needs_libc_freeres)        ();
1 by Andrés Roldán
Import upstream version 2.1.1
4993
   VG_(needs_command_line_options)(mc_process_cmd_line_options,
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
4994
                                   mc_print_usage,
4995
                                   mc_print_debug_usage);
4996
   VG_(needs_client_requests)     (mc_handle_client_request);
4997
   VG_(needs_sanity_checks)       (mc_cheap_sanity_check,
4998
                                   mc_expensive_sanity_check);
4999
   VG_(needs_malloc_replacement)  (MC_(malloc),
5000
                                   MC_(__builtin_new),
5001
                                   MC_(__builtin_vec_new),
5002
                                   MC_(memalign),
5003
                                   MC_(calloc),
5004
                                   MC_(free),
5005
                                   MC_(__builtin_delete),
5006
                                   MC_(__builtin_vec_delete),
5007
                                   MC_(realloc),
5008
                                   MC_MALLOC_REDZONE_SZB );
5009
   VG_(needs_xml_output)          ();
5010
5011
   VG_(track_new_mem_startup)     ( mc_new_mem_startup );
5012
   VG_(track_new_mem_stack_signal)( MC_(make_mem_undefined) );
5013
   VG_(track_new_mem_brk)         ( MC_(make_mem_undefined) );
5014
   VG_(track_new_mem_mmap)        ( mc_new_mem_mmap );
5015
   
1 by Andrés Roldán
Import upstream version 2.1.1
5016
   VG_(track_copy_mem_remap)      ( MC_(copy_address_range_state) );
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
5017
5018
   // Nb: we don't do anything with mprotect.  This means that V bits are
5019
   // preserved if a program, for example, marks some memory as inaccessible
5020
   // and then later marks it as accessible again.
5021
   // 
5022
   // If an access violation occurs (eg. writing to read-only memory) we let
5023
   // it fault and print an informative termination message.  This doesn't
5024
   // happen if the program catches the signal, though, which is bad.  If we
5025
   // had two A bits (for readability and writability) that were completely
5026
   // distinct from V bits, then we could handle all this properly.
5027
   VG_(track_change_mem_mprotect) ( NULL );
5028
      
1 by Andrés Roldán
Import upstream version 2.1.1
5029
   VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) ); 
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
5030
   VG_(track_die_mem_brk)         ( MC_(make_mem_noaccess) );
5031
   VG_(track_die_mem_munmap)      ( MC_(make_mem_noaccess) ); 
5032
5033
#ifdef PERF_FAST_STACK
5034
   VG_(track_new_mem_stack_4)     ( mc_new_mem_stack_4   );
5035
   VG_(track_new_mem_stack_8)     ( mc_new_mem_stack_8   );
5036
   VG_(track_new_mem_stack_12)    ( mc_new_mem_stack_12  );
5037
   VG_(track_new_mem_stack_16)    ( mc_new_mem_stack_16  );
5038
   VG_(track_new_mem_stack_32)    ( mc_new_mem_stack_32  );
5039
   VG_(track_new_mem_stack_112)   ( mc_new_mem_stack_112 );
5040
   VG_(track_new_mem_stack_128)   ( mc_new_mem_stack_128 );
5041
   VG_(track_new_mem_stack_144)   ( mc_new_mem_stack_144 );
5042
   VG_(track_new_mem_stack_160)   ( mc_new_mem_stack_160 );
5043
#endif
5044
   VG_(track_new_mem_stack)       ( mc_new_mem_stack     );
5045
5046
#ifdef PERF_FAST_STACK
5047
   VG_(track_die_mem_stack_4)     ( mc_die_mem_stack_4   );
5048
   VG_(track_die_mem_stack_8)     ( mc_die_mem_stack_8   );
5049
   VG_(track_die_mem_stack_12)    ( mc_die_mem_stack_12  );
5050
   VG_(track_die_mem_stack_16)    ( mc_die_mem_stack_16  );
5051
   VG_(track_die_mem_stack_32)    ( mc_die_mem_stack_32  );
5052
   VG_(track_die_mem_stack_112)   ( mc_die_mem_stack_112 );
5053
   VG_(track_die_mem_stack_128)   ( mc_die_mem_stack_128 );
5054
   VG_(track_die_mem_stack_144)   ( mc_die_mem_stack_144 );
5055
   VG_(track_die_mem_stack_160)   ( mc_die_mem_stack_160 );
5056
#endif
5057
   VG_(track_die_mem_stack)       ( mc_die_mem_stack     );
5058
   
1 by Andrés Roldán
Import upstream version 2.1.1
5059
   VG_(track_ban_mem_stack)       ( MC_(make_mem_noaccess) );
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
5060
5061
   VG_(track_pre_mem_read)        ( check_mem_is_defined );
5062
   VG_(track_pre_mem_read_asciiz) ( check_mem_is_defined_asciiz );
5063
   VG_(track_pre_mem_write)       ( check_mem_is_addressable );
5064
   VG_(track_post_mem_write)      ( mc_post_mem_write );
5065
5066
   if (MC_(clo_undef_value_errors))
5067
      VG_(track_pre_reg_read)     ( mc_pre_reg_read );
5068
5069
   VG_(track_post_reg_write)                  ( mc_post_reg_write );
5070
   VG_(track_post_reg_write_clientcall_return)( mc_post_reg_write_clientcall );
5071
1 by Andrés Roldán
Import upstream version 2.1.1
5072
   init_shadow_memory();
5073
   MC_(malloc_list)  = VG_(HT_construct)( "MC_(malloc_list)" );
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
5074
   MC_(mempool_list) = VG_(HT_construct)( "MC_(mempool_list)" );
5075
   init_prof_mem();
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
5076
5077
   tl_assert( mc_expensive_sanity_check() );
5078
5079
   // {LOADV,STOREV}[8421] will all fail horribly if this isn't true.
5080
   tl_assert(sizeof(UWord) == sizeof(Addr));
5081
   // Call me paranoid.  I don't care.
1.2.3 by Andrés Roldán
Import upstream version 3.3.1
5082
   tl_assert(sizeof(void*) == sizeof(Addr));
5083
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
5084
   // BYTES_PER_SEC_VBIT_NODE must be a power of two.
5085
   tl_assert(-1 != VG_(log2)(BYTES_PER_SEC_VBIT_NODE));
5086
}
5087
5088
VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)
5089
1 by Andrés Roldán
Import upstream version 2.1.1
5090
/*--------------------------------------------------------------------*/
5091
/*--- end                                                          ---*/
1.2.2 by Andrés Roldán
Import upstream version 3.2.1
5092
/*--------------------------------------------------------------------*/
1 by Andrés Roldán
Import upstream version 2.1.1
5093