2
/*--------------------------------------------------------------------*/
3
/*--- The address space manager: segment initialisation and ---*/
4
/*--- tracking, stack operations ---*/
5
/*--- m_aspacemgr.c ---*/
6
/*--------------------------------------------------------------------*/
9
This file is part of Valgrind, a dynamic binary instrumentation
12
Copyright (C) 2000-2006 Julian Seward
15
This program is free software; you can redistribute it and/or
16
modify it under the terms of the GNU General Public License as
17
published by the Free Software Foundation; either version 2 of the
18
License, or (at your option) any later version.
20
This program is distributed in the hope that it will be useful, but
21
WITHOUT ANY WARRANTY; without even the implied warranty of
22
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23
General Public License for more details.
25
You should have received a copy of the GNU General Public License
26
along with this program; if not, write to the Free Software
27
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
30
The GNU General Public License is contained in the file COPYING.
33
/* One of the important design goals of the address space manager is
34
to minimise dependence on other modules. Hence the following
35
minimal set of imports. */
37
#include "pub_core_basics.h" // types
39
#include "pub_core_debuglog.h" // VG_(debugLog)
41
#include "pub_core_libcbase.h" // VG_(strlen), VG_(strcmp)
43
// VG_PGROUNDDN, VG_PGROUNDUP
45
#include "pub_core_syscall.h" // VG_(do_syscallN)
46
// VG_(mk_SysRes_Error)
47
// VG_(mk_SysRes_Success)
49
#include "pub_core_options.h" // VG_(clo_sanity_level)
51
#include "vki_unistd.h" // __NR_* constants
53
#include "pub_core_aspacemgr.h" // self
56
/* Note: many of the exported functions implemented below are
57
described more fully in comments in pub_core_aspacemgr.h.
61
/*-----------------------------------------------------------------*/
65
/*-----------------------------------------------------------------*/
69
The purpose of the address space manager (aspacem) is:
71
(1) to record the disposition of all parts of the process' address
74
(2) to the extent that it can, influence layout in ways favourable
77
It is important to appreciate that whilst it can and does attempt
78
to influence layout, and usually succeeds, it isn't possible to
79
impose absolute control: in the end, the kernel is the final
80
arbiter, and can always bounce our requests.
84
The strategy is therefore as follows:
86
* Track ownership of mappings. Each one can belong either to
87
Valgrind or to the client.
89
* Try to place the client's fixed and hinted mappings at the
90
requested addresses. Fixed mappings are allowed anywhere except
91
in areas reserved by Valgrind; the client can trash its own
92
mappings if it wants. Hinted mappings are allowed providing they
93
fall entirely in free areas; if not, they will be placed by
94
aspacem in a free area.
96
* Anonymous mappings are allocated so as to keep Valgrind and
97
client areas widely separated when possible. If address space
98
runs low, then they may become intermingled: aspacem will attempt
99
to use all possible space. But under most circumstances lack of
100
address space is not a problem and so the areas will remain far
103
Searches for client space start at aspacem_cStart and will wrap
104
around the end of the available space if needed. Searches for
105
Valgrind space start at aspacem_vStart and will also wrap around.
106
Because aspacem_cStart is approximately at the start of the
107
available space and aspacem_vStart is approximately in the
108
middle, for the most part the client anonymous mappings will be
109
clustered towards the start of available space, and Valgrind ones
112
The available space is delimited by aspacem_minAddr and
113
aspacem_maxAddr. aspacem is flexible and can operate with these
114
at any (sane) setting. For 32-bit Linux, aspacem_minAddr is set
115
to some low-ish value at startup (64M) and aspacem_maxAddr is
116
derived from the stack pointer at system startup. This seems a
117
reliable way to establish the initial boundaries.
119
64-bit Linux is similar except for the important detail that the
120
upper boundary is set to 32G. The reason is so that all
121
anonymous mappings (basically all client data areas) are kept
122
below 32G, since that is the maximum range that memcheck can
123
track shadow memory using a fast 2-level sparse array. It can go
124
beyond that but runs much more slowly. The 32G limit is
125
arbitrary and is trivially changed. So, with the current
126
settings, programs on 64-bit Linux will appear to run out of
127
address space and presumably fail at the 32G limit. Given the
128
9/8 space overhead of Memcheck, that means you should be able to
129
memcheckify programs that use up to about 14G natively.
131
Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
132
anonymous mappings. The client can still do fixed and hinted maps
133
at any addresses provided they do not overlap Valgrind's segments.
134
This makes Valgrind able to load prelinked .so's at their requested
135
addresses on 64-bit platforms, even if they are very high (eg,
138
At startup, aspacem establishes the usable limits, and advises
139
m_main to place the client stack at the top of the range, which on
140
a 32-bit machine will be just below the real initial stack. One
141
effect of this is that self-hosting sort-of works, because an inner
142
valgrind will then place its client's stack just below its own
145
The segment array and segment kinds
146
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
147
The central data structure is the segment array (segments[0
148
.. nsegments_used-1]). This covers the entire address space in
149
order, giving account of every byte of it. Free spaces are
150
represented explicitly as this makes many operations simpler.
151
Mergeable adjacent segments are aggressively merged so as to create
152
a "normalised" representation (preen_nsegments).
154
There are 7 (mutually-exclusive) segment kinds, the meaning of
157
SkFree: a free space, which may be allocated either to Valgrind (V)
160
SkAnonC: an anonymous mapping belonging to C. For these, aspacem
161
tracks a boolean indicating whether or not it is part of the
162
client's heap area (can't remember why).
164
SkFileC: a file mapping belonging to C.
166
SkShmC: a shared memory segment belonging to C.
168
SkAnonV: an anonymous mapping belonging to V. These cover all V's
169
dynamic memory needs, including non-client malloc/free areas,
170
shadow memory, and the translation cache.
172
SkFileV: a file mapping belonging to V. As far as I know these are
173
only created transiently for the purposes of reading debug info.
175
SkResvn: a reservation segment.
177
These are mostly straightforward. Reservation segments have some
180
A reservation segment is unmapped from the kernel's point of view,
181
but is an area in which aspacem will not create anonymous maps
182
(either Vs or Cs). The idea is that we will try to keep it clear
183
when the choice to do so is ours. Reservation segments are
184
'invisible' from the client's point of view: it may choose to park
185
a fixed mapping in the middle of one, and that's just tough -- we
186
can't do anything about that. From the client's perspective
187
reservations are semantically equivalent to (although
188
distinguishable from, if it makes enquiries) free areas.
190
Reservations are a primitive mechanism provided for whatever
191
purposes the rest of the system wants. Currently they are used to
192
reserve the expansion space into which a growdown stack is
193
expanded, and into which the data segment is extended. Note,
194
though, those uses are entirely external to this module, which only
195
supplies the primitives.
197
Reservations may be shrunk in order that an adjoining anonymous
198
mapping may be extended. This makes dataseg/stack expansion work.
199
A reservation may not be shrunk below one page.
201
The advise/notify concept
202
~~~~~~~~~~~~~~~~~~~~~~~~~
203
All mmap-related calls must be routed via aspacem. Calling
204
sys_mmap directly from the rest of the system is very dangerous
205
because aspacem's data structures will become out of date.
207
The fundamental mode of operation of aspacem is to support client
208
mmaps. Here's what happens (in ML_(generic_PRE_sys_mmap)):
210
* m_syswrap intercepts the mmap call. It examines the parameters
211
and identifies the requested placement constraints. There are
212
three possibilities: no constraint (MAny), hinted (MHint, "I
213
prefer X but will accept anything"), and fixed (MFixed, "X or
216
* This request is passed to VG_(am_get_advisory). This decides on
217
a placement as described in detail in Strategy above. It may
218
also indicate that the map should fail, because it would trash
219
one of Valgrind's areas, which would probably kill the system.
221
* Control returns to the wrapper. If VG_(am_get_advisory) has
222
declared that the map should fail, then it must be made to do so.
223
Usually, though, the request is considered acceptable, in which
224
case an "advised" address is supplied. The advised address
225
replaces the original address supplied by the client, and
228
Note at this point that although aspacem has been asked for
229
advice on where to place the mapping, no commitment has yet been
230
made by either it or the kernel.
232
* The adjusted request is handed off to the kernel.
234
* The kernel's result is examined. If the map succeeded, aspacem
235
is told of the outcome (VG_(am_notify_client_mmap)), so it can
236
update its records accordingly.
238
This then is the central advise-notify idiom for handling client
239
mmap/munmap/mprotect/shmat:
241
* ask aspacem for an advised placement (or a veto)
243
* if not vetoed, hand request to kernel, using the advised placement
245
* examine result, and if successful, notify aspacem of the result.
247
There are also many convenience functions, eg
248
VG_(am_mmap_anon_fixed_client), which do both phases entirely within
251
To debug all this, a sync-checker is provided. It reads
252
/proc/self/maps, compares what it sees with aspacem's records, and
253
complains if there is a difference. --sanity-level=3 runs it before
254
and after each syscall, which is a powerful, if slow way of finding
255
buggy syscall wrappers.
259
Up to and including Valgrind 2.4.1, x86 segmentation was used to
260
enforce separation of V and C, so that wild writes by C could not
261
trash V. This got called "pointercheck". Unfortunately, the new
262
more flexible memory layout, plus the need to be portable across
263
different architectures, means doing this in hardware is no longer
264
viable, and doing it in software is expensive. So at the moment we
269
/*-----------------------------------------------------------------*/
271
/*--- The Address Space Manager's state. ---*/
273
/*-----------------------------------------------------------------*/
275
/* ------ start of STATE for the address-space manager ------ */
277
/* Max number of segments we can track. */
278
#define VG_N_SEGMENTS 5000
280
/* Max number of segment file names we can track. */
281
#define VG_N_SEGNAMES 1000
283
/* Max length of a segment file name. */
284
#define VG_MAX_SEGNAMELEN 1000
291
HChar fname[VG_MAX_SEGNAMELEN];
295
/* Filename table. _used is the high water mark; an entry is only
296
valid if its index >= 0, < _used, and its .inUse field == True.
297
The .mark field is used to garbage-collect dead entries.
299
static SegName segnames[VG_N_SEGNAMES];
300
static Int segnames_used = 0;
303
/* Array [0 .. nsegments_used-1] of all mappings. */
304
/* Sorted by .addr field. */
305
/* I: len may not be zero. */
306
/* I: overlapping segments are not allowed. */
307
/* I: the segments cover the entire address space precisely. */
308
/* Each segment can optionally hold an index into the filename table. */
310
static NSegment nsegments[VG_N_SEGMENTS];
311
static Int nsegments_used = 0;
313
#define Addr_MIN ((Addr)0)
314
#define Addr_MAX ((Addr)(-1ULL))
318
// The smallest address that aspacem will try to allocate
319
static Addr aspacem_minAddr = 0;
321
// The largest address that aspacem will try to allocate
322
static Addr aspacem_maxAddr = 0;
324
// Where aspacem will start looking for client space
325
static Addr aspacem_cStart = 0;
327
// Where aspacem will start looking for Valgrind space
328
static Addr aspacem_vStart = 0;
331
#define AM_SANITY_CHECK \
333
if (VG_(clo_sanity_level >= 3)) \
334
aspacem_assert(VG_(am_do_sync_check) \
335
(__PRETTY_FUNCTION__,__FILE__,__LINE__)); \
338
/* ------ end of STATE for the address-space manager ------ */
340
/* ------ Forwards decls ------ */
341
static void aspacem_exit ( Int );
342
static Int find_nsegment_idx ( Addr a );
344
static void parse_procselfmaps (
345
void (*record_mapping)( Addr addr, SizeT len, UInt prot,
346
UInt dev, UInt ino, ULong offset,
347
const UChar* filename ),
348
void (*record_gap)( Addr addr, SizeT len )
352
/*-----------------------------------------------------------------*/
354
/*--- Stuff to make aspacem almost completely independent of ---*/
355
/*--- the rest of Valgrind. ---*/
357
/*-----------------------------------------------------------------*/
359
//--------------------------------------------------------------
360
// Simple assert and assert-like fns, which avoid dependence on
361
// m_libcassert, and hence on the entire debug-info reader swamp
363
/* Report a fatal error and terminate.  Never returns. */
static void aspacem_barf ( HChar* what )
{
   VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   aspacem_exit(1);
}
370
/* Report that a compile-time limit (eg VG_N_SEGMENTS) has been
   exceeded, and terminate.  Never returns. */
static void aspacem_barf_toolow ( HChar* what )
{
   VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s is too low.\n", what);
   VG_(debugLog)(0, "aspacem", "  Increase it and rebuild.  "
                               "Exiting now.\n");
   aspacem_exit(1);
}
378
static void aspacem_assert_fail( const HChar* expr,
383
VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: aspacem assertion failed:\n");
384
VG_(debugLog)(0, "aspacem", " %s\n", expr);
385
VG_(debugLog)(0, "aspacem", " at %s:%d (%s)\n", file,line,fn);
386
VG_(debugLog)(0, "aspacem", "Exiting now.\n");
390
#define aspacem_assert(expr) \
391
((void) ((expr) ? 0 : \
392
(aspacem_assert_fail(#expr, \
393
__FILE__, __LINE__, \
394
__PRETTY_FUNCTION__))))
397
//--------------------------------------------------------------
398
// A simple sprintf implementation, so as to avoid dependence on
401
/* Emit one character into the buffer tracked by *p (a HChar**),
   advancing the write pointer.  Used as the sink callback for
   VG_(debugLog_vprintf).  No bounds checking is possible here; the
   caller must supply a large-enough buffer. */
static void add_to_aspacem_sprintf_buf ( HChar c, void *p )
{
   HChar** aspacem_sprintf_ptr = p;
   *(*aspacem_sprintf_ptr)++ = c;
}
408
/* Minimal vsprintf built on VG_(debugLog_vprintf), so aspacem does
   not depend on m_libcprint.  Formats into 'buf', NUL-terminates it,
   and returns the number of characters written (excluding the
   terminator).  'buf' must be large enough; no bounds check. */
UInt aspacem_vsprintf ( HChar* buf, const HChar *format, va_list vargs )
{
   UInt ret;
   Char *aspacem_sprintf_ptr = buf;

   ret = VG_(debugLog_vprintf)
            ( add_to_aspacem_sprintf_buf,
              &aspacem_sprintf_ptr, format, vargs );
   add_to_aspacem_sprintf_buf('\0', &aspacem_sprintf_ptr);

   return ret;
}
422
/* Varargs front-end for aspacem_vsprintf.  Returns the number of
   characters written, excluding the NUL terminator. */
UInt aspacem_sprintf ( HChar* buf, const HChar *format, ... )
{
   UInt ret;
   va_list vargs;

   va_start(vargs, format);
   ret = aspacem_vsprintf(buf, format, vargs);
   va_end(vargs);

   return ret;
}
435
//--------------------------------------------------------------
436
// Direct access to a handful of syscalls. This avoids dependence on
437
// m_libc*. THESE DO NOT UPDATE THE SEGMENT LIST. DO NOT USE THEM
438
// UNLESS YOU KNOW WHAT YOU ARE DOING.
440
SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot,
441
UInt flags, UInt fd, Off64T offset)
444
aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
445
# if defined(VGP_x86_linux) || defined(VGP_ppc32_linux)
446
res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
447
prot, flags, fd, offset / VKI_PAGE_SIZE);
448
# elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux)
449
res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
450
prot, flags, fd, offset);
452
# error Unknown platform
457
/* Raw mprotect.  DOES NOT UPDATE THE SEGMENT LIST. */
static SysRes do_mprotect_NO_NOTIFY(Addr start, SizeT length, UInt prot)
{
   return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
}
462
/* Raw munmap.  DOES NOT UPDATE THE SEGMENT LIST. */
static SysRes do_munmap_NO_NOTIFY(Addr start, SizeT length)
{
   return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
}
467
/* Extend the mapping old_addr .. old_addr+old_len-1 to have length
   new_len, WITHOUT moving it.  If it can't be extended in place, the
   syscall fails (flags==0 forbids mremap from relocating).  DOES NOT
   UPDATE THE SEGMENT LIST. */
static SysRes do_extend_mapping_NO_NOTIFY( Addr old_addr, SizeT old_len,
                                           SizeT new_len )
{
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             0/*flags, meaning: must be at old_addr, else FAIL */,
             0/*new_addr, is ignored*/
          );
}
481
/* Move the mapping old_addr .. old_addr+old_len-1 to the new location
   and with the new length.  Only needs to handle the case where the
   two areas do not overlap, neither length is zero, and all args are
   page aligned.  MAYMOVE|FIXED means move-to-new_addr-or-fail.  DOES
   NOT UPDATE THE SEGMENT LIST. */
static SysRes do_relocate_nooverlap_mapping_NO_NOTIFY(
                 Addr old_addr, Addr old_len,
                 Addr new_addr, Addr new_len
              )
{
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED/*move-or-fail*/,
             new_addr
          );
}
498
/* Raw readlink.  Returns the number of bytes placed in 'buf', or -1
   on error.  Note: does not NUL-terminate 'buf'. */
static Int aspacem_readlink(HChar* path, HChar* buf, UInt bufsiz)
{
   SysRes res;
   res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
   return res.isError ? -1 : res.val;
}
505
/* Raw fstat.  Returns 0 on success, -1 on error. */
static Int aspacem_fstat( Int fd, struct vki_stat* buf )
{
   SysRes res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)buf);
   return res.isError ? (-1) : 0;
}
512
/* Raw fstat64.  Returns 0 on success, -1 on error. */
static Int aspacem_fstat64( Int fd, struct vki_stat64* buf )
{
   SysRes res = VG_(do_syscall2)(__NR_fstat64, fd, (UWord)buf);
   return res.isError ? (-1) : 0;
}
519
/* Terminate the process via raw syscalls, bypassing all of Valgrind's
   normal shutdown machinery.  Never returns. */
static void aspacem_exit( Int status )
{
   (void)VG_(do_syscall1)(__NR_exit_group, status );
   (void)VG_(do_syscall1)(__NR_exit, status );
   /* Why are we still alive here?  Force a crash: write through a
      null pointer, then fail an assertion that can never hold. */
   *(volatile Int *)0 = 'x';
   aspacem_assert(2+2 == 5);
}
529
/* Raw open.  Returns the SysRes from the syscall unmodified. */
static SysRes aspacem_open ( const Char* pathname, Int flags, Int mode )
{
   SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
   return res;
}
535
/* Raw close.  Any error from the kernel is deliberately ignored. */
static void aspacem_close ( Int fd )
{
   (void)VG_(do_syscall1)(__NR_close, fd);
}
540
/* Raw read.  Returns the number of bytes read, or -1 on error. */
static Int aspacem_read ( Int fd, void* buf, Int count)
{
   SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
   return res.isError ? -1 : res.val;
}
547
//--------------------------------------------------------------
548
// Functions for extracting information about file descriptors.
550
/* Extract the device and inode numbers for a fd. */
552
Bool get_inode_for_fd ( Int fd, /*OUT*/UWord* dev,
553
/*OUT*/UWord* ino, /*OUT*/UInt* mode )
558
struct vki_stat64 buf64;
559
/* Try fstat64 first as it can cope with minor and major device
560
numbers outside the 0-255 range and it works properly for x86
561
binaries on amd64 systems where fstat seems to be broken. */
562
r = aspacem_fstat64(fd, &buf64);
566
*mode = buf64.st_mode;
570
r = aspacem_fstat(fd, &buf);
580
/* Given a file descriptor, attempt to deduce its filename. To do
581
this, we use /proc/self/fd/<FD>. If this doesn't point to a file,
582
or if it doesn't exist, we return False. */
584
/* Given a file descriptor, attempt to deduce its filename by reading
   the /proc/self/fd/<FD> symlink.  Returns True and fills 'buf' (which
   is first zeroed, guaranteeing NUL termination provided the link is
   shorter than nbuf) if the link resolves to an absolute path;
   otherwise returns False. */
Bool get_name_for_fd ( Int fd, /*OUT*/HChar* buf, Int nbuf )
{
   Int   i;
   HChar tmp[64];   /* ample for "/proc/self/fd/" + any Int */

   aspacem_sprintf(tmp, "/proc/self/fd/%d", fd);
   for (i = 0; i < nbuf; i++) buf[i] = 0;
   if (aspacem_readlink(tmp, buf, nbuf) > 0 && buf[0] == '/')
      return True;
   else
      return False;
}
599
/*-----------------------------------------------------------------*/
601
/*--- SegName array management. ---*/
603
/*-----------------------------------------------------------------*/
605
/* Searches the filename table to find an index for the given name.
606
If none is found, an index is allocated and the name stored. If no
607
space is available we just give up. If the string is too long to
610
static Int allocate_segname ( const HChar* name )
614
aspacem_assert(name);
616
if (0) VG_(debugLog)(0,"aspacem","allocate_segname %s\n", name);
618
len = VG_(strlen)(name);
619
if (len >= VG_MAX_SEGNAMELEN-1) {
623
/* first see if we already have the name. */
624
for (i = 0; i < segnames_used; i++) {
625
if (!segnames[i].inUse)
627
if (0 == VG_(strcmp)(name, &segnames[i].fname[0])) {
632
/* no we don't. So look for a free slot. */
633
for (i = 0; i < segnames_used; i++)
634
if (!segnames[i].inUse)
637
if (i == segnames_used) {
638
/* no free slots .. advance the high-water mark. */
639
if (segnames_used+1 < VG_N_SEGNAMES) {
643
aspacem_barf_toolow("VG_N_SEGNAMES");
648
segnames[i].inUse = True;
649
for (j = 0; j < len; j++)
650
segnames[i].fname[j] = name[j];
651
aspacem_assert(len < VG_MAX_SEGNAMELEN);
652
segnames[i].fname[len] = 0;
657
/*-----------------------------------------------------------------*/
659
/*--- Displaying the segment array. ---*/
661
/*-----------------------------------------------------------------*/
663
/* Render a SegKind as a fixed-width 4-char tag for the segment
   listing.  Lowercase = client-owned, UPPERCASE = Valgrind-owned. */
static HChar* show_SegKind ( SegKind sk )
{
   switch (sk) {
      case SkFree:  return "    ";
      case SkAnonC: return "anon";
      case SkAnonV: return "ANON";
      case SkFileC: return "file";
      case SkFileV: return "FILE";
      case SkShmC:  return "shm ";
      case SkResvn: return "RSVN";
      default:      return "????";
   }
}
677
/* Render a ShrinkMode (which end of a reservation may be shrunk) as a
   string for the segment listing. */
static HChar* show_ShrinkMode ( ShrinkMode sm )
{
   switch (sm) {
      case SmLower: return "SmLower";
      case SmUpper: return "SmUpper";
      case SmFixed: return "SmFixed";
      default:      return "Sm?????";
   }
}
687
static void show_Addr_concisely ( /*OUT*/HChar* buf, Addr aA )
692
if (a < 10*1000*1000ULL) {
695
else if (a < 999999ULL * (1ULL<<20)) {
699
else if (a < 999999ULL * (1ULL<<30)) {
703
else if (a < 999999ULL * (1ULL<<40)) {
711
aspacem_sprintf(buf, fmt, a);
715
/* Show full details of an NSegment */
717
static void __attribute__ ((unused))
718
show_nsegment_full ( Int logLevel, NSegment* seg )
720
HChar* name = "(none)";
721
if (seg->fnIdx >= 0 && seg->fnIdx < segnames_used
722
&& segnames[seg->fnIdx].inUse
723
&& segnames[seg->fnIdx].fname[0] != 0)
724
name = segnames[seg->fnIdx].fname;
726
VG_(debugLog)(logLevel, "aspacem",
727
"NSegment{%s, start=0x%llx, end=0x%llx, smode=%s, dev=%llu, "
728
"ino=%llu, offset=%llu, fnIdx=%d, hasR=%d, hasW=%d, hasX=%d, "
729
"hasT=%d, mark=%d, name=\"%s\"}\n",
730
show_SegKind(seg->kind),
733
show_ShrinkMode(seg->smode),
734
(ULong)seg->dev, (ULong)seg->ino, (ULong)seg->offset, seg->fnIdx,
735
(Int)seg->hasR, (Int)seg->hasW, (Int)seg->hasX, (Int)seg->hasT,
742
/* Show an NSegment in a user-friendly-ish way. */
744
static void show_nsegment ( Int logLevel, Int segNo, NSegment* seg )
747
ULong len = ((ULong)seg->end) - ((ULong)seg->start) + 1;
748
show_Addr_concisely(len_buf, len);
755
"%3d: %s %010llx-%010llx %s\n",
756
segNo, show_SegKind(seg->kind),
757
(ULong)seg->start, (ULong)seg->end, len_buf
761
case SkAnonC: case SkAnonV: case SkShmC:
764
"%3d: %s %010llx-%010llx %s %c%c%c%c%c\n",
765
segNo, show_SegKind(seg->kind),
766
(ULong)seg->start, (ULong)seg->end, len_buf,
767
seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
768
seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
769
seg->isCH ? 'H' : '-'
773
case SkFileC: case SkFileV:
776
"%3d: %s %010llx-%010llx %s %c%c%c%c%c d=0x%03llx "
777
"i=%-7lld o=%-7lld (%d)\n",
778
segNo, show_SegKind(seg->kind),
779
(ULong)seg->start, (ULong)seg->end, len_buf,
780
seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
781
seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
782
seg->isCH ? 'H' : '-',
783
(ULong)seg->dev, (ULong)seg->ino, (Long)seg->offset, seg->fnIdx
790
"%3d: %s %010llx-%010llx %s %c%c%c%c%c %s\n",
791
segNo, show_SegKind(seg->kind),
792
(ULong)seg->start, (ULong)seg->end, len_buf,
793
seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
794
seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
795
seg->isCH ? 'H' : '-',
796
show_ShrinkMode(seg->smode)
803
"%3d: ???? UNKNOWN SEGMENT KIND\n",
810
/* Print out the segment array (debugging only!). */
811
/* Print out the segment array and the in-use part of the filename
   table (debugging only!).  'who' tags the output so the caller's
   context is identifiable in the log. */
void VG_(am_show_nsegments) ( Int logLevel, HChar* who )
{
   Int i;
   VG_(debugLog)(logLevel, "aspacem",
                 "<<< SHOW_SEGMENTS: %s (%d segments, %d segnames)\n",
                 who, nsegments_used, segnames_used);
   for (i = 0; i < segnames_used; i++) {
      if (!segnames[i].inUse)
         continue;
      VG_(debugLog)(logLevel, "aspacem",
                    "(%2d) %s\n", i, segnames[i].fname);
   }
   for (i = 0; i < nsegments_used; i++)
      show_nsegment( logLevel, i, &nsegments[i] );
   VG_(debugLog)(logLevel, "aspacem",
                 ">>>\n");
}
830
/* Get the filename corresponding to this segment, if known and if it
831
has one. The returned name's storage cannot be assumed to be
832
persistent, so the caller should immediately copy the name
834
/* Get the filename corresponding to this segment, if known and if it
   has one, else NULL.  The returned name's storage (the shared
   filename table) cannot be assumed to be persistent, so the caller
   should immediately copy the name. */
HChar* VG_(am_get_filename)( NSegment* seg )
{
   Int i;
   aspacem_assert(seg);
   i = seg->fnIdx;
   if (i < 0 || i >= segnames_used || !segnames[i].inUse)
      return NULL;
   else
      return &segnames[i].fname[0];
}
845
/* Collect up the start addresses of all non-free, non-resvn segments.
846
The interface is a bit strange in order to avoid potential
847
segment-creation races caused by dynamic allocation of the result
850
The function first computes how many entries in the result
851
buffer *starts will be needed. If this number <= nStarts,
852
they are placed in starts[0..], and the number is returned.
853
If nStarts is not large enough, nothing is written to
854
starts[0..], and the negation of the size is returned.
856
Correct use of this function may mean calling it multiple times in
857
order to establish a suitably-sized buffer. */
859
/* Collect up the start addresses of all non-free, non-resvn segments.
   First counts how many entries are needed; if that count fits in
   nStarts the addresses are written to starts[0..] and the count is
   returned, otherwise nothing is written and the negated count is
   returned so the caller can retry with a bigger buffer. */
Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
{
   Int i, j, nSegs;

   /* don't pass dumbass arguments */
   aspacem_assert(nStarts >= 0);

   nSegs = 0;
   for (i = 0; i < nsegments_used; i++) {
      if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
         continue;
      nSegs++;
   }

   if (nSegs > nStarts) {
      /* The buffer isn't big enough.  Tell the caller how big it needs
         to be. */
      return -nSegs;
   }

   /* There's enough space.  So write into the result buffer. */
   aspacem_assert(nSegs <= nStarts);

   j = 0;
   for (i = 0; i < nsegments_used; i++) {
      if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
         continue;
      starts[j] = nsegments[i].start;
      j++;
   }

   aspacem_assert(j == nSegs); /* this should not fail */
   return nSegs;
}
895
/*-----------------------------------------------------------------*/
897
/*--- Sanity checking and preening of the segment array. ---*/
899
/*-----------------------------------------------------------------*/
901
/* Check representational invariants for NSegments. */
903
static Bool sane_NSegment ( NSegment* s )
905
if (s == NULL) return False;
907
/* No zero sized segments and no wraparounds. */
908
if (s->start >= s->end) return False;
910
/* .mark is used for admin purposes only. */
911
if (s->mark) return False;
913
/* require page alignment */
914
if (!VG_IS_PAGE_ALIGNED(s->start)) return False;
915
if (!VG_IS_PAGE_ALIGNED(s->end+1)) return False;
922
&& s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
923
&& !s->hasR && !s->hasW && !s->hasX && !s->hasT
926
case SkAnonC: case SkAnonV: case SkShmC:
929
&& s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
930
&& (s->kind==SkAnonC ? True : !s->isCH);
932
case SkFileC: case SkFileV:
935
&& (s->fnIdx == -1 ||
936
(s->fnIdx >= 0 && s->fnIdx < segnames_used && segnames[s->fnIdx].inUse))
941
s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
942
&& !s->hasR && !s->hasW && !s->hasX && !s->hasT
951
/* Try merging s2 into s1, if possible. If successful, s1 is
952
modified, and True is returned. Otherwise s1 is unchanged and
953
False is returned. */
955
static Bool maybe_merge_nsegments ( NSegment* s1, NSegment* s2 )
957
if (s1->kind != s2->kind)
960
if (s1->end+1 != s2->start)
963
/* reject cases which would cause wraparound */
964
if (s1->start > s2->end)
973
case SkAnonC: case SkAnonV:
974
if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
975
&& s1->hasX == s2->hasX && s1->isCH == s2->isCH) {
977
s1->hasT |= s2->hasT;
982
case SkFileC: case SkFileV:
983
if (s1->hasR == s2->hasR
984
&& s1->hasW == s2->hasW && s1->hasX == s2->hasX
985
&& s1->dev == s2->dev && s1->ino == s2->ino
986
&& s2->offset == s1->offset
987
+ ((ULong)s2->start) - ((ULong)s1->start) ) {
989
s1->hasT |= s2->hasT;
998
if (s1->smode == SmFixed && s2->smode == SmFixed) {
1012
/* Sanity-check and canonicalise the segment array (merge mergable
1013
segments). Returns True if any segments were merged. */
1015
static Bool preen_nsegments ( void )
1017
Int i, j, r, w, nsegments_used_old = nsegments_used;
1019
/* Pass 1: check the segment array covers the entire address space
1020
exactly once, and also that each segment is sane. */
1021
aspacem_assert(nsegments_used > 0);
1022
aspacem_assert(nsegments[0].start == Addr_MIN);
1023
aspacem_assert(nsegments[nsegments_used-1].end == Addr_MAX);
1025
aspacem_assert(sane_NSegment(&nsegments[0]));
1026
for (i = 1; i < nsegments_used; i++) {
1027
aspacem_assert(sane_NSegment(&nsegments[i]));
1028
aspacem_assert(nsegments[i-1].end+1 == nsegments[i].start);
1031
/* Pass 2: merge as much as possible, using
1032
maybe_merge_segments. */
1034
for (r = 1; r < nsegments_used; r++) {
1035
if (maybe_merge_nsegments(&nsegments[w], &nsegments[r])) {
1040
nsegments[w] = nsegments[r];
1044
aspacem_assert(w > 0 && w <= nsegments_used);
1047
/* Pass 3: free up unused string table slots */
1048
/* clear mark bits */
1049
for (i = 0; i < segnames_used; i++)
1050
segnames[i].mark = False;
1052
for (i = 0; i < nsegments_used; i++) {
1053
j = nsegments[i].fnIdx;
1054
aspacem_assert(j >= -1 && j < segnames_used);
1056
aspacem_assert(segnames[j].inUse);
1057
segnames[j].mark = True;
1061
for (i = 0; i < segnames_used; i++) {
1062
if (segnames[i].mark == False) {
1063
segnames[i].inUse = False;
1064
segnames[i].fname[0] = 0;
1068
return nsegments_used != nsegments_used_old;
1072
/* Check the segment array corresponds with the kernel's view of
1073
memory layout. sync_check_ok returns True if no anomalies were
1074
found, else False. In the latter case the mismatching segments are
1077
The general idea is: we get the kernel to show us all its segments
1078
and also the gaps in between. For each such interval, try and find
1079
a sequence of appropriate intervals in our segment array which
1080
cover or more than cover the kernel's interval, and which all have
1081
suitable kinds/permissions etc.
1083
Although any specific kernel interval is not matched exactly to a
1084
valgrind interval or sequence thereof, eventually any disagreement
1085
on mapping boundaries will be detected. This is because, if for
1086
example valgrind's intervals cover a greater range than the current
1087
kernel interval, it must be the case that a neighbouring free-space
1088
interval belonging to valgrind cannot cover the neighbouring
1089
free-space interval belonging to the kernel. So the disagreement
1092
In other words, we examine each kernel interval in turn, and check
1093
we do not disagree over the range of that interval. Because all of
1094
the address space is examined, any disagreements must eventually be
1098
static Bool sync_check_ok = False;
1100
static void sync_check_mapping_callback ( Addr addr, SizeT len, UInt prot,
1101
UInt dev, UInt ino, ULong offset,
1102
const UChar* filename )
1107
/* If a problem has already been detected, don't continue comparing
1108
segments, so as to avoid flooding the output with error
1116
/* The kernel should not give us wraparounds. */
1117
aspacem_assert(addr <= addr + len - 1);
1119
iLo = find_nsegment_idx( addr );
1120
iHi = find_nsegment_idx( addr + len - 1 );
1122
/* These 5 should be guaranteed by find_nsegment_idx. */
1123
aspacem_assert(0 <= iLo && iLo < nsegments_used);
1124
aspacem_assert(0 <= iHi && iHi < nsegments_used);
1125
aspacem_assert(iLo <= iHi);
1126
aspacem_assert(nsegments[iLo].start <= addr );
1127
aspacem_assert(nsegments[iHi].end >= addr + len - 1 );
1129
/* x86 doesn't differentiate 'x' and 'r' (at least, all except the
1130
most recent NX-bit enabled CPUs) and so recent kernels attempt
1131
to provide execute protection by placing all executable mappings
1132
low down in the address space and then reducing the size of the
1133
code segment to prevent code at higher addresses being executed.
1135
These kernels report which mappings are really executable in
1136
the /proc/self/maps output rather than mirroring what was asked
1137
for when each mapping was created. In order to cope with this we
1138
have a slopyXcheck mode which we enable on x86 - in this mode we
1139
allow the kernel to report execute permission when we weren't
1140
expecting it but not vice versa. */
1141
sloppyXcheck = False;
1142
# if defined(VGA_x86)
1143
sloppyXcheck = True;
1146
/* NSegments iLo .. iHi inclusive should agree with the presented
1148
for (i = iLo; i <= iHi; i++) {
1150
Bool same, cmp_offsets, cmp_devino;
1153
/* compare the kernel's offering against ours. */
1154
same = nsegments[i].kind == SkAnonC
1155
|| nsegments[i].kind == SkAnonV
1156
|| nsegments[i].kind == SkFileC
1157
|| nsegments[i].kind == SkFileV
1158
|| nsegments[i].kind == SkShmC;
1161
if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
1162
if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
1163
if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
1166
= nsegments[i].kind == SkFileC || nsegments[i].kind == SkFileV;
1169
= nsegments[i].dev != 0 || nsegments[i].ino != 0;
1171
/* Consider other reasons to not compare dev/inode */
1173
/* bproc does some godawful hack on /dev/zero at process
1174
migration, which changes the name of it, and its dev & ino */
1175
if (filename && 0==VG_(strcmp)(filename, "/dev/zero (deleted)"))
1178
/* hack apparently needed on MontaVista Linux */
1179
if (filename && VG_(strstr)(filename, "/.lib-ro/"))
1182
/* If we are doing sloppy execute permission checks then we
1183
allow segment to have X permission when we weren't expecting
1184
it (but not vice versa) so if the kernel reported execute
1185
permission then pretend that this segment has it regardless
1186
of what we were expecting. */
1187
if (sloppyXcheck && (prot & VKI_PROT_EXEC) != 0) {
1188
seg_prot |= VKI_PROT_EXEC;
1194
? (nsegments[i].dev == dev && nsegments[i].ino == ino)
1197
? nsegments[i].start-nsegments[i].offset == addr-offset
1200
sync_check_ok = False;
1203
"sync_check_mapping_callback: segment mismatch: V's seg:\n");
1204
show_nsegment_full( 0, &nsegments[i] );
1209
/* Looks harmless. Keep going. */
1213
VG_(debugLog)(0,"aspacem",
1214
"sync_check_mapping_callback: "
1215
"segment mismatch: kernel's seg:\n");
1216
VG_(debugLog)(0,"aspacem",
1217
"start=0x%llx end=0x%llx prot=%u "
1218
"dev=%u ino=%u offset=%lld name=\"%s\"\n",
1219
(ULong)addr, ((ULong)addr) + ((ULong)len) - 1,
1220
prot, dev, ino, offset,
1221
filename ? (HChar*)filename : "(none)" );
1225
/* Callback for each gap (unmapped interval) reported by the kernel.
   Our segments covering the gap must all be SkFree or SkResvn;
   anything else means we think memory is mapped where the kernel
   says it is not. */
static void sync_check_gap_callback ( Addr addr, SizeT len )
{
   Int iLo, iHi, i;

   /* If a problem has already been detected, don't continue comparing
      segments, so as to avoid flooding the output with error
      messages. */
   if (!sync_check_ok)
      return;

   if (len == 0)
      return;

   /* The kernel should not give us wraparounds. */
   aspacem_assert(addr <= addr + len - 1);

   iLo = find_nsegment_idx( addr );
   iHi = find_nsegment_idx( addr + len - 1 );

   /* These 5 should be guaranteed by find_nsegment_idx. */
   aspacem_assert(0 <= iLo && iLo < nsegments_used);
   aspacem_assert(0 <= iHi && iHi < nsegments_used);
   aspacem_assert(iLo <= iHi);
   aspacem_assert(nsegments[iLo].start <= addr );
   aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );

   /* NSegments iLo .. iHi inclusive should agree with the presented
      data. */
   for (i = iLo; i <= iHi; i++) {

      Bool same;

      /* compare the kernel's offering against ours. */
      same = nsegments[i].kind == SkFree
             || nsegments[i].kind == SkResvn;

      if (!same) {
         sync_check_ok = False;
         VG_(debugLog)(
            0,"aspacem",
              "sync_check_mapping_callback: segment mismatch: V's gap:\n");
         show_nsegment_full( 0, &nsegments[i] );
         goto show_kern_gap;
      }
   }

   /* Looks harmless.  Keep going. */
   return;

  show_kern_gap:
   VG_(debugLog)(0,"aspacem",
                   "sync_check_gap_callback: segment mismatch: kernel's gap:\n");
   VG_(debugLog)(0,"aspacem",
                   "start=0x%llx end=0x%llx\n",
                   (ULong)addr, ((ULong)addr) + ((ULong)len) - 1 );
   return;
}
1284
/* Sanity check: check that Valgrind and the kernel agree on the
1285
address space layout. Prints offending segments and call point if
1286
a discrepancy is detected, but does not abort the system. Returned
1287
Bool is False if a discrepancy was found. */
1289
Bool VG_(am_do_sync_check) ( const HChar* fn,
1290
const HChar* file, Int line )
1292
sync_check_ok = True;
1294
VG_(debugLog)(0,"aspacem", "do_sync_check %s:%d\n", file,line);
1295
parse_procselfmaps( sync_check_mapping_callback,
1296
sync_check_gap_callback );
1297
if (!sync_check_ok) {
1298
VG_(debugLog)(0,"aspacem",
1299
"sync check at %s:%d (%s): FAILED\n",
1301
VG_(debugLog)(0,"aspacem", "\n");
1306
VG_(am_show_nsegments)(0,"post syncheck failure");
1307
VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
1313
return sync_check_ok;
1317
/*-----------------------------------------------------------------*/
1319
/*--- Low level access / modification of the segment array. ---*/
1321
/*-----------------------------------------------------------------*/
1323
/* Binary search the interval array for a given address. Since the
1324
array covers the entire address space the search cannot fail. */
1325
static Int find_nsegment_idx ( Addr a )
1327
Addr a_mid_lo, a_mid_hi;
1330
hi = nsegments_used-1;
1332
/* current unsearched space is from lo to hi, inclusive. */
1334
/* Not found. This can't happen. */
1335
aspacem_barf("find_nsegment_idx: not found");
1337
mid = (lo + hi) / 2;
1338
a_mid_lo = nsegments[mid].start;
1339
a_mid_hi = nsegments[mid].end;
1341
if (a < a_mid_lo) { hi = mid-1; continue; }
1342
if (a > a_mid_hi) { lo = mid+1; continue; }
1343
aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
1344
aspacem_assert(0 <= mid && mid < nsegments_used);
1350
/* Finds the segment containing 'a'. Only returns file/anon/resvn
1352
NSegment* VG_(am_find_nsegment) ( Addr a )
1354
Int i = find_nsegment_idx(a);
1355
aspacem_assert(i >= 0 && i < nsegments_used);
1356
aspacem_assert(nsegments[i].start <= a);
1357
aspacem_assert(a <= nsegments[i].end);
1358
if (nsegments[i].kind == SkFree)
1361
return &nsegments[i];
1365
/* Given a pointer to a seg, tries to figure out which one it is in
1366
nsegments[..]. Very paranoid. */
1367
static Int segAddr_to_index ( NSegment* seg )
1370
if (seg < &nsegments[0] || seg >= &nsegments[nsegments_used])
1372
i = ((UChar*)seg - (UChar*)(&nsegments[0])) / sizeof(NSegment);
1373
if (i < 0 || i >= nsegments_used)
1375
if (seg == &nsegments[i])
1381
/* Find the next segment along from 'here', if it is a file/anon/resvn
1383
NSegment* VG_(am_next_nsegment) ( NSegment* here, Bool fwds )
1385
Int i = segAddr_to_index(here);
1386
if (i < 0 || i >= nsegments_used)
1390
if (i >= nsegments_used)
1397
switch (nsegments[i].kind) {
1398
case SkFileC: case SkFileV: case SkShmC:
1399
case SkAnonC: case SkAnonV: case SkResvn:
1400
return &nsegments[i];
1408
/* Trivial fn: return the total amount of space in anonymous mappings,
1409
both for V and the client. Is used for printing stats in
1410
out-of-memory messages. */
1411
ULong VG_(am_get_anonsize_total)( void )
1415
for (i = 0; i < nsegments_used; i++) {
1416
if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkAnonV) {
1417
total += (ULong)nsegments[i].end
1418
- (ULong)nsegments[i].start + 1ULL;
1425
/* Test if a piece of memory is addressable by the client with at
1426
least the "prot" protection permissions by examining the underlying
1427
segments. If freeOk is True then SkFree areas are also allowed.
1430
Bool is_valid_for_client( Addr start, SizeT len, UInt prot, Bool freeOk )
1433
Bool needR, needW, needX;
1436
return True; /* somewhat dubious case */
1437
if (start + len < start)
1438
return False; /* reject wraparounds */
1440
needR = toBool(prot & VKI_PROT_READ);
1441
needW = toBool(prot & VKI_PROT_WRITE);
1442
needX = toBool(prot & VKI_PROT_EXEC);
1444
iLo = find_nsegment_idx(start);
1445
aspacem_assert(start >= nsegments[iLo].start);
1447
if (start+len-1 <= nsegments[iLo].end) {
1448
/* This is a speedup hack which avoids calling find_nsegment_idx
1449
a second time when possible. It is always correct to just
1450
use the "else" clause below, but is_valid_for_client is
1451
called a lot by the leak checker, so avoiding pointless calls
1452
to find_nsegment_idx, which can be expensive, is helpful. */
1455
iHi = find_nsegment_idx(start + len - 1);
1458
for (i = iLo; i <= iHi; i++) {
1459
if ( (nsegments[i].kind == SkFileC
1460
|| nsegments[i].kind == SkAnonC
1461
|| nsegments[i].kind == SkShmC
1462
|| (nsegments[i].kind == SkFree && freeOk)
1463
|| (nsegments[i].kind == SkResvn && freeOk))
1464
&& (needR ? nsegments[i].hasR : True)
1465
&& (needW ? nsegments[i].hasW : True)
1466
&& (needX ? nsegments[i].hasX : True) ) {
1475
/* Test if a piece of memory is addressable by the client with at
1476
least the "prot" protection permissions by examining the underlying
1478
Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
1481
return is_valid_for_client( start, len, prot, False/*free not OK*/ );
1484
/* Variant of VG_(am_is_valid_for_client) which allows free areas to
1485
be consider part of the client's addressable space. It also
1486
considers reservations to be allowable, since from the client's
1487
point of view they don't exist. */
1488
Bool VG_(am_is_valid_for_client_or_free_or_resvn)
1489
( Addr start, SizeT len, UInt prot )
1491
return is_valid_for_client( start, len, prot, True/*free is OK*/ );
1495
/* Test if a piece of memory is addressable by valgrind with at least
1496
PROT_NONE protection permissions by examining the underlying
1498
static Bool is_valid_for_valgrind( Addr start, SizeT len )
1503
return True; /* somewhat dubious case */
1504
if (start + len < start)
1505
return False; /* reject wraparounds */
1507
iLo = find_nsegment_idx(start);
1508
iHi = find_nsegment_idx(start + len - 1);
1509
for (i = iLo; i <= iHi; i++) {
1510
if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkAnonV) {
1520
/* Returns True if any part of the address range is marked as having
1521
translations made from it. This is used to determine when to
1522
discard code, so if in doubt return True. */
1524
static Bool any_Ts_in_range ( Addr start, SizeT len )
1527
aspacem_assert(len > 0);
1528
aspacem_assert(start + len > start);
1529
iLo = find_nsegment_idx(start);
1530
iHi = find_nsegment_idx(start + len - 1);
1531
for (i = iLo; i <= iHi; i++) {
1532
if (nsegments[i].hasT)
1539
/*-----------------------------------------------------------------*/
1541
/*--- Modifying the segment array, and constructing segments. ---*/
1543
/*-----------------------------------------------------------------*/
1545
/* Split the segment containing 'a' into two, so that 'a' is
1546
guaranteed to be the start of a new segment. If 'a' is already the
1547
start of a segment, do nothing. */
1549
static void split_nsegment_at ( Addr a )
1553
aspacem_assert(a > 0);
1554
aspacem_assert(VG_IS_PAGE_ALIGNED(a));
1556
i = find_nsegment_idx(a);
1557
aspacem_assert(i >= 0 && i < nsegments_used);
1559
if (nsegments[i].start == a)
1560
/* 'a' is already the start point of a segment, so nothing to be
1564
/* else we have to slide the segments upwards to make a hole */
1565
if (nsegments_used >= VG_N_SEGMENTS)
1566
aspacem_barf_toolow("VG_N_SEGMENTS");
1567
for (j = nsegments_used-1; j > i; j--)
1568
nsegments[j+1] = nsegments[j];
1571
nsegments[i+1] = nsegments[i];
1572
nsegments[i+1].start = a;
1573
nsegments[i].end = a-1;
1575
if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkFileC)
1576
nsegments[i+1].offset
1577
+= ((ULong)nsegments[i+1].start) - ((ULong)nsegments[i].start);
1579
aspacem_assert(sane_NSegment(&nsegments[i]));
1580
aspacem_assert(sane_NSegment(&nsegments[i+1]));
1584
/* Do the minimum amount of segment splitting necessary to ensure that
1585
sLo is the first address denoted by some segment and sHi is the
1586
highest address denoted by some other segment. Returns the indices
1587
of the lowest and highest segments in the range. */
1590
void split_nsegments_lo_and_hi ( Addr sLo, Addr sHi,
1594
aspacem_assert(sLo < sHi);
1595
aspacem_assert(VG_IS_PAGE_ALIGNED(sLo));
1596
aspacem_assert(VG_IS_PAGE_ALIGNED(sHi+1));
1599
split_nsegment_at(sLo);
1601
split_nsegment_at(sHi+1);
1603
*iLo = find_nsegment_idx(sLo);
1604
*iHi = find_nsegment_idx(sHi);
1605
aspacem_assert(0 <= *iLo && *iLo < nsegments_used);
1606
aspacem_assert(0 <= *iHi && *iHi < nsegments_used);
1607
aspacem_assert(*iLo <= *iHi);
1608
aspacem_assert(nsegments[*iLo].start == sLo);
1609
aspacem_assert(nsegments[*iHi].end == sHi);
1610
/* Not that I'm overly paranoid or anything, definitely not :-) */
1614
/* Add SEG to the collection, deleting/truncating any it overlaps.
1615
This deals with all the tricky cases of splitting up segments as
1618
static void add_segment ( NSegment* seg )
1620
Int i, iLo, iHi, delta;
1621
Bool segment_is_sane;
1623
Addr sStart = seg->start;
1624
Addr sEnd = seg->end;
1626
aspacem_assert(sStart <= sEnd);
1627
aspacem_assert(VG_IS_PAGE_ALIGNED(sStart));
1628
aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd+1));
1630
segment_is_sane = sane_NSegment(seg);
1631
if (!segment_is_sane) show_nsegment_full(0,seg);
1632
aspacem_assert(segment_is_sane);
1634
split_nsegments_lo_and_hi( sStart, sEnd, &iLo, &iHi );
1636
/* Now iLo .. iHi inclusive is the range of segment indices which
1637
seg will replace. If we're replacing more than one segment,
1638
slide those above the range down to fill the hole. */
1640
aspacem_assert(delta >= 0);
1642
for (i = iLo; i < nsegments_used-delta; i++)
1643
nsegments[i] = nsegments[i+delta];
1644
nsegments_used -= delta;
1647
nsegments[iLo] = *seg;
1649
(void)preen_nsegments();
1650
if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
1654
/* Clear out an NSegment record. */
1656
static void init_nsegment ( /*OUT*/NSegment* seg )
1661
seg->smode = SmFixed;
1667
seg->hasR = seg->hasW = seg->hasX = seg->hasT = seg->isCH = False;
1671
/* Make an NSegment which holds a reservation. */
1673
static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
1675
aspacem_assert(start < end);
1676
aspacem_assert(VG_IS_PAGE_ALIGNED(start));
1677
aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
1679
seg->kind = SkResvn;
1685
/*-----------------------------------------------------------------*/
1687
/*--- Startup, including reading /proc/self/maps. ---*/
1689
/*-----------------------------------------------------------------*/
1691
static void read_maps_callback ( Addr addr, SizeT len, UInt prot,
1692
UInt dev, UInt ino, ULong offset,
1693
const UChar* filename )
1696
init_nsegment( &seg );
1698
seg.end = addr+len-1;
1701
seg.offset = offset;
1702
seg.hasR = toBool(prot & VKI_PROT_READ);
1703
seg.hasW = toBool(prot & VKI_PROT_WRITE);
1704
seg.hasX = toBool(prot & VKI_PROT_EXEC);
1707
/* Don't use the presence of a filename to decide if a segment in
1708
the initial /proc/self/maps to decide if the segment is an AnonV
1709
or FileV segment as some systems don't report the filename. Use
1710
the device and inode numbers instead. Fixes bug #124528. */
1712
if (dev != 0 && ino != 0)
1715
seg.fnIdx = allocate_segname( filename );
1717
if (0) show_nsegment( 2,0, &seg );
1718
add_segment( &seg );
1721
/* Initialise the address space manager, setting up the initial
1722
segment list, and reading /proc/self/maps into it. This must
1723
be called before any other function.
1725
Takes a pointer to the SP at the time V gained control. This is
1726
taken to be the highest usable address (more or less). Based on
1727
that (and general consultation of tea leaves, etc) return a
1728
suggested end address for the client's stack. */
1730
Addr VG_(am_startup) ( Addr sp_at_startup )
1733
Addr suggested_clstack_top;
1735
aspacem_assert(sizeof(Word) == sizeof(void*));
1736
aspacem_assert(sizeof(Addr) == sizeof(void*));
1737
aspacem_assert(sizeof(SizeT) == sizeof(void*));
1738
aspacem_assert(sizeof(SSizeT) == sizeof(void*));
1741
/* If these fail, we'd better change the type of dev and ino in
1742
NSegment accordingly. */
1743
struct vki_stat buf;
1744
aspacem_assert(sizeof(buf.st_dev) == sizeof(seg.dev));
1745
aspacem_assert(sizeof(buf.st_ino) == sizeof(seg.ino));
1748
/* Add a single interval covering the entire address space. */
1749
init_nsegment(&seg);
1751
seg.start = Addr_MIN;
1756
/* Establish address limits and block out unusable parts
1759
VG_(debugLog)(2, "aspacem",
1760
" sp_at_startup = 0x%010llx (supplied)\n",
1761
(ULong)sp_at_startup );
1763
aspacem_minAddr = (Addr) 0x04000000; // 64M
1765
# if VG_WORDSIZE == 8
1766
aspacem_maxAddr = (Addr)0x800000000 - 1; // 32G
1767
# ifdef ENABLE_INNER
1768
aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
1771
aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
1774
aspacem_cStart = aspacem_minAddr; // 64M
1775
aspacem_vStart = VG_PGROUNDUP((aspacem_minAddr + aspacem_maxAddr + 1) / 2);
1776
# ifdef ENABLE_INNER
1777
aspacem_vStart -= 0x10000000; // 256M
1780
suggested_clstack_top = aspacem_maxAddr - 16*1024*1024ULL
1783
aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
1784
aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
1785
aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
1786
aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
1787
aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_top + 1));
1789
VG_(debugLog)(2, "aspacem",
1790
" minAddr = 0x%010llx (computed)\n",
1791
(ULong)aspacem_minAddr);
1792
VG_(debugLog)(2, "aspacem",
1793
" maxAddr = 0x%010llx (computed)\n",
1794
(ULong)aspacem_maxAddr);
1795
VG_(debugLog)(2, "aspacem",
1796
" cStart = 0x%010llx (computed)\n",
1797
(ULong)aspacem_cStart);
1798
VG_(debugLog)(2, "aspacem",
1799
" vStart = 0x%010llx (computed)\n",
1800
(ULong)aspacem_vStart);
1801
VG_(debugLog)(2, "aspacem",
1802
"suggested_clstack_top = 0x%010llx (computed)\n",
1803
(ULong)suggested_clstack_top);
1805
if (aspacem_cStart > Addr_MIN) {
1806
init_resvn(&seg, Addr_MIN, aspacem_cStart-1);
1809
if (aspacem_maxAddr < Addr_MAX) {
1810
init_resvn(&seg, aspacem_maxAddr+1, Addr_MAX);
1814
/* Create a 1-page reservation at the notional initial
1815
client/valgrind boundary. This isn't strictly necessary, but
1816
because the advisor does first-fit and starts searches for
1817
valgrind allocations at the boundary, this is kind of necessary
1818
in order to get it to start allocating in the right place. */
1819
init_resvn(&seg, aspacem_vStart, aspacem_vStart + VKI_PAGE_SIZE - 1);
1822
VG_(am_show_nsegments)(2, "Initial layout");
1824
VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
1825
parse_procselfmaps( read_maps_callback, NULL );
1827
VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");
1830
return suggested_clstack_top;
1834
/*-----------------------------------------------------------------*/
1836
/*--- The core query-notify mechanism. ---*/
1838
/*-----------------------------------------------------------------*/
1840
/* Query aspacem to ask where a mapping should go. */
1842
Addr VG_(am_get_advisory) ( MapRequest* req,
1846
/* This function implements allocation policy.
1848
The nature of the allocation request is determined by req, which
1849
specifies the start and length of the request and indicates
1850
whether the start address is mandatory, a hint, or irrelevant,
1851
and by forClient, which says whether this is for the client or
1854
Return values: the request can be vetoed (*ok is set to False),
1855
in which case the caller should not attempt to proceed with
1856
making the mapping. Otherwise, *ok is set to True, the caller
1857
may proceed, and the preferred address at which the mapping
1858
should happen is returned.
1860
Note that this is an advisory system only: the kernel can in
1861
fact do whatever it likes as far as placement goes, and we have
1862
no absolute control over it.
1864
Allocations will never be granted in a reserved area.
1866
The Default Policy is:
1868
Search the address space for two free intervals: one of them
1869
big enough to contain the request without regard to the
1870
specified address (viz, as if it was a floating request) and
1871
the other being able to contain the request at the specified
1872
address (viz, as if were a fixed request). Then, depending on
1873
the outcome of the search and the kind of request made, decide
1874
whether the request is allowable and what address to advise.
1876
The Default Policy is overriden by Policy Exception #1:
1878
If the request is for a fixed client map, we are prepared to
1879
grant it providing all areas inside the request are either
1880
free, reservations, or mappings belonging to the client. In
1881
other words we are prepared to let the client trash its own
1882
mappings if it wants to.
1884
The Default Policy is overriden by Policy Exception #2:
1886
If the request is for a hinted client map, we are prepared to
1887
grant it providing all areas inside the request are either
1888
free or reservations. In other words we are prepared to let
1889
the client have a hinted mapping anywhere it likes provided
1890
it does not trash either any of its own mappings or any of
1891
valgrind's mappings.
1894
Addr holeStart, holeEnd, holeLen;
1895
Bool fixed_not_required;
1897
Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
1899
Addr reqStart = req->rkind==MAny ? 0 : req->start;
1900
Addr reqEnd = reqStart + req->len - 1;
1901
Addr reqLen = req->len;
1903
/* These hold indices for segments found during search, or -1 if not
1908
aspacem_assert(nsegments_used > 0);
1911
VG_(am_show_nsegments)(0,"getAdvisory");
1912
VG_(debugLog)(0,"aspacem", "getAdvisory 0x%llx %lld\n",
1913
(ULong)req->start, (ULong)req->len);
1916
/* Reject zero-length requests */
1917
if (req->len == 0) {
1922
/* Reject wraparounds */
1923
if ((req->rkind==MFixed || req->rkind==MHint)
1924
&& req->start + req->len < req->start) {
1929
/* ------ Implement Policy Exception #1 ------ */
1931
if (forClient && req->rkind == MFixed) {
1932
Int iLo = find_nsegment_idx(reqStart);
1933
Int iHi = find_nsegment_idx(reqEnd);
1935
for (i = iLo; i <= iHi; i++) {
1936
if (nsegments[i].kind == SkFree
1937
|| nsegments[i].kind == SkFileC
1938
|| nsegments[i].kind == SkAnonC
1939
|| nsegments[i].kind == SkShmC
1940
|| nsegments[i].kind == SkResvn) {
1948
/* Acceptable. Granted. */
1952
/* Not acceptable. Fail. */
1957
/* ------ Implement Policy Exception #2 ------ */
1959
if (forClient && req->rkind == MHint) {
1960
Int iLo = find_nsegment_idx(reqStart);
1961
Int iHi = find_nsegment_idx(reqEnd);
1963
for (i = iLo; i <= iHi; i++) {
1964
if (nsegments[i].kind == SkFree
1965
|| nsegments[i].kind == SkResvn) {
1973
/* Acceptable. Granted. */
1977
/* Not acceptable. Fall through to the default policy. */
1980
/* ------ Implement the Default Policy ------ */
1982
/* Don't waste time looking for a fixed match if not requested to. */
1983
fixed_not_required = req->rkind == MAny;
1985
i = find_nsegment_idx(startPoint);
1987
/* Examine holes from index i back round to i-1. Record the
1988
index first fixed hole and the first floating hole which would
1989
satisfy the request. */
1990
for (j = 0; j < nsegments_used; j++) {
1992
if (nsegments[i].kind != SkFree) {
1994
if (i >= nsegments_used) i = 0;
1998
holeStart = nsegments[i].start;
1999
holeEnd = nsegments[i].end;
2002
aspacem_assert(holeStart <= holeEnd);
2003
aspacem_assert(aspacem_minAddr <= holeStart);
2004
aspacem_assert(holeEnd <= aspacem_maxAddr);
2006
/* See if it's any use to us. */
2007
holeLen = holeEnd - holeStart + 1;
2009
if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
2012
if (floatIdx == -1 && holeLen >= reqLen)
2015
/* Don't waste time searching once we've found what we wanted. */
2016
if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
2020
if (i >= nsegments_used) i = 0;
2023
aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
2025
aspacem_assert(nsegments[fixedIdx].kind == SkFree);
2027
aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
2029
aspacem_assert(nsegments[floatIdx].kind == SkFree);
2033
/* Now see if we found anything which can satisfy the request. */
2034
switch (req->rkind) {
2036
if (fixedIdx >= 0) {
2045
if (fixedIdx >= 0) {
2049
if (floatIdx >= 0) {
2051
return nsegments[floatIdx].start;
2056
if (floatIdx >= 0) {
2058
return nsegments[floatIdx].start;
2067
aspacem_barf("getAdvisory: unknown request kind");
2072
/* Convenience wrapper for VG_(am_get_advisory) for client floating or
2073
fixed requests. If start is zero, a floating request is issued; if
2074
nonzero, a fixed request at that address is issued. Same comments
2075
about return values apply. */
2077
Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
2081
mreq.rkind = start==0 ? MAny : MFixed;
2084
return VG_(am_get_advisory)( &mreq, True/*client*/, ok );
2088
/* Notifies aspacem that the client completed an mmap successfully.
2089
The segment array is updated accordingly. If the returned Bool is
2090
True, the caller should immediately discard translations from the
2091
specified address range. */
2094
VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
2095
Int fd, Off64T offset )
2097
HChar buf[VKI_PATH_MAX];
2103
aspacem_assert(len > 0);
2104
aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2105
aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2106
aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
2108
/* Discard is needed if any of the just-trashed range had T. */
2109
needDiscard = any_Ts_in_range( a, len );
2111
init_nsegment( &seg );
2112
seg.kind = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
2114
seg.end = a + len - 1;
2115
seg.hasR = toBool(prot & VKI_PROT_READ);
2116
seg.hasW = toBool(prot & VKI_PROT_WRITE);
2117
seg.hasX = toBool(prot & VKI_PROT_EXEC);
2118
if (!(flags & VKI_MAP_ANONYMOUS)) {
2119
// Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
2120
seg.offset = offset;
2121
if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
2126
if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
2127
seg.fnIdx = allocate_segname( buf );
2130
add_segment( &seg );
2135
/* Notifies aspacem that the client completed a shmat successfully.
2136
The segment array is updated accordingly. If the returned Bool is
2137
True, the caller should immediately discard translations from the
2138
specified address range. */
2141
VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
2146
aspacem_assert(len > 0);
2147
aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2148
aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2150
/* Discard is needed if any of the just-trashed range had T. */
2151
needDiscard = any_Ts_in_range( a, len );
2153
init_nsegment( &seg );
2156
seg.end = a + len - 1;
2158
seg.hasR = toBool(prot & VKI_PROT_READ);
2159
seg.hasW = toBool(prot & VKI_PROT_WRITE);
2160
seg.hasX = toBool(prot & VKI_PROT_EXEC);
2161
add_segment( &seg );
2166
/* Notifies aspacem that an mprotect was completed successfully. The
2167
segment array is updated accordingly. Note, as with
2168
VG_(am_notify_munmap), it is not the job of this function to reject
2169
stupid mprotects, for example the client doing mprotect of
2170
non-client areas. Such requests should be intercepted earlier, by
2171
the syscall wrapper for mprotect. This function merely records
2172
whatever it is told. If the returned Bool is True, the caller
2173
should immediately discard translations from the specified address
2176
Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
2179
Bool newR, newW, newX, needDiscard;
2181
aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2182
aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2187
newR = toBool(prot & VKI_PROT_READ);
2188
newW = toBool(prot & VKI_PROT_WRITE);
2189
newX = toBool(prot & VKI_PROT_EXEC);
2191
/* Discard is needed if we're dumping X permission */
2192
needDiscard = any_Ts_in_range( start, len ) && !newX;
2194
split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2196
iLo = find_nsegment_idx(start);
2197
iHi = find_nsegment_idx(start + len - 1);
2199
for (i = iLo; i <= iHi; i++) {
2200
/* Apply the permissions to all relevant segments. */
2201
switch (nsegments[i].kind) {
2202
case SkAnonC: case SkAnonV: case SkFileC: case SkFileV: case SkShmC:
2203
nsegments[i].hasR = newR;
2204
nsegments[i].hasW = newW;
2205
nsegments[i].hasX = newX;
2206
aspacem_assert(sane_NSegment(&nsegments[i]));
2213
/* Changing permissions could have made previously un-mergable
2214
segments mergeable. Therefore have to re-preen them. */
2215
(void)preen_nsegments();
2221
/* Notifies aspacem that an munmap completed successfully. The
2222
segment array is updated accordingly. As with
2223
VG_(am_notify_munmap), we merely record the given info, and don't
2224
check it for sensibleness. If the returned Bool is True, the
2225
caller should immediately discard translations from the specified
2228
Bool VG_(am_notify_munmap)( Addr start, SizeT len )
2232
aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2233
aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2238
needDiscard = any_Ts_in_range( start, len );
2240
init_nsegment( &seg );
2242
seg.end = start + len - 1;
2244
/* The segment becomes unused (free). Segments from above
2245
aspacem_maxAddr were originally SkResvn and so we make them so
2246
again. Note, this isn't really right when the segment straddles
2247
the aspacem_maxAddr boundary - then really it should be split in
2248
two, the lower part marked as SkFree and the upper part as
2249
SkResvn. Ah well. */
2250
if (start > aspacem_maxAddr
2251
&& /* check previous comparison is meaningful */
2252
aspacem_maxAddr < Addr_MAX)
2255
/* Ditto for segments from below aspacem_minAddr. */
2256
if (seg.end < aspacem_minAddr && aspacem_minAddr > 0)
2261
add_segment( &seg );
2263
/* Unmapping could create two adjacent free segments, so a preen is
2264
needed. add_segment() will do that, so no need to here. */
2270
/*-----------------------------------------------------------------*/
2272
/*--- Handling mappings which do not arise directly from the ---*/
2273
/*--- simulation of the client. ---*/
2275
/*-----------------------------------------------------------------*/
2277
/* --- --- --- map, unmap, protect --- --- --- */
2279
/* Map a file at a fixed address for the client, and update the
2280
segment array accordingly. */
2282
SysRes VG_(am_mmap_file_fixed_client)
2283
( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
2292
HChar buf[VKI_PATH_MAX];
2294
/* Not allowable. */
2296
|| !VG_IS_PAGE_ALIGNED(start)
2297
|| !VG_IS_PAGE_ALIGNED(offset))
2298
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2300
/* Ask for an advisory. If it's negative, fail immediately. */
2304
advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2305
if (!ok || advised != start)
2306
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2308
/* We have been advised that the mapping is allowable at the
2309
specified address. So hand it off to the kernel, and propagate
2310
any resulting failure immediately. */
2311
sres = VG_(am_do_mmap_NO_NOTIFY)(
2312
start, length, prot,
2313
VKI_MAP_FIXED|VKI_MAP_PRIVATE,
2319
if (sres.val != start) {
2320
/* I don't think this can happen. It means the kernel made a
2321
fixed map succeed but not at the requested location. Try to
2322
repair the damage, then return saying the mapping failed. */
2323
(void)do_munmap_NO_NOTIFY( sres.val, length );
2324
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2327
/* Ok, the mapping succeeded. Now notify the interval map. */
2328
init_nsegment( &seg );
2331
seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2332
seg.offset = offset;
2333
seg.hasR = toBool(prot & VKI_PROT_READ);
2334
seg.hasW = toBool(prot & VKI_PROT_WRITE);
2335
seg.hasX = toBool(prot & VKI_PROT_EXEC);
2336
if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
2341
if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
2342
seg.fnIdx = allocate_segname( buf );
2344
add_segment( &seg );
2351
/* Map anonymously at a fixed address for the client, and update
2352
the segment array accordingly. */
2354
SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
2362
/* Not allowable. */
2363
if (length == 0 || !VG_IS_PAGE_ALIGNED(start))
2364
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2366
/* Ask for an advisory. If it's negative, fail immediately. */
2370
advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2371
if (!ok || advised != start)
2372
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2374
/* We have been advised that the mapping is allowable at the
2375
specified address. So hand it off to the kernel, and propagate
2376
any resulting failure immediately. */
2377
sres = VG_(am_do_mmap_NO_NOTIFY)(
2378
start, length, prot,
2379
VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2385
if (sres.val != start) {
2386
/* I don't think this can happen. It means the kernel made a
2387
fixed map succeed but not at the requested location. Try to
2388
repair the damage, then return saying the mapping failed. */
2389
(void)do_munmap_NO_NOTIFY( sres.val, length );
2390
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2393
/* Ok, the mapping succeeded. Now notify the interval map. */
2394
init_nsegment( &seg );
2397
seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2398
seg.hasR = toBool(prot & VKI_PROT_READ);
2399
seg.hasW = toBool(prot & VKI_PROT_WRITE);
2400
seg.hasX = toBool(prot & VKI_PROT_EXEC);
2401
add_segment( &seg );
2408
/* Map anonymously at an unconstrained address for the client, and
2409
update the segment array accordingly. */
2411
SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
2419
/* Not allowable. */
2421
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2423
/* Ask for an advisory. If it's negative, fail immediately. */
2427
advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2429
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2431
/* We have been advised that the mapping is allowable at the
2432
advised address. So hand it off to the kernel, and propagate
2433
any resulting failure immediately. */
2434
sres = VG_(am_do_mmap_NO_NOTIFY)(
2435
advised, length, prot,
2436
VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2442
if (sres.val != advised) {
2443
/* I don't think this can happen. It means the kernel made a
2444
fixed map succeed but not at the requested location. Try to
2445
repair the damage, then return saying the mapping failed. */
2446
(void)do_munmap_NO_NOTIFY( sres.val, length );
2447
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2450
/* Ok, the mapping succeeded. Now notify the interval map. */
2451
init_nsegment( &seg );
2453
seg.start = advised;
2454
seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2455
seg.hasR = toBool(prot & VKI_PROT_READ);
2456
seg.hasW = toBool(prot & VKI_PROT_WRITE);
2457
seg.hasX = toBool(prot & VKI_PROT_EXEC);
2458
add_segment( &seg );
2465
/* Map anonymously at an unconstrained address for V, and update the
2466
segment array accordingly. This is fundamentally how V allocates
2467
itself more address space when needed. */
2469
SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
2477
/* Not allowable. */
2479
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2481
/* Ask for an advisory. If it's negative, fail immediately. */
2485
advised = VG_(am_get_advisory)( &req, False/*valgrind*/, &ok );
2487
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2489
/* We have been advised that the mapping is allowable at the
2490
specified address. So hand it off to the kernel, and propagate
2491
any resulting failure immediately. */
2492
sres = VG_(am_do_mmap_NO_NOTIFY)(
2494
VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
2495
VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2501
if (sres.val != advised) {
2502
/* I don't think this can happen. It means the kernel made a
2503
fixed map succeed but not at the requested location. Try to
2504
repair the damage, then return saying the mapping failed. */
2505
(void)do_munmap_NO_NOTIFY( sres.val, length );
2506
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2509
/* Ok, the mapping succeeded. Now notify the interval map. */
2510
init_nsegment( &seg );
2512
seg.start = advised;
2513
seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2517
add_segment( &seg );
2523
/* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
2525
void* VG_(am_shadow_alloc)(SizeT size)
2527
SysRes sres = VG_(am_mmap_anon_float_valgrind)( size );
2528
return sres.isError ? NULL : (void*)sres.val;
2533
/* Map a file at an unconstrained address for V, and update the
2534
segment array accordingly. This is used by V for transiently
2535
mapping in object files to read their debug info. */
2537
SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
2538
Int fd, Off64T offset )
2547
HChar buf[VKI_PATH_MAX];
2549
/* Not allowable. */
2550
if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
2551
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2553
/* Ask for an advisory. If it's negative, fail immediately. */
2557
advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2559
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2561
/* We have been advised that the mapping is allowable at the
2562
specified address. So hand it off to the kernel, and propagate
2563
any resulting failure immediately. */
2564
sres = VG_(am_do_mmap_NO_NOTIFY)(
2565
advised, length, prot,
2566
VKI_MAP_FIXED|VKI_MAP_PRIVATE,
2572
if (sres.val != advised) {
2573
/* I don't think this can happen. It means the kernel made a
2574
fixed map succeed but not at the requested location. Try to
2575
repair the damage, then return saying the mapping failed. */
2576
(void)do_munmap_NO_NOTIFY( sres.val, length );
2577
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2580
/* Ok, the mapping succeeded. Now notify the interval map. */
2581
init_nsegment( &seg );
2583
seg.start = sres.val;
2584
seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2585
seg.offset = offset;
2586
seg.hasR = toBool(prot & VKI_PROT_READ);
2587
seg.hasW = toBool(prot & VKI_PROT_WRITE);
2588
seg.hasX = toBool(prot & VKI_PROT_EXEC);
2589
if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
2594
if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
2595
seg.fnIdx = allocate_segname( buf );
2597
add_segment( &seg );
2604
/* --- --- munmap helper --- --- */
2607
SysRes am_munmap_both_wrk ( /*OUT*/Bool* need_discard,
2608
Addr start, SizeT len, Bool forClient )
2613
if (!VG_IS_PAGE_ALIGNED(start))
2617
*need_discard = False;
2618
return VG_(mk_SysRes_Success)( 0 );
2621
if (start + len < len)
2624
len = VG_PGROUNDUP(len);
2625
aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2626
aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2629
if (!VG_(am_is_valid_for_client_or_free_or_resvn)
2630
( start, len, VKI_PROT_NONE ))
2633
if (!is_valid_for_valgrind( start, len ))
2637
d = any_Ts_in_range( start, len );
2639
sres = do_munmap_NO_NOTIFY( start, len );
2643
VG_(am_notify_munmap)( start, len );
2649
return VG_(mk_SysRes_Error)( VKI_EINVAL );
2652
/* Unmap the given address range and update the segment array
2653
accordingly. This fails if the range isn't valid for the client.
2654
If *need_discard is True after a successful return, the caller
2655
should immediately discard translations from the specified address
2658
SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
2659
Addr start, SizeT len )
2661
return am_munmap_both_wrk( need_discard, start, len, True/*client*/ );
2664
/* Unmap the given address range and update the segment array
2665
accordingly. This fails if the range isn't valid for valgrind. */
2667
SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
2670
SysRes r = am_munmap_both_wrk( &need_discard,
2671
start, len, False/*valgrind*/ );
2672
/* If this assertion fails, it means we allowed translations to be
2673
made from a V-owned section. Which shouldn't happen. */
2675
aspacem_assert(!need_discard);
2679
/* Let (start,len) denote an area within a single Valgrind-owned
2680
segment (anon or file). Change the ownership of [start, start+len)
2681
to the client instead. Fails if (start,len) does not denote a
2682
suitable segment. */
2684
Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
2690
if (start + len < start)
2692
if (!VG_IS_PAGE_ALIGNED(start) || !VG_IS_PAGE_ALIGNED(len))
2695
i = find_nsegment_idx(start);
2696
if (nsegments[i].kind != SkFileV && nsegments[i].kind != SkAnonV)
2698
if (start+len-1 > nsegments[i].end)
2701
aspacem_assert(start >= nsegments[i].start);
2702
aspacem_assert(start+len-1 <= nsegments[i].end);
2704
/* This scheme is like how mprotect works: split the to-be-changed
2705
range into its own segment(s), then mess with them (it). There
2706
should be only one. */
2707
split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2708
aspacem_assert(iLo == iHi);
2709
switch (nsegments[iLo].kind) {
2710
case SkFileV: nsegments[iLo].kind = SkFileC; break;
2711
case SkAnonV: nsegments[iLo].kind = SkAnonC; break;
2712
default: aspacem_assert(0); /* can't happen - guarded above */
2720
/* --- --- --- reservations --- --- --- */
2722
/* Create a reservation from START .. START+LENGTH-1, with the given
2723
ShrinkMode. When checking whether the reservation can be created,
2724
also ensure that at least abs(EXTRA) extra free bytes will remain
2725
above (> 0) or below (< 0) the reservation.
2727
The reservation will only be created if it, plus the extra-zone,
2728
falls entirely within a single free segment. The returned Bool
2729
indicates whether the creation succeeded. */
2731
Bool VG_(am_create_reservation) ( Addr start, SizeT length,
2732
ShrinkMode smode, SSizeT extra )
2737
/* start and end, not taking into account the extra space. */
2738
Addr start1 = start;
2739
Addr end1 = start + length - 1;
2741
/* start and end, taking into account the extra space. */
2742
Addr start2 = start1;
2745
if (extra < 0) start2 += extra; // this moves it down :-)
2746
if (extra > 0) end2 += extra;
2748
aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2749
aspacem_assert(VG_IS_PAGE_ALIGNED(start+length));
2750
aspacem_assert(VG_IS_PAGE_ALIGNED(start2));
2751
aspacem_assert(VG_IS_PAGE_ALIGNED(end2+1));
2753
startI = find_nsegment_idx( start2 );
2754
endI = find_nsegment_idx( end2 );
2756
/* If the start and end points don't fall within the same (free)
2757
segment, we're hosed. This does rely on the assumption that all
2758
mergeable adjacent segments can be merged, but add_segment()
2759
should ensure that. */
2763
if (nsegments[startI].kind != SkFree)
2766
/* Looks good - make the reservation. */
2767
aspacem_assert(nsegments[startI].start <= start2);
2768
aspacem_assert(end2 <= nsegments[startI].end);
2770
init_nsegment( &seg );
2772
seg.start = start1; /* NB: extra space is not included in the
2776
add_segment( &seg );
2783
/* Let SEG be an anonymous client mapping. This fn extends the
2784
mapping by DELTA bytes, taking the space from a reservation section
2785
which must be adjacent. If DELTA is positive, the segment is
2786
extended forwards in the address space, and the reservation must be
2787
the next one along. If DELTA is negative, the segment is extended
2788
backwards in the address space and the reservation must be the
2789
previous one. DELTA must be page aligned. abs(DELTA) must not
2790
exceed the size of the reservation segment minus one page, that is,
2791
the reservation segment after the operation must be at least one
2794
Bool VG_(am_extend_into_adjacent_reservation_client) ( NSegment* seg,
2801
/* Find the segment array index for SEG. If the assertion fails it
2802
probably means you passed in a bogus SEG. */
2803
segA = segAddr_to_index( seg );
2804
aspacem_assert(segA >= 0 && segA < nsegments_used);
2806
if (nsegments[segA].kind != SkAnonC)
2812
prot = (nsegments[segA].hasR ? VKI_PROT_READ : 0)
2813
| (nsegments[segA].hasW ? VKI_PROT_WRITE : 0)
2814
| (nsegments[segA].hasX ? VKI_PROT_EXEC : 0);
2816
aspacem_assert(VG_IS_PAGE_ALIGNED(delta<0 ? -delta : delta));
2820
/* Extending the segment forwards. */
2822
if (segR >= nsegments_used
2823
|| nsegments[segR].kind != SkResvn
2824
|| nsegments[segR].smode != SmLower
2825
|| nsegments[segR].start != nsegments[segA].end + 1
2826
|| delta + VKI_PAGE_SIZE
2827
> (nsegments[segR].end - nsegments[segR].start + 1))
2830
/* Extend the kernel's mapping. */
2831
sres = VG_(am_do_mmap_NO_NOTIFY)(
2832
nsegments[segR].start, delta,
2834
VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2838
return False; /* kernel bug if this happens? */
2839
if (sres.val != nsegments[segR].start) {
2840
/* kernel bug if this happens? */
2841
(void)do_munmap_NO_NOTIFY( sres.val, delta );
2845
/* Ok, success with the kernel. Update our structures. */
2846
nsegments[segR].start += delta;
2847
nsegments[segA].end += delta;
2848
aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2852
/* Extending the segment backwards. */
2854
aspacem_assert(delta > 0);
2858
|| nsegments[segR].kind != SkResvn
2859
|| nsegments[segR].smode != SmUpper
2860
|| nsegments[segR].end + 1 != nsegments[segA].start
2861
|| delta + VKI_PAGE_SIZE
2862
> (nsegments[segR].end - nsegments[segR].start + 1))
2865
/* Extend the kernel's mapping. */
2866
sres = VG_(am_do_mmap_NO_NOTIFY)(
2867
nsegments[segA].start-delta, delta,
2869
VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2873
return False; /* kernel bug if this happens? */
2874
if (sres.val != nsegments[segA].start-delta) {
2875
/* kernel bug if this happens? */
2876
(void)do_munmap_NO_NOTIFY( sres.val, delta );
2880
/* Ok, success with the kernel. Update our structures. */
2881
nsegments[segR].end -= delta;
2882
nsegments[segA].start -= delta;
2883
aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2892
/* --- --- --- resizing/move a mapping --- --- --- */
2894
/* Let SEG be a client mapping (anonymous or file). This fn extends
2895
the mapping forwards only by DELTA bytes, and trashes whatever was
2896
in the new area. Fails if SEG is not a single client mapping or if
2897
the new area is not accessible to the client. Fails if DELTA is
2898
not page aligned. *seg is invalid after a successful return. If
2899
*need_discard is True after a successful return, the caller should
2900
immediately discard translations from the new area. */
2902
Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
2903
NSegment* seg, SizeT delta )
2907
NSegment seg_copy = *seg;
2908
SizeT seg_old_len = seg->end + 1 - seg->start;
2911
VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");
2913
if (seg->kind != SkFileC && seg->kind != SkAnonC)
2916
if (delta == 0 || !VG_IS_PAGE_ALIGNED(delta))
2919
xStart = seg->end+1;
2920
if (xStart + delta < delta)
2923
if (!VG_(am_is_valid_for_client_or_free_or_resvn)( xStart, delta,
2928
sres = do_extend_mapping_NO_NOTIFY( seg->start,
2930
seg_old_len + delta );
2935
/* the area must not have moved */
2936
aspacem_assert(sres.val == seg->start);
2939
*need_discard = any_Ts_in_range( seg_copy.end+1, delta );
2941
seg_copy.end += delta;
2942
add_segment( &seg_copy );
2945
VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");
2952
/* Remap the old address range to the new address range. Fails if any
2953
parameter is not page aligned, if the either size is zero, if any
2954
wraparound is implied, if the old address range does not fall
2955
entirely within a single segment, if the new address range overlaps
2956
with the old one, or if the old address range is not a valid client
2957
mapping. If *need_discard is True after a successful return, the
2958
caller should immediately discard translations from both specified
2961
Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
2962
Addr old_addr, SizeT old_len,
2963
Addr new_addr, SizeT new_len )
2969
if (old_len == 0 || new_len == 0)
2972
if (!VG_IS_PAGE_ALIGNED(old_addr) || !VG_IS_PAGE_ALIGNED(old_len)
2973
|| !VG_IS_PAGE_ALIGNED(new_addr) || !VG_IS_PAGE_ALIGNED(new_len))
2976
if (old_addr + old_len < old_addr
2977
|| new_addr + new_len < new_addr)
2980
if (old_addr + old_len - 1 < new_addr
2981
|| new_addr + new_len - 1 < old_addr) {
2986
iLo = find_nsegment_idx( old_addr );
2987
iHi = find_nsegment_idx( old_addr + old_len - 1 );
2991
if (nsegments[iLo].kind != SkFileC && nsegments[iLo].kind != SkAnonC)
2994
sres = do_relocate_nooverlap_mapping_NO_NOTIFY( old_addr, old_len,
2995
new_addr, new_len );
3000
aspacem_assert(sres.val == new_addr);
3003
*need_discard = any_Ts_in_range( old_addr, old_len )
3004
|| any_Ts_in_range( new_addr, new_len );
3006
seg = nsegments[iLo];
3008
/* Mark the new area based on the old seg. */
3009
if (seg.kind == SkFileC) {
3010
seg.offset += ((ULong)old_addr) - ((ULong)seg.start);
3012
aspacem_assert(seg.kind == SkAnonC);
3013
aspacem_assert(seg.offset == 0);
3015
seg.start = new_addr;
3016
seg.end = new_addr + new_len - 1;
3017
add_segment( &seg );
3019
/* Create a free hole in the old location. */
3020
init_nsegment( &seg );
3021
seg.start = old_addr;
3022
seg.end = old_addr + old_len - 1;
3023
/* See comments in VG_(am_notify_munmap) about this SkResvn vs
3025
if (old_addr > aspacem_maxAddr
3026
&& /* check previous comparison is meaningful */
3027
aspacem_maxAddr < Addr_MAX)
3032
add_segment( &seg );
3039
/*-----------------------------------------------------------------*/
3041
/*--- Manage stacks for Valgrind itself. ---*/
3043
/*-----------------------------------------------------------------*/
3045
/* Allocate and initialise a VgStack (anonymous client space).
3046
Protect the stack active area and the guard areas appropriately.
3047
Returns NULL on failure, else the address of the bottom of the
3048
stack. On success, also sets *initial_sp to what the stack pointer
3049
should be set to. */
3051
VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
3059
/* Allocate the stack. */
3060
szB = VG_STACK_GUARD_SZB
3061
+ VG_STACK_ACTIVE_SZB + VG_STACK_GUARD_SZB;
3063
sres = VG_(am_mmap_anon_float_valgrind)( szB );
3067
stack = (VgStack*)sres.val;
3069
aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
3070
aspacem_assert(VG_IS_PAGE_ALIGNED(stack));
3072
/* Protect the guard areas. */
3073
sres = do_mprotect_NO_NOTIFY(
3075
VG_STACK_GUARD_SZB, VKI_PROT_NONE
3077
if (sres.isError) goto protect_failed;
3078
VG_(am_notify_mprotect)(
3079
(Addr) &stack->bytes[0],
3080
VG_STACK_GUARD_SZB, VKI_PROT_NONE
3083
sres = do_mprotect_NO_NOTIFY(
3084
(Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
3085
VG_STACK_GUARD_SZB, VKI_PROT_NONE
3087
if (sres.isError) goto protect_failed;
3088
VG_(am_notify_mprotect)(
3089
(Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB],
3090
VG_STACK_GUARD_SZB, VKI_PROT_NONE
3093
/* Looks good. Fill the active area with junk so we can later
3094
tell how much got used. */
3096
p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
3097
for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
3100
*initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_STACK_ACTIVE_SZB];
3102
*initial_sp &= ~((Addr)0xF);
3104
VG_(debugLog)( 1,"aspacem","allocated thread stack at 0x%llx size %d\n",
3105
(ULong)(Addr)stack, szB);
3110
/* The stack was allocated, but we can't protect it. Unmap it and
3111
return NULL (failure). */
3112
(void)do_munmap_NO_NOTIFY( (Addr)stack, szB );
3118
/* Figure out how many bytes of the stack's active area have not
3119
been used. Used for estimating if we are close to overflowing it. */
3121
Int VG_(am_get_VgStack_unused_szB)( VgStack* stack )
3126
p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
3127
for (i = 0; i < VG_STACK_ACTIVE_SZB/sizeof(UInt); i++)
3128
if (p[i] != 0xDEADBEEF)
3131
return i * sizeof(UInt);
3135
/*-----------------------------------------------------------------*/
3137
/*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
3138
/*--- Almost completely independent of the stuff above. The ---*/
3139
/*--- only function it 'exports' to the code above this comment ---*/
3140
/*--- is parse_procselfmaps. ---*/
3142
/*-----------------------------------------------------------------*/
3144
/* Size of a smallish table used to read /proc/self/map entries. */
3145
#define M_PROCMAP_BUF 100000
3147
/* static ... to keep it out of the stack frame. */
3148
static Char procmap_buf[M_PROCMAP_BUF];
3150
/* Records length of /proc/self/maps read into procmap_buf. */
3151
static Int buf_n_tot;
3155
static Int hexdigit ( Char c )
3157
if (c >= '0' && c <= '9') return (Int)(c - '0');
3158
if (c >= 'a' && c <= 'f') return 10 + (Int)(c - 'a');
3159
if (c >= 'A' && c <= 'F') return 10 + (Int)(c - 'A');
3163
static Int decdigit ( Char c )
3165
if (c >= '0' && c <= '9') return (Int)(c - '0');
3169
static Int readchar ( const Char* buf, Char* ch )
3171
if (*buf == 0) return 0;
3176
static Int readhex ( const Char* buf, UWord* val )
3178
/* Read a word-sized hex number. */
3181
while (hexdigit(*buf) >= 0) {
3182
*val = (*val << 4) + hexdigit(*buf);
3188
static Int readhex64 ( const Char* buf, ULong* val )
3190
/* Read a potentially 64-bit hex number. */
3193
while (hexdigit(*buf) >= 0) {
3194
*val = (*val << 4) + hexdigit(*buf);
3200
static Int readdec ( const Char* buf, UInt* val )
3204
while (hexdigit(*buf) >= 0) {
3205
*val = (*val * 10) + decdigit(*buf);
3212
/* Get the contents of /proc/self/maps into a static buffer. If
3213
there's a syntax error, it won't fit, or other failure, just
3216
static void read_procselfmaps_into_buf ( void )
3221
/* Read the initial memory mapping from the /proc filesystem. */
3222
fd = aspacem_open( "/proc/self/maps", VKI_O_RDONLY, 0 );
3224
aspacem_barf("can't open /proc/self/maps");
3228
n_chunk = aspacem_read( fd.val, &procmap_buf[buf_n_tot],
3229
M_PROCMAP_BUF - buf_n_tot );
3230
buf_n_tot += n_chunk;
3231
} while ( n_chunk > 0 && buf_n_tot < M_PROCMAP_BUF );
3233
aspacem_close(fd.val);
3235
if (buf_n_tot >= M_PROCMAP_BUF-5)
3236
aspacem_barf_toolow("M_PROCMAP_BUF");
3238
aspacem_barf("I/O error on /proc/self/maps");
3240
procmap_buf[buf_n_tot] = 0;
3243
/* Parse /proc/self/maps. For each map entry, call
3244
record_mapping, passing it, in this order:
3246
start address in memory
3248
page protections (using the VKI_PROT_* flags)
3249
mapped file device and inode
3250
offset in file, or zero if no file
3251
filename, zero terminated, or NULL if no file
3253
So the sig of the called fn might be
3255
void (*record_mapping)( Addr start, SizeT size, UInt prot,
3256
UInt dev, UInt info,
3257
ULong foffset, UChar* filename )
3259
Note that the supplied filename is transiently stored; record_mapping
3260
should make a copy if it wants to keep it.
3262
Nb: it is important that this function does not alter the contents of
3265
static void parse_procselfmaps (
3266
void (*record_mapping)( Addr addr, SizeT len, UInt prot,
3267
UInt dev, UInt ino, ULong offset,
3268
const UChar* filename ),
3269
void (*record_gap)( Addr addr, SizeT len )
3273
Addr start, endPlusOne, gapStart;
3275
UChar rr, ww, xx, pp, ch, tmp;
3277
UWord maj, min, dev;
3280
foffset = ino = 0; /* keep gcc-4.1.0 happy */
3282
read_procselfmaps_into_buf();
3284
aspacem_assert('\0' != procmap_buf[0] && 0 != buf_n_tot);
3287
VG_(debugLog)(0, "procselfmaps", "raw:\n%s\n", procmap_buf);
3289
/* Ok, it's safely aboard. Parse the entries. */
3291
gapStart = Addr_MIN;
3293
if (i >= buf_n_tot) break;
3295
/* Read (without fscanf :) the pattern %16x-%16x %c%c%c%c %16x %2x:%2x %d */
3296
j = readhex(&procmap_buf[i], &start);
3297
if (j > 0) i += j; else goto syntaxerror;
3298
j = readchar(&procmap_buf[i], &ch);
3299
if (j == 1 && ch == '-') i += j; else goto syntaxerror;
3300
j = readhex(&procmap_buf[i], &endPlusOne);
3301
if (j > 0) i += j; else goto syntaxerror;
3303
j = readchar(&procmap_buf[i], &ch);
3304
if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3306
j = readchar(&procmap_buf[i], &rr);
3307
if (j == 1 && (rr == 'r' || rr == '-')) i += j; else goto syntaxerror;
3308
j = readchar(&procmap_buf[i], &ww);
3309
if (j == 1 && (ww == 'w' || ww == '-')) i += j; else goto syntaxerror;
3310
j = readchar(&procmap_buf[i], &xx);
3311
if (j == 1 && (xx == 'x' || xx == '-')) i += j; else goto syntaxerror;
3312
/* This field is the shared/private flag */
3313
j = readchar(&procmap_buf[i], &pp);
3314
if (j == 1 && (pp == 'p' || pp == '-' || pp == 's'))
3315
i += j; else goto syntaxerror;
3317
j = readchar(&procmap_buf[i], &ch);
3318
if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3320
j = readhex64(&procmap_buf[i], &foffset);
3321
if (j > 0) i += j; else goto syntaxerror;
3323
j = readchar(&procmap_buf[i], &ch);
3324
if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3326
j = readhex(&procmap_buf[i], &maj);
3327
if (j > 0) i += j; else goto syntaxerror;
3328
j = readchar(&procmap_buf[i], &ch);
3329
if (j == 1 && ch == ':') i += j; else goto syntaxerror;
3330
j = readhex(&procmap_buf[i], &min);
3331
if (j > 0) i += j; else goto syntaxerror;
3333
j = readchar(&procmap_buf[i], &ch);
3334
if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3336
j = readdec(&procmap_buf[i], &ino);
3337
if (j > 0) i += j; else goto syntaxerror;
3342
VG_(debugLog)(0, "Valgrind:",
3343
"FATAL: syntax error reading /proc/self/maps\n");
3350
for (; k <= i; k++) {
3351
buf50[m] = procmap_buf[k];
3355
VG_(debugLog)(0, "procselfmaps", "Last 50 chars: '%s'\n", buf50);
3361
/* Try and find the name of the file mapped to this segment, if
3362
it exists. Note that files can contains spaces. */
3364
// Move i to the next non-space char, which should be either a '/' or
3366
while (procmap_buf[i] == ' ' && i < buf_n_tot-1) i++;
3368
// Move i_eol to the end of the line.
3370
while (procmap_buf[i_eol] != '\n' && i_eol < buf_n_tot-1) i_eol++;
3372
// If there's a filename...
3373
if (i < i_eol-1 && procmap_buf[i] == '/') {
3374
/* Minor hack: put a '\0' at the filename end for the call to
3375
'record_mapping', then restore the old char with 'tmp'. */
3376
filename = &procmap_buf[i];
3377
tmp = filename[i_eol - i];
3378
filename[i_eol - i] = '\0';
3386
if (rr == 'r') prot |= VKI_PROT_READ;
3387
if (ww == 'w') prot |= VKI_PROT_WRITE;
3388
if (xx == 'x') prot |= VKI_PROT_EXEC;
3390
/* Linux has two ways to encode a device number when it
3391
is exposed to user space (via fstat etc). The old way
3392
is the traditional unix scheme that produces a 16 bit
3393
device number with the top 8 being the major number and
3394
the bottom 8 the minor number.
3396
The new scheme allows for a 12 bit major number and
3397
a 20 bit minor number by using a 32 bit device number
3398
and putting the top 12 bits of the minor number into
3399
the top 12 bits of the device number thus leaving an
3400
extra 4 bits for the major number.
3402
If the minor and major number are both single byte
3403
values then both schemes give the same result so we
3404
use the new scheme here in case either number is
3405
outside the 0-255 range and then use fstat64 when
3406
available (or fstat on 64 bit systems) so that we
3407
should always have a new style device number and
3408
everything should match. */
3409
dev = (min & 0xff) | (maj << 8) | ((min & ~0xff) << 12);
3411
if (record_gap && gapStart < start)
3412
(*record_gap) ( gapStart, start-gapStart );
3414
(*record_mapping) ( start, endPlusOne-start,
3416
foffset, filename );
3419
filename[i_eol - i] = tmp;
3423
gapStart = endPlusOne;
3426
if (record_gap && gapStart < Addr_MAX)
3427
(*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
3430
/*--------------------------------------------------------------------*/
3432
/*--------------------------------------------------------------------*/