#include <sys/mman.h>

#include "qemu-common.h"

//#define DEBUG_MMAP
pthread_mutex_t mmap_mutex;
static int __thread mmap_lock_count;

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
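/* The per-thread lock count makes mmap_lock()/mmap_unlock() safe to nest:
   only the outermost acquire/release pair actually touches mmap_mutex. */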
/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
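/* After fork() only the forking thread exists in the child, so the mutex
   cannot simply be unlocked there and is re-initialized instead; the parent
   just releases the lock taken in mmap_fork_start(). */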
/* We aren't threadsafe to start with, so no need to worry about locking. */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
void *qemu_vmalloc(size_t size)
{
    void *p;
    unsigned long addr;

    /* Use map and mark the pages as used. */
    p = mmap(NULL, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    addr = (unsigned long)p;
    if (addr == (target_ulong) addr) {
        /* Allocated region overlaps guest address space. */
        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
                       PAGE_RESERVED);
    }
    return p;
}
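/* qemu_vmalloc() marks its pages as reserved whenever the host address also
   fits in a target_ulong, i.e. whenever the allocation lies inside the range
   the guest can address, so that a later guest mmap() is not placed on top
   of it. */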
void *qemu_malloc(size_t size)
{
    char *p;

    size += 16;
    p = qemu_vmalloc(size);
    *(size_t *)p = size;
    return p + 16;
}

/* We use map, which is always zero initialized. */
void *qemu_mallocz(size_t size)
{
    return qemu_malloc(size);
}
void qemu_free(void *ptr)
{
    /* FIXME: We should unmark the reserved pages here. However this gets
       complicated when one target page spans multiple host pages, so we
       don't bother. */
    size_t *p;

    p = (size_t *)((char *)ptr - 16);
    munmap(p, *p);
}
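/* qemu_malloc() stores the allocation size in a 16-byte header in front of
   the pointer it returns; qemu_free() steps back to that header to recover
   the length to pass to munmap(). */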
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end;
    int ret;

    end = start + len;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    /* ... */

    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* ... */
    }
    /* ... */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        /* ... */
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    /* ... */
}
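/* In target_mprotect(), start and end are target addresses that need not be
   host-page aligned when the target page size is smaller than the host page
   size, so the partial host pages at either end are handled separately before
   the aligned middle of the range is changed with a single mprotect() call. */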
/* map an incomplete host page */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    /* ... */
}
static abi_ulong mmap_next_start = 0x40000000;

unsigned long last_brk;
/* find a free memory area of size 'size'. The search starts at
   'start'. If 'start' == 0, then a default start address is used.
   Return -1 if error.
*/
/* page_init() marks pages used by the host as reserved to be sure not
   to use them. */
static abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    abi_ulong addr, addr1, addr_start;
    unsigned long new_brk;

    new_brk = (unsigned long)sbrk(0);
    if (last_brk && last_brk < new_brk && last_brk == (target_ulong)last_brk) {
        /* This is a hack to catch the host allocating memory with brk().
           If it uses mmap then we lose.
           FIXME: We really want to avoid the host allocating memory in
           the first place, and maybe leave some slack to avoid switching
           to mmap. */
        page_set_flags(last_brk & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(new_brk),
                       PAGE_RESERVED);
    }
    last_brk = new_brk;

    size = HOST_PAGE_ALIGN(size);
    start = start & qemu_host_page_mask;
    /* ... */
}
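/* mmap_find_vma() rounds both the size and the start hint to host page
   granularity, since guest mappings can only be placed on host page
   boundaries; it then searches the guest page flags for an unused run of
   that length. */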
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
    unsigned long host_start;

#ifdef DEBUG_MMAP
    printf("mmap: start=0x" TARGET_FMT_lx
           /* ... */);
#endif
    /* ... */
    mmap_start = mmap_find_vma(real_start, host_len);
    if (mmap_start == (abi_ulong)-1) {
        /* ... */
    }
    /* Note: we prefer to control the mapping address. It is
       especially important if qemu_host_page_size >
       qemu_real_host_page_size */
    p = mmap(g2h(mmap_start),
             host_len, prot, flags | MAP_FIXED, fd, host_offset);
    if (p == MAP_FAILED) {
        /* ... */
    }
    /* update start so that it points to the file position at 'offset' */
    host_start = (unsigned long)p;
    if (!(flags & MAP_ANONYMOUS))
        host_start += offset - host_offset;
    start = h2g(host_start);
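/* Without MAP_FIXED the host mapping is created at a host-page-aligned file
   offset (host_offset); start is then adjusted so that the returned guest
   address corresponds to the byte at the originally requested 'offset'. */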
    if (start & ~TARGET_PAGE_MASK) {
        /* ... */
    }
    end = start + len;
    real_end = HOST_PAGE_ALIGN(end);

    for (addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
        flg = page_get_flags(addr);
        if (flg & PAGE_RESERVED) {
            /* ... */
        }
    }
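/* Pages flagged PAGE_RESERVED are already used by the host (by QEMU itself
   or its allocations), so a fixed mapping that would cover them is refused. */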
    /* worst case: we cannot map the file because the offset is not
       aligned, so we read it */
    if (!(flags & MAP_ANONYMOUS) &&
        (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE)) {
            /* ... */
        }
        retaddr = target_mmap(start, len, prot | PROT_WRITE,
                              MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                              -1, 0);
        if (retaddr == -1) {
            /* ... */
        }
        pread(fd, g2h(start), len, offset);
        if (!(prot & PROT_WRITE)) {
            ret = target_mprotect(start, len, prot);
            /* ... */
        }
        /* ... */
    }
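/* A writable shared mapping cannot be emulated this way, because the pread()
   copy would never be written back to the file, so that case is rejected.
   Otherwise the range is mapped anonymous and writable, the file contents are
   read into it, and the originally requested protection is applied last. */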
    ret = mmap_frag(real_start, start, end,
                    prot, flags, fd, offset);
    /* ... */
    ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                    prot, flags, fd, offset);
    /* ... */
    real_start += qemu_host_page_size;
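/* mmap_frag() fills in the partial host page that contains the start of the
   requested range; once it is handled, real_start advances to the next host
   page boundary so the remaining, aligned part can be mapped directly. */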
    /* handle the end of the mapping */
    /* ... */

    p = mmap(g2h(real_start), real_end - real_start,
             prot, flags, fd, offset1);
    if (p == MAP_FAILED) {
        /* ... */
    }

    page_set_flags(start, start + len, prot | PAGE_VALID);
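/* Whichever path was taken, the guest page table is updated last, so that
   page_get_flags() reports PAGE_VALID plus the requested protection bits for
   the whole target range. */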
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_FMT_lx "\n", start);
    page_dump(stdout);
#endif
    /* ... */
}
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong real_start, real_end;
    int ret;

    /* ... */
    real_end -= qemu_host_page_size;
    /* ... */

    /* unmap what we can */
    if (real_start < real_end) {
        ret = munmap(g2h(real_start), real_end - real_start);
    }
    /* ... */

    page_set_flags(start, start + len, 0);
    /* ... */
}
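/* target_munmap() can only release whole host pages; partial host pages at
   the edges of the range stay mapped, but the target page flags for the whole
   guest range are cleared so those pages are no longer considered valid. */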
/* XXX: currently, we only handle MAP_ANONYMOUS and not MAP_FIXED
   blocks which have been allocated starting on a host page */
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, abi_ulong flags,
                       abi_ulong new_addr)
{
    int prot;
    unsigned long host_addr;

    /* XXX: use 5 args syscall */
    host_addr = (long)mremap(g2h(old_addr), old_size, new_size, flags);
    if (host_addr == -1) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    return new_addr;
}
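/* On success target_mremap() clears the page flags of the old guest range
   and lets the new range inherit the old protection, keeping page_get_flags()
   consistent with what the host now has mapped. */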