/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
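
/* These wrappers implement the target's mmap/mprotect/munmap/mremap/msync
   on top of the host syscalls, taking care of the case where the emulated
   target page size is smaller than the host page size.  The include block
   of the original file is not part of this excerpt; a plausible minimal set
   (an assumption, not taken from the source) would be the libc headers used
   below plus the qemu-internal header assumed to declare page_get_flags(),
   page_set_flags(), the TARGET_PAGE_* macros and the qemu_host_page_*
   globals: */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>

#include "qemu.h"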
/* NOTE: all the constants are the HOST ones */

int target_mprotect(unsigned long start, unsigned long len, int prot)
{
    unsigned long end, host_start, host_end, addr;
    int prot1, ret;

#ifdef DEBUG_MMAP
    printf("mprotect: start=0x%lx len=0x%lx prot=%c%c%c\n", start, len,
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
        return -EINVAL;
    if (len == 0)
        return 0;

    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start: preserve the protection of the
           target pages that precede 'start' inside the same host page */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        /* if the range fits in a single host page, the target pages after
           'end' must be preserved as well */
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect((void *)host_start, qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            return ret;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* handle host page containing end */
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect((void *)(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            return ret;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect((void *)host_start, host_end - host_start, prot);
        if (ret != 0)
            return ret;
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
    return 0;
}
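
/* When the host page size is larger than the target page size, a single
   host page can hold several target pages with different protections.
   mmap_frag() below maps such a partially covered host page: the target
   pages outside [start, end) keep their recorded protection, and the
   requested file data is copied in with pread() whenever the file cannot
   simply be mapped over the whole host page. */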
/* map an incomplete host page */
int mmap_frag(unsigned long host_start,
              unsigned long start, unsigned long end,
              int prot, int flags, int fd, unsigned long offset)
{
    unsigned long host_end, ret, addr;
    int prot1, prot_new;

    host_end = host_start + qemu_host_page_size;

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for(addr = host_start; addr < host_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        ret = (long)mmap((void *)host_start, qemu_host_page_size, prot,
                         flags | MAP_ANONYMOUS, -1, 0);
        if (ret == -1)
            return ret;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -EINVAL;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect((void *)host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        pread(fd, (void *)start, end - start, offset);

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect((void *)host_start, qemu_host_page_size, prot_new);
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
            mprotect((void *)host_start, qemu_host_page_size, prot_new);
        }
    }
    return 0;
}
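
/* target_mmap() validates the request, picks a host address when the
   mapping is not MAP_FIXED, and then builds the region: either by mapping
   the file directly, by reading the data when the file offset is not
   host-page aligned, or by splitting the range into partial first/last
   host pages (handled by mmap_frag()) plus a directly mapped middle. */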
/* NOTE: all the constants are the HOST ones */
long target_mmap(unsigned long start, unsigned long len, int prot,
                 int flags, int fd, unsigned long offset)
{
    unsigned long ret, end, host_start, host_end, retaddr, host_offset, host_len;
#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__)
    static unsigned long last_start = 0x40000000;
#endif

#ifdef DEBUG_MMAP
    {
        printf("mmap: start=0x%lx len=0x%lx prot=%c%c%c flags=",
               start, len,
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANONYMOUS ");
#ifndef MAP_TYPE
# define MAP_TYPE 0x3
#endif
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
        printf("fd=%d offset=%lx\n", fd, offset);
    }
#endif
    if (offset & ~TARGET_PAGE_MASK)
        return -EINVAL;

    len = TARGET_PAGE_ALIGN(len);
    host_start = start & qemu_host_page_mask;

    if (!(flags & MAP_FIXED)) {
#if defined(__alpha__) || defined(__sparc__) || defined(__x86_64__)
        /* tell the kernel to search at the same place as i386 */
        if (host_start == 0) {
            host_start = last_start;
            last_start += HOST_PAGE_ALIGN(len);
        }
#endif
        if (qemu_host_page_size != qemu_real_host_page_size) {
            /* NOTE: this code is only for debugging with '-p' option */
            /* reserve a memory area */
            host_len = HOST_PAGE_ALIGN(len) + qemu_host_page_size - TARGET_PAGE_SIZE;
            host_start = (long)mmap((void *)host_start, host_len, PROT_NONE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (host_start == -1)
                return host_start;
            host_end = host_start + host_len;
            /* keep only the host-page-aligned part of the reservation */
            start = HOST_PAGE_ALIGN(host_start);
            end = start + HOST_PAGE_ALIGN(len);
            if (start > host_start)
                munmap((void *)host_start, start - host_start);
            if (end < host_end)
                munmap((void *)end, host_end - end);
            /* use it as a fixed mapping */
            flags |= MAP_FIXED;
        } else {
            /* if not fixed, no need to do anything */
            host_offset = offset & qemu_host_page_mask;
            host_len = len + offset - host_offset;
            start = (long)mmap((void *)host_start, host_len,
                               prot, flags, fd, host_offset);
            if (start == -1)
                return start;
            /* update start so that it points to the file position at 'offset' */
            if (!(flags & MAP_ANONYMOUS))
                start += offset - host_offset;
            goto the_end1;
        }
    }
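
    /* From here on the mapping address is fixed (either requested with
       MAP_FIXED or reserved above), so the region is assembled in place:
       an unaligned file offset forces a plain read of the data, otherwise
       the partially covered first/last host pages go through mmap_frag()
       and only the fully covered middle is mapped in one go. */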
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    end = start + len;
    host_end = HOST_PAGE_ALIGN(end);

    /* worst case: we cannot map the file because the offset is not
       aligned, so we read it */
    if (!(flags & MAP_ANONYMOUS) &&
        (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -EINVAL;
        retaddr = target_mmap(start, len, prot | PROT_WRITE,
                              MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                              -1, 0);
        if (retaddr == -1)
            return retaddr;
        pread(fd, (void *)start, len, offset);
        if (!(prot & PROT_WRITE)) {
            ret = target_mprotect(start, len, prot);
            if (ret != 0)
                return ret;
        }
        goto the_end;
    }
    /* handle the start of the mapping */
    if (start > host_start) {
        if (host_end == host_start + qemu_host_page_size) {
            /* one single host page */
            ret = mmap_frag(host_start, start, end,
                            prot, flags, fd, offset);
            if (ret == -1)
                return ret;
            goto the_end1;
        }
        ret = mmap_frag(host_start, start, host_start + qemu_host_page_size,
                        prot, flags, fd, offset);
        if (ret == -1)
            return ret;
        host_start += qemu_host_page_size;
    }
    /* handle the end of the mapping */
    if (end < host_end) {
        ret = mmap_frag(host_end - qemu_host_page_size,
                        host_end - qemu_host_page_size, host_end,
                        prot, flags, fd,
                        offset + host_end - qemu_host_page_size - start);
        if (ret == -1)
            return ret;
        host_end -= qemu_host_page_size;
    }

    /* map the middle (easier) */
    if (host_start < host_end) {
        unsigned long offset1;
        if (flags & MAP_ANONYMOUS)
            offset1 = 0;
        else
            offset1 = offset + host_start - start;
        ret = (long)mmap((void *)host_start, host_end - host_start,
                         prot, flags, fd, offset1);
        if (ret == -1)
            return ret;
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
    printf("target_mmap: ret=0x%lx\n", (long)start);
#endif
    return start;
}
int target_munmap(unsigned long start, unsigned long len)
{
    unsigned long end, host_start, host_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);

    if (start > host_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = host_end;
        }
        /* the first host page is still partly in use: keep it mapped */
        if (prot != 0)
            host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot = 0;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        /* the last host page is still partly in use: keep it mapped */
        if (prot != 0)
            host_end -= qemu_host_page_size;
    }

    /* unmap what we can */
    if (host_start < host_end) {
        ret = munmap((void *)host_start, host_end - host_start);
        if (ret != 0)
            return ret;
    }
    page_set_flags(start, start + len, 0);
    return 0;
}
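
/* Only the simple mremap() case is handled here: the host call does the
   actual work and the recorded target page flags are moved from the old
   range to the new one. */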
/* XXX: currently, we only handle MAP_ANONYMOUS and not MAP_FIXED
   blocks which have been allocated starting on a host page */
long target_mremap(unsigned long old_addr, unsigned long old_size,
                   unsigned long new_size, unsigned long flags,
                   unsigned long new_addr)
{
    int prot;

    /* XXX: use 5 args syscall */
    new_addr = (long)mremap((void *)old_addr, old_size, new_size, flags);
    if (new_addr != -1) {
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
        return new_addr;
    }
    qerror("target_mremap: unsupported\n");
    return new_addr;
}
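
/* target_msync() only has to round the start down to a host page boundary
   before handing the range to the host msync(). */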
int target_msync(unsigned long start, unsigned long len, int flags)
{
    unsigned long end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync((void *)start, end - start, flags);
}
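
/* Illustrative only (not part of this file): the linux-user syscall
   dispatcher is expected to forward the guest's memory syscalls to the
   wrappers above, roughly along these lines (the TARGET_NR_* constants and
   arg1..argN names are assumptions about the surrounding qemu sources, not
   taken from this excerpt):

       case TARGET_NR_munmap:
           ret = target_munmap(arg1, arg2);
           break;
       case TARGET_NR_mprotect:
           ret = target_mprotect(arg1, arg2, arg3);
           break;
*/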