#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
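/* Worked note (illustrative, not part of the original source):
   TARGET_PHYS_ADDR_SPACE_BITS bounds how many physical pages the softmmu
   tables have to describe. Assuming 4 KiB target pages (TARGET_PAGE_BITS
   == 12), the kqemu-compatible 32-bit case needs 2^(32 - 12) = 1048576
   page descriptors, while the 36-bit PAE case needs 2^24 of them. */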
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section \
    __attribute__((__section__(".gen_code"))) \
    __attribute__((aligned (32)))
#define code_gen_section \
    __attribute__((aligned (32)))
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
/* Current instruction counter. While executing translated code this may
   include some instructions that have not yet been executed. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
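/* Worked example (illustrative, assumes 4 KiB target pages so
   TARGET_PAGE_MASK == ~0xfff): SUBPAGE_IDX(0x12345678) == 0x678, i.e. the
   byte offset of the access inside its page, which is what indexes the
   per-page mem_read/mem_write/opaque tables of subpage_t above. */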
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;
    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);
    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);
    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
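/* Standalone sketch of the same pattern (illustrative only, not part of the
   original file; the function name is made up): mprotect() operates on whole
   pages, so the start is rounded down and the end rounded up to host page
   boundaries before the buffer that will hold generated code is made
   executable. */
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static int make_buffer_executable_example(void *buf, size_t size)
{
    unsigned long page_size = getpagesize();
    unsigned long start = (unsigned long)buf & ~(page_size - 1);
    unsigned long end = ((unsigned long)buf + size + page_size - 1)
                        & ~(page_size - 1);

    /* returns 0 on success, -1 on failure (same contract as mprotect) */
    return mprotect((void *)start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC);
}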
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                   PAGE_EXECUTE_READWRITE, &old_protect);
    qemu_real_host_page_size = getpagesize();
    unsigned long start, end;
    start = (unsigned long)code_gen_buffer;
    start &= ~(qemu_real_host_page_size - 1);
    end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
    end += qemu_real_host_page_size - 1;
    end &= ~(qemu_real_host_page_size - 1);
    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    long long startaddr, endaddr;
    last_brk = (unsigned long)sbrk(0);
    f = fopen("/proc/self/maps", "r");
    n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
    startaddr = MIN(startaddr,
                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
    endaddr = MIN(endaddr,
                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
    page_set_flags(startaddr & TARGET_PAGE_MASK,
                   TARGET_PAGE_ALIGN(endaddr),
static inline PageDesc **page_l1_map(target_ulong index)
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM. For 32-bit targets we have already
       excluded high addresses. */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
    return &l1_map[index >> L2_BITS];
static inline PageDesc *page_find_alloc(target_ulong index)
static inline PageDesc *page_find_alloc(unsigned int index)
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    lp = &l1_map[index >> L2_BITS];
    /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse. */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
    return p + (index & (L2_SIZE - 1));
static inline PageDesc *page_find(target_ulong index)
static inline PageDesc *page_find(unsigned int index)
    lp = page_l1_map(index);
    p = l1_map[index >> L2_BITS];
    return p + (index & (L2_SIZE - 1));
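/* Worked example (illustrative, assumes L2_BITS == 10 so L2_SIZE == 1024):
   for page index 0x12345, page_l1_map() returns &l1_map[0x12345 >> 10]
   == &l1_map[0x48], and page_find()/page_find_alloc() then pick entry
   0x12345 & 0x3ff == 0x345 inside that second-level array. A NULL first-level
   slot simply means no PageDesc has been allocated yet for that run of 1024
   pages (4 MiB of guest address space with 4 KiB pages). */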
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    start = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024))
        code_gen_buffer_size = (512 * 1024 * 1024);
    code_gen_buffer = mmap(start, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#elif defined(__FreeBSD__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
     * 0x40000000 is free */
    addr = (void *)0x40000000;
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
    code_gen_buffer = mmap(addr, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
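/* Sizing sketch (illustrative; the average block size used here is an
   assumption): with the 32 MiB DEFAULT_CODE_GEN_BUFFER_SIZE and e.g.
   CODE_GEN_AVG_BLOCK_SIZE == 128, code_gen_max_blocks comes out as
   32 * 1024 * 1024 / 128 == 262144 TranslationBlock slots, while
   code_gen_buffer_max_size keeps one maximum-sized block of headroom so a
   translation in progress can never run past the end of the buffer. */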
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
void cpu_exec_init_all(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
#define CPU_COMMON_SAVE_VERSION 1
static void cpu_common_save(QEMUFile *f, void *opaque)
    CPUState *env = opaque;
    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
    CPUState *env = opaque;
    if (version_id != CPU_COMMON_SAVE_VERSION)
    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
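/* Registration sketch (illustrative; the idstr and instance id used here are
   assumptions): the save/load pair above would be hooked into the snapshot
   machinery roughly as
       register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                       cpu_common_save, cpu_common_load, env);
   so that halted and interrupt_request survive savevm/loadvm. */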
void cpu_exec_init(CPUState *env)
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
    static int interrupt_lock;
    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe. A different thread could
       be in the middle of a read-modify-write operation. */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
       problem and hope the cpu will stop of its own accord. For userspace
       emulation this often isn't actually as bad as it sounds. Often
       signals are used primarily to interrupt blocking syscalls. */
    env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
    /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
       an async event happened and we need to process it. */
        && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
        cpu_abort(env, "Raised interrupt while not in I/O function");
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;
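/* Usage sketch (illustrative): a device or board model raises an interrupt
   line on a CPU and clears it again once the guest has acknowledged it, e.g.
       cpu_interrupt(env, CPU_INTERRUPT_HARD);
       ...
       cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
   As noted above, the mask must never be zero except for the x86 A20-change
   special case. */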
const CPULogItem cpu_log_items[] = {
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "show micro ops after optimization for each compiled TB" },
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
    vaddr &= TARGET_PAGE_MASK;
    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
            iotlb |= IO_MEM_ROM;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region. This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint. */
            address |= TLB_MMIO;
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
            te->addr_write = address;
            te->addr_write = -1;
#if !defined(CONFIG_SOFTMMU)
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case */
        address = vaddr | pd;
        /* standard memory */
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            if (address & ~TARGET_PAGE_MASK) {
                env->watchpoint[i].addend = 0;
                address = vaddr | io_mem_watch;
                env->watchpoint[i].addend = pd - paddr +
                    (unsigned long) phys_ram_base;
                /* TODO: Figure out how to make read watchpoints coexist
                pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* write access calls the I/O callback */
            te->addr_write = vaddr |
                (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            te->addr_write = address;
            te->addr_write = -1;
#if !defined(CONFIG_SOFTMMU)
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* IO access: no mapping is done as it will be handled by the
        if (!(env->hflags & HF_SOFTMMU_MASK))
    if (vaddr >= MMAP_AREA_END) {
        if (prot & PROT_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                 !cpu_physical_memory_is_dirty(pd))) {
                /* ROM: we do as if code was inside */
                /* if code is present, we only map as read only and save the
                vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                vp->valid_tag = virt_valid_tag;
                prot &= ~PAGE_WRITE;
        map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                        MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
        if (map_addr == MAP_FAILED) {
            cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
/* called from signal handler: invalidate the code and unprotect the
1744
page. Return TRUE if the fault was succesfully handled. */
1745
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1747
#if !defined(CONFIG_SOFTMMU)
1750
#if defined(DEBUG_TLB)
1751
printf("page_unprotect: addr=0x%08x\n", addr);
1753
addr &= TARGET_PAGE_MASK;
1755
/* if it is not mapped, no need to worry here */
1756
if (addr >= MMAP_AREA_END)
1758
vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1761
/* NOTE: in this case, validate_tag is _not_ tested as it
1762
validates only the code TLB */
1763
if (vp->valid_tag != virt_valid_tag)
1765
if (!(vp->prot & PAGE_WRITE))
1767
#if defined(DEBUG_TLB)
1768
printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1769
addr, vp->phys_addr, vp->prot);
1771
if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1772
cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1773
(unsigned long)addr, vp->prot);
1774
/* set the dirty bit */
1775
phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1776
/* flush the code inside */
1777
tb_invalidate_phys_page(vp->phys_addr, pc, puc);
void tlb_flush(CPUState *env, int flush_global)
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t phys_offset)
                                  unsigned long phys_offset)
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    ram_addr_t orig_size = size;
    unsigned long orig_size = size;
    /* XXX: should not depend on cpu context */
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
        kvm_set_phys_mem(start_addr, size, phys_offset);
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
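/* Usage sketch (illustrative; the addresses, sizes and variable names are
   assumptions): a board model typically maps guest RAM and then MMIO regions
   whose io_index came from cpu_register_io_memory():
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(0xfe000000, 0x1000, mmio_io_index);
   Per the comment above, a phys_offset whose low bits are clear is RAM; a
   non-zero low part selects the registered I/O handlers for that page. */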
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 1);
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 2);
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0, 4);
    do_unassigned_access(addr, 0, 0, 0);
    do_unassigned_access(addr, 0, 0, 0);
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 1);
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 2);
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0, 4);
    do_unassigned_access(addr, 1, 0, 0);
    do_unassigned_access(addr, 1, 0, 0);
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
    unassigned_mem_readb,
    unassigned_mem_readb,
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
    notdirty_mem_writel,
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int flags)
    CPUState *env = cpu_single_env;
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
            && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access. addr will be a host
   address in case of a RAM location. */
static target_ulong check_watchpoint(target_phys_addr_t addr)
    CPUState *env = cpu_single_env;
    target_ulong retaddr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
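/* Wiring sketch (illustrative; in the source these handlers are bundled into
   watch_mem_read[]/watch_mem_write[] arrays and registered from the I/O
   memory init code, roughly):
       io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                             watch_mem_write, NULL);
   Pages containing a watchpoint are then entered into the TLB with TLB_MMIO
   set (see tlb_set_page above), so every access funnels through
   check_watchpoint() before falling back to the normal ld*_phys/st*_phys
   accessors. */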
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
    int i, subwidth = 0;
    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
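/* Registration sketch (illustrative; every identifier below is made up, only
   the cpu_register_io_memory()/cpu_register_physical_memory() calls follow
   the interface documented above): a device supplies one handler per access
   size, registers them, and maps the returned io_index onto a physical page. */
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr) { return 0; }
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

/* at device init time:
       int io_index = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base_addr, 0x1000, io_index);          */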
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
    TranslationBlock *tb;
    target_ulong pc, cs_base;
    tb = tb_find_pc((unsigned long)retaddr);
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");
    cflags = n | CF_LAST_IO;
    cs_base = tb->cs_base;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
    cpu_resume_from_signal(env, NULL);
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))