@@ cpu_sh4_handle_mmu_fault (CONFIG_USER_ONLY) @@
 int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
-                             int is_user, int is_softmmu)
+                             int mmu_idx, int is_softmmu)
 {
     env->tea = address;
+    env->exception_index = 0;
     switch (rw) {
     case 0:                 /* data read */
         env->exception_index = 0x0a0;
         break;
     case 1:                 /* data write */
         env->exception_index = 0x0c0;
         break;
     case 2:                 /* code fetch */
         env->exception_index = 0x0a0;
         break;
     }
     return 1;
 }
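
For reference, a quick standalone sketch of the access-type to exception-code mapping used above (fault_exception_code is a hypothetical name, not code from the patch):

#include <stdio.h>

/* Illustration only: 'rw' as passed to cpu_sh4_handle_mmu_fault above. */
static int fault_exception_code(int rw)
{
    switch (rw) {
    case 0:  return 0x0a0;  /* data read  */
    case 1:  return 0x0c0;  /* data write */
    case 2:  return 0x0a0;  /* code fetch */
    default: return 0;
    }
}

int main(void)
{
    int rw;
    for (rw = 0; rw < 3; rw++)
        printf("rw=%d -> exception 0x%03x\n", rw, fault_exception_code(rw));
    return 0;
}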
@@ do_interrupt @@
 void do_interrupt(CPUState * env)
 {
+    int do_irq = env->interrupt_request & CPU_INTERRUPT_HARD;
+    int do_exp, irq_vector = env->exception_index;
+
+    /* prioritize exceptions over interrupts */
+    do_exp = env->exception_index != -1;
+    do_irq = do_irq && (env->exception_index == -1);
+
+    if (env->sr & SR_BL) {
+        if (do_exp && env->exception_index != 0x1e0) {
+            env->exception_index = 0x000; /* masked exception -> reset */
+        }
+        if (do_irq && !env->intr_at_halt) {
+            return; /* masked */
+        }
+        env->intr_at_halt = 0;
+    }
+
+    if (do_irq) {
+        irq_vector = sh_intc_get_pending_vector(env->intc_handle,
+                                                (env->sr >> 4) & 0xf);
+        if (irq_vector == -1) {
+            return; /* masked */
+        }
+    }
+
     if (loglevel & CPU_LOG_INT) {
         const char *expname;
         switch (env->exception_index) {
         /* ... */
         case 0x160:
             expname = "trapa";
             break;
         default:
+            expname = do_irq ? "interrupt" : "???";
             break;
         }
         fprintf(logfile, "exception 0x%03x [%s] raised\n",
-                env->exception_index, expname);
+                irq_vector, expname);
         cpu_dump_state(env, logfile, fprintf, 0);
     }

     env->ssr = env->sr;
     env->spc = env->pc;
     env->sgr = env->gregs[15];
     env->sr |= SR_BL | SR_MD | SR_RB;

+    if (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
+        /* Branch instruction should be executed again before delay slot. */
+        env->spc -= 2;
+        /* Clear flags for exception/interrupt routine. */
+        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL | DELAY_SLOT_TRUE);
+    } else if (env->flags & DELAY_SLOT_CLEARME)
+        env->flags = 0;
+
-    env->expevt = env->exception_index & 0x7ff;
-    switch (env->exception_index) {
-    /* ... */
-        env->pc = env->vbr + 0x400;
-    /* ... */
-        env->pc = 0xa0000000;
-    /* ... */
-        env->pc = env->vbr + 0x100;
-    }
+    if (do_exp) {
+        env->expevt = env->exception_index;
+        switch (env->exception_index) {
+        case 0x000: /* power-on reset */
+            /* ... */
+            env->sr |= 0xf << 4; /* IMASK */
+            env->pc = 0xa0000000;
+            break;
+        case 0x040: /* TLB miss (read) */
+        case 0x060: /* TLB miss (write) */
+            env->pc = env->vbr + 0x400;
+            break;
+        case 0x160: /* trapa */
+            env->spc += 2; /* special case for TRAPA */
+            /* fall through */
+        default:
+            env->pc = env->vbr + 0x100;
+            break;
+        }
+        return;
+    }
+
+    if (do_irq) {
+        env->intevt = irq_vector;
+        env->pc = env->vbr + 0x600;
+        return;
+    }
 }
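
The vector dispatch above can be condensed into a standalone sketch (handler_pc is a hypothetical helper; the real code also saves SR/PC/R15 into SSR/SPC/SGR and sets the SR BL/MD/RB bits first): reset-class events jump to the fixed address 0xa0000000, TLB misses to VBR + 0x400, external interrupts to VBR + 0x600, and all other exceptions, including trapa, to VBR + 0x100.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical condensation of the dispatch in do_interrupt above. */
static uint32_t handler_pc(uint32_t vbr, int is_irq, uint32_t expevt)
{
    if (is_irq)
        return vbr + 0x600;      /* external interrupt */
    switch (expevt) {
    case 0x000:                  /* reset-class: fixed vector */
        return 0xa0000000;
    case 0x040:                  /* TLB miss (read)  */
    case 0x060:                  /* TLB miss (write) */
        return vbr + 0x400;
    default:                     /* general exceptions, incl. 0x160 trapa */
        return vbr + 0x100;
    }
}

int main(void)
{
    uint32_t vbr = 0x8c000000;   /* made-up VBR value */
    printf("trapa    -> 0x%08x\n", handler_pc(vbr, 0, 0x160));
    printf("tlb miss -> 0x%08x\n", handler_pc(vbr, 0, 0x040));
    printf("irq      -> 0x%08x\n", handler_pc(vbr, 1, 0));
    return 0;
}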
@@ same_tlb_entry_exists (new) @@
+static int same_tlb_entry_exists(const tlb_t * haystack, uint8_t nbtlb,
+                                 const tlb_t * needle)
+{
+    int i;
+
+    for (i = 0; i < nbtlb; i++)
+        if (!memcmp(&haystack[i], needle, sizeof(tlb_t)))
+            return 1;
+    return 0;
+}
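
same_tlb_entry_exists is a byte-wise needle-in-haystack search via memcmp; a minimal self-contained illustration with a padding-free stand-in for tlb_t (entry_t and same_entry_exists are made up for the example):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Padding-free stand-in for tlb_t, so memcmp compares only field bytes. */
typedef struct { uint32_t vpn, ppn, asid, v; } entry_t;

static int same_entry_exists(const entry_t * haystack, uint8_t nbtlb,
                             const entry_t * needle)
{
    int i;
    for (i = 0; i < nbtlb; i++)
        if (!memcmp(&haystack[i], needle, sizeof(entry_t)))
            return 1;
    return 0;
}

int main(void)
{
    entry_t tlb[2] = { { 0x100, 0x200, 1, 1 }, { 0x300, 0x400, 1, 1 } };
    entry_t probe = { 0x300, 0x400, 1, 1 };
    printf("duplicate exists: %d\n", same_entry_exists(tlb, 2, &probe));
    return 0;
}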
@@ increment_urc (new) @@
+static void increment_urc(CPUState * env)
+{
+    uint8_t urb, urc;
+
+    urb = ((env->mmucr) >> 18) & 0x3f;
+    urc = ((env->mmucr) >> 10) & 0x3f;
+    urc++;
+    if (urc == urb || urc == UTLB_SIZE - 1)
+        urc = 0;
+    env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10);
+}
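
A standalone sketch of the same URC bit manipulation (mmucr_increment_urc is a hypothetical name; UTLB_SIZE is 64 on SH-4), showing the counter wrap at URB:

#include <stdint.h>
#include <stdio.h>

#define UTLB_SIZE 64

/* Same update as increment_urc above, on a plain MMUCR value. */
static uint32_t mmucr_increment_urc(uint32_t mmucr)
{
    uint8_t urb = (mmucr >> 18) & 0x3f;   /* UTLB replace boundary */
    uint8_t urc = (mmucr >> 10) & 0x3f;   /* UTLB replace counter  */
    urc++;
    if (urc == urb || urc == UTLB_SIZE - 1)
        urc = 0;
    return (mmucr & 0xffff03ff) | ((uint32_t)urc << 10);
}

int main(void)
{
    uint32_t mmucr = 3u << 18;            /* URB = 3, URC = 0 */
    int i;
    for (i = 0; i < 5; i++) {
        mmucr = mmucr_increment_urc(mmucr);
        printf("URC = %u\n", (mmucr >> 10) & 0x3f);
    }
    return 0;                             /* prints 1, 2, 0, 1, 2 */
}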
@@ find_itlb_entry @@
 /* Find itlb entry - update itlb from utlb if necessary and asked for
    Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE
    Update the itlb from utlb if update is not 0
 */
 /* ... */
     else if (e == MMU_DTLB_MISS && update) {
         e = find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
         if (e >= 0) {
+            tlb_t * ientry;
             n = itlb_replacement(env);
+            ientry = &env->itlb[n];
+            if (ientry->v) {
+                if (!same_tlb_entry_exists(env->utlb, UTLB_SIZE, ientry))
+                    tlb_flush_page(env, ientry->vpn << 10);
+            }
-            env->itlb[n] = env->utlb[e];
+            *ientry = env->utlb[e];
             e = n;
+        } else if (e == MMU_DTLB_MISS)
+            e = MMU_ITLB_MISS;
     } else if (e == MMU_DTLB_MISS)
         e = MMU_ITLB_MISS;
     if (e >= 0)
         update_itlb_use(env, e);
@@ find_utlb_entry @@
 /* Find utlb entry
    Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */
 int find_utlb_entry(CPUState * env, target_ulong address, int use_asid)
 {
-    urb = ((env->mmucr) >> 18) & 0x3f;
-    urc = ((env->mmucr) >> 10) & 0x3f;
-    urc++;
-    if (urc == urb || urc == UTLB_SIZE - 1)
-        urc = 0;
-    env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10);
+    /* per utlb access */
+    increment_urc(env);

     /* Return entry */
     return find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
 }
@@ get_physical_address @@
             return (rw & PAGE_WRITE) ? MMU_DTLB_MISS_WRITE :
                 MMU_DTLB_MISS_READ;
         }
-        /* Mask upper 3 bits */
-        *physical = address & 0x1FFFFFFF;
+        if (address >= 0x80000000 && address < 0xc0000000) {
+            /* Mask upper 3 bits for P1 and P2 areas */
+            *physical = address & 0x1fffffff;
+        } else if (address >= 0xfc000000) {
+            /*
+             * Mask upper 3 bits for control registers in P4 area,
+             * to unify access to control registers via P0-P3 area.
+             * The addresses for cache store queue, TLB address array
+             * are not masked.
+             */
+            *physical = address & 0x1fffffff;
+        } else {
+            /* access to cache store queue, or TLB address array. */
+            *physical = address;
+        }
         *prot = PAGE_READ | PAGE_WRITE;
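
A worked example of the masking introduced above, ignoring the surrounding area checks (untranslated_phys is a hypothetical helper): P1/P2 addresses and P4 control-register addresses lose their upper three bits, while store-queue and TLB-array addresses pass through unchanged.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the branch above, for untranslated areas only. */
static uint32_t untranslated_phys(uint32_t address)
{
    if (address >= 0x80000000u && address < 0xc0000000u)
        return address & 0x1fffffff;      /* P1/P2 */
    if (address >= 0xfc000000u)
        return address & 0x1fffffff;      /* P4 control registers */
    return address;                       /* store queue, TLB arrays */
}

int main(void)
{
    printf("0x%08x -> 0x%08x\n", 0x8c000000u, untranslated_phys(0x8c000000u));
    printf("0x%08x -> 0x%08x\n", 0xff000010u, untranslated_phys(0xff000010u));
    printf("0x%08x -> 0x%08x\n", 0xe0000000u, untranslated_phys(0xe0000000u));
    return 0;
}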
@@ cpu_sh4_handle_mmu_fault @@
 int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
-                             int is_user, int is_softmmu)
+                             int mmu_idx, int is_softmmu)
 {
     target_ulong physical, page_offset, page_size;
     int prot, ret, access_type;

     /* ... */
+    case 2: /* READ_ACCESS_TYPE == 2 defined in softmmu_template.h */
     /* ... */

-    fprintf(stderr, "%s pc %08x ad %08x rw %d is_user %d smmu %d\n",
-            __func__, env->pc, address, rw, is_user, is_softmmu);
+    fprintf(stderr, "%s pc %08x ad %08x rw %d mmu_idx %d smmu %d\n",
+            __func__, env->pc, address, rw, mmu_idx, is_softmmu);

     access_type = ACCESS_INT;
     /* ... */

     address = (address & TARGET_PAGE_MASK) + page_offset;
     physical = (physical & TARGET_PAGE_MASK) + page_offset;

-    return tlb_set_page(env, address, physical, prot, is_user, is_softmmu);
+    return tlb_set_page(env, address, physical, prot, mmu_idx, is_softmmu);
 }
@@ cpu_get_phys_page_debug @@
 target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
@@ cpu_load_tlb @@
+void cpu_load_tlb(CPUState * env)
+{
+    int n = cpu_mmucr_urc(env->mmucr);
+    tlb_t * entry = &env->utlb[n];
+
+    if (entry->v) {
+        /* Overwriting valid entry in utlb. */
+        target_ulong address = entry->vpn << 10;
+        if (!same_tlb_entry_exists(env->itlb, ITLB_SIZE, entry)) {
+            tlb_flush_page(env, address);
+        }
+    }
+
+    /* per utlb access cannot be implemented. */
+
+    /* Take values into cpu status from registers. */
+    entry->asid = (uint8_t)cpu_pteh_asid(env->pteh);
+    entry->vpn = cpu_pteh_vpn(env->pteh);
+    entry->v = (uint8_t)cpu_ptel_v(env->ptel);
+    entry->ppn = cpu_ptel_ppn(env->ptel);
+    entry->sz = (uint8_t)cpu_ptel_sz(env->ptel);
+    switch (entry->sz) {
+    case 0: /* 00 */
+        entry->size = 1024; /* 1K */
+        break;
+    case 1: /* 01 */
+        entry->size = 1024 * 4; /* 4K */
+        break;
+    case 2: /* 10 */
+        entry->size = 1024 * 64; /* 64K */
+        break;
+    case 3: /* 11 */
+        entry->size = 1024 * 1024; /* 1M */
+        break;
+    default:
+        assert(0);
+    }
+    entry->sh = (uint8_t)cpu_ptel_sh(env->ptel);
+    entry->c = (uint8_t)cpu_ptel_c(env->ptel);
+    entry->pr = (uint8_t)cpu_ptel_pr(env->ptel);
+    entry->d = (uint8_t)cpu_ptel_d(env->ptel);
+    entry->wt = (uint8_t)cpu_ptel_wt(env->ptel);
+    entry->sa = (uint8_t)cpu_ptea_sa(env->ptea);
+    entry->tc = (uint8_t)cpu_ptea_tc(env->ptea);
+}
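
The SZ field decoding can be checked with a standalone sketch (tlb_page_size is a hypothetical name; the two SZ bits select 1K, 4K, 64K or 1M pages, as in the switch above):

#include <stdint.h>
#include <stdio.h>

/* Same SZ -> byte-size mapping as the switch in cpu_load_tlb above. */
static uint32_t tlb_page_size(uint8_t sz)
{
    switch (sz & 3) {
    case 0:  return 1024;                 /* 1K  */
    case 1:  return 1024 * 4;             /* 4K  */
    case 2:  return 1024 * 64;            /* 64K */
    default: return 1024 * 1024;          /* 1M  */
    }
}

int main(void)
{
    uint8_t sz;
    for (sz = 0; sz < 4; sz++)
        printf("sz=%u -> %u bytes\n", sz, tlb_page_size(sz));
    return 0;
}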
@@ cpu_sh4_write_mmaped_utlb_addr (new) @@
+void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, target_phys_addr_t addr,
+                                    uint32_t mem_value)
+{
+    int associate = addr & 0x0000080;
+    uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
+    uint8_t d = (uint8_t)((mem_value & 0x00000200) >> 9);
+    uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
+    uint8_t asid = (uint8_t)(mem_value & 0x000000ff);
+
+    if (associate) {
+        int i;
+        tlb_t * utlb_match_entry = NULL;
+        int needs_tlb_flush = 0;
+
+        /* search UTLB */
+        for (i = 0; i < UTLB_SIZE; i++) {
+            tlb_t * entry = &s->utlb[i];
+            /* ... */
+            if (entry->vpn == vpn && entry->asid == asid) {
+                if (utlb_match_entry) {
+                    /* Multiple TLB Exception */
+                    s->exception_index = 0x140;
+                    break;
+                }
+                /* ... */
+                utlb_match_entry = entry;
+            }
+            increment_urc(s); /* per utlb access */
+        }
+
+        /* search ITLB */
+        for (i = 0; i < ITLB_SIZE; i++) {
+            tlb_t * entry = &s->itlb[i];
+            if (entry->vpn == vpn && entry->asid == asid) {
+                /* ... */
+                if (utlb_match_entry)
+                    *entry = *utlb_match_entry;
+                /* ... */
+            }
+        }
+
+        if (needs_tlb_flush)
+            tlb_flush_page(s, vpn << 10);
+    } else {
+        int index = (addr & 0x00003f00) >> 8;
+        tlb_t * entry = &s->utlb[index];
+        if (entry->v) {
+            /* Overwriting valid entry in utlb. */
+            target_ulong address = entry->vpn << 10;
+            if (!same_tlb_entry_exists(s->itlb, ITLB_SIZE, entry)) {
+                tlb_flush_page(s, address);
+            }
+        }
+        /* ... */
+    }
+}
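
To make the field extraction above concrete, here is a standalone decode of one UTLB address-array store (the addr and mem_value constants are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t addr      = 0x00001280;      /* hypothetical store address */
    uint32_t mem_value = 0x7f000301;      /* hypothetical store value   */

    int associate = addr & 0x0000080;                /* associative-mode bit */
    int index     = (addr & 0x00003f00) >> 8;        /* UTLB index (direct mode) */
    uint32_t vpn  = (mem_value & 0xfffffc00) >> 10;  /* virtual page number */
    uint8_t d     = (mem_value & 0x00000200) >> 9;   /* dirty bit */
    uint8_t v     = (mem_value & 0x00000100) >> 8;   /* valid bit */
    uint8_t asid  = mem_value & 0x000000ff;          /* address space id */

    printf("associate=%d index=%d vpn=0x%06x d=%d v=%d asid=0x%02x\n",
           associate != 0, index, vpn, d, v, asid);
    return 0;
}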