extern MIPS_State* mstate;
5
/* Address decoder macros.
6
* Bits 29, 30 and 31 of the virtual address select the user or kernel address spaces.
11
return bits(va, 31, 29);
15
vaddr_compat_region(UInt32 va)
17
return bits(va, 31, 30);
21
get_random(MIPS_State* mstate) //Fix me: Shi yang 2006-08-10
23
int wired = mstate->cp0[Wired];
24
int free = tlb_size - wired;
25
return (tlb_size - 1 - wired + (mstate->now - mstate->random_seed)) % free + wired;
29
enter_kernel_mode(MIPS_State* mstate)
31
copy_bits(mstate->cp0[SR], (bits(mstate->cp0[SR], 3, 0) << 2), 5, 2); //Shi yang 2006-08-10
32
clear_bits(mstate->cp0[SR], 1, 0); //Enter kernel mode
37
leave_kernel_mode(MIPS_State* mstate)
39
copy_bits(mstate->cp0[SR], (bits(mstate->cp0[SR], 5, 2)), 3, 0);
40
copy_bits(mstate->cp0[SR], (bits(mstate->cp0[SR], 3, 2) << 4), 5, 4);
41
if (bit(mstate->cp0[SR], 1) == 0)
42
mstate->mode |= kmode; //From kernel mode to kernel mode
44
mstate->mode = umode; //From kernel mode to user mode
/* An ASID is represented by a 16 bit integer that is either a positive
 * (non-zero) ASID, complemented if the G bit is also set. asid_match()
 * checks two ASIDs for equivalence. Yet another demonstration of the
 * versatility of exclusive-or. ;)
 */
static int
asid_match(Int16 asid1, Int16 asid2)
{
	/* Equal values xor to zero; a complemented (G-bit) side drives the
	 * xor negative. Both cases compare <= 0 and count as a match. */
	return (asid1 ^ asid2) <= 0;
}
59
/* Operations on the TLB lookup map. The map is a hash table indexed by a
60
* hash value computed from the ASID and the bits of virtual page number
61
* unaffected by any page mask. The result of the map lookup is a TLB
62
* entry. The hash function doesn't provide an ideal distribution, but as
63
* a minimum, it maintains a separate hash chain per ASID, and provides
64
* good distribution at least for sparse address spaces. The hash table
65
* always has an extra entry pointing to an invalid page.
71
return (bits(va, 31, 12) % tlb_map_size);
/* Two virtual addresses match iff they are bit-for-bit identical. */
static int
va_match(VA va1, VA va2)
{
	return !(va1 ^ va2);
}
80
// I-cache buffer operations.
83
ibuf_match(PA pa, PA tag) //Shi yang 2006-08-10
85
return ((pa >> log2_icache_line) ^ tag) == 0;
88
// Some state information constants.
91
allow_xinstr(MIPS_State* mstate)
93
return mstate->mode & (kmode|xmode);
97
branch_delay_slot(MIPS_State* mstate) //Shi yang 2006-08-10
99
return mstate->pipeline == branch_delay;
103
process_address_error(MIPS_State* mstate, int type, VA va)
105
int exc = (type == data_store) ? EXC_AdES : EXC_AdEL;
106
mstate->cp0[BadVAddr] = va;
107
process_exception(mstate, exc, common_vector);
111
process_tlb_refill(MIPS_State* mstate, int type, VA va)
113
int exc = (type == data_store) ? EXC_TLBS : EXC_TLBL;
114
int vec = tlb_refill_vector;
115
mstate->cp0[BadVAddr] = va;
116
mstate->cp0[Context] = copy_bits(mstate->cp0[Context],
117
bits(mstate->cp0[BadVAddr], 31, 13), 20, 2); //Shi yang 2006-08-10
118
mstate->cp0[EntryHi] = (va & (bitsmask(11, 0)))
119
| (bits(mstate->asid, 5, 0) << 6);
120
process_exception(mstate, exc, vec);
124
process_tlb_invalid(MIPS_State* mstate, int type, VA va)
126
int exc = (type == data_store) ? EXC_TLBS : EXC_TLBL;
127
mstate->cp0[BadVAddr] = va;
128
mstate->cp0[Context] = copy_bits(mstate->cp0[Context],
129
bits(mstate->cp0[BadVAddr], 31, 13), 20, 2); //Shi yang 2006-08-10
130
mstate->cp0[EntryHi] = (va & (bitsmask(11, 0)))
131
| (bits(mstate->asid, 5, 0) << 6);
132
process_exception(mstate, exc, common_vector);
136
process_tlb_modified(MIPS_State* mstate, VA va)
138
mstate->cp0[BadVAddr] = va;
139
mstate->cp0[Context] = copy_bits(mstate->cp0[Context],
140
bits(mstate->cp0[BadVAddr], 31, 13), 20, 2);
141
mstate->cp0[EntryHi] = (va & (bitsmask(11, 0)))
142
| (bits(mstate->asid, 7, 0) << 6);
143
process_exception(mstate, EXC_Mod, common_vector);
147
process_bus_error(MIPS_State* mstate, int type)
149
int exc = (type == instr_fetch) ? EXC_IBE : EXC_DBE;
150
process_exception(mstate, exc, common_vector);
154
process_integer_overflow(MIPS_State* mstate)
156
process_exception(mstate,EXC_Ov, common_vector);
160
process_trap(MIPS_State* mstate)
162
printf("Begin process_trap.\n");
163
process_exception(mstate, EXC_Tr, common_vector);
167
process_syscall(MIPS_State* mstate)
169
process_exception(mstate, EXC_Sys, common_vector);
173
process_breakpoint(MIPS_State* mstate)
175
process_exception(mstate, EXC_Bp, common_vector);
179
process_reserved_instruction(MIPS_State* mstate)
181
process_exception(mstate, EXC_RI, common_vector);
185
process_coprocessor_unusable(MIPS_State* mstate, int c)
187
process_exception(mstate, EXC_CpU | (c << (Cause_CE_First)), common_vector);
191
process_fp_exception(MIPS_State* mstate)
193
process_exception(mstate,EXC_FPE, common_vector);
197
process_interrupt(MIPS_State* mstate)
199
process_exception(mstate,EXC_Int, common_vector);
202
/* Endianess parameters. The data received from the memory bus is always
203
* in the host byte order, and uses host byte addressing.
206
big_endian_mem(MIPS_State* mstate)
208
return bit(mstate->cp0[Config], Config_BE);
212
reverse_endian(MIPS_State* mstate)
214
return mstate->mode & rmode;
218
big_endian_cpu(MIPS_State* mstate)
220
return mstate->mode & bmode;
/* Return the bus word unchanged; addr is accepted for symmetry with the
 * byte/halfword swizzlers but is currently unused (original carried a
 * "Fix me" note -- presumably word swizzling is not yet implemented). */
static UInt32
swizzle_word(UInt32 x, UInt32 addr)
{
	(void)addr;
	return x;
}
231
swizzle_byte(UInt32 x, UInt32 addr)
234
offset = (((UInt32) mstate->bigendSig * 3) ^ (addr & 3)) << 3; //Shi yang 2006-08-18
235
return (x >> offset) & 0xffL;
239
swizzle_doublebyte(UInt32 x, UInt32 addr)
242
offset = (((UInt32)mstate->bigendSig * 2) ^ (addr & 2)) << 3; //Shi yang 2006-08-18
243
return (x >> offset) & 0xffffL;
248
sign_extend_Int32(Int32 x, int n)
250
if (((Int32)-1 >> 1) < 0) {
251
// Host platform does arithmetic right shifts.
253
return y << (8 * sizeof(Int32) - n) >> (8 * sizeof(Int32) - n);
254
} else if (n < 8 * sizeof(Int32)) {
255
// We have to manually patch the high-order bits.
257
return set_bits(x, 8 * sizeof(Int32) - 1, n);
259
return clear_bits(x, 8 * sizeof(Int32) - 1, n);
264
sign_extend_UInt32(UInt32 x, int n)
266
if (((UInt32)-1 >> 1) < 0) {
267
// Host platform does arithmetic right shifts.
269
return y << (8 * sizeof(UInt32) - n) >> (8 * sizeof(UInt32) - n);
270
} else if (n < 8 * sizeof(UInt32)) {
271
// We have to manually patch the high-order bits.
273
return set_bits(x, 8 * sizeof(UInt32) - 1, n);
275
return clear_bits(x, 8 * sizeof(UInt32) - 1, n);
281
sign_extend_Int64(Int64 x, int n)
283
if (((Int64)-1 >> 1) < 0) {
284
// Host platform does arithmetic right shifts.
286
return y << (8 * sizeof(Int64) - n) >> (8 * sizeof(Int64) - n);
287
} else if (n < 8 * sizeof(Int64)) {
288
// We have to manually patch the high-order bits.
290
return set_bits(x, 8 * sizeof(Int64) - 1, n);
292
return clear_bits(x, 8 * sizeof(Int64) - 1, n);
297
sign_extend_UInt64(UInt64 x, int n)
299
if (((UInt64)-1 >> 1) < 0) {
300
// Host platform does arithmetic right shifts.
302
return y << (8 * sizeof(UInt64) - n) >> (8 * sizeof(UInt64) - n);
303
} else if (n < 8 * sizeof(UInt64)) {
304
// We have to manually patch the high-order bits.
306
return set_bits(x, 8 * sizeof(UInt64) - 1, n);
308
return clear_bits(x, 8 * sizeof(UInt64) - 1, n);
312
// Specialisations for 32 and 64 bit types.
315
/* NOTE(review): extraction lost this function's return-type line, braces
 * and the `} else {` joining the two DivResult paths, and the DivResult
 * macro's definition is not visible, so the fragment is kept byte-for-byte.
 * Also suspicious: `32 <= sizeof(int)` compares a bit count against a byte
 * count (sizeof(int) is typically 4), so the div() path is never taken --
 * likely meant `32 <= 8 * sizeof(int)`. Restore from the upstream source. */
divide_Int32(Int32 a, Int32 b)
317
if (32 <= sizeof(int)) {
318
div_t r = div((int)(a), (int)(b));
319
DivResult(r.quot, r.rem);
321
ldiv_t r = ldiv((long)(a),(long)(b));
322
DivResult(r.quot, r.rem);
327
/* NOTE(review): extraction lost this function's return-type line, braces
 * and the `} else {` joining the two DivResult paths; DivResult's
 * definition is not visible, so the fragment is kept byte-for-byte.
 * `32 < sizeof(long)` compares a bit count against a byte count (sizeof
 * (long) is 4 or 8), so the ldiv() path is effectively never taken --
 * likely meant `32 < 8 * sizeof(long)`. Also note ldiv() on a UInt32 cast
 * to a 32-bit long would misread values above LONG_MAX -- confirm against
 * the upstream source. */
divide_UInt32(UInt32 a, UInt32 b)
329
if (32 < sizeof(long)) {
330
ldiv_t r = ldiv((long)(a), (long)(b));
331
DivResult(r.quot, r.rem);
333
DivResult(a / b, a % b);
338
divide_Int64(Int64 a, Int64 b)
340
if (64 <= sizeof(long)) {
341
ldiv_t r = ldiv((long)(a), (long)(b));
342
DivResult(r.quot, r.rem);
343
} else if ((Int64)(-2) % (Int64)(3) < 0)
344
// Hardware division rounds towards zero.
345
DivResult(a / b, a % b)
347
// Hardware division rounds towards negative infinity, so fix it.
349
DivResult(a / b, a % b)
351
DivResult(-(-a / b), -(-a % b))
353
DivResult(-(a / -b), -(a % -b))