4
* Copyright (c) 2003 Fabrice Bellard
6
* This library is free software; you can redistribute it and/or
7
* modify it under the terms of the GNU Lesser General Public
8
* License as published by the Free Software Foundation; either
9
* version 2 of the License, or (at your option) any later version.
11
* This library is distributed in the hope that it will be useful,
12
* but WITHOUT ANY WARRANTY; without even the implied warranty of
13
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14
* Lesser General Public License for more details.
16
* You should have received a copy of the GNU Lesser General Public
17
* License along with this library; if not, write to the Free Software
18
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20
#define CPU_NO_GLOBAL_REGS
22
#include "host-utils.h"
27
#define raise_exception_err(a, b)\
30
fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
31
(raise_exception_err)(a, b);\
35
const uint8_t parity_table[256] = {
36
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
37
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
39
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
40
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
41
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
43
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
44
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
47
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
48
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66
CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67
0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71
const uint8_t rclw_table[32] = {
72
0, 1, 2, 3, 4, 5, 6, 7,
73
8, 9,10,11,12,13,14,15,
74
16, 0, 1, 2, 3, 4, 5, 6,
75
7, 8, 9,10,11,12,13,14,
79
const uint8_t rclb_table[32] = {
80
0, 1, 2, 3, 4, 5, 6, 7,
81
8, 0, 1, 2, 3, 4, 5, 6,
82
7, 8, 0, 1, 2, 3, 4, 5,
83
6, 7, 8, 0, 1, 2, 3, 4,
86
const CPU86_LDouble f15rk[7] =
88
0.00000000000000000000L,
89
1.00000000000000000000L,
90
3.14159265358979323851L, /*pi*/
91
0.30102999566398119523L, /*lg2*/
92
0.69314718055994530943L, /*ln2*/
93
1.44269504088896340739L, /*l2e*/
94
3.32192809488736234781L, /*l2t*/
97
/* broken thread support */
99
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
101
void helper_lock(void)
103
spin_lock(&global_cpu_lock);
106
void helper_unlock(void)
108
spin_unlock(&global_cpu_lock);
111
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
113
load_eflags(t0, update_mask);
116
target_ulong helper_read_eflags(void)
119
eflags = cc_table[CC_OP].compute_all();
120
eflags |= (DF & DF_MASK);
121
eflags |= env->eflags & ~(VM_MASK | RF_MASK);
125
/* return non zero if error */
126
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
137
index = selector & ~7;
138
if ((index + 7) > dt->limit)
140
ptr = dt->base + index;
141
*e1_ptr = ldl_kernel(ptr);
142
*e2_ptr = ldl_kernel(ptr + 4);
146
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
149
limit = (e1 & 0xffff) | (e2 & 0x000f0000);
150
if (e2 & DESC_G_MASK)
151
limit = (limit << 12) | 0xfff;
155
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
157
return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
160
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
162
sc->base = get_seg_base(e1, e2);
163
sc->limit = get_seg_limit(e1, e2);
167
/* init the segment cache in vm86 mode. */
168
static inline void load_seg_vm(int seg, int selector)
171
cpu_x86_load_seg_cache(env, seg, selector,
172
(selector << 4), 0xffff, 0);
175
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
176
uint32_t *esp_ptr, int dpl)
178
int type, index, shift;
183
printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
184
for(i=0;i<env->tr.limit;i++) {
185
printf("%02x ", env->tr.base[i]);
186
if ((i & 7) == 7) printf("\n");
192
if (!(env->tr.flags & DESC_P_MASK))
193
cpu_abort(env, "invalid tss");
194
type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
196
cpu_abort(env, "invalid tss type");
198
index = (dpl * 4 + 2) << shift;
199
if (index + (4 << shift) - 1 > env->tr.limit)
200
raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
202
*esp_ptr = lduw_kernel(env->tr.base + index);
203
*ss_ptr = lduw_kernel(env->tr.base + index + 2);
205
*esp_ptr = ldl_kernel(env->tr.base + index);
206
*ss_ptr = lduw_kernel(env->tr.base + index + 4);
210
/* XXX: merge with load_seg() */
211
static void tss_load_seg(int seg_reg, int selector)
216
if ((selector & 0xfffc) != 0) {
217
if (load_segment(&e1, &e2, selector) != 0)
218
raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
219
if (!(e2 & DESC_S_MASK))
220
raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
222
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
223
cpl = env->hflags & HF_CPL_MASK;
224
if (seg_reg == R_CS) {
225
if (!(e2 & DESC_CS_MASK))
226
raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
227
/* XXX: is it correct ? */
229
raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
230
if ((e2 & DESC_C_MASK) && dpl > rpl)
231
raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
232
} else if (seg_reg == R_SS) {
233
/* SS must be writable data */
234
if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
235
raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
236
if (dpl != cpl || dpl != rpl)
237
raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
239
/* not readable code */
240
if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
241
raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
242
/* if data or non conforming code, checks the rights */
243
if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
244
if (dpl < cpl || dpl < rpl)
245
raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
248
if (!(e2 & DESC_P_MASK))
249
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
250
cpu_x86_load_seg_cache(env, seg_reg, selector,
251
get_seg_base(e1, e2),
252
get_seg_limit(e1, e2),
255
if (seg_reg == R_SS || seg_reg == R_CS)
256
raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
260
#define SWITCH_TSS_JMP 0
261
#define SWITCH_TSS_IRET 1
262
#define SWITCH_TSS_CALL 2
264
/* XXX: restore CPU state in registers (PowerPC case) */
265
static void switch_tss(int tss_selector,
266
uint32_t e1, uint32_t e2, int source,
269
int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
270
target_ulong tss_base;
271
uint32_t new_regs[8], new_segs[6];
272
uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
273
uint32_t old_eflags, eflags_mask;
278
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
280
if (loglevel & CPU_LOG_PCALL)
281
fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
284
/* if task gate, we read the TSS segment and we load it */
286
if (!(e2 & DESC_P_MASK))
287
raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
288
tss_selector = e1 >> 16;
289
if (tss_selector & 4)
290
raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
291
if (load_segment(&e1, &e2, tss_selector) != 0)
292
raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
293
if (e2 & DESC_S_MASK)
294
raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
295
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
297
raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
300
if (!(e2 & DESC_P_MASK))
301
raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
307
tss_limit = get_seg_limit(e1, e2);
308
tss_base = get_seg_base(e1, e2);
309
if ((tss_selector & 4) != 0 ||
310
tss_limit < tss_limit_max)
311
raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
312
old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
314
old_tss_limit_max = 103;
316
old_tss_limit_max = 43;
318
/* read all the registers from the new TSS */
321
new_cr3 = ldl_kernel(tss_base + 0x1c);
322
new_eip = ldl_kernel(tss_base + 0x20);
323
new_eflags = ldl_kernel(tss_base + 0x24);
324
for(i = 0; i < 8; i++)
325
new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
326
for(i = 0; i < 6; i++)
327
new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
328
new_ldt = lduw_kernel(tss_base + 0x60);
329
new_trap = ldl_kernel(tss_base + 0x64);
333
new_eip = lduw_kernel(tss_base + 0x0e);
334
new_eflags = lduw_kernel(tss_base + 0x10);
335
for(i = 0; i < 8; i++)
336
new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
337
for(i = 0; i < 4; i++)
338
new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
339
new_ldt = lduw_kernel(tss_base + 0x2a);
345
/* NOTE: we must avoid memory exceptions during the task switch,
346
so we make dummy accesses before */
347
/* XXX: it can still fail in some cases, so a bigger hack is
348
necessary to valid the TLB after having done the accesses */
350
v1 = ldub_kernel(env->tr.base);
351
v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
352
stb_kernel(env->tr.base, v1);
353
stb_kernel(env->tr.base + old_tss_limit_max, v2);
355
/* clear busy bit (it is restartable) */
356
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
359
ptr = env->gdt.base + (env->tr.selector & ~7);
360
e2 = ldl_kernel(ptr + 4);
361
e2 &= ~DESC_TSS_BUSY_MASK;
362
stl_kernel(ptr + 4, e2);
364
old_eflags = compute_eflags();
365
if (source == SWITCH_TSS_IRET)
366
old_eflags &= ~NT_MASK;
368
/* save the current state in the old TSS */
371
stl_kernel(env->tr.base + 0x20, next_eip);
372
stl_kernel(env->tr.base + 0x24, old_eflags);
373
stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
374
stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
375
stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
376
stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
377
stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
378
stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
379
stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
380
stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
381
for(i = 0; i < 6; i++)
382
stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
385
stw_kernel(env->tr.base + 0x0e, next_eip);
386
stw_kernel(env->tr.base + 0x10, old_eflags);
387
stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
388
stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
389
stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
390
stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
391
stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
392
stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
393
stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
394
stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
395
for(i = 0; i < 4; i++)
396
stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
399
/* now if an exception occurs, it will occurs in the next task
402
if (source == SWITCH_TSS_CALL) {
403
stw_kernel(tss_base, env->tr.selector);
404
new_eflags |= NT_MASK;
408
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
411
ptr = env->gdt.base + (tss_selector & ~7);
412
e2 = ldl_kernel(ptr + 4);
413
e2 |= DESC_TSS_BUSY_MASK;
414
stl_kernel(ptr + 4, e2);
417
/* set the new CPU state */
418
/* from this point, any exception which occurs can give problems */
419
env->cr[0] |= CR0_TS_MASK;
420
env->hflags |= HF_TS_MASK;
421
env->tr.selector = tss_selector;
422
env->tr.base = tss_base;
423
env->tr.limit = tss_limit;
424
env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
426
if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
427
cpu_x86_update_cr3(env, new_cr3);
430
/* load all registers without an exception, then reload them with
431
possible exception */
433
eflags_mask = TF_MASK | AC_MASK | ID_MASK |
434
IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
436
eflags_mask &= 0xffff;
437
load_eflags(new_eflags, eflags_mask);
438
/* XXX: what to do in 16 bit case ? */
447
if (new_eflags & VM_MASK) {
448
for(i = 0; i < 6; i++)
449
load_seg_vm(i, new_segs[i]);
450
/* in vm86, CPL is always 3 */
451
cpu_x86_set_cpl(env, 3);
453
/* CPL is set the RPL of CS */
454
cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
455
/* first just selectors as the rest may trigger exceptions */
456
for(i = 0; i < 6; i++)
457
cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
460
env->ldt.selector = new_ldt & ~4;
467
raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
469
if ((new_ldt & 0xfffc) != 0) {
471
index = new_ldt & ~7;
472
if ((index + 7) > dt->limit)
473
raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
474
ptr = dt->base + index;
475
e1 = ldl_kernel(ptr);
476
e2 = ldl_kernel(ptr + 4);
477
if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
478
raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
479
if (!(e2 & DESC_P_MASK))
480
raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
481
load_seg_cache_raw_dt(&env->ldt, e1, e2);
484
/* load the segments */
485
if (!(new_eflags & VM_MASK)) {
486
tss_load_seg(R_CS, new_segs[R_CS]);
487
tss_load_seg(R_SS, new_segs[R_SS]);
488
tss_load_seg(R_ES, new_segs[R_ES]);
489
tss_load_seg(R_DS, new_segs[R_DS]);
490
tss_load_seg(R_FS, new_segs[R_FS]);
491
tss_load_seg(R_GS, new_segs[R_GS]);
494
/* check that EIP is in the CS segment limits */
495
if (new_eip > env->segs[R_CS].limit) {
496
/* XXX: different exception if CALL ? */
497
raise_exception_err(EXCP0D_GPF, 0);
501
/* check if Port I/O is allowed in TSS */
502
static inline void check_io(int addr, int size)
504
int io_offset, val, mask;
506
/* TSS must be a valid 32 bit one */
507
if (!(env->tr.flags & DESC_P_MASK) ||
508
((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
511
io_offset = lduw_kernel(env->tr.base + 0x66);
512
io_offset += (addr >> 3);
513
/* Note: the check needs two bytes */
514
if ((io_offset + 1) > env->tr.limit)
516
val = lduw_kernel(env->tr.base + io_offset);
518
mask = (1 << size) - 1;
519
/* all bits must be zero to allow the I/O */
520
if ((val & mask) != 0) {
522
raise_exception_err(EXCP0D_GPF, 0);
526
void helper_check_iob(uint32_t t0)
531
void helper_check_iow(uint32_t t0)
536
void helper_check_iol(uint32_t t0)
541
void helper_outb(uint32_t port, uint32_t data)
543
cpu_outb(env, port, data & 0xff);
546
target_ulong helper_inb(uint32_t port)
548
return cpu_inb(env, port);
551
void helper_outw(uint32_t port, uint32_t data)
553
cpu_outw(env, port, data & 0xffff);
556
target_ulong helper_inw(uint32_t port)
558
return cpu_inw(env, port);
561
void helper_outl(uint32_t port, uint32_t data)
563
cpu_outl(env, port, data);
566
target_ulong helper_inl(uint32_t port)
568
return cpu_inl(env, port);
571
static inline unsigned int get_sp_mask(unsigned int e2)
573
if (e2 & DESC_B_MASK)
580
#define SET_ESP(val, sp_mask)\
582
if ((sp_mask) == 0xffff)\
583
ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
584
else if ((sp_mask) == 0xffffffffLL)\
585
ESP = (uint32_t)(val);\
590
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
593
/* XXX: add a is_user flag to have proper security support */
594
#define PUSHW(ssp, sp, sp_mask, val)\
597
stw_kernel((ssp) + (sp & (sp_mask)), (val));\
600
#define PUSHL(ssp, sp, sp_mask, val)\
603
stl_kernel((ssp) + (sp & (sp_mask)), (val));\
606
#define POPW(ssp, sp, sp_mask, val)\
608
val = lduw_kernel((ssp) + (sp & (sp_mask)));\
612
#define POPL(ssp, sp, sp_mask, val)\
614
val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
618
/* protected mode interrupt */
619
static void do_interrupt_protected(int intno, int is_int, int error_code,
620
unsigned int next_eip, int is_hw)
623
target_ulong ptr, ssp;
624
int type, dpl, selector, ss_dpl, cpl;
625
int has_error_code, new_stack, shift;
626
uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
627
uint32_t old_eip, sp_mask;
630
if (!is_int && !is_hw) {
649
if (intno * 8 + 7 > dt->limit)
650
raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
651
ptr = dt->base + intno * 8;
652
e1 = ldl_kernel(ptr);
653
e2 = ldl_kernel(ptr + 4);
654
/* check gate type */
655
type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
657
case 5: /* task gate */
658
/* must do that check here to return the correct error code */
659
if (!(e2 & DESC_P_MASK))
660
raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
661
switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
662
if (has_error_code) {
665
/* push the error code */
666
type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
668
if (env->segs[R_SS].flags & DESC_B_MASK)
672
esp = (ESP - (2 << shift)) & mask;
673
ssp = env->segs[R_SS].base + esp;
675
stl_kernel(ssp, error_code);
677
stw_kernel(ssp, error_code);
681
case 6: /* 286 interrupt gate */
682
case 7: /* 286 trap gate */
683
case 14: /* 386 interrupt gate */
684
case 15: /* 386 trap gate */
687
raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
690
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
691
cpl = env->hflags & HF_CPL_MASK;
692
/* check privilege if software int */
693
if (is_int && dpl < cpl)
694
raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
695
/* check valid bit */
696
if (!(e2 & DESC_P_MASK))
697
raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
699
offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
700
if ((selector & 0xfffc) == 0)
701
raise_exception_err(EXCP0D_GPF, 0);
703
if (load_segment(&e1, &e2, selector) != 0)
704
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
705
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
706
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
707
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
709
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
710
if (!(e2 & DESC_P_MASK))
711
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
712
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
713
/* to inner privilege */
714
get_ss_esp_from_tss(&ss, &esp, dpl);
715
if ((ss & 0xfffc) == 0)
716
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
718
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
719
if (load_segment(&ss_e1, &ss_e2, ss) != 0)
720
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
721
ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
723
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
724
if (!(ss_e2 & DESC_S_MASK) ||
725
(ss_e2 & DESC_CS_MASK) ||
726
!(ss_e2 & DESC_W_MASK))
727
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
728
if (!(ss_e2 & DESC_P_MASK))
729
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
731
sp_mask = get_sp_mask(ss_e2);
732
ssp = get_seg_base(ss_e1, ss_e2);
733
} else if ((e2 & DESC_C_MASK) || dpl == cpl) {
734
/* to same privilege */
735
if (env->eflags & VM_MASK)
736
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
738
sp_mask = get_sp_mask(env->segs[R_SS].flags);
739
ssp = env->segs[R_SS].base;
743
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
744
new_stack = 0; /* avoid warning */
745
sp_mask = 0; /* avoid warning */
746
ssp = 0; /* avoid warning */
747
esp = 0; /* avoid warning */
753
/* XXX: check that enough room is available */
754
push_size = 6 + (new_stack << 2) + (has_error_code << 1);
755
if (env->eflags & VM_MASK)
761
if (env->eflags & VM_MASK) {
762
PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
763
PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
764
PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
765
PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
767
PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
768
PUSHL(ssp, esp, sp_mask, ESP);
770
PUSHL(ssp, esp, sp_mask, compute_eflags());
771
PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
772
PUSHL(ssp, esp, sp_mask, old_eip);
773
if (has_error_code) {
774
PUSHL(ssp, esp, sp_mask, error_code);
778
if (env->eflags & VM_MASK) {
779
PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
780
PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
781
PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
782
PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
784
PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
785
PUSHW(ssp, esp, sp_mask, ESP);
787
PUSHW(ssp, esp, sp_mask, compute_eflags());
788
PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
789
PUSHW(ssp, esp, sp_mask, old_eip);
790
if (has_error_code) {
791
PUSHW(ssp, esp, sp_mask, error_code);
796
if (env->eflags & VM_MASK) {
797
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
798
cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
799
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
800
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
802
ss = (ss & ~3) | dpl;
803
cpu_x86_load_seg_cache(env, R_SS, ss,
804
ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
806
SET_ESP(esp, sp_mask);
808
selector = (selector & ~3) | dpl;
809
cpu_x86_load_seg_cache(env, R_CS, selector,
810
get_seg_base(e1, e2),
811
get_seg_limit(e1, e2),
813
cpu_x86_set_cpl(env, dpl);
816
/* interrupt gate clear IF mask */
817
if ((type & 1) == 0) {
818
env->eflags &= ~IF_MASK;
820
env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
825
#define PUSHQ(sp, val)\
828
stq_kernel(sp, (val));\
831
#define POPQ(sp, val)\
833
val = ldq_kernel(sp);\
837
static inline target_ulong get_rsp_from_tss(int level)
842
printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
843
env->tr.base, env->tr.limit);
846
if (!(env->tr.flags & DESC_P_MASK))
847
cpu_abort(env, "invalid tss");
848
index = 8 * level + 4;
849
if ((index + 7) > env->tr.limit)
850
raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
851
return ldq_kernel(env->tr.base + index);
854
/* 64 bit interrupt */
855
static void do_interrupt64(int intno, int is_int, int error_code,
856
target_ulong next_eip, int is_hw)
860
int type, dpl, selector, cpl, ist;
861
int has_error_code, new_stack;
862
uint32_t e1, e2, e3, ss;
863
target_ulong old_eip, esp, offset;
866
if (!is_int && !is_hw) {
885
if (intno * 16 + 15 > dt->limit)
886
raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
887
ptr = dt->base + intno * 16;
888
e1 = ldl_kernel(ptr);
889
e2 = ldl_kernel(ptr + 4);
890
e3 = ldl_kernel(ptr + 8);
891
/* check gate type */
892
type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
894
case 14: /* 386 interrupt gate */
895
case 15: /* 386 trap gate */
898
raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
901
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
902
cpl = env->hflags & HF_CPL_MASK;
903
/* check privilege if software int */
904
if (is_int && dpl < cpl)
905
raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
906
/* check valid bit */
907
if (!(e2 & DESC_P_MASK))
908
raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
910
offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
912
if ((selector & 0xfffc) == 0)
913
raise_exception_err(EXCP0D_GPF, 0);
915
if (load_segment(&e1, &e2, selector) != 0)
916
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
917
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
918
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
919
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
921
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
922
if (!(e2 & DESC_P_MASK))
923
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
924
if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
925
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
926
if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
927
/* to inner privilege */
929
esp = get_rsp_from_tss(ist + 3);
931
esp = get_rsp_from_tss(dpl);
932
esp &= ~0xfLL; /* align stack */
935
} else if ((e2 & DESC_C_MASK) || dpl == cpl) {
936
/* to same privilege */
937
if (env->eflags & VM_MASK)
938
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
941
esp = get_rsp_from_tss(ist + 3);
944
esp &= ~0xfLL; /* align stack */
947
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
948
new_stack = 0; /* avoid warning */
949
esp = 0; /* avoid warning */
952
PUSHQ(esp, env->segs[R_SS].selector);
954
PUSHQ(esp, compute_eflags());
955
PUSHQ(esp, env->segs[R_CS].selector);
957
if (has_error_code) {
958
PUSHQ(esp, error_code);
963
cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
967
selector = (selector & ~3) | dpl;
968
cpu_x86_load_seg_cache(env, R_CS, selector,
969
get_seg_base(e1, e2),
970
get_seg_limit(e1, e2),
972
cpu_x86_set_cpl(env, dpl);
975
/* interrupt gate clear IF mask */
976
if ((type & 1) == 0) {
977
env->eflags &= ~IF_MASK;
979
env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
983
#if defined(CONFIG_USER_ONLY)
984
void helper_syscall(int next_eip_addend)
986
env->exception_index = EXCP_SYSCALL;
987
env->exception_next_eip = env->eip + next_eip_addend;
991
void helper_syscall(int next_eip_addend)
995
if (!(env->efer & MSR_EFER_SCE)) {
996
raise_exception_err(EXCP06_ILLOP, 0);
998
selector = (env->star >> 32) & 0xffff;
1000
if (env->hflags & HF_LMA_MASK) {
1003
ECX = env->eip + next_eip_addend;
1004
env->regs[11] = compute_eflags();
1006
code64 = env->hflags & HF_CS64_MASK;
1008
cpu_x86_set_cpl(env, 0);
1009
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1011
DESC_G_MASK | DESC_P_MASK |
1013
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1014
cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1016
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1018
DESC_W_MASK | DESC_A_MASK);
1019
env->eflags &= ~env->fmask;
1020
load_eflags(env->eflags, 0);
1022
env->eip = env->lstar;
1024
env->eip = env->cstar;
1028
ECX = (uint32_t)(env->eip + next_eip_addend);
1030
cpu_x86_set_cpl(env, 0);
1031
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1033
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1035
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1036
cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1038
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1040
DESC_W_MASK | DESC_A_MASK);
1041
env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1042
env->eip = (uint32_t)env->star;
1047
void helper_sysret(int dflag)
1051
if (!(env->efer & MSR_EFER_SCE)) {
1052
raise_exception_err(EXCP06_ILLOP, 0);
1054
cpl = env->hflags & HF_CPL_MASK;
1055
if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1056
raise_exception_err(EXCP0D_GPF, 0);
1058
selector = (env->star >> 48) & 0xffff;
1059
#ifdef TARGET_X86_64
1060
if (env->hflags & HF_LMA_MASK) {
1062
cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1064
DESC_G_MASK | DESC_P_MASK |
1065
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1066
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1070
cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1072
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1073
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1074
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1075
env->eip = (uint32_t)ECX;
1077
cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1079
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1080
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1081
DESC_W_MASK | DESC_A_MASK);
1082
load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1083
IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1084
cpu_x86_set_cpl(env, 3);
1088
cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1090
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1091
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1092
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1093
env->eip = (uint32_t)ECX;
1094
cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1096
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1097
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1098
DESC_W_MASK | DESC_A_MASK);
1099
env->eflags |= IF_MASK;
1100
cpu_x86_set_cpl(env, 3);
1103
if (kqemu_is_ok(env)) {
1104
if (env->hflags & HF_LMA_MASK)
1105
CC_OP = CC_OP_EFLAGS;
1106
env->exception_index = -1;
1112
/* real mode interrupt */
1113
static void do_interrupt_real(int intno, int is_int, int error_code,
1114
unsigned int next_eip)
1117
target_ulong ptr, ssp;
1119
uint32_t offset, esp;
1120
uint32_t old_cs, old_eip;
1122
/* real mode (simpler !) */
1124
if (intno * 4 + 3 > dt->limit)
1125
raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1126
ptr = dt->base + intno * 4;
1127
offset = lduw_kernel(ptr);
1128
selector = lduw_kernel(ptr + 2);
1130
ssp = env->segs[R_SS].base;
1135
old_cs = env->segs[R_CS].selector;
1136
/* XXX: use SS segment size ? */
1137
PUSHW(ssp, esp, 0xffff, compute_eflags());
1138
PUSHW(ssp, esp, 0xffff, old_cs);
1139
PUSHW(ssp, esp, 0xffff, old_eip);
1141
/* update processor state */
1142
ESP = (ESP & ~0xffff) | (esp & 0xffff);
1144
env->segs[R_CS].selector = selector;
1145
env->segs[R_CS].base = (selector << 4);
1146
env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1149
/* fake user mode interrupt */
1150
void do_interrupt_user(int intno, int is_int, int error_code,
1151
target_ulong next_eip)
1155
int dpl, cpl, shift;
1159
if (env->hflags & HF_LMA_MASK) {
1164
ptr = dt->base + (intno << shift);
1165
e2 = ldl_kernel(ptr + 4);
1167
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1168
cpl = env->hflags & HF_CPL_MASK;
1169
/* check privilege if software int */
1170
if (is_int && dpl < cpl)
1171
raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1173
/* Since we emulate only user space, we cannot do more than
1174
exiting the emulation with the suitable exception and error
1181
* Begin execution of an interruption. is_int is TRUE if coming from
1182
* the int instruction. next_eip is the EIP value AFTER the interrupt
1183
* instruction. It is only relevant if is_int is TRUE.
1185
void do_interrupt(int intno, int is_int, int error_code,
1186
target_ulong next_eip, int is_hw)
1188
if (loglevel & CPU_LOG_INT) {
1189
if ((env->cr[0] & CR0_PE_MASK)) {
1191
fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1192
count, intno, error_code, is_int,
1193
env->hflags & HF_CPL_MASK,
1194
env->segs[R_CS].selector, EIP,
1195
(int)env->segs[R_CS].base + EIP,
1196
env->segs[R_SS].selector, ESP);
1197
if (intno == 0x0e) {
1198
fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1200
fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1202
fprintf(logfile, "\n");
1203
cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1208
fprintf(logfile, " code=");
1209
ptr = env->segs[R_CS].base + env->eip;
1210
for(i = 0; i < 16; i++) {
1211
fprintf(logfile, " %02x", ldub(ptr + i));
1213
fprintf(logfile, "\n");
1219
if (env->cr[0] & CR0_PE_MASK) {
1221
if (env->hflags & HF_LMA_MASK) {
1222
do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1226
do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1229
do_interrupt_real(intno, is_int, error_code, next_eip);
1234
* Check nested exceptions and change to double or triple fault if
1235
* needed. It should only be called, if this is not an interrupt.
1236
* Returns the new exception number.
1238
static int check_exception(int intno, int *error_code)
1240
int first_contributory = env->old_exception == 0 ||
1241
(env->old_exception >= 10 &&
1242
env->old_exception <= 13);
1243
int second_contributory = intno == 0 ||
1244
(intno >= 10 && intno <= 13);
1246
if (loglevel & CPU_LOG_INT)
1247
fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1248
env->old_exception, intno);
1250
if (env->old_exception == EXCP08_DBLE)
1251
cpu_abort(env, "triple fault");
1253
if ((first_contributory && second_contributory)
1254
|| (env->old_exception == EXCP0E_PAGE &&
1255
(second_contributory || (intno == EXCP0E_PAGE)))) {
1256
intno = EXCP08_DBLE;
1260
if (second_contributory || (intno == EXCP0E_PAGE) ||
1261
(intno == EXCP08_DBLE))
1262
env->old_exception = intno;
1268
* Signal an interruption. It is executed in the main CPU loop.
1269
* is_int is TRUE if coming from the int instruction. next_eip is the
1270
* EIP value AFTER the interrupt instruction. It is only relevant if
1273
void raise_interrupt(int intno, int is_int, int error_code,
1274
int next_eip_addend)
1277
helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1278
intno = check_exception(intno, &error_code);
1280
helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1283
env->exception_index = intno;
1284
env->error_code = error_code;
1285
env->exception_is_int = is_int;
1286
env->exception_next_eip = env->eip + next_eip_addend;
1290
/* shortcuts to generate exceptions */
1292
void (raise_exception_err)(int exception_index, int error_code)
1294
raise_interrupt(exception_index, 0, error_code, 0);
1297
void raise_exception(int exception_index)
1299
raise_interrupt(exception_index, 0, 0, 0);
1304
#if defined(CONFIG_USER_ONLY)
1306
void do_smm_enter(void)
1310
void helper_rsm(void)
1316
#ifdef TARGET_X86_64
1317
#define SMM_REVISION_ID 0x00020064
1319
#define SMM_REVISION_ID 0x00020000
1322
void do_smm_enter(void)
1324
target_ulong sm_state;
1328
if (loglevel & CPU_LOG_INT) {
1329
fprintf(logfile, "SMM: enter\n");
1330
cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1333
env->hflags |= HF_SMM_MASK;
1334
cpu_smm_update(env);
1336
sm_state = env->smbase + 0x8000;
1338
#ifdef TARGET_X86_64
1339
for(i = 0; i < 6; i++) {
1341
offset = 0x7e00 + i * 16;
1342
stw_phys(sm_state + offset, dt->selector);
1343
stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1344
stl_phys(sm_state + offset + 4, dt->limit);
1345
stq_phys(sm_state + offset + 8, dt->base);
1348
stq_phys(sm_state + 0x7e68, env->gdt.base);
1349
stl_phys(sm_state + 0x7e64, env->gdt.limit);
1351
stw_phys(sm_state + 0x7e70, env->ldt.selector);
1352
stq_phys(sm_state + 0x7e78, env->ldt.base);
1353
stl_phys(sm_state + 0x7e74, env->ldt.limit);
1354
stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1356
stq_phys(sm_state + 0x7e88, env->idt.base);
1357
stl_phys(sm_state + 0x7e84, env->idt.limit);
1359
stw_phys(sm_state + 0x7e90, env->tr.selector);
1360
stq_phys(sm_state + 0x7e98, env->tr.base);
1361
stl_phys(sm_state + 0x7e94, env->tr.limit);
1362
stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1364
stq_phys(sm_state + 0x7ed0, env->efer);
1366
stq_phys(sm_state + 0x7ff8, EAX);
1367
stq_phys(sm_state + 0x7ff0, ECX);
1368
stq_phys(sm_state + 0x7fe8, EDX);
1369
stq_phys(sm_state + 0x7fe0, EBX);
1370
stq_phys(sm_state + 0x7fd8, ESP);
1371
stq_phys(sm_state + 0x7fd0, EBP);
1372
stq_phys(sm_state + 0x7fc8, ESI);
1373
stq_phys(sm_state + 0x7fc0, EDI);
1374
for(i = 8; i < 16; i++)
1375
stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1376
stq_phys(sm_state + 0x7f78, env->eip);
1377
stl_phys(sm_state + 0x7f70, compute_eflags());
1378
stl_phys(sm_state + 0x7f68, env->dr[6]);
1379
stl_phys(sm_state + 0x7f60, env->dr[7]);
1381
stl_phys(sm_state + 0x7f48, env->cr[4]);
1382
stl_phys(sm_state + 0x7f50, env->cr[3]);
1383
stl_phys(sm_state + 0x7f58, env->cr[0]);
1385
stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1386
stl_phys(sm_state + 0x7f00, env->smbase);
1388
stl_phys(sm_state + 0x7ffc, env->cr[0]);
1389
stl_phys(sm_state + 0x7ff8, env->cr[3]);
1390
stl_phys(sm_state + 0x7ff4, compute_eflags());
1391
stl_phys(sm_state + 0x7ff0, env->eip);
1392
stl_phys(sm_state + 0x7fec, EDI);
1393
stl_phys(sm_state + 0x7fe8, ESI);
1394
stl_phys(sm_state + 0x7fe4, EBP);
1395
stl_phys(sm_state + 0x7fe0, ESP);
1396
stl_phys(sm_state + 0x7fdc, EBX);
1397
stl_phys(sm_state + 0x7fd8, EDX);
1398
stl_phys(sm_state + 0x7fd4, ECX);
1399
stl_phys(sm_state + 0x7fd0, EAX);
1400
stl_phys(sm_state + 0x7fcc, env->dr[6]);
1401
stl_phys(sm_state + 0x7fc8, env->dr[7]);
1403
stl_phys(sm_state + 0x7fc4, env->tr.selector);
1404
stl_phys(sm_state + 0x7f64, env->tr.base);
1405
stl_phys(sm_state + 0x7f60, env->tr.limit);
1406
stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1408
stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1409
stl_phys(sm_state + 0x7f80, env->ldt.base);
1410
stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1411
stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1413
stl_phys(sm_state + 0x7f74, env->gdt.base);
1414
stl_phys(sm_state + 0x7f70, env->gdt.limit);
1416
stl_phys(sm_state + 0x7f58, env->idt.base);
1417
stl_phys(sm_state + 0x7f54, env->idt.limit);
1419
for(i = 0; i < 6; i++) {
1422
offset = 0x7f84 + i * 12;
1424
offset = 0x7f2c + (i - 3) * 12;
1425
stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1426
stl_phys(sm_state + offset + 8, dt->base);
1427
stl_phys(sm_state + offset + 4, dt->limit);
1428
stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1430
stl_phys(sm_state + 0x7f14, env->cr[4]);
1432
stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1433
stl_phys(sm_state + 0x7ef8, env->smbase);
1435
/* init SMM cpu state */
1437
#ifdef TARGET_X86_64
1438
cpu_load_efer(env, 0);
1440
load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1441
env->eip = 0x00008000;
1442
cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1444
cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1445
cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1446
cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1447
cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1448
cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1450
cpu_x86_update_cr0(env,
1451
env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1452
cpu_x86_update_cr4(env, 0);
1453
env->dr[7] = 0x00000400;
1454
CC_OP = CC_OP_EFLAGS;
1457
void helper_rsm(void)
1459
target_ulong sm_state;
1463
sm_state = env->smbase + 0x8000;
1464
#ifdef TARGET_X86_64
1465
cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1467
for(i = 0; i < 6; i++) {
1468
offset = 0x7e00 + i * 16;
1469
cpu_x86_load_seg_cache(env, i,
1470
lduw_phys(sm_state + offset),
1471
ldq_phys(sm_state + offset + 8),
1472
ldl_phys(sm_state + offset + 4),
1473
(lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1476
env->gdt.base = ldq_phys(sm_state + 0x7e68);
1477
env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1479
env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1480
env->ldt.base = ldq_phys(sm_state + 0x7e78);
1481
env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1482
env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1484
env->idt.base = ldq_phys(sm_state + 0x7e88);
1485
env->idt.limit = ldl_phys(sm_state + 0x7e84);
1487
env->tr.selector = lduw_phys(sm_state + 0x7e90);
1488
env->tr.base = ldq_phys(sm_state + 0x7e98);
1489
env->tr.limit = ldl_phys(sm_state + 0x7e94);
1490
env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1492
EAX = ldq_phys(sm_state + 0x7ff8);
1493
ECX = ldq_phys(sm_state + 0x7ff0);
1494
EDX = ldq_phys(sm_state + 0x7fe8);
1495
EBX = ldq_phys(sm_state + 0x7fe0);
1496
ESP = ldq_phys(sm_state + 0x7fd8);
1497
EBP = ldq_phys(sm_state + 0x7fd0);
1498
ESI = ldq_phys(sm_state + 0x7fc8);
1499
EDI = ldq_phys(sm_state + 0x7fc0);
1500
for(i = 8; i < 16; i++)
1501
env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1502
env->eip = ldq_phys(sm_state + 0x7f78);
1503
load_eflags(ldl_phys(sm_state + 0x7f70),
1504
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1505
env->dr[6] = ldl_phys(sm_state + 0x7f68);
1506
env->dr[7] = ldl_phys(sm_state + 0x7f60);
1508
cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1509
cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1510
cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1512
val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1513
if (val & 0x20000) {
1514
env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1517
cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1518
cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1519
load_eflags(ldl_phys(sm_state + 0x7ff4),
1520
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1521
env->eip = ldl_phys(sm_state + 0x7ff0);
1522
EDI = ldl_phys(sm_state + 0x7fec);
1523
ESI = ldl_phys(sm_state + 0x7fe8);
1524
EBP = ldl_phys(sm_state + 0x7fe4);
1525
ESP = ldl_phys(sm_state + 0x7fe0);
1526
EBX = ldl_phys(sm_state + 0x7fdc);
1527
EDX = ldl_phys(sm_state + 0x7fd8);
1528
ECX = ldl_phys(sm_state + 0x7fd4);
1529
EAX = ldl_phys(sm_state + 0x7fd0);
1530
env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1531
env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1533
env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1534
env->tr.base = ldl_phys(sm_state + 0x7f64);
1535
env->tr.limit = ldl_phys(sm_state + 0x7f60);
1536
env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1538
env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1539
env->ldt.base = ldl_phys(sm_state + 0x7f80);
1540
env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1541
env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1543
env->gdt.base = ldl_phys(sm_state + 0x7f74);
1544
env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1546
env->idt.base = ldl_phys(sm_state + 0x7f58);
1547
env->idt.limit = ldl_phys(sm_state + 0x7f54);
1549
for(i = 0; i < 6; i++) {
1551
offset = 0x7f84 + i * 12;
1553
offset = 0x7f2c + (i - 3) * 12;
1554
cpu_x86_load_seg_cache(env, i,
1555
ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1556
ldl_phys(sm_state + offset + 8),
1557
ldl_phys(sm_state + offset + 4),
1558
(ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1560
cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1562
val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1563
if (val & 0x20000) {
1564
env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1567
CC_OP = CC_OP_EFLAGS;
1568
env->hflags &= ~HF_SMM_MASK;
1569
cpu_smm_update(env);
1571
if (loglevel & CPU_LOG_INT) {
1572
fprintf(logfile, "SMM: after RSM\n");
1573
cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1577
#endif /* !CONFIG_USER_ONLY */
1580
/* division, flags are undefined */
1582
void helper_divb_AL(target_ulong t0)
1584
unsigned int num, den, q, r;
1586
num = (EAX & 0xffff);
1589
raise_exception(EXCP00_DIVZ);
1593
raise_exception(EXCP00_DIVZ);
1595
r = (num % den) & 0xff;
1596
EAX = (EAX & ~0xffff) | (r << 8) | q;
1599
void helper_idivb_AL(target_ulong t0)
1606
raise_exception(EXCP00_DIVZ);
1610
raise_exception(EXCP00_DIVZ);
1612
r = (num % den) & 0xff;
1613
EAX = (EAX & ~0xffff) | (r << 8) | q;
1616
void helper_divw_AX(target_ulong t0)
1618
unsigned int num, den, q, r;
1620
num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1621
den = (t0 & 0xffff);
1623
raise_exception(EXCP00_DIVZ);
1627
raise_exception(EXCP00_DIVZ);
1629
r = (num % den) & 0xffff;
1630
EAX = (EAX & ~0xffff) | q;
1631
EDX = (EDX & ~0xffff) | r;
1634
void helper_idivw_AX(target_ulong t0)
1638
num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1641
raise_exception(EXCP00_DIVZ);
1644
if (q != (int16_t)q)
1645
raise_exception(EXCP00_DIVZ);
1647
r = (num % den) & 0xffff;
1648
EAX = (EAX & ~0xffff) | q;
1649
EDX = (EDX & ~0xffff) | r;
1652
void helper_divl_EAX(target_ulong t0)
1654
unsigned int den, r;
1657
num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1660
raise_exception(EXCP00_DIVZ);
1665
raise_exception(EXCP00_DIVZ);
1670
void helper_idivl_EAX(target_ulong t0)
1675
num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1678
raise_exception(EXCP00_DIVZ);
1682
if (q != (int32_t)q)
1683
raise_exception(EXCP00_DIVZ);
1690
/* XXX: exception */
1691
void helper_aam(int base)
1697
EAX = (EAX & ~0xffff) | al | (ah << 8);
1701
void helper_aad(int base)
1705
ah = (EAX >> 8) & 0xff;
1706
al = ((ah * base) + al) & 0xff;
1707
EAX = (EAX & ~0xffff) | al;
1711
void helper_aaa(void)
1717
eflags = cc_table[CC_OP].compute_all();
1720
ah = (EAX >> 8) & 0xff;
1722
icarry = (al > 0xf9);
1723
if (((al & 0x0f) > 9 ) || af) {
1724
al = (al + 6) & 0x0f;
1725
ah = (ah + 1 + icarry) & 0xff;
1726
eflags |= CC_C | CC_A;
1728
eflags &= ~(CC_C | CC_A);
1731
EAX = (EAX & ~0xffff) | al | (ah << 8);
1736
void helper_aas(void)
1742
eflags = cc_table[CC_OP].compute_all();
1745
ah = (EAX >> 8) & 0xff;
1748
if (((al & 0x0f) > 9 ) || af) {
1749
al = (al - 6) & 0x0f;
1750
ah = (ah - 1 - icarry) & 0xff;
1751
eflags |= CC_C | CC_A;
1753
eflags &= ~(CC_C | CC_A);
1756
EAX = (EAX & ~0xffff) | al | (ah << 8);
1761
void helper_daa(void)
1766
eflags = cc_table[CC_OP].compute_all();
1772
if (((al & 0x0f) > 9 ) || af) {
1773
al = (al + 6) & 0xff;
1776
if ((al > 0x9f) || cf) {
1777
al = (al + 0x60) & 0xff;
1780
EAX = (EAX & ~0xff) | al;
1781
/* well, speed is not an issue here, so we compute the flags by hand */
1782
eflags |= (al == 0) << 6; /* zf */
1783
eflags |= parity_table[al]; /* pf */
1784
eflags |= (al & 0x80); /* sf */
1789
void helper_das(void)
1791
int al, al1, af, cf;
1794
eflags = cc_table[CC_OP].compute_all();
1801
if (((al & 0x0f) > 9 ) || af) {
1805
al = (al - 6) & 0xff;
1807
if ((al1 > 0x99) || cf) {
1808
al = (al - 0x60) & 0xff;
1811
EAX = (EAX & ~0xff) | al;
1812
/* well, speed is not an issue here, so we compute the flags by hand */
1813
eflags |= (al == 0) << 6; /* zf */
1814
eflags |= parity_table[al]; /* pf */
1815
eflags |= (al & 0x80); /* sf */
1820
void helper_into(int next_eip_addend)
1823
eflags = cc_table[CC_OP].compute_all();
1824
if (eflags & CC_O) {
1825
raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1829
void helper_cmpxchg8b(target_ulong a0)
1834
eflags = cc_table[CC_OP].compute_all();
1836
if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1837
stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1840
EDX = (uint32_t)(d >> 32);
1847
#ifdef TARGET_X86_64
1848
void helper_cmpxchg16b(target_ulong a0)
1853
eflags = cc_table[CC_OP].compute_all();
1856
if (d0 == EAX && d1 == EDX) {
1869
void helper_single_step(void)
1871
env->dr[6] |= 0x4000;
1872
raise_exception(EXCP01_SSTP);
1875
void helper_cpuid(void)
1879
helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1881
index = (uint32_t)EAX;
1882
/* test if maximum index reached */
1883
if (index & 0x80000000) {
1884
if (index > env->cpuid_xlevel)
1885
index = env->cpuid_level;
1887
if (index > env->cpuid_level)
1888
index = env->cpuid_level;
1893
EAX = env->cpuid_level;
1894
EBX = env->cpuid_vendor1;
1895
EDX = env->cpuid_vendor2;
1896
ECX = env->cpuid_vendor3;
1899
EAX = env->cpuid_version;
1900
EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1901
ECX = env->cpuid_ext_features;
1902
EDX = env->cpuid_features;
1905
/* cache info: needed for Pentium Pro compatibility */
1912
EAX = env->cpuid_xlevel;
1913
EBX = env->cpuid_vendor1;
1914
EDX = env->cpuid_vendor2;
1915
ECX = env->cpuid_vendor3;
1918
EAX = env->cpuid_features;
1920
ECX = env->cpuid_ext3_features;
1921
EDX = env->cpuid_ext2_features;
1926
EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1927
EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1928
ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1929
EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1932
/* cache info (L1 cache) */
1939
/* cache info (L2 cache) */
1946
/* virtual & phys address size in low 2 bytes. */
1947
/* XXX: This value must match the one used in the MMU code. */
1948
if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1949
/* 64 bit processor */
1950
#if defined(USE_KQEMU)
1951
EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
1953
/* XXX: The physical address space is limited to 42 bits in exec.c. */
1954
EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
1957
#if defined(USE_KQEMU)
1958
EAX = 0x00000020; /* 32 bits physical */
1960
EAX = 0x00000024; /* 36 bits physical */
1974
/* reserved values: zero */
1983
void helper_enter_level(int level, int data32, target_ulong t1)
1986
uint32_t esp_mask, esp, ebp;
1988
esp_mask = get_sp_mask(env->segs[R_SS].flags);
1989
ssp = env->segs[R_SS].base;
1998
stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2001
stl(ssp + (esp & esp_mask), t1);
2008
stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2011
stw(ssp + (esp & esp_mask), t1);
2015
#ifdef TARGET_X86_64
2016
void helper_enter64_level(int level, int data64, target_ulong t1)
2018
target_ulong esp, ebp;
2038
stw(esp, lduw(ebp));
2046
void helper_lldt(int selector)
2050
int index, entry_limit;
2054
if ((selector & 0xfffc) == 0) {
2055
/* XXX: NULL selector case: invalid LDT */
2060
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2062
index = selector & ~7;
2063
#ifdef TARGET_X86_64
2064
if (env->hflags & HF_LMA_MASK)
2069
if ((index + entry_limit) > dt->limit)
2070
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2071
ptr = dt->base + index;
2072
e1 = ldl_kernel(ptr);
2073
e2 = ldl_kernel(ptr + 4);
2074
if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2075
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2076
if (!(e2 & DESC_P_MASK))
2077
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2078
#ifdef TARGET_X86_64
2079
if (env->hflags & HF_LMA_MASK) {
2081
e3 = ldl_kernel(ptr + 8);
2082
load_seg_cache_raw_dt(&env->ldt, e1, e2);
2083
env->ldt.base |= (target_ulong)e3 << 32;
2087
load_seg_cache_raw_dt(&env->ldt, e1, e2);
2090
env->ldt.selector = selector;
2093
void helper_ltr(int selector)
2097
int index, type, entry_limit;
2101
if ((selector & 0xfffc) == 0) {
2102
/* NULL selector case: invalid TR */
2108
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2110
index = selector & ~7;
2111
#ifdef TARGET_X86_64
2112
if (env->hflags & HF_LMA_MASK)
2117
if ((index + entry_limit) > dt->limit)
2118
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2119
ptr = dt->base + index;
2120
e1 = ldl_kernel(ptr);
2121
e2 = ldl_kernel(ptr + 4);
2122
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2123
if ((e2 & DESC_S_MASK) ||
2124
(type != 1 && type != 9))
2125
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2126
if (!(e2 & DESC_P_MASK))
2127
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2128
#ifdef TARGET_X86_64
2129
if (env->hflags & HF_LMA_MASK) {
2131
e3 = ldl_kernel(ptr + 8);
2132
e4 = ldl_kernel(ptr + 12);
2133
if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2134
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2135
load_seg_cache_raw_dt(&env->tr, e1, e2);
2136
env->tr.base |= (target_ulong)e3 << 32;
2140
load_seg_cache_raw_dt(&env->tr, e1, e2);
2142
e2 |= DESC_TSS_BUSY_MASK;
2143
stl_kernel(ptr + 4, e2);
2145
env->tr.selector = selector;
2148
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2149
void helper_load_seg(int seg_reg, int selector)
2158
cpl = env->hflags & HF_CPL_MASK;
2159
if ((selector & 0xfffc) == 0) {
2160
/* null selector case */
2162
#ifdef TARGET_X86_64
2163
&& (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2166
raise_exception_err(EXCP0D_GPF, 0);
2167
cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2174
index = selector & ~7;
2175
if ((index + 7) > dt->limit)
2176
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2177
ptr = dt->base + index;
2178
e1 = ldl_kernel(ptr);
2179
e2 = ldl_kernel(ptr + 4);
2181
if (!(e2 & DESC_S_MASK))
2182
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2184
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2185
if (seg_reg == R_SS) {
2186
/* must be writable segment */
2187
if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2188
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2189
if (rpl != cpl || dpl != cpl)
2190
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2192
/* must be readable segment */
2193
if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2194
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2196
if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2197
/* if not conforming code, test rights */
2198
if (dpl < cpl || dpl < rpl)
2199
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2203
if (!(e2 & DESC_P_MASK)) {
2204
if (seg_reg == R_SS)
2205
raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2207
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2210
/* set the access bit if not already set */
2211
if (!(e2 & DESC_A_MASK)) {
2213
stl_kernel(ptr + 4, e2);
2216
cpu_x86_load_seg_cache(env, seg_reg, selector,
2217
get_seg_base(e1, e2),
2218
get_seg_limit(e1, e2),
2221
fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2222
selector, (unsigned long)sc->base, sc->limit, sc->flags);
2227
/* protected mode jump */
2228
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2229
int next_eip_addend)
2232
uint32_t e1, e2, cpl, dpl, rpl, limit;
2233
target_ulong next_eip;
2235
if ((new_cs & 0xfffc) == 0)
2236
raise_exception_err(EXCP0D_GPF, 0);
2237
if (load_segment(&e1, &e2, new_cs) != 0)
2238
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2239
cpl = env->hflags & HF_CPL_MASK;
2240
if (e2 & DESC_S_MASK) {
2241
if (!(e2 & DESC_CS_MASK))
2242
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2243
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2244
if (e2 & DESC_C_MASK) {
2245
/* conforming code segment */
2247
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2249
/* non conforming code segment */
2252
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2254
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2256
if (!(e2 & DESC_P_MASK))
2257
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2258
limit = get_seg_limit(e1, e2);
2259
if (new_eip > limit &&
2260
!(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2261
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2262
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2263
get_seg_base(e1, e2), limit, e2);
2266
/* jump to call or task gate */
2267
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2269
cpl = env->hflags & HF_CPL_MASK;
2270
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2272
case 1: /* 286 TSS */
2273
case 9: /* 386 TSS */
2274
case 5: /* task gate */
2275
if (dpl < cpl || dpl < rpl)
2276
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2277
next_eip = env->eip + next_eip_addend;
2278
switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2279
CC_OP = CC_OP_EFLAGS;
2281
case 4: /* 286 call gate */
2282
case 12: /* 386 call gate */
2283
if ((dpl < cpl) || (dpl < rpl))
2284
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2285
if (!(e2 & DESC_P_MASK))
2286
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2288
new_eip = (e1 & 0xffff);
2290
new_eip |= (e2 & 0xffff0000);
2291
if (load_segment(&e1, &e2, gate_cs) != 0)
2292
raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2293
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2294
/* must be code segment */
2295
if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2296
(DESC_S_MASK | DESC_CS_MASK)))
2297
raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2298
if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2299
(!(e2 & DESC_C_MASK) && (dpl != cpl)))
2300
raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2301
if (!(e2 & DESC_P_MASK))
2302
raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2303
limit = get_seg_limit(e1, e2);
2304
if (new_eip > limit)
2305
raise_exception_err(EXCP0D_GPF, 0);
2306
cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2307
get_seg_base(e1, e2), limit, e2);
2311
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2317
/* real mode call */
2318
void helper_lcall_real(int new_cs, target_ulong new_eip1,
2319
int shift, int next_eip)
2322
uint32_t esp, esp_mask;
2327
esp_mask = get_sp_mask(env->segs[R_SS].flags);
2328
ssp = env->segs[R_SS].base;
2330
PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2331
PUSHL(ssp, esp, esp_mask, next_eip);
2333
PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2334
PUSHW(ssp, esp, esp_mask, next_eip);
2337
SET_ESP(esp, esp_mask);
2339
env->segs[R_CS].selector = new_cs;
2340
env->segs[R_CS].base = (new_cs << 4);
2343
/* protected mode call */
2344
void helper_lcall_protected(int new_cs, target_ulong new_eip,
2345
int shift, int next_eip_addend)
2348
uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2349
uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2350
uint32_t val, limit, old_sp_mask;
2351
target_ulong ssp, old_ssp, next_eip;
2353
next_eip = env->eip + next_eip_addend;
2355
if (loglevel & CPU_LOG_PCALL) {
2356
fprintf(logfile, "lcall %04x:%08x s=%d\n",
2357
new_cs, (uint32_t)new_eip, shift);
2358
cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2361
if ((new_cs & 0xfffc) == 0)
2362
raise_exception_err(EXCP0D_GPF, 0);
2363
if (load_segment(&e1, &e2, new_cs) != 0)
2364
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2365
cpl = env->hflags & HF_CPL_MASK;
2367
if (loglevel & CPU_LOG_PCALL) {
2368
fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2371
if (e2 & DESC_S_MASK) {
2372
if (!(e2 & DESC_CS_MASK))
2373
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2374
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2375
if (e2 & DESC_C_MASK) {
2376
/* conforming code segment */
2378
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2380
/* non conforming code segment */
2383
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2385
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2387
if (!(e2 & DESC_P_MASK))
2388
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2390
#ifdef TARGET_X86_64
2391
/* XXX: check 16/32 bit cases in long mode */
2396
PUSHQ(rsp, env->segs[R_CS].selector);
2397
PUSHQ(rsp, next_eip);
2398
/* from this point, not restartable */
2400
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2401
get_seg_base(e1, e2),
2402
get_seg_limit(e1, e2), e2);
2408
sp_mask = get_sp_mask(env->segs[R_SS].flags);
2409
ssp = env->segs[R_SS].base;
2411
PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2412
PUSHL(ssp, sp, sp_mask, next_eip);
2414
PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2415
PUSHW(ssp, sp, sp_mask, next_eip);
2418
limit = get_seg_limit(e1, e2);
2419
if (new_eip > limit)
2420
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2421
/* from this point, not restartable */
2422
SET_ESP(sp, sp_mask);
2423
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2424
get_seg_base(e1, e2), limit, e2);
2428
/* check gate type */
2429
type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2430
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2433
case 1: /* available 286 TSS */
2434
case 9: /* available 386 TSS */
2435
case 5: /* task gate */
2436
if (dpl < cpl || dpl < rpl)
2437
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2438
switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2439
CC_OP = CC_OP_EFLAGS;
2441
case 4: /* 286 call gate */
2442
case 12: /* 386 call gate */
2445
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2450
if (dpl < cpl || dpl < rpl)
2451
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2452
/* check valid bit */
2453
if (!(e2 & DESC_P_MASK))
2454
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2455
selector = e1 >> 16;
2456
offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2457
param_count = e2 & 0x1f;
2458
if ((selector & 0xfffc) == 0)
2459
raise_exception_err(EXCP0D_GPF, 0);
2461
if (load_segment(&e1, &e2, selector) != 0)
2462
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2463
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2464
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2465
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2467
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2468
if (!(e2 & DESC_P_MASK))
2469
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2471
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2472
/* to inner privilege */
2473
get_ss_esp_from_tss(&ss, &sp, dpl);
2475
if (loglevel & CPU_LOG_PCALL)
2476
fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2477
ss, sp, param_count, ESP);
2479
if ((ss & 0xfffc) == 0)
2480
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2481
if ((ss & 3) != dpl)
2482
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2483
if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2484
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2485
ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2487
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2488
if (!(ss_e2 & DESC_S_MASK) ||
2489
(ss_e2 & DESC_CS_MASK) ||
2490
!(ss_e2 & DESC_W_MASK))
2491
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2492
if (!(ss_e2 & DESC_P_MASK))
2493
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2495
// push_size = ((param_count * 2) + 8) << shift;
2497
old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2498
old_ssp = env->segs[R_SS].base;
2500
sp_mask = get_sp_mask(ss_e2);
2501
ssp = get_seg_base(ss_e1, ss_e2);
2503
PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2504
PUSHL(ssp, sp, sp_mask, ESP);
2505
for(i = param_count - 1; i >= 0; i--) {
2506
val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2507
PUSHL(ssp, sp, sp_mask, val);
2510
PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2511
PUSHW(ssp, sp, sp_mask, ESP);
2512
for(i = param_count - 1; i >= 0; i--) {
2513
val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2514
PUSHW(ssp, sp, sp_mask, val);
2519
/* to same privilege */
2521
sp_mask = get_sp_mask(env->segs[R_SS].flags);
2522
ssp = env->segs[R_SS].base;
2523
// push_size = (4 << shift);
2528
PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2529
PUSHL(ssp, sp, sp_mask, next_eip);
2531
PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2532
PUSHW(ssp, sp, sp_mask, next_eip);
2535
/* from this point, not restartable */
2538
ss = (ss & ~3) | dpl;
2539
cpu_x86_load_seg_cache(env, R_SS, ss,
2541
get_seg_limit(ss_e1, ss_e2),
2545
selector = (selector & ~3) | dpl;
2546
cpu_x86_load_seg_cache(env, R_CS, selector,
2547
get_seg_base(e1, e2),
2548
get_seg_limit(e1, e2),
2550
cpu_x86_set_cpl(env, dpl);
2551
SET_ESP(sp, sp_mask);
2555
if (kqemu_is_ok(env)) {
2556
env->exception_index = -1;
2562
/* real and vm86 mode iret */
void helper_iret_real(int shift)
uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
sp_mask = 0xffff; /* XXXX: use SS segment size ? */
ssp = env->segs[R_SS].base;
POPL(ssp, sp, sp_mask, new_eip);
POPL(ssp, sp, sp_mask, new_cs);
POPL(ssp, sp, sp_mask, new_eflags);
POPW(ssp, sp, sp_mask, new_eip);
POPW(ssp, sp, sp_mask, new_cs);
POPW(ssp, sp, sp_mask, new_eflags);
ESP = (ESP & ~sp_mask) | (sp & sp_mask);
load_seg_vm(R_CS, new_cs);
if (env->eflags & VM_MASK)
eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
eflags_mask &= 0xffff;
load_eflags(new_eflags, eflags_mask);
env->hflags &= ~HF_NMI_MASK;
static inline void validate_seg(int seg_reg, int cpl)
/* XXX: on x86_64, we do not want to nullify FS and GS because
they may still contain a valid base. I would be interested to
know how a real x86_64 CPU behaves */
if ((seg_reg == R_FS || seg_reg == R_GS) &&
(env->segs[seg_reg].selector & 0xfffc) == 0)
e2 = env->segs[seg_reg].flags;
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
/* data or non conforming code segment */
cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
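/* Illustrative sketch of the rule validate_seg() applies: on a return that
   lowers privilege, a data segment or non-conforming code segment whose DPL
   is below the new CPL must be cleared, while conforming code segments are
   exempt.  The function name is illustrative; the DESC_* masks are the ones
   used throughout this file. */
static inline int seg_needs_clearing_sketch(uint32_t e2, int new_cpl)
{
    int dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK))
        return 0;               /* conforming code segment: kept as is */
    return dpl < new_cpl;       /* data/non-conforming code below new CPL */
}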
/* protected mode iret */
2620
static inline void helper_ret_protected(int shift, int is_iret, int addend)
2622
uint32_t new_cs, new_eflags, new_ss;
2623
uint32_t new_es, new_ds, new_fs, new_gs;
2624
uint32_t e1, e2, ss_e1, ss_e2;
2625
int cpl, dpl, rpl, eflags_mask, iopl;
2626
target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2628
#ifdef TARGET_X86_64
2633
sp_mask = get_sp_mask(env->segs[R_SS].flags);
2635
ssp = env->segs[R_SS].base;
2636
new_eflags = 0; /* avoid warning */
2637
#ifdef TARGET_X86_64
2643
POPQ(sp, new_eflags);
2649
POPL(ssp, sp, sp_mask, new_eip);
2650
POPL(ssp, sp, sp_mask, new_cs);
2653
POPL(ssp, sp, sp_mask, new_eflags);
2654
if (new_eflags & VM_MASK)
2655
goto return_to_vm86;
2659
POPW(ssp, sp, sp_mask, new_eip);
2660
POPW(ssp, sp, sp_mask, new_cs);
2662
POPW(ssp, sp, sp_mask, new_eflags);
2665
if (loglevel & CPU_LOG_PCALL) {
2666
fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2667
new_cs, new_eip, shift, addend);
2668
cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2671
if ((new_cs & 0xfffc) == 0)
2672
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2673
if (load_segment(&e1, &e2, new_cs) != 0)
2674
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2675
if (!(e2 & DESC_S_MASK) ||
2676
!(e2 & DESC_CS_MASK))
2677
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2678
cpl = env->hflags & HF_CPL_MASK;
2681
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2682
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2683
if (e2 & DESC_C_MASK) {
2685
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2688
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2690
if (!(e2 & DESC_P_MASK))
2691
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2694
if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2695
((env->hflags & HF_CS64_MASK) && !is_iret))) {
2696
/* return to same privilege level */
2697
cpu_x86_load_seg_cache(env, R_CS, new_cs,
2698
get_seg_base(e1, e2),
2699
get_seg_limit(e1, e2),
2702
/* return to different privilege level */
2703
#ifdef TARGET_X86_64
2712
POPL(ssp, sp, sp_mask, new_esp);
2713
POPL(ssp, sp, sp_mask, new_ss);
2717
POPW(ssp, sp, sp_mask, new_esp);
2718
POPW(ssp, sp, sp_mask, new_ss);
2721
if (loglevel & CPU_LOG_PCALL) {
2722
fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2726
if ((new_ss & 0xfffc) == 0) {
2727
#ifdef TARGET_X86_64
2728
/* NULL ss is allowed in long mode if cpl != 3*/
2729
/* XXX: test CS64 ? */
2730
if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2731
cpu_x86_load_seg_cache(env, R_SS, new_ss,
2733
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2734
DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2735
DESC_W_MASK | DESC_A_MASK);
2736
ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2740
raise_exception_err(EXCP0D_GPF, 0);
2743
if ((new_ss & 3) != rpl)
2744
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2745
if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2746
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2747
if (!(ss_e2 & DESC_S_MASK) ||
2748
(ss_e2 & DESC_CS_MASK) ||
2749
!(ss_e2 & DESC_W_MASK))
2750
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2751
dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2753
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2754
if (!(ss_e2 & DESC_P_MASK))
2755
raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2756
cpu_x86_load_seg_cache(env, R_SS, new_ss,
2757
get_seg_base(ss_e1, ss_e2),
2758
get_seg_limit(ss_e1, ss_e2),
2762
cpu_x86_load_seg_cache(env, R_CS, new_cs,
2763
get_seg_base(e1, e2),
2764
get_seg_limit(e1, e2),
2766
cpu_x86_set_cpl(env, rpl);
2768
#ifdef TARGET_X86_64
2769
if (env->hflags & HF_CS64_MASK)
2773
sp_mask = get_sp_mask(ss_e2);
2775
/* validate data segments */
2776
validate_seg(R_ES, rpl);
2777
validate_seg(R_DS, rpl);
2778
validate_seg(R_FS, rpl);
2779
validate_seg(R_GS, rpl);
2783
SET_ESP(sp, sp_mask);
2786
/* NOTE: 'cpl' is the _old_ CPL */
2787
eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2789
eflags_mask |= IOPL_MASK;
2790
iopl = (env->eflags >> IOPL_SHIFT) & 3;
2792
eflags_mask |= IF_MASK;
2794
eflags_mask &= 0xffff;
2795
load_eflags(new_eflags, eflags_mask);
2800
POPL(ssp, sp, sp_mask, new_esp);
2801
POPL(ssp, sp, sp_mask, new_ss);
2802
POPL(ssp, sp, sp_mask, new_es);
2803
POPL(ssp, sp, sp_mask, new_ds);
2804
POPL(ssp, sp, sp_mask, new_fs);
2805
POPL(ssp, sp, sp_mask, new_gs);
2807
/* modify processor state */
2808
load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2809
IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2810
load_seg_vm(R_CS, new_cs & 0xffff);
2811
cpu_x86_set_cpl(env, 3);
2812
load_seg_vm(R_SS, new_ss & 0xffff);
2813
load_seg_vm(R_ES, new_es & 0xffff);
2814
load_seg_vm(R_DS, new_ds & 0xffff);
2815
load_seg_vm(R_FS, new_fs & 0xffff);
2816
load_seg_vm(R_GS, new_gs & 0xffff);
2818
env->eip = new_eip & 0xffff;
2822
void helper_iret_protected(int shift, int next_eip)
2824
int tss_selector, type;
2827
/* specific case for TSS */
2828
if (env->eflags & NT_MASK) {
2829
#ifdef TARGET_X86_64
2830
if (env->hflags & HF_LMA_MASK)
2831
raise_exception_err(EXCP0D_GPF, 0);
2833
tss_selector = lduw_kernel(env->tr.base + 0);
2834
if (tss_selector & 4)
2835
raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2836
if (load_segment(&e1, &e2, tss_selector) != 0)
2837
raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2838
type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2839
/* NOTE: we check both segment and busy TSS */
2841
raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2842
switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2844
helper_ret_protected(shift, 1, 0);
2846
env->hflags &= ~HF_NMI_MASK;
2848
if (kqemu_is_ok(env)) {
2849
CC_OP = CC_OP_EFLAGS;
2850
env->exception_index = -1;
2856
void helper_lret_protected(int shift, int addend)
2858
helper_ret_protected(shift, 0, addend);
2860
if (kqemu_is_ok(env)) {
2861
env->exception_index = -1;
2867
void helper_sysenter(void)
2869
if (env->sysenter_cs == 0) {
2870
raise_exception_err(EXCP0D_GPF, 0);
2872
env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2873
cpu_x86_set_cpl(env, 0);
2874
cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2876
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2878
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2879
cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2881
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2883
DESC_W_MASK | DESC_A_MASK);
2884
ESP = env->sysenter_esp;
2885
EIP = env->sysenter_eip;
2888
void helper_sysexit(void)
2892
cpl = env->hflags & HF_CPL_MASK;
2893
if (env->sysenter_cs == 0 || cpl != 0) {
2894
raise_exception_err(EXCP0D_GPF, 0);
2896
cpu_x86_set_cpl(env, 3);
2897
cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2899
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2900
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2901
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2902
cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2904
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2905
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2906
DESC_W_MASK | DESC_A_MASK);
2910
if (kqemu_is_ok(env)) {
2911
env->exception_index = -1;
2917
#if defined(CONFIG_USER_ONLY)
2918
target_ulong helper_read_crN(int reg)
2923
void helper_write_crN(int reg, target_ulong t0)
2927
target_ulong helper_read_crN(int reg)
2931
helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2937
val = cpu_get_apic_tpr(env);
2943
void helper_write_crN(int reg, target_ulong t0)
2945
helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2948
cpu_x86_update_cr0(env, t0);
2951
cpu_x86_update_cr3(env, t0);
2954
cpu_x86_update_cr4(env, t0);
2957
cpu_set_apic_tpr(env, t0);
2967
void helper_lmsw(target_ulong t0)
/* only 4 lower bits of CR0 are modified. PE cannot be set to zero
if already set to one. */
t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
helper_write_crN(0, t0);
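/* Illustrative sketch of the masking done by helper_lmsw() above: LMSW can
   only write CR0 bits 0..3 (PE, MP, EM, TS), and although it can set PE it
   can never clear it once set, because bit 0 of the old CR0 is kept and only
   ORed with the new value.  The helper name is illustrative. */
static inline target_ulong lmsw_merge_cr0_sketch(target_ulong cr0, target_ulong msw)
{
    return (cr0 & ~0xe) | (msw & 0xf);
}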
void helper_clts(void)
env->cr[0] &= ~CR0_TS_MASK;
env->hflags &= ~HF_TS_MASK;
#if !defined(CONFIG_USER_ONLY)
2982
target_ulong helper_movtl_T0_cr8(void)
2984
return cpu_get_apic_tpr(env);
2989
void helper_movl_drN_T0(int reg, target_ulong t0)
2994
void helper_invlpg(target_ulong addr)
2996
helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2997
tlb_flush_page(env, addr);
3000
void helper_rdtsc(void)
if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
raise_exception(EXCP0D_GPF);
helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
val = cpu_get_tsc(env);
EAX = (uint32_t)(val);
EDX = (uint32_t)(val >> 32);
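/* Illustrative sketch: RDTSC (like RDMSR further down) returns a 64-bit
   value split across EDX:EAX, which is what the two assignments above do.
   The helper name is illustrative. */
static inline void split_edx_eax_sketch(uint64_t val, uint32_t *eax, uint32_t *edx)
{
    *eax = (uint32_t)val;
    *edx = (uint32_t)(val >> 32);
}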
void helper_rdpmc(void)
if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
raise_exception(EXCP0D_GPF);
helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
/* currently unimplemented */
raise_exception_err(EXCP06_ILLOP, 0);
#if defined(CONFIG_USER_ONLY)
3026
void helper_wrmsr(void)
3030
void helper_rdmsr(void)
3034
void helper_wrmsr(void)
3038
helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3040
val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3042
switch((uint32_t)ECX) {
3043
case MSR_IA32_SYSENTER_CS:
3044
env->sysenter_cs = val & 0xffff;
3046
case MSR_IA32_SYSENTER_ESP:
3047
env->sysenter_esp = val;
3049
case MSR_IA32_SYSENTER_EIP:
3050
env->sysenter_eip = val;
3052
case MSR_IA32_APICBASE:
cpu_set_apic_base(env, val);
uint64_t update_mask = 0;
if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
update_mask |= MSR_EFER_SCE;
if (env->cpuid_ext2_features & CPUID_EXT2_LM)
update_mask |= MSR_EFER_LME;
if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
update_mask |= MSR_EFER_FFXSR;
if (env->cpuid_ext2_features & CPUID_EXT2_NX)
update_mask |= MSR_EFER_NXE;
if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
update_mask |= MSR_EFER_SVME;
cpu_load_efer(env, (env->efer & ~update_mask) |
(val & update_mask));
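/* Illustrative sketch of the EFER update above: only the bits whose feature
   is advertised through CPUID (SCE, LME, FFXSR, NXE, SVME) are accepted from
   the guest-written value, and everything else keeps its previous contents.
   The helper name is illustrative. */
static inline uint64_t masked_efer_write_sketch(uint64_t old_efer, uint64_t val,
                                                uint64_t writable_mask)
{
    return (old_efer & ~writable_mask) | (val & writable_mask);
}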
case MSR_VM_HSAVE_PA:
3080
env->vm_hsave = val;
3082
#ifdef TARGET_X86_64
3093
env->segs[R_FS].base = val;
3096
env->segs[R_GS].base = val;
3098
case MSR_KERNELGSBASE:
3099
env->kernelgsbase = val;
3103
/* XXX: exception ? */
3108
void helper_rdmsr(void)
3112
helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3114
switch((uint32_t)ECX) {
3115
case MSR_IA32_SYSENTER_CS:
3116
val = env->sysenter_cs;
3118
case MSR_IA32_SYSENTER_ESP:
3119
val = env->sysenter_esp;
3121
case MSR_IA32_SYSENTER_EIP:
3122
val = env->sysenter_eip;
3124
case MSR_IA32_APICBASE:
3125
val = cpu_get_apic_base(env);
3136
case MSR_VM_HSAVE_PA:
3137
val = env->vm_hsave;
3139
#ifdef TARGET_X86_64
3150
val = env->segs[R_FS].base;
3153
val = env->segs[R_GS].base;
3155
case MSR_KERNELGSBASE:
3156
val = env->kernelgsbase;
3160
case MSR_QPI_COMMBASE:
3161
if (env->kqemu_enabled) {
3162
val = kqemu_comm_base;
3169
/* XXX: exception ? */
3173
EAX = (uint32_t)(val);
3174
EDX = (uint32_t)(val >> 32);
3178
target_ulong helper_lsl(target_ulong selector1)
3181
uint32_t e1, e2, eflags, selector;
3182
int rpl, dpl, cpl, type;
3184
selector = selector1 & 0xffff;
3185
eflags = cc_table[CC_OP].compute_all();
3186
if (load_segment(&e1, &e2, selector) != 0)
3189
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3190
cpl = env->hflags & HF_CPL_MASK;
3191
if (e2 & DESC_S_MASK) {
3192
if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3195
if (dpl < cpl || dpl < rpl)
3199
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3210
if (dpl < cpl || dpl < rpl) {
3212
CC_SRC = eflags & ~CC_Z;
3216
limit = get_seg_limit(e1, e2);
3217
CC_SRC = eflags | CC_Z;
3221
target_ulong helper_lar(target_ulong selector1)
3223
uint32_t e1, e2, eflags, selector;
3224
int rpl, dpl, cpl, type;
3226
selector = selector1 & 0xffff;
3227
eflags = cc_table[CC_OP].compute_all();
3228
if ((selector & 0xfffc) == 0)
3230
if (load_segment(&e1, &e2, selector) != 0)
3233
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3234
cpl = env->hflags & HF_CPL_MASK;
3235
if (e2 & DESC_S_MASK) {
3236
if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3239
if (dpl < cpl || dpl < rpl)
3243
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3257
if (dpl < cpl || dpl < rpl) {
3259
CC_SRC = eflags & ~CC_Z;
3263
CC_SRC = eflags | CC_Z;
3264
return e2 & 0x00f0ff00;
3267
void helper_verr(target_ulong selector1)
3269
uint32_t e1, e2, eflags, selector;
3272
selector = selector1 & 0xffff;
3273
eflags = cc_table[CC_OP].compute_all();
3274
if ((selector & 0xfffc) == 0)
3276
if (load_segment(&e1, &e2, selector) != 0)
3278
if (!(e2 & DESC_S_MASK))
3281
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3282
cpl = env->hflags & HF_CPL_MASK;
3283
if (e2 & DESC_CS_MASK) {
3284
if (!(e2 & DESC_R_MASK))
3286
if (!(e2 & DESC_C_MASK)) {
3287
if (dpl < cpl || dpl < rpl)
3291
if (dpl < cpl || dpl < rpl) {
3293
CC_SRC = eflags & ~CC_Z;
3297
CC_SRC = eflags | CC_Z;
3300
void helper_verw(target_ulong selector1)
3302
uint32_t e1, e2, eflags, selector;
3305
selector = selector1 & 0xffff;
3306
eflags = cc_table[CC_OP].compute_all();
3307
if ((selector & 0xfffc) == 0)
3309
if (load_segment(&e1, &e2, selector) != 0)
3311
if (!(e2 & DESC_S_MASK))
3314
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3315
cpl = env->hflags & HF_CPL_MASK;
3316
if (e2 & DESC_CS_MASK) {
3319
if (dpl < cpl || dpl < rpl)
3321
if (!(e2 & DESC_W_MASK)) {
3323
CC_SRC = eflags & ~CC_Z;
3327
CC_SRC = eflags | CC_Z;
3330
/* x87 FPU helpers */
3332
static void fpu_set_exception(int mask)
3335
if (env->fpus & (~env->fpuc & FPUC_EM))
3336
env->fpus |= FPUS_SE | FPUS_B;
3339
static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3342
fpu_set_exception(FPUS_ZE);
3346
void fpu_raise_exception(void)
3348
if (env->cr[0] & CR0_NE_MASK) {
3349
raise_exception(EXCP10_COPR);
3351
#if !defined(CONFIG_USER_ONLY)
3358
void helper_flds_FT0(uint32_t val)
3365
FT0 = float32_to_floatx(u.f, &env->fp_status);
3368
void helper_fldl_FT0(uint64_t val)
3375
FT0 = float64_to_floatx(u.f, &env->fp_status);
3378
void helper_fildl_FT0(int32_t val)
3380
FT0 = int32_to_floatx(val, &env->fp_status);
3383
void helper_flds_ST0(uint32_t val)
3390
new_fpstt = (env->fpstt - 1) & 7;
3392
env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3393
env->fpstt = new_fpstt;
3394
env->fptags[new_fpstt] = 0; /* validate stack entry */
3397
void helper_fldl_ST0(uint64_t val)
3404
new_fpstt = (env->fpstt - 1) & 7;
3406
env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3407
env->fpstt = new_fpstt;
3408
env->fptags[new_fpstt] = 0; /* validate stack entry */
3411
void helper_fildl_ST0(int32_t val)
3414
new_fpstt = (env->fpstt - 1) & 7;
3415
env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3416
env->fpstt = new_fpstt;
3417
env->fptags[new_fpstt] = 0; /* validate stack entry */
3420
void helper_fildll_ST0(int64_t val)
3423
new_fpstt = (env->fpstt - 1) & 7;
3424
env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3425
env->fpstt = new_fpstt;
3426
env->fptags[new_fpstt] = 0; /* validate stack entry */
3429
uint32_t helper_fsts_ST0(void)
3435
u.f = floatx_to_float32(ST0, &env->fp_status);
3439
uint64_t helper_fstl_ST0(void)
3445
u.f = floatx_to_float64(ST0, &env->fp_status);
3449
int32_t helper_fist_ST0(void)
3452
val = floatx_to_int32(ST0, &env->fp_status);
3453
if (val != (int16_t)val)
3458
int32_t helper_fistl_ST0(void)
3461
val = floatx_to_int32(ST0, &env->fp_status);
3465
int64_t helper_fistll_ST0(void)
3468
val = floatx_to_int64(ST0, &env->fp_status);
3472
int32_t helper_fistt_ST0(void)
3475
val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3476
if (val != (int16_t)val)
3481
int32_t helper_fisttl_ST0(void)
3484
val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3488
int64_t helper_fisttll_ST0(void)
3491
val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3495
void helper_fldt_ST0(target_ulong ptr)
3498
new_fpstt = (env->fpstt - 1) & 7;
3499
env->fpregs[new_fpstt].d = helper_fldt(ptr);
3500
env->fpstt = new_fpstt;
3501
env->fptags[new_fpstt] = 0; /* validate stack entry */
3504
void helper_fstt_ST0(target_ulong ptr)
3506
helper_fstt(ST0, ptr);
3509
void helper_fpush(void)
3514
void helper_fpop(void)
3519
void helper_fdecstp(void)
3521
env->fpstt = (env->fpstt - 1) & 7;
3522
env->fpus &= (~0x4700);
3525
void helper_fincstp(void)
3527
env->fpstt = (env->fpstt + 1) & 7;
3528
env->fpus &= (~0x4700);
3533
void helper_ffree_STN(int st_index)
3535
env->fptags[(env->fpstt + st_index) & 7] = 1;
3538
void helper_fmov_ST0_FT0(void)
3543
void helper_fmov_FT0_STN(int st_index)
3548
void helper_fmov_ST0_STN(int st_index)
3553
void helper_fmov_STN_ST0(int st_index)
3558
void helper_fxchg_ST0_STN(int st_index)
3566
/* FPU operations */
3568
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3570
void helper_fcom_ST0_FT0(void)
3574
ret = floatx_compare(ST0, FT0, &env->fp_status);
3575
env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3579
void helper_fucom_ST0_FT0(void)
3583
ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3584
env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
3588
static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3590
void helper_fcomi_ST0_FT0(void)
3595
ret = floatx_compare(ST0, FT0, &env->fp_status);
3596
eflags = cc_table[CC_OP].compute_all();
3597
eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3602
void helper_fucomi_ST0_FT0(void)
3607
ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3608
eflags = cc_table[CC_OP].compute_all();
3609
eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3614
void helper_fadd_ST0_FT0(void)
3619
void helper_fmul_ST0_FT0(void)
3624
void helper_fsub_ST0_FT0(void)
3629
void helper_fsubr_ST0_FT0(void)
3634
void helper_fdiv_ST0_FT0(void)
3636
ST0 = helper_fdiv(ST0, FT0);
3639
void helper_fdivr_ST0_FT0(void)
3641
ST0 = helper_fdiv(FT0, ST0);
3644
/* fp operations between STN and ST0 */
3646
void helper_fadd_STN_ST0(int st_index)
3648
ST(st_index) += ST0;
3651
void helper_fmul_STN_ST0(int st_index)
3653
ST(st_index) *= ST0;
3656
void helper_fsub_STN_ST0(int st_index)
3658
ST(st_index) -= ST0;
3661
void helper_fsubr_STN_ST0(int st_index)
3668
void helper_fdiv_STN_ST0(int st_index)
3672
*p = helper_fdiv(*p, ST0);
3675
void helper_fdivr_STN_ST0(int st_index)
3679
*p = helper_fdiv(ST0, *p);
3682
/* misc FPU operations */
3683
void helper_fchs_ST0(void)
3685
ST0 = floatx_chs(ST0);
3688
void helper_fabs_ST0(void)
3690
ST0 = floatx_abs(ST0);
3693
void helper_fld1_ST0(void)
3698
void helper_fldl2t_ST0(void)
3703
void helper_fldl2e_ST0(void)
3708
void helper_fldpi_ST0(void)
3713
void helper_fldlg2_ST0(void)
3718
void helper_fldln2_ST0(void)
3723
void helper_fldz_ST0(void)
3728
void helper_fldz_FT0(void)
uint32_t helper_fnstsw(void)
return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
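/* Illustrative sketch: the emulator keeps the x87 stack-top pointer in
   env->fpstt rather than inside env->fpus, so helper_fnstsw() above folds it
   back into bits 11..13 (the TOP field) of the architectural status word
   before handing it to the guest.  The name below is illustrative. */
static inline uint16_t x87_status_word_sketch(uint16_t fpus, unsigned int top)
{
    return (uint16_t)((fpus & ~0x3800) | ((top & 7) << 11));
}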
uint32_t helper_fnstcw(void)
3743
static void update_fp_status(void)
3747
/* set rounding mode */
3748
switch(env->fpuc & RC_MASK) {
3751
rnd_type = float_round_nearest_even;
3754
rnd_type = float_round_down;
3757
rnd_type = float_round_up;
3760
rnd_type = float_round_to_zero;
3763
set_float_rounding_mode(rnd_type, &env->fp_status);
3765
switch((env->fpuc >> 8) & 3) {
3777
set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3781
void helper_fldcw(uint32_t val)
3787
void helper_fclex(void)
3789
env->fpus &= 0x7f00;
3792
void helper_fwait(void)
3794
if (env->fpus & FPUS_SE)
3795
fpu_raise_exception();
3799
void helper_fninit(void)
void helper_fbld_ST0(target_ulong ptr)
for(i = 8; i >= 0; i--) {
val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
if (ldub(ptr + 9) & 0x80)
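/* Illustrative sketch of the packed-BCD decoding done by helper_fbld_ST0()
   above: the FBLD operand is nine bytes of two decimal digits each (least
   significant byte first in memory, hence the loop from index 8 down to 0
   accumulating the most significant digits first), followed by a tenth byte
   whose bit 7 carries the sign.  Names below are illustrative. */
static inline int64_t packed_bcd_to_int_sketch(const uint8_t bytes[10])
{
    int64_t val = 0;
    int i;
    for (i = 8; i >= 0; i--) {
        uint8_t v = bytes[i];
        val = val * 100 + (v >> 4) * 10 + (v & 0x0f);
    }
    return (bytes[9] & 0x80) ? -val : val;
}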
void helper_fbst_ST0(target_ulong ptr)
3838
target_ulong mem_ref, mem_end;
3841
val = floatx_to_int64(ST0, &env->fp_status);
3843
mem_end = mem_ref + 9;
3850
while (mem_ref < mem_end) {
3855
v = ((v / 10) << 4) | (v % 10);
3858
while (mem_ref < mem_end) {
3863
void helper_f2xm1(void)
3865
ST0 = pow(2.0,ST0) - 1.0;
3868
void helper_fyl2x(void)
3870
CPU86_LDouble fptemp;
3874
fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3878
env->fpus &= (~0x4700);
3883
void helper_fptan(void)
3885
CPU86_LDouble fptemp;
3888
if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3894
env->fpus &= (~0x400); /* C2 <-- 0 */
3895
/* the above code is for |arg| < 2**52 only */
3899
void helper_fpatan(void)
3901
CPU86_LDouble fptemp, fpsrcop;
3905
ST1 = atan2(fpsrcop,fptemp);
3909
void helper_fxtract(void)
3911
CPU86_LDoubleU temp;
3912
unsigned int expdif;
3915
expdif = EXPD(temp) - EXPBIAS;
3916
/*DP exponent bias*/
3923
void helper_fprem1(void)
3925
CPU86_LDouble dblq, fpsrcop, fptemp;
3926
CPU86_LDoubleU fpsrcop1, fptemp1;
3928
signed long long int q;
3930
if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3931
ST0 = 0.0 / 0.0; /* NaN */
3932
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3938
fpsrcop1.d = fpsrcop;
3940
expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3943
/* optimisation? taken from the AMD docs */
3944
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3945
/* ST0 is unchanged */
3950
dblq = fpsrcop / fptemp;
3951
/* round dblq towards nearest integer */
3953
ST0 = fpsrcop - fptemp * dblq;
3955
/* convert dblq to q by truncating towards zero */
3957
q = (signed long long int)(-dblq);
3959
q = (signed long long int)dblq;
3961
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3962
/* (C0,C3,C1) <-- (q2,q1,q0) */
3963
env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3964
env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3965
env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3967
env->fpus |= 0x400; /* C2 <-- 1 */
3968
fptemp = pow(2.0, expdif - 50);
3969
fpsrcop = (ST0 / ST1) / fptemp;
3970
/* fpsrcop = integer obtained by chopping */
3971
fpsrcop = (fpsrcop < 0.0) ?
3972
-(floor(fabs(fpsrcop))) : floor(fpsrcop);
3973
ST0 -= (ST1 * fpsrcop * fptemp);
3977
void helper_fprem(void)
3979
CPU86_LDouble dblq, fpsrcop, fptemp;
3980
CPU86_LDoubleU fpsrcop1, fptemp1;
3982
signed long long int q;
3984
if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3985
ST0 = 0.0 / 0.0; /* NaN */
3986
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3990
fpsrcop = (CPU86_LDouble)ST0;
3991
fptemp = (CPU86_LDouble)ST1;
3992
fpsrcop1.d = fpsrcop;
3994
expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3997
/* optimisation? taken from the AMD docs */
3998
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3999
/* ST0 is unchanged */
4003
if ( expdif < 53 ) {
4004
dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4005
/* round dblq towards zero */
4006
dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4007
ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4009
/* convert dblq to q by truncating towards zero */
4011
q = (signed long long int)(-dblq);
4013
q = (signed long long int)dblq;
4015
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4016
/* (C0,C3,C1) <-- (q2,q1,q0) */
4017
env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4018
env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4019
env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4021
int N = 32 + (expdif % 32); /* as per AMD docs */
4022
env->fpus |= 0x400; /* C2 <-- 1 */
4023
fptemp = pow(2.0, (double)(expdif - N));
4024
fpsrcop = (ST0 / ST1) / fptemp;
4025
/* fpsrcop = integer obtained by chopping */
4026
fpsrcop = (fpsrcop < 0.0) ?
4027
-(floor(fabs(fpsrcop))) : floor(fpsrcop);
4028
ST0 -= (ST1 * fpsrcop * fptemp);
4032
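/* Illustrative sketch of the condition-code update shared by helper_fprem()
   and helper_fprem1() above when the reduction is complete: the three low
   bits of the quotient are reported as C0 <- q2 (bit 8), C3 <- q1 (bit 14)
   and C1 <- q0 (bit 9), with C2 cleared.  The helper name is illustrative. */
static inline uint16_t fprem_set_quotient_bits_sketch(uint16_t fpus, unsigned int q)
{
    fpus &= ~0x4700;                    /* clear C3,C2,C1,C0 */
    fpus |= (q & 0x4) << (8 - 2);       /* C0 <- q2 */
    fpus |= (q & 0x2) << (14 - 1);      /* C3 <- q1 */
    fpus |= (q & 0x1) << (9 - 0);       /* C1 <- q0 */
    return fpus;
}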
void helper_fyl2xp1(void)
4034
CPU86_LDouble fptemp;
4037
if ((fptemp+1.0)>0.0) {
4038
fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4042
env->fpus &= (~0x4700);
4047
void helper_fsqrt(void)
4049
CPU86_LDouble fptemp;
4053
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4059
void helper_fsincos(void)
4061
CPU86_LDouble fptemp;
4064
if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4070
env->fpus &= (~0x400); /* C2 <-- 0 */
4071
/* the above code is for |arg| < 2**63 only */
4075
void helper_frndint(void)
4077
ST0 = floatx_round_to_int(ST0, &env->fp_status);
4080
void helper_fscale(void)
4082
ST0 = ldexp (ST0, (int)(ST1));
4085
void helper_fsin(void)
4087
CPU86_LDouble fptemp;
4090
if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4094
env->fpus &= (~0x400); /* C2 <-- 0 */
4095
/* the above code is for |arg| < 2**53 only */
4099
void helper_fcos(void)
4101
CPU86_LDouble fptemp;
4104
if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4108
env->fpus &= (~0x400); /* C2 <-- 0 */
4109
/* the above code is for |arg| < 2**63 only */
4113
void helper_fxam_ST0(void)
4115
CPU86_LDoubleU temp;
4120
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4122
env->fpus |= 0x200; /* C1 <-- 1 */
4124
/* XXX: test fptags too */
4125
expdif = EXPD(temp);
4126
if (expdif == MAXEXPD) {
4127
#ifdef USE_X86LDOUBLE
4128
if (MANTD(temp) == 0x8000000000000000ULL)
4130
if (MANTD(temp) == 0)
4132
env->fpus |= 0x500 /*Infinity*/;
4134
env->fpus |= 0x100 /*NaN*/;
4135
} else if (expdif == 0) {
4136
if (MANTD(temp) == 0)
4137
env->fpus |= 0x4000 /*Zero*/;
4139
env->fpus |= 0x4400 /*Denormal*/;
4145
void helper_fstenv(target_ulong ptr, int data32)
4147
int fpus, fptag, exp, i;
4151
fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4153
for (i=7; i>=0; i--) {
4155
if (env->fptags[i]) {
4158
tmp.d = env->fpregs[i].d;
4161
if (exp == 0 && mant == 0) {
4164
} else if (exp == 0 || exp == MAXEXPD
4165
#ifdef USE_X86LDOUBLE
4166
|| (mant & (1LL << 63)) == 0
4169
/* NaNs, infinity, denormal */
4176
stl(ptr, env->fpuc);
4178
stl(ptr + 8, fptag);
4179
stl(ptr + 12, 0); /* fpip */
4180
stl(ptr + 16, 0); /* fpcs */
4181
stl(ptr + 20, 0); /* fpoo */
4182
stl(ptr + 24, 0); /* fpos */
4185
stw(ptr, env->fpuc);
4187
stw(ptr + 4, fptag);
4195
void helper_fldenv(target_ulong ptr, int data32)
4200
env->fpuc = lduw(ptr);
4201
fpus = lduw(ptr + 4);
4202
fptag = lduw(ptr + 8);
4205
env->fpuc = lduw(ptr);
4206
fpus = lduw(ptr + 2);
4207
fptag = lduw(ptr + 4);
4209
env->fpstt = (fpus >> 11) & 7;
4210
env->fpus = fpus & ~0x3800;
4211
for(i = 0;i < 8; i++) {
4212
env->fptags[i] = ((fptag & 3) == 3);
4217
void helper_fsave(target_ulong ptr, int data32)
4222
helper_fstenv(ptr, data32);
4224
ptr += (14 << data32);
4225
for(i = 0;i < 8; i++) {
4227
helper_fstt(tmp, ptr);
4245
void helper_frstor(target_ulong ptr, int data32)
4250
helper_fldenv(ptr, data32);
4251
ptr += (14 << data32);
4253
for(i = 0;i < 8; i++) {
4254
tmp = helper_fldt(ptr);
4260
void helper_fxsave(target_ulong ptr, int data64)
4262
int fpus, fptag, i, nb_xmm_regs;
4266
fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4268
for(i = 0; i < 8; i++) {
4269
fptag |= (env->fptags[i] << i);
4271
stw(ptr, env->fpuc);
4273
stw(ptr + 4, fptag ^ 0xff);
4274
#ifdef TARGET_X86_64
4276
stq(ptr + 0x08, 0); /* rip */
4277
stq(ptr + 0x10, 0); /* rdp */
4281
stl(ptr + 0x08, 0); /* eip */
4282
stl(ptr + 0x0c, 0); /* sel */
4283
stl(ptr + 0x10, 0); /* dp */
4284
stl(ptr + 0x14, 0); /* sel */
4288
for(i = 0;i < 8; i++) {
4290
helper_fstt(tmp, addr);
4294
if (env->cr[4] & CR4_OSFXSR_MASK) {
4295
/* XXX: finish it */
4296
stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4297
stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4298
if (env->hflags & HF_CS64_MASK)
4303
for(i = 0; i < nb_xmm_regs; i++) {
4304
stq(addr, env->xmm_regs[i].XMM_Q(0));
4305
stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4311
void helper_fxrstor(target_ulong ptr, int data64)
4313
int i, fpus, fptag, nb_xmm_regs;
4317
env->fpuc = lduw(ptr);
4318
fpus = lduw(ptr + 2);
4319
fptag = lduw(ptr + 4);
4320
env->fpstt = (fpus >> 11) & 7;
4321
env->fpus = fpus & ~0x3800;
4323
for(i = 0;i < 8; i++) {
4324
env->fptags[i] = ((fptag >> i) & 1);
4328
for(i = 0;i < 8; i++) {
4329
tmp = helper_fldt(addr);
4334
if (env->cr[4] & CR4_OSFXSR_MASK) {
4335
/* XXX: finish it */
4336
env->mxcsr = ldl(ptr + 0x18);
4338
if (env->hflags & HF_CS64_MASK)
4343
for(i = 0; i < nb_xmm_regs; i++) {
4344
env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4345
env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4351
#ifndef USE_X86LDOUBLE
4353
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4355
CPU86_LDoubleU temp;
4360
*pmant = (MANTD(temp) << 11) | (1LL << 63);
4361
/* exponent + sign */
4362
e = EXPD(temp) - EXPBIAS + 16383;
4363
e |= SIGND(temp) >> 16;
4367
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4369
CPU86_LDoubleU temp;
4373
/* XXX: handle overflow ? */
4374
e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4375
e |= (upper >> 4) & 0x800; /* sign */
4376
ll = (mant >> 11) & ((1LL << 52) - 1);
4378
temp.l.upper = (e << 20) | (ll >> 32);
4381
temp.ll = ll | ((uint64_t)e << 52);
4388
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4390
CPU86_LDoubleU temp;
4393
*pmant = temp.l.lower;
4394
*pexp = temp.l.upper;
4397
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4399
CPU86_LDoubleU temp;
4401
temp.l.upper = upper;
4402
temp.l.lower = mant;
4407
#ifdef TARGET_X86_64
4409
//#define DEBUG_MULDIV
4411
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4420
static void neg128(uint64_t *plow, uint64_t *phigh)
4424
add128(plow, phigh, 1, 0);
4427
/* return TRUE if overflow */
4428
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4430
uint64_t q, r, a1, a0;
4443
/* XXX: use a better algorithm */
4444
for(i = 0; i < 64; i++) {
4446
a1 = (a1 << 1) | (a0 >> 63);
4447
if (ab || a1 >= b) {
4453
a0 = (a0 << 1) | qb;
4455
#if defined(DEBUG_MULDIV)
4456
printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4457
*phigh, *plow, b, a0, a1);
4465
/* return TRUE if overflow */
4466
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4469
sa = ((int64_t)*phigh < 0);
4471
neg128(plow, phigh);
4475
if (div64(plow, phigh, b) != 0)
4478
if (*plow > (1ULL << 63))
4482
if (*plow >= (1ULL << 63))
4490
void helper_mulq_EAX_T0(target_ulong t0)
4494
mulu64(&r0, &r1, EAX, t0);
4501
void helper_imulq_EAX_T0(target_ulong t0)
4505
muls64(&r0, &r1, EAX, t0);
4509
CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4512
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4516
muls64(&r0, &r1, t0, t1);
4518
CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4522
void helper_divq_EAX(target_ulong t0)
4526
raise_exception(EXCP00_DIVZ);
4530
if (div64(&r0, &r1, t0))
4531
raise_exception(EXCP00_DIVZ);
4536
void helper_idivq_EAX(target_ulong t0)
4540
raise_exception(EXCP00_DIVZ);
4544
if (idiv64(&r0, &r1, t0))
4545
raise_exception(EXCP00_DIVZ);
4551
void helper_hlt(void)
4553
helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4555
env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4557
env->exception_index = EXCP_HLT;
4561
void helper_monitor(target_ulong ptr)
4563
if ((uint32_t)ECX != 0)
4564
raise_exception(EXCP0D_GPF);
4565
/* XXX: store address ? */
4566
helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4569
void helper_mwait(void)
4571
if ((uint32_t)ECX != 0)
4572
raise_exception(EXCP0D_GPF);
4573
helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4574
/* XXX: not complete but not completely erroneous */
4575
if (env->cpu_index != 0 || env->next_cpu != NULL) {
4576
/* more than one CPU: do not sleep because another CPU may
4583
void helper_debug(void)
4585
env->exception_index = EXCP_DEBUG;
4589
void helper_raise_interrupt(int intno, int next_eip_addend)
4591
raise_interrupt(intno, 1, 0, next_eip_addend);
4594
void helper_raise_exception(int exception_index)
4596
raise_exception(exception_index);
4599
void helper_cli(void)
4601
env->eflags &= ~IF_MASK;
4604
void helper_sti(void)
4606
env->eflags |= IF_MASK;
4610
/* vm86plus instructions */
4611
void helper_cli_vm(void)
4613
env->eflags &= ~VIF_MASK;
4616
void helper_sti_vm(void)
4618
env->eflags |= VIF_MASK;
4619
if (env->eflags & VIP_MASK) {
4620
raise_exception(EXCP0D_GPF);
4625
void helper_set_inhibit_irq(void)
4627
env->hflags |= HF_INHIBIT_IRQ_MASK;
4630
void helper_reset_inhibit_irq(void)
4632
env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4635
void helper_boundw(target_ulong a0, int v)
4639
high = ldsw(a0 + 2);
4641
if (v < low || v > high) {
4642
raise_exception(EXCP05_BOUND);
4647
void helper_boundl(target_ulong a0, int v)
4652
if (v < low || v > high) {
4653
raise_exception(EXCP05_BOUND);
4658
static float approx_rsqrt(float a)
4660
return 1.0 / sqrt(a);
4663
static float approx_rcp(float a)
4668
#if !defined(CONFIG_USER_ONLY)
4670
#define MMUSUFFIX _mmu
4673
#include "softmmu_template.h"
4676
#include "softmmu_template.h"
4679
#include "softmmu_template.h"
4682
#include "softmmu_template.h"
4686
/* try to fill the TLB and return an exception if error. If retaddr is
4687
NULL, it means that the function was called in C code (i.e. not
4688
from generated code or from helper.c) */
4689
/* XXX: fix it to restore all registers */
4690
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4692
TranslationBlock *tb;
4695
CPUX86State *saved_env;
4697
/* XXX: hack to restore env in all cases, even if not called from
4700
env = cpu_single_env;
4702
ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4705
/* now we have a real cpu fault */
4706
pc = (unsigned long)retaddr;
4707
tb = tb_find_pc(pc);
4709
/* the PC is inside the translated code. It means that we have
4710
a virtual CPU fault */
4711
cpu_restore_state(tb, env, pc, NULL);
4714
raise_exception_err(env->exception_index, env->error_code);
4720
/* Secure Virtual Machine helpers */
4722
#if defined(CONFIG_USER_ONLY)
4724
void helper_vmrun(int aflag)
4727
void helper_vmmcall(void)
4730
void helper_vmload(int aflag)
4733
void helper_vmsave(int aflag)
4736
void helper_stgi(void)
4739
void helper_clgi(void)
4742
void helper_skinit(void)
4745
void helper_invlpga(int aflag)
4748
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4751
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4755
void helper_svm_check_io(uint32_t port, uint32_t param,
4756
uint32_t next_eip_addend)
4761
static inline void svm_save_seg(target_phys_addr_t addr,
const SegmentCache *sc)
stw_phys(addr + offsetof(struct vmcb_seg, selector),
stq_phys(addr + offsetof(struct vmcb_seg, base),
stl_phys(addr + offsetof(struct vmcb_seg, limit),
stw_phys(addr + offsetof(struct vmcb_seg, attrib),
(sc->flags >> 8) | ((sc->flags >> 12) & 0x0f00));
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
static inline void svm_load_seg_cache(target_phys_addr_t addr,
CPUState *env, int seg_reg)
SegmentCache sc1, *sc = &sc1;
svm_load_seg(addr, sc);
cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
sc->base, sc->limit, sc->flags);
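/* Illustrative sketch of the attribute conversion used by svm_save_seg() and
   svm_load_seg() above: the VMCB keeps segment attributes as a packed 12-bit
   field (descriptor byte 5 in bits 0..7, the high nibble of byte 6 in bits
   8..11), while SegmentCache.flags keeps them at their native descriptor bit
   positions; the two helpers below restate the intended round trip.  Names
   are illustrative. */
static inline uint16_t seg_flags_to_vmcb_attrib_sketch(uint32_t flags)
{
    return (uint16_t)(((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00));
}

static inline uint32_t vmcb_attrib_to_seg_flags_sketch(uint16_t attrib)
{
    return ((attrib & 0xff) << 8) | ((uint32_t)(attrib & 0x0f00) << 12);
}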
void helper_vmrun(int aflag)
4800
helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4805
addr = (uint32_t)EAX;
4807
if (loglevel & CPU_LOG_TB_IN_ASM)
4808
fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4810
env->vm_vmcb = addr;
4812
/* save the current CPU state in the hsave page */
4813
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4814
stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4816
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4817
stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4819
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4820
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4821
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4822
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4823
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
4824
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4825
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4827
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4828
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4830
svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4832
svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4834
svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4836
svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4839
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
4840
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4841
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4843
/* load the interception bitmaps so we do not need to access the
4845
env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4846
env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4847
env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4848
env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4849
env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4850
env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4852
/* enable intercepts */
4853
env->hflags |= HF_SVMI_MASK;
4855
env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4856
env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4858
env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4859
env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4861
/* clear exit_info_2 so we behave like the real hardware */
4862
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4864
cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4865
cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4866
cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4867
env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4868
int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4869
if (int_ctl & V_INTR_MASKING_MASK) {
4870
env->cr[8] = int_ctl & V_TPR_MASK;
4871
cpu_set_apic_tpr(env, env->cr[8]);
4872
if (env->eflags & IF_MASK)
4873
env->hflags |= HF_HIF_MASK;
4876
#ifdef TARGET_X86_64
4878
ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4881
load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4882
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4883
CC_OP = CC_OP_EFLAGS;
4885
svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4887
svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4889
svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4891
svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4894
EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4896
ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4897
EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4898
env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4899
env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4900
cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4902
/* FIXME: guest state consistency checks */
4904
switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4905
case TLB_CONTROL_DO_NOTHING:
4907
case TLB_CONTROL_FLUSH_ALL_ASID:
4908
/* FIXME: this is not 100% correct but should work for now */
4915
/* maybe we need to inject an event */
4916
event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4917
if (event_inj & SVM_EVTINJ_VALID) {
4918
uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4919
uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4920
uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4921
stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4923
if (loglevel & CPU_LOG_TB_IN_ASM)
4924
fprintf(logfile, "Injecting(%#hx): ", valid_err);
4925
/* FIXME: need to implement valid_err */
4926
switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4927
case SVM_EVTINJ_TYPE_INTR:
4928
env->exception_index = vector;
4929
env->error_code = event_inj_err;
4930
env->exception_is_int = 0;
4931
env->exception_next_eip = -1;
4932
if (loglevel & CPU_LOG_TB_IN_ASM)
4933
fprintf(logfile, "INTR");
4935
case SVM_EVTINJ_TYPE_NMI:
4936
env->exception_index = vector;
4937
env->error_code = event_inj_err;
4938
env->exception_is_int = 0;
4939
env->exception_next_eip = EIP;
4940
if (loglevel & CPU_LOG_TB_IN_ASM)
4941
fprintf(logfile, "NMI");
4943
case SVM_EVTINJ_TYPE_EXEPT:
4944
env->exception_index = vector;
4945
env->error_code = event_inj_err;
4946
env->exception_is_int = 0;
4947
env->exception_next_eip = -1;
4948
if (loglevel & CPU_LOG_TB_IN_ASM)
4949
fprintf(logfile, "EXEPT");
4951
case SVM_EVTINJ_TYPE_SOFT:
4952
env->exception_index = vector;
4953
env->error_code = event_inj_err;
4954
env->exception_is_int = 1;
4955
env->exception_next_eip = EIP;
4956
if (loglevel & CPU_LOG_TB_IN_ASM)
4957
fprintf(logfile, "SOFT");
4960
if (loglevel & CPU_LOG_TB_IN_ASM)
4961
fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
4963
if ((int_ctl & V_IRQ_MASK) ||
4964
(env->intercept & (1ULL << (SVM_EXIT_INTR - SVM_EXIT_INTR)))) {
4965
env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4971
void helper_vmmcall(void)
4973
helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
4974
raise_exception(EXCP06_ILLOP);
4977
void helper_vmload(int aflag)
4980
helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
4985
addr = (uint32_t)EAX;
4987
if (loglevel & CPU_LOG_TB_IN_ASM)
4988
fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4989
addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4990
env->segs[R_FS].base);
4992
svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
4994
svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
4996
svm_load_seg(addr + offsetof(struct vmcb, save.tr),
4998
svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5001
#ifdef TARGET_X86_64
5002
env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5003
env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5004
env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5005
env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5007
env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5008
env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5009
env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5010
env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5013
void helper_vmsave(int aflag)
5016
helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5021
addr = (uint32_t)EAX;
5023
if (loglevel & CPU_LOG_TB_IN_ASM)
5024
fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5025
addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5026
env->segs[R_FS].base);
5028
svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5030
svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5032
svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5034
svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5037
#ifdef TARGET_X86_64
5038
stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5039
stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5040
stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5041
stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5043
stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5044
stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5045
stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5046
stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5049
void helper_stgi(void)
5051
helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5052
env->hflags |= HF_GIF_MASK;
5055
void helper_clgi(void)
5057
helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5058
env->hflags &= ~HF_GIF_MASK;
5061
void helper_skinit(void)
5063
helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5064
/* XXX: not implemented */
5065
raise_exception(EXCP06_ILLOP);
5068
void helper_invlpga(int aflag)
5071
helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5076
addr = (uint32_t)EAX;
5078
/* XXX: could use the ASID to see if it is needed to do the
5080
tlb_flush_page(env, addr);
5083
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5085
if (likely(!(env->hflags & HF_SVMI_MASK)))
5088
case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5089
if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5090
helper_vmexit(type, param);
5093
case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5094
if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5095
helper_vmexit(type, param);
5098
case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5099
if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5100
helper_vmexit(type, param);
5103
case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5104
if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5105
helper_vmexit(type, param);
5108
case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5109
if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5110
helper_vmexit(type, param);
5114
if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5115
/* FIXME: this should be read in at vmrun (faster this way?) */
5116
uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5118
switch((uint32_t)ECX) {
5123
case 0xc0000000 ... 0xc0001fff:
5124
t0 = (8192 + ECX - 0xc0000000) * 2;
5128
case 0xc0010000 ... 0xc0011fff:
5129
t0 = (16384 + ECX - 0xc0010000) * 2;
5134
helper_vmexit(type, param);
5139
if (ldub_phys(addr + t1) & ((1 << param) << t0))
5140
helper_vmexit(type, param);
5144
if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5145
helper_vmexit(type, param);
5151
void helper_svm_check_io(uint32_t port, uint32_t param,
uint32_t next_eip_addend)
if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
/* FIXME: this should be read in at vmrun (faster this way?) */
uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
env->eip + next_eip_addend);
helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
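/* Illustrative sketch of the bitmap test performed by helper_svm_check_io()
   above: the SVM I/O permission map holds one bit per port, the access size
   in bytes is taken from bits 4..6 of exit_info_1 (the 'param' argument),
   and the access is intercepted when any bit covering [port, port+size) is
   set.  Names below are illustrative; 'iopm' stands for the permission map
   already located by the caller. */
static inline int svm_ioio_intercepted_sketch(const uint8_t *iopm,
                                              uint32_t port, unsigned int size)
{
    uint16_t mask = (1 << size) - 1;
    uint16_t bits = iopm[port / 8] | ((uint16_t)iopm[port / 8 + 1] << 8);
    return (bits & (mask << (port & 7))) != 0;
}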
/* Note: currently only 32 bits of exit_code are used */
5168
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5172
if (loglevel & CPU_LOG_TB_IN_ASM)
5173
fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5174
exit_code, exit_info_1,
5175
ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5178
if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5179
stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5180
env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5182
stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5185
/* Save the VM state in the vmcb */
5186
svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5188
svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5190
svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5192
svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5195
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5196
stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5198
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5199
stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5201
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5202
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5203
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5204
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5205
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5207
if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
5208
int_ctl &= ~V_TPR_MASK;
5209
int_ctl |= env->cr[8] & V_TPR_MASK;
5210
stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5213
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5214
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5215
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5216
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5217
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5218
stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5219
stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5221
/* Reload the host state from vm_hsave */
5222
env->hflags &= ~HF_HIF_MASK;
5223
env->hflags &= ~HF_SVMI_MASK;
5225
env->intercept_exceptions = 0;
5226
env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5228
env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5229
env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5231
env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5232
env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5234
cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5235
cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5236
cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5237
if (int_ctl & V_INTR_MASKING_MASK) {
5238
env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
5239
cpu_set_apic_tpr(env, env->cr[8]);
5241
/* we need to set the efer after the crs so the hidden flags get
5243
#ifdef TARGET_X86_64
5245
ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5249
load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5250
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5251
CC_OP = CC_OP_EFLAGS;
5253
svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5255
svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5257
svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5259
svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5262
EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5263
ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5264
EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5266
env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5267
env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5270
cpu_x86_set_cpl(env, 0);
5271
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5272
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5275
/* FIXME: Resets the current ASID register to zero (host ASID). */
5277
/* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5279
/* Clears the TSC_OFFSET inside the processor. */
5281
/* If the host is in PAE mode, the processor reloads the host's PDPEs
from the page table indicated by the host's CR3. If the PDPEs contain
illegal state, the processor causes a shutdown. */
5285
/* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5286
env->cr[0] |= CR0_PE_MASK;
5287
env->eflags &= ~VM_MASK;
5289
/* Disables all breakpoints in the host DR7 register. */
5291
/* Checks the reloaded host state for consistency. */
5293
/* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5294
host's code segment or non-canonical (in the case of long mode), a
5295
#GP fault is delivered inside the host.) */
5297
/* remove any pending exception */
5298
env->exception_index = -1;
5299
env->error_code = 0;
5300
env->old_exception = -1;
5308
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}
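/* A tag byte of 1 marks an x87/MMX register as empty; EMMS empties all
   eight registers, so the two 32-bit stores below set every tag byte to 1
   (0x01010101 covers four tags at a time). */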
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}
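/* ops_sse.h is instantiated twice: SHIFT 0 generates the 64-bit MMX
   variants, SHIFT 1 the 128-bit SSE variants.  helper_template.h is
   instantiated once per operand width (byte, word, long, and quad on
   x86_64). */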
#include "ops_sse.h"
5333
#include "ops_sse.h"
5336
#include "helper_template.h"
5340
#include "helper_template.h"
5344
#include "helper_template.h"
5347
#ifdef TARGET_X86_64
5350
#include "helper_template.h"
5355
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
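/* Note: both loops above assume t0 != 0; the callers in the translator are
   expected to skip the helper when the source is zero, where hardware
   BSF/BSR set ZF and leave the destination undefined. */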
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
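/* Lazy condition-code evaluation: CC_OP records which operation last set
   CC_SRC/CC_DST, and cc_table maps each CC_OP to a pair of helpers -- one
   that reconstructs all of O/S/Z/A/P/C and one that returns only CF. */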
CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },