/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
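
/* parity_table[b] holds CC_P (the x86 PF flag) for every byte value b
   that contains an even number of set bits; PF is defined over the low
   8 bits of a result only, e.g. 0x03 (two bits set) yields PF = 1. */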
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
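
/* RCL rotates through CF, so a w-bit rotate-through-carry repeats with
   period w + 1: the effective count is the shift count modulo 17 for
   16-bit operands and modulo 9 for 8-bit operands, which is exactly
   what these two tables encode. */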
#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL)
#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;

    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
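
/* QEMU computes the arithmetic flags lazily: CC_OP records which
   operation last set the flags, and helper_cc_compute_all() re-derives
   O/S/Z/A/P/C from the saved operands, so reading EFLAGS is the only
   place that pays the full cost of materializing them. */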
/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
    index = selector & ~7;
    if ((index + 7) > dt->limit)
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
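
/* A legacy segment descriptor packs its fields non-contiguously:
   e1 (the low word) holds limit[15:0] and base[15:0], while e2 holds
   base[23:16], the type/S/DPL/P attribute bits, limit[19:16], the
   G/B/L/AVL flags and base[31:24] - which is why the base and limit
   have to be reassembled piecewise above. */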
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
    int type, index, shift;

#if 0
    printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
    for (i = 0; i < env->tr.limit; i++) {
        printf("%02x ", env->tr.base[i]);
        if ((i & 7) == 7) printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
        cpu_abort(env, "invalid tss type");
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
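
/* In a 32-bit TSS (shift == 1) each privilege level has an 8-byte
   {esp, ss} pair starting at offset 4, so index = (dpl * 4 + 2) << 1
   = dpl * 8 + 4; a 16-bit TSS (shift == 0) uses 4-byte {sp, ss} pairs
   starting at offset 2. */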
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
    /* if task gate, we read the TSS segment and we load it */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
        old_tss_limit_max = 103;
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for (i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for (i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for (i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for (i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);

    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);
    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for (i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for (i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;

    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);

    /* load all registers without an exception, then reload them with
       possible exception */
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16-bit case? */

    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);

    env->ldt.selector = new_ldt & ~4;
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
/* check if port I/O is allowed in the TSS */
static inline void check_io(int addr, int size)
    int io_offset, val, mask;

    /* TSS must be a valid 32-bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
    val = lduw_kernel(env->tr.base + io_offset);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
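
/* The I/O permission bitmap starts at the 16-bit offset stored at byte
   0x66 of the 32-bit TSS; one bit per port, so port N is bit (N & 7)
   of byte N >> 3.  Two bytes are fetched because an access of up to
   four ports may straddle a byte boundary in the bitmap. */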
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
    if (e2 & DESC_B_MASK)

static int exeption_has_error_code(int intno)

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
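
/* SET_ESP only writes the part of ESP selected by the stack-size mask:
   a 16-bit stack (sp_mask 0xffff) must leave the upper bits of ESP
   untouched, a 32-bit stack zero-extends, and a 64-bit stack (handled
   in the TARGET_X86_64 variant above) assigns RSP as a whole. */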
/* in 64-bit machines, this addition can overflow, so this segment
 * addition macro is used to trim the value to 32 bits whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);

    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            if (env->segs[R_SS].flags & DESC_B_MASK)
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
                stl_kernel(ssp, error_code);
                stw_kernel(ssp, error_code);
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)

        if (env->eflags & VM_MASK) {
            PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
        PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
        PUSHL(ssp, esp, sp_mask, ESP);
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);

        if (env->eflags & VM_MASK) {
            PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
        PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
        PUSHW(ssp, esp, sp_mask, ESP);
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
    if (env->eflags & VM_MASK) {
        cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
        cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
        cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
        cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
    ss = (ss & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_SS, ss,
                           ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
    cpu_x86_set_cpl(env, dpl);

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
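
/* In the 64-bit TSS, the full-width stack pointers RSP0-RSP2 live at
   offsets 4, 12 and 20, and the IST entries follow at offset 36, so
   8 * level + 4 addresses both groups: levels 0-2 select an RSP and
   the callers pass ist + 3 to select IST1-IST7. */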
/* 64-bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);

    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
            esp = get_rsp_from_tss(ist + 3);
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            esp = get_rsp_from_tss(ist + 3);
        esp &= ~0xfLL; /* align stack */
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    if (has_error_code) {
        PUSHQ(esp, error_code);

        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
    cpu_x86_set_cpl(env, dpl);

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
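
/* Unlike the 32-bit path, a 64-bit interrupt always pushes SS:RSP,
   aligns RSP to 16 bytes before building the frame, and can switch to
   an Interrupt Stack Table slot (ist != 0) even when there is no
   privilege change. */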
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
#else
void helper_syscall(int next_eip_addend)
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
            env->eip = env->lstar;
            env->eip = env->cstar;
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
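
/* SYSCALL takes its targets from MSRs: the kernel CS selector comes
   from STAR[47:32] (SS is implicitly CS + 8), the 64-bit entry RIP
   from LSTAR (or CSTAR for 32-bit compatibility-mode callers), and the
   RFLAGS bits listed in the FMASK MSR are cleared.  Legacy (non-LMA)
   mode instead jumps to the low 32 bits of STAR with IF/RF/VM
   cleared. */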
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
    target_ulong ptr, ssp;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
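
/* Real-mode vectors are 4-byte IVT entries (a 16-bit offset followed
   by a 16-bit segment) indexed from the interrupt table base; no
   descriptor checks apply, and only FLAGS, CS and IP are pushed on the
   16-bit stack. */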
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
    int dpl, cpl, shift;

    if (env->hflags & HF_LMA_MASK) {
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */

static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
            type = SVM_EVTINJ_TYPE_SOFT;
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
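
/* SVM event injection: the EVENTINJ field of the VMCB encodes the
   vector in its low byte, an event type (interrupt, NMI, exception or
   software interrupt), a deliver-error-code bit and a valid bit; see
   the AMD APM vol. 2 for the authoritative field layout. */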
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            log_cpu_state(env, X86_DUMP_CCOP);
            ptr = env->segs[R_CS].base + env->eip;
            for (i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
        do_interrupt_real(intno, is_int, error_code, next_eip);

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
void do_interrupt(CPUState *env1)
    CPUState *saved_env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
void do_interrupt_x86_hardirq(CPUState *env1, int intno, int is_hw)
    CPUState *saved_env;

    do_interrupt_all(intno, 0, 0, 0, is_hw);

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
        qemu_system_reset_request();

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;
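
/* This implements the IA-32 fault-class rules: vectors 0 and 10-13
   (#DE, #TS, #NP, #SS, #GP) are "contributory".  Two contributory
   faults, or a page fault followed by another page fault or a
   contributory fault, escalate to #DF; a fault raised while delivering
   #DF is a triple fault, which shuts the machine down. */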
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
    raise_interrupt(exception_index, 0, error_code, 0);

void raise_exception(int exception_index)
    raise_interrupt(exception_index, 0, 0, 0);

void raise_exception_env(int exception_index, CPUState *nenv)
    raise_exception(exception_index);

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(CPUState *env1)

void helper_rsm(void)

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
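
/* Bit 17 (0x20000) of the SMM revision ID advertises SMBASE relocation
   support; helper_rsm() below checks the same bit before accepting a
   new SMBASE value from the save state area. */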
void do_smm_enter(CPUState *env1)
    target_ulong sm_state;
    CPUState *saved_env;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for (i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for (i = 0; i < 6; i++) {
            offset = 0x7f84 + i * 12;
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
void helper_rsm(void)
    target_ulong sm_state;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for (i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
            offset = 0x7f84 + i * 12;
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

#endif /* !CONFIG_USER_ONLY */
/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;

void helper_idivb_AL(target_ulong t0)
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;

void helper_divw_AX(target_ulong t0)
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;

void helper_idivw_AX(target_ulong t0)
    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
        raise_exception(EXCP00_DIVZ);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
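
/* Both the zero-divisor case and the quotient-overflow case (the
   quotient does not fit the destination, e.g. q != (int16_t)q above)
   raise the same #DE fault on x86, which is why EXCP00_DIVZ appears
   twice in each division helper. */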
void helper_divl_EAX(target_ulong t0)
    unsigned int den, r;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);

void helper_idivl_EAX(target_ulong t0)
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
        raise_exception(EXCP00_DIVZ);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);

/* XXX: exception */
void helper_aam(int base)
    EAX = (EAX & ~0xffff) | al | (ah << 8);

void helper_aad(int base)
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;

void helper_aaa(void)
    eflags = helper_cc_compute_all(CC_OP);
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
        eflags &= ~(CC_C | CC_A);
    EAX = (EAX & ~0xffff) | al | (ah << 8);
void helper_aas(void)
    eflags = helper_cc_compute_all(CC_OP);
    ah = (EAX >> 8) & 0xff;

    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
        eflags &= ~(CC_C | CC_A);
    EAX = (EAX & ~0xffff) | al | (ah << 8);

void helper_daa(void)
    eflags = helper_cc_compute_all(CC_OP);

    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */

void helper_das(void)
    int al, al1, af, cf;

    eflags = helper_cc_compute_all(CC_OP);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0xff;
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
void helper_into(int next_eip_addend)
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);

void helper_cmpxchg8b(target_ulong a0)
    eflags = helper_cc_compute_all(CC_OP);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        /* always do the store */
        EDX = (uint32_t)(d >> 32);

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    if (d0 == EAX && d1 == EDX) {
        /* always do the store */

void helper_single_step(void)
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
    raise_exception(EXCP01_DB);

void helper_cpuid(void)
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);

void helper_enter_level(int level, int data32, target_ulong t1)
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        stl(ssp + (esp & esp_mask), t1);
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        stw(ssp + (esp & esp_mask), t1);
#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
    target_ulong esp, ebp;
            stw(esp, lduw(ebp));

void helper_lldt(int selector)
    int index, entry_limit;

    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
    env->ldt.selector = selector;
void helper_ltr(int selector)
    int index, type, entry_limit;

    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
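
/* LTR marks the descriptor busy in place: setting DESC_TSS_BUSY_MASK
   (bit 9 of e2, i.e. bit 1 of the descriptor type) turns an available
   TSS (type 1 or 9) into a busy one (type 3 or 11), so a second LTR of
   the same selector would fail the type check above. */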
    env->tr.selector = selector;

/* only works in protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);

        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            stl_kernel(ptr + 4, e2);

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* non-conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
2287
switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2288
CC_OP = CC_OP_EFLAGS;
2290
case 4: /* 286 call gate */
2291
case 12: /* 386 call gate */
2292
if ((dpl < cpl) || (dpl < rpl))
2293
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2294
if (!(e2 & DESC_P_MASK))
2295
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2297
new_eip = (e1 & 0xffff);
2299
new_eip |= (e2 & 0xffff0000);
2300
if (load_segment(&e1, &e2, gate_cs) != 0)
2301
raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2302
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2303
/* must be code segment */
2304
if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2305
(DESC_S_MASK | DESC_CS_MASK)))
2306
raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2307
if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2308
(!(e2 & DESC_C_MASK) && (dpl != cpl)))
2309
raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2310
if (!(e2 & DESC_P_MASK))
2311
raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2312
limit = get_seg_limit(e1, e2);
2313
if (new_eip > limit)
2314
raise_exception_err(EXCP0D_GPF, 0);
2315
cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2316
get_seg_base(e1, e2), limit, e2);
2320
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
    uint32_t esp, esp_mask;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);

    SET_ESP(esp, esp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* non-conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
            /* XXX: check 16/32 bit cases in long mode */
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
2458
if ((selector & 0xfffc) == 0)
2459
raise_exception_err(EXCP0D_GPF, 0);
2461
if (load_segment(&e1, &e2, selector) != 0)
2462
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2463
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2464
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2465
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2467
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2468
if (!(e2 & DESC_P_MASK))
2469
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2471
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2472
/* to inner privilege */
2473
get_ss_esp_from_tss(&ss, &sp, dpl);
2474
LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2475
ss, sp, param_count, ESP);
2476
if ((ss & 0xfffc) == 0)
2477
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2478
if ((ss & 3) != dpl)
2479
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2480
if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2481
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2482
ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2484
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2485
if (!(ss_e2 & DESC_S_MASK) ||
2486
(ss_e2 & DESC_CS_MASK) ||
2487
!(ss_e2 & DESC_W_MASK))
2488
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2489
if (!(ss_e2 & DESC_P_MASK))
2490
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2492
// push_size = ((param_count * 2) + 8) << shift;
2494
old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2495
old_ssp = env->segs[R_SS].base;
2497
sp_mask = get_sp_mask(ss_e2);
2498
ssp = get_seg_base(ss_e1, ss_e2);
2500
PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2501
PUSHL(ssp, sp, sp_mask, ESP);
2502
for(i = param_count - 1; i >= 0; i--) {
2503
val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2504
PUSHL(ssp, sp, sp_mask, val);
2507
PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2508
PUSHW(ssp, sp, sp_mask, ESP);
2509
for(i = param_count - 1; i >= 0; i--) {
2510
val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2511
PUSHW(ssp, sp, sp_mask, val);
2516
/* to same privilege */
2518
sp_mask = get_sp_mask(env->segs[R_SS].flags);
2519
ssp = env->segs[R_SS].base;
2520
// push_size = (4 << shift);
2525
PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2526
PUSHL(ssp, sp, sp_mask, next_eip);
2528
PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2529
PUSHW(ssp, sp, sp_mask, next_eip);
2532
/* from this point, not restartable */
2535
ss = (ss & ~3) | dpl;
2536
cpu_x86_load_seg_cache(env, R_SS, ss,
2538
get_seg_limit(ss_e1, ss_e2),
2542
selector = (selector & ~3) | dpl;
2543
cpu_x86_load_seg_cache(env, R_CS, selector,
2544
get_seg_base(e1, e2),
2545
get_seg_limit(e1, e2),
2547
cpu_x86_set_cpl(env, dpl);
2548
SET_ESP(sp, sp_mask);
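/*
 * Editor's note (added for clarity): on a call through a gate to a more
 * privileged (numerically smaller) DPL, the CPU switches to the inner
 * stack fetched from the TSS via get_ss_esp_from_tss(), copies
 * param_count words from the old stack so the callee still sees its
 * arguments, and finally pushes the old SS:ESP and CS:EIP so that a
 * later lret can restore the outer context.
 */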
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
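/*
 * Editor's note (added): the two masks differ only in IOPL_MASK, so an
 * iret executed inside vm86 mode cannot raise its own I/O privilege
 * level, while a plain real-mode iret restores IOPL from the popped
 * flags image.  With shift == 0 only the low 16 flag bits are updated.
 */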
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
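/*
 * Editor's note (rationale added): when returning to a less privileged
 * level, any data segment register whose DPL is below the new CPL must
 * be nulled; otherwise user code could keep using a kernel data segment
 * that was still live in ES/DS/FS/GS across the iret/lret.
 */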
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
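/*
 * Editor's note (added): the return_to_vm86 path consumes the extended
 * iret frame (EIP, CS, EFLAGS, ESP, SS, ES, DS, FS, GS) that was pushed
 * when the vm86 task was interrupted, reloads all six segment registers
 * with vm86 semantics and forces CPL back to 3.
 */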
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
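/*
 * Editor's note (layout summary added): SYSENTER/SYSEXIT never read the
 * GDT; the target segments are synthesized flat descriptors derived from
 * IA32_SYSENTER_CS.  Kernel SS is sysenter_cs + 8; on SYSEXIT the 32-bit
 * user CS/SS come from sysenter_cs + 16/+24 and the 64-bit ones from
 * + 32/+40, matching the selector arithmetic visible below.
 */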
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
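/*
 * Editor's worked example (added): with CR0 = 0x8000002B, "lmsw 0"
 * yields (0x8000002B & ~0xe) | 0x0 = 0x80000021: bits 1..3 (MP/EM/TS)
 * are cleared but PE (bit 0) survives via the ~0xe mask, implementing
 * the "PE cannot be cleared by LMSW" rule.
 */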
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
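        /*
         * Editor's note (added): update_mask whitelists exactly the EFER
         * bits whose CPUID feature flag is advertised, so a guest WRMSR
         * cannot set e.g. EFER.LME on a CPU model without CPUID_EXT2_LM;
         * unsupported bits are silently preserved from env->efer.
         */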
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
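/*
 * Editor's usage note (added): lsl reports success through ZF rather
 * than a fault; on any failed check CC_Z is cleared and the result must
 * be ignored, so guest code typically tests ZF after the instruction
 * (ZF = 1 means the returned limit is valid).
 */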
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
/* x87 FPU helpers */

static inline double floatx80_to_double(floatx80 a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.f64 = floatx80_to_float64(a, &env->fp_status);
    return u.d;
}

static inline floatx80 double_to_floatx80(double a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.d = a;
    return float64_to_floatx80(u.f64, &env->fp_status);
}

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
{
    if (floatx80_is_zero(b)) {
        fpu_set_exception(FPUS_ZE);
    }
    return floatx80_div(a, b, &env->fp_status);
}
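/*
 * Editor's note (added): FPUS_ZE is always recorded in the status word;
 * fpu_set_exception() only sets the summary/busy bits when the exception
 * is unmasked in the control word (~fpuc & FPUC_EM), and the actual trap
 * is delivered later by fpu_raise_exception() below.
 */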
static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx80(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx80(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx80(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
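/*
 * Editor's note (added): the x87 register file is a circular stack of 8
 * registers indexed by fpstt (TOP).  A load decrements TOP modulo 8 and
 * clears the corresponding tag, which is exactly the hardware "push"
 * operation; helper_fpop() below performs the inverse.
 */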
uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx80_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx80_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx80_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}
void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}
void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    floatx80 tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}
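/*
 * Editor's mapping note (added): floatx80_compare() returns -1/0/1/2
 * for less/equal/greater/unordered, so indexing with ret + 1 selects
 * the C0/C2/C3 pattern (fcom) or the CF/ZF/PF pattern (fcomi); an
 * unordered compare sets all three bits at once via 0x4500 or
 * CC_Z | CC_P | CC_C.
 */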
void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx80_add(ST0, FT0, &env->fp_status);
}

void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
}

void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
}

void helper_fsubr_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
}

void helper_fdiv_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}
/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx80_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx80_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = floatx80_one;
}

void helper_fldl2t_ST0(void)
{
    ST0 = floatx80_l2t;
}

void helper_fldl2e_ST0(void)
{
    ST0 = floatx80_l2e;
}

void helper_fldpi_ST0(void)
{
    ST0 = floatx80_pi;
}

void helper_fldlg2_ST0(void)
{
    ST0 = floatx80_lg2;
}

void helper_fldln2_ST0(void)
{
    ST0 = floatx80_ln2;
}

void helper_fldz_ST0(void)
{
    ST0 = floatx80_zero;
}

void helper_fldz_FT0(void)
{
    FT0 = floatx80_zero;
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}
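/*
 * Editor's layout note (added): fnstsw merges the live top-of-stack
 * pointer into bits 11-13 of the saved status word; with fpstt == 5 and
 * fpus == 0x80 the guest reads 0x2880.  The ~0x3800 mask clears any
 * stale TOP value first.
 */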
uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}
void helper_fninit(void)
{
    int i;

    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    for (i = 0; i < 8; i++)
        env->fptags[i] = 1;
}

/* BCD ops */

void helper_fbld_ST0(target_ulong ptr)
{
    floatx80 tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = int64_to_floatx80(val, &env->fp_status);
    if (ldub(ptr + 9) & 0x80) {
        tmp = floatx80_chs(tmp); /* apply the sign byte */
    }
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx80_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
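/*
 * Editor's worked example (added): packed BCD stores two decimal digits
 * per byte, least significant byte first, with byte 9 holding the sign.
 * Storing 1234 with fbst writes 0x34 0x12 0x00 ...; fbld reverses it,
 * accumulating val = val * 100 + hi_digit * 10 + lo_digit per byte while
 * scanning from byte 8 down to byte 0.
 */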
void helper_f2xm1(void)
{
    double val = floatx80_to_double(ST0);
    val = pow(2.0, val) - 1.0;
    ST0 = double_to_floatx80(val);
}

void helper_fyl2x(void)
{
    double fptemp = floatx80_to_double(ST0);

    if (fptemp > 0.0) {
        fptemp = log(fptemp)/log(2.0); /* log2(ST) */
        fptemp *= floatx80_to_double(ST1);
        ST1 = double_to_floatx80(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    double fptemp = floatx80_to_double(ST0);

    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        fptemp = tan(fptemp);
        ST0 = double_to_floatx80(fptemp);
        fpush();
        ST0 = floatx80_one;
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    double fptemp, fpsrcop;

    fpsrcop = floatx80_to_double(ST1);
    fptemp = floatx80_to_double(ST0);
    ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
    fpop();
}

void helper_fxtract(void)
{
    CPU_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;

    if (floatx80_is_zero(ST0)) {
        /* Easy way to generate -inf and raising division by 0 exception */
        ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
        fpush();
        ST0 = temp.d;
    } else {
        expdif = EXPD(temp) - EXPBIAS;
        /*DP exponent bias*/
        ST0 = int32_to_floatx80(expdif, &env->fp_status);
        fpush();
        BIASEXPONENT(temp);
        ST0 = temp.d;
    }
}
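/*
 * Editor's semantics note (added): fxtract splits ST0 = m * 2^e into
 * its two parts, leaving the unbiased exponent e in ST1 and the
 * significand m (exponent forced back to the bias) in ST0; e.g.
 * 12.0 = 1.5 * 2^3 yields ST1 = 3.0 and ST0 = 1.5.
 */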
void helper_fprem1(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        st0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}
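/*
 * Editor's note (added): fprem1/fprem report progress through the
 * status word.  When the exponent gap is < 53 the reduction completes,
 * C2 is cleared and the three low quotient bits land in C0/C3/C1 (used
 * by guest code doing argument reduction); otherwise C2 is set and the
 * guest is expected to re-run the instruction on the partial remainder.
 */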
void helper_fprem(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        st0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}
void helper_fyl2xp1(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        fptemp *= floatx80_to_double(ST1);
        ST1 = double_to_floatx80(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    if (floatx80_is_neg(ST0)) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = floatx80_sqrt(ST0, &env->fp_status);
}

void helper_fsincos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        fpush();
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx80_round_to_int(ST0, &env->fp_status);
}
void helper_fscale(void)
{
    if (floatx80_is_any_nan(ST1)) {
        ST0 = ST1;
    } else {
        int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
        ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
    }
}
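/*
 * Editor's semantics note (added): fscale computes ST0 * 2^trunc(ST1)
 * by direct exponent arithmetic (floatx80_scalbn), not by pow(); e.g.
 * ST0 = 3.0, ST1 = 2.5 gives 3.0 * 2^2 = 12.0, because the scale factor
 * truncates toward zero.
 */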
void helper_fsin(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0x8000000000000000ULL)
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
                       || (mant & (1LL << 63)) == 0) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    floatx80 tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    floatx80 tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
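/*
 * Editor's encoding note (added): the fstenv image keeps a 2-bit tag
 * per register (00 valid, 01 zero, 10 special, 11 empty), rebuilt above
 * by classifying each register; fldenv only needs to distinguish
 * empty/non-empty, hence the (fptag & 3) == 3 test.
 */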
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
            || (env->hflags & HF_CPL_MASK)
            || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}
void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
            || (env->hflags & HF_CPL_MASK)
            || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~*plow;
    *phigh = ~*phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}
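/*
 * Editor's algorithm note (added): the loop is plain restoring
 * shift-subtract division on a 128-bit dividend.  Each step shifts
 * (a1:a0) left one bit, subtracts b when the running remainder a1
 * reaches it, and shifts the quotient bit into a0 from the right; after
 * 64 steps a0 holds the quotient and a1 the remainder.  The early
 * a1 >= b test catches quotients that cannot fit in 64 bits.
 */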
/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif
/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void svm_check_intercept(CPUState *env1, uint32_t type)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
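/*
 * Editor's packing note (added): the VMCB stores segment attributes in
 * AMD's compressed 12-bit format, while QEMU keeps them at their native
 * descriptor positions (bits 8-15 and 20-23 of "flags").  The two shift
 * pairs above are exact inverses; e.g. flags 0x00c09300 <-> attrib
 * 0xc93.
 */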
static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt_all(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit();
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
5234
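/* Central intercept check, invoked before every interceptable
   operation.  CR/DR accesses and exceptions have dedicated bit
   vectors; everything else is one bit in the 64-bit intercept word,
   indexed relative to SVM_EXIT_INTR.  MSR accesses additionally
   consult the MSR permission bitmap, which keeps two bits (read,
   write) per MSR in three 2K-MSR regions; param is 0 for RDMSR and 1
   for WRMSR and selects which of the two bits is tested. */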
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

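/* Wrapper for callers outside the TCG helper environment, which have
   no implicit env; presumably kept separate so such code never touches
   the global directly. */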
void svm_check_intercept(CPUState *env1, uint32_t type)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    helper_svm_check_intercept_param(type, 0);
    env = saved_env;
}

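/* I/O intercept check.  The I/O permission map holds one bit per port;
   param is the IOIO exit_info_1 value, whose size field (bits 6:4)
   tells how many consecutive port bits the access covers, hence the
   computed mask.  On a hit, the address of the instruction following
   IN/OUT is stored in exit_info_2 so the hypervisor can resume the
   guest behind the intercepted instruction. */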
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

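/* #VMEXIT: record the exit reason in the VMCB, write the current guest
   state back, then restore the host context that VMRUN stashed in the
   host save area (vm_hsave) and continue in the host with GIF clear. */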
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es), &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs), &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss), &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds), &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave +
                                     offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es), env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs), env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss), env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds), env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */

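/* fptags[] keeps one byte per x87 register: 0 = valid, 1 = empty.
   Starting an MMX instruction resets the stack top and marks every
   register valid; EMMS tags all eight registers empty again. */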
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

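/* The MMX/SSE and sized integer helpers are template-generated:
   ops_sse.h is included once with SHIFT 0 (64-bit MMX operands) and
   once with SHIFT 1 (128-bit SSE operands), while helper_template.h is
   instantiated per operand size, SHIFT 0..3 for byte/word/long/quad. */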
#include "ops_sse.h"
5494
#include "ops_sse.h"
5497
#include "helper_template.h"
5501
#include "helper_template.h"
5505
#include "helper_template.h"
5508
#ifdef TARGET_X86_64
5511
#include "helper_template.h"
5516
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    target_ulong res = t0;
    int count = 0;

    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

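/* helper_lzcnt does double duty: with wordsize > 0 it implements LZCNT
   (returning wordsize for a zero input, as that instruction defines);
   with wordsize == 0 it returns the bit index of the most significant
   set bit, which is exactly what BSR needs. */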
target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}

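/* Lazy condition codes: rather than computing EFLAGS after every
   instruction, the translator records the kind of the last flag-setting
   operation in CC_OP and its operands/result in CC_SRC/CC_DST, and the
   compute_* helpers below rebuild the flags only when they are read.
   CC_OP_EFLAGS means the flags are already explicit in CC_SRC. */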
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();
    case CC_OP_ADDQ: return compute_all_addq();
    case CC_OP_ADCQ: return compute_all_adcq();
    case CC_OP_SUBQ: return compute_all_subq();
    case CC_OP_SBBQ: return compute_all_sbbq();
    case CC_OP_LOGICQ: return compute_all_logicq();
    case CC_OP_INCQ: return compute_all_incq();
    case CC_OP_DECQ: return compute_all_decq();
    case CC_OP_SHLQ: return compute_all_shlq();
    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

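/* As with svm_check_intercept() above, a wrapper so that code outside
   the helper environment can evaluate the lazy flags by temporarily
   installing the right env. */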
uint32_t cpu_cc_compute_all(CPUState *env1, int op)
{
    CPUState *saved_env;
    uint32_t ret;

    saved_env = env;
    env = env1;
    ret = helper_cc_compute_all(op);
    env = saved_env;
    return ret;
}

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();
    case CC_OP_ADDQ: return compute_c_addq();
    case CC_OP_ADCQ: return compute_c_adcq();
    case CC_OP_SUBQ: return compute_c_subq();
    case CC_OP_SBBQ: return compute_c_sbbq();
    case CC_OP_LOGICQ: return compute_c_logicq();
    case CC_OP_INCQ: return compute_c_incl();
    case CC_OP_DECQ: return compute_c_incl();
    case CC_OP_SHLQ: return compute_c_shlq();
    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}