/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "dyngen-exec.h"
#include "host-utils.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

/* n must be a constant to be efficient */
static inline target_long lshift(target_long x, int n)

#define FPU_RC_MASK 0xc00
#define FPU_RC_NEAR 0x000
#define FPU_RC_DOWN 0x400
#define FPU_RC_UP   0x800
#define FPU_RC_CHOP 0xc00
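
/* FPU_RC_* are the rounding-control values held in bits 10-11 of the x87
 * control word: 00 = round to nearest even, 01 = round down (toward -inf),
 * 10 = round up (toward +inf), 11 = chop (truncate toward zero).  For
 * example, (fpuc & FPU_RC_MASK) == FPU_RC_CHOP selects the truncation mode
 * that FIST-style integer conversions need. */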

#define MAXTAN 9223372036854775808.0

/* the following deal with x86 long double-precision numbers */
#define MAXEXPD 0x7fff
#define EXPD(fp)  (fp.l.upper & 0x7fff)
#define SIGND(fp) ((fp.l.upper) & 0x8000)
#define MANTD(fp) (fp.l.lower)
#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
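
/* The 80-bit extended-precision format is stored as 10 bytes: a 64-bit
 * mantissa (with an explicit integer bit) in l.lower, then a 16-bit word in
 * l.upper holding the sign bit (0x8000) and the 15-bit biased exponent
 * (bias 0x3fff) — which is why the accessors above mask with 0x7fff and
 * 0x8000. */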

static inline void fpush(void)
    env->fpstt = (env->fpstt - 1) & 7;
    env->fptags[env->fpstt] = 0; /* validate stack entry */

static inline void fpop(void)
    env->fptags[env->fpstt] = 1; /* invalidate stack entry */
    env->fpstt = (env->fpstt + 1) & 7;
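
/* The x87 register stack is the 8-entry fpregs array addressed modulo 8
 * through env->fpstt, which always names the current top of stack (ST0).
 * fptags[] holds one byte per physical register: 0 = valid, 1 = empty.  A
 * push decrements the top pointer and marks the new slot valid; a pop marks
 * the old top empty and increments. */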

static inline floatx80 helper_fldt(target_ulong ptr)
    temp.l.lower = ldq(ptr);
    temp.l.upper = lduw(ptr + 8);

static inline void helper_fstt(floatx80 f, target_ulong ptr)
    stq(ptr, temp.l.lower);
    stw(ptr + 8, temp.l.upper);

#define FPUS_IE (1 << 0)
#define FPUS_DE (1 << 1)
#define FPUS_ZE (1 << 2)
#define FPUS_OE (1 << 3)
#define FPUS_UE (1 << 4)
#define FPUS_PE (1 << 5)
#define FPUS_SF (1 << 6)
#define FPUS_SE (1 << 7)
#define FPUS_B  (1 << 15)

static inline uint32_t compute_eflags(void)
    return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);

/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
static inline void load_eflags(int eflags, int update_mask)
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((eflags >> 10) & 1));
    env->eflags = (env->eflags & ~update_mask) |
        (eflags & update_mask) | 0x2;
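
/* Bit 10 of EFLAGS is DF.  The expression above maps it to the internal
 * string-op increment: DF=0 gives +1 (addresses ascend) and DF=1 gives -1
 * (addresses descend), e.g. eflags = 0x400 -> DF = 1 - 2*1 = -1.  Bit 1 of
 * EFLAGS is architecturally always 1, hence the "| 0x2". */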

/* load efer and update the corresponding hflags. XXX: do consistency
   checks with cpuid bits? */
static inline void cpu_load_efer(CPUState *env, uint64_t val)
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;

#define raise_exception_err(a, b)\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\

static void QEMU_NORETURN raise_exception_err(int exception_index,

static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
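
/* RCL rotates through CF, so a w-bit operand behaves as a (w+1)-bit
 * rotation: the shift count is taken modulo 17 for 16-bit RCL and modulo 9
 * for 8-bit RCL.  These tables fold a 5-bit count (0-31) down to that
 * modulus, e.g. rclb_table[10] == 1 because rotating an 8-bit value plus
 * carry by 10 is the same as rotating it by 1. */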

#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL)
#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)

/* broken thread support */
static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
    spin_lock(&global_cpu_lock);

void helper_unlock(void)
    spin_unlock(&global_cpu_lock);

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
    load_eflags(t0, update_mask);

target_ulong helper_read_eflags(void)
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
    index = selector & ~7;
    if ((index + 7) > dt->limit)
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
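
/* A segment descriptor packs its fields non-contiguously.  Base =
 * e1[31:16] | e2[7:0] << 16 | e2[31:24]; limit = e1[15:0] | e2[19:16] << 16,
 * and with the G bit set the 20-bit limit is in 4K pages, so it is shifted
 * left by 12 with 0xfff filled in (a raw limit of 0xfffff becomes
 * 0xffffffff). */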

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
    int type, index, shift;

    printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
    for (i = 0; i < env->tr.limit; i++) {
        printf("%02x ", env->tr.base[i]);
        if ((i & 7) == 7) printf("\n");

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
        cpu_abort(env, "invalid tss type");
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
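
/* In a 32-bit TSS the privilege-level stacks live at offset 4 + 8*dpl
 * (ESP0 at 4, SS0 at 8, ESP1 at 12, ...); in a 16-bit TSS at 2 + 4*dpl
 * with 2-byte fields.  Both cases are covered by index = (dpl * 4 + 2) <<
 * shift, with shift 1 for a 32-bit TSS and 0 for a 16-bit one — hence the
 * word-sized and long-sized reads above. */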

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct? */
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
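
/* A task switch reaches switch_tss() three ways, and the TSS busy bit and
 * EFLAGS.NT differ for each: JMP and IRET clear the busy bit of the
 * outgoing TSS, JMP and CALL set it on the incoming one, and only CALL
 * sets NT and records the old TSS selector as the back link. */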

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
        old_tss_limit_max = 103;
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for (i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for (i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);

        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for (i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for (i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
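
/* 32-bit TSS layout (limit >= 103): CR3 at 0x1c, EIP at 0x20, EFLAGS at
 * 0x24, EAX..EDI at 0x28..0x44, ES..GS selectors at 0x48..0x5c, LDT
 * selector at 0x60, T bit / I/O map base at 0x64.  16-bit TSS (limit >=
 * 43): IP at 0x0e, FLAGS at 0x10, registers at 0x12..0x20, four segment
 * selectors from 0x22, LDT at 0x2a; 16-bit registers are read with the
 * high half forced to all-ones above. */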

    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for (i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);

        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for (i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;

    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);

    /* load all registers without an exception, then reload them with
       possible exception */
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */

    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);

    env->ldt.selector = new_ldt & ~4;
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
    val = lduw_kernel(env->tr.base + io_offset);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
        raise_exception_err(EXCP0D_GPF, 0);

void helper_check_iob(uint32_t t0)

void helper_check_iow(uint32_t t0)

void helper_check_iol(uint32_t t0)

void helper_outb(uint32_t port, uint32_t data)
    cpu_outb(port, data & 0xff);

target_ulong helper_inb(uint32_t port)
    return cpu_inb(port);

void helper_outw(uint32_t port, uint32_t data)
    cpu_outw(port, data & 0xffff);

target_ulong helper_inw(uint32_t port)
    return cpu_inw(port);

void helper_outl(uint32_t port, uint32_t data)
    cpu_outl(port, data);

target_ulong helper_inl(uint32_t port)
    return cpu_inl(port);

static inline unsigned int get_sp_mask(unsigned int e2)
    if (e2 & DESC_B_MASK)

static int exeption_has_error_code(int intno)

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
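
/* Stack writes must respect SS's size: with a 16-bit stack only SP is
 * updated and the upper half of ESP is preserved; with a 32-bit stack ESP
 * is replaced (truncated to 32 bits on 64-bit targets); the remaining
 * 64-bit case writes RSP whole.  SET_ESP encodes exactly that masking
 * rule. */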

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\

#define PUSHL(ssp, sp, sp_mask, val)\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\

#define POPW(ssp, sp, sp_mask, val)\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\

#define POPL(ssp, sp, sp_mask, val)\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
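
/* Each PUSH* macro decrements the caller's local sp copy before storing,
 * and each POP* increments it after loading; sp is committed to ESP only
 * at the end via SET_ESP.  A fault in the middle of an interrupt frame
 * push therefore leaves the architectural stack pointer untouched and the
 * operation restartable. */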

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);

    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            if (env->segs[R_SS].flags & DESC_B_MASK)
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
                stl_kernel(ssp, error_code);
                stw_kernel(ssp, error_code);
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */

    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)

    if (env->eflags & VM_MASK) {
        PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
        PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
        PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
        PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
    PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
    PUSHL(ssp, esp, sp_mask, ESP);
    PUSHL(ssp, esp, sp_mask, compute_eflags());
    PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
    PUSHL(ssp, esp, sp_mask, old_eip);
    if (has_error_code) {
        PUSHL(ssp, esp, sp_mask, error_code);

    if (env->eflags & VM_MASK) {
        PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
        PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
        PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
        PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
    PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
    PUSHW(ssp, esp, sp_mask, ESP);
    PUSHW(ssp, esp, sp_mask, compute_eflags());
    PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
    PUSHW(ssp, esp, sp_mask, old_eip);
    if (has_error_code) {
        PUSHW(ssp, esp, sp_mask, error_code);

    if (env->eflags & VM_MASK) {
        cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
        cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
        cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
        cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
    ss = (ss & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_SS, ss,
                           ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
    cpu_x86_set_cpl(env, dpl);

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

#define PUSHQ(sp, val)\
    stq_kernel(sp, (val));\

#define POPQ(sp, val)\
    val = ldq_kernel(sp);\

static inline target_ulong get_rsp_from_tss(int level)
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
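
/* The 64-bit TSS keeps full RSP values for CPL 0-2 at offsets 4, 12 and
 * 20, followed by IST1-IST7 from offset 36, which is what index =
 * 8 * level + 4 walks: level 0-2 selects an RSP, and level ist + 3 (see
 * the callers below) selects an IST slot. */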

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);

    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
            esp = get_rsp_from_tss(ist + 3);
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            esp = get_rsp_from_tss(ist + 3);
        esp &= ~0xfLL; /* align stack */
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);

        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
    cpu_x86_set_cpl(env, dpl);

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
#else
void helper_syscall(int next_eip_addend)
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
            env->eip = env->lstar;
            env->eip = env->cstar;
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
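
/* SYSCALL takes its targets from MSRs: STAR[47:32] supplies the kernel CS
 * selector (SS is that value + 8), LSTAR holds the 64-bit entry RIP, CSTAR
 * the entry point for 32-bit compatibility-mode callers, and SFMASK
 * (env->fmask) lists the RFLAGS bits to clear on entry.  RCX and R11
 * preserve the return RIP and RFLAGS for SYSRET, which reads STAR[63:48]
 * for its user-mode selectors. */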

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
    target_ulong ptr, ssp;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
    int dpl, cpl, shift;

    if (env->hflags & HF_LMA_MASK) {
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. */

static void handle_even_inj(int intno, int is_int, int error_code,
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
            type = SVM_EVTINJ_TYPE_SOFT;
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            log_cpu_state(env, X86_DUMP_CCOP);
            ptr = env->segs[R_CS].base + env->eip;
            for (i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));

    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
        do_interrupt_real(intno, is_int, error_code, next_eip);

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

void do_interrupt(CPUState *env1)
    CPUState *saved_env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;

void do_interrupt_x86_hardirq(CPUState *env1, int intno, int is_hw)
    CPUState *saved_env;

    do_interrupt_all(intno, 0, 0, 0, is_hw);

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
        qemu_system_reset_request();

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;

/* shortcuts to generate exceptions */

static void QEMU_NORETURN raise_exception_err(int exception_index,
    raise_interrupt(exception_index, 0, error_code, 0);

void raise_exception_err_env(CPUState *nenv, int exception_index,
    raise_interrupt(exception_index, 0, error_code, 0);

static void QEMU_NORETURN raise_exception(int exception_index)
    raise_interrupt(exception_index, 0, 0, 0);

void raise_exception_env(int exception_index, CPUState *nenv)
    raise_exception(exception_index);

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(CPUState *env1)

void helper_rsm(void)

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
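
/* On SMI entry the CPU dumps its state into SMRAM at smbase + 0x8000 (the
 * save-area offsets used below, e.g. 0x7ff8 for RAX in the 64-bit layout,
 * are relative to that block) and RSM reloads it from there.  The revision
 * ID advertises the layout; bit 17 (0x20000) means the SMBASE field at
 * 0x7f00/0x7ef8 is present and relocatable, which is why helper_rsm()
 * checks it before rewriting env->smbase. */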

void do_smm_enter(CPUState *env1)
    target_ulong sm_state;
    CPUState *saved_env;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for (i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);

    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for (i = 0; i < 6; i++) {
            offset = 0x7f84 + i * 12;
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);

    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;

void helper_rsm(void)
    target_ulong sm_state;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for (i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;

    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
            offset = 0x7f84 + i * 12;
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;

    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
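
/* DIV r/m8 divides AX by the 8-bit operand, leaving the quotient in AL and
 * the remainder in AH (e.g. AX=0x0105, divisor 4 -> AL=0x41, AH=0x01).
 * The two raise_exception calls above cover the #DE cases: a zero divisor
 * and a quotient that does not fit in 8 bits. */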

void helper_idivb_AL(target_ulong t0)
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;

void helper_divw_AX(target_ulong t0)
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;

void helper_idivw_AX(target_ulong t0)
    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
        raise_exception(EXCP00_DIVZ);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;

void helper_divl_EAX(target_ulong t0)
    unsigned int den, r;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
        raise_exception(EXCP00_DIVZ);
        raise_exception(EXCP00_DIVZ);

void helper_idivl_EAX(target_ulong t0)
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
        raise_exception(EXCP00_DIVZ);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);

/* XXX: exception */
void helper_aam(int base)
    EAX = (EAX & ~0xffff) | al | (ah << 8);

void helper_aad(int base)
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
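
/* AAM divides AL by the immediate base (10 by default) into AH:AL, and AAD
 * folds AH back: AL = AH * base + AL, AH = 0.  E.g. after AAM with AL=0x35
 * (53) and base 10, AH=5 and AL=3; AAD reverses it.  The XXX above refers
 * to AAM's missing #DE check for base == 0. */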

void helper_aaa(void)
    eflags = helper_cc_compute_all(CC_OP);
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
        eflags &= ~(CC_C | CC_A);
    EAX = (EAX & ~0xffff) | al | (ah << 8);

void helper_aas(void)
    eflags = helper_cc_compute_all(CC_OP);
    ah = (EAX >> 8) & 0xff;

    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
        eflags &= ~(CC_C | CC_A);
    EAX = (EAX & ~0xffff) | al | (ah << 8);

void helper_daa(void)
    int old_al, al, af, cf;

    eflags = helper_cc_compute_all(CC_OP);
    old_al = al = EAX & 0xff;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
    if ((old_al > 0x99) || cf) {
        al = (al + 0x60) & 0xff;
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */

void helper_das(void)
    int al, al1, af, cf;

    eflags = helper_cc_compute_all(CC_OP);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0xff;
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */

void helper_into(int next_eip_addend)
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);

void helper_cmpxchg8b(target_ulong a0)
    eflags = helper_cc_compute_all(CC_OP);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        /* always do the store */
        EDX = (uint32_t)(d >> 32);
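
/* CMPXCHG8B compares EDX:EAX with the 64-bit memory operand: on a match it
 * stores ECX:EBX and sets ZF, otherwise it loads the old value into
 * EDX:EAX and clears ZF.  The unconditional store mirrors hardware, which
 * writes the destination back even when the comparison fails. */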

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    if (d0 == EAX && d1 == EDX) {
        /* always do the store */

void helper_single_step(void)
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
    raise_exception(EXCP01_DB);

void helper_cpuid(void)
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);

void helper_enter_level(int level, int data32, target_ulong t1)
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
        stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        stl(ssp + (esp & esp_mask), t1);
        stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        stw(ssp + (esp & esp_mask), t1);

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
    target_ulong esp, ebp;
        stw(esp, lduw(ebp));
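
/* ENTER with a nonzero nesting level copies the enclosing frame pointers:
 * for each level, one entry of the old EBP display chain is re-pushed (the
 * stl/stw copy pairs above), then the new frame pointer itself (t1) is
 * pushed, giving nested procedures access to every outer frame. */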

void helper_lldt(int selector)
    int index, entry_limit;

    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
    env->ldt.selector = selector;

void helper_ltr(int selector)
    int index, type, entry_limit;

    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    env->tr.selector = selector;
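
/* LLDT and LTR share the descriptor walk above: LLDT requires an LDT
 * descriptor (system type 2), LTR an available TSS (type 1 or 9); in this
 * implementation a null selector just installs an unusable table instead
 * of faulting.  LTR additionally writes DESC_TSS_BUSY_MASK back into the
 * GDT entry, marking the TSS busy once loaded. */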

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            stl_kernel(ptr + 4, e2);

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
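
/* Privilege rule enforced above for data and stack loads: a segment is
 * reachable only if max(CPL, RPL) <= DPL, with the stricter requirement
 * rpl == cpl == dpl for SS.  Conforming code segments are exempt, which is
 * why the dpl comparisons are skipped for them.  A not-present SS raises
 * #SS rather than #NP. */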

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* non conforming code segment */
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2439
/* real mode call */
2440
void helper_lcall_real(int new_cs, target_ulong new_eip1,
2441
int shift, int next_eip)
2444
uint32_t esp, esp_mask;
2449
esp_mask = get_sp_mask(env->segs[R_SS].flags);
2450
ssp = env->segs[R_SS].base;
2452
PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2453
PUSHL(ssp, esp, esp_mask, next_eip);
2455
PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2456
PUSHW(ssp, esp, esp_mask, next_eip);
2459
SET_ESP(esp, esp_mask);
2461
env->segs[R_CS].selector = new_cs;
2462
env->segs[R_CS].base = (new_cs << 4);
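/*
 * Worked example (real mode far call): with new_cs = 0x1234 the code
 * segment base becomes 0x1234 << 4 = 0x12340, so execution continues
 * at that base plus new_eip1; no descriptor table lookup is involved.
 */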
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
int shift, int next_eip_addend)
uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
uint32_t val, limit, old_sp_mask;
target_ulong ssp, old_ssp, next_eip;
next_eip = env->eip + next_eip_addend;
LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
LOG_PCALL_STATE(env);
if ((new_cs & 0xfffc) == 0)
raise_exception_err(EXCP0D_GPF, 0);
if (load_segment(&e1, &e2, new_cs) != 0)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
cpl = env->hflags & HF_CPL_MASK;
LOG_PCALL("desc=%08x:%08x\n", e1, e2);
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (e2 & DESC_C_MASK) {
/* conforming code segment */
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
/* non conforming code segment */
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
#ifdef TARGET_X86_64
/* XXX: check 16/32 bit cases in long mode */
PUSHQ(rsp, env->segs[R_CS].selector);
PUSHQ(rsp, next_eip);
/* from this point, not restartable */
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
get_seg_base(e1, e2),
get_seg_limit(e1, e2), e2);
sp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;
PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
PUSHL(ssp, sp, sp_mask, next_eip);
PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
PUSHW(ssp, sp, sp_mask, next_eip);
limit = get_seg_limit(e1, e2);
if (new_eip > limit)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
/* from this point, not restartable */
SET_ESP(sp, sp_mask);
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
get_seg_base(e1, e2), limit, e2);
/* check gate type */
type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
case 1: /* available 286 TSS */
case 9: /* available 386 TSS */
case 5: /* task gate */
if (dpl < cpl || dpl < rpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
CC_OP = CC_OP_EFLAGS;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
if (dpl < cpl || dpl < rpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
/* check valid bit */
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
selector = e1 >> 16;
offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
param_count = e2 & 0x1f;
if ((selector & 0xfffc) == 0)
raise_exception_err(EXCP0D_GPF, 0);
if (load_segment(&e1, &e2, selector) != 0)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner privilege */
get_ss_esp_from_tss(&ss, &sp, dpl);
LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
ss, sp, param_count, ESP);
if ((ss & 0xfffc) == 0)
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
if ((ss & 3) != dpl)
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
if (load_segment(&ss_e1, &ss_e2, ss) != 0)
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK))
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
if (!(ss_e2 & DESC_P_MASK))
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
// push_size = ((param_count * 2) + 8) << shift;
old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
old_ssp = env->segs[R_SS].base;
sp_mask = get_sp_mask(ss_e2);
ssp = get_seg_base(ss_e1, ss_e2);
PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
PUSHL(ssp, sp, sp_mask, ESP);
for(i = param_count - 1; i >= 0; i--) {
val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
PUSHL(ssp, sp, sp_mask, val);
PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
PUSHW(ssp, sp, sp_mask, ESP);
for(i = param_count - 1; i >= 0; i--) {
val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
PUSHW(ssp, sp, sp_mask, val);
/* to same privilege */
sp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;
// push_size = (4 << shift);
PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
PUSHL(ssp, sp, sp_mask, next_eip);
PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
PUSHW(ssp, sp, sp_mask, next_eip);
/* from this point, not restartable */
ss = (ss & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_SS, ss,
get_seg_limit(ss_e1, ss_e2),
selector = (selector & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_CS, selector,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
cpu_x86_set_cpl(env, dpl);
SET_ESP(sp, sp_mask);
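/*
 * Illustrative stack layout after an inner-privilege 32-bit call gate
 * transfer, assuming param_count = 2 (highest address first):
 *
 *   old SS, old ESP, param[1], param[0], old CS, return EIP
 *
 * The parameters are re-read from the outer stack with ldl_kernel()/
 * lduw_kernel() in the loops above before CS:EIP is pushed on the new
 * stack.
 */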
/* real and vm86 mode iret */
void helper_iret_real(int shift)
uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
sp_mask = 0xffff; /* XXXX: use SS segment size ? */
ssp = env->segs[R_SS].base;
POPL(ssp, sp, sp_mask, new_eip);
POPL(ssp, sp, sp_mask, new_cs);
POPL(ssp, sp, sp_mask, new_eflags);
POPW(ssp, sp, sp_mask, new_eip);
POPW(ssp, sp, sp_mask, new_cs);
POPW(ssp, sp, sp_mask, new_eflags);
ESP = (ESP & ~sp_mask) | (sp & sp_mask);
env->segs[R_CS].selector = new_cs;
env->segs[R_CS].base = (new_cs << 4);
if (env->eflags & VM_MASK)
eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
eflags_mask &= 0xffff;
load_eflags(new_eflags, eflags_mask);
env->hflags2 &= ~HF2_NMI_MASK;
static inline void validate_seg(int seg_reg, int cpl)
/* XXX: on x86_64, we do not want to nullify FS and GS because
they may still contain a valid base. I would be interested to
know how a real x86_64 CPU behaves */
if ((seg_reg == R_FS || seg_reg == R_GS) &&
(env->segs[seg_reg].selector & 0xfffc) == 0)
e2 = env->segs[seg_reg].flags;
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
/* data or non conforming code segment */
cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
uint32_t new_cs, new_eflags, new_ss;
uint32_t new_es, new_ds, new_fs, new_gs;
uint32_t e1, e2, ss_e1, ss_e2;
int cpl, dpl, rpl, eflags_mask, iopl;
target_ulong ssp, sp, new_eip, new_esp, sp_mask;
#ifdef TARGET_X86_64
sp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;
new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
POPQ(sp, new_eflags);
POPL(ssp, sp, sp_mask, new_eip);
POPL(ssp, sp, sp_mask, new_cs);
POPL(ssp, sp, sp_mask, new_eflags);
if (new_eflags & VM_MASK)
goto return_to_vm86;
POPW(ssp, sp, sp_mask, new_eip);
POPW(ssp, sp, sp_mask, new_cs);
POPW(ssp, sp, sp_mask, new_eflags);
LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
new_cs, new_eip, shift, addend);
LOG_PCALL_STATE(env);
if ((new_cs & 0xfffc) == 0)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
if (load_segment(&e1, &e2, new_cs) != 0)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
if (!(e2 & DESC_S_MASK) ||
!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
cpl = env->hflags & HF_CPL_MASK;
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
if (e2 & DESC_C_MASK) {
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
((env->hflags & HF_CS64_MASK) && !is_iret))) {
/* return to same privilege level */
cpu_x86_load_seg_cache(env, R_CS, new_cs,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
/* return to different privilege level */
#ifdef TARGET_X86_64
POPL(ssp, sp, sp_mask, new_esp);
POPL(ssp, sp, sp_mask, new_ss);
POPW(ssp, sp, sp_mask, new_esp);
POPW(ssp, sp, sp_mask, new_ss);
LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
/* NULL ss is allowed in long mode if cpl != 3 */
/* XXX: test CS64 ? */
if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
cpu_x86_load_seg_cache(env, R_SS, new_ss,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
DESC_W_MASK | DESC_A_MASK);
ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
raise_exception_err(EXCP0D_GPF, 0);
if ((new_ss & 3) != rpl)
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
if (!(ss_e2 & DESC_S_MASK) ||
(ss_e2 & DESC_CS_MASK) ||
!(ss_e2 & DESC_W_MASK))
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
if (!(ss_e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
cpu_x86_load_seg_cache(env, R_SS, new_ss,
get_seg_base(ss_e1, ss_e2),
get_seg_limit(ss_e1, ss_e2),
cpu_x86_load_seg_cache(env, R_CS, new_cs,
get_seg_base(e1, e2),
get_seg_limit(e1, e2),
cpu_x86_set_cpl(env, rpl);
#ifdef TARGET_X86_64
if (env->hflags & HF_CS64_MASK)
sp_mask = get_sp_mask(ss_e2);
/* validate data segments */
validate_seg(R_ES, rpl);
validate_seg(R_DS, rpl);
validate_seg(R_FS, rpl);
validate_seg(R_GS, rpl);
SET_ESP(sp, sp_mask);
/* NOTE: 'cpl' is the _old_ CPL */
eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
eflags_mask |= IOPL_MASK;
iopl = (env->eflags >> IOPL_SHIFT) & 3;
eflags_mask |= IF_MASK;
eflags_mask &= 0xffff;
load_eflags(new_eflags, eflags_mask);
POPL(ssp, sp, sp_mask, new_esp);
POPL(ssp, sp, sp_mask, new_ss);
POPL(ssp, sp, sp_mask, new_es);
POPL(ssp, sp, sp_mask, new_ds);
POPL(ssp, sp, sp_mask, new_fs);
POPL(ssp, sp, sp_mask, new_gs);
/* modify processor state */
load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
load_seg_vm(R_CS, new_cs & 0xffff);
cpu_x86_set_cpl(env, 3);
load_seg_vm(R_SS, new_ss & 0xffff);
load_seg_vm(R_ES, new_es & 0xffff);
load_seg_vm(R_DS, new_ds & 0xffff);
load_seg_vm(R_FS, new_fs & 0xffff);
load_seg_vm(R_GS, new_gs & 0xffff);
env->eip = new_eip & 0xffff;
void helper_iret_protected(int shift, int next_eip)
int tss_selector, type;
/* specific case for TSS */
if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK)
raise_exception_err(EXCP0D_GPF, 0);
tss_selector = lduw_kernel(env->tr.base + 0);
if (tss_selector & 4)
raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
if (load_segment(&e1, &e2, tss_selector) != 0)
raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
/* NOTE: we check both segment and busy TSS */
raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
helper_ret_protected(shift, 1, 0);
env->hflags2 &= ~HF2_NMI_MASK;
void helper_lret_protected(int shift, int addend)
helper_ret_protected(shift, 0, addend);
void helper_sysenter(void)
if (env->sysenter_cs == 0) {
raise_exception_err(EXCP0D_GPF, 0);
env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
cpu_x86_set_cpl(env, 0);
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_W_MASK | DESC_A_MASK);
ESP = env->sysenter_esp;
EIP = env->sysenter_eip;
void helper_sysexit(int dflag)
cpl = env->hflags & HF_CPL_MASK;
if (env->sysenter_cs == 0 || cpl != 0) {
raise_exception_err(EXCP0D_GPF, 0);
cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_W_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_W_MASK | DESC_A_MASK);
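/*
 * All SYSENTER/SYSEXIT selectors are derived from IA32_SYSENTER_CS:
 * legacy mode uses CS and CS+8 (SS) on entry and CS+16/CS+24 on exit,
 * while 64-bit SYSEXIT (dflag set) uses CS+32/CS+40.  E.g. with
 * sysenter_cs = 0x60, a 32-bit SYSEXIT loads CS = 0x73 and SS = 0x7b
 * (RPL forced to 3).
 */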
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
void helper_write_crN(int reg, target_ulong t0)
void helper_movl_drN_T0(int reg, target_ulong t0)
target_ulong helper_read_crN(int reg)
helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
if (!(env->hflags2 & HF2_VINTR_MASK)) {
val = cpu_get_apic_tpr(env->apic_state);
void helper_write_crN(int reg, target_ulong t0)
helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
cpu_x86_update_cr0(env, t0);
cpu_x86_update_cr3(env, t0);
cpu_x86_update_cr4(env, t0);
if (!(env->hflags2 & HF2_VINTR_MASK)) {
cpu_set_apic_tpr(env->apic_state, t0);
env->v_tpr = t0 & 0x0f;
void helper_movl_drN_T0(int reg, target_ulong t0)
hw_breakpoint_remove(env, reg);
hw_breakpoint_insert(env, reg);
} else if (reg == 7) {
for (i = 0; i < 4; i++)
hw_breakpoint_remove(env, i);
for (i = 0; i < 4; i++)
hw_breakpoint_insert(env, i);
void helper_lmsw(target_ulong t0)
/* only 4 lower bits of CR0 are modified. PE cannot be set to zero
if already set to one. */
t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
helper_write_crN(0, t0);
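/*
 * Worked example: LMSW writes only CR0[3:0] and can set but never
 * clear PE.  With CR0 = 0x1f and a source operand of 0, the
 * expression above yields (0x1f & ~0xe) | 0 = 0x11, so MP/EM/TS are
 * cleared while PE (bit 0) stays set.
 */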
void helper_clts(void)
env->cr[0] &= ~CR0_TS_MASK;
env->hflags &= ~HF_TS_MASK;
void helper_invlpg(target_ulong addr)
helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
tlb_flush_page(env, addr);
void helper_rdtsc(void)
if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
raise_exception(EXCP0D_GPF);
helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
val = cpu_get_tsc(env) + env->tsc_offset;
EAX = (uint32_t)(val);
EDX = (uint32_t)(val >> 32);
void helper_rdtscp(void)
ECX = (uint32_t)(env->tsc_aux);
void helper_rdpmc(void)
if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
raise_exception(EXCP0D_GPF);
helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
/* currently unimplemented */
raise_exception_err(EXCP06_ILLOP, 0);
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
void helper_rdmsr(void)
void helper_wrmsr(void)
helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
switch((uint32_t)ECX) {
case MSR_IA32_SYSENTER_CS:
env->sysenter_cs = val & 0xffff;
case MSR_IA32_SYSENTER_ESP:
env->sysenter_esp = val;
case MSR_IA32_SYSENTER_EIP:
env->sysenter_eip = val;
case MSR_IA32_APICBASE:
cpu_set_apic_base(env->apic_state, val);
uint64_t update_mask = 0;
if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
update_mask |= MSR_EFER_SCE;
if (env->cpuid_ext2_features & CPUID_EXT2_LM)
update_mask |= MSR_EFER_LME;
if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
update_mask |= MSR_EFER_FFXSR;
if (env->cpuid_ext2_features & CPUID_EXT2_NX)
update_mask |= MSR_EFER_NXE;
if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
update_mask |= MSR_EFER_SVME;
cpu_load_efer(env, (env->efer & ~update_mask) |
(val & update_mask));
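/*
 * Only EFER bits whose CPUID feature flag is advertised end up in
 * update_mask; e.g. on a CPU model without CPUID_EXT2_NX, a guest
 * write that sets MSR_EFER_NXE is simply masked out here.
 */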
case MSR_VM_HSAVE_PA:
env->vm_hsave = val;
#ifdef TARGET_X86_64
env->segs[R_FS].base = val;
env->segs[R_GS].base = val;
case MSR_KERNELGSBASE:
env->kernelgsbase = val;
case MSR_MTRRphysBase(0):
case MSR_MTRRphysBase(1):
case MSR_MTRRphysBase(2):
case MSR_MTRRphysBase(3):
case MSR_MTRRphysBase(4):
case MSR_MTRRphysBase(5):
case MSR_MTRRphysBase(6):
case MSR_MTRRphysBase(7):
env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
case MSR_MTRRphysMask(0):
case MSR_MTRRphysMask(1):
case MSR_MTRRphysMask(2):
case MSR_MTRRphysMask(3):
case MSR_MTRRphysMask(4):
case MSR_MTRRphysMask(5):
case MSR_MTRRphysMask(6):
case MSR_MTRRphysMask(7):
env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
case MSR_MTRRfix64K_00000:
env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
case MSR_MTRRdefType:
env->mtrr_deftype = val;
case MSR_MCG_STATUS:
env->mcg_status = val;
if ((env->mcg_cap & MCG_CTL_P)
&& (val == 0 || val == ~(uint64_t)0))
case MSR_IA32_MISC_ENABLE:
env->msr_ia32_misc_enable = val;
if ((uint32_t)ECX >= MSR_MC0_CTL
&& (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
if ((offset & 0x3) != 0
|| (val == 0 || val == ~(uint64_t)0))
env->mce_banks[offset] = val;
/* XXX: exception ? */
void helper_rdmsr(void)
helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
switch((uint32_t)ECX) {
case MSR_IA32_SYSENTER_CS:
val = env->sysenter_cs;
case MSR_IA32_SYSENTER_ESP:
val = env->sysenter_esp;
case MSR_IA32_SYSENTER_EIP:
val = env->sysenter_eip;
case MSR_IA32_APICBASE:
val = cpu_get_apic_base(env->apic_state);
case MSR_VM_HSAVE_PA:
val = env->vm_hsave;
case MSR_IA32_PERF_STATUS:
/* tsc_increment_by_tick */
/* CPU multiplier */
val |= (((uint64_t)4ULL) << 40);
#ifdef TARGET_X86_64
val = env->segs[R_FS].base;
val = env->segs[R_GS].base;
case MSR_KERNELGSBASE:
val = env->kernelgsbase;
case MSR_MTRRphysBase(0):
case MSR_MTRRphysBase(1):
case MSR_MTRRphysBase(2):
case MSR_MTRRphysBase(3):
case MSR_MTRRphysBase(4):
case MSR_MTRRphysBase(5):
case MSR_MTRRphysBase(6):
case MSR_MTRRphysBase(7):
val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
case MSR_MTRRphysMask(0):
case MSR_MTRRphysMask(1):
case MSR_MTRRphysMask(2):
case MSR_MTRRphysMask(3):
case MSR_MTRRphysMask(4):
case MSR_MTRRphysMask(5):
case MSR_MTRRphysMask(6):
case MSR_MTRRphysMask(7):
val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
case MSR_MTRRfix64K_00000:
val = env->mtrr_fixed[0];
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
case MSR_MTRRdefType:
val = env->mtrr_deftype;
if (env->cpuid_features & CPUID_MTRR)
val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
/* XXX: exception ? */
if (env->mcg_cap & MCG_CTL_P)
case MSR_MCG_STATUS:
val = env->mcg_status;
case MSR_IA32_MISC_ENABLE:
val = env->msr_ia32_misc_enable;
if ((uint32_t)ECX >= MSR_MC0_CTL
&& (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
val = env->mce_banks[offset];
/* XXX: exception ? */
EAX = (uint32_t)(val);
EDX = (uint32_t)(val >> 32);
target_ulong helper_lsl(target_ulong selector1)
uint32_t e1, e2, eflags, selector;
int rpl, dpl, cpl, type;
selector = selector1 & 0xffff;
eflags = helper_cc_compute_all(CC_OP);
if ((selector & 0xfffc) == 0)
if (load_segment(&e1, &e2, selector) != 0)
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
cpl = env->hflags & HF_CPL_MASK;
if (e2 & DESC_S_MASK) {
if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
if (dpl < cpl || dpl < rpl)
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
if (dpl < cpl || dpl < rpl) {
CC_SRC = eflags & ~CC_Z;
limit = get_seg_limit(e1, e2);
CC_SRC = eflags | CC_Z;
target_ulong helper_lar(target_ulong selector1)
uint32_t e1, e2, eflags, selector;
int rpl, dpl, cpl, type;
selector = selector1 & 0xffff;
eflags = helper_cc_compute_all(CC_OP);
if ((selector & 0xfffc) == 0)
if (load_segment(&e1, &e2, selector) != 0)
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
cpl = env->hflags & HF_CPL_MASK;
if (e2 & DESC_S_MASK) {
if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
if (dpl < cpl || dpl < rpl)
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
if (dpl < cpl || dpl < rpl) {
CC_SRC = eflags & ~CC_Z;
CC_SRC = eflags | CC_Z;
return e2 & 0x00f0ff00;
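/*
 * The 0x00f0ff00 mask keeps the P/DPL/S/type byte (bits 8-15) and the
 * G/DB/L/AVL flags (bits 20-23) of the second descriptor word, which
 * is what LAR architecturally returns.  E.g. a present DPL=3 32-bit
 * code segment with e2 = 0x00cffb00 yields 0x00c0fb00.
 */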
void helper_verr(target_ulong selector1)
uint32_t e1, e2, eflags, selector;
selector = selector1 & 0xffff;
eflags = helper_cc_compute_all(CC_OP);
if ((selector & 0xfffc) == 0)
if (load_segment(&e1, &e2, selector) != 0)
if (!(e2 & DESC_S_MASK))
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
cpl = env->hflags & HF_CPL_MASK;
if (e2 & DESC_CS_MASK) {
if (!(e2 & DESC_R_MASK))
if (!(e2 & DESC_C_MASK)) {
if (dpl < cpl || dpl < rpl)
if (dpl < cpl || dpl < rpl) {
CC_SRC = eflags & ~CC_Z;
CC_SRC = eflags | CC_Z;
void helper_verw(target_ulong selector1)
uint32_t e1, e2, eflags, selector;
selector = selector1 & 0xffff;
eflags = helper_cc_compute_all(CC_OP);
if ((selector & 0xfffc) == 0)
if (load_segment(&e1, &e2, selector) != 0)
if (!(e2 & DESC_S_MASK))
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
cpl = env->hflags & HF_CPL_MASK;
if (e2 & DESC_CS_MASK) {
if (dpl < cpl || dpl < rpl)
if (!(e2 & DESC_W_MASK)) {
CC_SRC = eflags & ~CC_Z;
CC_SRC = eflags | CC_Z;
/* x87 FPU helpers */
static inline double floatx80_to_double(floatx80 a)
u.f64 = floatx80_to_float64(a, &env->fp_status);
static inline floatx80 double_to_floatx80(double a)
return float64_to_floatx80(u.f64, &env->fp_status);
static void fpu_set_exception(int mask)
if (env->fpus & (~env->fpuc & FPUC_EM))
env->fpus |= FPUS_SE | FPUS_B;
static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
if (floatx80_is_zero(b)) {
fpu_set_exception(FPUS_ZE);
return floatx80_div(a, b, &env->fp_status);
static void fpu_raise_exception(void)
if (env->cr[0] & CR0_NE_MASK) {
raise_exception(EXCP10_COPR);
#if !defined(CONFIG_USER_ONLY)
void helper_flds_FT0(uint32_t val)
FT0 = float32_to_floatx80(u.f, &env->fp_status);
void helper_fldl_FT0(uint64_t val)
FT0 = float64_to_floatx80(u.f, &env->fp_status);
void helper_fildl_FT0(int32_t val)
FT0 = int32_to_floatx80(val, &env->fp_status);
void helper_flds_ST0(uint32_t val)
new_fpstt = (env->fpstt - 1) & 7;
env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
void helper_fldl_ST0(uint64_t val)
new_fpstt = (env->fpstt - 1) & 7;
env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
void helper_fildl_ST0(int32_t val)
new_fpstt = (env->fpstt - 1) & 7;
env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
void helper_fildll_ST0(int64_t val)
new_fpstt = (env->fpstt - 1) & 7;
env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
uint32_t helper_fsts_ST0(void)
u.f = floatx80_to_float32(ST0, &env->fp_status);
uint64_t helper_fstl_ST0(void)
u.f = floatx80_to_float64(ST0, &env->fp_status);
int32_t helper_fist_ST0(void)
val = floatx80_to_int32(ST0, &env->fp_status);
if (val != (int16_t)val)
int32_t helper_fistl_ST0(void)
val = floatx80_to_int32(ST0, &env->fp_status);
int64_t helper_fistll_ST0(void)
val = floatx80_to_int64(ST0, &env->fp_status);
int32_t helper_fistt_ST0(void)
val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
if (val != (int16_t)val)
int32_t helper_fisttl_ST0(void)
val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
int64_t helper_fisttll_ST0(void)
val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
void helper_fldt_ST0(target_ulong ptr)
new_fpstt = (env->fpstt - 1) & 7;
env->fpregs[new_fpstt].d = helper_fldt(ptr);
env->fpstt = new_fpstt;
env->fptags[new_fpstt] = 0; /* validate stack entry */
void helper_fstt_ST0(target_ulong ptr)
helper_fstt(ST0, ptr);
void helper_fpush(void)
void helper_fpop(void)
void helper_fdecstp(void)
env->fpstt = (env->fpstt - 1) & 7;
env->fpus &= (~0x4700);
void helper_fincstp(void)
env->fpstt = (env->fpstt + 1) & 7;
env->fpus &= (~0x4700);
void helper_ffree_STN(int st_index)
env->fptags[(env->fpstt + st_index) & 7] = 1;
void helper_fmov_ST0_FT0(void)
void helper_fmov_FT0_STN(int st_index)
void helper_fmov_ST0_STN(int st_index)
void helper_fmov_STN_ST0(int st_index)
void helper_fxchg_ST0_STN(int st_index)
/* FPU operations */
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
void helper_fcom_ST0_FT0(void)
ret = floatx80_compare(ST0, FT0, &env->fp_status);
env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
void helper_fucom_ST0_FT0(void)
ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
void helper_fcomi_ST0_FT0(void)
ret = floatx80_compare(ST0, FT0, &env->fp_status);
eflags = helper_cc_compute_all(CC_OP);
eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
void helper_fucomi_ST0_FT0(void)
ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
eflags = helper_cc_compute_all(CC_OP);
eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
void helper_fadd_ST0_FT0(void)
ST0 = floatx80_add(ST0, FT0, &env->fp_status);
void helper_fmul_ST0_FT0(void)
ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
void helper_fsub_ST0_FT0(void)
ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
void helper_fsubr_ST0_FT0(void)
ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
void helper_fdiv_ST0_FT0(void)
ST0 = helper_fdiv(ST0, FT0);
void helper_fdivr_ST0_FT0(void)
ST0 = helper_fdiv(FT0, ST0);
/* fp operations between STN and ST0 */
void helper_fadd_STN_ST0(int st_index)
ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
void helper_fmul_STN_ST0(int st_index)
ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
void helper_fsub_STN_ST0(int st_index)
ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
void helper_fsubr_STN_ST0(int st_index)
ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
void helper_fdiv_STN_ST0(int st_index)
*p = helper_fdiv(*p, ST0);
void helper_fdivr_STN_ST0(int st_index)
*p = helper_fdiv(ST0, *p);
/* misc FPU operations */
void helper_fchs_ST0(void)
ST0 = floatx80_chs(ST0);
void helper_fabs_ST0(void)
ST0 = floatx80_abs(ST0);
void helper_fld1_ST0(void)
void helper_fldl2t_ST0(void)
void helper_fldl2e_ST0(void)
void helper_fldpi_ST0(void)
void helper_fldlg2_ST0(void)
void helper_fldln2_ST0(void)
void helper_fldz_ST0(void)
ST0 = floatx80_zero;
void helper_fldz_FT0(void)
FT0 = floatx80_zero;
uint32_t helper_fnstsw(void)
return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
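/*
 * Worked example: with fpstt = 2 and no exception flags set, FNSTSW
 * returns (0 & ~0x3800) | (2 << 11) = 0x1000, i.e. TOP = 2 in status
 * word bits 11-13.
 */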
uint32_t helper_fnstcw(void)
static void update_fp_status(void)
/* set rounding mode */
switch(env->fpuc & FPU_RC_MASK) {
rnd_type = float_round_nearest_even;
rnd_type = float_round_down;
rnd_type = float_round_up;
rnd_type = float_round_to_zero;
set_float_rounding_mode(rnd_type, &env->fp_status);
switch((env->fpuc >> 8) & 3) {
set_floatx80_rounding_precision(rnd_type, &env->fp_status);
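/*
 * Example: the power-on control word fpuc = 0x037f selects
 * round-to-nearest (RC bits 10-11 = 00) and full 64-bit-significand
 * precision (PC bits 8-9 = 11), so the two switches above pick
 * float_round_nearest_even and 80-bit rounding precision.
 */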
void helper_fldcw(uint32_t val)
void helper_fclex(void)
env->fpus &= 0x7f00;
void helper_fwait(void)
if (env->fpus & FPUS_SE)
fpu_raise_exception();
void helper_fninit(void)
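/*
 * The two helpers below use the x87 packed BCD format: bytes 0-8 hold
 * eighteen decimal digits, two per byte and least significant byte
 * first, and bit 7 of byte 9 is the sign.  E.g. the byte sequence
 * 45 23 01 00 00 00 00 00 00 00 encodes +12345.
 */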
void helper_fbld_ST0(target_ulong ptr)
for(i = 8; i >= 0; i--) {
val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
tmp = int64_to_floatx80(val, &env->fp_status);
if (ldub(ptr + 9) & 0x80) {
void helper_fbst_ST0(target_ulong ptr)
target_ulong mem_ref, mem_end;
val = floatx80_to_int64(ST0, &env->fp_status);
mem_end = mem_ref + 9;
while (mem_ref < mem_end) {
v = ((v / 10) << 4) | (v % 10);
while (mem_ref < mem_end) {
void helper_f2xm1(void)
double val = floatx80_to_double(ST0);
val = pow(2.0, val) - 1.0;
ST0 = double_to_floatx80(val);
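/*
 * F2XM1 computes 2^ST0 - 1; e.g. ST0 = 0.5 gives sqrt(2) - 1, about
 * 0.4142.  Using the host pow() is a simplification: the hardware
 * instruction is only defined for -1 <= ST0 <= +1.
 */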
void helper_fyl2x(void)
double fptemp = floatx80_to_double(ST0);
fptemp = log(fptemp)/log(2.0); /* log2(ST) */
fptemp *= floatx80_to_double(ST1);
ST1 = double_to_floatx80(fptemp);
env->fpus &= (~0x4700);
void helper_fptan(void)
double fptemp = floatx80_to_double(ST0);
if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
fptemp = tan(fptemp);
ST0 = double_to_floatx80(fptemp);
env->fpus &= (~0x400); /* C2 <-- 0 */
/* the above code is for |arg| < 2**52 only */
void helper_fpatan(void)
double fptemp, fpsrcop;
fpsrcop = floatx80_to_double(ST1);
fptemp = floatx80_to_double(ST0);
ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
void helper_fxtract(void)
if (floatx80_is_zero(ST0)) {
/* Easy way to generate -inf and raising division by 0 exception */
ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
expdif = EXPD(temp) - EXPBIAS;
/* DP exponent bias */
ST0 = int32_to_floatx80(expdif, &env->fp_status);
void helper_fprem1(void)
double st0, st1, dblq, fpsrcop, fptemp;
CPU_LDoubleU fpsrcop1, fptemp1;
signed long long int q;
st0 = floatx80_to_double(ST0);
st1 = floatx80_to_double(ST1);
if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
/* optimisation? taken from the AMD docs */
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
/* ST0 is unchanged */
dblq = fpsrcop / fptemp;
/* round dblq towards nearest integer */
st0 = fpsrcop - fptemp * dblq;
/* convert dblq to q by truncating towards zero */
q = (signed long long int)(-dblq);
q = (signed long long int)dblq;
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
/* (C0,C3,C1) <-- (q2,q1,q0) */
env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
env->fpus |= 0x400; /* C2 <-- 1 */
fptemp = pow(2.0, expdif - 50);
fpsrcop = (st0 / st1) / fptemp;
/* fpsrcop = integer obtained by chopping */
fpsrcop = (fpsrcop < 0.0) ?
-(floor(fabs(fpsrcop))) : floor(fpsrcop);
st0 -= (st1 * fpsrcop * fptemp);
ST0 = double_to_floatx80(st0);
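/*
 * Quotient reporting example: for a computed quotient q = 5 (binary
 * 101) the stores above set C0 = q2 = 1 (status bit 8), C3 = q1 = 0
 * (bit 14) and C1 = q0 = 1 (bit 9); FPREM below reports the same
 * three low quotient bits.
 */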
void helper_fprem(void)
double st0, st1, dblq, fpsrcop, fptemp;
CPU_LDoubleU fpsrcop1, fptemp1;
signed long long int q;
st0 = floatx80_to_double(ST0);
st1 = floatx80_to_double(ST1);
if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
/* optimisation? taken from the AMD docs */
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
/* ST0 is unchanged */
if (expdif < 53) {
dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
/* round dblq towards zero */
dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
st0 = fpsrcop/*ST0*/ - fptemp * dblq;
/* convert dblq to q by truncating towards zero */
q = (signed long long int)(-dblq);
q = (signed long long int)dblq;
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
/* (C0,C3,C1) <-- (q2,q1,q0) */
env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
int N = 32 + (expdif % 32); /* as per AMD docs */
env->fpus |= 0x400; /* C2 <-- 1 */
fptemp = pow(2.0, (double)(expdif - N));
fpsrcop = (st0 / st1) / fptemp;
/* fpsrcop = integer obtained by chopping */
fpsrcop = (fpsrcop < 0.0) ?
-(floor(fabs(fpsrcop))) : floor(fpsrcop);
st0 -= (st1 * fpsrcop * fptemp);
ST0 = double_to_floatx80(st0);
void helper_fyl2xp1(void)
double fptemp = floatx80_to_double(ST0);
if ((fptemp + 1.0) > 0.0) {
fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
fptemp *= floatx80_to_double(ST1);
ST1 = double_to_floatx80(fptemp);
env->fpus &= (~0x4700);
void helper_fsqrt(void)
if (floatx80_is_neg(ST0)) {
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
ST0 = floatx80_sqrt(ST0, &env->fp_status);
void helper_fsincos(void)
double fptemp = floatx80_to_double(ST0);
if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
ST0 = double_to_floatx80(sin(fptemp));
ST0 = double_to_floatx80(cos(fptemp));
env->fpus &= (~0x400); /* C2 <-- 0 */
/* the above code is for |arg| < 2**63 only */
void helper_frndint(void)
ST0 = floatx80_round_to_int(ST0, &env->fp_status);
void helper_fscale(void)
if (floatx80_is_any_nan(ST1)) {
int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
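/*
 * Worked example: FSCALE computes ST0 * 2^trunc(ST1), so ST0 = 3.0
 * with ST1 = 2.7 gives n = 2 and a result of 12.0;
 * floatx80_scalbn() handles overflow and underflow through fp_status.
 */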
void helper_fsin(void)
double fptemp = floatx80_to_double(ST0);
if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
ST0 = double_to_floatx80(sin(fptemp));
env->fpus &= (~0x400); /* C2 <-- 0 */
/* the above code is for |arg| < 2**53 only */
void helper_fcos(void)
double fptemp = floatx80_to_double(ST0);
if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
ST0 = double_to_floatx80(cos(fptemp));
env->fpus &= (~0x400); /* C2 <-- 0 */
/* the above code is for |arg| < 2**63 only */
void helper_fxam_ST0(void)
env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
env->fpus |= 0x200; /* C1 <-- 1 */
/* XXX: test fptags too */
expdif = EXPD(temp);
if (expdif == MAXEXPD) {
if (MANTD(temp) == 0x8000000000000000ULL)
env->fpus |= 0x500 /*Infinity*/;
env->fpus |= 0x100 /*NaN*/;
} else if (expdif == 0) {
if (MANTD(temp) == 0)
env->fpus |= 0x4000 /*Zero*/;
env->fpus |= 0x4400 /*Denormal*/;
void helper_fstenv(target_ulong ptr, int data32)
int fpus, fptag, exp, i;
fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
for (i = 7; i >= 0; i--) {
if (env->fptags[i]) {
tmp.d = env->fpregs[i].d;
if (exp == 0 && mant == 0) {
} else if (exp == 0 || exp == MAXEXPD
|| (mant & (1LL << 63)) == 0
/* NaNs, infinity, denormal */
stl(ptr, env->fpuc);
stl(ptr + 8, fptag);
stl(ptr + 12, 0); /* fpip */
stl(ptr + 16, 0); /* fpcs */
stl(ptr + 20, 0); /* fpoo */
stl(ptr + 24, 0); /* fpos */
stw(ptr, env->fpuc);
stw(ptr + 4, fptag);
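/*
 * The environment image stored above is 28 bytes in the 32-bit form
 * (stl at offsets 0, 4, 8, ...) and 14 bytes in the 16-bit form (stw
 * at offsets 0, 2, 4, ...), which is why helper_fsave()/helper_frstor()
 * skip (14 << data32) bytes before the register images.
 */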
void helper_fldenv(target_ulong ptr, int data32)
env->fpuc = lduw(ptr);
fpus = lduw(ptr + 4);
fptag = lduw(ptr + 8);
env->fpuc = lduw(ptr);
fpus = lduw(ptr + 2);
fptag = lduw(ptr + 4);
env->fpstt = (fpus >> 11) & 7;
env->fpus = fpus & ~0x3800;
for(i = 0; i < 8; i++) {
env->fptags[i] = ((fptag & 3) == 3);
void helper_fsave(target_ulong ptr, int data32)
helper_fstenv(ptr, data32);
ptr += (14 << data32);
for(i = 0; i < 8; i++) {
helper_fstt(tmp, ptr);
void helper_frstor(target_ulong ptr, int data32)
helper_fldenv(ptr, data32);
ptr += (14 << data32);
for(i = 0; i < 8; i++) {
tmp = helper_fldt(ptr);
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
CPUX86State *saved_env;
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
cpu_x86_load_seg_cache(env, seg_reg, selector,
(selector << 4), 0xffff, 0);
helper_load_seg(seg_reg, selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
CPUX86State *saved_env;
helper_fsave(ptr, data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
CPUX86State *saved_env;
helper_frstor(ptr, data32);
void helper_fxsave(target_ulong ptr, int data64)
int fpus, fptag, i, nb_xmm_regs;
/* The operand must be 16 byte aligned */
raise_exception(EXCP0D_GPF);
fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
for(i = 0; i < 8; i++) {
fptag |= (env->fptags[i] << i);
stw(ptr, env->fpuc);
stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
stq(ptr + 0x08, 0); /* rip */
stq(ptr + 0x10, 0); /* rdp */
stl(ptr + 0x08, 0); /* eip */
stl(ptr + 0x0c, 0); /* sel */
stl(ptr + 0x10, 0); /* dp */
stl(ptr + 0x14, 0); /* sel */
for(i = 0; i < 8; i++) {
helper_fstt(tmp, addr);
if (env->cr[4] & CR4_OSFXSR_MASK) {
/* XXX: finish it */
stl(ptr + 0x18, env->mxcsr); /* mxcsr */
stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
if (env->hflags & HF_CS64_MASK)
/* Fast FXSAVE leaves out the XMM registers */
if (!(env->efer & MSR_EFER_FFXSR)
|| (env->hflags & HF_CPL_MASK)
|| !(env->hflags & HF_LMA_MASK)) {
for(i = 0; i < nb_xmm_regs; i++) {
stq(addr, env->xmm_regs[i].XMM_Q(0));
stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
void helper_fxrstor(target_ulong ptr, int data64)
int i, fpus, fptag, nb_xmm_regs;
/* The operand must be 16 byte aligned */
raise_exception(EXCP0D_GPF);
env->fpuc = lduw(ptr);
fpus = lduw(ptr + 2);
fptag = lduw(ptr + 4);
env->fpstt = (fpus >> 11) & 7;
env->fpus = fpus & ~0x3800;
for(i = 0; i < 8; i++) {
env->fptags[i] = ((fptag >> i) & 1);
for(i = 0; i < 8; i++) {
tmp = helper_fldt(addr);
if (env->cr[4] & CR4_OSFXSR_MASK) {
/* XXX: finish it */
env->mxcsr = ldl(ptr + 0x18);
if (env->hflags & HF_CS64_MASK)
/* Fast FXRESTORE leaves out the XMM registers */
if (!(env->efer & MSR_EFER_FFXSR)
|| (env->hflags & HF_CPL_MASK)
|| !(env->hflags & HF_LMA_MASK)) {
for(i = 0; i < nb_xmm_regs; i++) {
env->xmm_regs[i].XMM_Q(0) = ldq(addr);
env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
*pmant = temp.l.lower;
*pexp = temp.l.upper;
floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
temp.l.upper = upper;
temp.l.lower = mant;
#ifdef TARGET_X86_64
//#define DEBUG_MULDIV
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
static void neg128(uint64_t *plow, uint64_t *phigh)
add128(plow, phigh, 1, 0);
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
uint64_t q, r, a1, a0;
/* XXX: use a better algorithm */
for(i = 0; i < 64; i++) {
a1 = (a1 << 1) | (a0 >> 63);
if (ab || a1 >= b) {
a0 = (a0 << 1) | qb;
#if defined(DEBUG_MULDIV)
printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
*phigh, *plow, b, a0, a1);
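/*
 * Worked example for the shift-and-subtract loop above: dividing the
 * 128-bit value 2^64 (*phigh = 1, *plow = 0) by b = 3 yields
 * q = 0x5555555555555555 with remainder 1 after the 64 iterations.
 */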
/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
sa = ((int64_t)*phigh < 0);
neg128(plow, phigh);
if (div64(plow, phigh, b) != 0)
if (*plow > (1ULL << 63))
if (*plow >= (1ULL << 63))
void helper_mulq_EAX_T0(target_ulong t0)
mulu64(&r0, &r1, EAX, t0);
void helper_imulq_EAX_T0(target_ulong t0)
muls64(&r0, &r1, EAX, t0);
CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
muls64(&r0, &r1, t0, t1);
CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
void helper_divq_EAX(target_ulong t0)
raise_exception(EXCP00_DIVZ);
if (div64(&r0, &r1, t0))
raise_exception(EXCP00_DIVZ);
void helper_idivq_EAX(target_ulong t0)
raise_exception(EXCP00_DIVZ);
if (idiv64(&r0, &r1, t0))
raise_exception(EXCP00_DIVZ);
static void do_hlt(void)
env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
env->exception_index = EXCP_HLT;
void helper_hlt(int next_eip_addend)
helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
EIP += next_eip_addend;
void helper_monitor(target_ulong ptr)
if ((uint32_t)ECX != 0)
raise_exception(EXCP0D_GPF);
/* XXX: store address ? */
helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
void helper_mwait(int next_eip_addend)
if ((uint32_t)ECX != 0)
raise_exception(EXCP0D_GPF);
helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
EIP += next_eip_addend;
/* XXX: not complete but not completely erroneous */
if (env->cpu_index != 0 || env->next_cpu != NULL) {
/* more than one CPU: do not sleep because another CPU may
void helper_debug(void)
env->exception_index = EXCP_DEBUG;
void helper_reset_rf(void)
env->eflags &= ~RF_MASK;
void helper_raise_interrupt(int intno, int next_eip_addend)
raise_interrupt(intno, 1, 0, next_eip_addend);
void helper_raise_exception(int exception_index)
raise_exception(exception_index);
void helper_cli(void)
env->eflags &= ~IF_MASK;
void helper_sti(void)
env->eflags |= IF_MASK;
/* vm86plus instructions */
void helper_cli_vm(void)
env->eflags &= ~VIF_MASK;
void helper_sti_vm(void)
env->eflags |= VIF_MASK;
if (env->eflags & VIP_MASK) {
raise_exception(EXCP0D_GPF);
void helper_set_inhibit_irq(void)
env->hflags |= HF_INHIBIT_IRQ_MASK;
void helper_reset_inhibit_irq(void)
env->hflags &= ~HF_INHIBIT_IRQ_MASK;
void helper_boundw(target_ulong a0, int v)
high = ldsw(a0 + 2);
if (v < low || v > high) {
raise_exception(EXCP05_BOUND);
void helper_boundl(target_ulong a0, int v)
if (v < low || v > high) {
raise_exception(EXCP05_BOUND);
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _mmu
#include "softmmu_template.h"
#include "softmmu_template.h"
#include "softmmu_template.h"
#include "softmmu_template.h"
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
TranslationBlock *tb;
CPUX86State *saved_env;
ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
/* now we have a real cpu fault */
pc = (unsigned long)retaddr;
tb = tb_find_pc(pc);
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
cpu_restore_state(tb, env, pc);
raise_exception_err(env->exception_index, env->error_code);
/* Secure Virtual Machine helpers */
#if defined(CONFIG_USER_ONLY)
void helper_vmrun(int aflag, int next_eip_addend)
void helper_vmmcall(void)
void helper_vmload(int aflag)
void helper_vmsave(int aflag)
void helper_stgi(void)
void helper_clgi(void)
void helper_skinit(void)
void helper_invlpga(int aflag)
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
void svm_check_intercept(CPUState *env1, uint32_t type)
void helper_svm_check_io(uint32_t port, uint32_t param,
uint32_t next_eip_addend)
static inline void svm_save_seg(target_phys_addr_t addr,
const SegmentCache *sc)
stw_phys(addr + offsetof(struct vmcb_seg, selector),
stq_phys(addr + offsetof(struct vmcb_seg, base),
stl_phys(addr + offsetof(struct vmcb_seg, limit),
stw_phys(addr + offsetof(struct vmcb_seg, attrib),
((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
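/*
 * The VMCB attrib field is a 12-bit compressed form of the cached
 * descriptor flags: flag bits 8-15 map to attrib bits 0-7 and flag
 * bits 20-23 to attrib bits 8-11.  Round-trip example: flags
 * 0x00c09b00 pack to 0xc9b in svm_save_seg() and unpack back to
 * 0x00c09b00 here, so the two routines are inverses for the
 * architecturally defined bits.
 */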
5103
static inline void svm_load_seg_cache(target_phys_addr_t addr,
5104
CPUState *env, int seg_reg)
5106
SegmentCache sc1, *sc = &sc1;
5107
svm_load_seg(addr, sc);
5108
cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
5109
sc->base, sc->limit, sc->flags);
5112
void helper_vmrun(int aflag, int next_eip_addend)
5118
helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
5123
addr = (uint32_t)EAX;
5125
qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
5127
env->vm_vmcb = addr;
5129
/* save the current CPU state in the hsave page */
5130
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5131
stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5133
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5134
stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5136
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
5137
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
5138
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
5139
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
5140
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
5141
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
5143
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
5144
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
5146
svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
5148
svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
5150
svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
5152
svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
5155
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
5156
EIP + next_eip_addend);
5157
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
5158
stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
5160
/* load the interception bitmaps so we do not need to access the
5162
env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
5163
env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
5164
env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
5165
env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
5166
env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
5167
env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
5169
/* enable intercepts */
5170
env->hflags |= HF_SVMI_MASK;
5172
env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
5174
env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
5175
env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
5177
env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
5178
env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
5180
/* clear exit_info_2 so we behave like the real hardware */
5181
stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
5183
cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
5184
cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
5185
cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
5186
env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
5187
int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5188
env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5189
if (int_ctl & V_INTR_MASKING_MASK) {
5190
env->v_tpr = int_ctl & V_TPR_MASK;
5191
env->hflags2 |= HF2_VINTR_MASK;
5192
if (env->eflags & IF_MASK)
5193
env->hflags2 |= HF2_HIF_MASK;
5197
ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5199
load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5200
~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5201
CC_OP = CC_OP_EFLAGS;
5203
svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5205
svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5207
svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5209
svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5212
EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5214
ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5215
EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5216
env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5217
env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5218
cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5220
/* FIXME: guest state consistency checks */
5222
switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5223
case TLB_CONTROL_DO_NOTHING:
5225
case TLB_CONTROL_FLUSH_ALL_ASID:
5226
/* FIXME: this is not 100% correct but should work for now */
5231
env->hflags2 |= HF2_GIF_MASK;
5233
if (int_ctl & V_IRQ_MASK) {
5234
env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5237
    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt_all(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
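/* VMMCALL is only useful when intercepted by the hypervisor; when it is
   not intercepted, real hardware raises #UD, which is what the
   raise_exception(EXCP06_ILLOP) below emulates. */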
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}
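/* VMLOAD reads the part of the guest state that VMRUN does not load
   itself (FS, GS, TR, LDTR and the syscall/sysenter MSRs) from the VMCB
   whose physical address is in rAX; aflag selects the address size. */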
void helper_vmload(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
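/* VMSAVE is the mirror image of VMLOAD: it stores the same subset of
   CPU state back into the VMCB addressed by rAX. */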
void helper_vmsave(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
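/* STGI/CLGI set and clear the global interrupt flag (GIF), which is
   modelled here as a bit in hflags2 rather than in EFLAGS. */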
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
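/* INVLPGA invalidates the guest TLB mapping for the virtual address in
   rAX; the ASID supplied in ECX is ignored here since QEMU's software
   TLB does not model ASIDs. */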
void helper_invlpga(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
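/* Common intercept check, called before any operation that may be
   intercepted.  The intercept bitmaps were copied out of the VMCB at
   VMRUN time; when the bit matching `type' is set, helper_vmexit() runs
   and control returns to the host.  MSR accesses additionally consult
   the MSR permission map, which holds two bits per MSR: the low bit
   intercepts reads, the high bit writes, selected by `param'. */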
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;

            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
void svm_check_intercept(CPUState *env1, uint32_t type)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    helper_svm_check_intercept_param(type, 0);
    env = saved_env;
}
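/* IN/OUT intercepts are driven by the I/O permission map: one bit per
   port, with multi-byte accesses testing one bit per byte.  `param'
   already holds the SVM_IOIO_* encoding of exit_info_1, including the
   access size in bits 4-6. */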
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
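/* helper_vmexit() never returns to its caller: it writes the guest state
   back into the VMCB, reloads the host state saved in vm_hsave at VMRUN
   time and then longjmps back to the main loop via cpu_loop_exit(). */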
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}
/* XXX: optimize by storing fptt and fptags in the static cpu state */

#define SSE_DAZ             0x0040
#define SSE_RC_MASK         0x6000
#define SSE_RC_NEAR         0x0000
#define SSE_RC_DOWN         0x2000
#define SSE_RC_UP           0x4000
#define SSE_RC_CHOP         0x6000
#define SSE_FZ              0x8000
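/* MXCSR fields handled by update_sse_status(): DAZ is bit 6, the
   rounding-control field RC occupies bits 13-14 and FZ is bit 15; they
   are translated into softfloat's rounding mode and the
   flush-to-zero/denormals-are-zero settings of env->sse_status.  For
   example, helper_ldmxcsr(0x9fc0) selects round-to-nearest with FZ and
   DAZ set (0x1f80 being the reset value that masks all exceptions). */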
static void update_sse_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->mxcsr & SSE_RC_MASK) {
    default:
    case SSE_RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case SSE_RC_DOWN:
        rnd_type = float_round_down;
        break;
    case SSE_RC_UP:
        rnd_type = float_round_up;
        break;
    case SSE_RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->sse_status);

    /* set denormals are zero */
    set_flush_inputs_to_zero((env->mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status);

    /* set flush to zero */
    set_flush_to_zero((env->mxcsr & SSE_FZ) ? 1 : 0, &env->sse_status);
}
void helper_ldmxcsr(uint32_t val)
{
    env->mxcsr = val;
    update_sse_status();
}
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
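/* ops_sse.h is included twice: SHIFT 0 instantiates the 64-bit MMX
   variants of the vector helpers, SHIFT 1 the 128-bit SSE variants.
   helper_template.h is likewise instantiated once per operand width
   (SHIFT 0..3 for the 8/16/32/64-bit helpers); each header #undefs
   SHIFT when it is done. */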
#include "ops_sse.h"
5699
#include "ops_sse.h"
5702
#include "helper_template.h"
5706
#include "helper_template.h"
5710
#include "helper_template.h"
5713
#ifdef TARGET_X86_64
5716
#include "helper_template.h"
5721
/* bit operations */
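/* helper_bsf() returns the index of the least significant set bit;
   helper_lzcnt() counts leading zeros within `wordsize' bits, and with
   wordsize == 0 it doubles as the BSR kernel, returning the index of the
   most significant set bit (see helper_bsr() below).  Note that the scan
   loops do not terminate for t0 == 0: hardware leaves the destination of
   BSF/BSR undefined in that case, so the caller is expected to handle a
   zero source. */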
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
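/* x86 condition codes are evaluated lazily: instead of computing EFLAGS
   after every instruction, the translator records the last flag-setting
   operation in CC_OP together with its operands in CC_SRC/CC_DST, and
   the helpers below reconstruct the flags on demand -- either all of
   them, or just CF for instructions that only consume the carry. */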
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
uint32_t cpu_cc_compute_all(CPUState *env1, int op)
{
    CPUState *saved_env;
    uint32_t ret;

    saved_env = env;
    env = env1;
    ret = helper_cc_compute_all(op);
    env = saved_env;
    return ret;
}
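/* Carry-only variant of the above.  Several groups collapse to a shared
   kernel: every MUL size uses compute_c_mull(), since CF is set exactly
   when the high half of the product (kept in CC_SRC) is non-zero, and
   INC/DEC of every size use compute_c_incl(), since those instructions
   leave CF untouched and the previous carry is preserved in CC_SRC. */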
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}