@@ -625 +625 @@
     int has_error_code, new_stack, shift;
     uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
     uint32_t old_eip, sp_mask;
-    int svm_should_check = 1;
-    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
-        svm_should_check = 0;
-        && (INTERCEPTEDl(_exceptions, 1 << intno)
-        raise_interrupt(intno, is_int, error_code, 0);
     has_error_code = 0;
     if (!is_int && !is_hw) {
@@ -872 +861 @@
     int has_error_code, new_stack;
     uint32_t e1, e2, e3, ss;
     target_ulong old_eip, esp, offset;
-    int svm_should_check = 1;
-    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
-        svm_should_check = 0;
-        && INTERCEPTEDl(_exceptions, 1 << intno)
-        raise_interrupt(intno, is_int, error_code, 0);
     has_error_code = 0;
     if (!is_int && !is_hw) {
@@ -1140 +1119 @@
     uint32_t offset, esp;
     uint32_t old_cs, old_eip;
-    int svm_should_check = 1;
-    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
-        svm_should_check = 0;
-    if (svm_should_check
-        && INTERCEPTEDl(_exceptions, 1 << intno)
-        raise_interrupt(intno, is_int, error_code, 0);
     /* real mode (simpler !) */
     dt = &env->idt;
     if (intno * 4 + 3 > dt->limit)
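
The three hunks above drop the same open-coded guard from what appear to be the protected-mode, long-mode and real-mode interrupt paths. Reconstructed from the captured fragments, the removed guard had roughly this shape; the lines between the fragments (such as the next_eip assignment and the closing braces) are assumptions:

    /* Reconstruction for reference only, not part of the patch text. */
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip == -1) {
        next_eip = EIP;                  /* assumed, not captured above */
        svm_should_check = 0;
    }
    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
            && !is_int && !is_hw))
        raise_interrupt(intno, is_int, error_code, 0);
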
@@ -1316 +1287 @@
     cpu_loop_exit();
-/* same as raise_exception_err, but do not restore global registers */
-static void raise_exception_err_norestore(int exception_index, int error_code)
-    exception_index = check_exception(exception_index, &error_code);
-    env->exception_index = exception_index;
-    env->error_code = error_code;
-    env->exception_is_int = 0;
-    env->exception_next_eip = 0;
-    longjmp(env->jmp_env, 1);
 /* shortcuts to generate exceptions */
 void (raise_exception_err)(int exception_index, int error_code)
@@ -2960 +2921 @@
-void helper_movl_crN_T0(int reg, target_ulong t0)
-#if !defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_USER_ONLY)
+target_ulong helper_read_crN(int reg)
+void helper_write_crN(int reg, target_ulong t0)
+target_ulong helper_read_crN(int reg)
+    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
+        val = cpu_get_apic_tpr(env);
+void helper_write_crN(int reg, target_ulong t0)
+    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
         cpu_x86_update_cr0(env, t0);
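
The fragments above replace helper_movl_crN_T0() with separate read/write helpers that route through the SVM intercept check before touching the control register. A sketch of the shape helper_read_crN() takes after this change, under the assumption that only CR8 needs special handling (via the APIC TPR) and everything else reads straight from env->cr[]:

    target_ulong helper_read_crN(int reg)
    {
        target_ulong val;

        /* give SVM a chance to intercept the CR read first */
        helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
        switch (reg) {
        default:
            val = env->cr[reg];
            break;
        case 8:
            /* CR8 mirrors the task-priority register of the local APIC */
            val = cpu_get_apic_tpr(env);
            break;
        }
        return val;
    }
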
@@ -4560 +4556 @@
     if ((uint32_t)ECX != 0)
         raise_exception(EXCP0D_GPF);
     /* XXX: store address ? */
+    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
 void helper_mwait(void)
     if ((uint32_t)ECX != 0)
         raise_exception(EXCP0D_GPF);
+    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
     /* XXX: not complete but not completely erroneous */
     if (env->cpu_index != 0 || env->next_cpu != NULL) {
         /* more than one CPU: do not sleep because another CPU may
@@ -4763 +4754 @@
-static inline uint32_t
-vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
-    return ((vmcb_attrib & 0x00ff) << 8)        /* Type, S, DPL, P */
-           | ((vmcb_attrib & 0x0f00) << 12)     /* AVL, L, DB, G */
-           | ((vmcb_base >> 16) & 0xff)         /* Base 23-16 */
-           | (vmcb_base & 0xff000000)           /* Base 31-24 */
-           | (vmcb_limit & 0xf0000);            /* Limit 19-16 */
-static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
-    return ((cpu_attrib >> 8) & 0xff)           /* Type, S, DPL, P */
-           | ((cpu_attrib & 0xf00000) >> 12);   /* AVL, L, DB, G */
+static inline void svm_save_seg(target_phys_addr_t addr,
+                                const SegmentCache *sc)
+    stw_phys(addr + offsetof(struct vmcb_seg, selector),
+    stq_phys(addr + offsetof(struct vmcb_seg, base),
+    stl_phys(addr + offsetof(struct vmcb_seg, limit),
+    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
+             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
+static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
+    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
+    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
+    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
+    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
+    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
+static inline void svm_load_seg_cache(target_phys_addr_t addr,
+                                      CPUState *env, int seg_reg)
+    SegmentCache sc1, *sc = &sc1;
+    svm_load_seg(addr, sc);
+    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
+                           sc->base, sc->limit, sc->flags);
 void helper_vmrun(void)
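
Illustration only, not part of the patch: a worked example of the flags/attrib packing used by svm_save_seg() and svm_load_seg() above, assuming the usual layout where the CPU segment-cache flags keep type/S/DPL/P in bits 8-15 and AVL/L/DB/G in bits 20-23, while the VMCB attrib field packs the same bits into bits 0-7 and 8-11.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t flags  = 0x00c09b00;    /* flat 32-bit code segment */
        uint16_t attrib = ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
        uint32_t back   = ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);

        assert(attrib == 0x0c9b);   /* G=1, DB=1, P=1, DPL=0, S=1, type=0xb */
        assert(back == flags);      /* the two conversions round-trip */
        return 0;
    }
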
@@ -4806 +4816 @@
     stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
     stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
-    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
-    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
-    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
-    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);
+    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
+    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
+    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
+    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
     stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
     stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
     /* load the interception bitmaps so we do not need to access the
        vmcb in svm mode */
-    /* We shift all the intercept bits so we can OR them with the TB
-    env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
+    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
     env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
     env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
     env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
     env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
     env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
+    /* enable intercepts */
+    env->hflags |= HF_SVMI_MASK;
     env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
     env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
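
With the shifted/flagged encoding gone, env->intercept now holds the VMCB intercept word verbatim, so individual intercepts are tested against bit (exit_code - SVM_EXIT_INTR), as the SVM_EXIT_MSR and SVM_EXIT_IOIO checks further down do. A sketch of that test as a helper; cpu_svm_has_intercept is a hypothetical name used purely for illustration and is not added by this patch:

    /* Hypothetical convenience wrapper: test one bit of the raw intercept
     * word loaded from the VMCB above. */
    static inline int cpu_svm_has_intercept(CPUState *env, uint32_t exit_code)
    {
        return (env->intercept & (1ULL << (exit_code - SVM_EXIT_INTR))) != 0;
    }
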
@@ -4857 +4872 @@
     load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
     CC_OP = CC_OP_EFLAGS;
-    CC_DST = 0xffffffff;
-    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
-    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
-    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
-    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);
+    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
+    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
+    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
+    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
     EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
     env->eip = EIP;
@@ -4943 +4962 @@
 void helper_vmmcall(void)
-    if (loglevel & CPU_LOG_TB_IN_ASM)
-        fprintf(logfile,"vmmcall!\n");
+    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
+    raise_exception(EXCP06_ILLOP);
@@ -4949 +4968 @@
 void helper_vmload(void)
     target_ulong addr;
+    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
+    /* XXX: invalid in 32 bit */
     if (loglevel & CPU_LOG_TB_IN_ASM)
         fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                 env->segs[R_FS].base);
-    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
-    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
-    SVM_LOAD_SEG2(addr, tr, tr);
-    SVM_LOAD_SEG2(addr, ldt, ldtr);
+    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
+    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
+    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
+    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
 #ifdef TARGET_X86_64
     env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
@@ -4975 +5001 @@
 void helper_vmsave(void)
     target_ulong addr;
+    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
     if (loglevel & CPU_LOG_TB_IN_ASM)
         fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                 env->segs[R_FS].base);
-    SVM_SAVE_SEG(addr, segs[R_FS], fs);
-    SVM_SAVE_SEG(addr, segs[R_GS], gs);
-    SVM_SAVE_SEG(addr, tr, tr);
-    SVM_SAVE_SEG(addr, ldt, ldtr);
+    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
+    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
+    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
+    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
 #ifdef TARGET_X86_64
     stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
     stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
@@ -5001 +5032 @@
+void helper_stgi(void)
+    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
+    env->hflags |= HF_GIF_MASK;
+void helper_clgi(void)
+    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
+    env->hflags &= ~HF_GIF_MASK;
 void helper_skinit(void)
+    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
+    /* XXX: not implemented */
     if (loglevel & CPU_LOG_TB_IN_ASM)
         fprintf(logfile,"skinit!\n");
+    raise_exception(EXCP06_ILLOP);
 void helper_invlpga(void)
+    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
     tlb_flush(env, 0);
@@ -5012 +5059 @@
 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
+    if (likely(!(env->hflags & HF_SVMI_MASK)))
     case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
-        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
-            helper_vmexit(type, param);
-    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
-        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
+        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
             helper_vmexit(type, param);
     case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
-        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
-            helper_vmexit(type, param);
-    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
-        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
-            helper_vmexit(type, param);
-    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
-        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
-            helper_vmexit(type, param);
+        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
+            helper_vmexit(type, param);
+    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
+        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
+            helper_vmexit(type, param);
+    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
+        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
+            helper_vmexit(type, param);
+    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
+        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
+            helper_vmexit(type, param);
     case SVM_EXIT_MSR:
-        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
+        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
             /* FIXME: this should be read in at vmrun (faster this way?) */
             uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
             uint32_t t0, t1;
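
The MSR case is cut off just after declaring t0/t1; what follows in the file is a lookup into the MSR permission bitmap at msrpm_base_pa. A sketch of that kind of lookup under the AMD SVM layout (two permission bits per MSR, three 2 KB regions covering 0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff); this is an illustration, not the patch's verbatim code:

    /* Sketch: return non-zero when the guest's MSRPM marks this MSR access
     * as intercepted.  is_write selects the second of the two bits. */
    static int msrpm_intercepted(uint64_t msrpm_base, uint32_t msr, int is_write)
    {
        uint32_t offset;                       /* bit offset into the bitmap */

        if (msr <= 0x1fff)
            offset = msr * 2;
        else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
            offset = 0x800 * 8 + (msr - 0xc0000000) * 2;   /* second 2 KB page */
        else if (msr >= 0xc0010000 && msr <= 0xc0011fff)
            offset = 0x1000 * 8 + (msr - 0xc0010000) * 2;  /* third 2 KB page */
        else
            return 1;                          /* MSRs outside the map vmexit */

        /* each MSR owns two adjacent bits: read permission, then write */
        return (ldub_phys(msrpm_base + offset / 8) >> (offset % 8 + is_write)) & 1;
    }
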
@@ -5081 +5127 @@
 void helper_svm_check_io(uint32_t port, uint32_t param,
                          uint32_t next_eip_addend)
-    if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
+    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
         /* FIXME: this should be read in at vmrun (faster this way?) */
         uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
         uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
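
For orientation, a sketch of the IOPM test that the truncated lines above lead into (illustration, not the patch's verbatim code). Bits 4-6 of param carry the access size in bytes (SZ8/SZ16/SZ32 encode as 1, 2 and 4), and the I/O permission bitmap holds one bit per port, so the test must cover "size" consecutive bits starting at bit "port":

    uint16_t mask = (1 << ((param >> 4) & 7)) - 1;   /* 1 byte -> 0x1, 2 -> 0x3, 4 -> 0xf */
    if (lduw_phys(addr + port / 8) & (mask << (port & 7)))
        helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));  /* assumed exit_info_1 layout */
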
@@ -5115 +5161 @@
     /* Save the VM state in the vmcb */
-    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
-    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
-    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
-    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);
+    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
+    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
+    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
+    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
     stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
     stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
@@ -5169 +5220 @@
     env->hflags &= ~HF_LMA_MASK;
     if (env->efer & MSR_EFER_LMA)
         env->hflags |= HF_LMA_MASK;
+    /* XXX: should also emulate the VM_CR MSR */
+    env->hflags &= ~HF_SVME_MASK;
+    if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
+        if (env->efer & MSR_EFER_SVME)
+            env->hflags |= HF_SVME_MASK;
+        env->efer &= ~MSR_EFER_SVME;
     env->eflags = 0;
                 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
     CC_OP = CC_OP_EFLAGS;
-    SVM_LOAD_SEG(env->vm_hsave, ES, es);
-    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
-    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
-    SVM_LOAD_SEG(env->vm_hsave, DS, ds);
+    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
+    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
+    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
+    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
     EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
     ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));