pVM->hwaccm.s.vmx.pAPICPhys = 0;

/* Allocate the MSR bitmap if this feature is supported. */
if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
{
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjMSRBitmap, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    pVM->hwaccm.s.vmx.pMSRBitmap     = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjMSRBitmap);
    pVM->hwaccm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjMSRBitmap, 0);
    memset(pVM->hwaccm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);
}
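/* A set bit in the MSR bitmap causes a VM-exit on the corresponding MSR
   access, so filling the page with 0xff intercepts everything by default;
   bits are cleared later for MSRs the guest may access directly. */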
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjScratch, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
pVCpu->hwaccm.s.vmx.pVAPICPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjVAPIC, 0);
ASMMemZero32(pVCpu->hwaccm.s.vmx.pVAPIC, PAGE_SIZE);
/* Allocate the MSR bitmap if this feature is supported. */
if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
{
    rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    pVCpu->hwaccm.s.vmx.pMSRBitmap     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap);
    pVCpu->hwaccm.s.vmx.pMSRBitmapPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, 0);
    memset(pVCpu->hwaccm.s.vmx.pMSRBitmap, 0xff, PAGE_SIZE);
}
#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
/* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */
rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
pVCpu->hwaccm.s.vmx.pGuestMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR);
pVCpu->hwaccm.s.vmx.pGuestMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, 0);
memset(pVCpu->hwaccm.s.vmx.pGuestMSR, 0, PAGE_SIZE);

/* Allocate one page for the host MSR load area (for restoring host MSRs after the world switch back). */
rc = RTR0MemObjAllocCont(&pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
pVCpu->hwaccm.s.vmx.pHostMSR     = (uint8_t *)RTR0MemObjAddress(pVCpu->hwaccm.s.vmx.pMemObjHostMSR);
pVCpu->hwaccm.s.vmx.pHostMSRPhys = RTR0MemObjGetPagePhysAddr(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, 0);
memset(pVCpu->hwaccm.s.vmx.pHostMSR, 0, PAGE_SIZE);
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
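/* For reference, a minimal sketch of the entry format these load/store areas
   use, inferred from the VMXMSR fields referenced later in this file (the
   real definition lives in the VMX headers):

       typedef struct VMXMSR
       {
           uint32_t u32IndexMSR;   // MSR index (the ECX value for RDMSR/WRMSR)
           uint32_t u32Reserved;   // must be zero
           uint64_t u64Value;      // value loaded on VM-entry / stored on VM-exit
       } VMXMSR;
       typedef VMXMSR *PVMXMSR;

   Each entry is 16 bytes, so one 4K page holds up to 256 entries. */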
/* Current guest paging mode. */
pVCpu->hwaccm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
pVCpu->hwaccm.s.vmx.pVAPIC     = 0;
pVCpu->hwaccm.s.vmx.pVAPICPhys = 0;
if (pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
{
    RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap, false);
    pVCpu->hwaccm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
    pVCpu->hwaccm.s.vmx.pMSRBitmap       = 0;
    pVCpu->hwaccm.s.vmx.pMSRBitmapPhys   = 0;
}
#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
if (pVCpu->hwaccm.s.vmx.pMemObjHostMSR != NIL_RTR0MEMOBJ)
{
    RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjHostMSR, false);
    pVCpu->hwaccm.s.vmx.pMemObjHostMSR = NIL_RTR0MEMOBJ;
    pVCpu->hwaccm.s.vmx.pHostMSR       = 0;
    pVCpu->hwaccm.s.vmx.pHostMSRPhys   = 0;
}

if (pVCpu->hwaccm.s.vmx.pMemObjGuestMSR != NIL_RTR0MEMOBJ)
{
    RTR0MemObjFree(pVCpu->hwaccm.s.vmx.pMemObjGuestMSR, false);
    pVCpu->hwaccm.s.vmx.pMemObjGuestMSR = NIL_RTR0MEMOBJ;
    pVCpu->hwaccm.s.vmx.pGuestMSR       = 0;
    pVCpu->hwaccm.s.vmx.pGuestMSRPhys   = 0;
}
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
if (pVM->hwaccm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ)
{
    RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjAPIC, false);
    pVM->hwaccm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;
    pVM->hwaccm.s.vmx.pAPIC       = 0;
    pVM->hwaccm.s.vmx.pAPICPhys   = 0;
}
if (pVM->hwaccm.s.vmx.pMemObjMSRBitmap != NIL_RTR0MEMOBJ)
{
    RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjMSRBitmap, false);
    pVM->hwaccm.s.vmx.pMemObjMSRBitmap = NIL_RTR0MEMOBJ;
    pVM->hwaccm.s.vmx.pMSRBitmap       = 0;
    pVM->hwaccm.s.vmx.pMSRBitmapPhys   = 0;
}
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
if (pVM->hwaccm.s.vmx.pMemObjScratch != NIL_RTR0MEMOBJ)
/* Exit on CR8 reads & writes when the TPR shadow feature isn't present. */
val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT;
#ifdef VBOX_WITH_VTX_MSR_BITMAPS
if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
{
    Assert(pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
    val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
}
#endif
/* We will use the secondary control if it's present. */
val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
/* Set the MSR bitmap address. */
if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
{
    Assert(pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
    rc = VMXWriteVMCS64(VMX_VMCS_CTRL_MSR_BITMAP_FULL, pVCpu->hwaccm.s.vmx.pMSRBitmapPhys);
    /* Allow the guest to directly modify these MSRs; they are restored and saved automatically. */
    vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, true, true);
    vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, true, true);
    vmxR0SetMSRPermission(pVCpu, MSR_IA32_SYSENTER_EIP, true, true);
    vmxR0SetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
    vmxR0SetMSRPermission(pVCpu, MSR_K6_STAR, true, true);
    vmxR0SetMSRPermission(pVCpu, MSR_K8_SF_MASK, true, true);
    vmxR0SetMSRPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, true, true);
    vmxR0SetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true);
    vmxR0SetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true);
}
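/* Passing true for both fRead and fWrite makes vmxR0SetMSRPermission() clear
   the read and write exit bits for each MSR above, so guest accesses no
   longer cause VM-exits; their values are synchronized through the MSR
   load/store areas during the world switch instead. */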
#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
/* Set the guest & host MSR load/store physical addresses. */
Assert(pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, pVCpu->hwaccm.s.vmx.pGuestMSRPhys);
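/* Note that the same physical page is used as both the VM-entry MSR-load
   area and the VM-exit MSR-store area, so values the guest changes are
   stored back into the very buffer that is loaded again on the next entry. */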
Assert(pVCpu->hwaccm.s.vmx.pHostMSRPhys);
rc = VMXWriteVMCS64(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, pVCpu->hwaccm.s.vmx.pHostMSRPhys);
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */

rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
/**
 * Sets the permission bits for the specified MSR.
 *
 * @param   pVCpu       The VMCPU to operate on.
 * @param   ulMSR       The MSR index.
 * @param   fRead       Reading allowed/disallowed.
 * @param   fWrite      Writing allowed/disallowed.
 */
static void vmxR0SetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite)
{
    uint8_t *pMSRBitmap = (uint8_t *)pVCpu->hwaccm.s.vmx.pMSRBitmap;
    unsigned ulBit;

    /*
     * Layout:
     * 0x000 - 0x3ff - Low MSR read bits
     * 0x400 - 0x7ff - High MSR read bits
     * 0x800 - 0xbff - Low MSR write bits
     * 0xc00 - 0xfff - High MSR write bits
     */
    if (ulMSR <= 0x00001FFF)
    {
        /* Pentium-compatible MSRs */
        ulBit = ulMSR;
    }
    else if (   ulMSR >= 0xC0000000
             && ulMSR <= 0xC0001FFF)
    {
        /* AMD Sixth Generation x86 Processor MSRs */
        ulBit = (ulMSR - 0xC0000000);
        pMSRBitmap += 0x400;    /* high-MSR bits live 0x400 into each half (see layout above) */
    }
    else
    {
        AssertFailed();
        return;
    }

    Assert(ulBit <= 0x1fff);
    if (fRead)
        ASMBitClear(pMSRBitmap, ulBit);             /* reads allowed: clear the read-exit bit */
    else
        ASMBitSet(pMSRBitmap, ulBit);

    if (fWrite)
        ASMBitClear(pMSRBitmap + 0x800, ulBit);     /* writes allowed: clear the write-exit bit */
    else
        ASMBitSet(pMSRBitmap + 0x800, ulBit);
}
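/* Worked example (illustration, not from the original source): MSR_K8_LSTAR is
   0xC0000082, so it falls in the high range and ulBit = 0x82. A call like
   vmxR0SetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true) therefore clears bit
   0x82 in the high-read region at offset 0x400 and bit 0x82 in the high-write
   region at offset 0x400 + 0x800 = 0xc00, matching the layout table above. */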
 * Injects an event (trap or external interrupt).
return VERR_VMX_INVALID_HOST_STATE;

pDesc = (PCX86DESCHC)(gdtr.pGdt + (SelTR & X86_SEL_MASK));
#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
if (VMX_IS_64BIT_HOST_MODE())
{
    uint64_t trBase64 = X86DESC64_BASE(*(PX86DESC64)pDesc);
    rc = VMXWriteVMCS64(VMX_VMCS_HOST_TR_BASE, trBase64);
    Log2(("VMX_VMCS_HOST_TR_BASE %RX64\n", trBase64));
}
#if 0 /* @todo deal with 32/64 */
/* Restore the host EFER - on CPUs that support it. */
if (pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR)
{
    uint64_t msrEFER = ASMRdMsr(MSR_IA32_EFER);
    rc = VMXWriteVMCS64(VMX_VMCS_HOST_FIELD_EFER_FULL, msrEFER);
}
#endif
#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
/* Store all host MSRs in the VM-Exit load area, so they will be reloaded after the world switch back to the host. */
PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pHostMSR;
unsigned idxMsr = 0;

/* EFER MSR present? */
if (ASMCpuId_EDX(0x80000001) & (X86_CPUID_AMD_FEATURE_EDX_NX|X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
{
    if (ASMCpuId_EDX(0x80000001) & X86_CPUID_AMD_FEATURE_EDX_SEP)
    {
        pMsr->u32IndexMSR = MSR_K6_STAR;
        pMsr->u32Reserved = 0;
        pMsr->u64Value    = ASMRdMsr(MSR_K6_STAR);          /* legacy syscall eip, cs & ss */
        pMsr++; idxMsr++;
    }

    pMsr->u32IndexMSR = MSR_K6_EFER;
    pMsr->u32Reserved = 0;
# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
    if (CPUMIsGuestInLongMode(pVCpu))
    {
        /* Must match the EFER value in our 64-bit switcher. */
        pMsr->u64Value = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
    }
    else
# endif
        pMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
    pMsr++; idxMsr++;
}

# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
if (VMX_IS_64BIT_HOST_MODE())
{
    pMsr->u32IndexMSR = MSR_K8_LSTAR;
    pMsr->u32Reserved = 0;
    pMsr->u64Value    = ASMRdMsr(MSR_K8_LSTAR);             /* 64-bit mode syscall rip */
    pMsr++; idxMsr++;
    pMsr->u32IndexMSR = MSR_K8_SF_MASK;
    pMsr->u32Reserved = 0;
    pMsr->u64Value    = ASMRdMsr(MSR_K8_SF_MASK);           /* syscall flag mask */
    pMsr++; idxMsr++;
    pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
    pMsr->u32Reserved = 0;
    pMsr->u64Value    = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);    /* swapgs exchange value */
    pMsr++; idxMsr++;
}
# endif
rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, idxMsr);
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
#ifdef DEBUG /* till after branching; enable it by default then. */
/* Intercept X86_XCPT_DB if stepping is enabled */
if (   DBGFIsStepping(pVCpu)
    || CPUMIsHyperDebugStateActive(pVCpu))
    u32TrapMask |= RT_BIT(X86_XCPT_DB);
/** @todo Don't trap it unless the debugger has armed breakpoints. */
u32TrapMask |= RT_BIT(X86_XCPT_BP);
#endif
val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
/* Load guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
#if 0 /* @todo deal with 32/64 */
/* Required for the EFER write below, not supported on all CPUs. */
val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR;
#endif
/* 64-bit guest mode? */
if (CPUMIsGuestInLongModeEx(pCtx))
    val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;

/* Save debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
#if 0 /* @todo deal with 32/64 */
val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG | VMX_VMCS_CTRL_EXIT_CONTROLS_LOAD_HOST_EFER_MSR;
#else
val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
#endif

#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
if (VMX_IS_64BIT_HOST_MODE())
pCtx->gsHid.Attr.n.u2Dpl = 0;
pCtx->ssHid.Attr.n.u2Dpl = 0;

/* The limit must correspond to the 32-bit setting. */
if (!pCtx->csHid.Attr.n.u1DefBig)
    pCtx->csHid.u32Limit &= 0xffff;
if (!pCtx->dsHid.Attr.n.u1DefBig)
    pCtx->dsHid.u32Limit &= 0xffff;
if (!pCtx->esHid.Attr.n.u1DefBig)
    pCtx->esHid.u32Limit &= 0xffff;
if (!pCtx->fsHid.Attr.n.u1DefBig)
    pCtx->fsHid.u32Limit &= 0xffff;
if (!pCtx->gsHid.Attr.n.u1DefBig)
    pCtx->gsHid.u32Limit &= 0xffff;
if (!pCtx->ssHid.Attr.n.u1DefBig)
    pCtx->ssHid.u32Limit &= 0xffff;
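/* With the D/B (default operation size) bit clear these are 16-bit segments,
   so their limits are forced down to the 16-bit maximum of 0xffff; 32-bit
   segments keep their full limit. */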
rc = VMXWriteVMCS64(VMX_VMCS64_GUEST_DR7, pCtx->dr[7]);

/* Sync the hypervisor debug state now if any breakpoint is armed. */
if (    CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK|X86_DR7_GD)
    && !CPUMIsHyperDebugStateActive(pVCpu)
    && !DBGFIsStepping(pVCpu))
{
    /* Save the host and load the hypervisor debug state. */
    rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);

    /* DRx intercepts remain enabled. */

    /* Override dr7 with the hypervisor value. */
    rc = VMXWriteVMCS64(VMX_VMCS64_GUEST_DR7, CPUMGetHyperDR7(pVCpu));
}
/* Sync the debug state now if any breakpoint is armed. */
if (    (pCtx->dr[7] & (X86_DR7_ENABLED_MASK|X86_DR7_GD))
    && !CPUMIsGuestDebugStateActive(pVCpu)
rc = VMXWriteVMCS(VMX_VMCS_GUEST_RFLAGS, eflags.u32);
if (TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hwaccm.s.vmx.u64TSCOffset))
{
    uint64_t u64CurTSC = ASMReadTSC();
    if (u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
    {
        /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET */
        rc = VMXWriteVMCS64(VMX_VMCS_CTRL_TSC_OFFSET_FULL, pVCpu->hwaccm.s.vmx.u64TSCOffset);
        pVCpu->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCOffset);
    }
    else
    {
        /* Fall back to rdtsc emulation as we would otherwise pass decreasing tsc values to the guest. */
        Log(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC, pVCpu->hwaccm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hwaccm.s.vmx.u64TSCOffset, TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hwaccm.s.vmx.u64TSCOffset, TMCpuTickGet(pVCpu)));
        pVCpu->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hwaccm.s.vmx.proc_ctls);
        STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTSCInterceptOverFlow);
    }
}
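/* The comparison above keeps the guest-visible TSC monotonic: under
   offsetting the guest reads host TSC + u64TSCOffset, which is only safe
   while u64CurTSC + u64TSCOffset is not below the last value the guest has
   already seen (TMCpuTickGetLastSeen); otherwise RDTSC is intercepted and
   emulated instead. */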
pVCpu->hwaccm.s.vmx.pfnStartVM = VMXR0StartVM32;
#if 0 /* @todo deal with 32/64 */
/* Unconditionally update the guest EFER - on CPUs that support it. */
if (pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1 & VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_GUEST_EFER_MSR)
    rc = VMXWriteVMCS64(VMX_VMCS_GUEST_EFER_FULL, pCtx->msrEFER);
#endif
vmxR0UpdateExceptionBitmap(pVM, pVCpu, pCtx);
#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
/* Store all guest MSRs in the VM-Entry load area, so they will be loaded during the world switch. */
PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
unsigned idxMsr = 0;
uint32_t ulTemp, ulEdx;     /* scratch for the CPUID leaf below */

CPUMGetGuestCpuId(pVCpu, 0x80000001, &ulTemp, &ulTemp, &ulTemp, &ulEdx);
/* EFER MSR present? */
if (ulEdx & (X86_CPUID_AMD_FEATURE_EDX_NX|X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
{
    pMsr->u32IndexMSR = MSR_K6_EFER;
    pMsr->u32Reserved = 0;
    pMsr->u64Value    = pCtx->msrEFER;
    /* VT-x will complain if only MSR_K6_EFER_LME is set. */
    if (!CPUMIsGuestInLongModeEx(pCtx))
        pMsr->u64Value &= ~(MSR_K6_EFER_LMA|MSR_K6_EFER_LME);
    pMsr++; idxMsr++;

    if (ulEdx & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
    {
        pMsr->u32IndexMSR = MSR_K8_LSTAR;
        pMsr->u32Reserved = 0;
        pMsr->u64Value    = pCtx->msrLSTAR;             /* 64-bit mode syscall rip */
        pMsr++; idxMsr++;
        pMsr->u32IndexMSR = MSR_K6_STAR;
        pMsr->u32Reserved = 0;
        pMsr->u64Value    = pCtx->msrSTAR;              /* legacy syscall eip, cs & ss */
        pMsr++; idxMsr++;
        pMsr->u32IndexMSR = MSR_K8_SF_MASK;
        pMsr->u32Reserved = 0;
        pMsr->u64Value    = pCtx->msrSFMASK;            /* syscall flag mask */
        pMsr++; idxMsr++;
        pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
        pMsr->u32Reserved = 0;
        pMsr->u64Value    = pCtx->msrKERNELGSBASE;      /* swapgs exchange value */
        pMsr++; idxMsr++;
    }
}
pVCpu->hwaccm.s.vmx.cCachedMSRs = idxMsr;

rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr);
rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, idxMsr);
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
pVCpu->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;
/* In real mode we have a fake TSS, so only sync it back when it's supposed to be valid. */
VMX_READ_SELREG(TR, tr);
#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
/* Save the possibly changed MSRs that we automatically restore and save during a world switch. */
for (unsigned i = 0; i < pVCpu->hwaccm.s.vmx.cCachedMSRs; i++)
{
    PVMXMSR pMsr = (PVMXMSR)pVCpu->hwaccm.s.vmx.pGuestMSR;
    pMsr += i;

    switch (pMsr->u32IndexMSR)
    {
    case MSR_K8_LSTAR:
        pCtx->msrLSTAR = pMsr->u64Value;
        break;
    case MSR_K6_STAR:
        pCtx->msrSTAR = pMsr->u64Value;
        break;
    case MSR_K8_SF_MASK:
        pCtx->msrSFMASK = pMsr->u64Value;
        break;
    case MSR_K8_KERNEL_GS_BASE:
        pCtx->msrKERNELGSBASE = pMsr->u64Value;
        break;
    case MSR_K6_EFER:
        /* EFER can't be changed without causing a VM-exit. */
        // Assert(pCtx->msrEFER == pMsr->u64Value);
        break;
    default:
        AssertFailed();
        return VERR_INTERNAL_ERROR;
    }
}
#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
return VINF_SUCCESS;
          ("Expected %d, I'm %d; cResume=%d exitReason=%RGv exitQualification=%RGv\n",
           (int)pVCpu->hwaccm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification));
Assert(!HWACCMR0SuspendPending());
/* Not allowed to switch modes without reloading the host state (32->64 switcher)!! */
Assert(fWasInLongMode == CPUMIsGuestInLongMode(pVCpu));

/* Safety precaution; looping for too long here can have a very bad effect on the host */
if (RT_UNLIKELY(++cResume > pVM->hwaccm.s.cMaxResumeLoops))
rc = pVCpu->hwaccm.s.vmx.pfnStartVM(pVCpu->hwaccm.s.fResumeVM, pCtx, &pVCpu->hwaccm.s.vmx.VMCSCache, pVM, pVCpu);

/* Possibly the last TSC value seen by the guest (too high); only relevant in TSC-offsetting mode. */
if (!(pVCpu->hwaccm.s.vmx.proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
    TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() + pVCpu->hwaccm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);

TMNotifyEndOfExecution(pVCpu);
VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
Assert(!(ASMGetFlags() & X86_EFL_IF));
STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatExitIOWrite);
rc = IOMIOPortWrite(pVM, uPort, pCtx->eax & uAndVal, cbSize);
if (rc == VINF_IOM_HC_IOPORT_WRITE)
    HWACCMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);
case VMX_EXIT_APIC_ACCESS:          /* 44 APIC access. Guest software attempted to access memory at a physical address on the APIC-access page. */
case VMX_EXIT_RDMSR:                /* 31 RDMSR. Guest software attempted to execute RDMSR. */
case VMX_EXIT_WRMSR:                /* 32 WRMSR. Guest software attempted to execute WRMSR. */
case VMX_EXIT_MONITOR:              /* 39 Guest software attempted to execute MONITOR. */
case VMX_EXIT_PAUSE:                /* 40 Guest software attempted to execute PAUSE. */
    /* Note: If we decide to emulate them here, then we must sync the MSRs that could have been changed (sysenter, fs/gs base)!!! */
    rc = VERR_EM_INTERPRETER;
    break;

case VMX_EXIT_IRQ_WINDOW:           /* 7 Interrupt window. */